-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathserver.py
More file actions
282 lines (234 loc) · 9.42 KB
/
server.py
File metadata and controls
282 lines (234 loc) · 9.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
import json
import os
import time
from dotenv import load_dotenv
from flask import Flask, render_template, request, Response, jsonify
from flask_cors import CORS
from scripts import the_big_dipper
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from prometheus_client import Counter, Histogram, Summary, Gauge, Info, generate_latest, REGISTRY, CollectorRegistry

# Load configuration (API keys, USE_CPU, ...) from a local .env file.
load_dotenv()

app = Flask(__name__)
CORS(app)

# Directory where your resource text files are stored
RESOURCES_DIR = "static/resources"

# Archetype dataset backing the bar/doughnut endpoints; loaded once at startup.
df = pd.read_csv("static/assets/facebook_dream_archetypes.csv")

# Module-level request state: the last submitted dream text and its detected
# archetype, written by /llm and read by the visualization endpoints.
# NOTE(review): these globals are shared across ALL clients — concurrent users
# will overwrite each other's state; consider per-session storage.
dream_text = ""
selected_archetype = ""

# Define Prometheus metrics
DREAM_SUBMISSIONS = Counter('dream_submissions_total', 'Total number of dreams submitted', ['status'])
ENDPOINT_REQUESTS = Counter('endpoint_requests_total', 'Total requests per endpoint', ['endpoint', 'method', 'status_code'])
REQUEST_LATENCY = Histogram('request_latency_seconds', 'Request latency in seconds', ['endpoint'])
DREAM_PROCESSING_TIME = Summary('dream_processing_seconds', 'Time spent processing dreams')
ACTIVE_USERS = Gauge('active_users', 'Number of active users')
APP_INFO = Info('dream_analyzer', 'Dream analyzer application information')

# Set application info
APP_INFO.info({'version': '1.0.0', 'maintainer': 'Dream Team'})

# Archetype distribution gauge
ARCHETYPE_DISTRIBUTION = Gauge('archetype_distribution', 'Distribution of dream archetypes', ['archetype'])

# Initialize distribution based on data frame
for archetype, count in df['archetype'].value_counts().items():
    ARCHETYPE_DISTRIBUTION.labels(archetype).set(count)
# Custom JSON encoder so Flask responses can carry NumPy values directly.
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to native types."""

    # (numpy type, converter) pairs checked in order for each non-JSON object.
    _CONVERSIONS = (
        (np.integer, int),
        (np.floating, float),
        (np.ndarray, lambda arr: arr.tolist()),
    )

    def default(self, obj):
        """Convert NumPy values; defer everything else to the base encoder."""
        for np_type, convert in self._CONVERSIONS:
            if isinstance(obj, np_type):
                return convert(obj)
        # Base class raises TypeError for unserializable objects.
        return super().default(obj)
# Configure Flask to use the custom encoder
# NOTE(review): `app.json_encoder` is the legacy (pre-Flask-2.2) hook and is
# ignored on Flask >= 2.3 (replaced by `app.json` providers) — confirm the
# pinned Flask version actually honors this attribute.
app.json_encoder = NumpyEncoder
def json_listify(data: dict) -> str:
    """Serialize *data* as a JSON array of ``{"_id_": key, "_text_": value}``.

    Each key/value pair of the mapping becomes one object in the array,
    preserving the dict's insertion order.
    """
    records = [{"_id_": key, "_text_": value} for key, value in data.items()]
    return json.dumps(records)
# Generate synthetic six-month trend data per archetype for the line chart.
def generate_time_series_data():
    """Build demo time-series data for the five charted archetypes.

    Returns a dict with 'dates' (six 'YYYY-MM' labels, oldest first) and
    'data' (one {'archetype', 'values'} entry per archetype). Values are
    random: a base level plus a linear trend plus noise, floored at 1.
    """
    archetype_names = ['explorer', 'everyman', 'hero', 'outlaw', 'sage']
    now = datetime.now()
    # Step back in ~monthly (30-day) increments, oldest label first.
    month_labels = [(now - timedelta(days=step * 30)).strftime('%Y-%m')
                    for step in range(5, -1, -1)]

    series = []
    for name in archetype_names:
        base_level = np.random.randint(5, 15)
        direction = np.random.choice([-1, 0, 1])  # rising, flat, or falling
        # Trend + noise per month, clamped to >= 1 and cast to plain int
        # so the result is JSON-serializable without a custom encoder.
        points = [int(max(1, base_level + direction * step + np.random.randint(-3, 4)))
                  for step in range(len(month_labels))]
        series.append({'archetype': name, 'values': points})

    return {'dates': month_labels, 'data': series}
# Calculate a 0-100 "rarity" score for an archetype (higher = rarer).
def calculate_rarity_score(archetype):
    """Score how rare *archetype* is, with some random jitter.

    Rarity is inversely proportional to a fixed commonness weight (unknown
    archetypes default to 0.5); Gaussian noise is added for variability and
    the result is clamped to [0, 100] and rounded to one decimal place.
    """
    # Commonness weights: the larger the weight, the more common the archetype.
    commonness = {
        'explorer': 0.3,
        'everyman': 1,
        'hero': 0.15,
        'outlaw': 0.1,
        'sage': 0.1,
        'creator': 0.1,
        'caregiver': 0.5,
        'lover': 0.7,
    }
    # Rare archetypes (small weight) get a high base score.
    base_score = 100 - (commonness.get(archetype, 0.5) * 100)
    # Gaussian jitter for variability between calls.
    jittered = base_score + np.random.normal(0, 10)
    # Clamp to [0, 100]; return a native float for JSON serialization.
    clamped = min(100, max(0, jittered))
    return float(round(clamped, 1))
# Middleware for tracking request metrics
@app.before_request
def before_request():
    # Stamp the request start time so after_request() can compute latency.
    request.start_time = time.time()
    # Count this request as in-flight; decremented in after_request().
    ACTIVE_USERS.inc()
@app.after_request
def after_request(response):
    # Latency measured from the timestamp set in before_request().
    # NOTE(review): if before_request ever fails to run (e.g. an error before
    # it fires), request.start_time is missing and this raises AttributeError
    # — confirm the hook ordering covers all paths.
    request_latency = time.time() - request.start_time
    # Per-endpoint request counter, labelled by path/method/status.
    ENDPOINT_REQUESTS.labels(
        endpoint=request.path,
        method=request.method,
        status_code=response.status_code
    ).inc()
    REQUEST_LATENCY.labels(endpoint=request.path).observe(request_latency)
    # Request finished: no longer in-flight.
    ACTIVE_USERS.dec()
    return response
@app.route("/", methods=["GET"])
def home():
    """Serve the main single-page UI."""
    return render_template("index.html")
# Prometheus metrics endpoint on the same server
@app.route('/metrics')
def metrics():
    """Expose all registered Prometheus metrics in text exposition format."""
    return Response(generate_latest(REGISTRY), mimetype='text/plain')
# Process dream text and return interpretation
@app.route("/llm", methods=["POST"])
def llm_():
    """Run the LLM pipeline on a submitted dream and return interpretations.

    Reads the 'dream' form field, stores it (and the detected archetype) in
    the module-level globals consumed by the visualization endpoints, updates
    the Prometheus metrics, and returns the interpretation as a JSON list.
    Returns HTTP 500 with a JSON error body if processing fails.
    """
    global dream_text
    global selected_archetype
    # The route already restricts to POST, so the old `if request.method ==
    # "POST"` guard was redundant — worse, had it ever been false the function
    # fell through to an implicit `return None`, which crashes Flask.
    # Missing 'dream' field raises KeyError, which Flask turns into HTTP 400.
    dream_text = request.form["dream"]
    # Track dream processing time
    with DREAM_PROCESSING_TIME.time():
        try:
            data = the_big_dipper.main(dream_text=dream_text)
            selected_archetype = data['archetype']
            # Update archetype distribution
            ARCHETYPE_DISTRIBUTION.labels(selected_archetype).inc()
            # Record successful submission
            DREAM_SUBMISSIONS.labels(status='success').inc()
            response = Response(json_listify(data), mimetype="application/json")
            response.headers.add("Access-Control-Allow-Origin", "*")
            return response
        except Exception as e:
            # Record failed submission
            DREAM_SUBMISSIONS.labels(status='error').inc()
            print(f"Error processing dream: {e}")
            return jsonify({"error": "Failed to process dream"}), 500
# Visualization API endpoints
@app.route('/get_bar_data')
def get_bar_data():
    """Return overall archetype frequencies from the CSV, Chart.js-shaped."""
    frequency = df['archetype'].value_counts()
    # Cast NumPy scalars to plain Python ints so jsonify can serialize them.
    payload = {
        'labels': list(frequency.index),
        'values': [int(count) for count in frequency],
    }
    return jsonify(payload)
@app.route('/get_doughnut_data')
def get_doughnut_data():
    """Return archetype counts among vector-store matches for the last dream.

    Chart.js-shaped: {'labels': [...], 'values': [...]}.
    """
    # NOTE(review): the vector store is re-read from disk on every request —
    # looks expensive; consider loading once at module level (confirm size).
    __v__ = the_big_dipper.vector_store_reader(
        load_dir_path="scripts/pickles",
        store_names=["facebook_dream_archetypes_store.dat"],
        use_cpu=int(os.getenv("USE_CPU", "1")),
    )
    # NOTE(review): `dream_text` is "" until /llm has been called at least
    # once — verify similarity_search behaves sensibly on an empty query.
    results = __v__.vector_store["facebook_dream_archetypes_store"].similarity_search(dream_text)
    # Map each search hit back to its CSV row's archetype and tally them.
    counts = pd.Series([df.loc[_.metadata["row"]]["archetype"] for _ in results]).value_counts()
    # Convert NumPy types to native Python types
    data = {
        'labels': counts.index.tolist(),
        'values': [int(val) for val in counts.values.tolist()]
    }
    return jsonify(data)
@app.route('/get_time_series_data')
def get_time_series_data():
    """Serve the synthetic six-month archetype trend data as JSON."""
    return jsonify(generate_time_series_data())
@app.route('/get_rarity_score')
def get_rarity_score():
    """Return the rarity score for the most recently detected archetype."""
    return jsonify({
        'score': calculate_rarity_score(selected_archetype),
        'archetype': selected_archetype,
    })
@app.route('/get_resources/<archetype>')
def get_resources(archetype):
    """
    Load resources for a specific archetype from JSON files.

    Each archetype should have a JSON file in RESOURCES_DIR; falls back to
    `default.json` when no archetype-specific file exists, and finally to a
    hard-coded list if anything goes wrong (missing/corrupt files).
    """
    try:
        # Sanitize the archetype name to prevent directory traversal
        archetype = os.path.basename(archetype.lower())
        # Path to the archetype's resource file
        resource_file = os.path.join(RESOURCES_DIR, f"{archetype}.json")
        if not os.path.exists(resource_file):
            # Load default resources if archetype-specific file doesn't exist
            resource_file = os.path.join(RESOURCES_DIR, "default.json")
        # Explicit UTF-8: JSON files are UTF-8 by spec; the old calls relied
        # on the platform default encoding, which breaks on Windows.
        with open(resource_file, 'r', encoding='utf-8') as f:
            resources = json.load(f)
        print("sent reading notes...")
        return jsonify(resources)
    except Exception as e:
        # Any failure (missing default.json, invalid JSON) lands here.
        print(f"Error loading resources: {e}")
        # Return default resources as fallback
        default_resources = [
            {
                "title": "Understanding Jungian Archetypes",
                "description": "An introduction to Carl Jung's theory of archetypes and their significance in dream interpretation.",
                "links": [
                    {"type": "Article", "url": "https://conorneill.com/2018/04/21/understanding-personality-the-12-jungian-archetypes/"},
                ]
            },
            {
                "title": "Dream Symbolism Dictionary",
                "description": "Comprehensive guide to common dream symbols and their potential meanings across cultures.",
                "links": [
                    {"type": "Reference", "url": "https://www.dreamdictionary.org/"}
                ]
            }
        ]
        return jsonify(default_resources)
if __name__ == "__main__":
    print("Flask server started on port 8000 (includes Prometheus metrics at /metrics)")
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader —
    # fine for local development, but it must never be exposed in production
    # (the debugger allows arbitrary code execution).
    app.run(port=8000, debug=True)