-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtasks.py
More file actions
334 lines (281 loc) · 8.6 KB
/
tasks.py
File metadata and controls
334 lines (281 loc) · 8.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
# tasks.py
import gc
import time
from io import StringIO

import pandas as pd
import redis
from celery import Celery

from bin.run_masstRecords_queries import get_library_table, get_masst_and_redu_tables
from bin.workflow_stepwise import retrieve_raw_data_matches
# Connect to Redis (broker) and store results back in Redis
# Both the message broker and the result backend point at the same Redis
# instance, resolved via the "structure-masst-redis" service hostname.
celery_app = Celery(
    "structuremasst_tasks",
    broker="redis://structure-masst-redis",
    backend="redis://structure-masst-redis"
)
# Task results are kept for 900 seconds (15 minutes) before the backend
# expires them, so callers must collect results promptly.
celery_app.conf.update(
    result_expires=900
)
from celery.signals import worker_init
from bin.shared_data import get_redu_table_cached
from bin.run_masstRecords_queries import _get_fetcher
import importlib.util
@worker_init.connect
def preload_redu_table(**kwargs):
    """
    Warm the ReDU table cache once in the Celery parent process, before the
    worker pool forks, so every child inherits it via copy-on-write.
    """
    # Celery runs in its own context, so config.py is loaded dynamically
    # from its known container path rather than via a normal import.
    config_path = "/app/config.py"
    spec = importlib.util.spec_from_file_location("config", config_path)
    config = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(config)

    # Build the fetcher (SQLite or Datasette) from the loaded settings.
    fetch = _get_fetcher(
        config.PATH_TO_SQLITE,
        config.MASSTRECORDS_ENDPOINT,
        config.MASSTRECORDS_TIMEOUT,
    )

    # Discover the ReDU table's column names at runtime, then drop the
    # heavy/unused ones before caching.
    columns_to_exclude = {
        "filename", "TermsofPosition", "ComorbidityListDOIDIndex", "SampleCollectionDateandTime",
        "ENVOBroadScale", "ENVOLocalScale", "ENVOMediumScale", "qiita_sample_name",
        "UniqueSubjectID", "UBERONOntologyIndex", "DOIDOntologyIndex", "ENVOEnvironmentBiomeIndex",
        "ENVOEnvironmentMaterialIndex", "ENVOLocalScaleIndex", "ENVOBroadScaleIndex",
        "ENVOMediumScaleIndex", "classification", "MS2spectra_count",
        "InternalStandardsUsed", "HumanPopulationDensity",
    }
    sql = "SELECT name FROM pragma_table_info('redu_table')"
    all_columns = fetch(sql)["name"].tolist()
    redu_columns_list = [name for name in all_columns if name not in columns_to_exclude]

    # Populate the per-process ReDU cache exactly once.
    print("[INIT] Preloading ReDU table for this worker...")
    get_redu_table_cached(fetch, redu_columns_list, config.PATH_TO_SQLITE)
    print("[INIT] Worker preload complete.")
@celery_app.task()
def heartbeat_task():
    """Liveness probe: return a fixed message proving a worker is responsive."""
    return "Structure MASST worker is alive."
def run_get_library_table(
    smiles,
    searchtype,
    tanimoto_threshold,
    sqlite_path,
    api_endpoint,
    timeout
):
    """
    Dispatch ``get_library_table`` to a Celery worker and return its result.

    Falls back to running the task function in-process when the broker or a
    worker cannot be reached (e.g. Redis is down).

    Args mirror ``get_library_table``: the query SMILES, search type,
    Tanimoto threshold, and the SQLite/Datasette connection settings.

    Returns:
        pd.DataFrame: the library structure-match table.
    """
    try:
        # Run the heavy query on a worker. ``AsyncResult.get()`` already
        # blocks until completion and re-raises worker-side errors, so the
        # previous manual ready()/sleep polling loop was redundant.
        async_result = _run_get_library_table.delay(
            smiles,
            searchtype,
            tanimoto_threshold,
            sqlite_path,
            api_endpoint,
            timeout
        )
        payload = async_result.get()
    except Exception:
        # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
        # catch Exception instead. On any broker/worker failure, fall back to
        # calling the task function directly in this process.
        payload = _run_get_library_table(
            smiles,
            searchtype,
            tanimoto_threshold,
            sqlite_path,
            api_endpoint,
            timeout
        )
    # pandas deprecated passing a literal JSON string to read_json; wrap the
    # payload in StringIO.
    return pd.read_json(StringIO(payload))
@celery_app.task()
def _run_get_library_table(smiles, searchtype, tanimoto_threshold, sqlite_path, api_endpoint, timeout):
    """
    Celery task wrapper around ``get_library_table``.

    The resulting DataFrame is serialized to records-oriented JSON so it can
    travel through the Redis result backend.
    """
    library_df = get_library_table(
        smiles=smiles,
        searchtype=searchtype,
        tanimoto_threshold=tanimoto_threshold,
        sqlite_path=sqlite_path,
        api_endpoint=api_endpoint,
        timeout=timeout,
    )
    return library_df.to_json(orient="records")
def run_get_masst_and_redu_tables(
    df_for_name,
    cosine_threshold,
    matching_peaks,
    sqlite_path,
    api_endpoint,
    timeout,
    chunk_size=200,
):
    """
    Dispatch ``get_masst_and_redu_tables`` to a Celery worker.

    Falls back to running the task function in-process when the broker or a
    worker cannot be reached.

    Args:
        df_for_name: input DataFrame (serialized to JSON for the worker).
        cosine_threshold, matching_peaks: spectral-match parameters.
        sqlite_path, api_endpoint, timeout: data-source connection settings.
        chunk_size: query batch size, forwarded to the task (default 200).

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: ``(masst_df, redu_df)``.
    """
    # Serialize once, outside the try, so a serialization error is not
    # mistaken for a broker failure.
    df_for_name_json = df_for_name.to_json(orient="records")
    try:
        # ``AsyncResult.get()`` blocks until the task finishes and re-raises
        # worker-side errors — no manual ready()/sleep polling needed.
        async_result = _run_get_masst_and_redu_tables.delay(
            df_for_name_json,
            cosine_threshold,
            matching_peaks,
            sqlite_path,
            api_endpoint,
            timeout,
            chunk_size,
        )
        result_dict = async_result.get()
    except Exception:
        # Catch Exception, not a bare ``except:`` (which would also swallow
        # KeyboardInterrupt/SystemExit). Fall back to an in-process call.
        result_dict = _run_get_masst_and_redu_tables(
            df_for_name_json,
            cosine_threshold,
            matching_peaks,
            sqlite_path,
            api_endpoint,
            timeout,
            chunk_size,
        )
    # read_json on a literal string is deprecated; wrap in StringIO.
    masst_df = pd.read_json(StringIO(result_dict["masst"]))
    redu_df = pd.read_json(StringIO(result_dict["redu"]))
    return masst_df, redu_df
@celery_app.task()
def _run_get_masst_and_redu_tables(
    df_for_name_json,
    cosine_threshold,
    matching_peaks,
    sqlite_path,
    api_endpoint,
    timeout,
    chunk_size=200,
):
    """
    Celery task: run ``get_masst_and_redu_tables`` on a worker.

    Args:
        df_for_name_json: records-oriented JSON of the input DataFrame.
        cosine_threshold, matching_peaks: spectral-match parameters.
        sqlite_path, api_endpoint, timeout: data-source connection settings.
        chunk_size: query batch size (default 200).

    Returns:
        dict: keys "masst" and "redu", each a records-oriented JSON string
        (JSON keeps the payload serializable through the Redis backend).
    """
    # pandas deprecated read_json on a literal string; wrap in StringIO.
    df_for_name = pd.read_json(StringIO(df_for_name_json))
    # Run the actual heavy function
    masst_df, redu_df = get_masst_and_redu_tables(
        df_for_name,
        cosine_threshold=cosine_threshold,
        matching_peaks=matching_peaks,
        sqlite_path=sqlite_path,
        api_endpoint=api_endpoint,
        timeout=timeout,
        chunk_size=chunk_size,
    )
    return {
        "masst": masst_df.to_json(orient="records"),
        "redu": redu_df.to_json(orient="records"),
    }
def run_retrieve_raw_data_matches(
    df_for_name,
    database,
    precursor_mz_tol,
    fragment_mz_tol,
    min_cos,
    matching_peaks,
    analog,
    modimass,
    elimination,
    addition,
    modification_condition,
    sqlite_path,
    api_endpoint,
    timeout
):
    """
    Dispatch ``retrieve_raw_data_matches`` to a Celery worker and return the
    resulting ReDU table.

    Falls back to running the task function in-process when the broker or a
    worker cannot be reached.

    Args mirror ``retrieve_raw_data_matches``: the input DataFrame, target
    database, m/z tolerances, match thresholds, analog/modification settings,
    and the SQLite/Datasette connection settings.

    Returns:
        pd.DataFrame: the ReDU table of raw-data matches.
    """
    # Serialize once, outside the try, so a serialization error is not
    # mistaken for a broker failure.
    df_for_name_json = df_for_name.to_json(orient="records")
    try:
        # ``AsyncResult.get()`` blocks until the task finishes and re-raises
        # worker-side errors — no manual ready()/sleep polling needed.
        async_result = _run_retrieve_raw_data_matches.delay(
            df_for_name_json,
            database,
            precursor_mz_tol,
            fragment_mz_tol,
            min_cos,
            matching_peaks,
            analog,
            modimass,
            elimination,
            addition,
            modification_condition,
            sqlite_path,
            api_endpoint,
            timeout
        )
        result_dict = async_result.get()
    except Exception:
        # Catch Exception, not a bare ``except:`` (which would also swallow
        # KeyboardInterrupt/SystemExit). Fall back to an in-process call.
        result_dict = _run_retrieve_raw_data_matches(
            df_for_name_json,
            database,
            precursor_mz_tol,
            fragment_mz_tol,
            min_cos,
            matching_peaks,
            analog,
            modimass,
            elimination,
            addition,
            modification_condition,
            sqlite_path,
            api_endpoint,
            timeout
        )
    # read_json on a literal string is deprecated; wrap in StringIO.
    return pd.read_json(StringIO(result_dict["redu"]))
@celery_app.task()
def _run_retrieve_raw_data_matches(
    df_for_name_json,
    database,
    precursor_mz_tol,
    fragment_mz_tol,
    min_cos,
    matching_peaks,
    analog,
    modimass,
    elimination,
    addition,
    modification_condition,
    sqlite_path,
    api_endpoint,
    timeout
):
    """
    Celery task: run ``retrieve_raw_data_matches`` and return the ReDU table.

    Args:
        df_for_name_json: records-oriented JSON of the input DataFrame.
        database, precursor_mz_tol, fragment_mz_tol, min_cos, matching_peaks,
        analog, modimass, elimination, addition, modification_condition:
            forwarded search parameters.
        sqlite_path, api_endpoint, timeout: data-source connection settings.

    Returns:
        dict: key "redu" mapping to a records-oriented JSON string.
    """
    # pandas deprecated read_json on a literal string; wrap in StringIO.
    df_for_name = pd.read_json(StringIO(df_for_name_json))
    # Only the ReDU table is returned to callers; the MASST table is
    # discarded, so bind it to ``_`` rather than an unused name.
    _, redu_df = retrieve_raw_data_matches(
        df_for_name,
        database=database,
        precursor_mz_tol=precursor_mz_tol,
        fragment_mz_tol=fragment_mz_tol,
        min_cos=min_cos,
        matching_peaks=matching_peaks,
        analog=analog,
        modimass=modimass,
        elimination=elimination,
        addition=addition,
        modification_condition=modification_condition,
        sqlite_path=sqlite_path,
        api_endpoint=api_endpoint,
        timeout=timeout,
    )
    return {
        "redu": redu_df.to_json(orient="records"),
    }