@@ -99,7 +99,7 @@ def outputIndicator(doc, indicatorName, itype, stype=None, text=None, added_filt
9999 for span in indicator ['offsets' ]:
100100 indicator ['text' ].append (text [int (span [0 ]):int (span [0 ]) + int (span [1 ])])
101101
102- return indicator
102+ return indicator , doc
103103
104104
105105def process_text (text , doc = None , options = None ):
@@ -121,14 +121,14 @@ def process_text(text, doc=None, options=None):
121121 continue
122122 indicator = writing_observer .nlp_indicators .INDICATORS [item ]
123123 (id , label , infoType , select , filterInfo , summaryType ) = indicator
124- results [id ] = outputIndicator (doc , select , infoType , stype = summaryType , text = text , added_filter = filterInfo )
124+ results [id ], doc = outputIndicator (doc , select , infoType , stype = summaryType , text = text , added_filter = filterInfo )
125125 results [id ].update ({
126126 "label" : label ,
127127 "type" : infoType ,
128128 "name" : id ,
129129 "summary_type" : summaryType
130130 })
131- return results
131+ return results , doc
132132
133133
134134async def process_texts_serial (texts , doc = None , options = None ):
@@ -307,7 +307,8 @@ async def process_and_cache_missing_features(unfound_features, found_features, r
307307 doc = spacy .tokens .Doc (nlp .vocab ).from_json (text_cache_data ['spacy_doc' ])
308308
309309 # Get the nlp features for the text and update the cache
310- annotated_text = process_text (writing .get ("text" , "" ), doc , list (unfound_features ))
310+ annotated_text , doc = process_text (writing .get ("text" , "" ), doc , list (unfound_features ))
311+ text_cache_data ['spacy_doc' ] = doc .to_json ()
311312 text_cache_data ['running_features' ] = json .dumps ([])
312313 text_cache_data ['stop_time' ] = timestamp ()
313314 text_cache_data ['features_available' ].update (annotated_text )