#!/usr/bin/env python3
"""
Simple but Complete Youtu-GraphRAG Backend
Integrates real GraphRAG functionality with a simple interface
"""
import os
import re
import sys
import json
import ast
import asyncio
import glob
import shutil
from typing import List, Dict, Optional
from datetime import datetime

# Add project root to path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# FastAPI imports
from fastapi import FastAPI, UploadFile, File, HTTPException, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
import uvicorn

from utils.logger import logger

# Import document parser
try:
    from utils.document_parser import get_parser
    DOCUMENT_PARSER_AVAILABLE = True
except ImportError as e:
    DOCUMENT_PARSER_AVAILABLE = False
    logger.warning(f"Document parser not available: {e}")

# Try to import GraphRAG components
try:
    from models.constructor import kt_gen as constructor
    from models.retriever import agentic_decomposer as decomposer, enhanced_kt_retriever as retriever
    from config import get_config, ConfigManager
    GRAPHRAG_AVAILABLE = True
    logger.info("✅ GraphRAG components loaded successfully")
except ImportError as e:
    GRAPHRAG_AVAILABLE = False
    logger.error(f"⚠️ GraphRAG components not available: {e}")
app = FastAPI(title="Youtu-GraphRAG Unified Interface", version="1.0.0")

# Mount static files (assets directory)
app.mount("/assets", StaticFiles(directory="assets"), name="assets")
# Mount frontend directory for frontend assets
app.mount("/frontend", StaticFiles(directory="frontend"), name="frontend")

# CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global variables
active_connections: Dict[str, WebSocket] = {}
config = None
class ConnectionManager:
    def __init__(self):
        self.active_connections: Dict[str, WebSocket] = {}

    async def connect(self, websocket: WebSocket, client_id: str):
        await websocket.accept()
        self.active_connections[client_id] = websocket

    def disconnect(self, client_id: str):
        if client_id in self.active_connections:
            del self.active_connections[client_id]

    async def send_message(self, message: dict, client_id: str):
        if client_id in self.active_connections:
            try:
                await self.active_connections[client_id].send_text(json.dumps(message))
            except Exception as e:
                logger.error(f"Error sending message to {client_id}: {e}")
                self.disconnect(client_id)

manager = ConnectionManager()
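# Illustrative use of the manager above (client id is hypothetical): sending to
# a client_id that is not connected is a silent no-op, and a failed send evicts
# the socket via disconnect():
#   await manager.send_message({"type": "progress", "progress": 50}, "client-42")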
# Request/Response models
class FileUploadResponse(BaseModel):
    success: bool
    message: str
    dataset_name: Optional[str] = None
    files_count: Optional[int] = None

class GraphConstructionRequest(BaseModel):
    dataset_name: str

class GraphConstructionResponse(BaseModel):
    success: bool
    message: str
    graph_data: Optional[Dict] = None

class QuestionRequest(BaseModel):
    question: str
    dataset_name: str

class QuestionResponse(BaseModel):
    answer: str
    sub_questions: List[Dict]
    retrieved_triples: List[str]
    retrieved_chunks: List[str]
    reasoning_steps: List[Dict]
    visualization_data: Dict
def ensure_demo_schema_exists() -> str:
    """Ensure the default demo schema exists and return its path."""
    os.makedirs("schemas", exist_ok=True)
    schema_path = "schemas/demo.json"
    if not os.path.exists(schema_path):
        demo_schema = {
            "Nodes": [
                "person", "location", "organization", "event", "object",
                "concept", "time_period", "creative_work", "biological_entity", "natural_phenomenon"
            ],
            "Relations": [
                "is_a", "part_of", "located_in", "created_by", "used_by", "participates_in",
                "related_to", "belongs_to", "influences", "precedes", "arrives_in", "comparable_to"
            ],
            "Attributes": [
                "name", "date", "size", "type", "description", "status",
                "quantity", "value", "position", "duration", "time"
            ]
        }
        with open(schema_path, 'w') as f:
            json.dump(demo_schema, f, indent=2)
    return schema_path

def get_schema_path_for_dataset(dataset_name: str) -> str:
    """Return the dataset-specific schema if present; otherwise fall back to the demo schema."""
    if dataset_name and dataset_name != "demo":
        ds_schema = f"schemas/{dataset_name}.json"
        if os.path.exists(ds_schema):
            return ds_schema
    return ensure_demo_schema_exists()
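# e.g. get_schema_path_for_dataset("my_docs") returns "schemas/my_docs.json" when
# that file exists, otherwise "schemas/demo.json" (created on demand above);
# "my_docs" is a hypothetical dataset name for illustration.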
async def send_progress_update(client_id: str, stage: str, progress: int, message: str):
    """Send a progress update via WebSocket"""
    await manager.send_message({
        "type": "progress",
        "stage": stage,
        "progress": progress,
        "message": message,
        "timestamp": datetime.now().isoformat()
    }, client_id)
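# Payload shape as seen by the frontend (keys match the dict built above;
# values are illustrative):
#   {"type": "progress", "stage": "upload", "progress": 42,
#    "message": "Processed report.pdf", "timestamp": "2024-01-01T12:00:00"}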
# -------- Encoding detection helpers --------
def _detect_encoding_from_bytes(data: bytes) -> Optional[str]:
    """Detect encoding using chardet if available; return lower-cased encoding name or None."""
    try:
        import chardet  # type: ignore
        result = chardet.detect(data) or {}
        enc = result.get("encoding")
        if enc:
            return enc.lower()
    except Exception:
        pass
    return None
def decode_bytes_with_detection(data: bytes) -> str:
    """Decode bytes to string with encoding detection and robust fallbacks.

    Order: detected -> utf-8/utf-8-sig -> common Chinese encodings -> utf-16 variants -> latin-1 -> replace.
    """
    candidates = []
    detected = _detect_encoding_from_bytes(data)
    if detected:
        candidates.append(detected)
    candidates.extend([
        "utf-8", "utf-8-sig", "gb18030", "gbk", "big5",
        "utf-16", "utf-16le", "utf-16be", "latin-1"
    ])
    # De-duplicate while preserving order
    tried = set()
    for enc in candidates:
        if enc in tried or not enc:
            continue
        tried.add(enc)
        try:
            return data.decode(enc)
        except Exception:
            continue
    # Last resort
    return data.decode("utf-8", errors="replace")
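# Minimal usage sketch (the file name is hypothetical; works whether or not
# chardet is installed, since the static fallback chain applies either way,
# and the call never raises thanks to errors="replace"):
#   raw = open("notes.txt", "rb").read()
#   text = decode_bytes_with_detection(raw)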
async def clear_cache_files(dataset_name: str):
    """Clear all cache files for a dataset before graph construction"""
    try:
        # Clear FAISS cache files
        faiss_cache_dir = f"retriever/faiss_cache_new/{dataset_name}"
        if os.path.exists(faiss_cache_dir):
            shutil.rmtree(faiss_cache_dir)
            logger.info(f"Cleared FAISS cache directory: {faiss_cache_dir}")

        # Clear output chunks
        chunk_file = f"output/chunks/{dataset_name}.txt"
        if os.path.exists(chunk_file):
            os.remove(chunk_file)
            logger.info(f"Cleared chunk file: {chunk_file}")

        # Clear output graphs
        graph_file = f"output/graphs/{dataset_name}_new.json"
        if os.path.exists(graph_file):
            os.remove(graph_file)
            logger.info(f"Cleared graph file: {graph_file}")

        # Clear any other cache files matching the dataset name pattern
        cache_patterns = [
            f"output/logs/{dataset_name}_*.log",
            f"output/chunks/{dataset_name}_*",
            f"output/graphs/{dataset_name}_*"
        ]
        for pattern in cache_patterns:
            for file_path in glob.glob(pattern):
                try:
                    if os.path.isfile(file_path):
                        os.remove(file_path)
                        logger.info(f"Cleared cache file: {file_path}")
                    elif os.path.isdir(file_path):
                        shutil.rmtree(file_path)
                        logger.info(f"Cleared cache directory: {file_path}")
                except Exception as e:
                    logger.warning(f"Failed to clear {file_path}: {e}")

        logger.info(f"Cache cleanup completed for dataset: {dataset_name}")
    except Exception as e:
        logger.error(f"Error clearing cache files for {dataset_name}: {e}")
        # Don't raise: cleanup is best-effort, so just log the error
# Serve frontend HTML
@app.get("/")
async def read_root():
    frontend_path = "frontend/index.html"
    if os.path.exists(frontend_path):
        return FileResponse(frontend_path)
    return {"message": "Youtu-GraphRAG Unified Interface is running!", "status": "ok"}

@app.get("/api/status")
async def get_status():
    return {
        "message": "Youtu-GraphRAG Unified Interface is running!",
        "status": "ok",
        "graphrag_available": GRAPHRAG_AVAILABLE
    }
@app.websocket("/ws/{client_id}")
async def websocket_endpoint(websocket: WebSocket, client_id: str):
    await manager.connect(websocket, client_id)
    try:
        while True:
            # Keep the connection alive; inbound messages are currently ignored.
            await websocket.receive_text()
    except WebSocketDisconnect:
        manager.disconnect(client_id)
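# Minimal Python client sketch (assumes the optional `websockets` package and a
# server on localhost:8000; the client_id "default" is arbitrary):
#   import asyncio, websockets
#   async def listen():
#       async with websockets.connect("ws://localhost:8000/ws/default") as ws:
#           while True:
#               print(await ws.recv())
#   asyncio.run(listen())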
@app.post("/api/upload", response_model=FileUploadResponse)
async def upload_files(files: List[UploadFile] = File(...), client_id: str = "default"):
    """Upload files and prepare for graph construction"""
    try:
        # Generate dataset name based on file count
        if len(files) == 1:
            # Single file: use its name
            main_file = files[0]
            original_name = os.path.splitext(main_file.filename)[0]
            # Clean filename to be filesystem-safe
            dataset_name = "".join(c for c in original_name if c.isalnum() or c in (' ', '-', '_')).rstrip()
            dataset_name = dataset_name.replace(' ', '_')
        else:
            # Multiple files: create a descriptive name with date
            date_str = datetime.now().strftime("%Y%m%d")
            dataset_name = f"{len(files)}files_{date_str}"

        # Add counter if dataset already exists
        base_name = dataset_name
        counter = 1
        while os.path.exists(f"data/uploaded/{dataset_name}"):
            dataset_name = f"{base_name}_{counter}"
            counter += 1

        upload_dir = f"data/uploaded/{dataset_name}"
        os.makedirs(upload_dir, exist_ok=True)
        await send_progress_update(client_id, "upload", 10, "Starting file upload...")

        # Process uploaded files
        corpus_data = []
        skipped_files: List[str] = []
        processed_count = 0
        allowed_extensions = {".txt", ".md", ".json", ".pdf", ".docx", ".doc"}

        # Initialize document parser if needed
        doc_parser = None
        if DOCUMENT_PARSER_AVAILABLE:
            doc_parser = get_parser()

        for i, file in enumerate(files):
            file_path = os.path.join(upload_dir, file.filename)
            with open(file_path, "wb") as buffer:
                content_bytes = await file.read()
                buffer.write(content_bytes)

            # Process file content using encoding detection
            filename_lower = (file.filename or "").lower()
            ext = os.path.splitext(filename_lower)[1]
            if ext not in allowed_extensions:
                # Skip unsupported file types to avoid processing binary files as text
                logger.warning(f"Skipping unsupported file type: {file.filename}")
                skipped_files.append(file.filename)
                progress = 10 + (i + 1) * 80 // len(files)
                await send_progress_update(client_id, "upload", progress, f"Skipped unsupported file: {file.filename}")
                continue

            # Handle PDF and DOCX/DOC files with the document parser
            if ext in ['.pdf', '.docx', '.doc']:
                if not doc_parser:
                    logger.warning(f"Document parser not available, skipping {file.filename}")
                    skipped_files.append(file.filename)
                    progress = 10 + (i + 1) * 80 // len(files)
                    await send_progress_update(client_id, "upload", progress, f"Skipped {file.filename} (parser unavailable)")
                    continue
                try:
                    text = doc_parser.parse_file(file_path, ext)
                    if text and text.strip():
                        corpus_data.append({
                            "title": file.filename,
                            "text": text
                        })
                        processed_count += 1
                        await send_progress_update(client_id, "upload", 10 + (i + 1) * 80 // len(files), f"Parsed {file.filename}")
                    else:
                        logger.warning(f"No text extracted from {file.filename}")
                        skipped_files.append(file.filename)
                        await send_progress_update(client_id, "upload", 10 + (i + 1) * 80 // len(files), f"No text in {file.filename}")
                except Exception as e:
                    logger.error(f"Error parsing {file.filename}: {e}")
                    skipped_files.append(file.filename)
                    await send_progress_update(client_id, "upload", 10 + (i + 1) * 80 // len(files), f"Failed to parse {file.filename}")
                continue

            # Treat plain text formats explicitly (.txt and .md)
            if filename_lower.endswith(('.txt', '.md')):
                text = decode_bytes_with_detection(content_bytes)
                corpus_data.append({
                    "title": file.filename,
                    "text": text
                })
                processed_count += 1
            elif filename_lower.endswith('.json'):
                try:
                    json_text = decode_bytes_with_detection(content_bytes)
                    data_obj = json.loads(json_text)
                    if isinstance(data_obj, list):
                        corpus_data.extend(data_obj)
                    else:
                        corpus_data.append(data_obj)
                    processed_count += 1
                except Exception:
                    # If JSON parsing fails, treat the file as plain text
                    text = decode_bytes_with_detection(content_bytes)
                    corpus_data.append({
                        "title": file.filename,
                        "text": text
                    })
                    processed_count += 1  # still counts: content was added to the corpus

            progress = 10 + (i + 1) * 80 // len(files)
            await send_progress_update(client_id, "upload", progress, f"Processed {file.filename}")

        # Ensure at least one valid file was processed
        if processed_count == 0:
            msg = "No supported files were uploaded. Allowed: .txt, .md, .json, .pdf, .docx, .doc"
            if skipped_files:
                msg += f"; skipped: {', '.join(skipped_files)}"
            await send_progress_update(client_id, "upload", 0, msg)
            raise HTTPException(status_code=400, detail=msg)

        # Save corpus data
        corpus_path = f"{upload_dir}/corpus.json"
        with open(corpus_path, 'w', encoding='utf-8') as f:
            json.dump(corpus_data, f, ensure_ascii=False, indent=2)

        # Create dataset configuration
        await create_dataset_config()
        await send_progress_update(client_id, "upload", 100, "Upload completed successfully!")

        msg_ok = "Files uploaded successfully"
        if skipped_files:
            msg_ok += f"; skipped unsupported: {', '.join(skipped_files)}"
        return FileUploadResponse(
            success=True,
            message=msg_ok,
            dataset_name=dataset_name,
            files_count=processed_count
        )
    except HTTPException:
        # Preserve the intended status code (e.g. the 400 above) instead of
        # letting the generic handler convert it to a 500
        raise
    except Exception as e:
        await send_progress_update(client_id, "upload", 0, f"Upload failed: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
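# Illustrative invocation (assumes a local server on port 8000; file names are
# hypothetical):
#   curl -X POST "http://localhost:8000/api/upload?client_id=default" \
#        -F "files=@notes.txt" -F "files=@paper.pdf"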
async def create_dataset_config():
    """Create dataset configuration"""
    # Ensure default demo schema exists
    ensure_demo_schema_exists()
@app.post("/api/construct-graph", response_model=GraphConstructionResponse)
async def construct_graph(request: GraphConstructionRequest, client_id: str = "default"):
"""Construct knowledge graph from uploaded data"""
try:
if not GRAPHRAG_AVAILABLE:
raise HTTPException(status_code=503, detail="GraphRAG components not available. Please install or configure them.")
dataset_name = request.dataset_name
await send_progress_update(client_id, "construction", 2, "Cleaning old cache files...")
# Clear all cache files before construction
await clear_cache_files(dataset_name)
await send_progress_update(client_id, "construction", 5, "Initializing graph builder...")
# Get dataset paths
corpus_path = f"data/uploaded/{dataset_name}/corpus.json"
# Choose schema: dataset-specific or default demo
schema_path = get_schema_path_for_dataset(dataset_name)
if not os.path.exists(corpus_path):
# Try demo dataset
corpus_path = "data/demo/demo_corpus.json"
if not os.path.exists(corpus_path):
raise HTTPException(status_code=404, detail="Dataset not found")
await send_progress_update(client_id, "construction", 10, "Loading configuration and corpus...")
# Initialize config
global config
if config is None:
config = get_config("config/base_config.yaml")
# Initialize KTBuilder
builder = constructor.KTBuilder(
dataset_name,
schema_path,
mode=config.construction.mode,
config=config
)
await send_progress_update(client_id, "construction", 20, "Starting entity-relation extraction...")
# Build knowledge graph
def build_graph_sync():
return builder.build_knowledge_graph(corpus_path)
# Run in executor to avoid blocking
loop = asyncio.get_event_loop()
# Run graph construction without simulated progress updates
knowledge_graph = await loop.run_in_executor(None, build_graph_sync)
await send_progress_update(client_id, "construction", 95, "Preparing visualization data...")
# Load constructed graph for visualization
graph_path = f"output/graphs/{dataset_name}_new.json"
graph_vis_data = await prepare_graph_visualization(graph_path)
await send_progress_update(client_id, "construction", 100, "Graph construction completed!")
# Notify completion via WebSocket
try:
await manager.send_message({
"type": "complete",
"stage": "construction",
"message": "Graph construction completed!",
"timestamp": datetime.now().isoformat()
}, client_id)
except Exception as _e:
logger.warning(f"Failed to send completion message: {_e}")
return GraphConstructionResponse(
success=True,
message="Knowledge graph constructed successfully",
graph_data=graph_vis_data
)
except Exception as e:
await send_progress_update(client_id, "construction", 0, f"Construction failed: {str(e)}")
try:
await manager.send_message({
"type": "error",
"stage": "construction",
"message": f"Construction failed: {str(e)}",
"timestamp": datetime.now().isoformat()
}, client_id)
except Exception as _e:
logger.warning(f"Failed to send error message: {_e}")
raise HTTPException(status_code=500, detail=str(e))
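# Illustrative request (assumes a dataset previously created via /api/upload
# and named "my_docs"; both values are hypothetical):
#   curl -X POST "http://localhost:8000/api/construct-graph?client_id=default" \
#        -H "Content-Type: application/json" -d '{"dataset_name": "my_docs"}'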
async def prepare_graph_visualization(graph_path: str) -> Dict:
    """Prepare graph data for visualization"""
    try:
        if os.path.exists(graph_path):
            with open(graph_path, 'r', encoding='utf-8') as f:
                graph_data = json.load(f)
        else:
            return {"nodes": [], "links": [], "categories": [], "stats": {}}

        # Handle different graph data formats
        if isinstance(graph_data, list):
            # GraphRAG format: list of relationships
            return convert_graphrag_format(graph_data)
        elif isinstance(graph_data, dict) and "nodes" in graph_data:
            # Standard format: {nodes: [], edges: []}
            return convert_standard_format(graph_data)
        else:
            return {"nodes": [], "links": [], "categories": [], "stats": {}}
    except Exception as e:
        logger.error(f"Error preparing visualization: {e}")
        return {"nodes": [], "links": [], "categories": [], "stats": {}}
def convert_graphrag_format(graph_data: List) -> Dict:
    """Convert GraphRAG relationship list to ECharts format"""
    nodes_dict = {}
    links = []

    # Extract nodes and relationships from the list
    for item in graph_data:
        if not isinstance(item, dict):
            continue
        start_node = item.get("start_node", {})
        end_node = item.get("end_node", {})
        relation = item.get("relation", "related_to")

        start_id = ""
        end_id = ""
        # Process start node
        if start_node:
            start_id = start_node.get("properties", {}).get("name", "")
            if start_id and start_id not in nodes_dict:
                nodes_dict[start_id] = {
                    "id": start_id,
                    "name": start_id[:30],
                    "category": start_node.get("properties", {}).get("schema_type", start_node.get("label", "entity")),
                    "symbolSize": 25,
                    "properties": start_node.get("properties", {})
                }
        # Process end node
        if end_node:
            end_id = end_node.get("properties", {}).get("name", "")
            if end_id and end_id not in nodes_dict:
                nodes_dict[end_id] = {
                    "id": end_id,
                    "name": end_id[:30],
                    "category": end_node.get("properties", {}).get("schema_type", end_node.get("label", "entity")),
                    "symbolSize": 25,
                    "properties": end_node.get("properties", {})
                }
        # Add relationship
        if start_id and end_id:
            links.append({
                "source": start_id,
                "target": end_id,
                "name": relation,
                "value": 1
            })

    # Create categories with evenly spaced hues
    categories_set = set()
    for node in nodes_dict.values():
        categories_set.add(node["category"])
    categories = []
    for i, cat_name in enumerate(categories_set):
        categories.append({
            "name": cat_name,
            "itemStyle": {
                "color": f"hsl({i * 360 / len(categories_set)}, 70%, 60%)"
            }
        })

    nodes = list(nodes_dict.values())
    return {
        "nodes": nodes[:500],  # Limit for better visual effects
        "links": links[:1000],
        "categories": categories,
        "stats": {
            "total_nodes": len(nodes),
            "total_edges": len(links),
            "displayed_nodes": len(nodes[:500]),
            "displayed_edges": len(links[:1000])
        }
    }
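# Illustrative input item and converted output (shapes inferred from the
# accessors above; node names and property sets vary by graph):
#   item = {"start_node": {"label": "entity", "properties": {"name": "Paris"}},
#           "end_node": {"label": "entity", "properties": {"name": "France"}},
#           "relation": "located_in"}
#   -> two nodes ("Paris", "France") plus one link
#      {"source": "Paris", "target": "France", "name": "located_in", "value": 1}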
def convert_standard_format(graph_data: Dict) -> Dict:
    """Convert standard {nodes: [], edges: []} format to ECharts format"""
    nodes = []
    links = []
    categories = []

    # Extract unique categories
    node_types = set()
    for node in graph_data.get("nodes", []):
        node_type = node.get("type", "entity")
        node_types.add(node_type)
    for i, node_type in enumerate(node_types):
        categories.append({
            "name": node_type,
            "itemStyle": {
                "color": f"hsl({i * 360 / len(node_types)}, 70%, 60%)"
            }
        })

    # Process nodes
    for node in graph_data.get("nodes", []):
        nodes.append({
            "id": node.get("id", ""),
            "name": node.get("name", node.get("id", ""))[:30],
            "category": node.get("type", "entity"),
            "value": len(node.get("attributes", [])),
            "symbolSize": min(max(len(node.get("attributes", [])) * 3 + 15, 15), 40),
            "attributes": node.get("attributes", [])
        })

    # Process edges
    for edge in graph_data.get("edges", []):
        links.append({
            "source": edge.get("source", ""),
            "target": edge.get("target", ""),
            "name": edge.get("relation", "related_to"),
            "value": edge.get("weight", 1)
        })

    return {
        "nodes": nodes[:500],  # Limit for performance
        "links": links[:1000],
        "categories": categories,
        "stats": {
            "total_nodes": len(graph_data.get("nodes", [])),
            "total_edges": len(graph_data.get("edges", [])),
            "displayed_nodes": len(nodes[:500]),
            "displayed_edges": len(links[:1000])
        }
    }
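# Illustrative standard-format input (field names grounded in the accessors
# above; values hypothetical):
#   {"nodes": [{"id": "n1", "name": "Paris", "type": "location", "attributes": ["capital"]}],
#    "edges": [{"source": "n1", "target": "n2", "relation": "located_in", "weight": 1}]}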
@app.post("/api/ask-question", response_model=QuestionResponse)
async def ask_question(request: QuestionRequest, client_id: str = "default"):
"""Process question using agent mode (iterative retrieval + reasoning) and return answer."""
try:
if not GRAPHRAG_AVAILABLE:
raise HTTPException(status_code=503, detail="GraphRAG components not available. Please install or configure them.")
dataset_name = request.dataset_name
question = request.question
await send_progress_update(client_id, "retrieval", 10, "Initializing retrieval system (agent mode)...")
graph_path = f"output/graphs/{dataset_name}_new.json"
schema_path = get_schema_path_for_dataset(dataset_name)
if not os.path.exists(graph_path):
graph_path = "output/graphs/demo_new.json"
if not os.path.exists(graph_path):
raise HTTPException(status_code=404, detail="Graph not found. Please construct graph first.")
# Config & components
global config
if config is None:
config = get_config("config/base_config.yaml")
graphq = decomposer.GraphQ(dataset_name, config=config)
kt_retriever = retriever.KTRetriever(
dataset_name,
graph_path,
recall_paths=config.retrieval.recall_paths,
schema_path=schema_path,
top_k=config.retrieval.top_k_filter,
mode="agent", # force agent mode
config=config
)
await send_progress_update(client_id, "retrieval", 40, "Building indices...")
loop = asyncio.get_running_loop()
# Offload index building to thread executor to avoid blocking event loop
await loop.run_in_executor(None, kt_retriever.build_indices)
# Notify QA start via WS so frontend can show immediate progress
try:
await manager.send_message({
"type": "qa_update",
"stage": "start",
"message": "Question processing started",
"dataset": dataset_name,
"question": question,
"timestamp": datetime.now().isoformat()
}, client_id)
await asyncio.sleep(0)
except Exception as _e:
logger.debug(f"QA start ws send failed: {_e}")
# Helper functions (reuse a simplified version of main.py logic)
def _dedup(items):
return list({x: None for x in items}.keys())
def _merge_chunk_contents(ids, mapping):
chunks = []
for idx, i in enumerate(ids, 1):
content = mapping.get(i, f"[Missing content for chunk {i}]")
chunks.append(f"[Chunk {idx}] {content}")
return chunks
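        # e.g. _dedup(["a", "b", "a"]) -> ["a", "b"], and
        # _merge_chunk_contents(["c1"], {"c1": "text"}) -> ["[Chunk 1] text"]
        # (illustrative values)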
        # Step 1: decomposition
        await send_progress_update(client_id, "retrieval", 50, "Decomposing question...")
        try:
            # Offload decomposition to the executor
            decomposition = await loop.run_in_executor(None, lambda: graphq.decompose(question, schema_path))
            sub_questions = decomposition.get("sub_questions", [])
            involved_types = decomposition.get("involved_types", {})
            try:
                await manager.send_message({
                    "type": "qa_update",
                    "stage": "decompose",
                    "sub_questions_count": len(sub_questions),
                    "sub_questions": [sq.get("sub-question", "") for sq in sub_questions][:5],
                    "timestamp": datetime.now().isoformat()
                }, client_id)
                await asyncio.sleep(0.05)
            except Exception:
                pass
        except Exception as e:
            logger.error(f"Decompose failed: {e}")
            sub_questions = [{"sub-question": question}]
            involved_types = {"nodes": [], "relations": [], "attributes": []}
            decomposition = {"sub_questions": sub_questions, "involved_types": involved_types}
        reasoning_steps = []
        all_triples = set()
        all_chunk_ids = set()
        all_chunk_contents: Dict[str, str] = {}

        # Step 2: initial retrieval for each sub-question
        await send_progress_update(client_id, "retrieval", 65, "Initial retrieval...")
        import time as _time
        for idx, sq in enumerate(sub_questions):
            sq_text = sq.get("sub-question", question)
            start_t = _time.time()

            # Offload retrieval to the thread executor to avoid blocking the event loop
            def _run_retrieval():
                return kt_retriever.process_retrieval_results(
                    sq_text,
                    top_k=config.retrieval.top_k_filter,
                    involved_types=involved_types
                )

            retrieval_results, elapsed = await loop.run_in_executor(None, _run_retrieval)
            triples = retrieval_results.get('triples', []) or []
            chunk_ids = retrieval_results.get('chunk_ids', []) or []
            chunk_contents = retrieval_results.get('chunk_contents', []) or []
            if isinstance(chunk_contents, dict):
                for cid, ctext in chunk_contents.items():
                    all_chunk_contents[cid] = ctext
            else:
                for i_c, cid in enumerate(chunk_ids):
                    if i_c < len(chunk_contents):
                        all_chunk_contents[cid] = chunk_contents[i_c]
            all_triples.update(triples)
            all_chunk_ids.update(chunk_ids)

            reasoning_steps.append({
                "type": "sub_question",
                "question": sq_text,
                "triples": triples[:10],
                "triples_count": len(triples),
                "chunks_count": len(chunk_ids),
                "processing_time": elapsed,
                "chunk_contents": list(all_chunk_contents.values())[:3]
            })

            # Stream this sub-question's partial result to the frontend via WebSocket
            try:
                await manager.send_message({
                    "type": "qa_update",
                    "stage": "sub_question",
                    "index": idx + 1,
                    "total": len(sub_questions),
                    "question": sq_text,
                    "triples_preview": list(dict.fromkeys(triples))[:5],
                    "triples_count": len(triples),
                    "chunks_count": len(chunk_ids),
                    "processing_time": elapsed,
                    "timestamp": datetime.now().isoformat()
                }, client_id)
                # yield to the event loop to flush WS frames
                await asyncio.sleep(0)
            except Exception as _e:
                logger.debug(f"QA update ws send failed for sub_question {idx+1}: {_e}")
        # Step 3: IRCoT iterative refinement
        await send_progress_update(client_id, "retrieval", 75, "Iterative reasoning...")
        try:
            await manager.send_message({
                "type": "qa_update",
                "stage": "ircot_start",
                "message": "Starting iterative reasoning",
                "timestamp": datetime.now().isoformat()
            }, client_id)
            await asyncio.sleep(0.05)
        except Exception:
            pass

        max_steps = getattr(getattr(config.retrieval, 'agent', object()), 'max_steps', 3)
        current_query = question
        thoughts = []

        # Initial answer attempt
        initial_triples = _dedup(list(all_triples))
        initial_chunk_ids = list(set(all_chunk_ids))
        initial_chunk_contents = _merge_chunk_contents(initial_chunk_ids, all_chunk_contents)
        context_initial = "=== Triples ===\n" + "\n".join(initial_triples[:20]) + "\n=== Chunks ===\n" + "\n---\n".join(initial_chunk_contents[:10])
        init_prompt = kt_retriever.generate_prompt(question, context_initial)
        try:
            # Offload the LLM call to the thread executor
            initial_answer = await loop.run_in_executor(None, lambda: kt_retriever.generate_answer(init_prompt))
        except Exception as e:
            initial_answer = f"Initial answer failed: {e}"
        thoughts.append(f"Initial: {initial_answer[:200]}")
        final_answer = initial_answer

        for step in range(1, max_steps + 1):
            loop_triples = _dedup(list(all_triples))
            loop_chunk_ids = list(set(all_chunk_ids))
            loop_chunk_contents = _merge_chunk_contents(loop_chunk_ids, all_chunk_contents)
            loop_ctx = "=== Triples ===\n" + "\n".join(loop_triples[:20]) + "\n=== Chunks ===\n" + "\n---\n".join(loop_chunk_contents[:10])
            loop_prompt = f"""
You are an expert knowledge assistant using iterative retrieval with chain-of-thought reasoning.

Current Question: {question}
Current Iteration Query: {current_query}
Knowledge Context:
{loop_ctx}
Previous Thoughts: {' | '.join(thoughts) if thoughts else 'None'}

Instructions:
1. If there is enough information, answer with: So the answer is: <answer>
2. Otherwise, propose a new query with: The new query is: <query>

Your reasoning:
"""
            try:
                reasoning = await loop.run_in_executor(None, lambda: kt_retriever.generate_answer(loop_prompt))
            except Exception as e:
                reasoning = f"Reasoning error: {e}"
            thoughts.append(reasoning[:400])

            reasoning_steps.append({
                "type": "ircot_step",
                "question": current_query,
                "triples": loop_triples[:10],
                "triples_count": len(loop_triples),
                "chunks_count": len(loop_chunk_ids),
                "processing_time": 0,
                "chunk_contents": loop_chunk_contents[:3],
                "thought": reasoning[:300]
            })

            # Stream iterative reasoning step updates (optional but helpful)
            try:
                await manager.send_message({
                    "type": "qa_update",
                    "stage": "ircot",
                    "step": step,
                    "max_steps": max_steps,
                    "current_query": current_query,
                    "thought_preview": (reasoning or "")[:200],
                    "timestamp": datetime.now().isoformat()
                }, client_id)
                # yield to the event loop to flush WS frames
                await asyncio.sleep(0)
            except Exception as _e:
                logger.debug(f"QA update ws send failed for ircot step {step}: {_e}")

            if "So the answer is:" in reasoning:
                m = re.search(r"So the answer is:\s*(.*)", reasoning, flags=re.IGNORECASE | re.DOTALL)
                final_answer = m.group(1).strip() if m else reasoning
                break
            if "The new query is:" not in reasoning:
                final_answer = initial_answer or reasoning
                break
            new_query = reasoning.split("The new query is:", 1)[1].strip().splitlines()[0]
            if not new_query or new_query == current_query:
                final_answer = initial_answer or reasoning
                break
            current_query = new_query
            await send_progress_update(client_id, "retrieval", min(90, 75 + step * 5), f"Iterative retrieval step {step}...")

            try:
                def _run_more_retrieval():
                    return kt_retriever.process_retrieval_results(current_query, top_k=config.retrieval.top_k_filter)

                new_ret, _ = await loop.run_in_executor(None, _run_more_retrieval)
                new_triples = new_ret.get('triples', []) or []
                new_chunk_ids = new_ret.get('chunk_ids', []) or []
                new_chunk_contents = new_ret.get('chunk_contents', []) or []
                if isinstance(new_chunk_contents, dict):
                    for cid, ctext in new_chunk_contents.items():
                        all_chunk_contents[cid] = ctext
                else:
                    for i_c, cid in enumerate(new_chunk_ids):
                        if i_c < len(new_chunk_contents):
                            all_chunk_contents[cid] = new_chunk_contents[i_c]
                all_triples.update(new_triples)
                all_chunk_ids.update(new_chunk_ids)
            except Exception as e:
                logger.error(f"Iterative retrieval failed: {e}")
                break
        # Final aggregation
        final_triples = _dedup(list(all_triples))[:20]
        final_chunk_ids = list(set(all_chunk_ids))
        final_chunk_contents = _merge_chunk_contents(final_chunk_ids, all_chunk_contents)[:10]
        await send_progress_update(client_id, "retrieval", 100, "Answer generation completed!")

        # Notify the frontend that the QA process is complete with a compact summary
        try:
            await manager.send_message({
                "type": "qa_complete",
                "answer_preview": (final_answer or "")[:300],
                "sub_questions_count": len(sub_questions),
                "triples_final_count": len(final_triples),
                "chunks_final_count": len(final_chunk_contents),
                "timestamp": datetime.now().isoformat()
            }, client_id)
        except Exception as _e:
            logger.debug(f"QA complete ws send failed: {_e}")

        visualization_data = {
            "subqueries": prepare_subquery_visualization(sub_questions, reasoning_steps),
            "knowledge_graph": prepare_retrieved_graph_visualization(final_triples),
            "reasoning_flow": prepare_reasoning_flow_visualization(reasoning_steps),
            "retrieval_details": {
                "total_triples": len(final_triples),
                "total_chunks": len(final_chunk_contents),
                "sub_questions_count": len(sub_questions),
                "triples_by_subquery": [s.get("triples_count", 0) for s in reasoning_steps if s.get("type") == "sub_question"]
            }
        }

        return QuestionResponse(
            answer=final_answer,
            sub_questions=sub_questions,
            retrieved_triples=final_triples,
            retrieved_chunks=final_chunk_contents,
            reasoning_steps=reasoning_steps,
            visualization_data=visualization_data
        )
    except HTTPException:
        # Preserve intended status codes (404/503) instead of converting to 500
        raise
    except Exception as e:
        await send_progress_update(client_id, "retrieval", 0, f"Question answering failed: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
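# Illustrative request (assumes a graph already constructed for a hypothetical
# dataset "my_docs"):
#   curl -X POST "http://localhost:8000/api/ask-question?client_id=default" \
#        -H "Content-Type: application/json" \
#        -d '{"question": "Where is the Eiffel Tower?", "dataset_name": "my_docs"}'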
def prepare_subquery_visualization(sub_questions: List[Dict], reasoning_steps: List[Dict]) -> Dict:
    """Prepare subquery visualization"""
    nodes = [{"id": "original", "name": "Original Question", "category": "question", "symbolSize": 40}]
    links = []
    for i, sub_q in enumerate(sub_questions):
        sub_id = f"sub_{i}"
        sq_text = sub_q.get("sub-question", "")
        nodes.append({
            "id": sub_id,
            # Only append an ellipsis when the label is actually truncated
            "name": sq_text[:20] + "..." if len(sq_text) > 20 else sq_text,
            "category": "sub_question",
            "symbolSize": 30
        })
        links.append({"source": "original", "target": sub_id, "name": "decomposed to"})
    return {
        "nodes": nodes,
        "links": links,
        "categories": [
            {"name": "question", "itemStyle": {"color": "#ff6b6b"}},
            {"name": "sub_question", "itemStyle": {"color": "#4ecdc4"}}
        ]
    }
def prepare_retrieved_graph_visualization(triples: List[str]) -> Dict:
    """Prepare retrieved knowledge visualization"""
    nodes = []
    links = []
    node_set = set()
    for triple in triples[:10]:
        try:
            if triple.startswith('[') and triple.endswith(']'):
                try:
                    parts = ast.literal_eval(triple)