-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: main.py
More file actions
124 lines (107 loc) · 4.43 KB
/
main.py
File metadata and controls
124 lines (107 loc) · 4.43 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
"""
TSqualityAgent – entry point
"""
import argparse
from dataclasses import asdict
from config import Config, build_llm, build_perceiver_llm
from models.llm import CHATANYWHERE_BASE_URL
from workflow import run_pipeline
from synthetic_cases import get_cases, CASE_NAMES
from run_logger import save_run
def hello_world():
    """Print a short startup greeting for the TSqualityAgent CLI."""
    greeting = 'Hello, TSQualityAgent!'
    print(greeting)
if __name__ == "__main__":
hello_world()
parser = argparse.ArgumentParser(description="TSqualityAgent – pairwise time series quality assessment")
# ── LLM ───────────────────────────────────────────────────────────────────
parser.add_argument(
"--model",
type=str,
default="gpt-5.4-mini",
help="Model name, e.g. gpt-5.4-mini (cloud) or Qwen/Qwen3-4B (local vLLM)",
)
parser.add_argument(
"--base_url",
type=str,
default=CHATANYWHERE_BASE_URL,
help="OpenAI-compatible API base URL. Use http://localhost:8000/v1 for local vLLM.",
)
parser.add_argument(
"--api_key",
type=str,
default="",
help="API key. Leave empty to use OPENAI_API_KEY env var. Use 'EMPTY' for local vLLM.",
)
parser.add_argument(
"--perceiver_model",
type=str,
default="",
help="Model name for the Perceiver agent only (e.g. a LoRA alias). "
"Defaults to --model when not set.",
)
parser.add_argument(
"--perceiver_base_url",
type=str,
default="",
help="Base URL for the Perceiver agent only (e.g. a separate vLLM port). "
"Defaults to --base_url when not set.",
)
parser.add_argument(
"--enable_thinking",
action="store_true",
default=False,
help="Enable Qwen3 thinking mode (disabled by default for speed).",
)
# ── Test case selection ────────────────────────────────────────────────────
parser.add_argument(
"--case",
type=str,
nargs="+",
default=None,
help=f"Test case(s) to run: all | one or more of: {' '.join(CASE_NAMES)} (default: all)",
)
# ── Inspector ─────────────────────────────────────────────────────────────
parser.add_argument(
"--max_steps",
type=int,
default=6,
help="Max ReAct steps per quality dimension in Inspector",
)
# ── Adjudicator reflection limits ─────────────────────────────────────────
parser.add_argument(
"--max_recheck",
type=int,
default=2,
help="Max times Adjudicator sends Inspector back for recheck",
)
parser.add_argument(
"--max_replan",
type=int,
default=1,
help="Max times Adjudicator sends Perceiver back for replanning",
)
args = parser.parse_args()
cfg = Config.from_args(args)
llm = build_llm(cfg)
perceiver_llm = build_perceiver_llm(cfg)
cases = None if (args.case is None or "all" in args.case) else args.case
test_cases = get_cases(cases)
case_label = " ".join(args.case) if args.case else "all"
perceiver_model_label = cfg.perceiver_model or cfg.model
perceiver_url_label = cfg.perceiver_base_url or cfg.base_url
print(f"\nModel: {args.model} ({args.base_url}) | Perceiver: {perceiver_model_label} ({perceiver_url_label}) | Case: {case_label} | Cases: {len(test_cases)}")
for name, input_data in test_cases:
print(f"\n{'=' * 60}")
print(f" {name}")
print("=" * 60)
state = run_pipeline(input_data, llm, cfg, perceiver_llm=perceiver_llm)
result = state.get("final_result", {})
print(f" Winner : {result.get('winner', 'N/A').upper()}")
print(f" Confidence : {result.get('confidence', 0):.0%}")
print(f" Explanation:")
explanation = result.get("explanation", "")
for line in explanation.split(". "):
if line.strip():
print(f" • {line.strip().rstrip('.')}.")
log_path = save_run(state, name, asdict(cfg))
print(f" Log saved : {log_path}")