Skip to content

Commit 480fe2a

Browse files
committed
remove more code
1 parent a533dcb commit 480fe2a

File tree

2 files changed

+0
-39
lines changed

eval_protocol/cli_commands/upload.py

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
from eval_protocol.platform_api import create_or_update_fireworks_secret
2222

2323
from eval_protocol.evaluation import create_evaluation
24-
from eval_protocol.fireworks_rft import save_evaluator_trace, detect_dataset_builder
2524

2625

2726
@dataclass
@@ -718,23 +717,6 @@ def upload_command(args: argparse.Namespace) -> int:
718717
)
719718
name = result.get("name", evaluator_id) if isinstance(result, dict) else evaluator_id
720719

721-
# Persist local evaluator trace for later `create rft`
722-
try:
723-
metric_dir = os.path.dirname(source_file_path) if source_file_path else root
724-
builder_spec = detect_dataset_builder(metric_dir) or None
725-
trace_payload = {
726-
"evaluator_id": evaluator_id,
727-
"evaluator_resource_name": name,
728-
"entry_point": entry_point,
729-
"metric_dir": metric_dir,
730-
"project_root": root,
731-
"dataset_builder": builder_spec,
732-
}
733-
save_evaluator_trace(project_root=root, evaluator_id=evaluator_id, trace=trace_payload)
734-
except Exception:
735-
# Non-fatal; continue
736-
pass
737-
738720
# Print success message with Fireworks dashboard link
739721
print(f"\n✅ Successfully uploaded evaluator: {evaluator_id}")
740722
print("📊 View in Fireworks Dashboard:")

eval_protocol/fireworks_rft.py

Lines changed: 0 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -37,25 +37,6 @@ def _map_api_host_to_app_host(api_base: str) -> str:
3737
return "https://app.fireworks.ai"
3838

3939

40-
def load_evaluator_trace(project_root: str, evaluator_id: str) -> Optional[Dict[str, Any]]:
41-
trace_path = Path(project_root) / ".eval_protocol" / "evaluators" / f"{evaluator_id}.json"
42-
if not trace_path.exists():
43-
return None
44-
try:
45-
with open(trace_path, "r", encoding="utf-8") as f:
46-
return json.load(f)
47-
except Exception:
48-
return None
49-
50-
51-
def save_evaluator_trace(project_root: str, evaluator_id: str, trace: Dict[str, Any]) -> None:
52-
base_dir = Path(project_root) / ".eval_protocol" / "evaluators"
53-
base_dir.mkdir(parents=True, exist_ok=True)
54-
trace_path = base_dir / f"{evaluator_id}.json"
55-
with open(trace_path, "w", encoding="utf-8") as f:
56-
json.dump(trace, f, indent=2, ensure_ascii=False)
57-
58-
5940
def detect_dataset_builder(metric_dir: str) -> Optional[str]:
6041
"""
6142
Best-effort scan for a dataset builder callable inside the metric directory.
@@ -228,8 +209,6 @@ def build_default_output_model(evaluator_id: str) -> str:
228209

229210

230211
__all__ = [
231-
"load_evaluator_trace",
232-
"save_evaluator_trace",
233212
"detect_dataset_builder",
234213
"materialize_dataset_via_builder",
235214
"create_dataset_from_jsonl",

0 commit comments

Comments (0)