Skip to content

Commit 3dc9935

Browse files
committed
change name
1 parent 19d491a commit 3dc9935

File tree

4 files changed

+7
-7
lines changed

4 files changed

+7
-7
lines changed

eval_protocol/adapters/base.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,6 @@ def get_evaluation_rows(self, *args, **kwargs) -> List[EvaluationRow]:
16   16           """Get evaluation rows from the data source."""
17   17           pass
18   18
19       -    def push_scores(self, rows: List[EvaluationRow], model_name: str, mean_score: float) -> None:
20       -        """Push evaluation scores back to the data source for tracking and analysis."""
     19  +    def upload_scores(self, rows: List[EvaluationRow], model_name: str, mean_score: float) -> None:
     20  +        """Upload evaluation scores back to the data source for tracking and analysis."""
21   21           pass

eval_protocol/adapters/braintrust.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -224,8 +224,8 @@ def get_evaluation_rows(
224  224        logger.info("Successfully processed %d BTQL results into %d evaluation rows", len(all_traces), len(eval_rows))
225  225        return eval_rows
226  226
227       -    def push_scores(self, rows: List[EvaluationRow], model_name: str, mean_score: float) -> None:
228       -        """Push evaluation scores back to Braintrust traces for tracking and analysis.
     227  +    def upload_scores(self, rows: List[EvaluationRow], model_name: str, mean_score: float) -> None:
     228  +        """Upload evaluation scores back to Braintrust traces for tracking and analysis.
229  229
230  230        Creates score entries in Braintrust for each unique trace_id found in the evaluation
231  231        rows' session data. This allows you to see evaluation results directly in the

eval_protocol/adapters/langfuse.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -434,8 +434,8 @@ def get_evaluation_rows_by_ids(
434  434                continue
435  435        return eval_rows
436  436
437       -    def push_scores(self, rows: List[EvaluationRow], model_name: str, mean_score: float) -> None:
438       -        """Push evaluation scores back to Langfuse traces for tracking and analysis.
     437  +    def upload_scores(self, rows: List[EvaluationRow], model_name: str, mean_score: float) -> None:
     438  +        """Upload evaluation scores back to Langfuse traces for tracking and analysis.
439  439
440  440        Creates a score entry in Langfuse for each unique trace_id found in the evaluation
441  441        rows' session data. This allows you to see evaluation results directly in the

eval_protocol/quickstart/llm_judge.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,6 +95,6 @@ async def run_judgment(row):
95   95
96   96        # Push scores back to adapter if provided. Note that one score per model will be pushed back onto same trace.
97   97        if adapter:
98       -        adapter.push_scores(rows, model_name, mean_score)
     98   +        adapter.upload_scores(rows, model_name, mean_score)
99   99
100  100        return rows

0 commit comments

Comments
 (0)