From 16b0aa5bb137d4fd86d11531682c571e62ec4955 Mon Sep 17 00:00:00 2001
From: Dror Ivry
Date: Thu, 29 Jan 2026 23:12:53 +0200
Subject: [PATCH] Add optional metadata to evaluation requests

Add an optional `metadata` parameter (string key-value pairs) to
Client.evaluate and Client.invoke_evaluation, thread it through to the
EvaluationRequest and EvaluationInvokeRequest models, and bump the
package version to 0.15.0.
---
 pyproject.toml      | 2 +-
 qualifire/client.py | 5 +++++
 qualifire/types.py  | 2 ++
 3 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index cd95f22..7f67aaf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "qualifire"
-version = "0.14.0"
+version = "0.15.0"
 description = "Qualifire Python SDK"
 authors = [{ name = "qualifire-dev", email = "dror@qualifire.ai" }]
 requires-python = ">=3.8,<4"
diff --git a/qualifire/client.py b/qualifire/client.py
index 8cbac40..035cccb 100644
--- a/qualifire/client.py
+++ b/qualifire/client.py
@@ -67,6 +67,7 @@ def evaluate(
         topic_scoping_multi_turn_mode: bool = False,
         topic_scoping_target: PolicyTarget = PolicyTarget.BOTH,
         allowed_topics: Optional[List[str]] = None,
+        metadata: Optional[Dict[str, str]] = None,
     ) -> Union[EvaluationResponse, None]:
         """
         Evaluates the given input and output pairs.
@@ -113,6 +114,7 @@ def evaluate(
         :param topic_scoping_multi_turn_mode: Enable multi-turn mode for topic scoping check.
         :param topic_scoping_target: Target topic for topic scoping check.
         :param allowed_topics: List of allowed topics for topic scoping check.
+        :param metadata: Optional dictionary of string key-value pairs to attach to the evaluation invocation.
 
         :return: An EvaluationResponse object containing the evaluation results.
         :raises Exception: If an error occurs during the evaluation.
@@ -236,6 +238,7 @@ def evaluate(
             topic_scoping_multi_turn_mode=topic_scoping_multi_turn_mode,
             topic_scoping_target=topic_scoping_target,
             allowed_topics=allowed_topics,
+            metadata=metadata,
         )
 
         response = requests.post(
@@ -264,6 +267,7 @@ def invoke_evaluation(
             Optional[List[Dict[str, Any]]],
         ] = None,
         available_tools: Optional[List[LLMToolDefinition]] = None,
+        metadata: Optional[Dict[str, str]] = None,
     ) -> EvaluationResponse:
 
         url = f"{self._base_url}/api/evaluation/invoke/"
@@ -279,6 +283,7 @@ def invoke_evaluation(
             output=output,
             messages=messages,  # type: ignore
             available_tools=available_tools,
+            metadata=metadata,
         )
 
         response = requests.request(
diff --git a/qualifire/types.py b/qualifire/types.py
index a6167bc..58f5408 100644
--- a/qualifire/types.py
+++ b/qualifire/types.py
@@ -70,6 +70,7 @@ class EvaluationRequest(BaseModel):
     topic_scoping_multi_turn_mode: bool = False
     topic_scoping_target: PolicyTarget = PolicyTarget.BOTH
     allowed_topics: Optional[List[str]] = None
+    metadata: Optional[Dict[str, str]] = None
 
     @model_validator(mode="after")
     def validate_model(self) -> "EvaluationRequest":
@@ -118,6 +119,7 @@ class EvaluationInvokeRequest(BaseModel):
     output: Optional[str] = None
     messages: Optional[List[LLMMessage]] = None
    available_tools: Optional[List[LLMToolDefinition]] = None
+    metadata: Optional[Dict[str, str]] = None
 
     @model_validator(mode="after")
     def validate_model(self) -> "EvaluationInvokeRequest":
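
Usage sketch (reviewer note, not part of the patch): a minimal example of
passing the new `metadata` keyword introduced above. Only the `metadata`
parameter itself is confirmed by this diff; the Client constructor
arguments and the `input`/`output` keywords are assumptions based on the
"input and output pairs" wording of the evaluate() docstring.

    from qualifire.client import Client

    # Hypothetical constructor; its real signature is not shown in this diff.
    client = Client(api_key="YOUR_API_KEY")

    # Attach free-form string key-value pairs to the evaluation invocation
    # (new in 0.15.0). All keys and values here are illustrative.
    response = client.evaluate(
        input="What is the capital of France?",
        output="The capital of France is Paris.",
        metadata={"request_id": "abc-123", "environment": "staging"},
    )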