diff --git a/tools/llm_api.py b/tools/llm_api.py
index 14ba864..15592ac 100644
--- a/tools/llm_api.py
+++ b/tools/llm_api.py
@@ -199,7 +199,7 @@ def query_llm(prompt: str, client=None, model=None, provider="openai", image_pat
             prompt_tokens=response.usage.prompt_tokens,
             completion_tokens=response.usage.completion_tokens,
             total_tokens=response.usage.total_tokens,
-            reasoning_tokens=response.usage.completion_tokens_details.reasoning_tokens if model.lower().startswith("o") else None  # Only checks if model starts with "o", e.g., o1, o1-preview, o1-mini, o3, etc. Can update this logic to specific models in the future.
+            reasoning_tokens=response.usage.reasoning_tokens if hasattr(response.usage, 'reasoning_tokens') and model.lower().startswith("o") else None
         )

         # Calculate cost
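For context, a minimal sketch of the defensive extraction pattern this change moves toward, pulled out into a hypothetical helper (extract_reasoning_tokens is not part of the PR). Note that in recent versions of the openai Python SDK the value typically lives under usage.completion_tokens_details.reasoning_tokens rather than directly on usage, so the sketch checks the nested location first and only then falls back to a flat attribute; attribute names should be verified against the pinned SDK version.

    from typing import Optional

    def extract_reasoning_tokens(usage, model: str) -> Optional[int]:
        """Return reasoning tokens for reasoning-style models ("o1", "o3", ...), else None."""
        if not model.lower().startswith("o"):
            return None
        # Newer openai SDKs nest the count under completion_tokens_details (assumption: verify locally).
        details = getattr(usage, "completion_tokens_details", None)
        if details is not None and getattr(details, "reasoning_tokens", None) is not None:
            return details.reasoning_tokens
        # Fall back to a flat attribute, as the patched line above assumes.
        return getattr(usage, "reasoning_tokens", None)

With a helper like this, the constructor argument would reduce to reasoning_tokens=extract_reasoning_tokens(response.usage, model), which keeps the model-prefix check and the hasattr-style guard in one place.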