@@ -28,6 +28,7 @@ def completion(
2828 temperature = 0.5 ,
2929 max_tokens = 8192 ,
3030 retry_times = 3 ,
31+ reasoning_effort = None ,
3132):
3233 """
3334 Call OpenAI's completion interface for text generation
@@ -39,6 +40,8 @@ def completion(
3940 image_paths (List[str], optional): List of image paths, defaults to None
4041 temperature (float, optional): Temperature for text generation, defaults to 0.5
4142 max_tokens (int, optional): Maximum number of tokens for generated text, defaults to 8192
43+ retry_times (int, optional): Number of retries, defaults to 3
44+         reasoning_effort (str, optional): Reasoning effort level passed to the model; if None, falls back to the OPENAI_REASONING_EFFORT environment variable. Defaults to None
4245 Returns:
4346 str: Generated text content
4447 """
@@ -57,6 +60,11 @@ def completion(
5760 model = os .getenv ("OPENAI_DEFAULT_MODEL" )
5861 if not model :
5962 model = "gpt-4o"
63+
64+ if not reasoning_effort :
65+ reasoning_effort = os .getenv ("OPENAI_REASONING_EFFORT" )
66+ if not reasoning_effort :
67+ reasoning_effort = None
6068
6169 # Initialize LLMClient
6270 client = LLMClient .LLMClient (base_url = base_url , api_key = api_key , model = model )
@@ -69,6 +77,7 @@ def completion(
6977 image_paths = image_paths ,
7078 temperature = temperature ,
7179 max_tokens = max_tokens ,
80+ reasoning_effort = reasoning_effort ,
7281 )
7382 return response
7483 except Exception as e :
0 commit comments