@@ -124,36 +124,17 @@ def run_spl(system_prompt: str, initial_query: str, client, model: str, request_
     for i, strategy in enumerate(selected_strategies, 1):
         logger.info(f"Selected strategy {i}/{MAX_STRATEGIES_FOR_INFERENCE} for inference: {strategy.strategy_id} (success rate: {strategy.success_rate:.2f})")
 
-    # 7. If no strategies selected, use fallback
+    # 7. Handle strategies for the problem type
     if not selected_strategies:
-        logger.info(f"No strategies selected for problem type: {problem_type}")
-        if not learning_mode:
-            logger.info("Suggesting to enable learning mode")
-            fallback_message = (
-                "I don't have any problem-solving strategies yet for this type of problem. "
-                "To enable me to learn and improve strategies for similar problems in the future, "
-                "you can set `spl_learning=True` in the request configuration.\n\n"
-            )
-        else:
-            fallback_message = ""
+        logger.info(f"No existing strategies found for problem type: {problem_type}")
+        logger.info(f"Running without strategy augmentation - using base system prompt only")
 
-        fallback_strategy = Strategy(
-            strategy_id="fallback_temporary",
-            problem_type=problem_type,
-            strategy_text=(
-                f"When solving {problem_type} problems:\n"
-                "1. Break down the problem into manageable parts\n"
-                "2. Analyze each part systematically\n"
-                "3. Apply appropriate techniques for each component\n"
-                "4. Combine the results into a cohesive solution"
-            ),
-            examples=[initial_query]
-        )
-        selected_strategies = [fallback_strategy]
-
-    # 8. Augment the system prompt with the selected strategies
-    augmented_prompt = augment_system_prompt(system_prompt, selected_strategies)
-    logger.info(f"Augmented system prompt with {len(selected_strategies)} strategies (inference limit: {MAX_STRATEGIES_FOR_INFERENCE})")
+        # Just use the original system prompt with no augmentation
+        augmented_prompt = system_prompt
+    else:
+        # 8. Augment the system prompt with the selected strategies
+        augmented_prompt = augment_system_prompt(system_prompt, selected_strategies)
+        logger.info(f"Augmented system prompt with {len(selected_strategies)} strategies (inference limit: {MAX_STRATEGIES_FOR_INFERENCE})")
 
     # 9. Forward the request to the LLM with the augmented prompt
     try:
@@ -168,11 +149,12 @@ def run_spl(system_prompt: str, initial_query: str, client, model: str, request_
         elif request_params['max_tokens'] < DEFAULT_MAX_TOKENS:
             request_params['max_tokens'] = DEFAULT_MAX_TOKENS
 
-        # Adjust the query if we're suggesting learning mode
+        # Log a suggestion if no strategies found in inference mode
         if not learning_mode and not existing_strategies:
-            initial_query_with_suggestion = fallback_message + initial_query
-        else:
-            initial_query_with_suggestion = initial_query
+            logger.info("Suggesting to enable learning mode: To create and learn strategies for this problem type, enable learning mode by setting 'spl_learning=True' in the request config.")
+
+        # Use unmodified query - no need to add fallback message to the actual query
+        initial_query_with_suggestion = initial_query
 
         response = client.chat.completions.create(
             model=model,
@@ -194,8 +176,8 @@ def run_spl(system_prompt: str, initial_query: str, client, model: str, request_
         logger.debug(f"Main response - thinking extracted: '{thinking}'")
         logger.debug(f"Main response - final answer after removing thinking: '{final_response}'")
 
-        # Only perform learning operations if in learning mode
-        if learning_mode:
+        # Only perform learning operations if in learning mode and we have strategies
+        if learning_mode and selected_strategies:
             # 10. Evaluate the effectiveness of the strategies
             strategy_effectiveness = evaluate_strategy_effectiveness(
                 final_response,
@@ -226,6 +208,8 @@ def run_spl(system_prompt: str, initial_query: str, client, model: str, request_
                     logger.info(f"Refining strategy {strategy.strategy_id} after {strategy.total_attempts} attempts")
                     refined_strategy = refine_strategy(strategy, initial_query, final_response, thinking, client, model)
                     db.refine_strategy(strategy.strategy_id, refined_strategy.strategy_text)
+        elif learning_mode:
+            logger.info("No strategies to evaluate")
         else:
             logger.info("Strategy evaluation and refinement skipped (not in learning mode)")
 
@@ -241,12 +225,17 @@ def run_spl(system_prompt: str, initial_query: str, client, model: str, request_
     except Exception as e:
         logger.error(f"Error in SPL plugin: {str(e)}")
         # Fall back to regular completion on error
-        response = client.chat.completions.create(
-            model=model,
-            messages=[
-                {"role": "system", "content": system_prompt},
-                {"role": "user", "content": initial_query}
-            ],
-            max_tokens=DEFAULT_MAX_TOKENS  # Ensure fallback also uses sufficient tokens
-        )
-        return response.choices[0].message.content, response.usage.completion_tokens
+        try:
+            response = client.chat.completions.create(
+                model=model,
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": initial_query}
+                ],
+                max_tokens=DEFAULT_MAX_TOKENS  # Ensure fallback also uses sufficient tokens
+            )
+            return response.choices[0].message.content, response.usage.completion_tokens
+        except Exception as inner_e:
+            logger.error(f"Error in fallback completion: {str(inner_e)}")
+            # Return a simple error message if even the fallback fails
+            return f"Error processing request: {str(e)}", 0