Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ async def detect_and_switch_language(params: FunctionCallParams):
return

# Prepend new language instruction before clean base prompt
updated_content = f'{base_prompt}\n\n{language_instruction}'
updated_content = f'{language_instruction}\n\n{base_prompt}'
updated_system_message = {
'role': 'system',
'content': updated_content,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,13 @@ def create_llm_service(llm_config: Dict[str, Any]):
logger.info(f'Creating LLM service: {llm_type} / {model}')

if llm_type == 'openai':
return LLMServiceFactory._create_openai_llm(api_key, model, parameters)
base_url = llm_config.get('base_url')
if not base_url or base_url == 'https://api.openai.com/v1':
return LLMServiceFactory._create_openai_llm(api_key, model, parameters)
else:
return LLMServiceFactory._create_openai_compatible_llm(
api_key, model, parameters, base_url
)
Comment on lines +53 to +59
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Normalize base_url before default-endpoint comparison.

On Line 54, raw string comparison can misroute equivalent default URLs (e.g., trailing slash/whitespace) into the OpenAI-compatible path.

Suggested fix
         if llm_type == 'openai':
-            base_url = llm_config.get('base_url')
-            if not base_url or base_url == 'https://api.openai.com/v1':
+            raw_base_url = llm_config.get('base_url')
+            base_url = raw_base_url.strip().rstrip('/') if isinstance(raw_base_url, str) else raw_base_url
+            if not base_url or base_url == 'https://api.openai.com/v1':
                 return LLMServiceFactory._create_openai_llm(api_key, model, parameters)
             else:
                 return LLMServiceFactory._create_openai_compatible_llm(
                     api_key, model, parameters, base_url
                 )
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
base_url = llm_config.get('base_url')
if not base_url or base_url == 'https://api.openai.com/v1':
return LLMServiceFactory._create_openai_llm(api_key, model, parameters)
else:
return LLMServiceFactory._create_openai_compatible_llm(
api_key, model, parameters, base_url
)
raw_base_url = llm_config.get('base_url')
base_url = raw_base_url.strip().rstrip('/') if isinstance(raw_base_url, str) else raw_base_url
if not base_url or base_url == 'https://api.openai.com/v1':
return LLMServiceFactory._create_openai_llm(api_key, model, parameters)
else:
return LLMServiceFactory._create_openai_compatible_llm(
api_key, model, parameters, base_url
)
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@wavefront/server/apps/call_processing/call_processing/services/llm_service.py`
around lines 53 - 59, Normalize the retrieved base_url from llm_config before
comparing to the default OpenAI endpoint: get the raw value (e.g., base_url_raw
= llm_config.get('base_url')), treat None as empty string, strip surrounding
whitespace and trailing slashes (and optionally lower-case) into a normalized
base_url, then compare normalized base_url to the normalized default
"https://api.openai.com/v1" and call LLMServiceFactory._create_openai_llm when
it matches or is empty; otherwise call
LLMServiceFactory._create_openai_compatible_llm with the original/normalized
base_url.

elif llm_type == 'azure_openai':
base_url = llm_config.get('base_url')
if not base_url:
Expand Down Expand Up @@ -97,6 +103,36 @@ def _create_openai_llm(api_key: str, model: str, parameters: Dict[str, Any]):

return OpenAILLMService(api_key=api_key, model=model, params=input_params)

@staticmethod
def _create_openai_compatible_llm(
api_key: str, model: str, parameters: Dict[str, Any], base_url: str
):
"""Create a BaseOpenAILLMService for OpenAI-compatible endpoints"""
params_dict = {}

if 'temperature' in parameters:
params_dict['temperature'] = parameters['temperature']
if 'max_completion_tokens' in parameters:
params_dict['max_completion_tokens'] = parameters['max_completion_tokens']
if 'top_p' in parameters:
params_dict['top_p'] = parameters['top_p']
if 'frequency_penalty' in parameters:
params_dict['frequency_penalty'] = parameters['frequency_penalty']
if 'presence_penalty' in parameters:
params_dict['presence_penalty'] = parameters['presence_penalty']
if 'seed' in parameters:
params_dict['seed'] = parameters['seed']

Comment on lines +113 to +125
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

service_tier is silently ignored for OpenAI-compatible configs.

In Line 113–125 mapping, service_tier is missing even though the OpenAI path supports it (Line 94–95). That creates inconsistent runtime behavior based only on base_url.

Suggested fix
         if 'seed' in parameters:
             params_dict['seed'] = parameters['seed']
+        if 'service_tier' in parameters:
+            params_dict['service_tier'] = parameters['service_tier']
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
if 'temperature' in parameters:
params_dict['temperature'] = parameters['temperature']
if 'max_completion_tokens' in parameters:
params_dict['max_completion_tokens'] = parameters['max_completion_tokens']
if 'top_p' in parameters:
params_dict['top_p'] = parameters['top_p']
if 'frequency_penalty' in parameters:
params_dict['frequency_penalty'] = parameters['frequency_penalty']
if 'presence_penalty' in parameters:
params_dict['presence_penalty'] = parameters['presence_penalty']
if 'seed' in parameters:
params_dict['seed'] = parameters['seed']
if 'temperature' in parameters:
params_dict['temperature'] = parameters['temperature']
if 'max_completion_tokens' in parameters:
params_dict['max_completion_tokens'] = parameters['max_completion_tokens']
if 'top_p' in parameters:
params_dict['top_p'] = parameters['top_p']
if 'frequency_penalty' in parameters:
params_dict['frequency_penalty'] = parameters['frequency_penalty']
if 'presence_penalty' in parameters:
params_dict['presence_penalty'] = parameters['presence_penalty']
if 'seed' in parameters:
params_dict['seed'] = parameters['seed']
if 'service_tier' in parameters:
params_dict['service_tier'] = parameters['service_tier']
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In
`@wavefront/server/apps/call_processing/call_processing/services/llm_service.py`
around lines 113 - 125, The mapping block that builds params_dict from
parameters omits the 'service_tier' key, causing OpenAI-compatible configs
(handled earlier in the OpenAI path around the code that checks base_url) to
ignore it; update the mapping in llm_service.py to include if 'service_tier' in
parameters: params_dict['service_tier'] = parameters['service_tier'] so that the
same 'service_tier' value is forwarded for both OpenAI and non-OpenAI flows
(ensure you modify the same function that constructs params_dict and references
the parameters dict).

input_params = BaseOpenAILLMService.InputParams(**params_dict)

logger.info(
f"OpenAI-compatible LLM config: model={model}, base_url={base_url}, temp={params_dict.get('temperature', 'default')}"
)

return BaseOpenAILLMService(
api_key=api_key, model=model, base_url=base_url, params=input_params
)

@staticmethod
def _create_google_llm(api_key: str, model: str, parameters: Dict[str, Any]):
"""Create Google LLM service"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -321,7 +321,7 @@ async def run_conversation(
f'- Do NOT switch based on a greeting (e.g. "Namaste") or incidental use of another language.\n'
f'- If the user requests a language not in the supported list, apologise and tell them which languages are available. Do not call the switch tool.'
)
system_content = f'{base_system_prompt}\n\n{initial_language_instruction}{language_switching_rules}'
system_content = f'{initial_language_instruction}\n\n{base_system_prompt}{language_switching_rules}'
# Store base prompt without language instruction for switching (rules persist across switches)
language_state['original_system_prompt'] = (
base_system_prompt + language_switching_rules
Expand Down
Loading