refactor: enhance error handling and response validation in tool execution and output processing for multi-modal functionality

pull/904/head
harshalmore31 2 weeks ago
parent 64ac3125b0
commit fb494b9ce2

@@ -2800,48 +2800,72 @@ class Agent:
         )
 
     def execute_tools(self, response: any, loop_count: int):
-        output = (
-            self.tool_struct.execute_function_calls_from_api_response(
-                response
-            )
-        )
+        try:
+            output = (
+                self.tool_struct.execute_function_calls_from_api_response(
+                    response
+                )
+            )
 
-        self.short_memory.add(
-            role="Tool Executor",
-            content=format_data_structure(output),
-        )
+            # Handle empty or None output
+            if not output:
+                logger.info("No tool function calls found in response")
+                return
 
-        self.pretty_print(
-            f"{format_data_structure(output)}",
-            loop_count,
-        )
+            self.short_memory.add(
+                role="Tool Executor",
+                content=format_data_structure(output),
+            )
 
-        # Now run the LLM again without tools - create a temporary LLM instance
-        # instead of modifying the cached one
-        # Create a temporary LLM instance without tools for the follow-up call
-        temp_llm = self.temp_llm_instance_for_tool_summary()
+            self.pretty_print(
+                f"{format_data_structure(output)}",
+                loop_count,
+            )
 
-        tool_response = temp_llm.run(
-            f"""
-            Please analyze and summarize the following tool execution output in a clear and concise way.
-            Focus on the key information and insights that would be most relevant to the user's original request.
-            If there are any errors or issues, highlight them prominently.
-            Tool Output:
-            {output}
-            """
-        )
+            # Now run the LLM again without tools - create a temporary LLM instance
+            # instead of modifying the cached one
+            # Create a temporary LLM instance without tools for the follow-up call
+            try:
+                temp_llm = self.temp_llm_instance_for_tool_summary()
 
-        self.short_memory.add(
-            role=self.agent_name,
-            content=tool_response,
-        )
+                tool_response = temp_llm.run(
+                    f"""
+                    Please analyze and summarize the following tool execution output in a clear and concise way.
+                    Focus on the key information and insights that would be most relevant to the user's original request.
+                    If there are any errors or issues, highlight them prominently.
+                    Tool Output:
+                    {output}
+                    """
+                )
 
-        self.pretty_print(
-            f"{tool_response}",
-            loop_count,
-        )
+                self.short_memory.add(
+                    role=self.agent_name,
+                    content=tool_response,
+                )
+
+                self.pretty_print(
+                    f"{tool_response}",
+                    loop_count,
+                )
+            except Exception as e:
+                logger.error(f"Error in tool summary generation: {e}")
+                # Add a fallback summary
+                fallback_summary = f"Tool execution completed. Output: {format_data_structure(output)}"
+                self.short_memory.add(
+                    role=self.agent_name,
+                    content=fallback_summary,
+                )
+                self.pretty_print(fallback_summary, loop_count)
+
+        except Exception as e:
+            logger.error(f"Error in tool execution: {e}")
+            error_message = f"Tool execution failed: {str(e)}"
+            self.short_memory.add(
+                role="Tool Executor",
+                content=error_message,
+            )
+            self.pretty_print(error_message, loop_count)
 
     def list_output_types(self):
         return OutputType

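Note: as a reading aid for the hunk above, here is a minimal standalone sketch of the fallback pattern it introduces: the tool output is summarized by a secondary call, and any failure in that call degrades to a formatted raw-output message instead of aborting the agent loop. `summarize` and `format_output` are hypothetical callables standing in for the temporary tool-summary LLM and `format_data_structure`; this is not the Agent implementation itself.

import logging

logger = logging.getLogger(__name__)

def summarize_tool_output(output, summarize, format_output):
    # Empty or None output: nothing to summarize (mirrors the early return above)
    if not output:
        logger.info("No tool function calls found in response")
        return None
    try:
        # Primary path: ask a secondary, tool-free model for a concise summary
        return summarize(
            "Please analyze and summarize the following tool execution output:\n"
            f"{output}"
        )
    except Exception as exc:
        # Fallback path: surface the raw output rather than failing the loop
        logger.error(f"Error in tool summary generation: {exc}")
        return f"Tool execution completed. Output: {format_output(output)}"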
@@ -2253,17 +2253,31 @@ class BaseTool(BaseModel):
         else:
             # Convert string to dict if needed
             if isinstance(api_response, str):
+                # Handle empty or whitespace-only strings
+                api_response = api_response.strip()
+                if not api_response:
+                    self._log_if_verbose(
+                        "warning",
+                        "Empty API response string received, returning empty list"
+                    )
+                    return []
+
                 try:
                     api_response = json.loads(api_response)
                 except json.JSONDecodeError as e:
-                    raise ToolValidationError(
-                        f"Invalid JSON in API response: {e}"
-                    ) from e
+                    self._log_if_verbose(
+                        "error",
+                        f"Failed to parse JSON from API response: {e}. Response: '{api_response[:100]}...'"
+                    )
+                    # If JSON parsing fails, try to continue without function calls
+                    return []
 
             if not isinstance(api_response, dict):
-                raise ToolValidationError(
-                    "API response must be a dictionary, JSON string, BaseModel, or list of tool calls"
-                )
+                self._log_if_verbose(
+                    "warning",
+                    f"API response is not a dictionary (type: {type(api_response)}), returning empty list"
+                )
+                return []
 
             # Extract function calls from dictionary response
             function_calls = (
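As a quick illustration of the new behavior in this hunk (a sketch only, using plain `json` rather than the full `BaseTool` machinery and a hypothetical helper name): empty, whitespace-only, and malformed JSON strings now all yield an empty call list instead of raising `ToolValidationError`.

import json

def parse_response_dict(api_response):
    # Sketch of the tolerant path above: bad strings degrade to "no calls"
    if isinstance(api_response, str):
        api_response = api_response.strip()
        if not api_response:
            return []  # empty or whitespace-only string
        try:
            api_response = json.loads(api_response)
        except json.JSONDecodeError:
            return []  # malformed JSON no longer raises
    if not isinstance(api_response, dict):
        return []  # unexpected type
    return [api_response]

assert parse_response_dict("   ") == []
assert parse_response_dict("not json") == []
assert parse_response_dict('{"name": "search"}') == [{"name": "search"}]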
@@ -2387,11 +2401,15 @@ class BaseTool(BaseModel):
                     if name:
                         try:
                             # Parse arguments JSON string
-                            arguments = (
-                                json.loads(arguments_str)
-                                if isinstance(arguments_str, str)
-                                else arguments_str
-                            )
+                            if isinstance(arguments_str, str):
+                                # Handle empty or whitespace-only arguments
+                                arguments_str = arguments_str.strip()
+                                if not arguments_str:
+                                    arguments = {}
+                                else:
+                                    arguments = json.loads(arguments_str)
+                            else:
+                                arguments = arguments_str if arguments_str is not None else {}
 
                             function_calls.append(
                                 {
@@ -2404,7 +2422,16 @@ class BaseTool(BaseModel):
                         except json.JSONDecodeError as e:
                             self._log_if_verbose(
                                 "error",
-                                f"Failed to parse arguments for {name}: {e}",
+                                f"Failed to parse arguments for {name}: {e}. Using empty dict instead.",
                             )
+                            # Use empty dict as fallback
+                            function_calls.append(
+                                {
+                                    "name": name,
+                                    "arguments": {},
+                                    "id": response.get("id"),
+                                    "type": "openai",
+                                }
+                            )
 
         # Check for choices[].message.tool_calls format
@@ -2885,12 +2912,15 @@ class BaseTool(BaseModel):
                     if name:
                         try:
-                            # Parse arguments JSON string
-                            arguments = (
-                                json.loads(arguments_str)
-                                if isinstance(arguments_str, str)
-                                else arguments_str
-                            )
+                            # Parse arguments JSON string with better error handling
+                            if isinstance(arguments_str, str):
+                                arguments_str = arguments_str.strip()
+                                if not arguments_str:
+                                    arguments = {}
+                                else:
+                                    arguments = json.loads(arguments_str)
+                            else:
+                                arguments = arguments_str if arguments_str is not None else {}
 
                             function_calls.append(
                                 {
@@ -2905,7 +2935,18 @@ class BaseTool(BaseModel):
                         except json.JSONDecodeError as e:
                             self._log_if_verbose(
                                 "error",
-                                f"Failed to parse arguments for {name}: {e}",
+                                f"Failed to parse arguments for {name}: {e}. Using empty dict instead.",
                             )
+                            # Use empty dict as fallback
+                            function_calls.append(
+                                {
+                                    "name": name,
+                                    "arguments": {},
+                                    "id": getattr(
+                                        tool_call, "id", None
+                                    ),
+                                    "type": "openai",
+                                }
+                            )
 
         # Handle dictionary representations of tool calls

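Both argument-parsing sites above now share the same degradation strategy. A hedged sketch of that strategy in isolation follows; the helper name is illustrative and not part of `BaseTool`, and it collapses the try/except fallback into the helper itself.

import json

def parse_tool_arguments(arguments_str):
    # Non-string payloads pass through; None becomes an empty dict
    if not isinstance(arguments_str, str):
        return arguments_str if arguments_str is not None else {}
    arguments_str = arguments_str.strip()
    if not arguments_str:
        return {}  # empty or whitespace-only arguments
    try:
        return json.loads(arguments_str)
    except json.JSONDecodeError:
        return {}  # malformed arguments: keep the call, drop the arguments

assert parse_tool_arguments(None) == {}
assert parse_tool_arguments("   ") == {}
assert parse_tool_arguments('{"query": "weather"}') == {"query": "weather"}
assert parse_tool_arguments("{broken") == {}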
@@ -152,21 +152,45 @@ class LiteLLM:
         )
 
     def output_for_tools(self, response: any):
-        if self.mcp_call is True:
-            out = response.choices[0].message.tool_calls[0].function
-            output = {
-                "function": {
-                    "name": out.name,
-                    "arguments": out.arguments,
-                }
-            }
-            return output
-        else:
-            out = response.choices[0].message.tool_calls
-            if isinstance(out, BaseModel):
-                out = out.model_dump()
-            return out
+        try:
+            if self.mcp_call is True:
+                # Validate response structure for MCP calls
+                if (hasattr(response, 'choices') and
+                    len(response.choices) > 0 and
+                    hasattr(response.choices[0], 'message') and
+                    hasattr(response.choices[0].message, 'tool_calls') and
+                    response.choices[0].message.tool_calls and
+                    len(response.choices[0].message.tool_calls) > 0):
+                    out = response.choices[0].message.tool_calls[0].function
+                    output = {
+                        "function": {
+                            "name": out.name,
+                            "arguments": out.arguments,
+                        }
+                    }
+                    return output
+                else:
+                    logger.warning("Invalid MCP response structure, returning empty dict")
+                    return {}
+            else:
+                # Validate response structure for regular tool calls
+                if (hasattr(response, 'choices') and
+                    len(response.choices) > 0 and
+                    hasattr(response.choices[0], 'message') and
+                    hasattr(response.choices[0].message, 'tool_calls')):
+                    out = response.choices[0].message.tool_calls
+                    if isinstance(out, BaseModel):
+                        out = out.model_dump()
+                    return out
+                else:
+                    logger.warning("Invalid tool response structure, returning empty list")
+                    return []
+        except Exception as e:
+            logger.error(f"Error processing tool response: {e}")
+            return {} if self.mcp_call else []
 
     def _prepare_messages(
         self,
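The duplicated `hasattr` chains are hard to scan in diff form; the check they implement is roughly the following. This is a sketch using a `SimpleNamespace` stand-in for a litellm completion object, not the actual response type, and `has_tool_calls` is a hypothetical helper name.

from types import SimpleNamespace

def has_tool_calls(response) -> bool:
    # Roughly the structural check used in output_for_tools above
    return (
        hasattr(response, "choices")
        and len(response.choices) > 0
        and hasattr(response.choices[0], "message")
        and getattr(response.choices[0].message, "tool_calls", None) is not None
    )

# A plain text completion (no tool_calls) now produces a logged warning and
# an empty result from output_for_tools instead of an AttributeError.
text_only = SimpleNamespace(
    choices=[SimpleNamespace(message=SimpleNamespace(content="hello"))]
)
assert has_tool_calls(text_only) is False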
@@ -449,14 +473,29 @@ class LiteLLM:
             # Make the completion call
             response = completion(**completion_params)
 
+            # Validate response structure before processing
+            if not hasattr(response, 'choices') or not response.choices:
+                logger.error("Invalid response: no choices found")
+                return "Error: Invalid response from API"
+
+            if not hasattr(response.choices[0], 'message'):
+                logger.error("Invalid response: no message found in first choice")
+                return "Error: Invalid response structure"
+
             # Handle tool-based response
-            if self.tools_list_dictionary is not None:
+            if (self.tools_list_dictionary is not None and
+                hasattr(response.choices[0].message, 'tool_calls') and
+                response.choices[0].message.tool_calls is not None):
                 return self.output_for_tools(response)
             elif self.return_all is True:
                 return response.model_dump()
             else:
                 # Return standard response content
-                return response.choices[0].message.content
+                content = response.choices[0].message.content
+                if content is None:
+                    logger.warning("Response content is None, returning empty string")
+                    return ""
+                return content
 
         except LiteLLMException as error:
             logger.error(

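Finally, a compact sketch of how the plain-content path of `run()` now treats the response shapes it guards against. `SimpleNamespace` objects stand in for litellm responses, `extract_content` is a hypothetical helper, and the tool-call branch (which goes through `output_for_tools`) is omitted.

from types import SimpleNamespace

def extract_content(response):
    # Structural guards mirrored from the run() changes above
    if not hasattr(response, "choices") or not response.choices:
        return "Error: Invalid response from API"
    if not hasattr(response.choices[0], "message"):
        return "Error: Invalid response structure"
    content = response.choices[0].message.content
    # None content is normalized to an empty string instead of leaking None
    return "" if content is None else content

assert extract_content(SimpleNamespace(choices=[])) == "Error: Invalid response from API"

null_content = SimpleNamespace(
    choices=[SimpleNamespace(message=SimpleNamespace(content=None))]
)
assert extract_content(null_content) == ""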