refactor: streamline tool response handling and improve error logging in LiteLLM and BaseTool

pull/904/head
harshalmore31 2 weeks ago
parent 58921ffd84
commit d6fa21af46

@ -2800,72 +2800,48 @@ class Agent:
)
def execute_tools(self, response: any, loop_count: int):
    """Execute tool calls found in an LLM response and summarize the results.

    Runs the tool calls contained in ``response`` via ``self.tool_struct``,
    records the raw tool output in short-term memory, then asks a temporary
    tool-free LLM instance to summarize that output. Failures at any stage
    are logged and recorded in memory instead of being raised, so the agent
    loop can continue.

    Args:
        response: Raw API response object that may contain tool calls.
        loop_count: Current agent loop iteration, used for display only.
    """
    try:
        output = (
            self.tool_struct.execute_function_calls_from_api_response(
                response
            )
        )

        # Nothing to do when the response contained no tool calls.
        if not output:
            logger.info("No tool function calls found in response")
            return

        self.short_memory.add(
            role="Tool Executor",
            content=format_data_structure(output),
        )
        self.pretty_print(
            f"{format_data_structure(output)}",
            loop_count,
        )

        # Now run the LLM again without tools - create a temporary LLM
        # instance instead of modifying the cached one.
        try:
            temp_llm = self.temp_llm_instance_for_tool_summary()
            tool_response = temp_llm.run(
                f"""
                Please analyze and summarize the following tool execution output in a clear and concise way. 
                Focus on the key information and insights that would be most relevant to the user's original request.
                If there are any errors or issues, highlight them prominently.

                Tool Output:
                {output}
                """
            )
            self.short_memory.add(
                role=self.agent_name,
                content=tool_response,
            )
            self.pretty_print(
                f"{tool_response}",
                loop_count,
            )
        except Exception as e:
            logger.error(f"Error in tool summary generation: {e}")
            # Fall back to echoing the raw tool output as the summary so
            # the conversation history still reflects what happened.
            fallback_summary = f"Tool execution completed. Output: {format_data_structure(output)}"
            self.short_memory.add(
                role=self.agent_name,
                content=fallback_summary,
            )
            self.pretty_print(fallback_summary, loop_count)
    except Exception as e:
        logger.error(f"Error in tool execution: {e}")
        error_message = f"Tool execution failed: {str(e)}"
        self.short_memory.add(
            role="Tool Executor",
            content=error_message,
        )
        self.pretty_print(error_message, loop_count)
def list_output_types(self):
    """Return the ``OutputType`` definition listing the supported output types."""
    return OutputType

@ -2253,15 +2253,6 @@ class BaseTool(BaseModel):
else:
# Convert string to dict if needed
if isinstance(api_response, str):
# Handle empty or whitespace-only strings
api_response = api_response.strip()
if not api_response:
self._log_if_verbose(
"warning",
"Empty API response string received, returning empty list"
)
return []
try:
api_response = json.loads(api_response)
except json.JSONDecodeError as e:
@ -2269,7 +2260,6 @@ class BaseTool(BaseModel):
"error",
f"Failed to parse JSON from API response: {e}. Response: '{api_response[:100]}...'"
)
# If JSON parsing fails, try to continue without function calls
return []
if not isinstance(api_response, dict):
@ -2401,10 +2391,11 @@ class BaseTool(BaseModel):
if name:
try:
# Parse arguments JSON string
if isinstance(arguments_str, str):
arguments = self._parse_json_string(arguments_str)
else:
arguments = arguments_str if arguments_str is not None else {}
arguments = (
json.loads(arguments_str)
if isinstance(arguments_str, str)
else arguments_str
)
function_calls.append(
{
@ -2417,16 +2408,7 @@ class BaseTool(BaseModel):
except json.JSONDecodeError as e:
self._log_if_verbose(
"error",
f"Failed to parse arguments for {name}: {e}. Using empty dict instead.",
)
# Use empty dict as fallback
function_calls.append(
{
"name": name,
"arguments": {},
"id": response.get("id"),
"type": "openai",
}
f"Failed to parse arguments for {name}: {e}",
)
# Check for choices[].message.tool_calls format
@ -2907,15 +2889,12 @@ class BaseTool(BaseModel):
if name:
try:
# Parse arguments JSON string with better error handling
if isinstance(arguments_str, str):
arguments_str = arguments_str.strip()
if not arguments_str:
arguments = {}
else:
arguments = json.loads(arguments_str)
else:
arguments = arguments_str if arguments_str is not None else {}
# Parse arguments JSON string
arguments = (
json.loads(arguments_str)
if isinstance(arguments_str, str)
else arguments_str
)
function_calls.append(
{
@ -2930,18 +2909,7 @@ class BaseTool(BaseModel):
except json.JSONDecodeError as e:
self._log_if_verbose(
"error",
f"Failed to parse arguments for {name}: {e}. Using empty dict instead.",
)
# Use empty dict as fallback
function_calls.append(
{
"name": name,
"arguments": {},
"id": getattr(
tool_call, "id", None
),
"type": "openai",
}
f"Failed to parse arguments for {name}: {e}",
)
# Handle dictionary representations of tool calls

@ -152,45 +152,21 @@ class LiteLLM:
)
def output_for_tools(self, response: any):
    """Extract tool-call data from a completion response.

    Args:
        response: Completion response expected to expose
            ``choices[0].message.tool_calls``.

    Returns:
        For MCP calls (``self.mcp_call`` is True), a dict holding the first
        tool call's function name and arguments; otherwise the full
        ``tool_calls`` payload, dumped via ``model_dump()`` when it is a
        pydantic ``BaseModel``. An empty dict (MCP) or empty list is
        returned when the response lacks the expected structure or an
        error occurs.
    """
    try:
        if self.mcp_call is True:
            # Validate response structure for MCP calls before indexing.
            if (
                hasattr(response, "choices")
                and len(response.choices) > 0
                and hasattr(response.choices[0], "message")
                and hasattr(response.choices[0].message, "tool_calls")
                and response.choices[0].message.tool_calls
                and len(response.choices[0].message.tool_calls) > 0
            ):
                # MCP responses are reduced to the first tool call's function.
                out = response.choices[0].message.tool_calls[0].function
                output = {
                    "function": {
                        "name": out.name,
                        "arguments": out.arguments,
                    }
                }
                return output
            else:
                logger.warning("Invalid MCP response structure, returning empty dict")
                return {}
        else:
            # Validate response structure for regular tool calls.
            if (
                hasattr(response, "choices")
                and len(response.choices) > 0
                and hasattr(response.choices[0], "message")
                and hasattr(response.choices[0].message, "tool_calls")
            ):
                out = response.choices[0].message.tool_calls
                if isinstance(out, BaseModel):
                    out = out.model_dump()
                return out
            else:
                logger.warning("Invalid tool response structure, returning empty list")
                return []
    except Exception as e:
        logger.error(f"Error processing tool response: {e}")
        # Keep the return type consistent with the requested mode.
        return {} if self.mcp_call else []
def _prepare_messages(
self,
@ -473,29 +449,14 @@ class LiteLLM:
# Make the completion call
response = completion(**completion_params)
# Validate response structure before processing
if not hasattr(response, 'choices') or not response.choices:
logger.error("Invalid response: no choices found")
return "Error: Invalid response from API"
if not hasattr(response.choices[0], 'message'):
logger.error("Invalid response: no message found in first choice")
return "Error: Invalid response structure"
# Handle tool-based response
if (self.tools_list_dictionary is not None and
hasattr(response.choices[0].message, 'tool_calls') and
response.choices[0].message.tool_calls is not None):
if self.tools_list_dictionary is not None:
return self.output_for_tools(response)
elif self.return_all is True:
return response.model_dump()
else:
# Return standard response content
content = response.choices[0].message.content
if content is None:
logger.warning("Response content is None, returning empty string")
return ""
return content
return response.choices[0].message.content
except LiteLLMException as error:
logger.error(

Loading…
Cancel
Save