Fix: Gracefully handle None LLM responses to prevent agent crashes

pull/923/head
harshalmore31 6 days ago
parent a24cc89cba
commit e3f1265934

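The core of the fix is a defensive None check everywhere a raw LLM response is handed off to tool handling. A minimal sketch of the pattern below is illustrative only; the helper name and signature are hypothetical, not the actual swarms internals:

import logging

logger = logging.getLogger(__name__)

def run_tool_step(response, loop_count, execute_tools):
    # Hypothetical helper showing the guard added in this commit:
    # if the model returned nothing, log a warning and skip instead of crashing.
    if response is None:
        logger.warning(
            "LLM returned None response in loop %s, skipping tool execution",
            loop_count,
        )
        return None
    return execute_tools(response)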
@@ -0,0 +1,91 @@
import json
import logging

from swarms.structs import Agent
from swarms.prompts.logistics import (
    Quality_Control_Agent_Prompt,
)
from swarms import BaseTool

# Set up debug logging
logging.basicConfig(level=logging.DEBUG)

# Image for analysis
# factory_image = "image.png"  # normal image of a factory
factory_image = "image2.png"  # image of a burning factory


def security_analysis(danger_level: str) -> str:
    """
    Analyzes the security danger level and returns an appropriate response.

    Args:
        danger_level (str): The level of danger to analyze.
            Must be one of: "low", "medium", "high"

    Returns:
        str: A detailed security analysis based on the danger level.
    """
    if danger_level == "low":
        return """SECURITY ANALYSIS - LOW DANGER LEVEL:
Environment appears safe and well-controlled
Standard security measures are adequate
Low risk of accidents or security breaches
Normal operational protocols can continue
Recommendations: Maintain current security standards and continue regular monitoring."""

    elif danger_level == "medium":
        return """SECURITY ANALYSIS - MEDIUM DANGER LEVEL:
Moderate security concerns identified
Enhanced monitoring recommended
Some security measures may need strengthening
Risk of incidents exists but manageable
Recommendations: Implement additional safety protocols, increase surveillance, and conduct safety briefings."""

    elif danger_level == "high":
        return """SECURITY ANALYSIS - HIGH DANGER LEVEL:
🚨 CRITICAL SECURITY CONCERNS DETECTED
🚨 Immediate action required
🚨 High risk of accidents or security breaches
🚨 Operations may need to be suspended
Recommendations: Immediate intervention required, evacuate if necessary, implement emergency protocols, and conduct thorough security review."""

    else:
        return f"ERROR: Invalid danger level '{danger_level}'. Must be 'low', 'medium', or 'high'."


# Custom system prompt that includes tool usage
custom_system_prompt = f"""
{Quality_Control_Agent_Prompt}

You have access to tools that can help you with your analysis. When you need to perform a security analysis, you MUST use the security_analysis function with an appropriate danger level (low, medium, or high) based on your observations.

Always use the available tools when they are relevant to the task. If you determine there is any level of danger or security concern, call the security_analysis function with the appropriate danger level.
"""

# Quality control agent
quality_control_agent = Agent(
    agent_name="Quality Control Agent",
    agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.",
    # model_name="anthropic/claude-3-opus-20240229",
    model_name="gpt-4o",
    system_prompt=custom_system_prompt,
    multi_modal=True,
    max_loops=1,
    output_type="str-all-except-first",
    # tools_list_dictionary=[schema],
    tools=[security_analysis],
)

response = quality_control_agent.run(
    task="Analyze the image and then perform a security analysis. Based on what you see in the image, determine if there is a low, medium, or high danger level and call the security_analysis function with that danger level.",
    img=factory_image,
)

# The response is already printed by the agent's pretty_print method

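For a quick sanity check of the tool itself, independent of the agent and of any image input, the function defined above can be called directly:

# Direct calls to the registered tool (no LLM involved):
print(security_analysis("high"))     # detailed high-danger analysis
print(security_analysis("unknown"))  # ERROR message for an invalid level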
@@ -1080,19 +1080,25 @@ class Agent:
                 # Print
                 self.pretty_print(response, loop_count)

                 # Check and execute callable tools
                 if exists(self.tools):
-                    if (
-                        self.output_raw_json_from_tool_call
-                        is True
-                    ):
-                        response = response
-                    else:
+                    # Handle tools
+                    if (
+                        hasattr(self, "tool_struct")
+                        and self.tool_struct is not None
+                        and self.output_raw_json_from_tool_call
+                        is True
+                    ):
+                        response = response
+                    else:
+                        # Only execute tools if response is not None
+                        if response is not None:
+                            self.execute_tools(
+                                response=response,
+                                loop_count=loop_count,
+                            )
+                        else:
+                            logger.warning(
+                                f"LLM returned None response in loop {loop_count}, skipping tool execution"
+                            )

                 # Handle MCP tools
                 if (
@@ -1100,10 +1106,16 @@ class Agent:
                     or exists(self.mcp_config)
                     or exists(self.mcp_urls)
                 ):
-                    self.mcp_tool_handling(
-                        response=response,
-                        current_loop=loop_count,
-                    )
+                    # Only handle MCP tools if response is not None
+                    if response is not None:
+                        self.mcp_tool_handling(
+                            response=response,
+                            current_loop=loop_count,
+                        )
+                    else:
+                        logger.warning(
+                            f"LLM returned None response in loop {loop_count}, skipping MCP tool handling"
+                        )

                 self.sentiment_and_evaluator(response)
@@ -2858,6 +2870,13 @@ class Agent:
         )

     def execute_tools(self, response: any, loop_count: int):
+        # Handle None response gracefully
+        if response is None:
+            logger.warning(
+                f"Cannot execute tools with None response in loop {loop_count}. "
+                "This may indicate the LLM did not return a valid response."
+            )
+            return
+
         output = (
             self.tool_struct.execute_function_calls_from_api_response(

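With this guard in place, calling execute_tools with a None response returns early instead of reaching tool_struct. A small illustrative check, assuming the quality_control_agent from the example script above is available:

# Assumes quality_control_agent from the example above; with the new guard this
# logs a warning and returns None without touching tool_struct.
result = quality_control_agent.execute_tools(response=None, loop_count=1)
assert result is None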
@@ -2223,8 +2223,13 @@ class BaseTool(BaseModel):
             >>> tool_calls = [ChatCompletionMessageToolCall(...), ...]
             >>> results = tool.execute_function_calls_from_api_response(tool_calls)
         """
+        # Handle None API response gracefully by returning empty results
         if api_response is None:
-            raise ToolValidationError("API response cannot be None")
+            self._log_if_verbose(
+                "warning",
+                "API response is None, returning empty results. This may indicate the LLM did not return a valid response."
+            )
+            return [] if not return_as_string else []

         # Handle direct list of tool call objects (e.g., from OpenAI ChatCompletionMessageToolCall or Anthropic BaseModels)
         if isinstance(api_response, list):

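At the BaseTool level, the same situation now degrades to an empty result list instead of raising. A minimal sketch, assuming default construction of BaseTool is sufficient here (that constructor usage is an assumption, not taken from this diff):

from swarms import BaseTool

tool = BaseTool()  # assumption: default fields are enough for this check
# Previously a None API response raised ToolValidationError; after this change
# it returns an empty list (and logs a warning when verbose is enabled).
assert tool.execute_function_calls_from_api_response(None) == []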