Update agent.py and add project structure documentation

pull/819/head
ascender1729 3 months ago
parent eb9d337b45
commit dba38f25cf

@@ -0,0 +1,137 @@
Swarms Project Structure
=======================
1. Core Package (swarms/)
-----------------------
├── agents/
│ ├── agent_judge.py
│ ├── agent_print.py
│ ├── ape_agent.py
│ ├── auto_generate_swarm_config.py
│ ├── consistency_agent.py
│ ├── create_agents_from_yaml.py
│ ├── flexion_agent.py
│ ├── gkp_agent.py
│ ├── i_agent.py
│ ├── openai_assistant.py
│ ├── reasoning_agents.py
│ ├── reasoning_duo.py
│ ├── tool_agent.py
│ └── __init__.py
├── prompts/
│ ├── accountant_swarm_prompts.py
│ ├── agent_judge_prompt.py
│ ├── agent_prompt.py
│ ├── agent_prompts.py
│ ├── agent_system_prompts.py
│ ├── autoswarm.py
│ ├── finance_agent_prompt.py
│ ├── finance_agent_sys_prompt.py
│ ├── math_agent_prompt.py
│ ├── security_team.py
│ ├── support_agent_prompt.py
│ └── many more prompt files...
├── structs/
│ ├── agent.py
│ ├── agents_available.py
│ ├── agent_registry.py
│ ├── agent_roles.py
│ ├── agent_router.py
│ ├── async_workflow.py
│ ├── base_structure.py
│ ├── base_swarm.py
│ ├── base_workflow.py
│ ├── concurrent_workflow.py
│ ├── deep_research_swarm.py
│ ├── graph_swarm.py
│ ├── groupchat.py
│ ├── majority_voting.py
│ ├── matrix_swarm.py
│ ├── mixture_of_agents.py
│ ├── sequential_workflow.py
│ └── many more structure files...
├── tools/
│ ├── base_tool.py
│ ├── mcp_integration.py
│ ├── tool_registry.py
│ ├── tool_utils.py
│ └── many more tool files...
├── utils/
│ ├── any_to_str.py
│ ├── file_processing.py
│ ├── formatter.py
│ ├── loguru_logger.py
│ ├── pdf_to_text.py
│ └── many more utility files...
├── schemas/
│ ├── agent_input_schema.py
│ ├── agent_step_schemas.py
│ ├── base_schemas.py
│ └── __init__.py
├── telemetry/
│ ├── bootup.py
│ ├── main.py
│ └── __init__.py
├── client/
│ ├── main.py
│ └── __init__.py
└── cli/
├── create_agent.py
├── main.py
├── onboarding_process.py
└── __init__.py
2. Examples (examples/)
---------------------
├── advanced_market_analysis/
├── crypto/
├── document_processing/
├── forest_swarm_examples/
├── groupchat_examples/
├── healthcare/
├── mcp_example/
├── sequential_workflow/
├── tools_examples/
└── many more example directories...
3. Documentation (docs/)
----------------------
├── swarms/
├── swarms_cloud/
├── swarms_memory/
├── swarms_platform/
├── swarms_tools/
└── many more documentation sections...
4. Tests (tests/)
---------------
├── agents/
├── artifacts/
├── prompts/
├── structs/
├── utils/
└── many more test directories...
5. Configuration Files
--------------------
├── pyproject.toml
├── requirements.txt
├── poetry.lock
├── Dockerfile
└── .env.example
6. Main Project Files
-------------------
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
└── SECURITY.md
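
The tree above maps directly onto import paths. A minimal sketch follows; the
exported names are assumptions inferred from the file names, not a documented
public API:

    # Hypothetical imports implied by the layout above
    from swarms.structs.agent import Agent                              # swarms/structs/agent.py
    from swarms.structs.sequential_workflow import SequentialWorkflow   # swarms/structs/sequential_workflow.py
    from swarms.tools.base_tool import BaseTool                         # swarms/tools/base_tool.py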

@@ -845,9 +845,7 @@ class Agent:
                # Use a default temperature
                self.llm.temperature = 0.5
        except Exception as error:
-            logger.error(
-                f"Error dynamically changing temperature: {error}"
-            )
+            logger.error(f"Error dynamically changing temperature: {error}")

    def print_dashboard(self):
        """Print dashboard"""
@@ -1873,7 +1871,7 @@ class Agent:
        self.retry_interval = retry_interval

    def reset(self):
-        """Reset the agent"""Reset the agent"""
+        """Reset the agent"""
        self.short_memory = None

    def ingest_docs(self, docs: List[str], *args, **kwargs):
@@ -2642,45 +2640,98 @@ class Agent:
        )

    def parse_llm_output(self, response: Any) -> str:
-        """Parse and standardize the output from the LLM.
-
-        Args:
-            response (Any): The response from the LLM in any format
-
-        Returns:
-            str: Standardized string output
-
-        Raises:
-            ValueError: If the response format is unexpected and cannot be handled
-        """
-        try:
-            # Handle dictionary responses
-            if isinstance(response, dict):
-                if "choices" in response:
-                    return response["choices"][0]["message"][
-                        "content"
-                    ]
-                return json.dumps(
-                    response
-                )  # Convert other dicts to string
-
-            # Handle string responses
-            elif isinstance(response, str):
-                return response
-
-            # Handle list responses (from check_llm_outputs)
-            elif isinstance(response, list):
-                return "\n".join(response)
-
-            # Handle any other type by converting to string
-            else:
-                return str(response)
-
-        except Exception as e:
-            logger.error(f"Error parsing LLM output: {e}")
-            raise ValueError(
-                f"Failed to parse LLM output: {type(response)}"
-            )
+        """Parse the LLM output to a string."""
+        if isinstance(response, str):
+            return response
+        elif isinstance(response, dict):
+            return json.dumps(response)
+        elif isinstance(response, list):
+            return json.dumps(response)
+        else:
+            return str(response)
+
+    def mcp_execution_flow(self, response: str) -> str:
+        """Forward tool calls to MCP servers with support for various input formats.
+
+        Args:
+            response (str): The response from the LLM containing tool calls or natural language.
+
+        Returns:
+            str: The result of executing the tool calls with preserved formatting.
+        """
+        try:
+            # Try to parse as JSON first
+            try:
+                tool_calls = json.loads(response)
+                is_json = True
+            except json.JSONDecodeError:
+                # If not JSON, treat as natural language
+                tool_calls = [response]
+                is_json = False
+
+            # Execute tool calls against MCP servers
+            results = []
+            errors = []
+
+            # Handle both single tool call and array of tool calls
+            if isinstance(tool_calls, dict):
+                tool_calls = [tool_calls]
+
+            for tool_call in tool_calls:
+                try:
+                    # Execute the tool call against all MCP servers
+                    result = batch_mcp_flow(self.mcp_servers, tool_call)
+                    if result:
+                        results.extend(result)
+                        # Add successful result to memory with context
+                        self.short_memory.add(
+                            role="assistant",
+                            content=f"Tool execution result: {result}"
+                        )
+                    else:
+                        error_msg = "No result from tool execution"
+                        errors.append(error_msg)
+                        self.short_memory.add(
+                            role="error",
+                            content=error_msg
+                        )
+                except Exception as e:
+                    error_msg = f"Error executing tool call: {str(e)}"
+                    errors.append(error_msg)
+                    logger.error(error_msg)
+                    self.short_memory.add(
+                        role="error",
+                        content=error_msg
+                    )
+
+            # Format the final response
+            if results:
+                if len(results) == 1:
+                    # For single results, return as is to preserve formatting
+                    return results[0]
+                else:
+                    # For multiple results, combine with context
+                    formatted_results = []
+                    for i, result in enumerate(results, 1):
+                        formatted_results.append(f"Result {i}: {result}")
+                    return "\n".join(formatted_results)
+            elif errors:
+                if len(errors) == 1:
+                    return errors[0]
+                else:
+                    return "Multiple errors occurred:\n" + "\n".join(f"- {err}" for err in errors)
+            else:
+                return "No results or errors returned"
+
+        except Exception as e:
+            error_msg = f"Error in MCP execution flow: {str(e)}"
+            logger.error(error_msg)
+            self.short_memory.add(
+                role="error",
+                content=error_msg
+            )
+            return error_msg

    def sentiment_and_evaluator(self, response: str):
        if self.evaluator:
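
For orientation, a rough usage sketch of the two methods above (not part of the
commit): it assumes `agent` is an Agent instance whose mcp_servers list is
already configured, and the "arguments" key in the payload is a guess, since
only "tool_name" appears in the removed helper below.

    import json

    # parse_llm_output normalizes any LLM return type to a string
    print(agent.parse_llm_output({"status": "ok"}))          # -> '{"status": "ok"}'

    # mcp_execution_flow accepts either a JSON tool call or free-form text
    tool_call = {"tool_name": "get_weather", "arguments": {"city": "Berlin"}}
    print(agent.mcp_execution_flow(json.dumps(tool_call)))    # JSON path
    print(agent.mcp_execution_flow("check the weather"))      # natural-language fallback
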
@@ -2711,16 +2762,3 @@ class Agent:
                role="Output Cleaner",
                content=response,
            )
-
-    def mcp_execution_flow(self, response: str) -> str:
-        """
-        Forward the JSON tool-call coming from the LLM to all MCP servers
-        listed in self.mcp_servers.
-        """
-        try:
-            payload = json.loads(response)  # {"tool_name": ...}
-            results = batch_mcp_flow(self.mcp_servers, payload)
-            # batch_mcp_flow already blocks, so results is a list[str]
-            return any_to_str(results[0] if len(results) == 1 else results)
-        except Exception as err:
-            logger.error(f"MCP flow failed: {err}")
-            return f"[MCP-error] {err}"