[fix][autoswarm builder] [new examples and fix litellm wrapper and tools issue]

pull/1102/head
Kye Gomez 2 weeks ago
parent b69a26c3c0
commit 0161a5e39f

@@ -1,20 +0,0 @@
from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
import json
swarm = AutoSwarmBuilder(
name="My Swarm",
description="A swarm of agents",
verbose=True,
max_loops=1,
return_agents=True,
model_name="gpt-4.1",
)
print(
json.dumps(
swarm.run(
task="Create an accounting team to analyze crypto transactions, there must be 5 agents in the team with extremely extensive prompts. Make the prompts extremely detailed and specific and long and comprehensive. Make sure to include all the details of the task in the prompts."
),
indent=4,
)
)

@@ -0,0 +1,18 @@
import json
from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
swarm = AutoSwarmBuilder(
name="My Swarm",
description="A swarm of agents",
verbose=True,
max_loops=1,
model_name="claude-sonnet-4-20250514",
execution_type="return-agents",
)
out = swarm.run(
task="Create an accounting team to analyze crypto transactions, there must be 5 agents in the team with extremely extensive prompts. Make the prompts extremely detailed and specific and long and comprehensive. Make sure to include all the details of the task in the prompts."
)
print(json.dumps(out, indent=4))
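Note: execution_type now selects among the modes defined in the execution_types list in the auto_swarm_builder diff below. A minimal sketch of the config-only mode, assuming the same AutoSwarmBuilder API as the example above:

import json
from swarms.structs.auto_swarm_builder import AutoSwarmBuilder

# "return-swarm-router-config" returns the generated SwarmRouter spec as a
# dict instead of executing it (mode names come from execution_types below).
config_builder = AutoSwarmBuilder(
    name="Config Only",
    description="Return the router config without running the swarm",
    execution_type="return-swarm-router-config",
)
router_config = config_builder.run(task="Plan a two-agent research team")
print(json.dumps(router_config, indent=4))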

@@ -0,0 +1,20 @@
from swarms import Agent
from swarms_tools import exa_search
# Initialize the agent
agent = Agent(
agent_name="Quantitative-Trading-Agent",
agent_description="Advanced quantitative trading and algorithmic analysis agent",
model_name="gpt-4o-mini",
dynamic_temperature_enabled=True,
max_loops=1,
dynamic_context_window=True,
tools=[exa_search],
streaming_on=False,
)
out = agent.run(
task="What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?",
)
print(out)
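Since tools takes a list of plain callables, additional functions can sit alongside exa_search. A sketch reusing the imports above; my_custom_lookup is a hypothetical stand-in, not part of swarms_tools:

# Sketch: any plain function with a docstring and typed signature can be
# passed as a tool; my_custom_lookup is hypothetical.
def my_custom_lookup(query: str) -> str:
    """Return a canned answer for the given query."""
    return f"No cached data for: {query}"

multi_tool_agent = Agent(
    agent_name="Multi-Tool-Agent",
    model_name="gpt-4o-mini",
    max_loops=1,
    tools=[exa_search, my_custom_lookup],
)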

@@ -0,0 +1,3 @@
from swarms_tools import exa_search
print(exa_search("What are the best multi-agent frameworks "))

@@ -0,0 +1,26 @@
from swarms_tools import exa_search
from swarms import HeavySwarm
swarm = HeavySwarm(
name="Gold ETF Research Team",
description="A team of agents that research the best gold ETFs",
worker_model_name="claude-sonnet-4-20250514",
show_dashboard=True,
question_agent_model_name="gpt-4.1",
loops_per_agent=1,
agent_prints_on=False,
worker_tools=[exa_search],
random_loops_per_agent=False,
)
prompt = (
"Find the best 3 gold ETFs. For each ETF, provide the ticker symbol, "
"full name, current price, expense ratio, assets under management, and "
"a brief explanation of why it is considered among the best. Present the information "
"in a clear, structured format suitable for investors. Scrape the data from the web. "
)
out = swarm.run(prompt)
print(out)

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "8.3.4"
version = "8.3.7"
description = "Swarms - TGSC"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
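Assuming the bumped release is published to PyPI, a quick stdlib check that the installed package matches this commit's version:

import importlib.metadata

# 8.3.7 is the version set in pyproject.toml above.
assert importlib.metadata.version("swarms") == "8.3.7"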

@@ -1,17 +1,16 @@
import os
import json
import traceback
from typing import List, Literal, Optional
from typing import List, Optional
from dotenv import load_dotenv
from loguru import logger
from pydantic import BaseModel, Field
from swarms.prompts.reasoning_prompt import INTERNAL_MONOLGUE_PROMPT
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.structs.ma_utils import set_random_models_for_agents
from swarms.structs.swarm_router import SwarmRouter, SwarmType
from swarms.utils.function_caller_model import OpenAIFunctionCaller
from swarms.utils.litellm_wrapper import LiteLLM
load_dotenv()
@@ -128,6 +127,14 @@ When creating a multi-agent system, provide:
"""
execution_types = [
"return-agents",
"execute-swarm-router",
"return-swarm-router-config",
"return-agent-configurations",
]
class AgentConfig(BaseModel):
"""Configuration for an individual agent in a swarm"""
@@ -201,34 +208,6 @@ class SwarmRouterConfig(BaseModel):
arbitrary_types_allowed = True
def reasoning_agent_run(
task: str,
img: Optional[str] = None,
name: str = None,
model_name: str = "gpt-4.1",
system_prompt: str = None,
):
"""
Run a reasoning agent to analyze the task before the main director processes it.
Args:
task (str): The task to reason about
img (Optional[str]): Optional image input
Returns:
str: The reasoning output from the agent
"""
agent = Agent(
agent_name=name,
agent_description=f"You're the {name} agent that is responsible for reasoning about the task and creating a plan for the swarm to accomplish the task.",
model_name=model_name,
system_prompt=INTERNAL_MONOLGUE_PROMPT + system_prompt,
max_loops=1,
)
return agent.run(task=task, img=img)
class AutoSwarmBuilder:
"""A class that automatically builds and manages swarms of AI agents.
@@ -250,17 +229,11 @@ class AutoSwarmBuilder:
description: str = "Auto Swarm Builder",
verbose: bool = True,
max_loops: int = 1,
random_models: bool = False,
return_agents: bool = False,
model_name: str = "gpt-4.1",
generate_router_config: bool = False,
interactive: bool = False,
max_tokens: int = 8000,
execution_type: Literal[
"return-agents",
"execute-swarm-router",
"return-agent-configurations",
] = "return-agents",
execution_type: execution_types = "return-agents",
):
"""Initialize the AutoSwarmBuilder.
@@ -275,8 +248,6 @@
self.description = description
self.verbose = verbose
self.max_loops = max_loops
self.random_models = random_models
self.return_agents = return_agents
self.model_name = model_name
self.generate_router_config = generate_router_config
self.interactive = interactive
@@ -302,7 +273,7 @@
agents = self.create_agents(task)
if self.random_models:
if self.execution_type == "return-agents":
logger.info("Setting random models for agents")
agents = set_random_models_for_agents(agents=agents)
@@ -324,9 +295,9 @@
"""
try:
if self.generate_router_config:
if self.execution_type == "return-swarm-router-config":
return self.create_router_config(task)
elif self.return_agents:
elif self.execution_type == "return-agent-configurations":
return self.create_agents(task)
else:
return self._execute_task(task)
@@ -338,41 +309,6 @@
)
raise
# def run(
# self, task: str, correct_answer: str = None, *args, **kwargs
# ):
# """
# Executes the swarm on the given task. If correct_answer is provided, the method will retry until this answer is found in the output, up to max_loops times.
# If correct_answer is not provided, the method will execute the task once and return the output.
# Args:
# task (str): The task to execute.
# correct_answer (str, optional): If provided, the method will retry until this answer is found in the output.
# *args: Additional positional arguments.
# **kwargs: Additional keyword arguments.
# Returns:
# Any: The output of the swarm execution, or the output containing the correct answer if specified.
# """
# if correct_answer is None:
# # If no correct_answer is specified, just run once and return the output
# return self._run(task, *args, **kwargs)
# else:
# # If correct_answer is specified, retry up to max_loops times
# for attempt in range(1, self.max_loops + 1):
# output = self._run(task, *args, **kwargs)
# if correct_answer in str(output):
# logger.info(
# f"AutoSwarmBuilder: Correct answer found on attempt {attempt}."
# )
# return output
# else:
# logger.info(
# f"AutoSwarmBuilder: Attempt {attempt} did not yield the correct answer, retrying..."
# )
# # If correct_answer was not found after max_loops, return the last output
# return output
def dict_to_agent(self, output: dict):
agents = []
if isinstance(output, dict):
@@ -402,7 +338,9 @@
f"Create the multi-agent team for the following task: {task}"
)
return output.model_dump()
output = json.loads(output)
return output
except Exception as e:
logger.error(
@@ -412,12 +350,19 @@
raise e
def build_llm_agent(self, config: BaseModel):
return OpenAIFunctionCaller(
# return OpenAIFunctionCaller(
# system_prompt=BOSS_SYSTEM_PROMPT,
# api_key=os.getenv("OPENAI_API_KEY"),
# temperature=0.5,
# base_model=config,
# model_name=self.model_name,
# max_tokens=self.max_tokens,
# )
return LiteLLM(
model_name=self.model_name,
system_prompt=BOSS_SYSTEM_PROMPT,
api_key=os.getenv("OPENAI_API_KEY"),
temperature=0.5,
base_model=config,
model_name=self.model_name,
response_format=config,
max_tokens=self.max_tokens,
)
@@ -440,10 +385,7 @@
f"Create the agents for the following task: {task}"
)
if self.return_agents:
output = output.model_dump()
else:
output = self.dict_to_agent(output)
output = json.loads(output)
return output
@@ -477,7 +419,7 @@
try:
agent = Agent(
agent_name=agent_name,
description=agent_description,
agent_description=agent_description,
system_prompt=agent_system_prompt,
verbose=self.verbose,
dynamic_temperature_enabled=False,
@@ -508,16 +450,14 @@
logger.info("Initializing swarm router")
model = self.build_llm_agent(config=SwarmRouterConfig)
logger.info("Creating swarm specification")
swarm_spec = model.run(
f"Create the swarm spec for the following task: {task}"
)
logger.debug(
f"Received swarm specification: {swarm_spec.model_dump()}"
)
swarm_spec = swarm_spec.model_dump()
logger.info("Initializing SwarmRouter")
print(swarm_spec)
print(type(swarm_spec))
swarm_router = SwarmRouter(
name=swarm_spec["name"],
description=swarm_spec["description"],
@@ -555,3 +495,6 @@
"""
return [self.run(task) for task in tasks]
def list_types(self):
return execution_types
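The new list_types() helper simply exposes the execution_types list added above. A minimal sketch of using it to inspect the valid modes:

from swarms.structs.auto_swarm_builder import AutoSwarmBuilder

builder = AutoSwarmBuilder(name="Inspector", description="List execution modes")
print(builder.list_types())
# Expected, per the execution_types list in this diff:
# ['return-agents', 'execute-swarm-router',
#  'return-swarm-router-config', 'return-agent-configurations']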

@@ -47,7 +47,7 @@ class SequentialWorkflow:
max_loops: int = 1,
output_type: OutputType = "dict",
shared_memory_system: callable = None,
multi_agent_collab_prompt: bool = True,
multi_agent_collab_prompt: bool = False,
team_awareness: bool = False,
*args,
**kwargs,
@@ -110,7 +110,14 @@
if self.multi_agent_collab_prompt is True:
for agent in self.agents:
agent.system_prompt += MULTI_AGENT_COLLAB_PROMPT
if hasattr(agent, "system_prompt"):
if agent.system_prompt is None:
agent.system_prompt = ""
agent.system_prompt += MULTI_AGENT_COLLAB_PROMPT
else:
logger.warning(
f"Agent {getattr(agent, 'name', str(agent))} does not have a 'system_prompt' attribute."
)
logger.info(
f"Sequential Workflow Name: {self.name} is ready to run."

@@ -6,6 +6,7 @@ from pathlib import Path
from typing import List, Optional
import litellm
from pydantic import BaseModel
import requests
from litellm import completion, supports_vision
from loguru import logger
@@ -394,74 +395,108 @@ class LiteLLM:
# Store other types of runtime_args for debugging
completion_params["runtime_args"] = runtime_args
# def output_for_tools(self, response: any):
# """
# Process tool calls from the LLM response and return formatted output.
# Args:
# response: The response object from the LLM API call
# Returns:
# dict or list: Formatted tool call data, or default response if no tool calls
# """
# try:
# # Convert response to dict if it's a Pydantic model
# if hasattr(response, "model_dump"):
# response_dict = response.model_dump()
# else:
# response_dict = response
# print(f"Response dict: {response_dict}")
# # Check if tool_calls exists and is not None
# if (
# response_dict.get("choices")
# and response_dict["choices"][0].get("message")
# and response_dict["choices"][0]["message"].get(
# "tool_calls"
# )
# and len(
# response_dict["choices"][0]["message"][
# "tool_calls"
# ]
# )
# > 0
# ):
# tool_call = response_dict["choices"][0]["message"][
# "tool_calls"
# ][0]
# if "function" in tool_call:
# return {
# "function": {
# "name": tool_call["function"].get(
# "name", ""
# ),
# "arguments": tool_call["function"].get(
# "arguments", "{}"
# ),
# }
# }
# else:
# # Handle case where tool_call structure is different
# return tool_call
# else:
# # Return a default response when no tool calls are present
# logger.warning(
# "No tool calls found in response, returning default response"
# )
# return {
# "function": {
# "name": "no_tool_call",
# "arguments": "{}",
# }
# }
# except Exception as e:
# logger.error(f"Error processing tool calls: {str(e)} Traceback: {traceback.format_exc()}")
def output_for_tools(self, response: any):
"""
Process tool calls from the LLM response and return formatted output.
Process and extract tool call information from the LLM response.
This function handles the output for tool-based responses, supporting both
MCP (Model Context Protocol) and standard tool call formats. It extracts the
relevant function name and arguments from the response, handling both
BaseModel and dictionary outputs.
Args:
response: The response object from the LLM API call
response (any): The response object returned by the LLM API call.
Returns:
dict or list: Formatted tool call data, or default response if no tool calls
dict or list: A dictionary containing the function name and arguments
if MCP call is used, or the tool calls output (as a dict or list)
for standard tool call responses.
"""
try:
# Convert response to dict if it's a Pydantic model
if hasattr(response, "model_dump"):
response_dict = response.model_dump()
else:
response_dict = response
if self.mcp_call is True:
out = response.choices[0].message.tool_calls[0].function
# Check if tool_calls exists and is not None
if (
response_dict.get("choices")
and response_dict["choices"][0].get("message")
and response_dict["choices"][0]["message"].get(
"tool_calls"
)
and len(
response_dict["choices"][0]["message"][
"tool_calls"
]
)
> 0
):
tool_call = response_dict["choices"][0]["message"][
"tool_calls"
][0]
if "function" in tool_call:
return {
"function": {
"name": tool_call["function"].get(
"name", ""
),
"arguments": tool_call["function"].get(
"arguments", "{}"
),
}
}
else:
# Handle case where tool_call structure is different
return tool_call
if len(out) > 1:
return out
else:
# Return a default response when no tool calls are present
logger.warning(
"No tool calls found in response, returning default response"
)
return {
"function": {
"name": "no_tool_call",
"arguments": "{}",
}
}
except Exception as e:
logger.error(f"Error processing tool calls: {str(e)}")
# Return a safe default response
return {
out = out[0]
output = {
"function": {
"name": "error_processing_tool_calls",
"arguments": f'{{"error": "{str(e)}"}}',
"name": out.name,
"arguments": out.arguments,
}
}
return output
else:
out = response.choices[0].message.tool_calls
if isinstance(out, BaseModel):
out = out.model_dump()
return out
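For reference, a hypothetical response shape (trimmed to the fields the method reads) illustrating what the non-MCP branch above returns:

from pydantic import BaseModel

# Hypothetical stand-ins for litellm's tool-call response objects.
class FakeFunction(BaseModel):
    name: str
    arguments: str

class FakeToolCall(BaseModel):
    function: FakeFunction

tool_calls = [
    FakeToolCall(
        function=FakeFunction(
            name="exa_search", arguments='{"query": "best gold ETFs"}'
        )
    )
]

# Mirrors the else-branch: the raw tool_calls list is returned as-is,
# dumped to a plain dict only when the object itself is a BaseModel.
out = tool_calls
if isinstance(out, BaseModel):
    out = out.model_dump()
print(out)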
def output_for_reasoning(self, response: any):
"""
