[fix][autoswarm builder] [new examples and fix litellm wrapper and tools issue]

pull/1102/head
Kye Gomez 2 weeks ago
parent b69a26c3c0
commit 0161a5e39f

@@ -1,20 +0,0 @@
-from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
-import json
-
-swarm = AutoSwarmBuilder(
-    name="My Swarm",
-    description="A swarm of agents",
-    verbose=True,
-    max_loops=1,
-    return_agents=True,
-    model_name="gpt-4.1",
-)
-
-print(
-    json.dumps(
-        swarm.run(
-            task="Create an accounting team to analyze crypto transactions, there must be 5 agents in the team with extremely extensive prompts. Make the prompts extremely detailed and specific and long and comprehensive. Make sure to include all the details of the task in the prompts."
-        ),
-        indent=4,
-    )
-)

@@ -0,0 +1,18 @@
+import json
+
+from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
+
+swarm = AutoSwarmBuilder(
+    name="My Swarm",
+    description="A swarm of agents",
+    verbose=True,
+    max_loops=1,
+    model_name="claude-sonnet-4-20250514",
+    execution_type="return-agents",
+)
+
+out = swarm.run(
+    task="Create an accounting team to analyze crypto transactions, there must be 5 agents in the team with extremely extensive prompts. Make the prompts extremely detailed and specific and long and comprehensive. Make sure to include all the details of the task in the prompts."
+)
+
+print(json.dumps(out, indent=4))

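Note: the new example above replaces the old `return_agents` boolean with the single `execution_type` mode switch. A minimal sketch of the other modes, assuming the same builder arguments as the example above; the mode strings are the ones registered in the `execution_types` list added further down in this diff:

from swarms.structs.auto_swarm_builder import AutoSwarmBuilder

swarm = AutoSwarmBuilder(
    name="My Swarm",
    description="A swarm of agents",
    model_name="claude-sonnet-4-20250514",
    # One of: "return-agents", "execute-swarm-router",
    # "return-swarm-router-config", "return-agent-configurations"
    execution_type="return-swarm-router-config",
)

# Returns the generated SwarmRouter configuration instead of executing it
config = swarm.run(task="Plan a research team that analyzes gold ETFs")
print(config)

# The supported mode strings can also be inspected via the new helper
print(swarm.list_types())
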
@@ -0,0 +1,20 @@
+from swarms import Agent
+from swarms_tools import exa_search
+
+# Initialize the agent
+agent = Agent(
+    agent_name="Quantitative-Trading-Agent",
+    agent_description="Advanced quantitative trading and algorithmic analysis agent",
+    model_name="gpt-4o-mini",
+    dynamic_temperature_enabled=True,
+    max_loops=1,
+    dynamic_context_window=True,
+    tools=[exa_search],
+    streaming_on=False,
+)
+
+out = agent.run(
+    task="What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?",
+)
+
+print(out)

@@ -0,0 +1,3 @@
+from swarms_tools import exa_search
+
+print(exa_search("What are the best multi-agent frameworks "))

@@ -0,0 +1,26 @@
+from swarms_tools import exa_search
+
+from swarms import HeavySwarm
+
+swarm = HeavySwarm(
+    name="Gold ETF Research Team",
+    description="A team of agents that research the best gold ETFs",
+    worker_model_name="claude-sonnet-4-20250514",
+    show_dashboard=True,
+    question_agent_model_name="gpt-4.1",
+    loops_per_agent=1,
+    agent_prints_on=False,
+    worker_tools=[exa_search],
+    random_loops_per_agent=False,
+)
+
+prompt = (
+    "Find the best 3 gold ETFs. For each ETF, provide the ticker symbol, "
+    "full name, current price, expense ratio, assets under management, and "
+    "a brief explanation of why it is considered among the best. Present the information "
+    "in a clear, structured format suitable for investors. Scrape the data from the web. "
+)
+
+out = swarm.run(prompt)
+
+print(out)

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "swarms"
-version = "8.3.4"
+version = "8.3.7"
 description = "Swarms - TGSC"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]

@@ -1,17 +1,16 @@
-import os
+import json
 import traceback
-from typing import List, Literal, Optional
+from typing import List, Optional

 from dotenv import load_dotenv
 from loguru import logger
 from pydantic import BaseModel, Field

-from swarms.prompts.reasoning_prompt import INTERNAL_MONOLGUE_PROMPT
 from swarms.structs.agent import Agent
 from swarms.structs.conversation import Conversation
 from swarms.structs.ma_utils import set_random_models_for_agents
 from swarms.structs.swarm_router import SwarmRouter, SwarmType
-from swarms.utils.function_caller_model import OpenAIFunctionCaller
+from swarms.utils.litellm_wrapper import LiteLLM

 load_dotenv()
@@ -128,6 +127,14 @@ When creating a multi-agent system, provide:
 """

+execution_types = [
+    "return-agents",
+    "execute-swarm-router",
+    "return-swarm-router-config",
+    "return-agent-configurations",
+]
+

 class AgentConfig(BaseModel):
     """Configuration for an individual agent in a swarm"""
@@ -201,34 +208,6 @@ class SwarmRouterConfig(BaseModel):
         arbitrary_types_allowed = True

-def reasoning_agent_run(
-    task: str,
-    img: Optional[str] = None,
-    name: str = None,
-    model_name: str = "gpt-4.1",
-    system_prompt: str = None,
-):
-    """
-    Run a reasoning agent to analyze the task before the main director processes it.
-
-    Args:
-        task (str): The task to reason about
-        img (Optional[str]): Optional image input
-
-    Returns:
-        str: The reasoning output from the agent
-    """
-    agent = Agent(
-        agent_name=name,
-        agent_description=f"You're the {name} agent that is responsible for reasoning about the task and creating a plan for the swarm to accomplish the task.",
-        model_name=model_name,
-        system_prompt=INTERNAL_MONOLGUE_PROMPT + system_prompt,
-        max_loops=1,
-    )
-
-    return agent.run(task=task, img=img)
-

 class AutoSwarmBuilder:
     """A class that automatically builds and manages swarms of AI agents.
@@ -250,17 +229,11 @@ class AutoSwarmBuilder:
         description: str = "Auto Swarm Builder",
         verbose: bool = True,
         max_loops: int = 1,
-        random_models: bool = False,
-        return_agents: bool = False,
         model_name: str = "gpt-4.1",
         generate_router_config: bool = False,
         interactive: bool = False,
         max_tokens: int = 8000,
-        execution_type: Literal[
-            "return-agents",
-            "execute-swarm-router",
-            "return-agent-configurations",
-        ] = "return-agents",
+        execution_type: execution_types = "return-agents",
     ):
         """Initialize the AutoSwarmBuilder.
@@ -275,8 +248,6 @@ class AutoSwarmBuilder:
         self.description = description
         self.verbose = verbose
         self.max_loops = max_loops
-        self.random_models = random_models
-        self.return_agents = return_agents
         self.model_name = model_name
         self.generate_router_config = generate_router_config
         self.interactive = interactive
@@ -302,7 +273,7 @@ class AutoSwarmBuilder:
         agents = self.create_agents(task)

-        if self.random_models:
+        if self.execution_type == "return-agents":
             logger.info("Setting random models for agents")
             agents = set_random_models_for_agents(agents=agents)
@@ -324,9 +295,9 @@ class AutoSwarmBuilder:
         """
         try:
-            if self.generate_router_config:
+            if self.execution_type == "return-swarm-router-config":
                 return self.create_router_config(task)
-            elif self.return_agents:
+            elif self.execution_type == "return-agent-configurations":
                 return self.create_agents(task)
             else:
                 return self._execute_task(task)
@@ -338,41 +309,6 @@ class AutoSwarmBuilder:
             )
             raise

-    # def run(
-    #     self, task: str, correct_answer: str = None, *args, **kwargs
-    # ):
-    #     """
-    #     Executes the swarm on the given task. If correct_answer is provided, the method will retry until this answer is found in the output, up to max_loops times.
-    #     If correct_answer is not provided, the method will execute the task once and return the output.
-
-    #     Args:
-    #         task (str): The task to execute.
-    #         correct_answer (str, optional): If provided, the method will retry until this answer is found in the output.
-    #         *args: Additional positional arguments.
-    #         **kwargs: Additional keyword arguments.
-
-    #     Returns:
-    #         Any: The output of the swarm execution, or the output containing the correct answer if specified.
-    #     """
-    #     if correct_answer is None:
-    #         # If no correct_answer is specified, just run once and return the output
-    #         return self._run(task, *args, **kwargs)
-    #     else:
-    #         # If correct_answer is specified, retry up to max_loops times
-    #         for attempt in range(1, self.max_loops + 1):
-    #             output = self._run(task, *args, **kwargs)
-    #             if correct_answer in str(output):
-    #                 logger.info(
-    #                     f"AutoSwarmBuilder: Correct answer found on attempt {attempt}."
-    #                 )
-    #                 return output
-    #             else:
-    #                 logger.info(
-    #                     f"AutoSwarmBuilder: Attempt {attempt} did not yield the correct answer, retrying..."
-    #                 )
-    #         # If correct_answer was not found after max_loops, return the last output
-    #         return output
-
     def dict_to_agent(self, output: dict):
         agents = []
         if isinstance(output, dict):
@@ -402,7 +338,9 @@ class AutoSwarmBuilder:
                 f"Create the multi-agent team for the following task: {task}"
             )

-            return output.model_dump()
+            output = json.loads(output)
+
+            return output

         except Exception as e:
             logger.error(
@@ -412,12 +350,19 @@ class AutoSwarmBuilder:
             raise e

     def build_llm_agent(self, config: BaseModel):
-        return OpenAIFunctionCaller(
+        # return OpenAIFunctionCaller(
+        #     system_prompt=BOSS_SYSTEM_PROMPT,
+        #     api_key=os.getenv("OPENAI_API_KEY"),
+        #     temperature=0.5,
+        #     base_model=config,
+        #     model_name=self.model_name,
+        #     max_tokens=self.max_tokens,
+        # )
+
+        return LiteLLM(
+            model_name=self.model_name,
             system_prompt=BOSS_SYSTEM_PROMPT,
-            api_key=os.getenv("OPENAI_API_KEY"),
             temperature=0.5,
-            base_model=config,
-            model_name=self.model_name,
+            response_format=config,
             max_tokens=self.max_tokens,
         )
@@ -440,10 +385,7 @@ class AutoSwarmBuilder:
                 f"Create the agents for the following task: {task}"
             )

-            if self.return_agents:
-                output = output.model_dump()
-            else:
-                output = self.dict_to_agent(output)
+            output = json.loads(output)

             return output
@@ -477,7 +419,7 @@ class AutoSwarmBuilder:
         try:
             agent = Agent(
                 agent_name=agent_name,
-                description=agent_description,
+                agent_description=agent_description,
                 system_prompt=agent_system_prompt,
                 verbose=self.verbose,
                 dynamic_temperature_enabled=False,
@@ -508,16 +450,14 @@ class AutoSwarmBuilder:
         logger.info("Initializing swarm router")

         model = self.build_llm_agent(config=SwarmRouterConfig)

-        logger.info("Creating swarm specification")
         swarm_spec = model.run(
             f"Create the swarm spec for the following task: {task}"
         )

-        logger.debug(
-            f"Received swarm specification: {swarm_spec.model_dump()}"
-        )
-        swarm_spec = swarm_spec.model_dump()
-        logger.info("Initializing SwarmRouter")
+        print(swarm_spec)
+        print(type(swarm_spec))

         swarm_router = SwarmRouter(
             name=swarm_spec["name"],
             description=swarm_spec["description"],
@@ -555,3 +495,6 @@ class AutoSwarmBuilder:
         """
         return [self.run(task) for task in tasks]
+
+    def list_types(self):
+        return execution_types

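Taken together, the auto_swarm_builder changes swap OpenAIFunctionCaller for the LiteLLM wrapper with `response_format` set to a Pydantic schema, so the director now returns a JSON string that is decoded with `json.loads` instead of a Pydantic object dumped with `model_dump()`. A rough sketch of that flow in isolation, assuming `SwarmRouterConfig` is importable from the module as shown in the diff (the prompt strings below are placeholders):

import json

from swarms.structs.auto_swarm_builder import SwarmRouterConfig
from swarms.utils.litellm_wrapper import LiteLLM

# Mirrors what build_llm_agent() now constructs internally
llm = LiteLLM(
    model_name="gpt-4.1",
    system_prompt="You design multi-agent teams.",  # stand-in for BOSS_SYSTEM_PROMPT
    temperature=0.5,
    response_format=SwarmRouterConfig,  # schema constrains the model output
    max_tokens=8000,
)

# The wrapper returns JSON text matching the schema, not a Pydantic instance
raw = llm.run("Create the swarm spec for the following task: research gold ETFs")
swarm_spec = json.loads(raw)
print(swarm_spec["name"], swarm_spec["description"])
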
@@ -47,7 +47,7 @@ class SequentialWorkflow:
         max_loops: int = 1,
         output_type: OutputType = "dict",
         shared_memory_system: callable = None,
-        multi_agent_collab_prompt: bool = True,
+        multi_agent_collab_prompt: bool = False,
         team_awareness: bool = False,
         *args,
         **kwargs,
@@ -110,7 +110,14 @@ class SequentialWorkflow:
         if self.multi_agent_collab_prompt is True:
             for agent in self.agents:
-                agent.system_prompt += MULTI_AGENT_COLLAB_PROMPT
+                if hasattr(agent, "system_prompt"):
+                    if agent.system_prompt is None:
+                        agent.system_prompt = ""
+                    agent.system_prompt += MULTI_AGENT_COLLAB_PROMPT
+                else:
+                    logger.warning(
+                        f"Agent {getattr(agent, 'name', str(agent))} does not have a 'system_prompt' attribute."
+                    )

         logger.info(
             f"Sequential Workflow Name: {self.name} is ready to run."

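The SequentialWorkflow change makes the collaboration prompt opt-in (the default flips from True to False) and guards the injection so agents lacking a `system_prompt` attribute log a warning instead of raising an `AttributeError`. A minimal usage sketch, assuming two ordinary `Agent` instances (the model names are placeholders):

from swarms import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow

researcher = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1)
writer = Agent(agent_name="Writer", model_name="gpt-4o-mini", max_loops=1)

workflow = SequentialWorkflow(
    agents=[researcher, writer],
    max_loops=1,
    multi_agent_collab_prompt=True,  # must now be enabled explicitly
)

print(workflow.run("Summarize the top three gold ETFs."))
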
@@ -6,6 +6,7 @@ from pathlib import Path
 from typing import List, Optional

 import litellm
+from pydantic import BaseModel
 import requests
 from litellm import completion, supports_vision
 from loguru import logger
@@ -394,74 +395,108 @@ class LiteLLM:
                 # Store other types of runtime_args for debugging
                 completion_params["runtime_args"] = runtime_args
+    # def output_for_tools(self, response: any):
+    #     """
+    #     Process tool calls from the LLM response and return formatted output.
+
+    #     Args:
+    #         response: The response object from the LLM API call
+
+    #     Returns:
+    #         dict or list: Formatted tool call data, or default response if no tool calls
+    #     """
+    #     try:
+    #         # Convert response to dict if it's a Pydantic model
+    #         if hasattr(response, "model_dump"):
+    #             response_dict = response.model_dump()
+    #         else:
+    #             response_dict = response
+
+    #         print(f"Response dict: {response_dict}")
+
+    #         # Check if tool_calls exists and is not None
+    #         if (
+    #             response_dict.get("choices")
+    #             and response_dict["choices"][0].get("message")
+    #             and response_dict["choices"][0]["message"].get(
+    #                 "tool_calls"
+    #             )
+    #             and len(
+    #                 response_dict["choices"][0]["message"][
+    #                     "tool_calls"
+    #                 ]
+    #             )
+    #             > 0
+    #         ):
+    #             tool_call = response_dict["choices"][0]["message"][
+    #                 "tool_calls"
+    #             ][0]
+
+    #             if "function" in tool_call:
+    #                 return {
+    #                     "function": {
+    #                         "name": tool_call["function"].get(
+    #                             "name", ""
+    #                         ),
+    #                         "arguments": tool_call["function"].get(
+    #                             "arguments", "{}"
+    #                         ),
+    #                     }
+    #                 }
+    #             else:
+    #                 # Handle case where tool_call structure is different
+    #                 return tool_call
+    #         else:
+    #             # Return a default response when no tool calls are present
+    #             logger.warning(
+    #                 "No tool calls found in response, returning default response"
+    #             )
+    #             return {
+    #                 "function": {
+    #                     "name": "no_tool_call",
+    #                     "arguments": "{}",
+    #                 }
+    #             }
+
+    #     except Exception as e:
+    #         logger.error(f"Error processing tool calls: {str(e)} Traceback: {traceback.format_exc()}")
-    def output_for_tools(self, response: any):
-        """
-        Process tool calls from the LLM response and return formatted output.
-
-        Args:
-            response: The response object from the LLM API call
-
-        Returns:
-            dict or list: Formatted tool call data, or default response if no tool calls
-        """
-        try:
-            # Convert response to dict if it's a Pydantic model
-            if hasattr(response, "model_dump"):
-                response_dict = response.model_dump()
-            else:
-                response_dict = response
-
-            # Check if tool_calls exists and is not None
-            if (
-                response_dict.get("choices")
-                and response_dict["choices"][0].get("message")
-                and response_dict["choices"][0]["message"].get(
-                    "tool_calls"
-                )
-                and len(
-                    response_dict["choices"][0]["message"][
-                        "tool_calls"
-                    ]
-                )
-                > 0
-            ):
-                tool_call = response_dict["choices"][0]["message"][
-                    "tool_calls"
-                ][0]
-                if "function" in tool_call:
-                    return {
-                        "function": {
-                            "name": tool_call["function"].get(
-                                "name", ""
-                            ),
-                            "arguments": tool_call["function"].get(
-                                "arguments", "{}"
-                            ),
-                        }
-                    }
-                else:
-                    # Handle case where tool_call structure is different
-                    return tool_call
-            else:
-                # Return a default response when no tool calls are present
-                logger.warning(
-                    "No tool calls found in response, returning default response"
-                )
-                return {
-                    "function": {
-                        "name": "no_tool_call",
-                        "arguments": "{}",
-                    }
-                }
-        except Exception as e:
-            logger.error(f"Error processing tool calls: {str(e)}")
-            # Return a safe default response
-            return {
-                "function": {
-                    "name": "error_processing_tool_calls",
-                    "arguments": f'{{"error": "{str(e)}"}}',
-                }
-            }
+    def output_for_tools(self, response: any):
+        """
+        Process and extract tool call information from the LLM response.
+
+        This function handles the output for tool-based responses, supporting both
+        MCP (Multi-Call Protocol) and standard tool call formats. It extracts the
+        relevant function name and arguments from the response, handling both
+        BaseModel and dictionary outputs.
+
+        Args:
+            response (any): The response object returned by the LLM API call.
+
+        Returns:
+            dict or list: A dictionary containing the function name and arguments
+                if MCP call is used, or the tool calls output (as a dict or list)
+                for standard tool call responses.
+        """
+        if self.mcp_call is True:
+            out = response.choices[0].message.tool_calls[0].function
+
+            if len(out) > 1:
+                return out
+            else:
+                out = out[0]
+
+            output = {
+                "function": {
+                    "name": out.name,
+                    "arguments": out.arguments,
+                }
+            }
+            return output
+        else:
+            out = response.choices[0].message.tool_calls
+
+            if isinstance(out, BaseModel):
+                out = out.model_dump()
+            return out
     def output_for_reasoning(self, response: any):
         """

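The rewritten `output_for_tools` reads tool calls via attribute access instead of walking a dict: the MCP branch returns a single function-call dict, while the standard branch returns the raw `tool_calls` payload, dumped with `model_dump()` when it arrives as a Pydantic `BaseModel`. An illustrative sketch of consuming both shapes downstream (the literals below are stand-ins, not real API responses):

# Shape returned by the MCP branch of output_for_tools
mcp_out = {
    "function": {
        "name": "exa_search",
        "arguments": '{"query": "best gold ETFs"}',
    }
}

# Shape returned by the standard branch: the tool_calls list as plain dicts
standard_out = [
    {
        "id": "call_0",  # hypothetical id
        "type": "function",
        "function": {"name": "exa_search", "arguments": '{"query": "best gold ETFs"}'},
    }
]

print(mcp_out["function"]["name"])
for call in standard_out:
    print(call["function"]["name"], call["function"]["arguments"])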