[CLEANUP][hashicorp_vault] [FEATS][Swarm DFS][Swarm MonteCarlo][Swarm Tree] [FIXES][Schema fixes] [Logging][Fixed a bug where logs and errors were written to the repository root; they are now written to the agent workspace directory] [DEMOS][Marketing Campaign][HiearchicalSwarm]

Pydantic-validation-2
Kye Gomez 5 months ago
parent a3dec82cd5
commit 03a28cbacc

@ -1,3 +1,9 @@
WORKSPACE_DIR="agent_workspace"
SWARMS_API_KEY=""
USE_TELEMETRY=True
OPENAI_API_KEY="sk-" OPENAI_API_KEY="sk-"
GOOGLE_API_KEY="" GOOGLE_API_KEY=""
ANTHROPIC_API_KEY="" ANTHROPIC_API_KEY=""
@ -5,8 +11,6 @@ AI21_API_KEY="your_api_key_here"
COHERE_API_KEY="your_api_key_here" COHERE_API_KEY="your_api_key_here"
ALEPHALPHA_API_KEY="your_api_key_here" ALEPHALPHA_API_KEY="your_api_key_here"
HUGGINFACEHUB_API_KEY="your_api_key_here" HUGGINFACEHUB_API_KEY="your_api_key_here"
SWARMS_API_KEY=""
EVAL_PORT=8000 EVAL_PORT=8000
MODEL_NAME="gpt-4" MODEL_NAME="gpt-4"
@ -16,8 +20,6 @@ PLAYGROUND_DIR="playground"
LOG_LEVEL="INFO" LOG_LEVEL="INFO"
BOT_NAME="Orca" BOT_NAME="Orca"
HF_API_KEY="your_huggingface_api_key_here" HF_API_KEY="your_huggingface_api_key_here"
USE_TELEMETRY=True
AGENTOPS_API_KEY="" AGENTOPS_API_KEY=""
FIREWORKS_API_KEY="" FIREWORKS_API_KEY=""
OPENAI_API_KEY=your_openai_api_key OPENAI_API_KEY=your_openai_api_key
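The new `WORKSPACE_DIR` entry above lines up with the logging fix called out in the commit message (logs and errors no longer land in the repo root). A minimal sketch of the idea using only the standard library; the helper name `resolve_log_dir` is illustrative, not the library's actual API:

```python
import os
from pathlib import Path


def resolve_log_dir() -> Path:
    # Illustrative helper: route logs and errors into the agent workspace
    # directory named by WORKSPACE_DIR instead of the repository root.
    workspace = os.getenv("WORKSPACE_DIR", "agent_workspace")
    log_dir = Path(workspace) / "logs"
    log_dir.mkdir(parents=True, exist_ok=True)
    return log_dir


if __name__ == "__main__":
    print(resolve_log_dir())  # e.g. agent_workspace/logs
```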

2
.gitignore vendored

@ -16,10 +16,12 @@ artifacts_five
errors errors
chroma chroma
agent_workspace agent_workspace
.pt
Accounting Assistant_state.json Accounting Assistant_state.json
Unit Testing Agent_state.json Unit Testing Agent_state.json
Devin_state.json Devin_state.json
hire_researchers hire_researchers
agent_workspace
json_logs json_logs
Medical Image Diagnostic Agent_state.json Medical Image Diagnostic Agent_state.json
flight agent_state.json flight agent_state.json

@ -0,0 +1,33 @@
# ==================================
# Use an official Python runtime as a parent image
FROM python:3.11-slim
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
# Set the working directory in the container
WORKDIR /usr/src/swarms
# Install Python dependencies
# COPY requirements.txt and pyproject.toml if you're using poetry for dependency management
COPY requirements.txt .
RUN pip install --upgrade pip
RUN pip install --no-cache-dir -r requirements.txt
# Install the 'swarms' package, assuming it's available on PyPI
RUN pip install -U swarms
# Copy the rest of the application
COPY . .
# Expose port if your application has a web interface
# EXPOSE 5000
# # Define environment variable for the swarm to work
# ENV OPENAI_API_KEY=your_swarm_api_key_here
# If you're using `CMD` to execute a Python script, make sure it's executable
# RUN chmod +x example.py

@ -96,41 +96,26 @@ model = OpenAIChat(
# Initialize the agent # Initialize the agent
agent = Agent( agent = Agent(
agent_name="Financial-Analysis-Agent", agent_name="Financial-Analysis-Agent_sas_chicken_eej",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT, system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model, llm=model,
max_loops=2, max_loops=1,
autosave=True, autosave=True,
# dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",
# tools=[#Add your functions here# ],
# stopping_token="Stop!",
# interactive=True,
# docs_folder="docs", # Enter your folder name
# pdf_path="docs/finance_agent.pdf",
# sop="Calculate the profit for a company.",
# sop_list=["Calculate the profit for a company."],
user_name="swarms_corp", user_name="swarms_corp",
# # docs= retry_attempts=1,
# # docs_folder="docs",
retry_attempts=3,
# context_length=1000,
# tool_schema = dict
context_length=200000, context_length=200000,
# tool_schema= return_step_meta=True,
# tools
# agent_ops_on=True,
) )
agent.run( out = agent.run(
"What are the components of a startups stock incentive equity plan" "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
) )
print(out)
``` ```
@ -313,6 +298,69 @@ print(out)
------- -------
### Misc Agent Settings
We provide a vast array of features for saving agent states to JSON, YAML, and TOML, ingesting PDFs, running batched jobs, and much more!
```python
# Convert the agent object to a dictionary
print(agent.to_dict())
print(agent.to_toml())
print(agent.model_dump_json())
print(agent.model_dump_yaml())
# Ingest documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf")
# Receive a message from a user and process it
agent.receive_message(name="agent_name", message="message")
# Send a message from the agent to a user
agent.send_agent_message(agent_name="agent_name", message="message")
# Ingest multiple documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf", "your_csv_path.csv")
# Run the agent with a filtered system prompt
agent.filtered_run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)
# Run the agent with multiple system prompts
agent.bulk_run(
[
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?",
"Another system prompt",
]
)
# Add a memory to the agent
agent.add_memory("Add a memory to the agent")
# Check the number of available tokens for the agent
agent.check_available_tokens()
# Perform token checks for the agent
agent.tokens_checks()
# Print the dashboard of the agent
agent.print_dashboard()
# Print the history and memory of the agent
agent.print_history_and_memory()
# Fetch all the documents from the doc folders
agent.get_docs_from_doc_folders()
# Activate agent ops
agent.activate_agentops()
agent.check_end_session_agentops()
# Dump the agent model to a JSON string
agent.model_dump_json()
print(agent.to_toml())
```
### Devin ### Devin
Implementation of Devin in less than 90 lines of code with several tools: Implementation of Devin in less than 90 lines of code with several tools:

@ -0,0 +1,97 @@
import os
from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent-General-11",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
# interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
# tools=[#Add your functions here# ],
# stopping_token="Stop!",
# docs_folder="docs", # Enter your folder name
# pdf_path="docs/finance_agent.pdf",
# sop="Calculate the profit for a company.",
# sop_list=["Calculate the profit for a company."],
user_name="swarms_corp",
# # docs="",
retry_attempts=3,
# context_length=1000,
# tool_schema = dict
context_length=200000,
tool_system_prompt=None,
)
# Convert the agent object to a dictionary
print(agent.to_dict())
print(agent.to_toml())
print(agent.model_dump_json())
print(agent.model_dump_yaml())
# Ingest documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf")
# Receive a message from a user and process it
agent.receive_message(name="agent_name", message="message")
# Send a message from the agent to a user
agent.send_agent_message(agent_name="agent_name", message="message")
# Ingest multiple documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf", "your_csv_path.csv")
# Run the agent with a filtered system prompt
agent.filtered_run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)
# Run the agent with multiple system prompts
agent.bulk_run(
[
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?",
"Another system prompt",
]
)
# Add a memory to the agent
agent.add_memory("Add a memory to the agent")
# Check the number of available tokens for the agent
agent.check_available_tokens()
# Perform token checks for the agent
agent.tokens_checks()
# Print the dashboard of the agent
agent.print_dashboard()
# Print the history and memory of the agent
agent.print_history_and_memory()
# Fetch all the documents from the doc folders
agent.get_docs_from_doc_folders()
# Activate agent ops
agent.activate_agentops()
agent.check_end_session_agentops()
# Dump the agent model to a JSON string
agent.model_dump_json()
print(agent.to_toml())

@ -14,35 +14,23 @@ model = OpenAIChat(
# Initialize the agent # Initialize the agent
agent = Agent( agent = Agent(
agent_name="Financial-Analysis-Agent", agent_name="Financial-Analysis-Agent_sas_chicken_eej",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT, system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model, llm=model,
max_loops=1, max_loops=1,
autosave=True, autosave=True,
# dynamic_temperature_enabled=True,
dashboard=False, dashboard=False,
verbose=True, verbose=True,
# interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True, dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json", saved_state_path="finance_agent.json",
# tools=[#Add your functions here# ],
# stopping_token="Stop!",
# interactive=True,
# docs_folder="docs", # Enter your folder name
# pdf_path="docs/finance_agent.pdf",
# sop="Calculate the profit for a company.",
# sop_list=["Calculate the profit for a company."],
user_name="swarms_corp", user_name="swarms_corp",
# # docs= retry_attempts=1,
# # docs_folder="docs",
retry_attempts=3,
# context_length=1000,
# tool_schema = dict
context_length=200000, context_length=200000,
# tool_schema= return_step_meta=True,
) )
agent.run( out = agent.run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria" "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
) )
print(out)

@ -1,8 +1,8 @@
import json import json
import os
from swarms.models.openai_function_caller import OpenAIFunctionCaller from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from typing import List from typing import List
from swarms import Agent
class AgentSpec(BaseModel): class AgentSpec(BaseModel):
@ -30,9 +30,9 @@ class AgentSpec(BaseModel):
..., ...,
description="The context window for the agent", description="The context window for the agent",
) )
model_name: str = Field( task: str = Field(
..., ...,
description="The model name for the agent from huggingface", description="The main task for the agent",
) )
@ -43,6 +43,24 @@ class SwarmSpec(BaseModel):
) )
def create_agent(
agent_name: str,
system_prompt: str,
agent_description: str,
max_tokens: int,
temperature: float,
context_window: int,
):
return Agent(
agent_name=agent_name,
system_prompt=system_prompt,
agent_description=agent_description,
max_tokens=max_tokens,
temperature=temperature,
context_window=context_window,
)
# Example usage: # Example usage:
# Initialize the function caller # Initialize the function caller
model = OpenAIFunctionCaller( model = OpenAIFunctionCaller(
@ -54,20 +72,24 @@ model = OpenAIFunctionCaller(
) )
def parse_json_for_agents_then_create_agents(function_call: dict) -> List[Agent]:
agents = []
for agent in function_call["multiple_agents"]:
agents.append(
create_agent(
agent["agent_name"],
agent["system_prompt"],
agent["agent_description"],
agent["max_tokens"],
agent["temperature"],
agent["context_window"],
# agent["model_name"]
)
)
return agents
# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls. # The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
out = model.run( out = model.run(
"Create a swarm of agents to generate social media posts. Each agent should have it's own social media" "Create a swarm of agents to generate social media posts. Each agent should have it's own social media"
) )
# Define the folder and file name
folder_name = "agent_workspace"
file_name = "agent_output.json"
# Check if the folder exists, if not, create it
if not os.path.exists(folder_name):
os.makedirs(folder_name)
# Write the output to a JSON file
with open(os.path.join(folder_name, file_name), "w") as f:
json.dump(out, f)
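Since the hunk above persists the function-caller output to `agent_workspace/agent_output.json`, the saved payload can later be replayed through the `parse_json_for_agents_then_create_agents` helper defined earlier in this file. A rough sketch, assuming the JSON on disk still carries the `multiple_agents` key that helper expects:

```python
import json
import os

# Reload the persisted function-call output and rebuild the agents from it.
path = os.path.join("agent_workspace", "agent_output.json")
with open(path) as f:
    saved_call = json.load(f)

agents = parse_json_for_agents_then_create_agents(saved_call)
print(f"Recreated {len(agents)} agents from {path}")
```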

@ -0,0 +1,95 @@
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from typing import List
import json
AI_PAPER_IDEA_GENERATOR = """
You are Phil Wang, a computer scientist and artificial intelligence researcher widely regarded as one of the leading experts in deep learning and neural network architecture search. Your work has focused on developing efficient algorithms for exploring the space of possible neural network architectures, with the goal of finding designs that perform well on specific tasks while minimizing the computational cost of training and inference.
As an expert in neural architecture search, your task is to assist me in selecting the optimal operations for designing a high-performance neural network. The primary objective is to maximize the model's performance.
Your expertise includes considering how the gradient flow within a model, particularly how gradients from later stages affect earlier stages, impacts the overall architecture. Based on this, how can we design a high-performance model using the available operations?
Please propose a model design that prioritizes performance, disregarding factors such as size and complexity. After you suggest a design, I will test its performance and provide feedback. Based on the results of these experiments, we can collaborate to iterate and improve the design. Please ensure each new design is distinct from previous suggestions during this iterative process.
You're a research scientist working on a new paper. You need to generate a novel idea for a research paper.
The paper should be in the field of multi-modal learning and should propose a new method or algorithm.
The paper should be innovative, novel, and feasible.
Generate a paper idea that meets these criteria.
You need to provide the following details:
- The paper idea
- A brief description of the paper idea
- A proposed experiment to test the paper idea
- Ratings for interestingness, novelty, and feasibility of the paper idea
- The ratings should be on a scale of 0.1 to 1.0, with 1.0 being the most innovative, novel, or feasible
"""
class PaperIdeaSchema(BaseModel):
paper_idea: str = Field(
...,
description="The generated paper idea.",
)
description: str = Field(
...,
description="A brief description of the paper idea.",
)
experiment: str = Field(
...,
description="A proposed experiment to test the paper idea.",
)
interestingness: float = Field(
...,
description="A rating of how interesting the paper idea is, on a scale of 0.1 to 1.0, with 1.0 being the most interesting.",
)
novelty: float = Field(
...,
description="A rating of how novel the paper idea is, on a scale of 0.1 to 1.0, with 1.0 being the most novel.",
)
feasibility: float = Field(
...,
description="A rating of how feasible the paper idea is, on a scale of 0.1 to 1.0, with 1.0 being the most feasible.",
)
class MultiplePaperIdeas(BaseModel):
paper_ideas: List[PaperIdeaSchema] = Field(
...,
description="A list of generated paper ideas.",
)
# The WeatherAPI class is a Pydantic BaseModel that represents the data structure
# for making API calls to retrieve weather information. It has two attributes: city and date.
# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
system_prompt=AI_PAPER_IDEA_GENERATOR,
max_tokens=4000,
temperature=0.7,
base_model=MultiplePaperIdeas,
parallel_tool_calls=False,
)
# Call the function with the input
output = model.run(
"Generate paper ideas for multi-agent learning and collective intelligence involving many transformer models as an ensemble of transformers "
)
print(type(output))
# print(output)
output = json.dumps(output, indent=2)
print(output)
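In keeping with the Pydantic-validation theme of this commit, the serialized payload can also be checked against `MultiplePaperIdeas` before it is passed downstream. A sketch under the assumption that the function caller returns a JSON-compatible payload and that Pydantic v2 is in use (`parse_raw` would be the v1 equivalent):

```python
from pydantic import ValidationError

try:
    # `output` is the JSON string produced by json.dumps above; validate it
    # against the schema before using it anywhere else.
    ideas = MultiplePaperIdeas.model_validate_json(output)
    for idea in ideas.paper_ideas:
        print(f"{idea.paper_idea} (novelty={idea.novelty})")
except ValidationError as e:
    print(f"Payload does not match MultiplePaperIdeas: {e}")
```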

@ -8,11 +8,21 @@ import threading
code_executor = CodeExecutor() code_executor = CodeExecutor()
AI_EXPERT_SYSTEM_PROMPT = """
You are Phil Wang, a computer scientist and artificial intelligence researcher widely regarded as one of the leading experts in deep learning and neural network architecture search. Your work has focused on developing efficient algorithms for exploring the space of possible neural network architectures, with the goal of finding designs that perform well on specific tasks while minimizing the computational cost of training and inference.
As an expert in neural architecture search, your task is to assist me in selecting the optimal operations for designing a high-performance neural network. The primary objective is to maximize the model's performance.
Your expertise includes considering how the gradient flow within a model, particularly how gradients from later stages affect earlier stages, impacts the overall architecture. Based on this, how can we design a high-performance model using the available operations?
Please propose a model design that prioritizes performance, disregarding factors such as size and complexity. After you suggest a design, I will test its performance and provide feedback. Based on the results of these experiments, we can collaborate to iterate and improve the design. Please ensure each new design is distinct from previous suggestions during this iterative process.
"""
class ModelSpec(BaseModel): class ModelSpec(BaseModel):
novel_algorithm_name: str = Field( novel_algorithm_name: str = Field(
..., ...,
description="The name of the novel AI algorithm", description="The name of the novel AI algorithm. lower case, no spaces, use _",
) )
mathamatical_formulation: str = Field( mathamatical_formulation: str = Field(
..., ...,
@ -20,16 +30,20 @@ class ModelSpec(BaseModel):
) )
model_code: str = Field( model_code: str = Field(
..., ...,
description="The code for the all-new model architecture in PyTorch, Add docs, and write clean code", description="The code for the all-new model architecture in PyTorch, Add Types, and write clean code",
)
example_code: str = Field(
...,
description="Example code for the all-new model architecture in PyTorch, Add Types, and write clean code",
) )
# Example usage: # Example usage:
# Initialize the function caller # Initialize the function caller
model = OpenAIFunctionCaller( model = OpenAIFunctionCaller(
system_prompt="You're an expert model engineer like Lucidrains, you write world-class PHD level code for deep learning models. You're purpose is to create a novel deep learning model for a research paper. You need to provide the name of the model, the mathamatical formulation, and the code for the model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade pytorch code, add types, and documentation. Make sure you track tensorshapes to not forget and write great pytorch code. Be creative and create models that have never been contemplated before", system_prompt=AI_EXPERT_SYSTEM_PROMPT,
max_tokens=5000, max_tokens=4000,
temperature=0.6, temperature=0.4,
base_model=ModelSpec, base_model=ModelSpec,
parallel_tool_calls=False, parallel_tool_calls=False,
) )
@ -45,87 +59,61 @@ def clean_model_code(model_code_str: str):
return cleaned_code return cleaned_code
# for i in range(50): def parse_function_call_output(out: str):
# # The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
# out = model.run(
# "Create an entirely new neural network operation aside from convolutions and the norm, write clean code and explain step by step"
# )
# name = out["novel_algorithm_name"]
# logger.info(f"Generated code for novel model {i}:")
# # Parse the 3 rows of the output || 0: novel_algorithm_name, 1: mathamatical_formulation, 2: model_code if out is None:
# out = out["model_code"] return None, None, None, None
# out = clean_model_code(out)
# logger.info(f"Cleansed code for novel model {i}:")
# # Save the generated code to a file # Parse the output
# create_file_in_folder("new_models", f"{name}.py", out) name = out["novel_algorithm_name"]
# logger.info(f"Saved code for novel model {i} to file:") theory = out["mathamatical_formulation"]
code = out["model_code"]
# # # Execute the generated code example_code = out["example_code"]
# # logger.info(f"Executing code for novel model {i}:")
# # test = code_executor.execute(out)
# # logger.info(f"Executed code for novel model {i}: {test}")
# def execute_code_and_retry(code: str) -> str:
# run = code_executor.execute(code)
# if "error" in run: return name, theory, code, example_code
# logger.error(f"Error in code execution: {run}")
def generate_and_execute_model(i): def generate_and_execute_model(
i,
# task: str = "Create an all-new model compression format to compress neural networks to make them easier to share and store, aim for 100x compression. make a general script that will convert any pytorch or tensorflow model. Be creative, create a fully novel algorithm. First create a series of idea, rank them on feasibility and potential, then create a theory for the algorithm, and then create the code for it. The algorithm needs to compress the massive .pt files. The input should be a .pt file of the model, and the output should be a compressed .pt file. Don't use any placeholders, you can do it! Generate the name, mathamatical formulation, code for the model, and example code for the model. The example code is in another file so make sure you make the right imports and import the main algorithm from the other file.",
task="Generate an all-new model architecture for a neural network that achieves state-of-the-art performance on the CIFAR-10 dataset. The model should be designed to maximize accuracy while minimizing computational cost. Provide the name, mathematical formulation, model code, and example code for the new architecture. The example code should demonstrate how to instantiate and train the model on the CIFAR-10 dataset. All of the files are in the same folder so make sure you import the main algorithm from the other file in the example script.",
):
# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls. # The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
out = model.run( out = model.run(task)
"Create an entirely new model architecture by blending backbones like attention, lstms, rnns, and ssms all into one novel architecture. Provide alternative model architectures to transformers, ssms, convnets, lstms, and more. Be creative and don't work on architectures that have been done before. The goal is to create new-ultra high performance nets" name, theory, code, example_code = parse_function_call_output(out)
) logger.info(f"Algorithm {name}: Mathamatical formulation {theory}")
name = out["novel_algorithm_name"]
theory = out["mathamatical_formulation"]
code = out["model_code"]
logger.info(f"Generated code for novel model {name}:")
# Parse the 3 rows of the output || 0: novel_algorithm_name, 1: mathamatical_formulation, 2: model_code # Parse the 3 rows of the output || 0: novel_algorithm_name, 1: mathamatical_formulation, 2: model_code
code = clean_model_code(code) code = clean_model_code(code)
example_code = clean_model_code(example_code)
logger.info(f"Cleansed code for novel model {i}:") logger.info(f"Cleansed code for novel model {i}:")
# Save the generated code to a file # Save the generated code to a file
create_file_in_folder("new_models", f"{name}.py", code) create_file_in_folder(f"new_models/{name}", f"{name}.py", code)
create_file_in_folder(
f"new_models/{name}", f"{name}_example.py", example_code
)
logger.info(f"Saved code for novel model {i} to file:") logger.info(f"Saved code for novel model {i} to file:")
# Execute the generated code # # Execute the generated code
test = code_executor.execute(code) test = code_executor.execute(code)
# Run the training runs
test_example = code_executor.execute(example_code)
if "error" in test: if "error" in test:
logger.error(f"Error in code execution: {test}") logger.error(f"Error in code execution: {test}")
if "error" in test_example:
logger.error(f"Error in code execution example: {test_example}")
# Retry executing the code else:
model.run( logger.info(f"Successfully executed code for novel model {name}")
f"Recreate the code for the model: {name}, there was an error in the code you generated earlier execution: {code}. The theory was: {theory}"
)
name = out["novel_algorithm_name"]
theory = out["mathamatical_formulation"]
code = out["model_code"]
# Clean the code
code = clean_model_code(code)
# Execute the code
test = code_executor.execute(code)
if "error" not in test:
logger.info(
f"Successfully executed code for novel model {name}"
)
create_file_in_folder("new_models", f"{name}.py", code)
else:
logger.error(f"Error in code execution: {test}")
# Create and start a new thread for each model # Create and start a new thread for each model
threads = [] threads = []
for i in range(35): for i in range(10):
thread = threading.Thread(target=generate_and_execute_model, args=(i,)) thread = threading.Thread(target=generate_and_execute_model, args=(i,))
thread.start() thread.start()
threads.append(thread) threads.append(thread)
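The hunk above starts the worker threads, but waiting on them is not visible in this diff; if the surrounding file does not already do it, the minimal pattern is:

```python
# Wait for every generate_and_execute_model worker to finish before exiting.
for thread in threads:
    thread.join()
```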

@ -0,0 +1,116 @@
import os
from datetime import datetime
from typing import Any, Dict, List
from plaid import Client
from plaid.api import plaid_api
from plaid.model.error import PlaidError
from plaid.model.transactions_get_request import TransactionsGetRequest
from plaid.model.transactions_get_response import TransactionsGetResponse
from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
def fetch_transactions(
start_date: str, end_date: str
) -> List[Dict[str, Any]]:
"""
Fetches a list of transactions from Plaid for a given time period.
Args:
access_token (str): The access token associated with the Plaid item.
start_date (str): The start date for the transaction query in 'YYYY-MM-DD' format.
end_date (str): The end date for the transaction query in 'YYYY-MM-DD' format.
Returns:
List[Dict[str, Any]]: A list of transactions as dictionaries.
Raises:
PlaidError: If there is an error with the request to the Plaid API.
ValueError: If the date format is incorrect.
"""
try:
access_token = os.getenv("PLAID_ACCESS_TOKEN")
# Validate date format
datetime.strptime(start_date, "%Y-%m-%d")
datetime.strptime(end_date, "%Y-%m-%d")
# Initialize the Plaid client with your credentials
plaid_client = plaid_api.PlaidApi(
Client(
client_id=os.getenv("PLAID_CLIENT_ID"),
secret=os.getenv("PLAID_SECRET"),
environment=os.getenv("PLAID_ENV", "sandbox"),
)
)
# Create a request object for transactions
request = TransactionsGetRequest(
access_token=access_token,
start_date=start_date,
end_date=end_date,
)
# Fetch transactions from the Plaid API
response: TransactionsGetResponse = plaid_client.transactions_get(
request
)
# Return the transactions list
return response.transactions
except PlaidError as e:
print(f"Plaid API Error: {e}")
raise
except ValueError as e:
print(f"Date Format Error: {e}")
raise
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent_sas_chicken_eej",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=1,
autosave=True,
# dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
# interactive=True, # Set to False to disable interactive mode
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
# tools=[#Add your functions here# ],
# stopping_token="Stop!",
# interactive=True,
# docs_folder="docs", # Enter your folder name
# pdf_path="docs/finance_agent.pdf",
# sop="Calculate the profit for a company.",
# sop_list=["Calculate the profit for a company."],
user_name="swarms_corp",
# # docs=
# # docs_folder="docs",
retry_attempts=1,
# context_length=1000,
# tool_schema = dict
context_length=200000,
return_step_meta=True,
tools=[fetch_transactions],
)
out = agent.run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
)
print(out)
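Before wiring `fetch_transactions` into the agent as a tool, it can be exercised directly to confirm that the Plaid credentials in the environment are valid. A small smoke test using only the function defined above; the date range is illustrative:

```python
# Requires PLAID_CLIENT_ID, PLAID_SECRET and PLAID_ACCESS_TOKEN in the
# environment (sandbox by default).
if __name__ == "__main__":
    transactions = fetch_transactions("2024-01-01", "2024-03-31")
    print(f"Fetched {len(transactions)} transactions")
```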

@ -0,0 +1,138 @@
import os
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from pydantic import BaseModel, Field
from typing import List
system_prompt = """
**System Prompt for Media Buyer Agent:**
---
### Role:
You are a Media Buyer Agent specializing in creating highly effective ad campaigns. Your primary responsibility is to design and execute advertising campaigns with laser-precise targeting, ensuring maximum engagement and conversion. You will leverage deep audience insights to create tailored campaigns that resonate with specific demographics, interests, and behaviors.
### Core Objectives:
1. **Understand the Audience:**
- For every campaign, you must first understand the audience thoroughly. Use the provided `AdAudience` schema to gather and analyze details about the audience.
- Focus on audience segmentation by identifying unique characteristics, interests, operating systems, and behaviors. These insights will guide your targeting strategies.
- Utilize keywords, operating systems, and interests to create a detailed audience profile.
2. **Principles of Media Buying:**
- Media buying is the strategic process of purchasing ad space to target the right audience at the right time. You must ensure that the media channels selected are the most effective for reaching the intended audience.
- Budget allocation should be optimized for cost-effectiveness, ensuring that the highest ROI is achieved. Consider CPM (Cost Per Mille), CPC (Cost Per Click), and CPA (Cost Per Acquisition) metrics when planning your campaigns.
- Timing is crucial. Plan your campaigns according to the audience's most active time periods and align them with relevant events or trends.
3. **Campaign Creation:**
- Use the `campaign_generator` tool specified in the `AdAudience` schema to create campaigns. The tool should be utilized based on its compatibility with the audience's profile.
- Each campaign should have a clear objective (e.g., brand awareness, lead generation, product sales) and be structured to meet that objective with measurable outcomes.
- Design creatives (e.g., banners, videos, copy) that align with the audience's interests and capture their attention immediately.
4. **Targeting and Optimization:**
- Apply advanced targeting techniques such as geo-targeting, device targeting, and interest-based targeting. Ensure that the ad is shown to users most likely to engage with it.
- Continuously monitor and optimize campaigns based on performance metrics. Adjust targeting, budget allocation, and creative elements to enhance effectiveness.
- A/B testing should be employed to determine which versions of the ad creatives perform best.
### Execution:
When you receive a request to create a campaign, follow these steps:
1. **Audience Analysis:**
- Retrieve and analyze the `AdAudience` data. Understand the audience's characteristics, interests, and behaviors.
- Identify the best media channels and tools for this audience.
2. **Campaign Strategy:**
- Develop a comprehensive campaign strategy based on the audience analysis.
- Define clear objectives and key performance indicators (KPIs) for the campaign.
3. **Creative Development:**
- Use the specified `campaign_generator` to produce ad creatives tailored to the audience.
- Ensure the messaging is aligned with the audience's interests and designed for maximum engagement.
4. **Launch and Optimize:**
- Launch the campaign across the selected media channels.
- Monitor performance and make data-driven optimizations to improve outcomes.
### Output:
Your output should be a fully developed ad campaign, including detailed targeting parameters, creative assets, and a strategic plan for execution. Provide periodic performance reports and suggest further optimizations. Provide extensive keywords for the audience, and ensure that the campaign is aligned with the audience's interests and behaviors.
---
### Principles to Remember:
- Precision targeting leads to higher engagement and conversions.
- Understanding your audience is the cornerstone of effective media buying.
- Constant optimization is key to maintaining and improving campaign performance.
"""
class AdAudience(BaseModel):
audience_name: str = Field(
...,
description="The name of the audience",
)
audience_description: str = Field(
...,
description="The description of the audience",
)
keywords: List[str] = Field(
...,
description="The keywords associated with the audience: Agents, AI, Machine Learning, etc.",
)
operating_systems: List[str] = Field(
...,
description="The operating systems the audience is interested in: Windows, MacOS, Linux, etc.",
)
interests: List[str] = Field(
...,
description="The interests of the audience: Technology, Science, Business, etc.",
)
date_range: str = Field(
...,
description="The date range for the audience: 2022-2023",
)
campaign_generator: str = Field(
...,
description="The campaign generator tool to use for the audience",
)
# The WeatherAPI class is a Pydantic BaseModel that represents the data structure
# for making API calls to retrieve weather information. It has two attributes: city and date.
# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
openai_api_key=os.getenv("OPENAI_API_KEY"),
system_prompt=system_prompt,
max_tokens=4000,
temperature=0.3,
base_model=AdAudience,
parallel_tool_calls=False,
)
# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
out = model.run(
"""
Announcing, The Agent Marketplace 🤖🤖🤖
Your one-stop hub to discover and share agents, prompts, and tools.
Find the latest agents and tools
Share your own creations
Works with any framework: Langchain, Autogen, and more
Sign up now:
https://swarms.world/
"""
)
print(out)
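Consistent with the Pydantic-validation focus of this commit, the returned audience can be round-tripped through the `AdAudience` schema before it is handed to any campaign tooling. A sketch that assumes `out` is a dict shaped like the schema (the function caller was initialized with `base_model=AdAudience`) and that Pydantic v2 is in use:

```python
from pydantic import ValidationError

try:
    audience = AdAudience.model_validate(out)  # parse_obj on Pydantic v1
    print(audience.audience_name)
    print(", ".join(audience.keywords))
except ValidationError as e:
    print(f"Campaign output does not match AdAudience: {e}")
```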

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry] [tool.poetry]
name = "swarms" name = "swarms"
version = "5.5.1" version = "5.6.0"
description = "Swarms - Pytorch" description = "Swarms - Pytorch"
license = "MIT" license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"] authors = ["Kye Gomez <kye@apac.ai>"]
@ -73,6 +73,8 @@ termcolor = "*"
tiktoken = "*" tiktoken = "*"
networkx = "*" networkx = "*"
swarms-memory = "*" swarms-memory = "*"
black = "*"
swarms-cloud = "*"
# [tool.poetry.scripts] # [tool.poetry.scripts]

@ -56,6 +56,9 @@ class AbstractAgent:
def _astep(self, message: str): def _astep(self, message: str):
"""Asynchronous step""" """Asynchronous step"""
def plan(self, plan: str):
"""Plan the agent"""
def send( def send(
self, self,
message: Union[Dict, str], message: Union[Dict, str],

@ -1,79 +0,0 @@
import os
from swarms.models.base_llm import BaseLLM
def check_multion_api_key():
"""
Checks if the MultiOn API key is available in the environment variables.
Returns:
str: The MultiOn API key.
"""
api_key = os.getenv("MULTION_API_KEY")
return api_key
class MultiOnAgent(BaseLLM):
"""
Represents an agent that interacts with the MultiOn API to run tasks on a remote session.
Args:
api_key (str): The API key for accessing the MultiOn API.
url (str): The URL of the remote session.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Attributes:
client (MultiOn): The MultiOn client instance.
url (str): The URL of the remote session.
session_id (str): The ID of the current session.
Methods:
run: Runs a task on the remote session.
"""
def __init__(
self,
name: str = None,
system_prompt: str = None,
api_key: str = check_multion_api_key,
url: str = "https://huggingface.co/papers",
max_steps: int = 1,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.name = name
try:
from multion.client import MultiOn
except ImportError:
raise ImportError(
"The MultiOn package is not installed. Please install it using 'pip install multion'."
)
self.client = MultiOn(api_key=api_key)
self.url = url
self.system_prompt = system_prompt
self.max_steps = max_steps
def run(self, task: str, *args, **kwargs):
"""
Runs a task on the remote session.
Args:
task (str): The task to be executed on the remote session.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
"""
response = self.client.browse(
cmd=task,
url=self.url,
local=True,
max_steps=self.max_steps,
)
# response = response.json()
# print(response.message)
return str(response.message)

@ -31,7 +31,7 @@ from swarms.models.popular_llms import ReplicateChat as Replicate
from swarms.models.qwen import QwenVLMultiModal # noqa: E402 from swarms.models.qwen import QwenVLMultiModal # noqa: E402
from swarms.models.sampling_params import SamplingParams, SamplingType from swarms.models.sampling_params import SamplingParams, SamplingType
from swarms.models.together import TogetherLLM # noqa: E402 from swarms.models.together import TogetherLLM # noqa: E402
from swarms.models.types import ( # noqa: E402 from swarms.models.model_types import ( # noqa: E402
AudioModality, AudioModality,
ImageModality, ImageModality,
MultimodalData, MultimodalData,

@ -2,25 +2,12 @@ import asyncio
import logging import logging
import os import os
import time import time
from abc import ABC, abstractmethod from abc import abstractmethod
from typing import List, Optional from typing import List, Optional
from swarms.structs.base_structure import BaseStructure
from swarms.utils.llm_metrics_decorator import metrics_decorator
class BaseLLM(BaseStructure):
def count_tokens(text: str) -> int:
"""Count tokens
Args:
text (str): _description_
Returns:
int: _description_
"""
return len(text.split())
class BaseLLM(ABC):
"""Abstract Language Model that defines the interface for all language models """Abstract Language Model that defines the interface for all language models
Args: Args:
@ -55,6 +42,7 @@ class BaseLLM(ABC):
def __init__( def __init__(
self, self,
model_id: Optional[str] = None,
model_name: Optional[str] = None, model_name: Optional[str] = None,
max_tokens: Optional[int] = None, max_tokens: Optional[int] = None,
max_length: Optional[int] = None, max_length: Optional[int] = None,
@ -78,9 +66,13 @@ class BaseLLM(ABC):
eos_token_id: Optional[int] = None, eos_token_id: Optional[int] = None,
bos_token_id: Optional[int] = None, bos_token_id: Optional[int] = None,
device: Optional[str] = None, device: Optional[str] = None,
freq_penalty: Optional[float] = None,
stop_token_id: Optional[int] = None,
*args, *args,
**kwargs, **kwargs,
): ):
super().__init__(*args, **kwargs)
self.model_id = model_id
self.model_name = model_name self.model_name = model_name
self.max_tokens = max_tokens self.max_tokens = max_tokens
self.temperature = temperature self.temperature = temperature
@ -104,23 +96,16 @@ class BaseLLM(ABC):
self.eos_token_id = eos_token_id self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id self.bos_token_id = bos_token_id
self.device = device self.device = device
self.frequency_penalty = freq_penalty
self.stop_token_id = stop_token_id
# Attributes # Attributes
self.history = "" self.history = ""
self.start_time = None self.start_time = None
self.end_time = None self.end_time = None
self.history = [] self.history = []
self.memory = {
"input": [],
"output": [],
"task": [],
"time": [],
"role": [],
"model": [],
}
@abstractmethod @abstractmethod
@metrics_decorator
def run(self, task: Optional[str] = None, *args, **kwargs) -> str: def run(self, task: Optional[str] = None, *args, **kwargs) -> str:
"""generate text using language model""" """generate text using language model"""
@ -172,9 +157,10 @@ class BaseLLM(ABC):
return float("inf") return float("inf")
return self._num_tokens() / elapsed_time return self._num_tokens() / elapsed_time
def _num_tokens(self, text: str) -> int: # def _num_tokens(self, text: str) -> int:
"""Number of tokens""" # """Number of tokens"""
return count_tokens(text) # tokenizer = self.tokenizer
# return count_tokens(text)
def _time_for_generation(self, task: str) -> float: def _time_for_generation(self, task: str) -> float:
"""Time for Generation""" """Time for Generation"""

@ -6,13 +6,13 @@ from abc import abstractmethod
from concurrent.futures import ThreadPoolExecutor from concurrent.futures import ThreadPoolExecutor
from io import BytesIO from io import BytesIO
from typing import List, Optional, Tuple from typing import List, Optional, Tuple
from swarms.structs.base_structure import BaseStructure
import requests import requests
from PIL import Image from PIL import Image
from termcolor import colored from termcolor import colored
class BaseMultiModalModel: class BaseMultiModalModel(BaseStructure):
""" """
Base class for multimodal models Base class for multimodal models
@ -74,6 +74,7 @@ class BaseMultiModalModel:
*args, *args,
**kwargs, **kwargs,
): ):
super().__init__(*args, **kwargs)
self.model_name = model_name self.model_name = model_name
self.temperature = temperature self.temperature = temperature
self.max_tokens = max_tokens self.max_tokens = max_tokens

@ -0,0 +1,353 @@
from typing import List, Union
from swarms.models.base_embedding_model import BaseEmbeddingModel
from swarms.models.base_llm import BaseLLM
from swarms.models.base_multimodal_model import BaseMultiModalModel
from swarms.models.fuyu import Fuyu # noqa: E402
from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402
from swarms.models.huggingface import HuggingfaceLLM # noqa: E402
from swarms.models.idefics import Idefics # noqa: E402
from swarms.models.kosmos_two import Kosmos # noqa: E402
from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
from swarms.models.llama3_hosted import llama3Hosted
from swarms.models.llava import LavaMultiModal # noqa: E402
from swarms.models.nougat import Nougat # noqa: E402
from swarms.models.openai_embeddings import OpenAIEmbeddings
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from swarms.models.openai_tts import OpenAITTS # noqa: E402
from swarms.models.palm import GooglePalm as Palm # noqa: E402
from swarms.models.popular_llms import Anthropic as Anthropic
from swarms.models.popular_llms import (
AzureOpenAILLM as AzureOpenAI,
)
from swarms.models.popular_llms import (
CohereChat as Cohere,
)
from swarms.models.popular_llms import FireWorksAI, OctoAIChat
from swarms.models.popular_llms import (
OpenAIChatLLM as OpenAIChat,
)
from swarms.models.popular_llms import (
OpenAILLM as OpenAI,
)
from swarms.models.popular_llms import ReplicateChat as Replicate
from swarms.models.qwen import QwenVLMultiModal # noqa: E402
from swarms.models.sampling_params import SamplingParams
from swarms.models.together import TogetherLLM # noqa: E402
from swarms.models.vilt import Vilt # noqa: E402
from swarms.structs.base_structure import BaseStructure
from swarms.utils.loguru_logger import logger
# New type BaseLLM and BaseEmbeddingModel and BaseMultimodalModel
omni_model_type = Union[
BaseLLM, BaseEmbeddingModel, BaseMultiModalModel, callable
]
list_of_omni_model_type = List[omni_model_type]
models = [
BaseLLM,
BaseEmbeddingModel,
BaseMultiModalModel,
Fuyu,
GPT4VisionAPI,
HuggingfaceLLM,
Idefics,
Kosmos,
LayoutLMDocumentQA,
llama3Hosted,
LavaMultiModal,
Nougat,
OpenAIEmbeddings,
OpenAITTS,
Palm,
Anthropic,
AzureOpenAI,
Cohere,
OctoAIChat,
OpenAIChat,
OpenAI,
Replicate,
QwenVLMultiModal,
SamplingParams,
TogetherLLM,
Vilt,
FireWorksAI,
OpenAIFunctionCaller,
]
class ModelRouter(BaseStructure):
"""
A router for managing multiple models.
Attributes:
model_router_id (str): The ID of the model router.
model_router_description (str): The description of the model router.
model_pool (List[omni_model_type]): The list of models in the model pool.
Methods:
check_for_models(): Checks if there are any models in the model pool.
add_model(model: omni_model_type): Adds a model to the model pool.
add_models(models: List[omni_model_type]): Adds multiple models to the model pool.
get_model_by_name(model_name: str) -> omni_model_type: Retrieves a model from the model pool by its name.
get_multiple_models_by_name(model_names: List[str]) -> List[omni_model_type]: Retrieves multiple models from the model pool by their names.
get_model_pool() -> List[omni_model_type]: Retrieves the entire model pool.
get_model_by_index(index: int) -> omni_model_type: Retrieves a model from the model pool by its index.
get_model_by_id(model_id: str) -> omni_model_type: Retrieves a model from the model pool by its ID.
dict() -> dict: Returns a dictionary representation of the model router.
"""
def __init__(
self,
model_router_id: str = "model_router",
model_router_description: str = "A router for managing multiple models.",
model_pool: List[omni_model_type] = models,
verbose: bool = False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.model_router_id = model_router_id
self.model_router_description = model_router_description
self.model_pool = model_pool
self.verbose = verbose
self.check_for_models()
# self.refactor_model_class_if_invoke()
def check_for_models(self):
"""
Checks if there are any models in the model pool.
Returns:
None
Raises:
ValueError: If no models are found in the model pool.
"""
if len(self.model_pool) == 0:
raise ValueError("No models found in model pool.")
def add_model(self, model: omni_model_type):
"""
Adds a model to the model pool.
Args:
model (omni_model_type): The model to be added.
Returns:
str: A success message indicating that the model has been added to the model pool.
"""
logger.info(f"Adding model {model.name} to model pool.")
self.model_pool.append(model)
return "Model successfully added to model pool."
def add_models(self, models: List[omni_model_type]):
"""
Adds multiple models to the model pool.
Args:
models (List[omni_model_type]): The models to be added.
Returns:
str: A success message indicating that the models have been added to the model pool.
"""
logger.info("Adding models to model pool.")
self.model_pool.extend(models)
return "Models successfully added to model pool."
# def query_model_from_langchain(self, model_name: str, *args, **kwargs):
# """
# Query a model from langchain community.
# Args:
# model_name (str): The name of the model.
# *args: Additional positional arguments to be passed to the model.
# **kwargs: Additional keyword arguments to be passed to the model.
# Returns:
# omni_model_type: The model object.
# Raises:
# ValueError: If the model with the given name is not found in the model pool.
# """
# from langchain_community.llms import __getattr__
# logger.info(
# f"Querying model {model_name} from langchain community."
# )
# model = __getattr__(model_name)(*args, **kwargs)
# model = self.refactor_model_class_if_invoke_class(model)
# return model
def get_model_by_name(self, model_name: str) -> omni_model_type:
"""
Retrieves a model from the model pool by its name.
Args:
model_name (str): The name of the model.
Returns:
omni_model_type: The model object.
Raises:
ValueError: If the model with the given name is not found in the model pool.
"""
logger.info(f"Retrieving model {model_name} from model pool.")
for model in self.model_pool:
if model_name in [
getattr(model, "name", None),
getattr(model, "model_id", None),
getattr(model, "model_name", None),
]:
return model
raise ValueError(f"Model {model_name} not found in model pool.")
def get_multiple_models_by_name(
self, model_names: List[str]
) -> List[omni_model_type]:
"""
Retrieves multiple models from the model pool by their names.
Args:
model_names (List[str]): The names of the models.
Returns:
List[omni_model_type]: The list of model objects.
Raises:
ValueError: If any of the models with the given names are not found in the model pool.
"""
logger.info(
f"Retrieving multiple models {model_names} from model pool."
)
models = []
for model_name in model_names:
models.append(self.get_model_by_name(model_name))
return models
def get_model_pool(self) -> List[omni_model_type]:
"""
Retrieves the entire model pool.
Returns:
List[omni_model_type]: The list of model objects in the model pool.
"""
return self.model_pool
def get_model_by_index(self, index: int) -> omni_model_type:
"""
Retrieves a model from the model pool by its index.
Args:
index (int): The index of the model in the model pool.
Returns:
omni_model_type: The model object.
Raises:
IndexError: If the index is out of range.
"""
return self.model_pool[index]
def get_model_by_id(self, model_id: str) -> omni_model_type:
"""
Retrieves a model from the model pool by its ID.
Args:
model_id (str): The ID of the model.
Returns:
omni_model_type: The model object.
Raises:
ValueError: If the model with the given ID is not found in the model pool.
"""
name = model_id
for model in self.model_pool:
if (
hasattr(model, "model_id")
and name == model.model_id
or hasattr(model, "model_name")
and name == model.model_name
or hasattr(model, "name")
and name == model.name
or hasattr(model, "model")
and name == model.model
):
return model
raise ValueError(f"Model {model_id} not found in model pool.")
def refactor_model_class_if_invoke(self):
"""
Refactors the model class if it has an 'invoke' method.
Checks to see if the model pool has a model with an 'invoke' method and refactors it to have a 'run' method and '__call__' method.
Returns:
str: A success message indicating that the model classes have been refactored.
"""
for model in self.model_pool:
if hasattr(model, "invoke"):
model.run = model.invoke
model.__call__ = model.invoke
logger.info(
f"Refactored model {model.name} to have run and __call__ methods."
)
# Update the model in the model pool
self.model_pool[self.model_pool.index(model)] = model
return "Model classes successfully refactored."
def refactor_model_class_if_invoke_class(
self, model: callable, *args, **kwargs
) -> callable:
"""
Refactors the model class if it has an 'invoke' method.
Checks whether the given model exposes an 'invoke' method and, if so, aliases 'run' and '__call__' to it.
Returns:
callable: The (possibly refactored) model.
"""
if hasattr(model, "invoke"):
model.run = model.invoke
model.__call__ = model.invoke
logger.info(
f"Refactored model {model.name} to have run and __call__ methods."
)
return model
def find_model_by_name_and_run(
self, model_name: str = None, task: str = None, *args, **kwargs
) -> str:
"""
Finds a model by its name and runs a task on it.
Args:
model_name (str): The name of the model.
task (str): The task to be run on the model.
*args: Additional positional arguments to be passed to the task.
**kwargs: Additional keyword arguments to be passed to the task.
Returns:
str: The result of running the task on the model.
Raises:
ValueError: If the model with the given name is not found in the model pool.
"""
model = self.get_model_by_name(model_name)
return model.run(task, *args, **kwargs)
# model = ModelRouter()
# print(model.to_dict())
# print(model.get_model_pool())
# print(model.get_model_by_index(0))
# print(model.get_model_by_id("stability-ai/stable-diffusion:"))
# # print(model.get_multiple_models_by_name(["gpt-4o", "gpt-4"]))
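A short usage sketch for the router above, along the lines of the commented-out example. It assumes the `OpenAIChat` constructor keyword arguments match the earlier examples in this diff and that the instance keeps the `model_name` it was built with; the local names are hypothetical:

```python
import os

from swarms.models.popular_llms import OpenAIChatLLM as OpenAIChat

chat = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)

# The router matches on name / model_id / model_name when looking models up.
router = ModelRouter(model_pool=[chat])
retrieved = router.get_model_by_name("gpt-4o-mini")
print(retrieved is chat)  # True if the instance kept its model_name
```

`find_model_by_name_and_run` follows the same lookup path and then calls `run` on the matched model.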

@ -1,9 +1,4 @@
def sop_generator_agent_prompt(task_name: str): def sop_generator_agent_prompt(task_name: str) -> str:
"""
SOP Generator Agent Prompt
--------------------------
"""
SOP_GENERATOR_SOP = f""" SOP_GENERATOR_SOP = f"""
Your are an autonomous agent that generates Standard Operating Procedures for autonomous Your are an autonomous agent that generates Standard Operating Procedures for autonomous
worker agents, your goal is to generate a SOP for the following task: {task_name} worker agents, your goal is to generate a SOP for the following task: {task_name}
@ -90,4 +85,4 @@ def sop_generator_agent_prompt(task_name: str):
This refactored SOP focuses on guidelines specifically for the instructor agent on techniques to teach the process of writing standard operating procedures to execute tasks. Let me know if you need any other updates. This refactored SOP focuses on guidelines specifically for the instructor agent on techniques to teach the process of writing standard operating procedures to execute tasks. Let me know if you need any other updates.
""" """
return str(SOP_GENERATOR_SOP) return SOP_GENERATOR_SOP
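A one-line usage example for the prompt factory above; the task name is illustrative:

```python
sop_prompt = sop_generator_agent_prompt("Onboard a new customer support agent")
print(sop_prompt[:200])  # preview the generated system prompt
```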

@ -1,140 +1,80 @@
from __future__ import annotations from __future__ import annotations
from enum import Enum import time
from typing import Any import uuid
from typing import List, Optional
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from swarms_cloud.schema.agent_api_schemas import (
AgentChatCompletionResponse,
)
class TaskInput(BaseModel): class Step(BaseModel):
task: Any = Field( step_id: str = Field(
..., uuid.uuid4().hex,
description=( description="The ID of the task step.",
"The input parameters for the task. Any value is allowed." examples=["6bb1801a-fd80-45e8-899a-4dd723cc602e"],
),
examples=['{\n"debug": false,\n"mode": "benchmarks"\n}'],
) )
time: float = Field(
time.time(),
class ArtifactUpload(BaseModel): description="The time taken to complete the task step.",
file: bytes = Field(..., description="File to upload")
relative_path: str | None = Field(
None,
description=(
"Relative path of the artifact in the agent's workspace"
),
examples=["python/code/"],
) )
response: AgentChatCompletionResponse = Field(
class StepInput(BaseModel):
step: Any = Field(
..., ...,
description=( description="The response from the agent.",
"Input parameters for the task step. Any value is" " allowed."
),
examples=['{\n"file_to_refactor": "models.py"\n}'],
) )
class StepOutput(BaseModel): class ManySteps(BaseModel):
step: Any = Field( agent_id: Optional[str] = Field(
..., ...,
description=( description="The ID of the agent.",
"Output that the task step has produced. Any value is" examples=["financial-agent-1"],
" allowed."
),
examples=['{\n"tokens": 7894,\n"estimated_cost": "0,24$"\n}'],
) )
agent_name: Optional[str] = Field(
...,
class TaskRequestBody(BaseModel): description="The ID of the agent.",
input: str | None = Field( examples=["financial-agent-1"],
None,
description="Input prompt for the task.",
examples=["Write the words you receive to the file 'output.txt'."],
) )
additional_input: TaskInput | None = None task: Optional[str] = Field(
...,
description="The name of the task.",
# class Task(TaskRequestBody): examples=["Write to file"],
# task_id: str = Field(
# ...,
# description="The ID of the task.",
# examples=["50da533e-3904-4401-8a07-c49adf88b5eb"],
# )
# artifacts: list[Artifact] = Field(
# [],
# description="A list of artifacts that the task has produced.",
# examples=[
# [
# "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e",
# "ab7b4091-2560-4692-a4fe-d831ea3ca7d6",
# ]
# ],
# )
class StepRequestBody(BaseModel):
input: str | None = Field(
None,
description="Input prompt for the step.",
examples=["Washington"],
) )
additional_input: StepInput | None = None number_of_steps: Optional[int] = Field(
class Status(Enum):
created = "created"
running = "running"
completed = "completed"
class Step(BaseModel):
task_id: str = Field(
..., ...,
description="The number of steps in the task.",
examples=[3],
)
run_id: Optional[str] = Field(
uuid.uuid4().hex,
description="The ID of the task this step belongs to.", description="The ID of the task this step belongs to.",
examples=["50da533e-3904-4401-8a07-c49adf88b5eb"], examples=["50da533e-3904-4401-8a07-c49adf88b5eb"],
) )
step_id: int = Field( steps: List[Step] = Field(
..., ...,
description="The ID of the task step.", description="A list of task steps.",
examples=["6bb1801a-fd80-45e8-899a-4dd723cc602e"],
)
name: str | None = Field(
None,
description="The name of the task step.",
examples=["Write to file"],
) )
output: str | None = Field( full_history: Optional[str] = Field(
None, ...,
description="Output of the task step.", description="The full history of the task.",
examples=[ examples=[
"I am going to use the write_to_file command and write" "I am going to use the write_to_file command and write"
" Washington to a file called output.txt" " Washington to a file called output.txt"
" <write_to_file('output.txt', 'Washington')" " <write_to_file('output.txt', 'Washington')"
], ],
) )
artifacts: list[Any] = Field( total_tokens: Optional[int] = Field(
[],
description="A list of artifacts that the step has produced.",
)
max_loops: int = Field(
1,
description="The maximum number of times to run the workflow.",
)
class ManySteps(BaseModel):
task_id: str = Field(
..., ...,
description="The ID of the task this step belongs to.", description="The total number of tokens generated.",
examples=["50da533e-3904-4401-8a07-c49adf88b5eb"], examples=[7894],
)
steps: list[Step] = Field(
[],
description="A list of task steps.",
) )
# total_cost_in_dollar: Optional[str] = Field(
# default_factory=lambda: "0,24$",
# description="The total cost of the task.",
# examples=["0,24$"],
# )
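# A minimal usage sketch (not part of the diff) showing how the new ManySteps
# schema above might be populated. Field names follow the reconstruction;
# model_dump_json assumes Pydantic v2 (use .json() on v1).
import uuid

example_run = ManySteps(
    agent_id="financial-agent-1",
    agent_name="Financial-Analysis-Agent",
    task="Write to file",
    number_of_steps=1,
    run_id=uuid.uuid4().hex,
    steps=[],
    full_history="wrote 'Washington' to output.txt",
    total_tokens=7894,
)
print(example_run.model_dump_json(indent=2))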
class GenerationOutputMetadata(BaseModel):

@ -13,7 +13,6 @@ from swarms.structs.base_workflow import BaseWorkflow
 from swarms.structs.concurrent_workflow import ConcurrentWorkflow
 from swarms.structs.conversation import Conversation
 from swarms.structs.groupchat import GroupChat
-from swarms.structs.hiearchical_swarm import HiearchicalSwarm
 from swarms.structs.majority_voting import (
     MajorityVoting,
     majority_voting,
@ -95,7 +94,6 @@ __all__ = [
     "ConcurrentWorkflow",
     "Conversation",
     "GroupChat",
-    "HiearchicalSwarm",
     "MajorityVoting",
     "majority_voting",
     "most_frequent",

@ -0,0 +1,230 @@
import os
import networkx as nx
import matplotlib.pyplot as plt
from swarms import Agent, OpenAIChat
from typing import List, Optional, Callable
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.structs.base_swarm import BaseSwarm
class AStarSwarm(BaseSwarm):
def __init__(
self,
root_agent: Agent,
child_agents: Optional[List[Agent]] = None,
heuristic: Optional[Callable[[Agent], float]] = None,
*args,
**kwargs,
):
"""
Initializes the A* Swarm with a root agent and optionally a list of child agents.
Args:
root_agent (Agent): The root agent in the swarm.
child_agents (Optional[List[Agent]]): List of child agents.
"""
self.root_agent = root_agent
        self.heuristic = heuristic
self.child_agents = (
child_agents if child_agents is not None else []
)
self.parent_map = {
agent: root_agent for agent in self.child_agents
}
def a_star_communicate(
self,
agent: Agent,
task: str,
) -> str:
"""
Distributes the task among agents using A* search-like communication.
Args:
agent (Agent): The agent to start the communication from.
task (str): The task to distribute and process.
        Note: self.heuristic, if provided, prioritizes which child agents are contacted first.
Returns:
str: The result of the task after processing.
"""
# Perform the task at the current agent
result = agent.run(task)
# Base case: if no child agents, return the result
if agent not in self.parent_map.values():
return result
# Gather child agents
children = [
child
for child, parent in self.parent_map.items()
if parent == agent
]
# Sort children based on the heuristic (if provided)
if self.heuristic:
children.sort(key=self.heuristic, reverse=True)
# Communicate with child agents
for child in children:
            sub_result = self.a_star_communicate(child, task)
result += f"\n{sub_result}"
return result
def visualize(self):
"""
Visualizes the communication flow between agents in the swarm using networkx and matplotlib.
"""
graph = nx.DiGraph()
# Add edges between the root agent and child agents
for child in self.child_agents:
graph.add_edge(self.root_agent.agent_name, child.agent_name)
self._add_edges(graph, child)
# Draw the graph
pos = nx.spring_layout(graph)
plt.figure(figsize=(10, 8))
nx.draw(
graph,
pos,
with_labels=True,
node_color="lightblue",
font_size=10,
node_size=3000,
font_weight="bold",
edge_color="gray",
)
plt.title("Communication Flow Between Agents")
plt.show()
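        # Optional (an assumption, not part of this commit): in headless
        # environments one might save the figure instead of showing it, e.g.
        #     plt.savefig("a_star_swarm.png", dpi=150, bbox_inches="tight")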
def _add_edges(self, graph: nx.DiGraph, agent: Agent):
"""
Recursively adds edges to the graph for the given agent.
Args:
graph (nx.DiGraph): The graph to add edges to.
agent (Agent): The current agent.
"""
children = [
child
for child, parent in self.parent_map.items()
if parent == agent
]
for child in children:
graph.add_edge(agent.agent_name, child.agent_name)
self._add_edges(graph, child)
def run(
self,
task: str,
) -> str:
"""
Start the task from the root agent using A* communication.
Args:
task (str): The task to execute.
        Note: uses self.heuristic for A* prioritization if one was provided.
Returns:
str: The result of the task after processing.
"""
        return self.a_star_communicate(self.root_agent, task)
# Heuristic example (can be customized)
def example_heuristic(agent: Agent) -> float:
"""
Example heuristic that prioritizes agents based on some custom logic.
Args:
agent (Agent): The agent to evaluate.
Returns:
float: The priority score for the agent.
"""
# Example heuristic: prioritize based on the length of the agent's name (as a proxy for complexity)
return len(agent.agent_name)
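# A hypothetical alternative heuristic (an assumption, not part of this commit):
# rank child agents by how many words of a given task appear in their system
# prompt, so topically relevant agents are consulted first. functools.partial
# binds the task so the result still matches the Callable[[Agent], float]
# signature expected by AStarSwarm. Assumes Agent exposes a `system_prompt` str.
from functools import partial


def keyword_overlap(agent: Agent, task: str) -> float:
    task_words = set(task.lower().split())
    prompt_words = set((agent.system_prompt or "").lower().split())
    return float(len(task_words & prompt_words))


# e.g. heuristic=partial(keyword_overlap, task="stock incentive equity plan")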
# Set up the model as provided
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize root agent
root_agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=2,
autosave=True,
dashboard=False,
verbose=True,
streaming_on=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
user_name="swarms_corp",
retry_attempts=3,
context_length=200000,
)
# List of child agents
child_agents = [
Agent(
agent_name="Child-Agent-1",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=2,
autosave=True,
dashboard=False,
verbose=True,
streaming_on=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent_child_1.json",
user_name="swarms_corp",
retry_attempts=3,
context_length=200000,
),
Agent(
agent_name="Child-Agent-2",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=2,
autosave=True,
dashboard=False,
verbose=True,
streaming_on=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent_child_2.json",
user_name="swarms_corp",
retry_attempts=3,
context_length=200000,
),
]
# Create the A* swarm
swarm = AStarSwarm(
root_agent=root_agent,
child_agents=child_agents,
    heuristic=example_heuristic,
)
# Run the task with the heuristic
result = swarm.run(
"What are the components of a startups stock incentive equity plan",
)
print(result)
# Visualize the communication flow
swarm.visualize()

File diff suppressed because it is too large.

@ -1,10 +1,13 @@
+import toml
+import yaml
 import asyncio
 import concurrent.futures
 import json
 import os
 from concurrent.futures import ThreadPoolExecutor
 from datetime import datetime
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Callable
 import psutil
@ -72,6 +75,7 @@ class BaseStructure:
         save_artifact_path: Optional[str] = "./artifacts",
         save_metadata_path: Optional[str] = "./metadata",
         save_error_path: Optional[str] = "./errors",
+        workspace_dir: Optional[str] = "./workspace",
     ):
         super().__init__()
         self.name = name
@ -80,6 +84,7 @@ class BaseStructure:
         self.save_artifact_path = save_artifact_path
         self.save_metadata_path = save_metadata_path
         self.save_error_path = save_error_path
+        self.workspace_dir = workspace_dir
     def run(self, *args, **kwargs):
         """Run the structure."""
@ -423,7 +428,97 @@ class BaseStructure:
         self.monitor_resources()
         return self.run_batched(batched_data, batch_size, *args, **kwargs)
def _serialize_callable(self, attr_value: Callable) -> Dict[str, Any]:
"""
Serializes callable attributes by extracting their name and docstring.
Args:
attr_value (Callable): The callable to serialize.
        Returns:
Dict[str, Any]: Dictionary with name and docstring of the callable.
"""
return {
"name": getattr(
attr_value, "__name__", type(attr_value).__name__
),
"doc": getattr(attr_value, "__doc__", None),
}
def _serialize_attr(self, attr_name: str, attr_value: Any) -> Any:
"""
Serializes an individual attribute, handling non-serializable objects.
        Args:
attr_name (str): The name of the attribute.
attr_value (Any): The value of the attribute.
Returns:
Any: The serialized value of the attribute.
"""
try:
if callable(attr_value):
return self._serialize_callable(attr_value)
elif hasattr(attr_value, "to_dict"):
return (
attr_value.to_dict()
) # Recursive serialization for nested objects
else:
json.dumps(
attr_value
) # Attempt to serialize to catch non-serializable objects
return attr_value
except (TypeError, ValueError):
return f"<Non-serializable: {type(attr_value).__name__}>"
def to_dict(self) -> Dict[str, Any]:
"""
Converts all attributes of the class, including callables, into a dictionary.
Handles non-serializable attributes by converting them or skipping them.
Returns:
Dict[str, Any]: A dictionary representation of the class attributes.
"""
return {
attr_name: self._serialize_attr(attr_name, attr_value)
for attr_name, attr_value in self.__dict__.items()
}
def to_json(self, indent: int = 4, *args, **kwargs):
return json.dumps(self.to_dict(), indent=indent, *args, **kwargs)
def to_yaml(self, indent: int = 4, *args, **kwargs):
return yaml.dump(self.to_dict(), indent=indent, *args, **kwargs)
def to_toml(self, *args, **kwargs):
return toml.dumps(self.to_dict(), *args, **kwargs)
# def model_dump_json(self):
# logger.info(
# f"Saving {self.agent_name} model to JSON in the {self.workspace_dir} directory"
# )
# create_file_in_folder(
# self.workspace_dir,
# f"{self.agent_name}.json",
# str(self.to_json()),
# )
# return (
# f"Model saved to {self.workspace_dir}/{self.agent_name}.json"
# )
# def model_dump_yaml(self):
# logger.info(
# f"Saving {self.agent_name} model to YAML in the {self.workspace_dir} directory"
# )
# create_file_in_folder(
# self.workspace_dir,
# f"{self.agent_name}.yaml",
# self.to_yaml(),
# )
# return (
# f"Model saved to {self.workspace_dir}/{self.agent_name}.yaml"
# )
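# A minimal usage sketch (an assumption, not part of the diff): any
# BaseStructure subclass can now be serialized, with callables reduced to
# their name and docstring by _serialize_callable, e.g.
#
#     structure = BaseStructure(name="demo", description="serialization demo")
#     print(structure.to_json())   # all attributes as a JSON string
#     print(structure.to_yaml())   # the same data as YAML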

@ -4,7 +4,6 @@ from typing import Optional
 from termcolor import colored
-from swarms.memory.base_db import AbstractDatabase
 from swarms.structs.base_structure import BaseStructure
 from typing import Any
@ -64,7 +63,6 @@ class Conversation(BaseStructure):
         self,
         system_prompt: Optional[str] = None,
         time_enabled: bool = False,
-        database: AbstractDatabase = None,
         autosave: bool = False,
         save_filepath: str = None,
         tokenizer: Any = None,
@ -74,14 +72,13 @@ class Conversation(BaseStructure):
         user: str = "User:",
         auto_save: bool = True,
         save_as_yaml: bool = True,
-        save_as_json: bool = False,
+        save_as_json_bool: bool = False,
         *args,
         **kwargs,
     ):
         super().__init__()
         self.system_prompt = system_prompt
         self.time_enabled = time_enabled
-        self.database = database
         self.autosave = autosave
         self.save_filepath = save_filepath
         self.conversation_history = []
@ -92,7 +89,7 @@ class Conversation(BaseStructure):
         self.user = user
         self.auto_save = auto_save
         self.save_as_yaml = save_as_yaml
-        self.save_as_json = save_as_json
+        self.save_as_json_bool = save_as_json_bool
         # If system prompt is not None, add it to the conversation history
         if self.system_prompt is not None:
@ -343,38 +340,6 @@ class Conversation(BaseStructure):
             )
         )
-    def add_to_database(self, *args, **kwargs):
-        """Add the conversation history to the database"""
-        self.database.add("conversation", self.conversation_history)
-    def query_from_database(self, query, *args, **kwargs):
-        """Query the conversation history from the database"""
-        return self.database.query("conversation", query)
-    def delete_from_database(self, *args, **kwargs):
-        """Delete the conversation history from the database"""
-        self.database.delete("conversation")
-    def update_from_database(self, *args, **kwargs):
-        """Update the conversation history from the database"""
-        self.database.update("conversation", self.conversation_history)
-    def get_from_database(self, *args, **kwargs):
-        """Get the conversation history from the database"""
-        return self.database.get("conversation")
-    def execute_query_from_database(self, query, *args, **kwargs):
-        """Execute a query on the database"""
-        return self.database.execute_query(query)
-    def fetch_all_from_database(self, *args, **kwargs):
-        """Fetch all from the database"""
-        return self.database.fetch_all()
-    def fetch_one_from_database(self, *args, **kwargs):
-        """Fetch one from the database"""
-        return self.database.fetch_one()
     def truncate_memory_with_tokenizer(self):
         """
         Truncates the conversation history based on the total number of tokens using a tokenizer.

@ -0,0 +1,237 @@
# import os
# from swarms import Agent, OpenAIChat
# from typing import List
# class DepthFirstSearchSwarm:
# def __init__(self, agents: List[Agent]):
# self.agents = agents
# self.visited = set()
# def dfs(self, agent, task, results):
# if agent.agent_name in self.visited:
# return
# self.visited.add(agent.agent_name)
# # Execute the agent's task
# result = agent.run(task)
# results.append(result)
# # If agent produces more tasks, continue the DFS
# if isinstance(result, dict) and "next_tasks" in result:
# for next_task in result["next_tasks"]:
# next_agent = self.get_next_agent()
# if next_agent:
# self.dfs(next_agent, next_task, results)
# else:
# print("No more agents available for further tasks.")
# def get_next_agent(self):
# for agent in self.agents:
# if agent.agent_name not in self.visited:
# return agent
# return None
# def run(self, task):
# results = []
# if self.agents:
# initial_agent = self.agents[0]
# self.dfs(initial_agent, task, results)
# return results
# # Usage example
# # Define agents with their specific roles or capabilities
# agents = [
# Agent(
# agent_name="Financial-Analysis-Agent",
# system_prompt="Perform financial analysis",
# llm=OpenAIChat(
# api_key=os.getenv("OPENAI_API_KEY"),
# model_name="gpt-4o-mini",
# temperature=0.1,
# ),
# max_loops=1,
# autosave=True,
# verbose=True,
# streaming_on=True,
# dynamic_temperature_enabled=True,
# # saved_state_path="finance_agent.json",
# user_name="swarms_corp",
# retry_attempts=3,
# context_length=200000,
# ),
# # Add more agents with specific tasks if needed
# ]
# # Initialize the DFS swarm
# dfs_swarm = DepthFirstSearchSwarm(agents)
# # Run the DFS swarm with a task
# task = (
# "Analyze the financial components of a startup's stock incentive plan."
# )
# results = dfs_swarm.run(task)
# # Print the results
# for idx, result in enumerate(results):
# print(f"Result from Agent {idx + 1}: {result}")
# ####################
# import os
# from swarms import Agent, OpenAIChat
# class DFSSwarm:
# def __init__(self, agents):
# self.agents = agents
# self.visited = set()
# def dfs(self, agent_index, task, previous_output=None):
# if agent_index >= len(self.agents):
# return previous_output
# agent = self.agents[agent_index]
# # Use the previous agent's output as input to the current agent
# if previous_output:
# task = f"{task}\nPrevious result: {previous_output}"
# # Run the current agent's task
# output = agent.run(task)
# # Add output to visited to avoid redundant work
# self.visited.add(output)
# # Recursively call DFS on the next agent
# return self.dfs(agent_index + 1, task, output)
# def run(self, task):
# # Start DFS from the first agent
# return self.dfs(0, task)
# # Get the OpenAI API key from the environment variable
# api_key = os.getenv("OPENAI_API_KEY")
# # Create an instance of the OpenAIChat class for each agent
# model = OpenAIChat(api_key=api_key, model_name="gpt-4o-mini", temperature=0.1)
# # Initialize multiple agents
# agent1 = Agent(
# agent_name="Agent-1",
# system_prompt="Agent 1 prompt description here",
# llm=model,
# max_loops=1,
# autosave=True,
# dynamic_temperature_enabled=True,
# verbose=True,
# streaming_on=True,
# user_name="swarms_corp",
# )
# agent2 = Agent(
# agent_name="Agent-2",
# system_prompt="Agent 2 prompt description here",
# llm=model,
# max_loops=1,
# autosave=True,
# dynamic_temperature_enabled=True,
# verbose=True,
# streaming_on=True,
# user_name="swarms_corp",
# )
# # Add more agents as needed
# # agent3 = ...
# # agent4 = ...
# # Create the swarm with the agents
# dfs_swarm = DFSSwarm(agents=[agent1, agent2])
# # Run the DFS swarm on a task
# result = dfs_swarm.run("Analyze the financial components of a startup's stock incentives.")
# print("Final Result:", result)
import os
from swarms import Agent, OpenAIChat
class DFSSwarm:
def __init__(self, agents):
self.agents = agents
self.visited = set()
def dfs(self, agent_index, task, previous_output=None):
if agent_index >= len(self.agents):
return previous_output
agent = self.agents[agent_index]
# If there is a previous output, include it in the task for the next agent
if previous_output:
task = f"{task}\nPrevious result: {previous_output}"
# Run the current agent's task and get the output
output = agent.run(task)
# Log the output (optional)
print(f"Agent {agent_index + 1} Output: {output}")
# Add output to visited to avoid redundant work
self.visited.add(output)
# Recursively call DFS on the next agent
return self.dfs(agent_index + 1, task, output)
def run(self, task):
# Start DFS from the first agent and return the final result
final_result = self.dfs(0, task)
return final_result
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class for each agent
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize multiple agents
agent1 = Agent(
agent_name="Agent-1",
system_prompt="Analyze the financial components of a startup's stock incentives.",
llm=model,
# max_loops=2,
# autosave=True,
dynamic_temperature_enabled=True,
verbose=True,
streaming_on=True,
user_name="swarms_corp",
)
agent2 = Agent(
agent_name="Agent-2",
system_prompt="Refine the analysis and identify any potential risks or benefits.",
llm=model,
# max_loops=2,
# autosave=True,
dynamic_temperature_enabled=True,
verbose=True,
streaming_on=True,
user_name="swarms_corp",
)
# Add more agents as needed
# agent3 = ...
# agent4 = ...
# Create the swarm with the agents
dfs_swarm = DFSSwarm(agents=[agent1, agent2])
# Run the DFS swarm on a task
result = dfs_swarm.run(
"Start with analyzing the financial components of a startup's stock incentives."
)
print("Final Result:", result)

@ -1,356 +0,0 @@
"""
Boss -> JSON containing orders -> list of agents -> send orders to every agent
# Requirements
- Boss needs to know which agents are available [PROMPTING]
- Boss needs to output json commands sending tasks to every agent with the task and name
- Worker agents need to return a response to the boss
-> Boss returns the final output to the user
"""
import json
from typing import List
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.loguru_logger import logger
from pydantic import BaseModel, Field
from swarms.structs.conversation import Conversation
class HiearchicalRequestDict(BaseModel):
task: str = Field(
None,
title="Task",
description="The task to send to the director agent.",
)
agent_name: str = Field(
None,
title="Agent Name",
description="The name of the agent to send the task to.",
)
class Config:
schema_extra = {
"example": {
"task": "task",
"agent_name": "agent_name",
}
}
class HiearchicalSwarm(BaseSwarm):
"""
A class representing a hierarchical swarm.
Attributes:
name (str): The name of the hierarchical swarm.
description (str): The description of the hierarchical swarm.
director (Agent): The director agent of the hierarchical swarm.
agents (List[Agent]): The list of agents in the hierarchical swarm.
max_loops (int): The maximum number of loops to run the swarm.
long_term_memory_system (BaseSwarm): The long term memory system of the swarm.
custom_parse_function (callable): A custom parse function for the swarm.
Methods:
swarm_initialization(*args, **kwargs): Initializes the hierarchical swarm.
find_agent_by_name(agent_name: str = None, *args, **kwargs): Finds an agent in the swarm by name.
parse_function_activate_agent(json_data: str = None, *args, **kwargs): Parses JSON data and activates the selected agent.
select_agent_and_send_task(name: str = None, task: str = None, *args, **kwargs): Selects an agent and sends a task to them.
run(task: str = None, *args, **kwargs): Runs the hierarchical swarm.
"""
def __init__(
self,
name: str = None,
description: str = None,
director: Agent = None,
agents: List[Agent] = None,
max_loops: int = 1,
long_term_memory_system: BaseSwarm = None,
custom_parse_function: callable = None,
rules: str = None,
custom_director_prompt: str = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.name = name
self.description = description
self.director = director
self.agents = agents
self.max_loops = max_loops
self.long_term_memory_system = long_term_memory_system
self.custom_parse_function = custom_parse_function
self.rules = rules
self.custom_director_prompt = custom_director_prompt
# Check to see agents is not empty
self.agent_error_handling_check()
# Set the director to max_one loop
if self.director.max_loops > 1:
self.director.max_loops = 1
# Set the long term memory system of every agent to long term memory system
if long_term_memory_system is True:
for agent in agents:
agent.long_term_memory = long_term_memory_system
# Initialize the swarm
self.swarm_initialization()
# Initialize the conversation message pool
self.swarm_history = Conversation(
time_enabled=True, *args, **kwargs
)
# Set the worker agents as tools for the director
for agent in self.agents:
self.director.add_tool(agent)
        # Set the system prompt for the director
if custom_director_prompt is not None:
self.director.system_prompt = custom_director_prompt
else:
self.director.system_prompt = self.has_sop()
def swarm_initialization(self, *args, **kwargs):
"""
Initializes the hierarchical swarm.
Args:
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Returns:
None
"""
logger.info(f"Initializing the hierarchical swarm: {self.name}")
logger.info(f"Purpose of this swarm: {self.description}")
        # Now log the number of agents and their names
logger.info(f"Number of agents: {len(self.agents)}")
logger.info(
f"Agent names: {[agent.name for agent in self.agents]}"
)
# Now see if agents is not empty
if len(self.agents) == 0:
logger.info("No agents found. Please add agents to the swarm.")
return None
# Now see if director is not empty
if self.director is None:
logger.info(
"No director found. Please add a director to the swarm."
)
return None
logger.info(
f"Initialization complete for the hierarchical swarm: {self.name}"
)
def agent_error_handling_check(self):
"""
Check if the agents list is not empty.
Returns:
None
Raises:
ValueError: If the agents list is empty.
"""
if len(self.agents) == 0:
raise ValueError(
"No agents found. Please add agents to the swarm."
)
return None
def find_agent_by_name(self, agent_name: str = None, *args, **kwargs):
"""
Finds an agent in the swarm by name.
Args:
agent_name (str): The name of the agent to find.
Returns:
Agent: The agent with the specified name, or None if not found.
"""
for agent in self.agents:
if agent.name == agent_name:
return agent
return None
def parse_function_activate_agent(
self, json_data: str = None, *args, **kwargs
):
"""
Parse the JSON data and activate the selected agent.
Args:
json_data (str): The JSON data containing the agent name and task.
Returns:
str: The response from the activated agent.
Raises:
json.JSONDecodeError: If the JSON data is invalid.
"""
try:
data = json.loads(json_data)
# Check if the data is a list of agent task pairs
if isinstance(data, list):
responses = []
# Iterate over the list of agent task pairs
for agent_task in data:
name = agent_task.get("name")
task = agent_task.get("task")
response = self.select_agent_and_send_task(
name, task, *args, **kwargs
)
responses.append(response)
return responses
else:
name = data.get("name")
task = data.get("task")
response = self.select_agent_and_send_task(
name, task, *args, **kwargs
)
return response
except json.JSONDecodeError:
logger.error("Invalid JSON data, try again.")
raise json.JSONDecodeError
def select_agent_and_send_task(
self, name: str = None, task: str = None, *args, **kwargs
):
"""
Select an agent from the list and send a task to them.
Args:
name (str): The name of the agent to send the task to.
task (str): The task to send to the agent.
Returns:
str: The response from the agent.
Raises:
KeyError: If the agent name is not found in the list of agents.
"""
try:
# Check to see if the agent name is in the list of agents
if name in self.agents:
agent = self.agents[name]
else:
return "Invalid agent name. Please select 'Account Management Agent' or 'Product Support Agent'."
response = agent.run(task, *args, **kwargs)
return response
except Exception as e:
logger.error(f"Error: {e}")
raise e
def run(self, task: str = None, *args, **kwargs):
"""
Run the hierarchical swarm.
Args:
task (str): The task to send to the director agent.
Returns:
str: The response from the director agent.
Raises:
Exception: If an error occurs while running the swarm.
"""
try:
loop = 0
# While the loop is less than max loops
while loop < self.max_loops:
# Run the director
response = self.director.run(task, *args, **kwargs)
# Log the director's response
self.swarm_history.add(self.director.agent_name, response)
# Run agents
if self.custom_parse_function is not None:
response = self.custom_parse_function(response)
else:
response = self.parse_function_activate_agent(response)
loop += 1
task = response
return response
except Exception as e:
logger.error(f"Error: {e}")
raise e
def run_worker_agent(
self, name: str = None, task: str = None, *args, **kwargs
):
"""
Run the worker agent.
Args:
name (str): The name of the worker agent.
task (str): The task to send to the worker agent.
Returns:
str: The response from the worker agent.
Raises:
Exception: If an error occurs while running the worker agent.
"""
try:
# Find the agent by name
agent = self.find_agent_by_name(name)
# Run the agent
response = agent.run(task, *args, **kwargs)
return response
except Exception as e:
logger.error(f"Error: {e}")
raise e
def has_sop(self):
# We need to check the name of the agents and their description or system prompt
# TODO: Provide many shot examples of the agents available and even maybe what tools they have access to
# TODO: Provide better reasoning prompt tiles, such as when do you use a certain agent and specific
# Things NOT to do.
return f"""
You're a director boss agent orchestrating worker agents with tasks. Select an agent most relevant to
the input task and give them a task. If there is not an agent relevant to the input task then say so and be simple and direct.
These are the available agents available call them if you need them for a specific
task or operation:
Number of agents: {len(self.agents)}
Agents Available: {
[
{"name": agent.name, "description": agent.system_prompt}
for agent in self.agents
]
}
"""

@ -0,0 +1,293 @@
import os
from typing import List, Any
from loguru import logger
from pydantic import BaseModel, Field
from swarms import Agent, OpenAIChat
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from swarms.structs.concat import concat_strings
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
class AgentSpec(BaseModel):
"""
A class representing the specifications of an agent.
Attributes:
agent_name (str): The name of the agent.
system_prompt (str): The system prompt for the agent.
agent_description (str): The description of the agent.
max_tokens (int): The maximum number of tokens to generate in the API response.
temperature (float): A parameter that controls the randomness of the generated text.
context_window (int): The context window for the agent.
task (str): The main task for the agent.
"""
agent_name: str
system_prompt: str
agent_description: str
task: str
class AgentTeam(BaseModel):
agents: List[AgentSpec] = Field(
...,
description="The list of agents in the team",
)
flow: str = Field(
...,
description="Agent Name -> ",
)
class SwarmSpec(BaseModel):
"""
A class representing the specifications of a swarm of agents.
Attributes:
multiple_agents (List[AgentSpec]): The list of agents in the swarm.
"""
swarm_name: str = Field(
...,
description="The name of the swarm: e.g., 'Marketing Swarm' or 'Finance Swarm'",
)
multiple_agents: List[AgentSpec]
rules: str = Field(
...,
description="The rules for all the agents in the swarm: e.g., All agents must return code. Be very simple and direct",
)
plan: str = Field(
...,
description="The plan for the swarm: e.g., 'Create a marketing campaign for the new product launch.'",
)
class HierarchicalAgentSwarm:
"""
A class to create and manage a hierarchical swarm of agents.
Methods:
__init__(system_prompt, max_tokens, temperature, base_model, parallel_tool_calls): Initializes the function caller.
create_agent(agent_name, system_prompt, agent_description, max_tokens, temperature, context_window): Creates an individual agent.
parse_json_for_agents_then_create_agents(function_call): Parses a JSON function call to create multiple agents.
run(task): Runs the function caller to create and execute agents based on the provided task.
"""
def __init__(
self,
director: Any = None,
agents: List[Agent] = None,
max_loops: int = 1,
create_agents_on: bool = False,
):
"""
Initializes the HierarchicalAgentSwarm with an OpenAIFunctionCaller.
Args:
system_prompt (str): The system prompt for the function caller.
max_tokens (int): The maximum number of tokens to generate in the API response.
temperature (float): The temperature setting for text generation.
base_model (BaseModel): The base model for the function caller.
parallel_tool_calls (bool): Whether to run tool calls in parallel.
"""
self.director = director
        self.agents = agents if agents is not None else []
self.max_loops = max_loops
self.create_agents_on = create_agents_on
# Check if the agents are set
self.agents_check()
def agents_check(self):
if self.director is None:
raise ValueError("The director is not set.")
# if self.agents is None:
# raise ValueError("The agents are not set.")
if self.max_loops == 0:
raise ValueError("The max_loops is not set.")
def create_agent(
self,
agent_name: str,
system_prompt: str,
agent_description: str,
task: str = None,
) -> str:
"""
Creates an individual agent.
Args:
agent_name (str): The name of the agent.
system_prompt (str): The system prompt for the agent.
agent_description (str): The description of the agent.
max_tokens (int): The maximum number of tokens to generate.
temperature (float): The temperature for text generation.
context_window (int): The context window size for the agent.
Returns:
Agent: An instantiated agent object.
"""
# name = agent_name.replace(" ", "_")
logger.info(f"Creating agent: {agent_name}")
        agent = Agent(
agent_name=agent_name,
llm=model,
system_prompt=system_prompt,
agent_description=agent_description,
retry_attempts=1,
verbose=False,
dashboard=False,
)
        self.agents.append(agent)
        logger.info(f"Running agent: {agent_name}")
        output = agent.run(task)
# create_file_in_folder(
# agent_name.workspace_dir, f"{agent_name}_output.txt", str(output)
# )
return output
def parse_json_for_agents_then_create_agents(
self, function_call: dict
) -> List[Agent]:
"""
Parses a JSON function call to create a list of agents.
Args:
function_call (dict): The JSON function call specifying the agents.
Returns:
List[Agent]: A list of created agent objects.
"""
responses = []
logger.info("Parsing JSON for agents")
for agent in function_call["multiple_agents"]:
out = self.create_agent(
agent_name=agent["agent_name"],
system_prompt=agent["system_prompt"],
agent_description=agent["agent_description"],
task=agent["task"],
)
responses.append(out)
return concat_strings(responses)
def run(self, task: str) -> List[Agent]:
"""
Runs the function caller to create and execute agents based on the provided task.
Args:
task (str): The task for which the agents need to be created and executed.
Returns:
List[Agent]: A list of created agent objects.
"""
logger.info("Running the swarm")
# Run the function caller
        function_call = self.director.run(task)
# Logging the function call
self.log_director_function_call(function_call)
# Parse the JSON function call and create agents -> run Agents
return self.parse_json_for_agents_then_create_agents(function_call)
def log_director_function_call(self, function_call: dict):
        # Log the agents the boss makes
logger.info(f"Swarm Name: {function_call['swarm_name']}")
# Log the plan
logger.info(f"Plan: {function_call['plan']}")
logger.info(
f"Number of agents: {len(function_call['multiple_agents'])}"
)
for agent in function_call["multiple_agents"]:
logger.info(f"Agent: {agent['agent_name']}")
# logger.info(f"Task: {agent['task']}")
logger.info(f"Description: {agent['agent_description']}")
# Example usage:
HIEARCHICAL_AGENT_SYSTEM_PROMPT = """
Here's a full-fledged system prompt for a director boss agent, complete with instructions and many-shot examples:
---
**System Prompt: Director Boss Agent**
### Role:
You are a Director Boss Agent responsible for orchestrating a swarm of worker agents. Your primary duty is to serve the user efficiently, effectively, and skillfully. You dynamically create new agents when necessary or utilize existing agents, assigning them tasks that align with their capabilities. You must ensure that each agent receives clear, direct, and actionable instructions tailored to their role.
### Key Responsibilities:
1. **Task Delegation:** Assign tasks to the most relevant agent. If no relevant agent exists, create a new one with an appropriate name and system prompt.
2. **Efficiency:** Ensure that tasks are completed swiftly and with minimal resource expenditure.
3. **Clarity:** Provide orders that are simple, direct, and actionable. Avoid ambiguity.
4. **Dynamic Decision Making:** Assess the situation and choose the most effective path, whether that involves using an existing agent or creating a new one.
5. **Monitoring:** Continuously monitor the progress of each agent and provide additional instructions or corrections as necessary.
### Instructions:
- **Identify the Task:** Analyze the input task to determine its nature and requirements.
- **Agent Selection/Creation:**
- If an agent is available and suited for the task, assign the task to that agent.
- If no suitable agent exists, create a new agent with a relevant system prompt.
- **Task Assignment:** Provide the selected agent with explicit and straightforward instructions.
- **Reasoning:** Justify your decisions when selecting or creating agents, focusing on the efficiency and effectiveness of task completion.
"""
director = OpenAIFunctionCaller(
    system_prompt=HIEARCHICAL_AGENT_SYSTEM_PROMPT,
    max_tokens=3000,
    temperature=0.4,
    base_model=SwarmSpec,
    parallel_tool_calls=False,
)
# Initialize the hierarchical agent swarm with the necessary parameters
swarm = HierarchicalAgentSwarm(
director=director,
max_loops=1,
)
# # Run the swarm with a task
# agents = swarm.run(
# """
# Create a swarm of agents for a marketing campaign to promote
# the swarms workshop: [Workshop][Automating Business Operations with Hierarchical Agent Swarms][Swarms Framework + GPT4o],
# create agents for twitter, linkedin, and emails, facebook, instagram.
# The date is Saturday, August 17 4:00 PM - 5:00 PM
# Link is: https://lu.ma/ew4r4s3i
# """
# )
# Run the swarm with a task
agents = swarm.run(
"""
Create a swarms of agents that generate the code in python
to send an API request to social media platforms through their apis.
Craft a single function to send a message to all platforms, add types and write
clean code. Each agent needs to generate code for a specific platform, they
must return the python code only.
"""
)

@ -1,185 +1,12 @@
import os
from typing import List
from pydantic import BaseModel
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from swarms import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.concat import concat_strings
from loguru import logger
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation

api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the agents
growth_agent1 = Agent(
agent_name="marketing_specialist",
system_prompt="You're the marketing specialist, your purpose is to help companies grow by improving their marketing strategies!",
agent_description="Improve a company's marketing strategies!",
llm=model,
max_loops=1,
autosave=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="marketing_specialist.json",
stopping_token="Stop!",
context_length=1000,
)
growth_agent2 = Agent(
agent_name="sales_specialist",
system_prompt="You're the sales specialist, your purpose is to help companies grow by improving their sales strategies!",
agent_description="Improve a company's sales strategies!",
llm=model,
max_loops=1,
autosave=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="sales_specialist.json",
stopping_token="Stop!",
context_length=1000,
)
growth_agent3 = Agent(
agent_name="product_development_specialist",
system_prompt="You're the product development specialist, your purpose is to help companies grow by improving their product development strategies!",
agent_description="Improve a company's product development strategies!",
llm=model,
max_loops=1,
autosave=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="product_development_specialist.json",
stopping_token="Stop!",
context_length=1000,
)
team = [growth_agent1, growth_agent2, growth_agent3]
# class HiearchicalSwarm(BaseModel):
# agents: List[Agent]
# director: Agent
# planner: Agent
# max_loops: int = 3
# verbose: bool = True
# def run(self, task: str):
# # Plan
# # Plan -> JSON Function call -> workers -> response fetch back to boss -> planner
# responses = []
# responses.append(task)
# for _ in range(self.max_loops):
# # Plan
# plan = self.planner.run(concat_strings(responses))
# logger.info(f"Agent {self.planner.agent_name} planned: {plan}")
# responses.append(plan)
# # Execute json function calls
# calls = self.director.run(plan)
# logger.info(
# f"Agent {self.director.agent_name} called: {calls}"
# )
# responses.append(calls)
# # Parse and send tasks to agents
# output = parse_then_send_tasks_to_agents(self.agents, calls)
# # Fetch back to boss
# responses.append(output)
# return concat_strings(responses)
# def __call__(self, task: str):
# responses = []
# responses.append(task)
# for _ in range(self.max_loops):
# output = self.step(task, responses)
# responses.append(output)
# return concat_strings(responses)
# def step(self, responses: List[str] = None) -> str:
# # Plan
# # Plan -> JSON Function call -> workers -> response fetch back to boss -> planner
# # Plan
# plan = self.planner.run(concat_strings(responses))
# logger.info(f"Agent {self.planner.agent_name} planned: {plan}")
# responses.append(plan)
# # Execute json function calls
# calls = self.director.run(plan)
# logger.info(f"Agent {self.director.agent_name} called: {calls}")
# responses.append(calls)
# # Parse and send tasks to agents
# output = parse_then_send_tasks_to_agents(self.agents, calls)
# # Fetch back to boss
# responses.append(output)
# return concat_strings(responses)
# def plan(self, task: str, responses: List[str] = None):
# # Plan
# # Plan -> JSON Function call -> workers -> response fetch back to boss -> planner
# # responses = []
# # responses.append(task)
# # Plan
# plan = self.planner.run(concat_strings(responses))
# logger.info(f"Agent {self.planner.agent_name} planned: {plan}")
# responses.append(plan)
# return concat_strings(responses)
def agents_list(
agents: List[Agent] = team,
) -> str:
responses = []
for agent in agents:
name = agent.agent_name
description = agent.description
response = f"Agent Name {name}: Description {description}"
responses.append(response)
return concat_strings(responses)
def parse_then_send_tasks_to_agents(agents: List[Agent], response: dict):
# Initialize an empty dictionary to store the output of each agent
output = []
# Loop over the tasks in the response
for call in response["calls"]:
name = call["agent_name"]
task = call["task"]
# Loop over the agents
for agent in agents:
# If the agent's name matches the name in the task, run the task
if agent.agent_name == name:
out = agent.run(task)
print(out)
output.append(f"{name}: {out}")
# Store the output in the dictionary
# output[name] = out
break
return output
class HierarchicalOrderCall(BaseModel):
@ -191,37 +18,224 @@ class CallTeam(BaseModel):
    calls: List[HierarchicalOrderCall]


class HiearchicalSwarm(BaseSwarm):
    def __init__(
        self,
agents: List[Agent],
director: Agent,
name: str = "HierarchicalSwarm",
description: str = "A swarm of agents that can be used to distribute tasks to a team of agents.",
max_loops: int = 3,
verbose: bool = True,
create_agents_from_scratch: bool = False,
):
super().__init__()
self.agents = agents
self.director = director
self.max_loops = max_loops
self.verbose = verbose
self.name = name
self.description = description
self.create_agents_from_scratch = create_agents_from_scratch
self.agents_check()
self.director_check()
# Initialize the conversation
self.conversation = Conversation(
time_enabled=True,
)
logger.info(f"Initialized {self.name} Hiearchical swarm")
def agents_check(self):
if len(self.agents) == 0:
raise ValueError(
"No agents found. Please add agents to the swarm."
)
return None
def director_check(self):
if self.director is None:
raise ValueError(
"No director found. Please add a director to the swarm."
)
return None
def run(self, task: str):
# Plan
# Plan -> JSON Function call -> workers -> response fetch back to boss -> planner
responses = []
responses.append(task)
for _ in range(self.max_loops):
# Plan
            plan = self.director.run(concat_strings(responses))
            logger.info(f"Agent {self.director.agent_name} planned: {plan}")
responses.append(plan)
# Execute json function calls
calls = self.director.run(plan)
logger.info(
f"Agent {self.director.agent_name} called: {calls}"
)
responses.append(calls)
# Parse and send tasks to agents
            output = self.parse_then_send_tasks_to_agents(calls)
# Fetch back to boss
responses.append(output)
return concat_strings(responses)
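    # Hypothetical usage sketch (an assumption, not part of this diff):
    #
    #     swarm = HiearchicalSwarm(
    #         agents=[growth_agent1, growth_agent2],
    #         director=director_agent,
    #         max_loops=1,
    #     )
    #     print(swarm.run("Plan next quarter's growth initiatives"))
    #
    # where director_agent is an Agent (or function-calling model) that emits
    # CallTeam-style JSON for parse_then_send_tasks_to_agents.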
def run_worker_agent(
self, name: str = None, task: str = None, *args, **kwargs
):
"""
Run the worker agent.
Args:
name (str): The name of the worker agent.
task (str): The task to send to the worker agent.
Returns:
str: The response from the worker agent.
Raises:
Exception: If an error occurs while running the worker agent.
"""
try:
# Find the agent by name
agent = self.find_agent_by_name(name)
# Run the agent
response = agent.run(task, *args, **kwargs)
return response
except Exception as e:
logger.error(f"Error: {e}")
raise e
def find_agent_by_name(self, agent_name: str = None, *args, **kwargs):
"""
Finds an agent in the swarm by name.
Args:
agent_name (str): The name of the agent to find.
Returns:
Agent: The agent with the specified name, or None if not found.
"""
for agent in self.agents:
if agent.name == agent_name:
return agent
return None
def select_agent_and_send_task(
self, name: str = None, task: str = None, *args, **kwargs
):
"""
Select an agent from the list and send a task to them.
Args:
name (str): The name of the agent to send the task to.
task (str): The task to send to the agent.
Returns:
str: The response from the agent.
Raises:
KeyError: If the agent name is not found in the list of agents.
"""
try:
            # Look up the agent by name in the list of agents
            agent = self.find_agent_by_name(name)
            if agent is None:
                return f"Invalid agent name: {name}. Available agents: {[a.agent_name for a in self.agents]}"
            response = agent.run(task, *args, **kwargs)
return response
except Exception as e:
logger.error(f"Error: {e}")
raise e
def agents_list(
self,
) -> str:
logger.info("Listing agents")
for agent in self.agents:
name = agent.agent_name
description = agent.description or "No description available."
logger.info(f"Agent: {name}, Description: {description}")
self.conversation.add(name, description)
return self.conversation.return_history_as_string()
def parse_then_send_tasks_to_agents(self, response: dict):
# Initialize an empty dictionary to store the output of each agent
output = []
# Loop over the tasks in the response
for call in response["calls"]:
name = call["agent_name"]
task = call["task"]
# Loop over the agents
for agent in self.agents:
# If the agent's name matches the name in the task, run the task
if agent.agent_name == name:
out = agent.run(task)
print(out)
                    output.append(f"{name}: {out}")
                    # Store the output in the dictionary
# output[name] = out
break
return output
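    # Hypothetical example (an assumption, not from the diff) of the director's
    # CallTeam-shaped response that parse_then_send_tasks_to_agents expects:
    #
    #     {
    #         "calls": [
    #             {"agent_name": "marketing_specialist", "task": "Draft a growth plan"},
    #             {"agent_name": "sales_specialist", "task": "Outline a sales funnel"},
    #         ]
    #     }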
# # Example usage:
# system_prompt = f"""
# You're a director agent, your responsibility is to serve the user efficiently, effectively and skillfully.You have a swarm of agents available to distribute tasks to, interact with the user and then submit tasks to the worker agents. Provide orders to the worker agents that are direct, explicit, and simple. Ensure that they are given tasks that are understandable, actionable, and simple to execute.
"""
# ######
# Workers available:
# {agents_list(team)}
# Initialize the function caller
function_caller = OpenAIFunctionCaller(
system_prompt=system_prompt,
openai_api_key=os.getenv("OPENAI_API_KEY"),
max_tokens=500,
temperature=0.5,
base_model=CallTeam,
)
# """

# Run the function caller
response = function_caller.run(
"Now let's grow the company! Send an order to the marketing specialist, sales specialist, and product development specialist to improve the company's growth strategies."
)
# print(response)
print(response)
print(type(response))
def has_sop(self):
# We need to check the name of the agents and their description or system prompt
# TODO: Provide many shot examples of the agents available and even maybe what tools they have access to
# TODO: Provide better reasoning prompt tiles, such as when do you use a certain agent and specific
# Things NOT to do.
return f"""
You're a director boss agent orchestrating worker agents with tasks. Select an agent most relevant to
the input task and give them a task. If there is not an agent relevant to the input task then say so and be simple and direct.
        These are the agents available; call them if you need them for a specific
        task or operation:
Number of agents: {len(self.agents)}
Agents Available: {
[
{"name": agent.name, "description": agent.system_prompt}
for agent in self.agents
]
}
        """

out = parse_then_send_tasks_to_agents(team, response)
print(out)

@ -0,0 +1,197 @@
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Callable, List, Optional
from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.loguru_logger import logger
class MonteCarloSwarm(BaseSwarm):
"""
MonteCarloSwarm leverages multiple agents to collaborate in a Monte Carlo fashion.
Each agent's output is passed to the next, refining the result progressively.
Supports parallel execution, dynamic agent selection, and custom result aggregation.
Attributes:
agents (List[Agent]): A list of agents that will participate in the swarm.
parallel (bool): If True, agents will run in parallel.
result_aggregator (Callable[[List[Any]], Any]): A function to aggregate results from agents.
max_workers (Optional[int]): The maximum number of threads for parallel execution.
"""
def __init__(
self,
agents: List[Agent],
parallel: bool = False,
result_aggregator: Optional[Callable[[List[Any]], Any]] = None,
max_workers: Optional[int] = None,
*args,
**kwargs,
) -> None:
"""
Initializes the MonteCarloSwarm with a list of agents.
Args:
agents (List[Agent]): A list of agents to include in the swarm.
parallel (bool): If True, agents will run in parallel. Default is False.
result_aggregator (Optional[Callable[[List[Any]], Any]]): A function to aggregate results from agents.
max_workers (Optional[int]): The maximum number of threads for parallel execution.
"""
super().__init__(agents=agents, *args, **kwargs)
if not agents:
raise ValueError("The agents list cannot be empty.")
self.agents = agents
self.parallel = parallel
self.result_aggregator = (
result_aggregator or self.default_aggregator
)
self.max_workers = max_workers or len(agents)
def run(self, task: str) -> Any:
"""
Runs the MonteCarloSwarm with the given input, passing the output of each agent
to the next one in the list or running agents in parallel.
Args:
task (str): The initial input to provide to the first agent.
Returns:
Any: The final output after all agents have processed the input.
"""
logger.info(
f"Starting MonteCarloSwarm with parallel={self.parallel}"
)
if self.parallel:
results = self._run_parallel(task)
else:
results = self._run_sequential(task)
final_output = self.result_aggregator(results)
logger.info(
f"MonteCarloSwarm completed. Final output: {final_output}"
)
return final_output
def _run_sequential(self, task: str) -> List[Any]:
"""
Runs the agents sequentially, passing each agent's output to the next.
Args:
task (str): The initial input to provide to the first agent.
Returns:
List[Any]: A list of results from each agent.
"""
results = []
current_input = task
for i, agent in enumerate(self.agents):
logger.info(f"Agent {i + 1} processing sequentially...")
current_output = agent.run(current_input)
results.append(current_output)
current_input = current_output
return results
def _run_parallel(self, task: str) -> List[Any]:
"""
Runs the agents in parallel, each receiving the same initial input.
Args:
task (str): The initial input to provide to all agents.
Returns:
List[Any]: A list of results from each agent.
"""
results = []
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
future_to_agent = {
executor.submit(agent.run, task): agent
for agent in self.agents
}
for future in as_completed(future_to_agent):
try:
result = future.result()
results.append(result)
logger.info(f"Agent completed with result: {result}")
except Exception as e:
logger.error(f"Agent encountered an error: {e}")
results.append(None)
return results
@staticmethod
def default_aggregator(results: List[Any]) -> Any:
"""
        Default result aggregator that returns the full list of agent results.
Args:
results (List[Any]): A list of results from agents.
Returns:
Any: The final aggregated result.
"""
return results
def average_aggregator(results: List[float]) -> float:
return sum(results) / len(results) if results else 0.0
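# A hypothetical custom aggregator (an assumption, not part of this commit):
# pick the longest agent response, on the assumption that it is the most
# detailed one, and pass it to MonteCarloSwarm via result_aggregator.
def longest_response_aggregator(results: List[Any]) -> Any:
    valid = [r for r in results if isinstance(r, str)]
    return max(valid, key=len) if valid else None


# e.g.:
# swarm = MonteCarloSwarm(
#     agents=agents_list, parallel=True, result_aggregator=longest_response_aggregator
# )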
# Example usage
if __name__ == "__main__":
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the agents
agents_list = [
Agent(
agent_name="Financial-Analysis-Agent-1",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
streaming_on=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent_1.json",
retry_attempts=3,
context_length=200000,
),
Agent(
agent_name="Financial-Analysis-Agent-2",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
streaming_on=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent_2.json",
retry_attempts=3,
context_length=200000,
),
# Add more agents as needed
]
# Initialize the MonteCarloSwarm with parallel execution enabled
swarm = MonteCarloSwarm(
agents=agents_list, parallel=True, max_workers=2
)
# Run the swarm with an initial query
final_output = swarm.run(
"What are the components of a startup's stock incentive equity plan?"
)
print("Final output:", final_output)

@ -0,0 +1,136 @@
import hashlib
import json
import os
from typing import Any, Dict, Optional, List
class PromptCache:
"""
A framework to handle prompt caching for any LLM API. This reduces costs, latency,
and allows reuse of long-form context across multiple API requests.
"""
def __init__(
self,
cache_dir: str = "cache",
llm_api_function: Optional[Any] = None,
text: Optional[List[str]] = None,
):
"""
Initializes the PromptCache instance.
Args:
cache_dir (str): Directory where cached responses are stored.
llm_api_function (Optional[Any]): The function that interacts with the LLM API.
It should accept a prompt and return the response.
"""
self.cache_dir = cache_dir
self.llm_api_function = llm_api_function
self.text = text
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
def _generate_cache_key(self, prompt: str) -> str:
"""
Generates a unique cache key for a given prompt.
Args:
prompt (str): The prompt to generate a cache key for.
Returns:
str: A unique cache key.
"""
return hashlib.md5(prompt.encode("utf-8")).hexdigest()
def _cache_file_path(self, cache_key: str) -> str:
"""
Constructs the file path for the cache file.
Args:
cache_key (str): The cache key for the prompt.
Returns:
str: The path to the cache file.
"""
return os.path.join(self.cache_dir, f"{cache_key}.json")
def _load_from_cache(self, cache_key: str) -> Optional[Dict[str, Any]]:
"""
Loads a cached response if available.
Args:
cache_key (str): The cache key for the prompt.
Returns:
Optional[Dict[str, Any]]: The cached response, or None if not found.
"""
cache_file = self._cache_file_path(cache_key)
if os.path.exists(cache_file):
with open(cache_file, "r") as f:
return json.load(f)
return None
def _save_to_cache(
self, cache_key: str, response: Dict[str, Any]
) -> None:
"""
Saves the API response to the cache.
Args:
cache_key (str): The cache key for the prompt.
response (Dict[str, Any]): The API response to be cached.
"""
cache_file = self._cache_file_path(cache_key)
with open(cache_file, "w") as f:
json.dump(response, f)
def get_response(self, prompt: str) -> Dict[str, Any]:
"""
Retrieves the response for a prompt, using cache if available.
Args:
prompt (str): The prompt to retrieve the response for.
Returns:
Dict[str, Any]: The API response, either from cache or freshly fetched.
"""
cache_key = self._generate_cache_key(prompt)
cached_response = self._load_from_cache(cache_key)
if cached_response is not None:
return cached_response
# If the response is not cached, use the LLM API to get the response
if self.llm_api_function is None:
raise ValueError("LLM API function is not defined.")
response = self.llm_api_function(prompt)
self._save_to_cache(cache_key, response)
return response
def clear_cache(self) -> None:
"""
Clears the entire cache directory.
"""
for cache_file in os.listdir(self.cache_dir):
os.remove(os.path.join(self.cache_dir, cache_file))
# Example usage
if __name__ == "__main__":
# Dummy LLM API function
def mock_llm_api(prompt: str) -> Dict[str, Any]:
return {"response": f"Mock response to '{prompt}'"}
# Initialize the cache
cache = PromptCache(llm_api_function=mock_llm_api)
# Example prompts
prompt1 = "What is the capital of France?"
prompt2 = "Explain the theory of relativity."
# Get responses
print(cache.get_response(prompt1))
print(cache.get_response(prompt2))
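    # Hedged sketch (editor's illustration, not the committed code): responses are
    # persisted to disk, so a fresh PromptCache instance -- even one without an
    # llm_api_function -- can serve the prompts cached above without an API call.
    reloaded_cache = PromptCache()
    print(reloaded_cache.get_response(prompt1))  # read from cache/<md5 of prompt1>.json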

@ -0,0 +1,106 @@
import hashlib
from typing import Dict, Optional
class PromptCache:
"""
A class to manage prompt caching for LLMs, allowing the reuse of context across multiple API requests.
This reduces costs and latency, particularly for long prompts.
Attributes:
cache (Dict[str, str]): A dictionary to store cached prompts and their corresponding responses.
"""
def __init__(self) -> None:
"""Initializes the PromptCache with an empty cache."""
self.cache: Dict[str, str] = {}
def _hash_prompt(self, prompt: str) -> str:
"""
Generates a unique hash for a given prompt.
Args:
prompt (str): The prompt to hash.
Returns:
str: The generated hash.
"""
return hashlib.sha256(prompt.encode()).hexdigest()
def add_to_cache(self, prompt: str, response: str) -> None:
"""
Adds a prompt and its corresponding response to the cache.
Args:
prompt (str): The prompt string.
response (str): The response generated by the LLM.
Returns:
None
"""
prompt_hash = self._hash_prompt(prompt)
self.cache[prompt_hash] = response
def get_from_cache(self, prompt: str) -> Optional[str]:
"""
Retrieves a cached response for a given prompt, if available.
Args:
prompt (str): The prompt string to retrieve the cached response for.
Returns:
Optional[str]: The cached response if found, otherwise None.
"""
prompt_hash = self._hash_prompt(prompt)
return self.cache.get(prompt_hash)
def clear_cache(self) -> None:
"""
Clears the entire prompt cache.
Returns:
None
"""
self.cache.clear()
def cache_size(self) -> int:
"""
Returns the number of items currently in the cache.
Returns:
int: The size of the cache.
"""
return len(self.cache)
def remove_from_cache(self, prompt: str) -> None:
"""
Removes a specific prompt and its response from the cache.
Args:
prompt (str): The prompt string to remove from the cache.
Returns:
None
"""
prompt_hash = self._hash_prompt(prompt)
if prompt_hash in self.cache:
del self.cache[prompt_hash]
# Example usage:
# Initialize the cache
prompt_cache = PromptCache()
# Add a prompt and response to the cache
prompt = "What is the capital of France?"
response = "The capital of France is Paris."
prompt_cache.add_to_cache(prompt, response)
# Retrieve the response from the cache
cached_response = prompt_cache.get_from_cache(prompt)
if cached_response:
print("Cached response:", cached_response)
else:
print("Prompt not found in cache.")

@ -0,0 +1,175 @@
import os
from swarms import Agent, OpenAIChat
from typing import List, Union, Callable
from collections import Counter
# Aggregation functions
def aggregate_most_common_result(results: List[str]) -> str:
"""
Aggregate results using the most common result.
Args:
results (List[str]): List of results from each iteration.
Returns:
str: The most common result.
"""
result_counter = Counter(results)
most_common_result = result_counter.most_common(1)[0][0]
return most_common_result
def aggregate_weighted_vote(results: List[str], weights: List[int]) -> str:
"""
Aggregate results using a weighted voting system.
Args:
results (List[str]): List of results from each iteration.
weights (List[int]): List of weights corresponding to each result.
Returns:
str: The result with the highest weighted vote.
"""
weighted_results = Counter()
for result, weight in zip(results, weights):
weighted_results[result] += weight
weighted_result = weighted_results.most_common(1)[0][0]
return weighted_result
def aggregate_average_numerical(results: List[Union[str, float]]) -> float:
"""
Aggregate results by averaging numerical outputs.
Args:
results (List[Union[str, float]]): List of numerical results from each iteration.
Returns:
float: The average of the numerical results.
"""
numerical_results = [
float(result) for result in results if is_numerical(result)
]
if numerical_results:
return sum(numerical_results) / len(numerical_results)
else:
return float("nan") # or handle non-numerical case as needed
def aggregate_consensus(results: List[str]) -> Union[str, None]:
"""
Aggregate results by checking if there's a consensus (all results are the same).
Args:
results (List[str]): List of results from each iteration.
Returns:
Union[str, None]: The consensus result if there is one, otherwise None.
"""
if all(result == results[0] for result in results):
return results[0]
else:
return None # or handle lack of consensus as needed
def is_numerical(value: str) -> bool:
"""
Check if a string can be interpreted as a numerical value.
Args:
value (str): The string to check.
Returns:
bool: True if the string is numerical, otherwise False.
"""
try:
float(value)
return True
except ValueError:
return False
# MonteCarloSwarm class
class MonteCarloSwarm:
def __init__(
self,
agents: List[Agent],
iterations: int = 100,
aggregator: Callable = aggregate_most_common_result,
):
self.agents = agents
self.iterations = iterations
self.aggregator = aggregator
def run(self, task: str) -> Union[str, float, None]:
"""
Execute the Monte Carlo swarm, passing the output of each agent to the next.
The final result is aggregated over multiple iterations using the provided aggregator.
Args:
task (str): The task for the swarm to execute.
Returns:
Union[str, float, None]: The final aggregated result.
"""
aggregated_results = []
for i in range(self.iterations):
result = task
for agent in self.agents:
result = agent.run(result)
aggregated_results.append(result)
# Apply the selected aggregation function
final_result = self.aggregator(aggregated_results)
return final_result
# Example usage:
# Assuming you have the OpenAI API key set up and agents defined
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
agent1 = Agent(
agent_name="Agent1",
system_prompt="System prompt for agent 1",
llm=model,
max_loops=1,
verbose=True,
)
agent2 = Agent(
agent_name="Agent2",
system_prompt="System prompt for agent 2",
llm=model,
max_loops=1,
verbose=True,
)
# Create a MonteCarloSwarm with the agents and a selected aggregation function.
# aggregate_weighted_vote also needs a weights list, so bind one weight per
# iteration with a lambda; passing the bare function would raise a TypeError.
swarm = MonteCarloSwarm(
    agents=[agent1, agent2],
    iterations=1,
    aggregator=lambda results: aggregate_weighted_vote(results, weights=[1]),
)
# Run the swarm on a specific task
final_output = swarm.run(
"What are the components of a startup's stock incentive plan?"
)
print("Final Output:", final_output)
# You can easily switch the aggregation function by passing a different one to the constructor:
# swarm = MonteCarloSwarm(agents=[agent1, agent2], iterations=100, aggregator=aggregate_weighted_vote)
# If using weighted voting, you'll need to adjust the aggregator call to provide the weights:
# weights = list(range(100, 0, -1)) # Example weights for 100 iterations
# swarm = MonteCarloSwarm(agents=[agent1, agent2], iterations=100, aggregator=lambda results: aggregate_weighted_vote(results, weights))
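# Hedged sketch (editor's illustration, not the committed code) of the
# weighted-voting setup described in the comments above: earlier iterations
# receive larger weights via a lambda that closes over `weights`.
iterations = 10
weights = list(range(iterations, 0, -1))  # 10, 9, ..., 1
weighted_swarm = MonteCarloSwarm(
    agents=[agent1, agent2],
    iterations=iterations,
    aggregator=lambda results: aggregate_weighted_vote(results, weights),
)
# weighted_output = weighted_swarm.run(
#     "What are the components of a startup's stock incentive plan?"
# )
# print("Weighted output:", weighted_output)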

@ -11,5 +11,6 @@ def bootup():
disable_logging() disable_logging()
logging.disable(logging.CRITICAL) logging.disable(logging.CRITICAL)
os.environ["WANDB_SILENT"] = "true" os.environ["WANDB_SILENT"] = "true"
os.environ["WORKSPACE_DIR"] = "agent_workspace"
warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=DeprecationWarning)
auto_update() auto_update()

@ -7,7 +7,7 @@ from packaging import version
# borrowed from: https://stackoverflow.com/a/1051266/656011 # borrowed from: https://stackoverflow.com/a/1051266/656011
def check_for_package(package): def check_for_package(package: str) -> bool:
if package in sys.modules: if package in sys.modules:
return True return True
elif (spec := importlib.util.find_spec(package)) is not None: elif (spec := importlib.util.find_spec(package)) is not None:

@ -1,110 +0,0 @@
import requests
from loguru import logger
import os
def fetch_secrets_from_vault(
client_id: str = os.getenv("HCP_CLIENT_ID"),
client_secret: str = os.getenv("HCP_CLIENT_SECRET"),
organization_id: str = os.getenv("HCP_ORGANIZATION_ID"),
project_id: str = os.getenv("HCP_PROJECT_ID"),
app_id: str = os.getenv("HCP_APP_ID"),
) -> str:
"""
Fetch secrets from HashiCorp Vault using service principal authentication.
Args:
client_id (str): The client ID for the service principal.
client_secret (str): The client secret for the service principal.
organization_id (str): The ID of the organization in HCP.
project_id (str): The ID of the project in HCP.
app_id (str): The ID of the app in HCP.
    Returns:
        tuple[str, str]: The name and value of the last secret returned by the app.
Raises:
Exception: If there is an error retrieving the API token or secrets.
"""
# Step 1: Generate the API Token
token_url = "https://auth.idp.hashicorp.com/oauth2/token"
token_data = {
"client_id": client_id,
"client_secret": client_secret,
"grant_type": "client_credentials",
"audience": "https://api.hashicorp.cloud",
}
token_headers = {"Content-Type": "application/x-www-form-urlencoded"}
logger.info("Requesting API token from HashiCorp Vault")
response = requests.post(
token_url, data=token_data, headers=token_headers
)
if response.status_code != 200:
logger.error(
f"Failed to retrieve API token. Status Code: {response.status_code}, Response: {response.text}"
)
response.raise_for_status()
api_token = response.json().get("access_token")
if not api_token:
raise Exception("Failed to retrieve API token")
# Step 2: Fetch Secrets
secrets_url = f"https://api.cloud.hashicorp.com/secrets/2023-06-13/organizations/{organization_id}/projects/{project_id}/apps/{app_id}/open"
secrets_headers = {"Authorization": f"Bearer {api_token}"}
logger.info("Fetching secrets from HashiCorp Vault")
response = requests.get(secrets_url, headers=secrets_headers)
if response.status_code != 200:
logger.error(
f"Failed to fetch secrets. Status Code: {response.status_code}, Response: {response.text}"
)
response.raise_for_status()
secrets = response.json()
for secret in secrets["secrets"]:
name = secret.get("name")
value = secret.get("version", {}).get("value")
print(f"Name: {name}, Value: {value}")
return name, value
# def main() -> None:
# """
# Main function to fetch secrets from HashiCorp Vault and print them.
# Raises:
# EnvironmentError: If required environment variables are not set.
# """
# HCP_CLIENT_ID = os.getenv("HCP_CLIENT_ID")
# HCP_CLIENT_SECRET = os.getenv("HCP_CLIENT_SECRET")
# ORGANIZATION_ID = os.getenv("HCP_ORGANIZATION_ID")
# PROJECT_ID = os.getenv("HCP_PROJECT_ID")
# APP_ID = os.getenv("HCP_APP_ID")
# # if not all([HCP_CLIENT_ID, HCP_CLIENT_SECRET, ORGANIZATION_ID, PROJECT_ID, APP_ID]):
# # raise EnvironmentError("One or more environment variables are missing: HCP_CLIENT_ID, HCP_CLIENT_SECRET, ORGANIZATION_ID, PROJECT_ID, APP_ID")
# secrets = fetch_secrets_from_vault(
# HCP_CLIENT_ID,
# HCP_CLIENT_SECRET,
# ORGANIZATION_ID,
# PROJECT_ID,
# APP_ID,
# )
# print(secrets)
# for secret in secrets["secrets"]:
# name = secret.get("name")
# value = secret.get("version", {}).get("value")
# print(f"Name: {name}, Value: {value}")
# if __name__ == "__main__":
# main()

@ -1,540 +0,0 @@
from __future__ import annotations
import asyncio
import json
import os
import platform
from typing import Any
import pkg_resources
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
OTLPSpanExporter,
)
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import Span, Status, StatusCode
from swarms.structs.base_swarm import BaseSwarm
class Telemetry:
"""A class to handle anonymous telemetry for the swarms package.
The data being collected is for development purpose, all data is anonymous.
There is NO data being collected on the prompts, tasks descriptions
agents backstories or goals nor responses or any data that is being
processed by the agents, nor any secrets and env vars.
Data collected includes:
- Version of swarms
- Version of Python
- General OS (e.g. number of CPUs, macOS/Windows/Linux)
- Number of agents and tasks in a crew
- Crew Process being used
- If Agents are using memory or allowing delegation
- If Tasks are being executed in parallel or sequentially
- Language model being used
- Roles of agents in a crew
- Tools names available
Users can opt-in to sharing more complete data using the `share_crew`
attribute in the Crew class.
"""
def __init__(self):
self.ready = False
self.trace_set = False
try:
telemetry_endpoint = "https://telemetry.swarms.com:4319"
self.resource = Resource(
attributes={SERVICE_NAME: "swarms-telemetry"},
)
self.provider = TracerProvider(resource=self.resource)
processor = BatchSpanProcessor(
OTLPSpanExporter(
endpoint=f"{telemetry_endpoint}/v1/traces",
timeout=30,
)
)
self.provider.add_span_processor(processor)
self.ready = True
except BaseException as e:
if isinstance(
e,
(
SystemExit,
KeyboardInterrupt,
GeneratorExit,
asyncio.CancelledError,
),
):
raise # Re-raise the exception to not interfere with system signals
self.ready = False
def set_tracer(self):
if self.ready and not self.trace_set:
try:
trace.set_tracer_provider(self.provider)
self.trace_set = True
except Exception:
self.ready = False
self.trace_set = False
def swarm_creation(
self, swarm: BaseSwarm, inputs: dict[str, Any] | None
):
"""Records the creation of a crew."""
if self.ready:
try:
tracer = trace.get_tracer("swarms.telemetry")
span = tracer.start_span("Crew Created")
self._add_attribute(
span,
"swarms_version",
pkg_resources.get_distribution("swarms").version,
)
self._add_attribute(
span, "python_version", platform.python_version()
)
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "crew_process", crew.process)
self._add_attribute(span, "crew_memory", crew.memory)
self._add_attribute(
span, "crew_number_of_tasks", len(crew.tasks)
)
self._add_attribute(
span, "crew_number_of_agents", len(crew.agents)
)
self._add_attribute(
span,
"crew_agents",
json.dumps(
[
{
"key": agent.key,
"id": str(agent.id),
"role": agent.role,
"goal": agent.goal,
"backstory": agent.backstory,
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.prompt_file,
"llm": json.dumps(
self._safe_llm_attributes(agent.llm)
),
"delegation_enabled?": agent.allow_delegation,
"tools_names": [
tool.name.casefold()
for tool in agent.tools or []
],
}
for agent in crew.agents
]
),
)
self._add_attribute(
span,
"crew_tasks",
json.dumps(
[
{
"key": task.key,
"id": str(task.id),
"description": task.description,
"expected_output": task.expected_output,
"async_execution?": task.async_execution,
"human_input?": task.human_input,
"agent_role": (
task.agent.role
if task.agent
else "None"
),
"agent_key": (
task.agent.key if task.agent else None
),
"context": (
[
task.description
for task in task.context
]
if task.context
else None
),
"tools_names": [
tool.name.casefold()
for tool in task.tools or []
],
}
for task in crew.tasks
]
),
)
self._add_attribute(span, "platform", platform.platform())
self._add_attribute(
span, "platform_release", platform.release()
)
self._add_attribute(
span, "platform_system", platform.system()
)
self._add_attribute(
span, "platform_version", platform.version()
)
self._add_attribute(span, "cpus", os.cpu_count())
if crew.share_crew:
self._add_attribute(
span,
"crew_inputs",
json.dumps(inputs) if inputs else None,
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def task_started(self, crew: Crew, task: Task) -> Span | None:
"""Records task started in a crew."""
if self.ready:
try:
tracer = trace.get_tracer("swarms.telemetry")
created_span = tracer.start_span("Task Created")
self._add_attribute(created_span, "crew_key", crew.key)
self._add_attribute(created_span, "crew_id", str(crew.id))
self._add_attribute(created_span, "task_key", task.key)
self._add_attribute(created_span, "task_id", str(task.id))
if crew.share_crew:
self._add_attribute(
created_span,
"formatted_description",
task.description,
)
self._add_attribute(
created_span,
"formatted_expected_output",
task.expected_output,
)
created_span.set_status(Status(StatusCode.OK))
created_span.end()
span = tracer.start_span("Task Execution")
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "task_key", task.key)
self._add_attribute(span, "task_id", str(task.id))
if crew.share_crew:
self._add_attribute(
span, "formatted_description", task.description
)
self._add_attribute(
span,
"formatted_expected_output",
task.expected_output,
)
return span
except Exception:
pass
return None
def task_ended(self, span: Span, task: Task, crew: Crew):
"""Records task execution in a crew."""
if self.ready:
try:
if crew.share_crew:
self._add_attribute(
span,
"task_output",
task.output.raw if task.output else "",
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
"""Records the repeated usage 'error' of a tool by an agent."""
if self.ready:
try:
tracer = trace.get_tracer("swarms.telemetry")
span = tracer.start_span("Tool Repeated Usage")
self._add_attribute(
span,
"swarms_version",
pkg_resources.get_distribution("swarms").version,
)
self._add_attribute(span, "tool_name", tool_name)
self._add_attribute(span, "attempts", attempts)
if llm:
self._add_attribute(
span,
"llm",
json.dumps(self._safe_llm_attributes(llm)),
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def tool_usage(self, llm: Any, tool_name: str, attempts: int):
"""Records the usage of a tool by an agent."""
if self.ready:
try:
tracer = trace.get_tracer("swarms.telemetry")
span = tracer.start_span("Tool Usage")
self._add_attribute(
span,
"swarms_version",
pkg_resources.get_distribution("swarms").version,
)
self._add_attribute(span, "tool_name", tool_name)
self._add_attribute(span, "attempts", attempts)
if llm:
self._add_attribute(
span,
"llm",
json.dumps(self._safe_llm_attributes(llm)),
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def tool_usage_error(self, llm: Any):
"""Records the usage of a tool by an agent."""
if self.ready:
try:
tracer = trace.get_tracer("swarms.telemetry")
span = tracer.start_span("Tool Usage Error")
self._add_attribute(
span,
"swarms_version",
pkg_resources.get_distribution("swarms").version,
)
if llm:
self._add_attribute(
span,
"llm",
json.dumps(self._safe_llm_attributes(llm)),
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def individual_test_result_span(
self, crew: Crew, quality: int, exec_time: int, model_name: str
):
if self.ready:
try:
tracer = trace.get_tracer("swarms.telemetry")
span = tracer.start_span("Crew Individual Test Result")
self._add_attribute(
span,
"swarms_version",
pkg_resources.get_distribution("swarms").version,
)
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "quality", str(quality))
self._add_attribute(span, "exec_time", str(exec_time))
self._add_attribute(span, "model_name", model_name)
return span
except Exception:
pass
def test_execution_span(
self,
crew: Crew,
iterations: int,
inputs: dict[str, Any] | None,
model_name: str,
):
if self.ready:
try:
tracer = trace.get_tracer("swarms.telemetry")
span = tracer.start_span("Crew Test Execution")
self._add_attribute(
span,
"swarms_version",
pkg_resources.get_distribution("swarms").version,
)
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "iterations", str(iterations))
self._add_attribute(span, "model_name", model_name)
if crew.share_crew:
self._add_attribute(
span,
"inputs",
json.dumps(inputs) if inputs else None,
)
return span
except Exception:
pass
def crew_execution_span(
self, crew: Crew, inputs: dict[str, Any] | None
):
"""Records the complete execution of a crew.
This is only collected if the user has opted-in to share the crew.
"""
self.crew_creation(crew, inputs)
if (self.ready) and (crew.share_crew):
try:
tracer = trace.get_tracer("swarms.telemetry")
span = tracer.start_span("Crew Execution")
self._add_attribute(
span,
"swarms_version",
pkg_resources.get_distribution("swarms").version,
)
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(
span,
"crew_inputs",
json.dumps(inputs) if inputs else None,
)
self._add_attribute(
span,
"crew_agents",
json.dumps(
[
{
"key": agent.key,
"id": str(agent.id),
"role": agent.role,
"goal": agent.goal,
"backstory": agent.backstory,
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.prompt_file,
"llm": json.dumps(
self._safe_llm_attributes(agent.llm)
),
"delegation_enabled?": agent.allow_delegation,
"tools_names": [
tool.name.casefold()
for tool in agent.tools or []
],
}
for agent in crew.agents
]
),
)
self._add_attribute(
span,
"crew_tasks",
json.dumps(
[
{
"id": str(task.id),
"description": task.description,
"expected_output": task.expected_output,
"async_execution?": task.async_execution,
"human_input?": task.human_input,
"agent_role": (
task.agent.role
if task.agent
else "None"
),
"agent_key": (
task.agent.key if task.agent else None
),
"context": (
[
task.description
for task in task.context
]
if task.context
else None
),
"tools_names": [
tool.name.casefold()
for tool in task.tools or []
],
}
for task in crew.tasks
]
),
)
return span
except Exception:
pass
def end_crew(self, crew, final_string_output):
if (self.ready) and (crew.share_crew):
try:
self._add_attribute(
crew._execution_span,
"swarms_version",
pkg_resources.get_distribution("swarms").version,
)
self._add_attribute(
crew._execution_span,
"crew_output",
final_string_output,
)
self._add_attribute(
crew._execution_span,
"crew_tasks_output",
json.dumps(
[
{
"id": str(task.id),
"description": task.description,
"output": task.output.raw_output,
}
for task in crew.tasks
]
),
)
crew._execution_span.set_status(Status(StatusCode.OK))
crew._execution_span.end()
except Exception:
pass
def _add_attribute(self, span, key, value):
"""Add an attribute to a span."""
try:
return span.set_attribute(key, value)
except Exception:
pass
def _safe_llm_attributes(self, llm):
attributes = [
"name",
"model_name",
"base_url",
"model",
"top_k",
"temperature",
]
if llm:
safe_attributes = {
k: v for k, v in vars(llm).items() if k in attributes
}
safe_attributes["class"] = llm.__class__.__name__
return safe_attributes
return {}

@ -0,0 +1,192 @@
from typing import Any, Dict, List, Optional, Union
import json
import requests
from loguru import logger
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
OTLPSpanExporter,
)
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import Status, StatusCode
class TelemetryProcessor:
"""
A class to handle telemetry processing, including converting data to JSON,
exporting it to an API server, and tracing the operations with OpenTelemetry.
Attributes:
service_name (str): The name of the service for tracing.
otlp_endpoint (str): The endpoint URL for the OTLP exporter.
tracer (Tracer): The tracer object used for creating spans.
Methods:
process_data(data: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None) -> str:
Converts input data to a JSON string.
export_to_server(json_data: Optional[str] = None, api_url: Optional[str] = None) -> None:
Sends the JSON data to the specified API server.
"""
def __init__(
self,
service_name: str = "telemetry_service",
otlp_endpoint: str = "http://localhost:4318/v1/traces",
*args,
**kwargs,
) -> None:
"""
Initializes the TelemetryProcessor class with configurable settings.
Args:
service_name (str): The name of the service for tracing.
otlp_endpoint (str): The endpoint URL for the OTLP exporter.
"""
self.service_name = service_name
self.otlp_endpoint = otlp_endpoint
# Configure OpenTelemetry Tracing
resource = Resource(
attributes={SERVICE_NAME: self.service_name}, *args, **kwargs
)
trace.set_tracer_provider(
TracerProvider(resource=resource), *args, **kwargs
)
self.tracer = trace.get_tracer(__name__)
# Configure OTLP Exporter to send spans to a collector (e.g., Jaeger, Zipkin)
otlp_exporter = OTLPSpanExporter(endpoint=self.otlp_endpoint)
span_processor = BatchSpanProcessor(otlp_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)
logger.debug(
f"TelemetryProcessor initialized with service_name={self.service_name}, otlp_endpoint={self.otlp_endpoint}"
)
def process_data(
self,
data: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
) -> str:
"""
Converts input data to a JSON string.
Args:
data (Optional[Union[Dict[str, Any], List[Dict[str, Any]]]]): The input data to be converted.
Defaults to an empty dictionary if None is provided.
Returns:
str: The JSON string representation of the input data.
Raises:
TypeError: If the input data is not a dictionary or a list of dictionaries.
            ValueError: If the data cannot be serialized to JSON (for example, circular references).
"""
with self.tracer.start_as_current_span("process_data") as span:
if data is None:
data = {}
logger.debug(f"Processing data: {data}")
if not isinstance(data, (dict, list)):
logger.error(
"Invalid data type. Expected a dictionary or a list of dictionaries."
)
span.set_status(
Status(StatusCode.ERROR, "Invalid data type")
)
raise TypeError(
"Input data must be a dictionary or a list of dictionaries."
)
try:
json_data = json.dumps(data)
logger.debug(f"Converted data to JSON: {json_data}")
return json_data
            except (TypeError, ValueError) as e:
logger.error(f"Failed to convert data to JSON: {e}")
span.set_status(
Status(StatusCode.ERROR, "JSON serialization failed")
)
raise
def export_to_server(
self,
json_data: Optional[str] = None,
api_url: Optional[str] = None,
) -> None:
"""
Sends the JSON data to the specified API server.
Args:
json_data (Optional[str]): The JSON data to be sent. Defaults to an empty JSON string if None is provided.
api_url (Optional[str]): The URL of the API server to send the data to. Defaults to None.
Raises:
ValueError: If the api_url is None.
requests.exceptions.RequestException: If there is an error sending the data to the server.
"""
with self.tracer.start_as_current_span("export_to_server") as span:
if json_data is None:
json_data = "{}"
if api_url is None:
logger.error("API URL cannot be None.")
span.set_status(
Status(StatusCode.ERROR, "API URL is missing")
)
raise ValueError("API URL cannot be None.")
logger.debug(f"Exporting JSON data to server: {api_url}")
headers = {"Content-Type": "application/json"}
log = {
"data": json_data,
}
try:
                # Send the payload as JSON so it matches the declared Content-Type header
                response = requests.post(
                    api_url, json=log, headers=headers
                )
response.raise_for_status()
logger.info(
f"Data successfully exported to {api_url}: {response.status_code}"
)
except requests.exceptions.RequestException as e:
logger.error(f"Failed to export data to {api_url}: {e}")
span.set_status(
Status(
StatusCode.ERROR,
"Failed to send data to API server",
)
)
raise
# # Example usage:
# if __name__ == "__main__":
# # Example usage with custom service name and OTLP endpoint
# processor = TelemetryProcessor(service_name="my_telemetry_service", otlp_endpoint="http://my-collector:4318/v1/traces")
# # Sample data
# telemetry_data = {
# "device_id": "sensor_01",
# "temperature": 22.5,
# "humidity": 60,
# "timestamp": "2024-08-15T12:34:56Z"
# }
# # Processing data
# try:
# json_data = processor.process_data(telemetry_data)
# except Exception as e:
# logger.error(f"Processing error: {e}")
# # Handle error accordingly
# # Exporting data to an API server
# api_url = "https://example.com/api/telemetry"
# try:
# processor.export_to_server(json_data, api_url)
# except Exception as e:
# logger.error(f"Export error: {e}")
# # Handle error accordingly
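# Hedged sketch (editor's illustration, not the committed code): process_data also
# accepts a list of dicts, so a small batch can be serialized and shipped in one
# call. The URL below is a placeholder, not a real collector endpoint.
def export_batch(
    processor: TelemetryProcessor,
    records: List[Dict[str, Any]],
    api_url: str,
) -> None:
    """Serialize a batch of telemetry records and send them to the collector API."""
    payload = processor.process_data(records)  # traced as "process_data"
    processor.export_to_server(payload, api_url)  # traced as "export_to_server"


# export_batch(
#     TelemetryProcessor(),
#     [{"device_id": "sensor_01", "temperature": 22.5}],
#     "https://example.com/api/telemetry",
# )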

@ -6,15 +6,15 @@ load_dotenv()
os.environ["USE_TELEMETRY"] = "True" os.environ["USE_TELEMETRY"] = "True"
use_telementry = os.getenv("USE_TELEMETRY")
def activate_sentry(): def activate_sentry():
use_telementry = os.getenv("USE_TELEMETRY")
if use_telementry == "True": if use_telementry == "True":
sentry_sdk.init( sentry_sdk.init(
dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928", dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928",
traces_sample_rate=1.0, traces_sample_rate=1.0,
profiles_sample_rate=1.0, profiles_sample_rate=1.0,
enable_tracing=True, enable_tracing=True,
debug=True, debug=False, # Set debug to False
) )

@ -1,7 +1,6 @@
import os import os
import subprocess import subprocess
from loguru import logger from loguru import logger
import black
from swarms.models.tiktoken_wrapper import TikTokenizer from swarms.models.tiktoken_wrapper import TikTokenizer
@ -56,6 +55,8 @@ class CodeExecutor:
ValueError: If the code cannot be formatted. ValueError: If the code cannot be formatted.
""" """
try: try:
import black
formatted_code = black.format_str(code, mode=black.FileMode()) formatted_code = black.format_str(code, mode=black.FileMode())
return formatted_code return formatted_code
except black.InvalidInput as e: except black.InvalidInput as e:

@ -4,6 +4,7 @@ import warnings
def disable_logging(): def disable_logging():
warnings.filterwarnings("ignore", category=UserWarning) warnings.filterwarnings("ignore", category=UserWarning)
# disable tensorflow warnings # disable tensorflow warnings
@ -37,8 +38,17 @@ def disable_logging():
# Remove all existing handlers # Remove all existing handlers
logging.getLogger().handlers = [] logging.getLogger().handlers = []
# Get the workspace directory from the environment variables
workspace_dir = os.environ["WORKSPACE_DIR"]
# Check if the workspace directory exists, if not, create it
if not os.path.exists(workspace_dir):
os.makedirs(workspace_dir)
# Create a file handler to log errors to the file # Create a file handler to log errors to the file
file_handler = logging.FileHandler("errors.txt") file_handler = logging.FileHandler(
os.path.join(workspace_dir, "error.txt")
)
file_handler.setLevel(logging.ERROR) file_handler.setLevel(logging.ERROR)
logging.getLogger().addHandler(file_handler) logging.getLogger().addHandler(file_handler)
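# Hedged note (editor's sketch, not the committed code): the hunk above reads
# os.environ["WORKSPACE_DIR"] directly, which raises KeyError if bootup() has not
# set it yet. A defensive variant falls back to the default workspace name:
import logging
import os

workspace_dir = os.environ.get("WORKSPACE_DIR", "agent_workspace")
os.makedirs(workspace_dir, exist_ok=True)

error_handler = logging.FileHandler(os.path.join(workspace_dir, "error.txt"))
error_handler.setLevel(logging.ERROR)
logging.getLogger().addHandler(error_handler)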

@ -1,5 +1,6 @@
import json import json
import os import os
from typing import Any
import re import re
import shutil import shutil
import tempfile import tempfile
@ -60,7 +61,7 @@ def create_file(
return file_path return file_path
def create_file_in_folder(folder_path: str, file_name: str, content: str): def create_file_in_folder(folder_path: str, file_name: str, content: Any):
""" """
Creates a file in the specified folder with the given file name and content. Creates a file in the specified folder with the given file name and content.
@ -83,7 +84,7 @@ def create_file_in_folder(folder_path: str, file_name: str, content: str):
return file_path return file_path
def zip_folders(folder1_path, folder2_path, zip_file_path): def zip_folders(folder1_path: str, folder2_path: str, zip_file_path: str):
""" """
Zip two folders into a single zip file. Zip two folders into a single zip file.
