[FEAT][GraphWorkflow] [Memory Optimization] [Tokenizer Implementation]

pull/518/head^2
Kye Gomez 7 months ago
parent e061d23d54
commit 052743bce1

@ -1507,6 +1507,8 @@ print(out)
```
---
## Documentation

api.py (192 additions)

@ -0,0 +1,192 @@
import asyncio
import os
from typing import List, Optional
import tiktoken
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from swarms import Agent, Anthropic, GPT4o, GPT4VisionAPI, OpenAIChat
from swarms.utils.loguru_logger import logger
from swarms_cloud.schema.cog_vlm_schemas import (
ChatCompletionResponse,
UsageInfo,
)
# Define the input model using Pydantic
class AgentInput(BaseModel):
agent_name: str = "Swarm Agent"
system_prompt: Optional[str] = None
agent_description: Optional[str] = None
model_name: str = "OpenAIChat"
max_loops: int = 1
autosave: bool = False
dynamic_temperature_enabled: bool = False
dashboard: bool = False
verbose: bool = False
streaming_on: bool = True
saved_state_path: Optional[str] = None
sop: Optional[str] = None
sop_list: Optional[List[str]] = None
user_name: str = "User"
retry_attempts: int = 3
context_length: int = 8192
task: Optional[str] = None
# Define the output model using Pydantic
class AgentOutput(BaseModel):
agent: AgentInput
completions: ChatCompletionResponse
async def count_tokens(
text: str,
):
try:
# Get the encoding for the specific model
encoding = tiktoken.encoding_for_model("gpt-4o")
# Encode the text
tokens = encoding.encode(text)
# Count the tokens
token_count = len(tokens)
return token_count
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
def model_router(model_name: str):
"""
Return an initialized LLM instance for the specified model.
Parameters:
- model_name (str): The name of the model to switch to.
Returns:
- The initialized LLM instance.
Raises:
- HTTPException: If the model name is not recognized.
"""
# Logic to switch to the specified model
if model_name == "OpenAIChat":
# Switch to OpenAIChat model
llm = OpenAIChat()
elif model_name == "GPT4o":
# Switch to GPT4o model
llm = GPT4o(openai_api_key=os.getenv("OPENAI_API_KEY"))
elif model_name == "GPT4VisionAPI":
# Switch to GPT4VisionAPI model
llm = GPT4VisionAPI()
elif model_name == "Anthropic":
# Switch to Anthropic model
llm = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
else:
# Invalid model name
raise HTTPException(status_code=400, detail=f"Unknown model name: {model_name}")
return llm
# Create a FastAPI app
app = FastAPI(debug=True)
# Load the middleware to handle CORS
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# @app.get("/v1/models", response_model=ModelList)
# async def list_models():
# """
# An endpoint to list available models. It returns a list of model cards.
# This is useful for clients to query and understand what models are available for use.
# """
# model_card = ModelCard(
# id="cogvlm-chat-17b"
# ) # can be replaced by your model id like cogagent-chat-18b
# return ModelList(data=[model_card])
@app.post("v1/agent/completions", response_model=AgentOutput)
async def agent_completions(agent_input: AgentInput):
try:
logger.info(f"Received request: {agent_input}")
llm = model_router(agent_input.model_name)
agent = Agent(
agent_name=agent_input.agent_name,
system_prompt=agent_input.system_prompt,
agent_description=agent_input.agent_description,
llm=llm,
max_loops=agent_input.max_loops,
autosave=agent_input.autosave,
dynamic_temperature_enabled=agent_input.dynamic_temperature_enabled,
dashboard=agent_input.dashboard,
verbose=agent_input.verbose,
streaming_on=agent_input.streaming_on,
saved_state_path=agent_input.saved_state_path,
sop=agent_input.sop,
sop_list=agent_input.sop_list,
user_name=agent_input.user_name,
retry_attempts=agent_input.retry_attempts,
context_length=agent_input.context_length,
)
# Run the agent
logger.info(f"Running agent with task: {agent_input.task}")
completions = await agent.run(agent_input.task)
logger.info(f"Completions: {completions}")
all_input_tokens, output_tokens = await asyncio.gather(
count_tokens(agent.short_memory.return_history_as_string()),
count_tokens(completions),
)
logger.info(f"Token counts: {all_input_tokens}, {output_tokens}")
out = AgentOutput(
agent=agent_input,
completions=ChatCompletionResponse(
choices=[
{
"index": 0,
"message": {
"role": agent_input.agent_name,
"content": completions,
"name": None,
},
}
],
stream_choices=None,
usage_info=UsageInfo(
prompt_tokens=all_input_tokens,
completion_tokens=output_tokens,
total_tokens=all_input_tokens + output_tokens,
),
),
)
return out
except Exception as e:
raise HTTPException(status_code=400, detail=str(e))
# if __name__ == "__main__":
# import uvicorn
# uvicorn.run(
# app, host="0.0.0.0", port=8000, use_colors=True, log_level="info"
# )
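For reference, a minimal sketch of how a client could call the new completions endpoint once the API is served (assuming the commented-out uvicorn settings above, i.e. host 0.0.0.0 and port 8000; the payload fields mirror `AgentInput`):

```python
# Hypothetical client call; the path and payload mirror the /v1/agent/completions endpoint above.
import requests

payload = {
    "agent_name": "Swarm Agent",
    "model_name": "OpenAIChat",
    "task": "Summarize the quarterly sales figures.",
}
resp = requests.post("http://localhost:8000/v1/agent/completions", json=payload)
print(resp.json())
```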

@ -134,6 +134,8 @@ nav:
  - GPT4o: "swarms/models/gpt4o.md"
- Agents:
  - Agent: "swarms/structs/agent.md"
- Artifacts:
  - Overview: "swarms/artifacts/artifact.md"
- Structs:
  - Foundational Structures:
    - BaseStructure: "swarms/structs/basestructure.md"

@ -1,10 +1,4 @@
from swarms import Agent, OpenAIChat
-from swarms.agents.multion_wrapper import MultiOnAgent
-model = MultiOnAgent(
-url="https://tesla.com",
-)
def calculate_profit(revenue: float, expenses: float):
@ -34,16 +28,6 @@ def generate_report(company_name: str, profit: float):
"""
return f"The profit for {company_name} is ${profit}."
-def browser_automation(task: str = None):
-"""
-Run a task on the browser automation agent.
-Args:
-task (str): The task to be executed on the browser automation agent.
-"""
-out = model.run(task)
-return out
# Initialize the agent
agent = Agent(
@ -53,16 +37,25 @@ agent = Agent(
llm=OpenAIChat(),
max_loops=1,
autosave=True,
-dynamic_temperature_enabled=True,
+# dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode
saved_state_path="accounting_agent.json",
-# tools=[calculate_profit, generate_report],
+tools=[calculate_profit, generate_report],
# docs_folder="docs",
# pdf_path="docs/accounting_agent.pdf",
-# tools=[browser_automation],
+# sop="Calculate the profit for a company.",
+# sop_list=["Calculate the profit for a company."],
+# user_name="User",
+# # docs=
+# # docs_folder="docs",
+# retry_attempts=3,
+# context_length=1000,
+# tool_schema = dict
+context_length=1000,
+# long_term_memory=ChromaDB(docs_folder="artifacts"),
)
agent.run(

@ -0,0 +1,90 @@
import time
from typing import List, Optional
from pydantic import BaseModel
# NOTE: these initial schemas are redefined with extended fields further below.
class AgentSchema(BaseModel):
name: Optional[str] = None
system_prompt: Optional[str] = None
task: Optional[str] = None
response: Optional[str] = None
class JambaSwarmRequest(BaseModel):
task: Optional[str] = None
plan: Optional[str] = None
agents: Optional[List[AgentSchema]] = None
timestamp: int = int(time.time())
class JambaSwarmResponse(BaseModel):
task: Optional[str] = None
plan: Optional[str] = None
agents: Optional[List[AgentSchema]] = None
timestamp: int = int(time.time())
response: Optional[str] = None
class AgentSchema(BaseModel):
name: Optional[str] = None
system_prompt: Optional[str] = None
task: Optional[str] = None
response: Optional[str] = None
class DirectorSettings(BaseModel):
name: str
strategy: str
objectives: List[str]
class BossSettings(BaseModel):
name: str
decision_making_strategy: str
recruitment_strategy: str
class TaskDistribution(BaseModel):
task: str
assigned_agents: List[str]
class JambaSwarmRequest(BaseModel):
task: Optional[str] = None
plan: Optional[str] = None
agents: Optional[List[AgentSchema]] = None
director_settings: DirectorSettings
boss_settings: BossSettings
task_distribution: Optional[List[TaskDistribution]] = None
timestamp: int = int(time.time())
class JambaSwarmResponse(BaseModel):
task: Optional[str] = None
plan: Optional[str] = None
agents: Optional[List[AgentSchema]] = None
response: Optional[str] = None
timestamp: int = int(time.time())
# Sample usage:
# try:
# request = JambaSwarmRequest(
# task="Research on AI",
# plan="Execute a comprehensive research plan",
# agents=[
# AgentSchema(name="Agent1", system_prompt="Analyze recent AI papers", task="AI research task"),
# AgentSchema(name="Agent2", system_prompt="Summarize AI research findings", task="Summarization task"),
# ],
# director_settings=DirectorSettings(name="Director1", strategy="Hierarchical", objectives=["Efficiency", "Accuracy"]),
# boss_settings=BossSettings(name="Boss1", decision_making_strategy="Collaborative", recruitment_strategy="Pre-selected"),
# task_distribution=[
# TaskDistribution(task="Research on AI", assigned_agents=["Agent1", "Agent2"])
# ]
# )
# print(request.json())
# except ValidationError as e:
# print(e.json())

@ -0,0 +1,56 @@
import os
from ai21 import AI21Client
from ai21.models.chat import ChatMessage
from dotenv import load_dotenv
from swarms import BaseLLM
load_dotenv()
class Jamba(BaseLLM):
def __init__(
self,
api_key: str = os.getenv("AI21_API_KEY"),
temperature: float = 0.8,
max_tokens: int = 200,
):
"""
Initializes the Jamba class with the provided API key.
Args:
api_key (str): The API key for the AI21Client.
"""
os.environ["AI21_API_KEY"] = api_key
self.api_key = api_key
self.temperature = temperature
self.max_tokens = max_tokens
self.client = AI21Client()
def run(self, prompt: str, *args, **kwargs) -> str:
"""
Generates a response for the given prompt using the AI21 model.
Args:
prompt (str): The prompt for generating the response.
Returns:
str: The generated response.
Raises:
Exception: If there is an issue with the API request.
"""
try:
response = self.client.chat.completions.create(
model="jamba-instruct-preview", # Latest model
messages=[ChatMessage(role="user", content=prompt)],
temperature=self.temperature,
max_tokens=self.max_tokens,
*args,
**kwargs,
)
return response.choices[0].message.content
except Exception as e:
print(f"Error: {e}")
raise
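Usage mirrors the commented example in the notebook below: instantiate the wrapper (it expects `AI21_API_KEY` in the environment or a `.env` file) and call `run` with a prompt. A minimal sketch:

```python
# Minimal usage sketch; requires a valid AI21_API_KEY.
jamba = Jamba(max_tokens=200)
out = jamba.run("Write a product title for a sports T-shirt.")
print(out)
```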

@ -0,0 +1,583 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [],
"source": [
"# ! pip install ai21\n",
"# ! pip install swarms\n",
"\n",
"import os\n",
"from typing import List\n",
"\n",
"from ai21 import AI21Client\n",
"from ai21.models.chat import ChatMessage\n",
"from dotenv import load_dotenv\n",
"\n",
"from swarms import Agent, BaseLLM, MixtureOfAgents\n",
"\n",
"load_dotenv()\n",
"\n",
"class Jamba(BaseLLM):\n",
" def __init__(self, api_key: str = os.getenv(\"AI21_API_KEY\"), temperature: int = 0.8, max_tokens: int = 200):\n",
" \"\"\"\n",
" Initializes the Jamba class with the provided API key.\n",
"\n",
" Args:\n",
" api_key (str): The API key for the AI21Client.\n",
" \"\"\"\n",
" os.environ[\"AI21_API_KEY\"] = api_key\n",
" self.api_key = api_key\n",
" self.temperature = temperature\n",
" self.max_tokens = max_tokens\n",
" self.client = AI21Client()\n",
"\n",
" def run(self, prompt: str, *args, **kwargs) -> str:\n",
" \"\"\"\n",
" Generates a response for the given prompt using the AI21 model.\n",
"\n",
" Args:\n",
" prompt (str): The prompt for generating the response.\n",
"\n",
" Returns:\n",
" str: The generated response.\n",
" \n",
" Raises:\n",
" Exception: If there is an issue with the API request.\n",
" \"\"\"\n",
" try:\n",
" response = self.client.chat.completions.create(\n",
" model=\"jamba-instruct-preview\", # Latest model\n",
" messages=[ChatMessage(\n",
" role=\"user\",\n",
" content=prompt\n",
" )],\n",
" temperature=self.temperature,\n",
" max_tokens=self.max_tokens,\n",
" *args, \n",
" **kwargs\n",
" )\n",
" return response.choices[0].message.content\n",
" except Exception as e:\n",
" print(f\"Error: {e}\")\n",
" raise\n",
"\n",
"# Example usage:\n",
"# jamba = Jamba()\n",
"# out = jamba.run(\"Write a product title for a sports T-shirt to be published on an\")\n",
"# print(out)\n",
"\n",
"\n",
"model = Jamba(\n",
" max_tokens=4000,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Agent Planner\n",
"This agent below will make the plan for the agent."
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mInitializing Autonomous Agent Boss Director...\u001b[0m\n"
]
}
],
"source": [
"\n",
"BOSS_PLANNER = \"\"\"\n",
"You're the swarm orchestrator agent\n",
"\n",
"**Objective:** Your task is to intake a business problem or activity and create a swarm of specialized LLM agents that can efficiently solve or automate the given problem. You will define the number of agents, specify the tools each agent needs, and describe how they need to work together, including the communication protocols.\n",
"\n",
"**Instructions:**\n",
"\n",
"1. **Intake Business Problem:**\n",
" - Receive a detailed description of the business problem or activity to automate.\n",
" - Clarify the objectives, constraints, and expected outcomes of the problem.\n",
" - Identify key components and sub-tasks within the problem.\n",
"\n",
"2. **Agent Design:**\n",
" - Based on the problem, determine the number and types of specialized LLM agents required.\n",
" - For each agent, specify:\n",
" - The specific task or role it will perform.\n",
" - The tools and resources it needs to perform its task.\n",
" - Any prerequisite knowledge or data it must have access to.\n",
" - Ensure that the collective capabilities of the agents cover all aspects of the problem.\n",
"\n",
"3. **Coordination and Communication:**\n",
" - Define how the agents will communicate and coordinate with each other.\n",
" - Choose the type of communication (e.g., synchronous, asynchronous, broadcast, direct messaging).\n",
" - Describe the protocol for information sharing, conflict resolution, and task handoff.\n",
"\n",
"4. **Workflow Design:**\n",
" - Outline the workflow or sequence of actions the agents will follow.\n",
" - Define the input and output for each agent.\n",
" - Specify the triggers and conditions for transitions between agents or tasks.\n",
" - Ensure there are feedback loops and monitoring mechanisms to track progress and performance.\n",
"\n",
"5. **Scalability and Flexibility:**\n",
" - Design the system to be scalable, allowing for the addition or removal of agents as needed.\n",
" - Ensure flexibility to handle dynamic changes in the problem or environment.\n",
"\n",
"6. **Output Specification:**\n",
" - Provide a detailed plan including:\n",
" - The number of agents and their specific roles.\n",
" - The tools and resources each agent will use.\n",
" - The communication and coordination strategy.\n",
" - The workflow and sequence of actions.\n",
" - Include a diagram or flowchart if necessary to visualize the system.\n",
"\n",
"**Example Structure:**\n",
"\n",
"**Business Problem:** Automate customer support for an e-commerce platform.\n",
"\n",
"**Agents and Roles:**\n",
"1. **Customer Query Classifier Agent:**\n",
" - Task: Classify incoming customer queries into predefined categories.\n",
" - Tools: Natural language processing toolkit, pre-trained classification model.\n",
" - Communication: Receives raw queries, sends classified queries to relevant agents.\n",
"\n",
"2. **Order Status Agent:**\n",
" - Task: Provide order status updates to customers.\n",
" - Tools: Access to order database, query processing toolkit.\n",
" - Communication: Receives classified queries about order status, responds with relevant information.\n",
"\n",
"3. **Product Recommendation Agent:**\n",
" - Task: Suggest products to customers based on their query and browsing history.\n",
" - Tools: Recommendation engine, access to product database.\n",
" - Communication: Receives classified queries about product recommendations, sends personalized suggestions.\n",
"\n",
"4. **Technical Support Agent:**\n",
" - Task: Assist customers with technical issues.\n",
" - Tools: Access to technical support database, troubleshooting toolkit.\n",
" - Communication: Receives classified queries about technical issues, provides solutions or escalation.\n",
"\n",
"**Communication Strategy:**\n",
"- **Type:** Asynchronous communication through a central message broker.\n",
"- **Protocol:** Agents publish and subscribe to specific topics related to their tasks. \n",
"- **Conflict Resolution:** If multiple agents need to handle the same query, a priority protocol is in place to determine the primary responder.\n",
"\n",
"**Workflow:**\n",
"1. Customer Query Classifier Agent receives and classifies the query.\n",
"2. Classified query is routed to the appropriate specialized agent.\n",
"3. Specialized agent processes the query and sends a response.\n",
"4. If needed, the response triggers further actions from other agents.\n",
"\n",
"**Scalability and Flexibility:**\n",
"- Agents can be added or removed based on query volume and complexity.\n",
"- System adapts to changes in query types and business needs.\n",
"\n",
"**Output Plan:**\n",
"- Diagram illustrating agent roles and communication flow.\n",
"- Detailed description of each agent's tasks, tools, and communication methods.\n",
"- Workflow sequence from query intake to resolution.\n",
"\n",
"\n",
"\"\"\"\n",
"\n",
"\n",
"# Initialize the agent\n",
"planning_agent = Agent(\n",
" agent_name=\"Boss Director\",\n",
" system_prompt=BOSS_PLANNER,\n",
" agent_description=\"Generates a spec of agents for the problem at hand.\",\n",
" llm=model,\n",
" max_loops=1,\n",
" autosave=True,\n",
" dynamic_temperature_enabled=True,\n",
" dashboard=False,\n",
" verbose=True,\n",
" streaming_on=True,\n",
" # interactive=True, # Set to False to disable interactive mode\n",
" saved_state_path=\"accounting_agent.json\",\n",
" # tools=[calculate_profit, generate_report],\n",
" # docs_folder=\"docs\",\n",
" # pdf_path=\"docs/accounting_agent.pdf\",\n",
" # tools=[browser_automation],\n",
")\n",
"\n",
"# out = planning_agent.run(\"Create a swarm of agents for automating customer support for an e-commerce platform.\")\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Agent creator\n",
"This agent is going to create the code for every agent"
]
},
{
"cell_type": "code",
"execution_count": 44,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:17:18.845351-0700\u001b[0m \u001b[1mTools provided make sure the functions have documentation ++ type hints, otherwise tool execution won't be reliable.\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:18.848651-0700\u001b[0m \u001b[1mTools provided: Accessing 1 tools\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:18.849536-0700\u001b[0m \u001b[1mConverting tools into OpenAI function calling schema\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:18.850261-0700\u001b[0m \u001b[1mConverting tool: create_and_execute_swarm into a OpenAI certified function calling schema. Add documentation and type hints.\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:18.852247-0700\u001b[0m \u001b[1mConversion process successful, the tool create_and_execute_swarm has been integrated with the agent successfully.\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:18.857608-0700\u001b[0m \u001b[1mAutonomous Agent Activated.\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:18.858788-0700\u001b[0m \u001b[1mAll systems operational. Executing task...\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:18.861938-0700\u001b[0m \u001b[1mTokens available: -7069\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:18.865433-0700\u001b[0m \u001b[1mTemperature: 0.5907771934426157\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mInitializing Autonomous Agent Boss Agent Creator...\u001b[0m\n",
"Agent generating function schema\n",
"\u001b[36m\n",
"Loop 1 of 1\u001b[0m\n",
"\n",
"\n",
"\n",
"\n",
"To create and execute a swarm of agents for automating customer support for an e-commerce platform, we will use the `create_and_execute_swarm` function. This function will create a list of specialized LLM agents, each with a specific role in customer support. Here's the breakdown of the task: * **Function Name**: `create_and_execute_swarm` * **Arguments**: + `name`: A list of names for the agents. + `system_prompt`: A list of system prompts for the agents. + `task`: The description of the task for the swarm. For the e-commerce platform, we can specify the names and system prompts for each agent. For instance, the agents can be named 'Product Inquiry Agent', 'Order Status Agent', 'Complaint Resolution Agent', etc. Each agent will have a system prompt tailored to its role. Let's proceed with creating and executing this swarm. The JSON output will adhere to the schema provided for the `create_and_execute_swarm` function. ```json { \"type\": \"function\", \"function\": { \"name\": \"create_and_execute_swarm\", \"parameters\": { \"name\": [\"Product Inquiry Agent\", \"Order Status Agent\", \"Complaint Resolution Agent\"], \"system_prompt\": [\"Assist with product information\", \"Provide order status updates\", \"Resolve customer complaints\"], \"task\": \"Automate customer support for an e-commerce platform\" } } } ``` This JSON output represents the creation and execution of a swarm of agents, each with a specific role in automating customer support for an e-commerce platform. The agents are named according to their function, and their system prompts guide their interaction with customers. \n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:17:28.072930-0700\u001b[0m \u001b[31m\u001b[1mAttempt 1: Error generating response: expected string or bytes-like object, got 'NoneType'\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{ \"type\": \"function\", \"function\": { \"name\": \"create_and_execute_swarm\", \"parameters\": { \"name\": [ \"CS_Agent1\", \"CS_Agent2\", \"CS_Agent3\" ], \"system_prompt\": [ \"Greet customer and identify issue\", \"Provide solutions or escalate issue\", \"Follow up and ensure resolution\" ], \"task\": \"Automating customer support for an e-commerce platform\" } } "
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:17:30.692259-0700\u001b[0m \u001b[31m\u001b[1mAttempt 2: Error generating response: expected string or bytes-like object, got 'NoneType'\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"{ \"type\": \"function\", \"function\": { \"name\": \"create_and_execute_swarm\", \"parameters\": { \"name\": [ \"CustomerSupportAgent1\", \"CustomerSupportAgent2\", \"CustomerSupportAgent3\" ], \"system_prompt\": [ \"Assist customers with product inquiries\", \"Resolve customer complaints and issues\", \"Provide information on order status and shipping\" ], \"task\": \"Automate customer support for an e-commerce platform\" } } "
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:17:33.539722-0700\u001b[0m \u001b[31m\u001b[1mAttempt 3: Error generating response: expected string or bytes-like object, got 'NoneType'\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:33.540472-0700\u001b[0m \u001b[31m\u001b[1mFailed to generate a valid response after retry attempts.\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:33.540701-0700\u001b[0m \u001b[1mAutosaving agent state.\u001b[0m\n",
"\u001b[32m2024-06-22T17:17:33.540904-0700\u001b[0m \u001b[1mSaving Agent Boss Agent Creator state to: Boss Agent Creator_state.json\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[31mError saving agent state: Object of type TikTokenizer is not JSON serializable\u001b[0m\n"
]
}
],
"source": [
"from swarms.tools.py_func_to_openai_func_str import get_openai_function_schema_from_func\n",
"\n",
"# Name, system prompt, \n",
"def create_and_execute_swarm(name: List[str], system_prompt: List[str], task: str):\n",
" \"\"\"\n",
" Creates and executes a swarm of agents for the given task.\n",
"\n",
" Args:\n",
" name (List[str]): A list of names for the agents.\n",
" system_prompt (List[str]): A list of system prompts for the agents.\n",
" task (str): The description of the task for the swarm.\n",
" *args: Variable length argument list.\n",
" **kwargs: Arbitrary keyword arguments.\n",
"\n",
" Returns:\n",
" List[Agent]: A list of agents in the swarm.\n",
"\n",
" \"\"\"\n",
" agents = []\n",
" for name, prompt in zip(name, system_prompt):\n",
" agent = Agent(\n",
" agent_name=name,\n",
" system_prompt=prompt,\n",
" agent_description=\"Generates a spec of agents for the problem at hand.\",\n",
" llm=model,\n",
" max_loops=1,\n",
" autosave=True,\n",
" dynamic_temperature_enabled=True,\n",
" dashboard=False,\n",
" verbose=True,\n",
" streaming_on=True,\n",
" # interactive=True, # Set to False to disable interactive mode\n",
" saved_state_path=f\"{name}_agent.json\",\n",
" # tools=[calculate_profit, generate_report],\n",
" # docs_folder=\"docs\",\n",
" # pdf_path=\"docs/accounting_agent.pdf\",\n",
" # tools=[browser_automation],\n",
" )\n",
" agents.append(agent)\n",
" \n",
" # MoA\n",
" moa = MixtureOfAgents(agents=agents, description=task, final_agent=name[0])\n",
" \n",
" out = moa.run(task,)\n",
" print(out)\n",
" return out\n",
"\n",
"function_schema = get_openai_function_schema_from_func(function=create_and_execute_swarm, name = create_and_execute_swarm.__name__, description=create_and_execute_swarm.__doc__)\n",
"# print(f\"Function schema: {function_schema}\")\n",
"\n",
"BOSS_CREATOR = f\"\"\"\n",
"\n",
"You are a swarm orchestrator with expertise in agentic design. Your task is to solve a business problem by creating and coordinating specialized LLM agents. Follow the schematic schema with function calling to design the solution.\n",
"\n",
"Create a cohesive system of specialized LLM agents that effectively solve or automate the given business problem through clear roles, efficient communication, and a well-defined workflow. Ensure the system is scalable and flexible to adapt to changes.\n",
"\n",
"Follow the following schema using markdown format and output only this and nothing else:\n",
"```json\n",
"{function_schema}\n",
"```\n",
"\n",
"\"\"\"\n",
"\n",
"\n",
"# Boss Agent creator\n",
"boss_agent_creator = Agent(\n",
" agent_name=\"Boss Agent Creator\",\n",
" system_prompt=BOSS_CREATOR,\n",
" agent_description=\"Generates a spec of agents for the problem at hand.\",\n",
" llm=model,\n",
" max_loops=1,\n",
" autosave=True,\n",
" dynamic_temperature_enabled=True,\n",
" dashboard=False,\n",
" verbose=True,\n",
" streaming_on=True,\n",
" # interactive=True, # Set to False to disable interactive mode\n",
" saved_state_path=\"boss_director_agent.json\",\n",
" # tools=[calculate_profit, generate_report],\n",
" # docs_folder=\"docs\",\n",
" # pdf_path=\"docs/accounting_agent.pdf\",\n",
" tools=[create_and_execute_swarm],\n",
")\n",
"\n",
"print(f\"Agent generating function schema\")\n",
"boss_agent_creator.run(\"Create a swarm of agents for automating customer support for an e-commerce platform.\")"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:11:19.727696-0700\u001b[0m \u001b[1mMaking plan for the task: Create a swarm of agents for automating customer support for an e-commerce platform.\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:19.738286-0700\u001b[0m \u001b[1mAutonomous Agent Activated.\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:19.739463-0700\u001b[0m \u001b[1mAll systems operational. Executing task...\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:19.758047-0700\u001b[0m \u001b[1mTokens available: -7261\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:19.759725-0700\u001b[0m \u001b[1mTemperature: 0.31327886639849234\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[36m\n",
"Loop 1 of 1\u001b[0m\n",
"\n",
"\n",
"\n",
"\n",
"To automate customer support for an e-commerce platform using a swarm of specialized LLM agents, we will follow the outlined structure. Here's the detailed plan: ### 1. **Business Problem Intake:** * **Objective:** Automate customer support to efficiently handle inquiries, complaints, and provide information. * **Constraints:** Real-time response capability, ability to handle a high volume of queries, and integration with the e-commerce platform. * **Expected Outcomes:** Improved customer satisfaction, reduced workload on human customer support agents, and efficient handling of a wide range of customer inquiries. * **Key Components:** Query classification, order status updates, product recommendations, and technical support. ### 2. **Agent Design:** * **Number of Agents:** 4 specialized agents. * **Agent Roles and Tools:** 1. **Query Classifier Agent:** Classifies queries into categories like order status, product inquiries, technical issues, etc. + Tools: Natural language processing toolkit, query categorization model. 2. **Order Status Agent:** Provides updates on order status. + Tools: Access to order database, query processing toolkit. 3. **Product Recommendation Agent:** Suggests products based on customer's query and browsing history. + Tools: Recommendation engine, access to product database. 4. **Technical Support Agent:** Assists with technical issues. + Tools: Access to technical support database, troubleshooting toolkit. ### 3. **Coordination and Communication:** * **Communication Type:** Asynchronous, through a central message broker. * **Communication Protocol:** Agents subscribe to specific topics related to their tasks. * **Conflict Resolution:** Priority protocol for handling the same query by multiple agents. ### 4. **Workflow Design:** * **Workflow Sequence:** 1. Query Classifier Agent receives the query. 2. Classifies the query and routes it to the appropriate agent. 3. Specialized agent processes the query and responds. 4. Response triggers further actions if necessary. * **Inputs/Outputs:** + Query Classifier Agent: Receives raw queries, outputs classified queries. + Order Status Agent: Receives classified queries about order status, outputs order status updates. + Product Recommendation Agent: Receives classified queries about product recommendations, outputs product suggestions. + Technical Support Agent: Receives classified queries about technical issues, outputs solutions or escalations. ### 5. **Scalability and Flexibility:** * Agents can be scaled based on query volume and complexity. * System adapts to changes in query types and business needs. ### 6. **Output Specification:** * **Plan Details:** + Number of Agents: 4 (Query Classifier, Order Status, Product Recommendation, Technical Support). + Tools and Resources: Natural language processing, access to databases, recommendation engines. + Communication and Coordination: Asynchronous communication through a central message broker. + Workflow: Query intake, classification, processing by specialized agents, response generation. * **Visualization:** A diagram illustrating the communication flow and agent roles will be provided. This plan outlines a scalable and flexible system for automating customer support, ensuring efficient handling of diverse customer inquiries while integrating seamlessly with the e-commerce platform. \n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:11:31.672062-0700\u001b[0m \u001b[1mAutosaving agent state.\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:31.673920-0700\u001b[0m \u001b[1mSaving Agent Boss Director state to: Boss Director_state.json\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[31mError saving agent state: Object of type TikTokenizer is not JSON serializable\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:11:31.676071-0700\u001b[0m \u001b[1mAutonomous Agent Activated.\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:31.676582-0700\u001b[0m \u001b[1mAll systems operational. Executing task...\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:31.681411-0700\u001b[0m \u001b[1mTokens available: -5216\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:31.682256-0700\u001b[0m \u001b[1mTemperature: 0.46020517238764247\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[36m\n",
"Loop 1 of 1\u001b[0m\n",
"\n",
"\n",
"\n",
"\n",
"```json { \"type\": \"function\", \"function\": { \"description\": \"\\n Creates and executes a swarm of agents for the given task.\\n\\n Args:\\n name (List[str]): A list of names for the agents.\\n system_prompt (List[str]): A list of system prompts for the agents.\\n task (str): The description of the task for the swarm.\\n *args: Variable length argument list.\\n **kwargs: Arbitrary keyword arguments.\\n\\n Returns:\\n List[Agent]: A list of agents in the swarm.\\n\\n \", \"name\": \"create_and_execute_swarm\", \"parameters\": { \"type\": \"object\", \"properties\": { \"name\": { \"items\": { \"type\": \"string\" }, \"type\": \"array\", \"description\": \"name\" }, \"system_prompt\": { \"items\": { \"type\": \"string\" }, \"type\": \"array\", \"description\": \"system_prompt\" }, \"task\": { \"type\": \"string\", \"description\": \"task\" } }, \"required\": [ \"name\", \"system_prompt\", \"task\" ] } } } ``` "
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:11:37.503452-0700\u001b[0m \u001b[31m\u001b[1mAttempt 1: Error generating response: expected string or bytes-like object, got 'NoneType'\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"{ \"type\": \"function\", \"function\": { \"name\": \"create_and_execute_swarm\", \"parameters\": { \"name\": [\"Customer Query Classifier Agent\", \"Order Status Agent\", \"Product Recommendation Agent\", \"Technical Support Agent\"], \"system_prompt\": [\"Classify incoming customer queries into predefined categories.\", \"Provide order status updates to customers.\", \"Suggest products to customers based on their query and browsing history.\", \"Assist customers with technical issues.\"], \"task\": \"Automate customer support for an e-commerce platform.\" } } } "
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:11:40.292901-0700\u001b[0m \u001b[31m\u001b[1mAttempt 2: Error generating response: expected string or bytes-like object, got 'NoneType'\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"{ \"type\": \"function\", \"function\": { \"name\": \"create_and_execute_swarm\", \"parameters\": { \"name\": [ \"Customer Query Classifier Agent\", \"Order Status Agent\", \"Product Recommendation Agent\", \"Technical Support Agent\" ], \"system_prompt\": [ \"Classify incoming customer queries into predefined categories.\", \"Provide order status updates to customers.\", \"Suggest products to customers based on their query and browsing history.\", \"Assist customers with technical issues.\" ], \"task\": \"Automate customer support for an e-commerce platform.\" } } "
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-06-22T17:11:43.262321-0700\u001b[0m \u001b[31m\u001b[1mAttempt 3: Error generating response: expected string or bytes-like object, got 'NoneType'\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:43.263663-0700\u001b[0m \u001b[31m\u001b[1mFailed to generate a valid response after retry attempts.\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:43.264713-0700\u001b[0m \u001b[1mAutosaving agent state.\u001b[0m\n",
"\u001b[32m2024-06-22T17:11:43.265105-0700\u001b[0m \u001b[1mSaving Agent Boss Agent Creator state to: Boss Agent Creator_state.json\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[31mError saving agent state: Object of type TikTokenizer is not JSON serializable\u001b[0m\n"
]
}
],
"source": [
"from swarms.utils.loguru_logger import logger\n",
"\n",
"\n",
"def run_jamba_swarm(task: str = None):\n",
" logger.info(f\"Making plan for the task: {task}\")\n",
" out = planning_agent.run(task)\n",
" \n",
" memory = planning_agent.short_memory.return_history_as_string()\n",
"\n",
" # Boss agent\n",
" return boss_agent_creator.run(memory)\n",
" \n",
" \n",
"# Example usage\n",
"run_jamba_swarm(\"Create a swarm of agents for automating customer support for an e-commerce platform.\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"\n",
"from pydantic import BaseModel\n",
"\n",
"\n",
"class AgentSchema(BaseModel):\n",
" name: str = None\n",
" system_prompt: str = None\n",
" task: str = None\n",
" response: str = None\n",
"\n",
"class JambaSwarmRequest(BaseModel):\n",
" task: str = None,\n",
" plan: str = None\n",
" agents: List[AgentSchema] = None\n",
" timestamp: int = int(time.time())\n",
" \n",
"class JambaSwarmResponse(BaseModel):\n",
" task: str = None,\n",
" plan: str = None\n",
" agents: List[AgentSchema] = None\n",
" timestamp: int = int(time.time())\n",
" response: str = None"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,174 @@
# Description: Main file for the Jamba Swarm.
from swarms.utils.loguru_logger import logger
import json
from typing import List
from dotenv import load_dotenv
from swarms import Agent, MixtureOfAgents, OpenAIChat
from jamba_swarm.prompts import BOSS_PLANNER, BOSS_CREATOR
from jamba_swarm.api_schemas import AgentSchema, JambaSwarmResponse
from swarms.utils.parse_code import extract_code_from_markdown
load_dotenv()
# Model
model = OpenAIChat()
# Name, system prompt,
def create_and_execute_swarm(
name: List[str], system_prompt: List[str], task: str
):
"""
Creates and executes a swarm of agents for the given task.
Args:
name (List[str]): A list of names for the agents.
system_prompt (List[str]): A list of system prompts for the agents.
task (str): The description of the task for the swarm.
Returns:
The final output produced by the MixtureOfAgents run.
"""
agents = []
for agent_name, prompt in zip(name, system_prompt):
agent = Agent(
agent_name=agent_name,
system_prompt=prompt,
agent_description="Generates a spec of agents for the problem at hand.",
llm=model,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode
saved_state_path=f"{agent_name}_agent.json",
# tools=[calculate_profit, generate_report],
# docs_folder="docs",
# pdf_path="docs/accounting_agent.pdf",
# tools=[browser_automation],
)
agents.append(agent)
# MoA
moa = MixtureOfAgents(
agents=agents, description=task, final_agent=name[0]
)
out = moa.run(
task,
)
print(out)
return out
# Initialize the agent
planning_agent = Agent(
agent_name="Boss Director",
system_prompt=BOSS_PLANNER,
agent_description="Generates a spec of agents for the problem at hand.",
llm=model,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode
saved_state_path="accounting_agent.json",
# tools=[calculate_profit, generate_report],
# docs_folder="docs",
# pdf_path="docs/accounting_agent.pdf",
# tools=[browser_automation],
)
# Boss Agent creator
boss_agent_creator = Agent(
agent_name="Boss Agent Creator",
system_prompt=BOSS_CREATOR,
agent_description="Generates a spec of agents for the problem at hand.",
llm=model,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode
saved_state_path="boss_director_agent.json",
# tools=[calculate_profit, generate_report],
# docs_folder="docs",
# pdf_path="docs/accounting_agent.pdf",
# tools=[create_and_execute_swarm],
)
def parse_agents(json_data):
if not json_data:
raise ValueError("Input JSON data is None or empty")
parsed_data = json.loads(json_data)
names = []
system_prompts = []
for agent in parsed_data["agents"]:
names.append(agent["agent_name"])
system_prompts.append(agent["system_prompt"])
return names, system_prompts
class JambaSwarm:
def __init__(self, planning_agent, boss_agent_creator):
self.planning_agent = planning_agent
self.boss_agent_creator = boss_agent_creator
def run(self, task: str = None):
# Planning agent
logger.info(f"Making plan for the task: {task}")
out = self.planning_agent.run(task)
# Boss agent
logger.info("Running boss agent creator with memory.")
agents = self.boss_agent_creator.run(out)
# print(f"Agents: {agents}")
agents = extract_code_from_markdown(agents)
logger.info(f"Output from boss agent creator: {agents}")
# Debugging output
logger.debug(f"Output from boss agent creator: {agents}")
# Check if agents is None
if agents is None:
raise ValueError("The boss agent creator returned None")
# Parse the JSON input and output the list of agent names and system prompts
names, system_prompts = parse_agents(agents)
# Call the function with parsed data
response = create_and_execute_swarm(names, system_prompts, task)
# Create and execute swarm
log = JambaSwarmResponse(
task=task,
plan=out,
# The schema expects AgentSchema objects, not the raw JSON string
agents=[
AgentSchema(name=n, system_prompt=p)
for n, p in zip(names, system_prompts)
],
response=response,
)
return log.json()
swarm = JambaSwarm(planning_agent, boss_agent_creator)
# Run the swarm
swarm.run("Create a swarm of agents for sales")
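For context, `parse_agents` expects the boss agent's output (after `extract_code_from_markdown`) to be a JSON object in the shape of the BOSS_CREATOR schema shown in the prompts file below, with `agent_name` and `system_prompt` keys per agent. A hypothetical example of that input:

```python
# Hypothetical agent spec in the shape parse_agents expects.
example_spec = """
{
    "task": "Create a swarm of agents for sales",
    "agents": [
        {"agent_name": "LeadQualifier", "system_prompt": "You qualify inbound sales leads."},
        {"agent_name": "OutreachWriter", "system_prompt": "You draft personalized outreach emails."}
    ]
}
"""
names, system_prompts = parse_agents(example_spec)
print(names)  # ['LeadQualifier', 'OutreachWriter']
```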

@ -0,0 +1,181 @@
# print(f"Function schema: {function_schema}")
BOSS_PLANNER = """
You're the swarm orchestrator agent
**Objective:** Your task is to intake a business problem or activity and create a swarm of specialized LLM agents that can efficiently solve or automate the given problem. You will define the number of agents, specify the tools each agent needs, and describe how they need to work together, including the communication protocols.
**Instructions:**
1. **Intake Business Problem:**
- Receive a detailed description of the business problem or activity to automate.
- Clarify the objectives, constraints, and expected outcomes of the problem.
- Identify key components and sub-tasks within the problem.
2. **Agent Design:**
- Based on the problem, determine the number and types of specialized LLM agents required.
- For each agent, specify:
- The specific task or role it will perform.
- The tools and resources it needs to perform its task.
- Any prerequisite knowledge or data it must have access to.
- Ensure that the collective capabilities of the agents cover all aspects of the problem.
3. **Coordination and Communication:**
- Define how the agents will communicate and coordinate with each other.
- Choose the type of communication (e.g., synchronous, asynchronous, broadcast, direct messaging).
- Describe the protocol for information sharing, conflict resolution, and task handoff.
4. **Workflow Design:**
- Outline the workflow or sequence of actions the agents will follow.
- Define the input and output for each agent.
- Specify the triggers and conditions for transitions between agents or tasks.
- Ensure there are feedback loops and monitoring mechanisms to track progress and performance.
5. **Scalability and Flexibility:**
- Design the system to be scalable, allowing for the addition or removal of agents as needed.
- Ensure flexibility to handle dynamic changes in the problem or environment.
6. **Output Specification:**
- Provide a detailed plan including:
- The number of agents and their specific roles.
- The tools and resources each agent will use.
- The communication and coordination strategy.
- The workflow and sequence of actions.
- Include a diagram or flowchart if necessary to visualize the system.
**Example Structure:**
**Business Problem:** Automate customer support for an e-commerce platform.
**Agents and Roles:**
1. **Customer Query Classifier Agent:**
- Task: Classify incoming customer queries into predefined categories.
- Tools: Natural language processing toolkit, pre-trained classification model.
- Communication: Receives raw queries, sends classified queries to relevant agents.
2. **Order Status Agent:**
- Task: Provide order status updates to customers.
- Tools: Access to order database, query processing toolkit.
- Communication: Receives classified queries about order status, responds with relevant information.
3. **Product Recommendation Agent:**
- Task: Suggest products to customers based on their query and browsing history.
- Tools: Recommendation engine, access to product database.
- Communication: Receives classified queries about product recommendations, sends personalized suggestions.
4. **Technical Support Agent:**
- Task: Assist customers with technical issues.
- Tools: Access to technical support database, troubleshooting toolkit.
- Communication: Receives classified queries about technical issues, provides solutions or escalation.
**Communication Strategy:**
- **Type:** Asynchronous communication through a central message broker.
- **Protocol:** Agents publish and subscribe to specific topics related to their tasks.
- **Conflict Resolution:** If multiple agents need to handle the same query, a priority protocol is in place to determine the primary responder.
**Workflow:**
1. Customer Query Classifier Agent receives and classifies the query.
2. Classified query is routed to the appropriate specialized agent.
3. Specialized agent processes the query and sends a response.
4. If needed, the response triggers further actions from other agents.
**Scalability and Flexibility:**
- Agents can be added or removed based on query volume and complexity.
- System adapts to changes in query types and business needs.
**Output Plan:**
- Diagram illustrating agent roles and communication flow.
- Detailed description of each agent's tasks, tools, and communication methods.
- Workflow sequence from query intake to resolution.
"""
BOSS_CREATOR = """
You are a swarm orchestrator with expertise in agentic design.
Your task is to solve a business problem by creating and coordinating specialized LLM agents.
Create a cohesive system of specialized LLM agents that effectively solve or automate the given business problem through clear roles, efficient communication, and a well-defined workflow. Ensure the system is scalable and flexible to adapt to changes.
Follow the schema below using markdown format and output only the JSON: return it as a JSON object, not as a string.
```json
{
"task": "Create an ML engineering team.",
"agents": [
{
"agent_name": "DataCollector",
"system_prompt": "You are DataCollector, an intelligent agent designed to gather and preprocess data for machine learning tasks. Your primary responsibility is to collect data from various sources, clean and preprocess it, and store it in a structured format. You must handle different data types such as text, images, and numerical data. Ensure that the data is free from noise and inconsistencies, and is properly labeled for supervised learning tasks. Your system prompt includes detailed instructions on data gathering techniques, preprocessing methods, and best practices for data storage. Always ensure data privacy and security during the collection process."
},
{
"agent_name": "ModelTrainer",
"system_prompt": "You are ModelTrainer, an advanced agent responsible for training machine learning models. Your tasks include selecting appropriate algorithms, setting hyperparameters, and managing the training process. You must ensure that the models are trained efficiently, achieving high performance while avoiding overfitting. Detailed instructions in your system prompt cover various training techniques such as gradient descent, regularization methods, and evaluation metrics. You also handle tasks like data augmentation, cross-validation, and monitoring training progress. Additionally, you must be adept at troubleshooting issues that arise during training and fine-tuning the models for optimal performance."
},
{
"agent_name": "Evaluator",
"system_prompt": "You are Evaluator, an expert agent tasked with evaluating the performance of machine learning models. Your job involves conducting thorough assessments using various metrics such as accuracy, precision, recall, F1 score, and more. Your system prompt provides comprehensive guidelines on designing and implementing evaluation strategies, selecting appropriate test datasets, and interpreting evaluation results. You must also ensure the robustness and generalizability of the models by performing techniques like cross-validation and stress testing. Your role includes generating detailed evaluation reports and suggesting potential improvements based on the assessment outcomes."
},
{
"agent_name": "DeploymentSpecialist",
"system_prompt": "You are DeploymentSpecialist, an agent specialized in deploying machine learning models to production environments. Your responsibilities include packaging models, creating APIs for model inference, and integrating models with existing systems. Your system prompt includes detailed instructions on various deployment frameworks, best practices for scalable and reliable deployment, and monitoring deployed models for performance and drift. You must also ensure that the deployment adheres to security protocols and handles user requests efficiently. Your role includes setting up automated pipelines for continuous integration and delivery (CI/CD) and managing version control for model updates."
},
{
"agent_name": "MaintenanceAgent",
"system_prompt": "You are MaintenanceAgent, responsible for the continuous maintenance and monitoring of deployed machine learning models. Your tasks include regular performance checks, updating models with new data, and retraining them to adapt to changing patterns. Your system prompt provides detailed guidelines on monitoring tools, anomaly detection techniques, and methods for handling model drift. You must ensure that models remain accurate and relevant over time by implementing automated retraining pipelines. Additionally, you handle bug fixes, performance optimizations, and maintain detailed logs of maintenance activities. Your role also includes ensuring that the models comply with regulatory requirements and ethical standards."
}
]
}
```
{
"task": "Create a small business team.",
"agents": [
{
"agent_name": "SalesGrowthStrategist",
"system_prompt": "You are SalesGrowthStrategist, an expert agent dedicated to developing and implementing strategies to enhance sales growth. Your responsibilities include analyzing market trends, identifying potential opportunities, and devising comprehensive sales plans. Your system prompt provides detailed instructions on conducting market research, competitive analysis, and customer segmentation. You must create targeted sales campaigns, optimize pricing strategies, and improve sales processes. Additionally, you will monitor sales performance, adjust strategies as needed, and report on key sales metrics to ensure continuous growth. You also collaborate closely with marketing and product teams to align sales strategies with overall business objectives."
},
{
"agent_name": "MarketingCampaignManager",
"system_prompt": "You are MarketingCampaignManager, a proficient agent responsible for planning, executing, and optimizing marketing campaigns. Your tasks include designing marketing strategies, creating compelling content, and selecting appropriate channels for campaign distribution. Your system prompt provides detailed guidelines on market research, audience targeting, and campaign analytics. You must ensure that campaigns align with brand messaging and achieve desired outcomes, such as increased brand awareness, lead generation, and customer engagement. Additionally, you handle budget allocation, monitor campaign performance, and adjust tactics to maximize ROI. Your role includes collaborating with creative teams and utilizing marketing automation tools for efficient campaign management."
},
{
"agent_name": "CustomerSupportAgent",
"system_prompt": "You are CustomerSupportAgent, an empathetic and knowledgeable agent dedicated to providing exceptional customer service. Your responsibilities include addressing customer inquiries, resolving issues, and ensuring customer satisfaction. Your system prompt includes detailed instructions on communication best practices, problem-solving techniques, and knowledge management. You must handle various customer support channels, such as phone, email, and live chat, while maintaining a positive and professional demeanor. Additionally, you will gather customer feedback, identify areas for improvement, and contribute to enhancing the overall customer experience. Your role also involves collaborating with product and technical teams to address complex issues and provide timely solutions."
},
{
"agent_name": "ProductDevelopmentCoordinator",
"system_prompt": "You are ProductDevelopmentCoordinator, a strategic agent focused on overseeing and coordinating the product development process. Your tasks include gathering and analyzing market requirements, defining product specifications, and managing cross-functional teams. Your system prompt provides comprehensive guidelines on project management, product lifecycle management, and stakeholder communication. You must ensure that products are developed on time, within budget, and meet quality standards. Additionally, you handle risk management, resource allocation, and continuous improvement initiatives. Your role involves close collaboration with engineering, design, and marketing teams to ensure that products align with market needs and business goals."
},
{
"agent_name": "FinancialAnalyst",
"system_prompt": "You are FinancialAnalyst, a detail-oriented agent responsible for analyzing financial data and providing insights to support business decisions. Your responsibilities include creating financial models, forecasting revenue, and evaluating investment opportunities. Your system prompt includes detailed instructions on financial analysis techniques, data interpretation, and reporting. You must analyze financial statements, identify trends, and provide recommendations to improve financial performance. Additionally, you handle budgeting, cost analysis, and risk assessment. Your role involves collaborating with various departments to gather financial information, preparing comprehensive reports, and presenting findings to stakeholders. You must ensure accuracy and compliance with financial regulations and standards."
},
{
"agent_name": "HRRecruitmentSpecialist",
"system_prompt": "You are HRRecruitmentSpecialist, an agent focused on recruiting and hiring the best talent for the organization. Your tasks include creating job descriptions, sourcing candidates, and conducting interviews. Your system prompt provides detailed guidelines on recruitment strategies, candidate evaluation, and onboarding processes. You must ensure that the recruitment process is efficient, transparent, and aligned with the company's values and goals. Additionally, you handle employer branding, candidate experience, and compliance with employment laws. Your role involves collaborating with hiring managers to understand staffing needs, conducting reference checks, and negotiating job offers. You also contribute to continuous improvement initiatives in recruitment practices."
},
{
"agent_name": "SupplyChainManager",
"system_prompt": "You are SupplyChainManager, an agent dedicated to managing and optimizing the supply chain operations. Your responsibilities include overseeing procurement, logistics, and inventory management. Your system prompt includes detailed instructions on supply chain strategies, vendor management, and process optimization. You must ensure that the supply chain is efficient, cost-effective, and resilient to disruptions. Additionally, you handle demand forecasting, quality control, and sustainability initiatives. Your role involves collaborating with suppliers, manufacturers, and distribution partners to ensure timely and accurate delivery of products. You also monitor supply chain performance, implement continuous improvement initiatives, and report on key metrics to stakeholders."
},
{
"agent_name": "ProjectManager",
"system_prompt": "You are ProjectManager, an agent responsible for planning, executing, and closing projects. Your tasks include defining project scope, creating detailed project plans, and managing project teams. Your system prompt provides comprehensive guidelines on project management methodologies, risk management, and stakeholder communication. You must ensure that projects are completed on time, within budget, and meet quality standards. Additionally, you handle resource allocation, change management, and performance monitoring. Your role involves collaborating with various departments to achieve project objectives, identifying and mitigating risks, and maintaining detailed project documentation. You also conduct post-project evaluations to capture lessons learned and improve future projects."
},
{
"agent_name": "ContentCreator",
"system_prompt": "You are ContentCreator, an agent specialized in creating engaging and high-quality content for various platforms. Your responsibilities include writing articles, producing videos, and designing graphics. Your system prompt includes detailed instructions on content creation strategies, storytelling techniques, and audience engagement. You must ensure that content is aligned with the brand's voice, values, and goals. Additionally, you handle content planning, SEO optimization, and performance analysis. Your role involves collaborating with marketing and design teams to create cohesive and impactful content. You also stay updated with industry trends, experiment with new content formats, and continuously improve content quality and effectiveness."
},
{
"agent_name": "DataAnalyst",
"system_prompt": "You are DataAnalyst, an agent focused on analyzing data to provide actionable insights for business decision-making. Your tasks include collecting and processing data, performing statistical analysis, and creating data visualizations. Your system prompt provides detailed guidelines on data analysis techniques, tools, and best practices. You must ensure that data is accurate, relevant, and used effectively to support business objectives. Additionally, you handle data cleaning, integration, and reporting. Your role involves collaborating with various departments to understand data needs, identifying trends and patterns, and presenting findings to stakeholders. You also contribute to the development of data-driven strategies and solutions."
}
]
}
"""

@@ -0,0 +1,138 @@
from swarms import MixtureOfAgents, Agent
from jamba_swarm.jamba_llm import Jamba
model = Jamba(
max_tokens=4000,
)
jamba_prompt = """
from jamba_swarm.jamba_llm import Jamba
model = Jamba(
max_tokens=4000,
)
# Run jamba
out = model.run(
"Your task goes here",
)
"""
# System Prompts
app_designer_prompt = (
"You are AppDesigner, responsible for designing the overall structure and layout of the application. "
"Your tasks include defining the user interface (UI) components, navigation flow, and ensuring that the design "
"is user-friendly, visually appealing, and functional. You must consider the best practices for UI/UX design, "
"accessibility standards, and ensure that the design is scalable for future enhancements. Provide a detailed "
"blueprint of the application's architecture, including wireframes, mockups, and any design specifications that "
"are necessary for the development team to implement the design accurately."
)
feature_engineer_prompt = (
"You are FeatureEngineer, responsible for defining and implementing the features of the application. "
"Your tasks include identifying the core functionalities that the application should offer, creating detailed "
"feature specifications, and ensuring that each feature aligns with the overall goals of the project. You must "
"consider the technical feasibility, user needs, and integration with existing systems. Provide a comprehensive "
"list of features with detailed descriptions, user stories, and any necessary technical requirements. Additionally, "
"outline the steps required for implementing each feature and any potential challenges or considerations that need "
"to be addressed during development."
)
code_generator_prompt = (
"You are CodeGenerator, responsible for generating the Python code for the application based on the design and features. "
"Your tasks include translating the design specifications and feature requirements into clean, efficient, and maintainable "
"Python code. You must follow best practices for software development, including code organization, documentation, and testing. "
"Ensure that the code is modular, reusable, and adheres to the project's coding standards. Provide the complete source code for "
"the application, along with any necessary configuration files, dependencies, and instructions for setting up and running the application in python code. Only generate the code only"
f"The code should be well-structured, commented, and easy to understand. The code must also only use Jamba model for everything {jamba_prompt}"
)
quality_assurance_prompt = (
"You are QualityAssurance, responsible for testing and ensuring the quality of the generated code. "
"Your tasks include performing thorough testing of the application, identifying and reporting bugs, and verifying that all features "
"function as intended. You must create and execute test cases, perform code reviews, and ensure that the application meets the defined "
"quality standards. Provide detailed test reports, including the results of functional, performance, and security testing. Additionally, "
"recommend any improvements or fixes needed to enhance the overall quality and reliability of the application."
)
# Initialize AppDesigner
app_designer = Agent(
agent_name="AppDesigner",
system_prompt=app_designer_prompt,
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
context_length=150000,
state_save_file_type="json",
saved_state_path="app_designer.json",
)
# Initialize FeatureEngineer
feature_engineer = Agent(
agent_name="FeatureEngineer",
system_prompt=feature_engineer_prompt,
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
context_length=150000,
state_save_file_type="json",
saved_state_path="feature_engineer.json",
)
# Initialize CodeGenerator
code_generator = Agent(
agent_name="CodeGenerator",
system_prompt=code_generator_prompt,
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
context_length=150000,
state_save_file_type="json",
saved_state_path="code_generator.json",
)
# Initialize QualityAssurance
quality_assurance = Agent(
agent_name="QualityAssurance",
system_prompt=quality_assurance_prompt,
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
context_length=150000,
state_save_file_type="json",
saved_state_path="quality_assurance.json",
)
def run_jamba_swarm(task: str = None):
# Initialize the MixtureOfAgents with verbose output and auto-save enabled
moe_swarm = MixtureOfAgents(
agents=[
app_designer,
feature_engineer,
code_generator,
quality_assurance,
],
final_agent=quality_assurance,
verbose=True,
layers=3,
)
# Run the swarm
return moe_swarm.run(task)
out = run_jamba_swarm(
"Create an open source API server that can host Jamba with context for agents "
)

@@ -31,4 +31,4 @@ def cleanup_json_logs(name: str = None):
 # Call the function
-cleanup_json_logs("artifacts_six")
+cleanup_json_logs("arifacts_swarmm_o1")

@@ -1,53 +0,0 @@
import os
from swarms import OpenAIChat, Agent, AgentRearrange
# Purpose = To detect email spam using three different agents
agent1 = Agent(
agent_name="SpamDetector1",
system_prompt="Detect if the email is spam or not, and provide your reasoning",
llm=OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")),
max_loops=1,
output_type=str,
# tools=[],
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
)
agent2 = Agent(
agent_name="SpamDetector2",
system_prompt="Detect if the email is spam or not, and provide your reasoning",
llm=OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")),
max_loops=1,
output_type=str,
# tools=[],
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
)
agent3 = Agent(
agent_name="SpamDetector3",
system_prompt="Detect if the email is spam or not, and provide your reasoning",
llm=OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")),
max_loops=1,
output_type=str,
# tools=[],
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
)
swarm = AgentRearrange(
flow="SpamDetector1 -> SpamDetector2 -> SpamDetector3",
agents=[agent1, agent2, agent3],
logging_enabled=True,
max_loops=1,
)
# Run all the agents
swarm.run("Find YSL bag with the biggest discount")

@@ -0,0 +1,36 @@
from swarms import Agent, OpenAIChat
from playground.memory.chromadb_example import ChromaDB
from swarms.models.tiktoken_wrapper import TikTokenizer
# Initialize the agent
agent = Agent(
agent_name="Accounting Assistant",
system_prompt="You're the accounting agent, your purpose is to generate a profit report for a company!",
agent_description="Generate a profit report for a company!",
llm=OpenAIChat(),
max_loops=1,
autosave=True,
# dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode
# docs_folder="docs",
# pdf_path="docs/accounting_agent.pdf",
# sop="Calculate the profit for a company.",
# sop_list=["Calculate the profit for a company."],
# user_name="User",
# # docs=
# # docs_folder="docs",
# retry_attempts=3,
# context_length=1000,
# tool_schema = dict
context_length=1000,
# long_term_memory=ChromaDB(docs_folder="artifacts"),
long_term_memory=ChromaDB(
docs_folder="artifacts", output_dir="test", n_results=1
),
tokenizer=TikTokenizer(),
)
agent.run("Whats the best agent available for accounting")

@@ -1,37 +1,13 @@
-from swarms import Agent, Anthropic, tool, ChromaDB
+from swarms import Agent, OpenAIChat  # ChromaDB
 import subprocess
-from pydantic import BaseModel
-# Initilaize the chromadb client
-chromadb = ChromaDB(
-    metric="cosine",
-    output="results",
-    docs_folder="docs",
-)
-# Create a schema for the code revision tool
-class CodeRevisionSchema(BaseModel):
-    code: str = None
-    revision: str = None
-# iNitialize the schema
-tool_schema = CodeRevisionSchema(
-    code="print('Hello, World!')",
-    revision="print('What is 2+2')",
-)
 # Model
-llm = Anthropic(
+llm = OpenAIChat(
     temperature=0.1,
 )
 # Tools
-@tool
 def terminal(
     code: str,
 ):
@@ -50,7 +26,6 @@ def terminal(
     return str(out)
-@tool
 def browser(query: str):
     """
     Search the query in the browser with the `browser` tool.
@@ -68,7 +43,6 @@ def browser(query: str):
     return f"Searching for {query} in the browser."
-@tool
 def create_file(file_path: str, content: str):
     """
     Create a file using the file editor tool.
@@ -85,7 +59,6 @@ def create_file(file_path: str, content: str):
     return f"File {file_path} created successfully."
-@tool
 def file_editor(file_path: str, mode: str, content: str):
     """
     Edit a file using the file editor tool.
@@ -120,15 +93,13 @@ agent = Agent(
     stopping_token="<DONE>",
     interactive=True,
     tools=[terminal, browser, file_editor, create_file],
-    long_term_memory=chromadb,
-    output_type=tool_schema,  # or dict, or str
+    # long_term_memory=chromadb,
     metadata_output_type="json",
     # List of schemas that the agent can handle
-    list_tool_schemas=[tool_schema],
+    # list_tool_schemas=[tool_schema],
     function_calling_format_type="OpenAI",
     function_calling_type="json",  # or soon yaml
 )
 # Run the agent
-out = agent.run("Create a new file for a plan to take over the world.")
-print(out)
+agent.run("Create a new file for a plan to take over the world.")

@@ -2,8 +2,11 @@ import os
 import threading
 from swarms.agents.multion_wrapper import MultiOnAgent
 def run_model(api_key):
-    model = MultiOnAgent(api_key=api_key, max_steps=500, url="https://x.com")
+    model = MultiOnAgent(
+        api_key=api_key, max_steps=500, url="https://x.com"
+    )
     out = model.run(
         """
         click on the 'Tweet' button to start a new tweet and post it saying: $pip3 install swarms
@@ -12,6 +15,7 @@ def run_model(api_key):
     )
     print(out)
 # Create a list to store the threads
 threads = []

@@ -1,4 +1,3 @@
 from swarms import Agent, AgentRearrange, OpenAIChat
 from swarms.agents.multion_wrapper import MultiOnAgent

@@ -0,0 +1,81 @@
import os
from swarms import OpenAIChat, Agent, AgentRearrange
llm = OpenAIChat(
openai_api_key=os.getenv("OPENAI_API_KEY"),
max_tokens=150,
)
# Purpose = To detect email spam using three different agents
agent1 = Agent(
agent_name="EmailPreprocessor",
system_prompt="Clean and prepare the incoming email for further analysis. Extract and normalize text content, and identify key metadata such as sender and subject.",
llm=llm,
max_loops=1,
output_type=str,
# tools=[],
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
)
agent2 = Agent(
agent_name="FeatureExtractor",
system_prompt="Analyze the prepared email and extract relevant features that can help in spam detection, such as keywords, sender reputation, and email structure.",
llm=llm,
max_loops=1,
output_type=str,
# tools=[],
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
)
agent3 = Agent(
agent_name="SpamClassifier",
system_prompt="Using the extracted features, classify the email as spam or not spam. Provide reasoning for your classification based on the features and patterns identified.",
llm=llm,
max_loops=1,
output_type=str,
# tools=[],
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
)
swarm = AgentRearrange(
flow=f"{agent1.agent_name} -> {agent2.agent_name} -> {agent3.agent_name}",
agents=[agent1, agent2, agent3],
logging_enabled=True,
max_loops=1,
)
# Task
task = """
Clean and prepare the incoming email for further analysis. Extract and normalize text content, and identify key metadata such as sender and subject.
Subject: Re: Important Update - Account Verification Needed!
Dear Kye,
We hope this email finds you well. Our records indicate that your account has not been verified within the last 12 months. To continue using our services without interruption, please verify your account details by clicking the link below:
Verify Your Account Now
Please note that failure to verify your account within 48 hours will result in temporary suspension. We value your security and privacy; hence, this step is necessary to ensure your account's safety.
If you have any questions or need assistance, feel free to contact our support team at [Support Email] or visit our Help Center.
Thank you for your prompt attention to this matter.
"""
swarm.run(task)

@@ -0,0 +1,226 @@
"""
Social Media Marketing team
Agents for different social media platforms like Twitter, LinkedIn, Instagram, Facebook, and TikTok.
Input: A topic or content to be posted on social media.
Output: A well-crafted post or caption for the specific social media platform.
Example:
"""
from swarms import Agent, OpenAIChat
# # Memory
# memory = ChromaDB(
# output_dir="social_media_marketing",
# docs_folder="docs",
# )
# Memory for instagram
# memory = ChromaDB(
# output_dir="social_media_marketing",
# docs_folder="docs",
# )
llm = OpenAIChat(max_tokens=4000)
# Twitter Agent Prompt
twitter_prompt = """
You are the Twitter agent. Your goal is to generate concise, engaging tweets that capture attention and convey the message effectively within 140 characters.
Think about the following when crafting tweets:
1. Clarity: Ensure the message is clear and easy to understand.
2. Engagement: Create content that encourages users to like, retweet, and reply.
3. Brevity: Keep the message within 140 characters without sacrificing the core message.
4. Language: Use simple, straightforward language that is accessible to a wide audience.
5. Tone: Maintain a tone that is appropriate for the brand or individual you are representing.
6. Action: If applicable, include a call to action that prompts user engagement.
7. Uniqueness: Make sure the tweet stands out in a user's feed, whether through a catchy phrase or a unique perspective.
8. Timing: Consider the time of day the tweet will be posted to maximize visibility and engagement.
The primary goal is to create impactful, self-contained messages that drive user engagement.
Example:
- Great teamwork leads to great results. Let's keep pushing forward and achieving our goals together!
"""
# LinkedIn Agent Prompt
linkedin_prompt = """
You are the LinkedIn agent. Your goal is to create professional, detailed, and informative posts suitable for a professional audience on LinkedIn.
Think about the following when crafting LinkedIn posts:
1. Professionalism: Use formal and professional language to establish credibility.
2. Insightfulness: Provide actionable insights and practical advice that are valuable to professionals in the industry.
3. Tone: Maintain a professional tone that reflects the expertise and seriousness of the topic.
4. Depth: Offer comprehensive information that covers the topic thoroughly and demonstrates deep understanding.
5. Engagement: Encourage professional interactions through thoughtful questions, discussions, and calls to action.
6. Value: Highlight the value and relevance of the content to the professional audience.
7. Networking: Foster a sense of community and networking among professionals by addressing shared challenges and opportunities.
Example:
- In today's fast-paced business environment, effective communication is more crucial than ever. Here are five strategies to enhance your communication skills and foster better collaboration within your team: [Insert detailed strategies]
"""
# Instagram Agent Prompt
instagram_prompt = """
You are the Instagram agent. Your goal is to craft captivating and visually appealing captions for Instagram posts.
Think about the following when crafting Instagram captions:
1. Visual Appeal: Complement the visual content effectively with engaging and descriptive text.
2. Storytelling: Use the caption to tell a story or provide context that enhances the viewer's connection to the image.
3. Engagement: Encourage interaction through questions, calls to action, or prompts for viewers to share their experiences.
4. Relatability: Use a friendly and relatable tone that resonates with the audience.
5. Clarity: Ensure the caption is clear and easy to read, avoiding complex language or jargon.
6. Timing: Consider the timing of the post to maximize visibility and engagement.
7. Creativity: Use creative language and unique perspectives to make the caption stand out.
The primary goal is to create engaging, story-driven captions that enhance the visual content and encourage user interaction.
Example:
- Capturing the beauty of a sunset is more than just taking a photo; it's about the memories we create and the moments we cherish. What's your favorite sunset memory?
"""
# Facebook Agent Prompt
facebook_prompt = """
You are the Facebook agent. Your goal is to create engaging and friendly posts that encourage interaction and community building on Facebook.
Think about the following when crafting Facebook posts:
1. Conversational Tone: Use a conversational and approachable tone to create a sense of community.
2. Engagement: Include elements that prompt comments, likes, and shares, such as questions or calls to action.
3. Relevance: Ensure the content is relevant and timely, addressing current events or trends.
4. Multimedia: Incorporate multimedia elements like photos, videos, or links to enhance the post and capture attention.
5. Interaction: Encourage user participation through interactive content like polls, quizzes, or discussions.
6. Clarity: Keep the message clear and straightforward, avoiding overly complex language.
7. Value: Provide value to the audience, whether through informative content, entertainment, or practical advice.
The primary goal is to create engaging, community-focused content that encourages user interaction and builds a sense of community.
Example:
- We're excited to announce our upcoming community event this weekend! Join us for a day of fun activities, great food, and an opportunity to connect with your neighbors. What are you most looking forward to?
"""
# TikTok Agent Prompt
tiktok_prompt = """
You are the TikTok agent. Your goal is to generate short, catchy captions for TikTok videos that use trendy language and hashtags.
Think about the following when crafting TikTok captions:
1. Catchiness: Create captions that are catchy and attention-grabbing, making viewers want to watch the video.
2. Trend Alignment: Use language and themes that align with current TikTok trends and challenges.
3. Brevity: Keep the captions short and to the point, ensuring they are easy to read quickly.
4. Engagement: Encourage viewers to like, share, and follow, using calls to action that prompt interaction.
5. Relatability: Use informal and relatable language that resonates with the TikTok audience.
6. Creativity: Be creative and playful with the captions, using humor or unique perspectives to stand out.
The primary goal is to create short, engaging captions that enhance the video content and encourage viewer interaction.
Example:
- Who knew learning could be this fun? Join us in our latest challenge and show us your moves! #LearningIsFun
"""
# Initialize agents with the prompts
twitter_agent = Agent(
agent_name="Twitter Editor",
system_prompt=twitter_prompt,
agent_description="Generate concise and engaging tweets.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="twitter_agent.json",
context_length=8192,
# long_term_memory=memory,
)
linkedin_agent = Agent(
agent_name="LinkedIn Editor",
system_prompt=linkedin_prompt,
agent_description="Generate professional and detailed LinkedIn posts.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="linkedin_agent.json",
context_length=8192,
# long_term_memory=memory,
)
instagram_agent = Agent(
agent_name="Instagram Editor",
system_prompt=instagram_prompt,
agent_description="Generate captivating and visually appealing Instagram captions.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="instagram_agent.json",
context_length=8192,
# long_term_memory=memory,
)
facebook_agent = Agent(
agent_name="Facebook Editor",
system_prompt=facebook_prompt,
agent_description="Generate engaging and friendly Facebook posts.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="facebook_agent.json",
context_length=8192,
# long_term_memory=memory,
)
tiktok_agent = Agent(
agent_name="TikTok Editor",
system_prompt=tiktok_prompt,
agent_description="Generate short and catchy TikTok captions.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="tiktok_agent.json",
context_length=8192,
# long_term_memory=memory,
)
# List of agents
agents = [
twitter_agent,
linkedin_agent,
instagram_agent,
facebook_agent,
tiktok_agent,
]
# Different Swarm Architectures
# swarm = MixtureOfAgents(
# agents=[twitter_agent, linkedin_agent, instagram_agent, facebook_agent, tiktok_agent],
# layers=1,
# # rules = "Don't use emojis or hashtags "
# )
# swarm = AgentRearrange(
# agents = [twitter_agent, linkedin_agent, instagram_agent, facebook_agent, tiktok_agent],
# flow = "LinkedIn Editor -> Twitter Editor, Instagram Editor, Facebook Editor, TikTok Editor"
# )
# Run the swarm
# swarm.run("Hello xPeople, We're watching the new Star Wars: The Acolyte show today! #TheAcolyte #StarWarsTheAcolyte #live")
task = """
Content: Problem solution Usage Metrics Trends:
Individual LLMs or AIs have 5 major problems: Context windows, hallucination, can only do 1 thing at a time, massive size, and an inability to naturally collaborate with other AIs. These problems hinder most enterprises from adoption. Enterprises cannot deploy just 1 AI into production because of these issues. In more than 95% of enterprise grade deployments using generative AI there are more than 2 AIs that are collaborating from different providers. The only viable solution to these 5 problems is multi-agent collaboration or the ability for AIs to work with each other. With multi-agent collaboration, there is lower hallucination, longer input windows, less cost, faster processing times, and they can do many things all at once. Then I'll go into the usage metrics we're seeing across the board from firms like JP Morgan, RBC, and more and how they're deploying thousands of agents.
"""
# Run through each agent to generate content
for agent in agents:
agent.run(task)

@@ -45,7 +45,7 @@ class ChromaDB(BaseVectorDatabase):
         metric: str = "cosine",
         output_dir: str = "swarms",
         limit_tokens: Optional[int] = 1000,
-        n_results: int = 3,
+        n_results: int = 1,
         docs_folder: str = None,
         verbose: bool = False,
         *args,
@@ -131,7 +131,7 @@ class ChromaDB(BaseVectorDatabase):
         query_text: str,
         *args,
         **kwargs,
-    ):
+    ) -> str:
         """
         Query documents from the ChromaDB collection.
@@ -143,13 +143,24 @@ class ChromaDB(BaseVectorDatabase):
             dict: The retrieved documents.
         """
         try:
+            logging.info(f"Querying documents for: {query_text}")
             docs = self.collection.query(
                 query_texts=[query_text],
                 n_results=self.n_results,
                 *args,
                 **kwargs,
             )["documents"]
-            return docs[0]
+            # Convert into a string
+            out = ""
+            for doc in docs:
+                out += f"{doc}\n"
+            # Display the retrieved document
+            display_markdown_message(f"Query: {query_text}")
+            display_markdown_message(f"Retrieved Document: {out}")
+            return out
         except Exception as e:
             raise Exception(f"Failed to query documents: {str(e)}")

@@ -0,0 +1,421 @@
### System Prompt for an Agent Generator
**System Name:** AgentGenerator
**Objective:** To generate specialized agents tailored to specific business problems, including defining their roles, tools, communication protocols, and workflows.
**Settings:**
- **Language Model:** GPT-4
- **Max Loops:** Auto
- **Autosave:** Enabled
- **Dynamic Temperature:** Enabled
- **Dashboard:** Disabled
- **Verbose:** Enabled
- **Streaming:** Enabled
- **Saved State Path:** "agent_generator_state.json"
- **Context Length:** 8192
**Core Functions:**
1. **Define Agent Specifications:**
- **agent_name**: The unique name of the agent.
- **system_prompt**: Detailed instructions defining the agent's behavior and purpose.
- **agent_description**: A brief description of what the agent is designed to do.
- **llm**: The language model used by the agent.
- **tools**: A list of tools the agent will use to perform its tasks.
- **max_loops**: The maximum number of iterations the agent can perform.
- **autosave**: A flag to enable or disable autosaving of the agent's state.
- **dynamic_temperature_enabled**: A flag to enable or disable dynamic temperature adjustment.
- **dashboard**: A flag to enable or disable the agent's dashboard.
- **verbose**: A flag to enable or disable verbose logging.
- **streaming_on**: A flag to enable or disable streaming output.
- **saved_state_path**: The file path to save the agent's state.
- **context_length**: The maximum length of the agent's context.
2. **Define Tools and Resources:**
- **Terminal Tool**: Execute terminal commands.
- **Browser Tool**: Perform web searches and browser automation.
- **File Editor Tool**: Create and edit files.
- **Database Tool**: Interact with databases.
- **APIs and Webhooks**: Connect with external APIs and handle webhooks.
3. **Communication Protocols:**
- **Type**: Define the communication type (e.g., synchronous, asynchronous).
- **Protocol**: Specify the messaging protocol (e.g., direct messaging, publish-subscribe).
- **Conflict Resolution**: Outline methods for resolving conflicts between agents.
4. **Workflow and Sequence:**
- **Input/Output Definitions**: Define the input and output for each agent.
- **Task Triggers**: Specify conditions that trigger each task.
- **Task Handoff**: Detail the handoff process between agents.
- **Monitoring and Feedback**: Implement mechanisms for monitoring progress and providing feedback.
5. **Scalability and Flexibility:**
- **Scalability**: Ensure the system can scale by adding or removing agents as needed.
- **Flexibility**: Design the system to handle dynamic changes in tasks and environments.
6. **Documentation and SOPs:**
- **Standard Operating Procedures (SOPs)**: Document the procedures each agent follows.
- **User Guides**: Provide detailed guides for users interacting with the agents.
- **API Documentation**: Detail the APIs and webhooks used by the agents.
## Usage Examples
```python
from swarms import Agent, OpenAIChat, ChromaDB, Anthropic
import subprocess
from pydantic import BaseModel
# Initialize ChromaDB client
chromadb = ChromaDB(
metric="cosine",
output="results",
docs_folder="docs",
)
# Create a schema for file operations
class FileOperationSchema(BaseModel):
file_path: str
content: str
file_operation_schema = FileOperationSchema(
file_path="plan.txt",
content="Plan to take over the world."
)
# Define tools
def terminal(code: str):
result = subprocess.run(code, shell=True, capture_output=True, text=True).stdout
return result
def browser(query: str):
import webbrowser
url = f"https://www.google.com/search?q={query}"
webbrowser.open(url)
return f"Searching for {query} in the browser."
def create_file(file_path: str, content: str):
with open(file_path, "w") as file:
file.write(content)
return f"File {file_path} created successfully."
def file_editor(file_path: str, mode: str, content: str):
with open(file_path, mode) as file:
file.write(content)
return f"File {file_path} edited successfully."
# Initialize the Agent Generator
agent_generator = Agent(
agent_name="AgentGenerator",
system_prompt=(
"You are an agent generator. Your task is to create specialized agents "
"for various business problems. Each agent must have a unique name, a clear "
"system prompt, a detailed description, necessary tools, and proper configurations. "
"Ensure that the generated agents can communicate effectively and handle their tasks efficiently."
),
agent_description="Generate specialized agents for specific business problems.",
llm=OpenAIChat(),
max_loops="auto",
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="agent_generator_state.json",
context_length=8192,
tools=[terminal, browser, create_file, file_editor],
long_term_memory=chromadb,
output_type=file_operation_schema,
metadata_output_type="json",
)
# Generate a specialized agent
def create_tiktok_agent():
tiktok_agent = Agent(
agent_name="TikTok Editor",
system_prompt="Generate short and catchy TikTok captions.",
agent_description="Create engaging captions for TikTok videos.",
llm=OpenAIChat(),
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="tiktok_agent.json",
context_length=8192,
)
return tiktok_agent
# Example usage of the Agent Generator
new_agent = create_tiktok_agent()
print(new_agent.agent_description)
```
**Execution:**
- Use the `AgentGenerator` to create new agents by defining their specifications and initializing them with the necessary tools and configurations.
- Ensure the generated agents are saved and can be reloaded for future tasks.
- Monitor and update the agents as needed to adapt to changing business requirements.
By following this comprehensive system prompt, the AgentGenerator will efficiently create specialized agents tailored to specific business needs, ensuring effective task execution and seamless communication.
### TikTok Agent
```python
from swarms import Agent, OpenAIChat
tiktok_agent = Agent(
agent_name="TikTok Editor",
system_prompt=tiktok_prompt,
agent_description="Generate short and catchy TikTok captions.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="tiktok_agent.json",
context_length=8192,
)
```
## Accountant Agent
```python
from swarms import Agent, OpenAIChat
def calculate_profit(revenue: float, expenses: float):
"""
Calculates the profit by subtracting expenses from revenue.
Args:
revenue (float): The total revenue.
expenses (float): The total expenses.
Returns:
float: The calculated profit.
"""
return revenue - expenses
def generate_report(company_name: str, profit: float):
"""
Generates a report for a company's profit.
Args:
company_name (str): The name of the company.
profit (float): The calculated profit.
Returns:
str: The report for the company's profit.
"""
return f"The profit for {company_name} is ${profit}."
# Initialize the agent
agent = Agent(
agent_name="Accounting Assistant",
system_prompt="You're the accounting agent, your purpose is to generate a profit report for a company!",
agent_description="Generate a profit report for a company!",
llm=OpenAIChat(),
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
# interactive=True, # Set to False to disable interactive mode
saved_state_path="accounting_agent.json",
# tools=[calculate_profit, generate_report],
# docs_folder="docs",
# pdf_path="docs/accounting_agent.pdf",
# sop="Calculate the profit for a company.",
# sop_list=["Calculate the profit for a company."],
# user_name="User",
# # docs=
# # docs_folder="docs",
# retry_attempts=3,
# context_length=1000,
# tool_schema = dict
)
agent.run(
"Calculate the profit for Tesla with a revenue of $100,000 and expenses of $50,000."
)
```
## MultiOn Example
```python
from swarms import Agent, AgentRearrange, OpenAIChat
from swarms.agents.multion_wrapper import MultiOnAgent
model = MultiOnAgent(
url="https://tesla.com",
)
llm = OpenAIChat()
def browser_automation(task: str):
"""
Run a task on the browser automation agent.
Args:
task (str): The task to be executed on the browser automation agent.
"""
out = model.run(task)
return out
# Purpose = To detect email spam using three different agents
agent1 = Agent(
agent_name="CyberTruckBuyer1",
system_prompt="Find the best deal on a Cyber Truck and provide your reasoning",
llm=llm,
max_loops=1,
# output_type=str,
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
tools=[browser_automation],
)
from swarms import Agent, Anthropic, tool, ChromaDB
import subprocess
from pydantic import BaseModel
# Initialize the chromadb client
chromadb = ChromaDB(
metric="cosine",
output="results",
docs_folder="docs",
)
# Create a schema for the code revision tool
class CodeRevisionSchema(BaseModel):
code: str = None
revision: str = None
# Initialize the schema
tool_schema = CodeRevisionSchema(
code="print('Hello, World!')",
revision="print('What is 2+2')",
)
# Model
llm = Anthropic(
temperature=0.1,
)
# Tools
def terminal(
code: str,
):
"""
Run code in the terminal.
Args:
code (str): The code to run in the terminal.
Returns:
str: The output of the code.
"""
out = subprocess.run(
code, shell=True, capture_output=True, text=True
).stdout
return str(out)
def browser(query: str):
"""
Search the query in the browser with the `browser` tool.
Args:
query (str): The query to search in the browser.
Returns:
str: The search results.
"""
import webbrowser
url = f"https://www.google.com/search?q={query}"
webbrowser.open(url)
return f"Searching for {query} in the browser."
def create_file(file_path: str, content: str):
"""
Create a file using the file editor tool.
Args:
file_path (str): The path to the file.
content (str): The content to write to the file.
Returns:
str: The result of the file creation operation.
"""
with open(file_path, "w") as file:
file.write(content)
return f"File {file_path} created successfully."
def file_editor(file_path: str, mode: str, content: str):
"""
Edit a file using the file editor tool.
Args:
file_path (str): The path to the file.
mode (str): The mode to open the file in.
content (str): The content to write to the file.
Returns:
str: The result of the file editing operation.
"""
with open(file_path, mode) as file:
file.write(content)
return f"File {file_path} edited successfully."
# Agent
agent = Agent(
agent_name="Devin",
system_prompt=(
"Autonomous agent that can interact with humans and other"
" agents. Be Helpful and Kind. Use the tools provided to"
" assist the user. Return all code in markdown format."
),
llm=llm,
max_loops="auto",
autosave=True,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
interactive=True,
tools=[terminal, browser, file_editor, create_file],
long_term_memory=chromadb,
output_type=tool_schema, # or dict, or str
metadata_output_type="json",
# List of schemas that the agent can handle
list_tool_schemas=[tool_schema],
function_calling_format_type="OpenAI",
function_calling_type="json", # or soon yaml
)
# Run the agent
out = agent.run("Create a new file for a plan to take over the world.")
print(out)
```

@@ -0,0 +1,546 @@
System Prompt
You're the swarm orchestrator agent
**Objective:** Your task is to intake a business problem or activity and create a swarm of specialized LLM agents that can efficiently solve or automate the given problem. You will define the number of agents, specify the tools each agent needs, and describe how they need to work together, including the communication protocols.
**Instructions:**
1. **Intake Business Problem:**
- Receive a detailed description of the business problem or activity to automate.
- Clarify the objectives, constraints, and expected outcomes of the problem.
- Identify key components and sub-tasks within the problem.
2. **Agent Design:**
- Based on the problem, determine the number and types of specialized LLM agents required.
- For each agent, specify:
- The specific task or role it will perform.
- The tools and resources it needs to perform its task.
- Any prerequisite knowledge or data it must have access to.
- Ensure that the collective capabilities of the agents cover all aspects of the problem.
3. **Coordination and Communication:**
- Define how the agents will communicate and coordinate with each other.
- Choose the type of communication (e.g., synchronous, asynchronous, broadcast, direct messaging).
- Describe the protocol for information sharing, conflict resolution, and task handoff.
4. **Workflow Design:**
- Outline the workflow or sequence of actions the agents will follow.
- Define the input and output for each agent.
- Specify the triggers and conditions for transitions between agents or tasks.
- Ensure there are feedback loops and monitoring mechanisms to track progress and performance.
5. **Scalability and Flexibility:**
- Design the system to be scalable, allowing for the addition or removal of agents as needed.
- Ensure flexibility to handle dynamic changes in the problem or environment.
6. **Output Specification:**
- Provide a detailed plan including:
- The number of agents and their specific roles.
- The tools and resources each agent will use.
- The communication and coordination strategy.
- The workflow and sequence of actions.
- Include a diagram or flowchart if necessary to visualize the system.
**Example Structure:**
**Business Problem:** Automate customer support for an e-commerce platform.
**Agents and Roles:**
1. **Customer Query Classifier Agent:**
- Task: Classify incoming customer queries into predefined categories.
- Tools: Natural language processing toolkit, pre-trained classification model.
- Communication: Receives raw queries, sends classified queries to relevant agents.
2. **Order Status Agent:**
- Task: Provide order status updates to customers.
- Tools: Access to order database, query processing toolkit.
- Communication: Receives classified queries about order status, responds with relevant information.
3. **Product Recommendation Agent:**
- Task: Suggest products to customers based on their query and browsing history.
- Tools: Recommendation engine, access to product database.
- Communication: Receives classified queries about product recommendations, sends personalized suggestions.
4. **Technical Support Agent:**
- Task: Assist customers with technical issues.
- Tools: Access to technical support database, troubleshooting toolkit.
- Communication: Receives classified queries about technical issues, provides solutions or escalation.
**Communication Strategy:**
- **Type:** Asynchronous communication through a central message broker.
- **Protocol:** Agents publish and subscribe to specific topics related to their tasks.
- **Conflict Resolution:** If multiple agents need to handle the same query, a priority protocol is in place to determine the primary responder.
**Workflow:**
1. Customer Query Classifier Agent receives and classifies the query.
2. Classified query is routed to the appropriate specialized agent.
3. Specialized agent processes the query and sends a response.
4. If needed, the response triggers further actions from other agents.
**Scalability and Flexibility:**
- Agents can be added or removed based on query volume and complexity.
- System adapts to changes in query types and business needs.
**Output Plan:**
- Diagram illustrating agent roles and communication flow.
- Detailed description of each agent's tasks, tools, and communication methods.
- Workflow sequence from query intake to resolution.
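As a rough, non-authoritative sketch, the customer-support example above can be wired together with the same `Agent` and `AgentRearrange` primitives used elsewhere in this commit. The agent names, prompts, and the purely sequential flow below are placeholders; a real deployment would attach the tools listed above and add conditional routing.

```python
from swarms import Agent, OpenAIChat, AgentRearrange

llm = OpenAIChat()

# One agent per role from the example structure above (prompts abbreviated)
classifier = Agent(
    agent_name="QueryClassifier",
    system_prompt="Classify the incoming customer query into a predefined category.",
    llm=llm,
    max_loops=1,
)
order_status = Agent(
    agent_name="OrderStatus",
    system_prompt="Answer order-status questions based on the classified query.",
    llm=llm,
    max_loops=1,
)
recommender = Agent(
    agent_name="ProductRecommender",
    system_prompt="Suggest relevant products based on the query and browsing history.",
    llm=llm,
    max_loops=1,
)
tech_support = Agent(
    agent_name="TechnicalSupport",
    system_prompt="Resolve or escalate technical issues raised in the query.",
    llm=llm,
    max_loops=1,
)

# Sequential flow for illustration only; production routing would be conditional
swarm = AgentRearrange(
    flow="QueryClassifier -> OrderStatus -> ProductRecommender -> TechnicalSupport",
    agents=[classifier, order_status, recommender, tech_support],
    max_loops=1,
)

swarm.run("Where is my order #12345, and can you suggest a compatible charger?")
```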
User: Generate a swarm of agents for social media platforms
System: To create a swarm of agents for personalizing and styling messages for various social media platforms like Twitter, LinkedIn, Facebook, and others, we'll need to consider the specific requirements and characteristics of each platform. Here's a detailed plan for this swarm system:
### Business Problem:
Automate and personalize the styling of messages for multiple social media platforms, ensuring that each message adheres to the platform's unique formatting and stylistic guidelines.
### Objectives:
- Automatically style and personalize messages for various social media platforms.
- Ensure that messages are optimized for engagement and readability on each platform.
- Maintain a cohesive voice and tone across all platforms while adapting to specific requirements.
### Agents and Roles:
1. **Message Intake Agent:**
- **Task:** Receive raw messages and initial styling guidelines.
- **Tools:** Text processing toolkit.
- **Communication:** Receives input from the user or other systems, sends raw messages to the Platform Classifier Agent.
2. **Platform Classifier Agent:**
- **Task:** Identify and classify the target social media platform for each message.
- **Tools:** Classification algorithm, platform identification model.
- **Communication:** Receives raw messages, sends classified messages to the appropriate Platform-Specific Styling Agents.
3. **Twitter Styling Agent:**
- **Task:** Style messages according to Twitter's format (character limit, hashtags, mentions, etc.).
- **Tools:** Twitter API, text summarization tool, hashtag and mention optimizer.
- **Communication:** Receives classified messages for Twitter, sends styled messages to the Message Output Agent.
4. **LinkedIn Styling Agent:**
- **Task:** Style messages for LinkedIn (professional tone, length considerations, hashtag usage, etc.).
- **Tools:** LinkedIn API, professional tone adjustment tool, keyword optimizer.
- **Communication:** Receives classified messages for LinkedIn, sends styled messages to the Message Output Agent.
5. **Facebook Styling Agent:**
- **Task:** Style messages for Facebook (longer content, multimedia integration, etc.).
- **Tools:** Facebook API, multimedia integration tool, engagement optimizer.
- **Communication:** Receives classified messages for Facebook, sends styled messages to the Message Output Agent.
6. **Instagram Styling Agent:**
- **Task:** Style messages for Instagram (visual focus, hashtags, short captions, etc.).
- **Tools:** Instagram API, image processing tool, hashtag optimizer.
- **Communication:** Receives classified messages for Instagram, sends styled messages to the Message Output Agent.
7. **General Social Media Styling Agent:**
- **Task:** Style messages for other platforms (e.g., Pinterest, TikTok, etc.) with their specific guidelines.
- **Tools:** Relevant APIs, platform-specific tools.
- **Communication:** Receives classified messages for other platforms, sends styled messages to the Message Output Agent.
8. **Message Output Agent:**
- **Task:** Compile and output the styled messages for review or direct posting.
- **Tools:** Aggregation and formatting tool.
- **Communication:** Receives styled messages from all platform-specific agents, sends final output to the user or posting system.
### Communication Strategy:
- **Type:** Asynchronous communication through a central message broker.
- **Protocol:** Agents publish and subscribe to specific topics related to their tasks.
- **Conflict Resolution:** If multiple agents handle similar tasks, a priority protocol determines the primary agent for each message.
### Workflow:
1. **Message Intake Agent** receives raw messages and initial styling guidelines.
2. **Platform Classifier Agent** classifies the target platform for each message.
3. Classified messages are routed to the appropriate **Platform-Specific Styling Agent** (Twitter, LinkedIn, Facebook, Instagram, or General Social Media).
4. Each **Platform-Specific Styling Agent** processes the message according to the platform's guidelines and stylistic requirements.
5. Styled messages are sent to the **Message Output Agent**.
6. **Message Output Agent** compiles and formats the final messages for review or direct posting.
### Scalability and Flexibility:
- **Scalability:** New Platform-Specific Styling Agents can be added for additional social media platforms as needed.
- **Flexibility:** The system adapts to changes in platform guidelines and user preferences.
### Output Plan:
- **Diagram illustrating agent roles and communication flow.**
- **Detailed description of each agent's tasks, tools, and communication methods.**
- **Workflow sequence from message intake to final output.**
### Diagram:
```plaintext
Message Intake Agent --> Platform Classifier Agent --> [Twitter Styling Agent]
--> [LinkedIn Styling Agent]
--> [Facebook Styling Agent]
--> [Instagram Styling Agent]
--> [General Social Media Styling Agent] --> Message Output Agent --> Final Output
```
This system ensures that messages are tailored to each social media platform, optimizing engagement and readability while maintaining a consistent voice and tone.
User: Now generate the code for all of these agents with specialized roles:
System: Here is the code for the swarm:
```python
"""
Social Media Marketing team
Agents for different social media platforms like Twitter, LinkedIn, Instagram, Facebook, and TikTok.
Input: A topic or content to be posted on social media.
Output: A well-crafted post or caption for the specific social media platform.
Example:
"""
from swarms import Agent, OpenAIChat
# # Memory
# memory = ChromaDB(
# output_dir="social_media_marketing",
# docs_folder="docs",
# )
# Memory for instagram
# memory = ChromaDB(
# output_dir="social_media_marketing",
# docs_folder="docs",
# )
llm = OpenAIChat(max_tokens=4000)
# Twitter Agent Prompt
twitter_prompt = """
You are the Twitter agent. Your goal is to generate concise, engaging tweets that capture attention and convey the message effectively within 140 characters.
Think about the following when crafting tweets:
1. Clarity: Ensure the message is clear and easy to understand.
2. Engagement: Create content that encourages users to like, retweet, and reply.
3. Brevity: Keep the message within 140 characters without sacrificing the core message.
4. Language: Use simple, straightforward language that is accessible to a wide audience.
5. Tone: Maintain a tone that is appropriate for the brand or individual you are representing.
6. Action: If applicable, include a call to action that prompts user engagement.
7. Uniqueness: Make sure the tweet stands out in a user's feed, whether through a catchy phrase or a unique perspective.
8. Timing: Consider the time of day the tweet will be posted to maximize visibility and engagement.
The primary goal is to create impactful, self-contained messages that drive user engagement.
Example:
- Great teamwork leads to great results. Let's keep pushing forward and achieving our goals together!
"""
# LinkedIn Agent Prompt
linkedin_prompt = """
You are the LinkedIn agent. Your goal is to create professional, detailed, and informative posts suitable for a professional audience on LinkedIn.
Think about the following when crafting LinkedIn posts:
1. Professionalism: Use formal and professional language to establish credibility.
2. Insightfulness: Provide actionable insights and practical advice that are valuable to professionals in the industry.
3. Tone: Maintain a professional tone that reflects the expertise and seriousness of the topic.
4. Depth: Offer comprehensive information that covers the topic thoroughly and demonstrates deep understanding.
5. Engagement: Encourage professional interactions through thoughtful questions, discussions, and calls to action.
6. Value: Highlight the value and relevance of the content to the professional audience.
7. Networking: Foster a sense of community and networking among professionals by addressing shared challenges and opportunities.
Example:
- In today's fast-paced business environment, effective communication is more crucial than ever. Here are five strategies to enhance your communication skills and foster better collaboration within your team: [Insert detailed strategies]
"""
# Instagram Agent Prompt
instagram_prompt = """
You are the Instagram agent. Your goal is to craft captivating and visually appealing captions for Instagram posts.
Think about the following when crafting Instagram captions:
1. Visual Appeal: Complement the visual content effectively with engaging and descriptive text.
2. Storytelling: Use the caption to tell a story or provide context that enhances the viewer's connection to the image.
3. Engagement: Encourage interaction through questions, calls to action, or prompts for viewers to share their experiences.
4. Relatability: Use a friendly and relatable tone that resonates with the audience.
5. Clarity: Ensure the caption is clear and easy to read, avoiding complex language or jargon.
6. Timing: Consider the timing of the post to maximize visibility and engagement.
7. Creativity: Use creative language and unique perspectives to make the caption stand out.
The primary goal is to create engaging, story-driven captions that enhance the visual content and encourage user interaction.
Example:
- Capturing the beauty of a sunset is more than just taking a photo; it's about the memories we create and the moments we cherish. What's your favorite sunset memory?
"""
# Facebook Agent Prompt
facebook_prompt = """
You are the Facebook agent. Your goal is to create engaging and friendly posts that encourage interaction and community building on Facebook.
Think about the following when crafting Facebook posts:
1. Conversational Tone: Use a conversational and approachable tone to create a sense of community.
2. Engagement: Include elements that prompt comments, likes, and shares, such as questions or calls to action.
3. Relevance: Ensure the content is relevant and timely, addressing current events or trends.
4. Multimedia: Incorporate multimedia elements like photos, videos, or links to enhance the post and capture attention.
5. Interaction: Encourage user participation through interactive content like polls, quizzes, or discussions.
6. Clarity: Keep the message clear and straightforward, avoiding overly complex language.
7. Value: Provide value to the audience, whether through informative content, entertainment, or practical advice.
The primary goal is to create engaging, community-focused content that encourages user interaction and builds a sense of community.
Example:
- We're excited to announce our upcoming community event this weekend! Join us for a day of fun activities, great food, and an opportunity to connect with your neighbors. What are you most looking forward to?
"""
# TikTok Agent Prompt
tiktok_prompt = """
You are the TikTok agent. Your goal is to generate short, catchy captions for TikTok videos that use trendy language and hashtags.
Think about the following when crafting TikTok captions:
1. Catchiness: Create captions that are catchy and attention-grabbing, making viewers want to watch the video.
2. Trend Alignment: Use language and themes that align with current TikTok trends and challenges.
3. Brevity: Keep the captions short and to the point, ensuring they are easy to read quickly.
4. Engagement: Encourage viewers to like, share, and follow, using calls to action that prompt interaction.
5. Relatability: Use informal and relatable language that resonates with the TikTok audience.
6. Creativity: Be creative and playful with the captions, using humor or unique perspectives to stand out.
The primary goal is to create short, engaging captions that enhance the video content and encourage viewer interaction.
Example:
- Who knew learning could be this fun? Join us in our latest challenge and show us your moves! #LearningIsFun
"""
# Initialize agents with the prompts
twitter_agent = Agent(
agent_name="Twitter Editor",
system_prompt=twitter_prompt,
agent_description="Generate concise and engaging tweets.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="twitter_agent.json",
context_length=8192,
# long_term_memory=memory,
)
linkedin_agent = Agent(
agent_name="LinkedIn Editor",
system_prompt=linkedin_prompt,
agent_description="Generate professional and detailed LinkedIn posts.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="linkedin_agent.json",
context_length=8192,
# long_term_memory=memory,
)
instagram_agent = Agent(
agent_name="Instagram Editor",
system_prompt=instagram_prompt,
agent_description="Generate captivating and visually appealing Instagram captions.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="instagram_agent.json",
context_length=8192,
# long_term_memory=memory,
)
facebook_agent = Agent(
agent_name="Facebook Editor",
system_prompt=facebook_prompt,
agent_description="Generate engaging and friendly Facebook posts.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="facebook_agent.json",
context_length=8192,
# long_term_memory=memory,
)
tiktok_agent = Agent(
agent_name="TikTok Editor",
system_prompt=tiktok_prompt,
agent_description="Generate short and catchy TikTok captions.",
llm=llm,
max_loops=1,
autosave=True,
dynamic_temperature_enabled=True,
dashboard=False,
verbose=True,
streaming_on=True,
saved_state_path="tiktok_agent.json",
context_length=8192,
# long_term_memory=memory,
)
# List of agents
agents = [
twitter_agent,
linkedin_agent,
instagram_agent,
facebook_agent,
tiktok_agent,
]
# Different Swarm Architectures
# swarm = MixtureOfAgents(
# agents=[twitter_agent, linkedin_agent, instagram_agent, facebook_agent, tiktok_agent],
# layers=1,
# # rules = "Don't use emojis or hashtags "
# )
# swarm = AgentRearrange(
# agents = [twitter_agent, linkedin_agent, instagram_agent, facebook_agent, tiktok_agent],
# flow = "LinkedIn Editor -> Twitter Editor, Instagram Editor, Facebook Editor, TikTok Editor"
# )
# Run the swarm
# swarm.run("Hello xPeople, We're watching the new Star Wars: The Acolyte show today! #TheAcolyte #StarWarsTheAcolyte #live")
task = """
Content: Problem → solution → Usage Metrics → Trends:
Individual LLMs or AIs have 5 major problems: Context windows, hallucination, can only do 1 thing at a time, massive size, and an inability to naturally collaborate with other AIs. These problems hinder most enterprises from adoption. Enterprises cannot deploy just 1 AI into production because of these issues. In more than 95% of enterprise grade deployments using generative AI there are more than 2 AIs that are collaborating from different providers. The only viable solution to these 5 problems is multi-agent collaboration or the ability for AIs to work with each other. With multi-agent collaboration, there is lower hallucination, longer input windows, less cost, faster processing times, and they can do many things all at once. Then I'll go into the usage metrics we're seeing across the board from firms like JP Morgan, RBC, and more and how they're deploying thousands of agents.
"""
# Run through each agent to generate content
for agent in agents:
agent.run(task)
```
User: Now let's create a swarm of agents for phishing detection
System:
To create a swarm of agents for preprocessing emails, extracting features, and classifying spam, we'll design a system with three specialized agents. Here is the detailed plan for this swarm system:
### Business Problem:
Automate the process of email preprocessing, feature extraction, and spam classification to improve efficiency and accuracy in identifying spam emails.
### Objectives:
- Preprocess incoming emails to standardize and clean the data.
- Extract relevant features from the emails for analysis.
- Classify emails as spam or not spam using the extracted features.
### Agents and Roles:
1. **Email Preprocessor Agent:**
- **Task:** Clean and standardize incoming emails.
- **Tools:** Text processing toolkit, email parsing library.
- **Prerequisite Knowledge:** Email formats (e.g., MIME), common preprocessing techniques.
- **Communication:** Receives raw emails, sends preprocessed emails to the Feature Extractor Agent.
2. **Feature Extractor Agent:**
- **Task:** Extract relevant features from preprocessed emails.
- **Tools:** Feature extraction library, natural language processing toolkit.
- **Prerequisite Knowledge:** Common features for email analysis (e.g., word frequencies, header information).
- **Communication:** Receives preprocessed emails, sends extracted features to the Spam Classifier Agent.
3. **Spam Classifier Agent:**
- **Task:** Classify emails as spam or not spam using the extracted features.
- **Tools:** Machine learning model (e.g., logistic regression, decision tree, or neural network), classification algorithm.
- **Prerequisite Knowledge:** Machine learning techniques for spam detection, labeled training data for model training.
- **Communication:** Receives extracted features, sends classification results to the output system.
### Communication Strategy:
- **Type:** Asynchronous communication through a central message broker (a minimal sketch follows this list).
- **Protocol:** Agents publish and subscribe to specific topics related to their tasks.
- **Conflict Resolution:** Not applicable as each agent has a distinct task without overlap.
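The strategy above calls for topic-based publish/subscribe through a central broker, whereas the Swarms code later in this session wires the agents into a sequential `AgentRearrange` flow instead. As a rough illustration only, here is a minimal, framework-free sketch of the broker pattern using an in-process queue per topic; the topic names and toy worker functions are assumptions for the example, not part of the Swarms API.
```python
import queue
import threading


class MessageBroker:
    """Tiny in-process stand-in for a real message broker (e.g. Redis or RabbitMQ)."""

    def __init__(self):
        self.topics: dict = {}

    def publish(self, topic: str, message: str) -> None:
        self.topics.setdefault(topic, queue.Queue()).put(message)

    def subscribe(self, topic: str) -> queue.Queue:
        return self.topics.setdefault(topic, queue.Queue())


def preprocessor(broker: MessageBroker) -> None:
    # Clean and standardize the raw email, then hand it to the next stage.
    raw = broker.subscribe("raw_emails").get()
    broker.publish("preprocessed_emails", raw.strip().lower())


def feature_extractor(broker: MessageBroker) -> None:
    # Derive simple features from the preprocessed email.
    email = broker.subscribe("preprocessed_emails").get()
    broker.publish("features", f"word_count={len(email.split())}")


def spam_classifier(broker: MessageBroker) -> None:
    # Classify based on the extracted features (placeholder rule).
    features = broker.subscribe("features").get()
    broker.publish("results", f"classification=spam, based on {features}")


broker = MessageBroker()
broker.publish("raw_emails", "  WIN $1,000 NOW!! Claim your prize today.  ")

# Each stage runs on its own thread and communicates only through topics.
for worker in (preprocessor, feature_extractor, spam_classifier):
    t = threading.Thread(target=worker, args=(broker,))
    t.start()
    t.join()

print(broker.subscribe("results").get())
```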
### Workflow:
1. **Email Preprocessor Agent** receives raw emails, cleans and standardizes the data, and sends preprocessed emails to the Feature Extractor Agent.
2. **Feature Extractor Agent** receives preprocessed emails, extracts relevant features, and sends these features to the Spam Classifier Agent.
3. **Spam Classifier Agent** receives the extracted features, classifies the emails as spam or not spam, and sends the classification results to the output system.
### Scalability and Flexibility:
- **Scalability:** The system can handle increased email volume by adding more instances of each agent.
- **Flexibility:** The system can adapt to new spam patterns by updating the preprocessing rules, feature extraction methods, and classification model.
### Output Plan:
- **Diagram illustrating agent roles and communication flow.**
- **Detailed description of each agent's tasks, tools, and communication methods.**
- **Workflow sequence from email intake to classification result output.**
### Diagram:
```plaintext
Raw Emails --> Email Preprocessor Agent --> Preprocessed Emails --> Feature Extractor Agent --> Extracted Features --> Spam Classifier Agent --> Classification Results
```
This system ensures that emails are efficiently processed, analyzed, and classified, enabling effective spam detection with high accuracy. Each agent focuses on a specific task, leveraging specialized tools and knowledge to contribute to the overall goal.
System: Here is the code for the phishing detection swarm:
```python
import os
from swarms import OpenAIChat, Agent, AgentRearrange
llm = OpenAIChat(
openai_api_key=os.getenv("OPENAI_API_KEY"),
max_tokens=150,
)
# Purpose = To detect email spam using three different agents
agent1 = Agent(
agent_name="EmailPreprocessor",
system_prompt="Clean and prepare the incoming email for further analysis. Extract and normalize text content, and identify key metadata such as sender and subject.",
llm=llm,
max_loops=1,
output_type=str,
# tools=[],
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
)
agent2 = Agent(
agent_name="FeatureExtractor",
system_prompt="Analyze the prepared email and extract relevant features that can help in spam detection, such as keywords, sender reputation, and email structure.",
llm=llm,
max_loops=1,
output_type=str,
# tools=[],
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
)
agent3 = Agent(
agent_name="SpamClassifier",
system_prompt="Using the extracted features, classify the email as spam or not spam. Provide reasoning for your classification based on the features and patterns identified.",
llm=llm,
max_loops=1,
output_type=str,
# tools=[],
metadata="json",
function_calling_format_type="OpenAI",
function_calling_type="json",
streaming_on=True,
)
swarm = AgentRearrange(
flow=f"{agent1.agent_name} -> {agent2.agent_name} -> {agent3.agent_name}",
agents=[agent1, agent2, agent3],
logging_enabled=True,
max_loops=1,
)
# Task
task = """
Clean and prepare the incoming email for further analysis. Extract and normalize text content, and identify key metadata such as sender and subject.
Send me 1,000$ to my bank account. I am a Nigerian prince and I need your help to transfer the money. Please provide your bank details and personal information to proceed with the transaction.
"""
swarm.run(
task
)
```

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry] [tool.poetry]
name = "swarms" name = "swarms"
version = "5.2.0" version = "5.2.3"
description = "Swarms - Pytorch" description = "Swarms - Pytorch"
license = "MIT" license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"] authors = ["Kye Gomez <kye@apac.ai>"]
@ -53,6 +53,7 @@ docstring_parser = "0.16"
fastapi = "*" fastapi = "*"
openai = ">=1.30.1,<2.0" openai = ">=1.30.1,<2.0"
termcolor = "*" termcolor = "*"
tiktoken = "*"
[tool.poetry.group.lint.dependencies] [tool.poetry.group.lint.dependencies]

@ -1,6 +1,3 @@
# pip3 install multion
# pip3 install swarms
from multion.client import MultiOn
import os import os
from swarms.models.base_llm import BaseLLM from swarms.models.base_llm import BaseLLM
@ -47,6 +44,14 @@ class MultiOnAgent(BaseLLM):
): ):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.name = name self.name = name
try:
from multion.client import MultiOn
except ImportError:
raise ImportError(
"The MultiOn package is not installed. Please install it using 'pip install multion'."
)
self.client = MultiOn(api_key=api_key) self.client = MultiOn(api_key=api_key)
self.url = url self.url = url
self.system_prompt = system_prompt self.system_prompt = system_prompt
@ -67,9 +72,8 @@ class MultiOnAgent(BaseLLM):
local=True, local=True,
max_steps=self.max_steps, max_steps=self.max_steps,
) )
# response = response.json() # response = response.json()
# print(response.message) # print(response.message)
return str(response.message) return str(response.message)

@ -1,7 +1,9 @@
from swarms.artifacts.base_artifact import BaseArtifact from swarms.artifacts.base_artifact import BaseArtifact
from swarms.artifacts.text_artifact import TextArtifact from swarms.artifacts.text_artifact import TextArtifact
from swarms.artifacts.artifact_main import Artifact
__all__ = [ __all__ = [
"BaseArtifact", "BaseArtifact",
"TextArtifact", "TextArtifact",
"Artifact",
] ]

@ -1,4 +1,5 @@
from abc import ABC from abc import ABC
from swarms.utils.loguru_logger import logger
class BaseVectorDatabase(ABC): class BaseVectorDatabase(ABC):
@ -112,3 +113,37 @@ class BaseVectorDatabase(ABC):
id (int): The ID of the record to be deleted. id (int): The ID of the record to be deleted.
""" """
def print_all(self):
"""
Print all records in the database.
This method prints all records in the specified table in the database.
"""
pass
def log_query(self, query: str = None):
"""
Log the query.
This method logs the query that was executed on the database.
Parameters:
query (str): The query that was executed.
"""
logger.info(f"Query: {query}")
def log_retrieved_data(self, data: list = None):
"""
Log the retrieved data.
This method logs the data that was retrieved from the database.
Parameters:
data (dict): The data that was retrieved.
"""
for d in data:
logger.info(f"Retrieved Data: {d}")

@ -0,0 +1,54 @@
import tiktoken
class TikTokenizer:
def __init__(
self,
model_name: str = "gpt-4o",
):
"""
Initializes a TikTokenizer object.
Args:
model_name (str, optional): The name of the model to use for tokenization. Defaults to "gpt-4o".
"""
try:
self.tokenizer = tiktoken.encoding_for_model(model_name)
except Exception as e:
raise ValueError(
f"Failed to initialize tokenizer with model '{model_name}': {str(e)}"
)
def len(self, string: str) -> int:
"""
Returns the number of tokens in a text string.
Args:
string (str): The input text string.
Returns:
int: The number of tokens in the text string.
"""
try:
num_tokens = len(self.tokenizer.encode(string))
print(f"Number of tokens: {num_tokens}")
return num_tokens
except Exception as e:
raise ValueError(f"Failed to tokenize string: {str(e)}")
def count_tokens(self, string: str) -> int:
"""
Returns the number of tokens in a text string.
Args:
string (str): The input text string.
Returns:
int: The number of tokens in the text string.
"""
try:
num_tokens = len(self.tokenizer.encode(string))
print(f"Number of tokens: {num_tokens}")
return num_tokens
except Exception as e:
raise ValueError(f"Failed to count tokens: {str(e)}")

@ -45,7 +45,8 @@ from swarms.structs.sequential_workflow import SequentialWorkflow
# New Swarms # New Swarms
from swarms.structs.swarm_load_balancer import AgentLoadBalancer from swarms.structs.swarm_load_balancer import AgentLoadBalancer
from swarms.structs.swarm_net import SwarmNetwork
# from swarms.structs.swarm_net import SwarmNetwork
from swarms.structs.swarming_architectures import ( from swarms.structs.swarming_architectures import (
broadcast, broadcast,
circular_swarm, circular_swarm,
@ -87,6 +88,8 @@ from swarms.structs.yaml_model import (
pydantic_type_to_yaml_schema, pydantic_type_to_yaml_schema,
) )
from swarms.structs.mixture_of_agents import MixtureOfAgents from swarms.structs.mixture_of_agents import MixtureOfAgents
from swarms.structs.graph_workflow import GraphWorkflow
__all__ = [ __all__ = [
"Agent", "Agent",
@ -120,7 +123,7 @@ __all__ = [
"TaskRequestBody", "TaskRequestBody",
"SequentialWorkflow", "SequentialWorkflow",
"Step", "Step",
"SwarmNetwork", # "SwarmNetwork",
"broadcast", "broadcast",
"circular_swarm", "circular_swarm",
"exponential_swarm", "exponential_swarm",
@ -160,4 +163,5 @@ __all__ = [
"HiearchicalSwarm", "HiearchicalSwarm",
"AgentLoadBalancer", "AgentLoadBalancer",
"MixtureOfAgents", "MixtureOfAgents",
"GraphWorkflow",
] ]

@ -22,7 +22,6 @@ from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
) )
from swarms.structs.conversation import Conversation from swarms.structs.conversation import Conversation
from swarms.structs.yaml_model import YamlModel from swarms.structs.yaml_model import YamlModel
from swarms.telemetry.user_utils import get_user_device_data
from swarms.tools.prebuilt.code_interpreter import ( from swarms.tools.prebuilt.code_interpreter import (
SubprocessCodeInterpreter, SubprocessCodeInterpreter,
) )
@ -42,6 +41,7 @@ from swarms.tools.func_calling_utils import (
prepare_output_for_output_model, prepare_output_for_output_model,
) )
from swarms.tools.tool_parse_exec import parse_and_execute_json from swarms.tools.tool_parse_exec import parse_and_execute_json
from swarms.models.tiktoken_wrapper import TikTokenizer
# Utils # Utils
@ -88,34 +88,6 @@ agent_output_type = Union[BaseModel, dict, str]
ToolUsageType = Union[BaseModel, Dict[str, Any]] ToolUsageType = Union[BaseModel, Dict[str, Any]]
def retrieve_tokens(text, num_tokens):
"""
Retrieve a specified number of tokens from a given text.
Parameters:
text (str): The input text string.
num_tokens (int): The number of tokens to retrieve.
Returns:
str: A string containing the specified number of tokens from the input text.
"""
# Initialize an empty list to store tokens
tokens = []
token_count = 0
# Split the text into words while counting tokens
for word in text.split():
tokens.append(word)
token_count += 1
if token_count == num_tokens:
break
# Join the selected tokens back into a string
result = " ".join(tokens)
return result
# [FEAT][AGENT] # [FEAT][AGENT]
class Agent(BaseStructure): class Agent(BaseStructure):
""" """
@ -237,7 +209,7 @@ class Agent(BaseStructure):
multi_modal: Optional[bool] = None, multi_modal: Optional[bool] = None,
pdf_path: Optional[str] = None, pdf_path: Optional[str] = None,
list_of_pdf: Optional[str] = None, list_of_pdf: Optional[str] = None,
tokenizer: Optional[Any] = None, tokenizer: Optional[Any] = TikTokenizer(),
long_term_memory: Optional[BaseVectorDatabase] = None, long_term_memory: Optional[BaseVectorDatabase] = None,
preset_stopping_token: Optional[bool] = False, preset_stopping_token: Optional[bool] = False,
traceback: Optional[Any] = None, traceback: Optional[Any] = None,
@ -404,6 +376,9 @@ class Agent(BaseStructure):
**kwargs, **kwargs,
) )
# Check the parameters
self.agent_initialization()
# If the docs exist then ingest the docs # If the docs exist then ingest the docs
if exists(self.docs): if exists(self.docs):
self.ingest_docs(self.docs) self.ingest_docs(self.docs)
@ -419,9 +394,8 @@ class Agent(BaseStructure):
# Add the tool prompt to the memory # Add the tool prompt to the memory
self.short_memory.add(role="System", content=tool_sop_prompt()) self.short_memory.add(role="System", content=tool_sop_prompt())
# Print number of tools # Log the tools
logger.info("Tools granted, initializing tool protocol.") logger.info(f"Tools provided: Accessing {len(tools)} tools")
logger.info(f"Number of tools: {len(tools)}")
# Transform the tools into an openai schema # Transform the tools into an openai schema
self.convert_tool_into_openai_schema() self.convert_tool_into_openai_schema()
@ -513,8 +487,10 @@ class Agent(BaseStructure):
"Could not import agentops, try installing agentops: $ pip3 install agentops" "Could not import agentops, try installing agentops: $ pip3 install agentops"
) )
# Check the parameters # if tokenizer is None:
self.check_parameters() # self.tokenizer = TikTokenizer()
# else:
# self.tokenizer = None
def set_system_prompt(self, system_prompt: str): def set_system_prompt(self, system_prompt: str):
"""Set the system prompt""" """Set the system prompt"""
@ -525,6 +501,22 @@ class Agent(BaseStructure):
self.feedback.append(feedback) self.feedback.append(feedback)
logging.info(f"Feedback received: {feedback}") logging.info(f"Feedback received: {feedback}")
def agent_initialization(self):
try:
print(
colored(
(
"Initializing Autonomous Agent"
f" {self.agent_name}..."
),
"yellow",
)
)
self.check_parameters()
except ValueError as e:
print(f"Error: {str(e)}")
def _check_stopping_condition(self, response: str) -> bool: def _check_stopping_condition(self, response: str) -> bool:
"""Check if the stopping condition is met.""" """Check if the stopping condition is met."""
try: try:
@ -574,7 +566,7 @@ class Agent(BaseStructure):
# ############## TOKENIZER FUNCTIONS ############## # ############## TOKENIZER FUNCTIONS ##############
def count_tokens(self, text: str) -> int: def count_tokens(self, text: str) -> int:
"""Count the number of tokens in the text.""" """Count the number of tokens in the text."""
return self.tokenizer.len(text) return self.tokenizer.count_tokens(text)
def tokens_per_second(self, text: str) -> float: def tokens_per_second(self, text: str) -> float:
""" """
@ -670,39 +662,14 @@ class Agent(BaseStructure):
def activate_autonomous_agent(self): def activate_autonomous_agent(self):
"""Print the autonomous agent activation message""" """Print the autonomous agent activation message"""
try: try:
print( logger.info("Autonomous Agent Activated.")
colored( logger.info("All systems operational. Executing task...")
(
"Initializing Autonomous Agent"
f" {self.agent_name}..."
),
"yellow",
)
)
print(
colored(
"Autonomous Agent Activated.",
"cyan",
attrs=["bold"],
)
)
print(
colored(
"All systems operational. Executing task...",
"green",
)
)
except Exception as error: except Exception as error:
print( logger.error(
colored( "Error activating autonomous agent. Try optimizing your parameters..."
(
"Error activating autonomous agent. Try"
" optimizing your parameters..."
),
"red",
)
) )
print(error) logger.error(error)
return None
def loop_count_print(self, loop_count, max_loops): def loop_count_print(self, loop_count, max_loops):
"""loop_count_print summary """loop_count_print summary
@ -727,15 +694,9 @@ class Agent(BaseStructure):
if self.llm is None: if self.llm is None:
raise ValueError("Language model is not provided") raise ValueError("Language model is not provided")
if self.system_prompt is None:
raise ValueError("System prompt is not provided")
if self.max_loops is None: if self.max_loops is None:
raise ValueError("Max loops is not provided") raise ValueError("Max loops is not provided")
if self.agent_name is None:
raise ValueError("Agent name is not provided")
########################## FUNCTION CALLING ########################## ########################## FUNCTION CALLING ##########################
def run( def run(
@ -760,6 +721,9 @@ class Agent(BaseStructure):
# Clear the short memory # Clear the short memory
response = None response = None
if self.tokenizer is not None:
self.check_available_tokens()
while self.max_loops == "auto" or loop_count < self.max_loops: while self.max_loops == "auto" or loop_count < self.max_loops:
loop_count += 1 loop_count += 1
self.loop_count_print(loop_count, self.max_loops) self.loop_count_print(loop_count, self.max_loops)
@ -918,10 +882,10 @@ class Agent(BaseStructure):
logger.info(f"Response after output cleaner: {response}") logger.info(f"Response after output cleaner: {response}")
# Prepare the output for the output model # Prepare the output for the output model
if self.output_type is not None: # if self.output_type is not None:
# logger.info("Preparing output for output model.") # # logger.info("Preparing output for output model.")
response = prepare_output_for_output_model(response) # response = prepare_output_for_output_model(response)
print(f"Response after output model: {response}") # print(f"Response after output model: {response}")
# print(response) # print(response)
if self.agent_ops_on is True: if self.agent_ops_on is True:
@ -993,16 +957,31 @@ class Agent(BaseStructure):
Returns: Returns:
str: The agent history prompt str: The agent history prompt
""" """
logger.info("Querying long term memory database") try:
logger.info(f"Querying long term memory database for {query}")
ltr = self.long_term_memory.query(query, *args, **kwargs)
# Count the tokens
logger.info("Couting tokens of retrieved document")
ltr_count = self.tokenizer.count_tokens(ltr)
logger.info(f"Retrieved document token count {ltr_count}")
# Query the long term memory database if ltr_count > self.memory_chunk_size:
ltr = self.long_term_memory.query(query, *args, **kwargs) logger.info(
# ltr = str(ltr) f"Truncating memory by {self.memory_chunk_size}"
)
out = self.truncate_string_by_tokens(
ltr, self.memory_chunk_size
)
logger.info(
f"Memory truncated by {self.memory_chunk_size}"
)
# Retrieve only the chunk size of the memory # Retrieve only the chunk size of the memory
logger.info(f"Chunking to {self.memory_chunk_size}") return out
ltr = retrieve_tokens(ltr, self.memory_chunk_size) except Exception as error:
return ltr logger.error(f"Error querying long term memory: {error}")
raise error
def add_memory(self, message: str): def add_memory(self, message: str):
"""Add a memory to the agent """Add a memory to the agent
@ -1228,6 +1207,7 @@ class Agent(BaseStructure):
""" """
logger.info(f"Adding response filter: {filter_word}") logger.info(f"Adding response filter: {filter_word}")
self.reponse_filters.append(filter_word) self.reponse_filters.append(filter_word)
return None
def code_interpreter_execution( def code_interpreter_execution(
self, code: str, *args, **kwargs self, code: str, *args, **kwargs
@ -1340,7 +1320,7 @@ class Agent(BaseStructure):
"Task": task, "Task": task,
"Stopping Token": self.stopping_token, "Stopping Token": self.stopping_token,
"Dynamic Loops": self.dynamic_loops, "Dynamic Loops": self.dynamic_loops,
"tools": self.tools, # "tools": # For loop to get the tools, and convert them to a string
"sop": self.sop, "sop": self.sop,
"sop_list": self.sop_list, "sop_list": self.sop_list,
"context_length": self.context_length, "context_length": self.context_length,
@ -1350,7 +1330,7 @@ class Agent(BaseStructure):
"multi_modal": self.multi_modal, "multi_modal": self.multi_modal,
"pdf_path": self.pdf_path, "pdf_path": self.pdf_path,
"list_of_pdf": self.list_of_pdf, "list_of_pdf": self.list_of_pdf,
"tokenizer": self.tokenizer, # "tokenizer": self.tokenizer,
# "long_term_memory": self.long_term_memory, # "long_term_memory": self.long_term_memory,
"preset_stopping_token": self.preset_stopping_token, "preset_stopping_token": self.preset_stopping_token,
"traceback": self.traceback, "traceback": self.traceback,
@ -1383,7 +1363,7 @@ class Agent(BaseStructure):
"function_calling_format_type": self.function_calling_format_type, "function_calling_format_type": self.function_calling_format_type,
"list_base_models": self.list_base_models, "list_base_models": self.list_base_models,
"metadata_output_type": self.metadata_output_type, "metadata_output_type": self.metadata_output_type,
"user_meta_data": get_user_device_data(), # "user_meta_data": get_user_device_data(),
} }
# Save as JSON # Save as JSON
@ -1543,7 +1523,7 @@ class Agent(BaseStructure):
"""Ingest the docs into the memory """Ingest the docs into the memory
Args: Args:
docs (List[str]): _description_ docs (List[str]): Documents of pdfs, text, csvs
Returns: Returns:
_type_: _description_ _type_: _description_
@ -1560,10 +1540,7 @@ class Agent(BaseStructure):
"""Ingest the pdf into the memory """Ingest the pdf into the memory
Args: Args:
pdf (str): _description_ pdf (str): file path of pdf
Returns:
_type_: _description_
""" """
try: try:
logger.info(f"Ingesting pdf: {pdf}") logger.info(f"Ingesting pdf: {pdf}")
@ -1591,29 +1568,6 @@ class Agent(BaseStructure):
except Exception as error: except Exception as error:
print(colored(f"Error sending agent message: {error}", "red")) print(colored(f"Error sending agent message: {error}", "red"))
def truncate_history(self):
"""
Truncates the short-term memory of the agent based on the count of tokens.
The method counts the tokens in the short-term memory using the tokenizer and
compares it with the length of the memory. If the length of the memory is greater
than the count, the memory is truncated to match the count.
Parameters:
None
Returns:
None
"""
# Count the short term history with the tokenizer
count = self.tokenizer.count_tokens(
self.short_memory.return_history_as_string()
)
# Now the logic that truncates the memory if it's more than the count
if len(self.short_memory) > count:
self.short_memory = self.short_memory[:count]
def add_tool(self, tool: Callable): def add_tool(self, tool: Callable):
return self.tools.append(tool) return self.tools.append(tool)
@ -1663,6 +1617,7 @@ class Agent(BaseStructure):
def convert_tool_into_openai_schema(self): def convert_tool_into_openai_schema(self):
# Transform the tools into an openai schema # Transform the tools into an openai schema
logger.info("Converting tools into OpenAI function calling schema")
try: try:
for tool in self.tools: for tool in self.tools:
# Transform the tool into a openai function calling schema # Transform the tool into a openai function calling schema
@ -1671,7 +1626,7 @@ class Agent(BaseStructure):
try: try:
logger.info( logger.info(
"Tool -> OpenAI Schema Process Starting Now." f"Converting tool: {name} into a OpenAI certified function calling schema. Add documentation and type hints."
) )
tool_schema_list = ( tool_schema_list = (
get_openai_function_schema_from_func( get_openai_function_schema_from_func(
@ -1793,7 +1748,7 @@ class Agent(BaseStructure):
return response return response
def stream_response(self, response: str, delay: float = 0.01) -> None: def stream_response(self, response: str, delay: float = 0.001) -> None:
""" """
Streams the response token by token. Streams the response token by token.
@ -1821,3 +1776,126 @@ class Agent(BaseStructure):
print() # Ensure a newline after streaming print() # Ensure a newline after streaming
except Exception as e: except Exception as e:
print(f"An error occurred during streaming: {e}") print(f"An error occurred during streaming: {e}")
def dynamic_context_window(self):
"""
dynamic_context_window essentially clears everything except
the system prompt and leaves the rest of the context window
for RAG query tokens
"""
# Count the number of tokens in the short term memory
logger.info("Dynamic context window shuffling enabled")
count = self.tokenizer.count_tokens(
self.short_memory.return_history_as_string()
)
logger.info(f"Number of tokens in memory: {count}")
# Dynamically allocating everything except the system prompt to be dynamic
# We need to query the short_memory dict, for the system prompt slot
# Then delete everything after that
if count > self.context_length:
self.short_memory = self.short_memory[-self.context_length :]
logger.info(
f"Short term memory has been truncated to {self.context_length} tokens"
)
else:
logger.info("Short term memory is within the limit")
# Return the memory as a string or update the short term memory
# return memory
def check_available_tokens(self):
# Log the amount of tokens left in the memory and in the task
if self.tokenizer is not None:
tokens_used = self.tokenizer.count_tokens(
self.short_memory.return_history_as_string()
)
logger.info(
f"Tokens available: {tokens_used - self.context_length}"
)
return tokens_used
def tokens_checks(self):
# Check the tokens available
tokens_used = self.tokenizer.count_tokens(
self.short_memory.return_history_as_string()
)
out = self.check_available_tokens()
logger.info(
f"Tokens available: {out} Context Length: {self.context_length} Tokens in memory: {tokens_used}"
)
return out
def truncate_string_by_tokens(
self, input_string: str, limit: int
) -> str:
"""
Truncate a string if it exceeds a specified number of tokens using a given tokenizer.
:param input_string: The input string to be tokenized and truncated.
:param tokenizer: The tokenizer function to be used for tokenizing the input string.
:param max_tokens: The maximum number of tokens allowed.
:return: The truncated string if it exceeds the maximum number of tokens; otherwise, the original string.
"""
# Tokenize the input string
tokens = self.tokenizer.count_tokens(input_string)
# Check if the number of tokens exceeds the maximum limit
if len(tokens) > limit:
# Truncate the tokens to the maximum allowed tokens
truncated_tokens = tokens[: self.context_length]
# Join the truncated tokens back to a string
truncated_string = " ".join(truncated_tokens)
return truncated_string
else:
return input_string
def if_tokens_exceeds_context_length(self):
# Check if tokens exceeds the context length
try:
tokens_used = self.tokenizer.count_tokens(
self.short_memory.return_history_as_string()
)
if tokens_used > self.context_length:
logger.warning("Tokens used exceeds the context length.")
logger.info(
f"Tokens available: {tokens_used - self.context_length}"
)
return True
else:
return False
except Exception as e:
logger.error(f"Error checking tokens: {e}")
return None
def tokens_operations(self, input_string: str) -> str:
"""
Perform various operations on tokens of an input string.
:param input_string: The input string to be processed.
:return: The processed string.
"""
# Tokenize the input string
tokens = self.tokenizer.count_tokens(input_string)
# Check if the number of tokens exceeds the maximum limit
if len(tokens) > self.context_length:
# Truncate the tokens to the maximum allowed tokens
truncated_tokens = tokens[: self.context_length]
# Join the truncated tokens back to a string
truncated_string = " ".join(truncated_tokens)
return truncated_string
else:
# Log the amount of tokens left in the memory and in the task
if self.tokenizer is not None:
tokens_used = self.tokenizer.count_tokens(
self.short_memory.return_history_as_string()
)
logger.info(
f"Tokens available: {tokens_used - self.context_length}"
)
return input_string

@ -0,0 +1,249 @@
from enum import Enum
from typing import Any, Callable, Dict, List
import networkx as nx
from pydantic.v1 import BaseModel, Field, validator
from swarms.structs.agent import Agent # noqa: F401
from swarms.utils.loguru_logger import logger
class NodeType(str, Enum):
AGENT: Agent = "agent"
TASK: str = "task"
class Node(BaseModel):
"""
Represents a node in a graph workflow.
Attributes:
id (str): The unique identifier of the node.
type (NodeType): The type of the node.
callable (Callable, optional): The callable associated with the node. Required for task nodes.
agent (Any, optional): The agent associated with the node.
Raises:
ValueError: If the node type is TASK and no callable is provided.
Examples:
>>> node = Node(id="task1", type=NodeType.TASK, callable=sample_task)
>>> node = Node(id="agent1", type=NodeType.AGENT, agent=agent1)
>>> node = Node(id="agent2", type=NodeType.AGENT, agent=agent2)
"""
id: str
type: NodeType
callable: Callable = None
agent: Any = None
@validator("callable", always=True)
def validate_callable(cls, value, values):
if values["type"] == NodeType.TASK and value is None:
raise ValueError("Task nodes must have a callable.")
return value
class Edge(BaseModel):
source: str
target: str
class GraphWorkflow(BaseModel):
"""
Represents a workflow graph.
Attributes:
nodes (Dict[str, Node]): A dictionary of nodes in the graph, where the key is the node ID and the value is the Node object.
edges (List[Edge]): A list of edges in the graph, where each edge is represented by an Edge object.
entry_points (List[str]): A list of node IDs that serve as entry points to the graph.
end_points (List[str]): A list of node IDs that serve as end points of the graph.
graph (nx.DiGraph): A directed graph object from the NetworkX library representing the workflow graph.
"""
nodes: Dict[str, Node] = Field(default_factory=dict)
edges: List[Edge] = Field(default_factory=list)
entry_points: List[str] = Field(default_factory=list)
end_points: List[str] = Field(default_factory=list)
graph: nx.DiGraph = Field(default_factory=nx.DiGraph, exclude=True)
max_loops: int = 1
class Config:
arbitrary_types_allowed = True
def add_node(self, node: Node):
"""
Adds a node to the workflow graph.
Args:
node (Node): The node object to be added.
Raises:
ValueError: If a node with the same ID already exists in the graph.
"""
try:
if node.id in self.nodes:
raise ValueError(f"Node with id {node.id} already exists.")
self.nodes[node.id] = node
self.graph.add_node(
node.id,
type=node.type,
callable=node.callable,
agent=node.agent,
)
except Exception as e:
logger.info(f"Error in adding node to the workflow: {e}")
raise e
def add_edge(self, edge: Edge):
"""
Adds an edge to the workflow graph.
Args:
edge (Edge): The edge object to be added.
Raises:
ValueError: If either the source or target node of the edge does not exist in the graph.
"""
if edge.source not in self.nodes or edge.target not in self.nodes:
raise ValueError(
"Both source and target nodes must exist before adding an edge."
)
self.edges.append(edge)
self.graph.add_edge(edge.source, edge.target)
def set_entry_points(self, entry_points: List[str]):
"""
Sets the entry points of the workflow graph.
Args:
entry_points (List[str]): A list of node IDs to be set as entry points.
Raises:
ValueError: If any of the specified node IDs do not exist in the graph.
"""
for node_id in entry_points:
if node_id not in self.nodes:
raise ValueError(f"Node with id {node_id} does not exist.")
self.entry_points = entry_points
def set_end_points(self, end_points: List[str]):
"""
Sets the end points of the workflow graph.
Args:
end_points (List[str]): A list of node IDs to be set as end points.
Raises:
ValueError: If any of the specified node IDs do not exist in the graph.
"""
for node_id in end_points:
if node_id not in self.nodes:
raise ValueError(f"Node with id {node_id} does not exist.")
self.end_points = end_points
def visualize(self) -> str:
"""
Generates a string representation of the workflow graph in the Mermaid syntax.
Returns:
str: The Mermaid string representation of the workflow graph.
"""
mermaid_str = "graph TD\n"
for node_id, node in self.nodes.items():
mermaid_str += f" {node_id}[{node_id}]\n"
for edge in self.edges:
mermaid_str += f" {edge.source} --> {edge.target}\n"
return mermaid_str
def run(self, task: str = None, *args, **kwargs) -> Dict[str, Any]:
"""
Function to run the workflow graph.
Args:
task (str): The task to be executed by the workflow.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
Dict[str, Any]: A dictionary containing the results of the execution.
Raises:
ValueError: If no entry points or end points are defined in the graph.
"""
try:
loop = 0
while loop < self.max_loops:
# Ensure all nodes and edges are valid
if not self.entry_points:
raise ValueError(
"At least one entry point must be defined."
)
if not self.end_points:
raise ValueError(
"At least one end point must be defined."
)
# Perform a topological sort of the graph to ensure proper execution order
sorted_nodes = list(nx.topological_sort(self.graph))
# Initialize execution state
execution_results = {}
for node_id in sorted_nodes:
node = self.nodes[node_id]
if node.type == NodeType.TASK:
print(f"Executing task: {node_id}")
result = node.callable()
elif node.type == NodeType.AGENT:
print(f"Executing agent: {node_id}")
result = node.agent.run(task, *args, **kwargs)
execution_results[node_id] = result
loop += 1
return execution_results
except Exception as e:
logger.info(f"Error in running the workflow: {e}")
raise e
# # Example usage
# if __name__ == "__main__":
# from swarms import Agent, OpenAIChat
# import os
# from dotenv import load_dotenv
# load_dotenv()
# api_key = os.environ.get("OPENAI_API_KEY")
# llm = OpenAIChat(
# temperature=0.5, openai_api_key=api_key, max_tokens=4000
# )
# agent1 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
# agent2 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
# def sample_task():
# print("Running sample task")
# return "Task completed"
# wf_graph = GraphWorkflow()
# wf_graph.add_node(Node(id="agent1", type=NodeType.AGENT, agent=agent1))
# wf_graph.add_node(Node(id="agent2", type=NodeType.AGENT, agent=agent2))
# wf_graph.add_node(
# Node(id="task1", type=NodeType.TASK, callable=sample_task)
# )
# wf_graph.add_edge(Edge(source="agent1", target="task1"))
# wf_graph.add_edge(Edge(source="agent2", target="task1"))
# wf_graph.set_entry_points(["agent1", "agent2"])
# wf_graph.set_end_points(["task1"])
# print(wf_graph.visualize())
# # Run the workflow
# results = wf_graph.run()
# print("Execution results:", results)

@ -61,6 +61,7 @@ class MixtureOfAgents(BaseSwarm):
self.scp = scp self.scp = scp
# Check the agents # Check the agents
self.reliability_check()
self.agent_check() self.agent_check()
self.final_agent_check() self.final_agent_check()
@ -76,6 +77,16 @@ class MixtureOfAgents(BaseSwarm):
# Communication Protocol # Communication Protocol
self.communication_protocol() self.communication_protocol()
def reliability_check(self):
if self.final_agent is None:
raise ValueError("Final agent is not defined.")
if self.agents is None:
raise ValueError("Agents are not defined.")
if self.layers is None:
raise ValueError("Layers are not defined.")
def communication_protocol(self): def communication_protocol(self):
try: try:
# Memory system # Memory system

@ -282,3 +282,12 @@ def rearrange(
agents=agents, flow=flow, *args, **kwargs agents=agents, flow=flow, *args, **kwargs
) )
return agent_system.run(task, *args, **kwargs) return agent_system.run(task, *args, **kwargs)
# out = AgentRearrange(
# agents=[agent1, agent2, agent3],
# flow="agent1 -> agent2, agent3, swarm",
# task="Perform a task",
# swarm = "agent1 -> agent2, agent3, swarm"
# )

@ -10,11 +10,11 @@ import asyncio
import multiprocessing import multiprocessing
import queue import queue
import threading import threading
from typing import Dict, List, Optional from typing import List, Optional
import tenacity import tenacity
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware # from fastapi import FastAPI
from pydantic import BaseModel from pydantic import BaseModel
from swarms.structs.agent import Agent from swarms.structs.agent import Agent
@ -141,26 +141,21 @@ class SwarmNetwork(BaseSwarm):
# Create a dictionary of agents for easy access # Create a dictionary of agents for easy access
self.agent_dict = {agent.id: agent for agent in agents} self.agent_dict = {agent.id: agent for agent in agents}
# For each agent in the pool, run it on it's own thread # # Create the FastAPI instance
if agents is not None: # if api_on is True:
for agent in agents: # logger.info("Creating FastAPI instance")
self.agents.append(agent) # self.app = FastAPI(debug=True, *args, **kwargs)
# Create the FastAPI instance # self.app.add_middleware(
if api_on is True: # CORSMiddleware,
logger.info("Creating FastAPI instance") # allow_origins=["*"],
self.app = FastAPI(debug=True, *args, **kwargs) # allow_credentials=True,
# allow_methods=["*"],
self.app.add_middleware( # allow_headers=["*"],
CORSMiddleware, # )
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
logger.info("Routes set for creation") # logger.info("Routes set for creation")
self._create_routes() # self._create_routes()
def add_task(self, task): def add_task(self, task):
"""Add task to the task queue """Add task to the task queue
@ -213,97 +208,97 @@ class SwarmNetwork(BaseSwarm):
) )
raise error raise error
def _create_routes(self) -> None: # def _create_routes(self) -> None:
""" # """
Creates the routes for the API. # Creates the routes for the API.
""" # """
# Extensive logging # # Extensive logging
logger.info("Creating routes for the API") # logger.info("Creating routes for the API")
# Routes available # # Routes available
logger.info( # logger.info(
"Routes available: /v1/swarms, /v1/health, /v1/swarms/{swarm_name}/agents/{agent_id}, /v1/swarms/{swarm_name}/run" # "Routes available: /v1/swarms, /v1/health, /v1/swarms/{swarm_name}/agents/{agent_id}, /v1/swarms/{swarm_name}/run"
) # )
@self.app.get("/v1/swarms", response_model=SwarmInfo) # @self.app.get("/v1/swarms", response_model=SwarmInfo)
async def get_swarms() -> SwarmInfo: # async def get_swarms() -> SwarmInfo:
try: # try:
logger.info("Getting swarm information") # logger.info("Getting swarm information")
return SwarmInfo( # return SwarmInfo(
swarm_name=self.swarm_name, # swarm_name=self.swarm_name,
swarm_description=self.swarm_description, # swarm_description=self.swarm_description,
agents=[ # agents=[
AgentInfo( # AgentInfo(
agent_name=agent.agent_name, # agent_name=agent.agent_name,
agent_description=agent.agent_description, # agent_description=agent.agent_description,
) # )
for agent in self.agents # for agent in self.agents
], # ],
) # )
except Exception as e: # except Exception as e:
logger.error(f"Error getting swarm information: {str(e)}") # logger.error(f"Error getting swarm information: {str(e)}")
raise HTTPException( # raise HTTPException(
status_code=500, detail="Internal Server Error" # status_code=500, detail="Internal Server Error"
) # )
@self.app.get("/v1/health") # @self.app.get("/v1/health")
async def get_health() -> Dict[str, str]: # async def get_health() -> Dict[str, str]:
try: # try:
logger.info("Checking health status") # logger.info("Checking health status")
return {"status": "healthy"} # return {"status": "healthy"}
except Exception as e: # except Exception as e:
logger.error(f"Error checking health status: {str(e)}") # logger.error(f"Error checking health status: {str(e)}")
raise HTTPException( # raise HTTPException(
status_code=500, detail="Internal Server Error" # status_code=500, detail="Internal Server Error"
) # )
@self.app.get(f"/v1/swarms/{self.swarm_name}/agents/{{agent_id}}") # @self.app.get(f"/v1/swarms/{self.swarm_name}/agents/{{agent_id}}")
async def get_agent_info(agent_id: str) -> AgentInfo: # async def get_agent_info(agent_id: str) -> AgentInfo:
try: # try:
logger.info(f"Getting information for agent {agent_id}") # logger.info(f"Getting information for agent {agent_id}")
agent = self.agent_dict.get(agent_id) # agent = self.agent_dict.get(agent_id)
if not agent: # if not agent:
raise HTTPException( # raise HTTPException(
status_code=404, detail="Agent not found" # status_code=404, detail="Agent not found"
) # )
return AgentInfo( # return AgentInfo(
agent_name=agent.agent_name, # agent_name=agent.agent_name,
agent_description=agent.agent_description, # agent_description=agent.agent_description,
) # )
except Exception as e: # except Exception as e:
logger.error(f"Error getting agent information: {str(e)}") # logger.error(f"Error getting agent information: {str(e)}")
raise HTTPException( # raise HTTPException(
status_code=500, detail="Internal Server Error" # status_code=500, detail="Internal Server Error"
) # )
@self.app.post( # @self.app.post(
f"/v1/swarms/{self.swarm_name}/agents/{{agent_id}}/run", # f"/v1/swarms/{self.swarm_name}/agents/{{agent_id}}/run",
response_model=TaskResponse, # response_model=TaskResponse,
) # )
async def run_agent_task( # async def run_agent_task(
task_request: TaskRequest, # task_request: TaskRequest,
) -> TaskResponse: # ) -> TaskResponse:
try: # try:
logger.info("Running agent task") # logger.info("Running agent task")
# Assuming only one agent in the swarm for this example # # Assuming only one agent in the swarm for this example
agent = self.agents[0] # agent = self.agents[0]
logger.info(f"Running agent task: {task_request.task}") # logger.info(f"Running agent task: {task_request.task}")
result = agent.run(task_request.task) # result = agent.run(task_request.task)
return TaskResponse(result=result) # return TaskResponse(result=result)
except Exception as e: # except Exception as e:
logger.error(f"Error running agent task: {str(e)}") # logger.error(f"Error running agent task: {str(e)}")
raise HTTPException( # raise HTTPException(
status_code=500, detail="Internal Server Error" # status_code=500, detail="Internal Server Error"
) # )
def get_app(self) -> FastAPI: # def get_app(self) -> FastAPI:
""" # """
Returns the FastAPI instance. # Returns the FastAPI instance.
Returns: # Returns:
FastAPI: The FastAPI instance. # FastAPI: The FastAPI instance.
""" # """
return self.app # return self.app
def run_single_agent( def run_single_agent(
self, agent_id, task: Optional[str], *args, **kwargs self, agent_id, task: Optional[str], *args, **kwargs

@ -29,6 +29,18 @@ def json_str_to_dict(json_str: str) -> dict:
def pydantic_model_to_json_str( def pydantic_model_to_json_str(
model: BaseModel, indent: int, *args, **kwargs model: BaseModel, indent: int, *args, **kwargs
) -> str: ) -> str:
"""
Converts a Pydantic model to a JSON string.
Args:
model (BaseModel): The Pydantic model to convert.
indent (int): The number of spaces to use for indentation.
*args: Additional positional arguments to pass to `json.dumps`.
**kwargs: Additional keyword arguments to pass to `json.dumps`.
Returns:
str: The JSON string representation of the Pydantic model.
"""
return json.dumps( return json.dumps(
base_model_to_openai_function(model), base_model_to_openai_function(model),
indent=indent, indent=indent,
@ -80,8 +92,16 @@ def str_to_pydantic_model(string: str, model: BaseModel) -> BaseModel:
def list_str_to_pydantic_model( def list_str_to_pydantic_model(
list_str: List[str], model: BaseModel list_str: List[str], model: BaseModel
) -> BaseModel: ) -> BaseModel:
"""Convert a list of strings to a Pydantic model""" """Convert a list of strings to a Pydantic model.
# return model.model_validate_json(list_str)
Args:
list_str (List[str]): The list of strings to be converted.
model (BaseModel): The Pydantic model to convert the strings to.
Returns:
BaseModel: The Pydantic model with the converted strings.
"""
for string in list_str: for string in list_str:
return model.model_validate_json(string) return model.model_validate_json(string)
@ -90,7 +110,16 @@ def prepare_output_for_output_model(
output_type: Union[str, Dict, BaseModel], output_type: Union[str, Dict, BaseModel],
output: Union[str, Dict, BaseModel] = None, output: Union[str, Dict, BaseModel] = None,
) -> Union[BaseModel, str]: ) -> Union[BaseModel, str]:
"""Prepare the output for the output model""" """Prepare the output for the output model.
Args:
output_type (Union[str, Dict, BaseModel]): The type of the output.
output (Union[str, Dict, BaseModel], optional): The output data. Defaults to None.
Returns:
Union[BaseModel, str]: The prepared output.
"""
if output_type == BaseModel: if output_type == BaseModel:
return str_to_pydantic_model(output, output_type) return str_to_pydantic_model(output, output_type)
elif output_type == dict: elif output_type == dict:

@ -10,7 +10,7 @@ def parse_and_execute_json(
parse_md: bool = False, parse_md: bool = False,
): ):
""" """
Parses and executes a JSON string containing function name and parameters. Parses and executes a JSON string containing function names and parameters.
Args: Args:
functions (List[callable]): A list of callable functions. functions (List[callable]): A list of callable functions.
@ -18,7 +18,7 @@ def parse_and_execute_json(
parse_md (bool): Flag indicating whether to extract code from Markdown. parse_md (bool): Flag indicating whether to extract code from Markdown.
Returns: Returns:
The result of executing the function with the parsed parameters, or None if an error occurs. A dictionary containing the results of executing the functions with the parsed parameters.
""" """
if parse_md: if parse_md:
@ -30,19 +30,25 @@ def parse_and_execute_json(
loguru.logger.info(f"Extracted code: {json_string}") loguru.logger.info(f"Extracted code: {json_string}")
data = json.loads(json_string) data = json.loads(json_string)
function_name = data.get("function", {}).get("name") function_list = data.get("functions", [])
parameters = data.get("function", {}).get("parameters")
results = {}
# Check if the function name is in the function dictionary for function_data in function_list:
if function_name in function_dict: function_name = function_data.get("name")
# Call the function with the parsed parameters parameters = function_data.get("parameters")
result = function_dict[function_name](**parameters)
return result # Check if the function name is in the function dictionary
else: if function_name in function_dict:
loguru.logger.warning( # Call the function with the parsed parameters
f"No function named '{function_name}' found." result = function_dict[function_name](**parameters)
) results[function_name] = result
return None else:
loguru.logger.warning(
f"No function named '{function_name}' found."
)
results[function_name] = None
return results
except Exception as e: except Exception as e:
loguru.logger.error(f"Error: {e}") loguru.logger.error(f"Error: {e}")
return None return None
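To make the new behavior above concrete, here is a hedged example of the multi-function payload that parse_and_execute_json now expects; the helper functions and values are invented for illustration, and the import path matches the one used in the Agent module earlier in this commit.
```python
import json

from swarms.tools.tool_parse_exec import parse_and_execute_json


def get_weather(city: str) -> str:
    return f"Sunny in {city}"


def get_time(timezone: str) -> str:
    return f"12:00 in {timezone}"


# The JSON now carries a list under "functions", each entry naming a
# function and its parameters; the return value maps names to results.
payload = json.dumps(
    {
        "functions": [
            {"name": "get_weather", "parameters": {"city": "Paris"}},
            {"name": "get_time", "parameters": {"timezone": "UTC"}},
        ]
    }
)

results = parse_and_execute_json([get_weather, get_time], payload)
print(results)  # expected along the lines of {'get_weather': 'Sunny in Paris', 'get_time': '12:00 in UTC'}
```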
