[CLEANUP][hashicorp_vault] [FEATS][Swarm DFS][Swarm MonteCarlo][Swarm Tree] [FIXES][Schema fixes] [Logging][Fixed a bug where logs and errors were sent to the root directory; they now go to an agent workspace dir] [DEMOS][Marketing Campaign][HiearchicalSwarm]
parent: a3dec82cd5
commit: 03a28cbacc
@@ -0,0 +1,33 @@
# ==================================
# Use an official Python runtime as a parent image
FROM python:3.11-slim

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Set the working directory in the container
WORKDIR /usr/src/swarms

# Install Python dependencies
# COPY requirements.txt and pyproject.toml if you're using Poetry for dependency management
COPY requirements.txt .
RUN pip install --upgrade pip
RUN pip install --no-cache-dir -r requirements.txt

# Install the 'swarms' package, assuming it's available on PyPI
RUN pip install -U swarms

# Copy the rest of the application
COPY . .

# Expose a port if your application has a web interface
# EXPOSE 5000

# Define the environment variable required for the swarm to work
# ENV OPENAI_API_KEY=your_swarm_api_key_here

# If you're using `CMD` to execute a Python script, make sure it's executable
# RUN chmod +x example.py
@@ -0,0 +1,97 @@
import os

from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent-General-11",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    # tools=[],  # Add your functions here
    # stopping_token="Stop!",
    # docs_folder="docs",  # Enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    # docs="",
    retry_attempts=3,
    # context_length=1000,
    # tool_schema=dict,
    context_length=200000,
    tool_system_prompt=None,
)

# Serialize the agent object to several formats
print(agent.to_dict())
print(agent.to_toml())
print(agent.model_dump_json())
print(agent.model_dump_yaml())

# Ingest documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf")

# Receive a message from a user and process it
agent.receive_message(name="agent_name", message="message")

# Send a message from the agent to a user
agent.send_agent_message(agent_name="agent_name", message="message")

# Ingest multiple documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf", "your_csv_path.csv")

# Run the agent with a filtered system prompt
agent.filtered_run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)

# Run the agent on multiple tasks in bulk
agent.bulk_run(
    [
        "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?",
        "Another task",
    ]
)

# Add a memory to the agent
agent.add_memory("Add a memory to the agent")

# Check the number of available tokens for the agent
agent.check_available_tokens()

# Perform token checks for the agent
agent.tokens_checks()

# Print the dashboard of the agent
agent.print_dashboard()

# Print the history and memory of the agent
agent.print_history_and_memory()

# Fetch all the documents from the doc folders
agent.get_docs_from_doc_folders()

# Activate agent ops and check the session end
agent.activate_agentops()
agent.check_end_session_agentops()

# Dump the agent to JSON and TOML
agent.model_dump_json()
print(agent.to_toml())
@@ -0,0 +1,95 @@
import json
from typing import List

from pydantic import BaseModel, Field

from swarms.models.openai_function_caller import OpenAIFunctionCaller


AI_PAPER_IDEA_GENERATOR = """
You are Phil Wang, a computer scientist and artificial intelligence researcher widely regarded as one of the leading experts in deep learning and neural network architecture search. Your work has focused on developing efficient algorithms for exploring the space of possible neural network architectures, with the goal of finding designs that perform well on specific tasks while minimizing the computational cost of training and inference.

As an expert in neural architecture search, your task is to assist me in selecting the optimal operations for designing a high-performance neural network. The primary objective is to maximize the model's performance.

Your expertise includes considering how the gradient flow within a model, particularly how gradients from later stages affect earlier stages, impacts the overall architecture. Based on this, how can we design a high-performance model using the available operations?

Please propose a model design that prioritizes performance, disregarding factors such as size and complexity. After you suggest a design, I will test its performance and provide feedback. Based on the results of these experiments, we can collaborate to iterate and improve the design. Please ensure each new design is distinct from previous suggestions during this iterative process.

You're a research scientist working on a new paper. You need to generate a novel idea for a research paper.

The paper should be in the field of multi-modal learning and should propose a new method or algorithm.

The paper should be innovative, novel, and feasible.

Generate a paper idea that meets these criteria.

You need to provide the following details:
- The paper idea
- A brief description of the paper idea
- A proposed experiment to test the paper idea
- Ratings for interestingness, novelty, and feasibility of the paper idea
- The ratings should be on a scale of 0.1 to 1.0, with 1.0 being the most innovative, novel, or feasible
"""


class PaperIdeaSchema(BaseModel):
    paper_idea: str = Field(
        ...,
        description="The generated paper idea.",
    )
    description: str = Field(
        ...,
        description="A brief description of the paper idea.",
    )
    experiment: str = Field(
        ...,
        description="A proposed experiment to test the paper idea.",
    )
    interestingness: float = Field(
        ...,
        description="A rating of how interesting the paper idea is, on a scale of 0.1 to 1.0 with 1.0 being the most interesting.",
    )
    novelty: float = Field(
        ...,
        description="A rating of how novel the paper idea is, on a scale of 0.1 to 1.0 with 1.0 being the most novel.",
    )
    feasibility: float = Field(
        ...,
        description="A rating of how feasible the paper idea is, on a scale of 0.1 to 1.0 with 1.0 being the most feasible.",
    )


class MultiplePaperIdeas(BaseModel):
    paper_ideas: List[PaperIdeaSchema] = Field(
        ...,
        description="A list of generated paper ideas.",
    )


# Example usage:
# Initialize the function caller
model = OpenAIFunctionCaller(
    system_prompt=AI_PAPER_IDEA_GENERATOR,
    max_tokens=4000,
    temperature=0.7,
    base_model=MultiplePaperIdeas,
    parallel_tool_calls=False,
)

# Call the function caller with the input
output = model.run(
    "Generate paper ideas for multi-agent learning and collective intelligence involving many transformer models as an ensemble of transformers"
)
print(type(output))
# print(output)
output = json.dumps(output, indent=2)
print(output)
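For typed access to the generated ideas rather than raw JSON, the serialized output can be validated back into the MultiplePaperIdeas schema. A minimal sketch, assuming pydantic v2 (for model_validate_json) and that OpenAIFunctionCaller.run returned data matching the schema; neither is confirmed by this file:

# Sketch: round-trip the serialized output into the schema for typed access.
# Assumes pydantic v2 and that `output` now holds JSON shaped like
# MultiplePaperIdeas.
ideas = MultiplePaperIdeas.model_validate_json(output)
for idea in ideas.paper_ideas:
    print(f"{idea.paper_idea} (novelty={idea.novelty:.1f})")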
@@ -0,0 +1,116 @@
import os
from datetime import datetime
from typing import Any, Dict, List

from plaid import Client
from plaid.api import plaid_api
from plaid.model.error import PlaidError
from plaid.model.transactions_get_request import TransactionsGetRequest
from plaid.model.transactions_get_response import TransactionsGetResponse

from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)


def fetch_transactions(
    start_date: str, end_date: str
) -> List[Dict[str, Any]]:
    """
    Fetches a list of transactions from Plaid for a given time period.

    The Plaid access token is read from the PLAID_ACCESS_TOKEN
    environment variable.

    Args:
        start_date (str): The start date for the transaction query in 'YYYY-MM-DD' format.
        end_date (str): The end date for the transaction query in 'YYYY-MM-DD' format.

    Returns:
        List[Dict[str, Any]]: A list of transactions as dictionaries.

    Raises:
        PlaidError: If there is an error with the request to the Plaid API.
        ValueError: If the date format is incorrect.
    """
    try:
        access_token = os.getenv("PLAID_ACCESS_TOKEN")
        # Validate date format
        datetime.strptime(start_date, "%Y-%m-%d")
        datetime.strptime(end_date, "%Y-%m-%d")

        # Initialize the Plaid client with your credentials
        plaid_client = plaid_api.PlaidApi(
            Client(
                client_id=os.getenv("PLAID_CLIENT_ID"),
                secret=os.getenv("PLAID_SECRET"),
                environment=os.getenv("PLAID_ENV", "sandbox"),
            )
        )

        # Create a request object for transactions
        request = TransactionsGetRequest(
            access_token=access_token,
            start_date=start_date,
            end_date=end_date,
        )

        # Fetch transactions from the Plaid API
        response: TransactionsGetResponse = plaid_client.transactions_get(
            request
        )

        # Return the transactions list
        return response.transactions

    except PlaidError as e:
        print(f"Plaid API Error: {e}")
        raise
    except ValueError as e:
        print(f"Date Format Error: {e}")
        raise


# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent_sas_chicken_eej",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    # interactive=True,  # Set to False to disable interactive mode
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    # tools=[],  # Add your functions here
    # stopping_token="Stop!",
    # docs_folder="docs",  # Enter your folder name
    # pdf_path="docs/finance_agent.pdf",
    # sop="Calculate the profit for a company.",
    # sop_list=["Calculate the profit for a company."],
    user_name="swarms_corp",
    # docs="",
    # docs_folder="docs",
    retry_attempts=1,
    # context_length=1000,
    # tool_schema=dict,
    context_length=200000,
    return_step_meta=True,
    tools=[fetch_transactions],
)


out = agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
)
print(out)
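Because fetch_transactions is a plain function, it can also be exercised directly before handing it to the agent as a tool. A minimal sketch, assuming the PLAID_CLIENT_ID, PLAID_SECRET, and PLAID_ACCESS_TOKEN environment variables are set for a sandbox item; the date range is illustrative:

# Sketch: call the tool function directly, bypassing the agent.
# Requires valid Plaid credentials in the environment; dates are examples.
transactions = fetch_transactions("2024-01-01", "2024-01-31")
for txn in transactions[:5]:
    print(txn)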
@@ -1,79 +0,0 @@
import os

from swarms.models.base_llm import BaseLLM


def check_multion_api_key():
    """
    Checks if the MultiOn API key is available in the environment variables.

    Returns:
        str: The MultiOn API key.
    """
    api_key = os.getenv("MULTION_API_KEY")
    return api_key


class MultiOnAgent(BaseLLM):
    """
    Represents an agent that interacts with the MultiOn API to run tasks on a remote session.

    Args:
        api_key (str): The API key for accessing the MultiOn API.
        url (str): The URL of the remote session.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Attributes:
        client (MultiOn): The MultiOn client instance.
        url (str): The URL of the remote session.
        session_id (str): The ID of the current session.

    Methods:
        run: Runs a task on the remote session.
    """

    def __init__(
        self,
        name: str = None,
        system_prompt: str = None,
        api_key: str = check_multion_api_key(),
        url: str = "https://huggingface.co/papers",
        max_steps: int = 1,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.name = name

        try:
            from multion.client import MultiOn
        except ImportError:
            raise ImportError(
                "The MultiOn package is not installed. Please install it using 'pip install multion'."
            )

        self.client = MultiOn(api_key=api_key)
        self.url = url
        self.system_prompt = system_prompt
        self.max_steps = max_steps

    def run(self, task: str, *args, **kwargs):
        """
        Runs a task on the remote session.

        Args:
            task (str): The task to be executed on the remote session.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.
        """
        response = self.client.browse(
            cmd=task,
            url=self.url,
            local=True,
            max_steps=self.max_steps,
        )

        # response = response.json()

        # print(response.message)
        return str(response.message)
@@ -0,0 +1,353 @@
from typing import Callable, List, Union

from swarms.models.base_embedding_model import BaseEmbeddingModel
from swarms.models.base_llm import BaseLLM
from swarms.models.base_multimodal_model import BaseMultiModalModel
from swarms.models.fuyu import Fuyu  # noqa: E402
from swarms.models.gpt4_vision_api import GPT4VisionAPI  # noqa: E402
from swarms.models.huggingface import HuggingfaceLLM  # noqa: E402
from swarms.models.idefics import Idefics  # noqa: E402
from swarms.models.kosmos_two import Kosmos  # noqa: E402
from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
from swarms.models.llama3_hosted import llama3Hosted
from swarms.models.llava import LavaMultiModal  # noqa: E402
from swarms.models.nougat import Nougat  # noqa: E402
from swarms.models.openai_embeddings import OpenAIEmbeddings
from swarms.models.openai_function_caller import OpenAIFunctionCaller
from swarms.models.openai_tts import OpenAITTS  # noqa: E402
from swarms.models.palm import GooglePalm as Palm  # noqa: E402
from swarms.models.popular_llms import Anthropic as Anthropic
from swarms.models.popular_llms import (
    AzureOpenAILLM as AzureOpenAI,
)
from swarms.models.popular_llms import (
    CohereChat as Cohere,
)
from swarms.models.popular_llms import FireWorksAI, OctoAIChat
from swarms.models.popular_llms import (
    OpenAIChatLLM as OpenAIChat,
)
from swarms.models.popular_llms import (
    OpenAILLM as OpenAI,
)
from swarms.models.popular_llms import ReplicateChat as Replicate
from swarms.models.qwen import QwenVLMultiModal  # noqa: E402
from swarms.models.sampling_params import SamplingParams
from swarms.models.together import TogetherLLM  # noqa: E402
from swarms.models.vilt import Vilt  # noqa: E402
from swarms.structs.base_structure import BaseStructure
from swarms.utils.loguru_logger import logger

# New type covering BaseLLM, BaseEmbeddingModel, BaseMultiModalModel, and callables
omni_model_type = Union[
    BaseLLM, BaseEmbeddingModel, BaseMultiModalModel, Callable
]
list_of_omni_model_type = List[omni_model_type]


models = [
    BaseLLM,
    BaseEmbeddingModel,
    BaseMultiModalModel,
    Fuyu,
    GPT4VisionAPI,
    HuggingfaceLLM,
    Idefics,
    Kosmos,
    LayoutLMDocumentQA,
    llama3Hosted,
    LavaMultiModal,
    Nougat,
    OpenAIEmbeddings,
    OpenAITTS,
    Palm,
    Anthropic,
    AzureOpenAI,
    Cohere,
    OctoAIChat,
    OpenAIChat,
    OpenAI,
    Replicate,
    QwenVLMultiModal,
    SamplingParams,
    TogetherLLM,
    Vilt,
    FireWorksAI,
    OpenAIFunctionCaller,
]


class ModelRouter(BaseStructure):
    """
    A router for managing multiple models.

    Attributes:
        model_router_id (str): The ID of the model router.
        model_router_description (str): The description of the model router.
        model_pool (List[omni_model_type]): The list of models in the model pool.

    Methods:
        check_for_models(): Checks if there are any models in the model pool.
        add_model(model: omni_model_type): Adds a model to the model pool.
        add_models(models: List[omni_model_type]): Adds multiple models to the model pool.
        get_model_by_name(model_name: str) -> omni_model_type: Retrieves a model from the model pool by its name.
        get_multiple_models_by_name(model_names: List[str]) -> List[omni_model_type]: Retrieves multiple models from the model pool by their names.
        get_model_pool() -> List[omni_model_type]: Retrieves the entire model pool.
        get_model_by_index(index: int) -> omni_model_type: Retrieves a model from the model pool by its index.
        get_model_by_id(model_id: str) -> omni_model_type: Retrieves a model from the model pool by its ID.
        dict() -> dict: Returns a dictionary representation of the model router.
    """

    def __init__(
        self,
        model_router_id: str = "model_router",
        model_router_description: str = "A router for managing multiple models.",
        model_pool: List[omni_model_type] = models,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.model_router_id = model_router_id
        self.model_router_description = model_router_description
        self.model_pool = model_pool
        self.verbose = verbose

        self.check_for_models()
        # self.refactor_model_class_if_invoke()

    def check_for_models(self):
        """
        Checks if there are any models in the model pool.

        Returns:
            None

        Raises:
            ValueError: If no models are found in the model pool.
        """
        if len(self.model_pool) == 0:
            raise ValueError("No models found in model pool.")

    def add_model(self, model: omni_model_type):
        """
        Adds a model to the model pool.

        Args:
            model (omni_model_type): The model to be added.

        Returns:
            str: A success message indicating that the model has been added to the model pool.
        """
        logger.info(f"Adding model {model.name} to model pool.")
        self.model_pool.append(model)
        return "Model successfully added to model pool."

    def add_models(self, models: List[omni_model_type]):
        """
        Adds multiple models to the model pool.

        Args:
            models (List[omni_model_type]): The models to be added.

        Returns:
            str: A success message indicating that the models have been added to the model pool.
        """
        logger.info("Adding models to model pool.")
        self.model_pool.extend(models)
        return "Models successfully added to model pool."

    # def query_model_from_langchain(self, model_name: str, *args, **kwargs):
    #     """
    #     Query a model from the langchain community.

    #     Args:
    #         model_name (str): The name of the model.
    #         *args: Additional positional arguments to be passed to the model.
    #         **kwargs: Additional keyword arguments to be passed to the model.

    #     Returns:
    #         omni_model_type: The model object.

    #     Raises:
    #         ValueError: If the model with the given name is not found in the model pool.
    #     """
    #     from langchain_community.llms import __getattr__

    #     logger.info(
    #         f"Querying model {model_name} from langchain community."
    #     )
    #     model = __getattr__(model_name)(*args, **kwargs)
    #     model = self.refactor_model_class_if_invoke_class(model)

    #     return model

    def get_model_by_name(self, model_name: str) -> omni_model_type:
        """
        Retrieves a model from the model pool by its name.

        Args:
            model_name (str): The name of the model.

        Returns:
            omni_model_type: The model object.

        Raises:
            ValueError: If the model with the given name is not found in the model pool.
        """
        logger.info(f"Retrieving model {model_name} from model pool.")
        for model in self.model_pool:
            if model_name in [
                getattr(model, "name", None),
                getattr(model, "model_id", None),
                getattr(model, "model_name", None),
            ]:
                return model
        raise ValueError(f"Model {model_name} not found in model pool.")

    def get_multiple_models_by_name(
        self, model_names: List[str]
    ) -> List[omni_model_type]:
        """
        Retrieves multiple models from the model pool by their names.

        Args:
            model_names (List[str]): The names of the models.

        Returns:
            List[omni_model_type]: The list of model objects.

        Raises:
            ValueError: If any of the models with the given names are not found in the model pool.
        """
        logger.info(
            f"Retrieving multiple models {model_names} from model pool."
        )
        models = []
        for model_name in model_names:
            models.append(self.get_model_by_name(model_name))
        return models

    def get_model_pool(self) -> List[omni_model_type]:
        """
        Retrieves the entire model pool.

        Returns:
            List[omni_model_type]: The list of model objects in the model pool.
        """
        return self.model_pool

    def get_model_by_index(self, index: int) -> omni_model_type:
        """
        Retrieves a model from the model pool by its index.

        Args:
            index (int): The index of the model in the model pool.

        Returns:
            omni_model_type: The model object.

        Raises:
            IndexError: If the index is out of range.
        """
        return self.model_pool[index]

    def get_model_by_id(self, model_id: str) -> omni_model_type:
        """
        Retrieves a model from the model pool by its ID.

        Args:
            model_id (str): The ID of the model.

        Returns:
            omni_model_type: The model object.

        Raises:
            ValueError: If the model with the given ID is not found in the model pool.
        """
        name = model_id
        for model in self.model_pool:
            if (
                (hasattr(model, "model_id") and name == model.model_id)
                or (hasattr(model, "model_name") and name == model.model_name)
                or (hasattr(model, "name") and name == model.name)
                or (hasattr(model, "model") and name == model.model)
            ):
                return model
        raise ValueError(f"Model {model_id} not found in model pool.")

    def refactor_model_class_if_invoke(self):
        """
        Refactors the model class if it has an 'invoke' method.

        Checks to see if any model in the pool has an 'invoke' method and refactors it to have a 'run' method and a '__call__' method.

        Returns:
            str: A success message indicating that the model classes have been refactored.
        """
        for model in self.model_pool:
            if hasattr(model, "invoke"):
                model.run = model.invoke
                model.__call__ = model.invoke
                logger.info(
                    f"Refactored model {model.name} to have run and __call__ methods."
                )

                # Update the model in the model pool
                self.model_pool[self.model_pool.index(model)] = model

        return "Model classes successfully refactored."

    def refactor_model_class_if_invoke_class(
        self, model: Callable, *args, **kwargs
    ) -> Callable:
        """
        Refactors the given model class if it has an 'invoke' method.

        Checks to see if the model has an 'invoke' method and refactors it to have a 'run' method and a '__call__' method.

        Returns:
            Callable: The refactored model.
        """
        if hasattr(model, "invoke"):
            model.run = model.invoke
            model.__call__ = model.invoke
            logger.info(
                f"Refactored model {model.name} to have run and __call__ methods."
            )

        return model

    def find_model_by_name_and_run(
        self, model_name: str = None, task: str = None, *args, **kwargs
    ) -> str:
        """
        Finds a model by its name and runs a task on it.

        Args:
            model_name (str): The name of the model.
            task (str): The task to be run on the model.
            *args: Additional positional arguments to be passed to the task.
            **kwargs: Additional keyword arguments to be passed to the task.

        Returns:
            str: The result of running the task on the model.

        Raises:
            ValueError: If the model with the given name is not found in the model pool.
        """
        model = self.get_model_by_name(model_name)
        return model.run(task, *args, **kwargs)


# model = ModelRouter()
# print(model.to_dict())
# print(model.get_model_pool())
# print(model.get_model_by_index(0))
# print(model.get_model_by_id("stability-ai/stable-diffusion:"))
# # print(model.get_multiple_models_by_name(["gpt-4o", "gpt-4"]))
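A small usage sketch for the router. Note the default pool holds classes, not instances, so name lookups are most useful against a pool of instantiated models; the sketch assumes OpenAIChat can be constructed with just a model_name and exposes it as an attribute (which is what get_model_by_name inspects), both unconfirmed by this file:

# Sketch: route a task to a named model instance. The OpenAIChat
# constructor signature and its model_name attribute are assumptions.
router = ModelRouter(model_pool=[OpenAIChat(model_name="gpt-4o-mini")])
result = router.find_model_by_name_and_run(
    model_name="gpt-4o-mini",
    task="Summarize the swarms framework in one line.",
)
print(result)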
@@ -0,0 +1,230 @@
import os
from typing import Callable, List, Optional

import matplotlib.pyplot as plt
import networkx as nx

from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.structs.base_swarm import BaseSwarm


class AStarSwarm(BaseSwarm):
    def __init__(
        self,
        root_agent: Agent,
        child_agents: Optional[List[Agent]] = None,
        heuristic: Optional[Callable[[Agent], float]] = None,
        *args,
        **kwargs,
    ):
        """
        Initializes the A* Swarm with a root agent and optionally a list of child agents.

        Args:
            root_agent (Agent): The root agent in the swarm.
            child_agents (Optional[List[Agent]]): List of child agents.
            heuristic (Optional[Callable[[Agent], float]]): Function used to prioritize child agents.
        """
        super().__init__(*args, **kwargs)
        self.root_agent = root_agent
        self.heuristic = heuristic
        self.child_agents = (
            child_agents if child_agents is not None else []
        )
        self.parent_map = {
            agent: root_agent for agent in self.child_agents
        }

    def a_star_communicate(
        self,
        agent: Agent,
        task: str,
    ) -> str:
        """
        Distributes the task among agents using A* search-like communication.

        Args:
            agent (Agent): The agent to start the communication from.
            task (str): The task to distribute and process.

        Returns:
            str: The result of the task after processing.
        """
        # Perform the task at the current agent
        result = agent.run(task)

        # Base case: if this agent has no children, return the result
        if agent not in self.parent_map.values():
            return result

        # Gather child agents
        children = [
            child
            for child, parent in self.parent_map.items()
            if parent == agent
        ]

        # Sort children based on the heuristic (if provided)
        if self.heuristic:
            children.sort(key=self.heuristic, reverse=True)

        # Communicate with child agents
        for child in children:
            sub_result = self.a_star_communicate(child, task)
            result += f"\n{sub_result}"

        return result

    def visualize(self):
        """
        Visualizes the communication flow between agents in the swarm using networkx and matplotlib.
        """
        graph = nx.DiGraph()

        # Add edges between the root agent and child agents
        for child in self.child_agents:
            graph.add_edge(self.root_agent.agent_name, child.agent_name)
            self._add_edges(graph, child)

        # Draw the graph
        pos = nx.spring_layout(graph)
        plt.figure(figsize=(10, 8))
        nx.draw(
            graph,
            pos,
            with_labels=True,
            node_color="lightblue",
            font_size=10,
            node_size=3000,
            font_weight="bold",
            edge_color="gray",
        )
        plt.title("Communication Flow Between Agents")
        plt.show()

    def _add_edges(self, graph: nx.DiGraph, agent: Agent):
        """
        Recursively adds edges to the graph for the given agent.

        Args:
            graph (nx.DiGraph): The graph to add edges to.
            agent (Agent): The current agent.
        """
        children = [
            child
            for child, parent in self.parent_map.items()
            if parent == agent
        ]
        for child in children:
            graph.add_edge(agent.agent_name, child.agent_name)
            self._add_edges(graph, child)

    def run(
        self,
        task: str,
    ) -> str:
        """
        Start the task from the root agent using A* communication.

        Args:
            task (str): The task to execute.

        Returns:
            str: The result of the task after processing.
        """
        return self.a_star_communicate(self.root_agent, task)


# Heuristic example (can be customized)
def example_heuristic(agent: Agent) -> float:
    """
    Example heuristic that prioritizes agents based on some custom logic.

    Args:
        agent (Agent): The agent to evaluate.

    Returns:
        float: The priority score for the agent.
    """
    # Example heuristic: prioritize based on the length of the agent's name (as a proxy for complexity)
    return len(agent.agent_name)


# Set up the model as provided
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize root agent
root_agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=2,
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=3,
    context_length=200000,
)

# List of child agents
child_agents = [
    Agent(
        agent_name="Child-Agent-1",
        system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
        llm=model,
        max_loops=2,
        autosave=True,
        dashboard=False,
        verbose=True,
        streaming_on=True,
        dynamic_temperature_enabled=True,
        saved_state_path="finance_agent_child_1.json",
        user_name="swarms_corp",
        retry_attempts=3,
        context_length=200000,
    ),
    Agent(
        agent_name="Child-Agent-2",
        system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
        llm=model,
        max_loops=2,
        autosave=True,
        dashboard=False,
        verbose=True,
        streaming_on=True,
        dynamic_temperature_enabled=True,
        saved_state_path="finance_agent_child_2.json",
        user_name="swarms_corp",
        retry_attempts=3,
        context_length=200000,
    ),
]

# Create the A* swarm
swarm = AStarSwarm(
    root_agent=root_agent,
    child_agents=child_agents,
    heuristic=example_heuristic,
)

# Run the task with the heuristic
result = swarm.run(
    "What are the components of a startup's stock incentive equity plan?",
)
print(result)

# Visualize the communication flow
swarm.visualize()
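As written, parent_map records only one level: every child maps to the root. A deeper tree can be expressed by adding entries to the map after construction; a sketch, assuming Agent instances are hashable (the constructor above already uses them as dict keys):

# Sketch: extend the hierarchy to two levels. The grandchild reports to
# Child-Agent-1, which reports to the root; run() and visualize() both
# walk parent_map recursively, so no other change is needed.
grandchild = Agent(
    agent_name="Grandchild-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
)
swarm.parent_map[grandchild] = child_agents[0]
result = swarm.run(
    "What are the components of a startup's stock incentive equity plan?"
)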
File diff suppressed because it is too large
@@ -0,0 +1,237 @@
# import os
# from swarms import Agent, OpenAIChat
# from typing import List

# class DepthFirstSearchSwarm:
#     def __init__(self, agents: List[Agent]):
#         self.agents = agents
#         self.visited = set()

#     def dfs(self, agent, task, results):
#         if agent.agent_name in self.visited:
#             return
#         self.visited.add(agent.agent_name)

#         # Execute the agent's task
#         result = agent.run(task)
#         results.append(result)

#         # If the agent produces more tasks, continue the DFS
#         if isinstance(result, dict) and "next_tasks" in result:
#             for next_task in result["next_tasks"]:
#                 next_agent = self.get_next_agent()
#                 if next_agent:
#                     self.dfs(next_agent, next_task, results)
#                 else:
#                     print("No more agents available for further tasks.")

#     def get_next_agent(self):
#         for agent in self.agents:
#             if agent.agent_name not in self.visited:
#                 return agent
#         return None

#     def run(self, task):
#         results = []
#         if self.agents:
#             initial_agent = self.agents[0]
#             self.dfs(initial_agent, task, results)
#         return results


# # Usage example

# # Define agents with their specific roles or capabilities
# agents = [
#     Agent(
#         agent_name="Financial-Analysis-Agent",
#         system_prompt="Perform financial analysis",
#         llm=OpenAIChat(
#             api_key=os.getenv("OPENAI_API_KEY"),
#             model_name="gpt-4o-mini",
#             temperature=0.1,
#         ),
#         max_loops=1,
#         autosave=True,
#         verbose=True,
#         streaming_on=True,
#         dynamic_temperature_enabled=True,
#         # saved_state_path="finance_agent.json",
#         user_name="swarms_corp",
#         retry_attempts=3,
#         context_length=200000,
#     ),
#     # Add more agents with specific tasks if needed
# ]

# # Initialize the DFS swarm
# dfs_swarm = DepthFirstSearchSwarm(agents)

# # Run the DFS swarm with a task
# task = (
#     "Analyze the financial components of a startup's stock incentive plan."
# )
# results = dfs_swarm.run(task)

# # Print the results
# for idx, result in enumerate(results):
#     print(f"Result from Agent {idx + 1}: {result}")


# ####################
# import os
# from swarms import Agent, OpenAIChat

# class DFSSwarm:
#     def __init__(self, agents):
#         self.agents = agents
#         self.visited = set()

#     def dfs(self, agent_index, task, previous_output=None):
#         if agent_index >= len(self.agents):
#             return previous_output

#         agent = self.agents[agent_index]

#         # Use the previous agent's output as input to the current agent
#         if previous_output:
#             task = f"{task}\nPrevious result: {previous_output}"

#         # Run the current agent's task
#         output = agent.run(task)

#         # Add output to visited to avoid redundant work
#         self.visited.add(output)

#         # Recursively call DFS on the next agent
#         return self.dfs(agent_index + 1, task, output)

#     def run(self, task):
#         # Start DFS from the first agent
#         return self.dfs(0, task)


# # Get the OpenAI API key from the environment variable
# api_key = os.getenv("OPENAI_API_KEY")

# # Create an instance of the OpenAIChat class for each agent
# model = OpenAIChat(api_key=api_key, model_name="gpt-4o-mini", temperature=0.1)

# # Initialize multiple agents
# agent1 = Agent(
#     agent_name="Agent-1",
#     system_prompt="Agent 1 prompt description here",
#     llm=model,
#     max_loops=1,
#     autosave=True,
#     dynamic_temperature_enabled=True,
#     verbose=True,
#     streaming_on=True,
#     user_name="swarms_corp",
# )

# agent2 = Agent(
#     agent_name="Agent-2",
#     system_prompt="Agent 2 prompt description here",
#     llm=model,
#     max_loops=1,
#     autosave=True,
#     dynamic_temperature_enabled=True,
#     verbose=True,
#     streaming_on=True,
#     user_name="swarms_corp",
# )

# # Add more agents as needed
# # agent3 = ...
# # agent4 = ...

# # Create the swarm with the agents
# dfs_swarm = DFSSwarm(agents=[agent1, agent2])

# # Run the DFS swarm on a task
# result = dfs_swarm.run("Analyze the financial components of a startup's stock incentives.")
# print("Final Result:", result)


import os

from swarms import Agent, OpenAIChat


class DFSSwarm:
    def __init__(self, agents):
        self.agents = agents
        self.visited = set()

    def dfs(self, agent_index, task, previous_output=None):
        if agent_index >= len(self.agents):
            return previous_output

        agent = self.agents[agent_index]

        # If there is a previous output, include it in the task for the next agent
        if previous_output:
            task = f"{task}\nPrevious result: {previous_output}"

        # Run the current agent's task and get the output
        output = agent.run(task)

        # Log the output (optional)
        print(f"Agent {agent_index + 1} Output: {output}")

        # Add output to visited to avoid redundant work
        self.visited.add(output)

        # Recursively call DFS on the next agent
        return self.dfs(agent_index + 1, task, output)

    def run(self, task):
        # Start DFS from the first agent and return the final result
        final_result = self.dfs(0, task)
        return final_result


# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class for each agent
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize multiple agents
agent1 = Agent(
    agent_name="Agent-1",
    system_prompt="Analyze the financial components of a startup's stock incentives.",
    llm=model,
    # max_loops=2,
    # autosave=True,
    dynamic_temperature_enabled=True,
    verbose=True,
    streaming_on=True,
    user_name="swarms_corp",
)

agent2 = Agent(
    agent_name="Agent-2",
    system_prompt="Refine the analysis and identify any potential risks or benefits.",
    llm=model,
    # max_loops=2,
    # autosave=True,
    dynamic_temperature_enabled=True,
    verbose=True,
    streaming_on=True,
    user_name="swarms_corp",
)

# Add more agents as needed
# agent3 = ...
# agent4 = ...

# Create the swarm with the agents
dfs_swarm = DFSSwarm(agents=[agent1, agent2])

# Run the DFS swarm on a task
result = dfs_swarm.run(
    "Start with analyzing the financial components of a startup's stock incentives."
)
print("Final Result:", result)
@@ -1,356 +0,0 @@
"""
Boss -> JSON containing orders -> list of agents -> send orders to every agent


# Requirements
- Boss needs to know which agents are available [PROMPTING]
- Boss needs to output JSON commands sending tasks to every agent with the task and name
- Worker agents need to return a response to the boss
-> Boss returns the final output to the user
"""

import json
from typing import List

from pydantic import BaseModel, Field

from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation
from swarms.utils.loguru_logger import logger


class HiearchicalRequestDict(BaseModel):
    task: str = Field(
        None,
        title="Task",
        description="The task to send to the director agent.",
    )
    agent_name: str = Field(
        None,
        title="Agent Name",
        description="The name of the agent to send the task to.",
    )

    class Config:
        schema_extra = {
            "example": {
                "task": "task",
                "agent_name": "agent_name",
            }
        }


class HiearchicalSwarm(BaseSwarm):
    """
    A class representing a hierarchical swarm.

    Attributes:
        name (str): The name of the hierarchical swarm.
        description (str): The description of the hierarchical swarm.
        director (Agent): The director agent of the hierarchical swarm.
        agents (List[Agent]): The list of agents in the hierarchical swarm.
        max_loops (int): The maximum number of loops to run the swarm.
        long_term_memory_system (BaseSwarm): The long term memory system of the swarm.
        custom_parse_function (callable): A custom parse function for the swarm.

    Methods:
        swarm_initialization(*args, **kwargs): Initializes the hierarchical swarm.
        find_agent_by_name(agent_name: str = None, *args, **kwargs): Finds an agent in the swarm by name.
        parse_function_activate_agent(json_data: str = None, *args, **kwargs): Parses JSON data and activates the selected agent.
        select_agent_and_send_task(name: str = None, task: str = None, *args, **kwargs): Selects an agent and sends a task to them.
        run(task: str = None, *args, **kwargs): Runs the hierarchical swarm.
    """

    def __init__(
        self,
        name: str = None,
        description: str = None,
        director: Agent = None,
        agents: List[Agent] = None,
        max_loops: int = 1,
        long_term_memory_system: BaseSwarm = None,
        custom_parse_function: callable = None,
        rules: str = None,
        custom_director_prompt: str = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.name = name
        self.description = description
        self.director = director
        self.agents = agents
        self.max_loops = max_loops
        self.long_term_memory_system = long_term_memory_system
        self.custom_parse_function = custom_parse_function
        self.rules = rules
        self.custom_director_prompt = custom_director_prompt

        # Check that the agents list is not empty
        self.agent_error_handling_check()

        # Cap the director at a maximum of one loop
        if self.director.max_loops > 1:
            self.director.max_loops = 1

        # Give every agent the shared long term memory system, if one is provided
        if long_term_memory_system is not None:
            for agent in agents:
                agent.long_term_memory = long_term_memory_system

        # Initialize the swarm
        self.swarm_initialization()

        # Initialize the conversation message pool
        self.swarm_history = Conversation(
            time_enabled=True, *args, **kwargs
        )

        # Set the worker agents as tools for the director
        for agent in self.agents:
            self.director.add_tool(agent)

        # Set the system prompt for the director
        if custom_director_prompt is not None:
            self.director.system_prompt = custom_director_prompt
        else:
            self.director.system_prompt = self.has_sop()

    def swarm_initialization(self, *args, **kwargs):
        """
        Initializes the hierarchical swarm.

        Args:
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            None
        """
        logger.info(f"Initializing the hierarchical swarm: {self.name}")
        logger.info(f"Purpose of this swarm: {self.description}")

        # Log the number of agents and their names
        logger.info(f"Number of agents: {len(self.agents)}")
        logger.info(
            f"Agent names: {[agent.name for agent in self.agents]}"
        )

        # Check that agents is not empty
        if len(self.agents) == 0:
            logger.info("No agents found. Please add agents to the swarm.")
            return None

        # Check that a director is set
        if self.director is None:
            logger.info(
                "No director found. Please add a director to the swarm."
            )
            return None

        logger.info(
            f"Initialization complete for the hierarchical swarm: {self.name}"
        )

    def agent_error_handling_check(self):
        """
        Check that the agents list is not empty.

        Returns:
            None

        Raises:
            ValueError: If the agents list is empty.
        """
        if len(self.agents) == 0:
            raise ValueError(
                "No agents found. Please add agents to the swarm."
            )
        return None

    def find_agent_by_name(self, agent_name: str = None, *args, **kwargs):
        """
        Finds an agent in the swarm by name.

        Args:
            agent_name (str): The name of the agent to find.

        Returns:
            Agent: The agent with the specified name, or None if not found.
        """
        for agent in self.agents:
            if agent.name == agent_name:
                return agent
        return None

    def parse_function_activate_agent(
        self, json_data: str = None, *args, **kwargs
    ):
        """
        Parse the JSON data and activate the selected agent.

        Args:
            json_data (str): The JSON data containing the agent name and task.

        Returns:
            str: The response from the activated agent.

        Raises:
            json.JSONDecodeError: If the JSON data is invalid.
        """
        try:
            data = json.loads(json_data)

            # Check if the data is a list of agent task pairs
            if isinstance(data, list):
                responses = []
                # Iterate over the list of agent task pairs
                for agent_task in data:
                    name = agent_task.get("name")
                    task = agent_task.get("task")

                    response = self.select_agent_and_send_task(
                        name, task, *args, **kwargs
                    )

                    responses.append(response)
                return responses
            else:
                name = data.get("name")
                task = data.get("task")

                response = self.select_agent_and_send_task(
                    name, task, *args, **kwargs
                )

                return response
        except json.JSONDecodeError:
            logger.error("Invalid JSON data, try again.")
            raise

    def select_agent_and_send_task(
        self, name: str = None, task: str = None, *args, **kwargs
    ):
        """
        Select an agent from the list and send a task to them.

        Args:
            name (str): The name of the agent to send the task to.
            task (str): The task to send to the agent.

        Returns:
            str: The response from the agent.

        Raises:
            KeyError: If the agent name is not found in the list of agents.
        """
        try:
            # Check to see if the agent name is in the list of agents
            agent = self.find_agent_by_name(name)
            if agent is None:
                return "Invalid agent name. Please select a valid agent from the swarm."

            response = agent.run(task, *args, **kwargs)

            return response
        except Exception as e:
            logger.error(f"Error: {e}")
            raise e

    def run(self, task: str = None, *args, **kwargs):
        """
        Run the hierarchical swarm.

        Args:
            task (str): The task to send to the director agent.

        Returns:
            str: The response from the director agent.

        Raises:
            Exception: If an error occurs while running the swarm.
        """
        try:
            loop = 0

            # While the loop is less than max loops
            while loop < self.max_loops:
                # Run the director
                response = self.director.run(task, *args, **kwargs)

                # Log the director's response
                self.swarm_history.add(self.director.agent_name, response)

                # Run agents
                if self.custom_parse_function is not None:
                    response = self.custom_parse_function(response)
                else:
                    response = self.parse_function_activate_agent(response)

                loop += 1

                task = response

            return response
        except Exception as e:
            logger.error(f"Error: {e}")
            raise e

    def run_worker_agent(
        self, name: str = None, task: str = None, *args, **kwargs
    ):
        """
        Run a worker agent.

        Args:
            name (str): The name of the worker agent.
            task (str): The task to send to the worker agent.

        Returns:
            str: The response from the worker agent.

        Raises:
            Exception: If an error occurs while running the worker agent.
        """
        try:
            # Find the agent by name
            agent = self.find_agent_by_name(name)

            # Run the agent
            response = agent.run(task, *args, **kwargs)

            return response
        except Exception as e:
            logger.error(f"Error: {e}")
            raise e

    def has_sop(self):
        # We need to check the names of the agents and their descriptions or system prompts
        # TODO: Provide many-shot examples of the agents available, and perhaps what tools they have access to
        # TODO: Provide better reasoning prompts, such as when to use a certain agent and specific things NOT to do.
        return f"""

        You're a director boss agent orchestrating worker agents with tasks. Select the agent most relevant to
        the input task and give them a task. If there is no agent relevant to the input task, then say so and be simple and direct.
        These are the agents available; call on them if you need them for a specific
        task or operation:

        Number of agents: {len(self.agents)}
        Agents Available: {
            [
                {"name": agent.name, "description": agent.system_prompt}
                for agent in self.agents
            ]
        }

        """
@ -0,0 +1,293 @@
|
||||
import os
|
||||
from typing import List, Any
|
||||
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from swarms import Agent, OpenAIChat
|
||||
from swarms.models.openai_function_caller import OpenAIFunctionCaller
|
||||
from swarms.structs.concat import concat_strings
|
||||
|
||||
api_key = os.getenv("OPENAI_API_KEY")
|
||||
|
||||
# Create an instance of the OpenAIChat class
|
||||
model = OpenAIChat(
|
||||
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
|
||||
)
|
||||
|
||||
|
||||
class AgentSpec(BaseModel):
|
||||
"""
|
||||
A class representing the specifications of an agent.
|
||||
|
||||
Attributes:
|
||||
agent_name (str): The name of the agent.
|
||||
system_prompt (str): The system prompt for the agent.
|
||||
agent_description (str): The description of the agent.
|
||||
max_tokens (int): The maximum number of tokens to generate in the API response.
|
||||
temperature (float): A parameter that controls the randomness of the generated text.
|
||||
context_window (int): The context window for the agent.
|
||||
task (str): The main task for the agent.
|
||||
"""
|
||||
|
||||
agent_name: str
|
||||
system_prompt: str
|
||||
agent_description: str
|
||||
task: str
|
||||
|
||||
|
||||
class AgentTeam(BaseModel):
|
||||
agents: List[AgentSpec] = Field(
|
||||
...,
|
||||
description="The list of agents in the team",
|
||||
)
|
||||
flow: str = Field(
|
||||
...,
|
||||
description="Agent Name -> ",
|
||||
)
|
||||
|
||||
|
||||
class SwarmSpec(BaseModel):
|
||||
"""
|
||||
A class representing the specifications of a swarm of agents.
|
||||
|
||||
Attributes:
|
||||
multiple_agents (List[AgentSpec]): The list of agents in the swarm.
|
||||
"""
|
||||
|
||||
swarm_name: str = Field(
|
||||
...,
|
||||
description="The name of the swarm: e.g., 'Marketing Swarm' or 'Finance Swarm'",
|
||||
)
|
||||
multiple_agents: List[AgentSpec]
|
||||
rules: str = Field(
|
||||
...,
|
||||
description="The rules for all the agents in the swarm: e.g., All agents must return code. Be very simple and direct",
|
||||
)
|
||||
plan: str = Field(
|
||||
...,
|
||||
description="The plan for the swarm: e.g., 'Create a marketing campaign for the new product launch.'",
|
||||
)


class HierarchicalAgentSwarm:
    """
    A class to create and manage a hierarchical swarm of agents.

    Methods:
        __init__(director, agents, max_loops, create_agents_on): Initializes the swarm.
        create_agent(agent_name, system_prompt, agent_description, task): Creates and runs an individual agent.
        parse_json_for_agents_then_create_agents(function_call): Parses a JSON function call to create multiple agents.
        run(task): Runs the director to create and execute agents based on the provided task.
    """

    def __init__(
        self,
        director: Any = None,
        agents: List[Agent] = None,
        max_loops: int = 1,
        create_agents_on: bool = False,
    ):
        """
        Initializes the HierarchicalAgentSwarm.

        Args:
            director (Any): The director (e.g., an OpenAIFunctionCaller) that plans and delegates.
            agents (List[Agent]): Pre-existing agents available to the swarm, if any.
            max_loops (int): The maximum number of loops to run.
            create_agents_on (bool): Whether to create new agents dynamically.
        """
        self.director = director
        self.agents = agents if agents is not None else []
        self.max_loops = max_loops
        self.create_agents_on = create_agents_on

        # Check that the director and loop count are set
        self.agents_check()

    def agents_check(self):
        if self.director is None:
            raise ValueError("The director is not set.")

        # if self.agents is None:
        #     raise ValueError("The agents are not set.")

        if self.max_loops == 0:
            raise ValueError("The max_loops is not set.")

    def create_agent(
        self,
        agent_name: str,
        system_prompt: str,
        agent_description: str,
        task: str = None,
    ) -> str:
        """
        Creates an individual agent and runs it on the given task.

        Args:
            agent_name (str): The name of the agent.
            system_prompt (str): The system prompt for the agent.
            agent_description (str): The description of the agent.
            task (str): The task for the agent to run.

        Returns:
            str: The output of the agent's run.
        """
        # name = agent_name.replace(" ", "_")
        logger.info(f"Creating agent: {agent_name}")
        agent = Agent(
            agent_name=agent_name,
            llm=model,
            system_prompt=system_prompt,
            agent_description=agent_description,
            retry_attempts=1,
            verbose=False,
            dashboard=False,
        )
        self.agents.append(agent)

        logger.info(f"Running agent: {agent_name}")
        output = agent.run(task)

        # create_file_in_folder(
        #     agent.workspace_dir, f"{agent_name}_output.txt", str(output)
        # )

        return output

    def parse_json_for_agents_then_create_agents(
        self, function_call: dict
    ) -> str:
        """
        Parses a JSON function call to create and run a list of agents.

        Args:
            function_call (dict): The JSON function call specifying the agents.

        Returns:
            str: The concatenated outputs of the created agents.
        """
        responses = []
        logger.info("Parsing JSON for agents")
        for agent in function_call["multiple_agents"]:
            out = self.create_agent(
                agent_name=agent["agent_name"],
                system_prompt=agent["system_prompt"],
                agent_description=agent["agent_description"],
                task=agent["task"],
            )
            responses.append(out)
        return concat_strings(responses)

    def run(self, task: str) -> str:
        """
        Runs the director to create and execute agents based on the provided task.

        Args:
            task (str): The task for which the agents need to be created and executed.

        Returns:
            str: The concatenated outputs of the created agents.
        """
        logger.info("Running the swarm")

        # Run the director (function caller)
        function_call = self.director.run(task)

        # Log the function call
        self.log_director_function_call(function_call)

        # Parse the JSON function call and create agents -> run agents
        return self.parse_json_for_agents_then_create_agents(function_call)

    def log_director_function_call(self, function_call: dict):
        # Log the agents the director creates
        logger.info(f"Swarm Name: {function_call['swarm_name']}")
        # Log the plan
        logger.info(f"Plan: {function_call['plan']}")
        logger.info(
            f"Number of agents: {len(function_call['multiple_agents'])}"
        )

        for agent in function_call["multiple_agents"]:
            logger.info(f"Agent: {agent['agent_name']}")
            # logger.info(f"Task: {agent['task']}")
            logger.info(f"Description: {agent['agent_description']}")
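
# For orientation, a minimal sketch (an assumption, not part of the diff) of the
# SwarmSpec-shaped dict that parse_json_for_agents_then_create_agents expects
# back from the director:
# example_function_call = {
#     "swarm_name": "Social Media Swarm",
#     "rules": "All agents must return code.",
#     "plan": "Generate posting code for each platform.",
#     "multiple_agents": [
#         {
#             "agent_name": "Twitter-Agent",
#             "system_prompt": "You write Python code for the Twitter API.",
#             "agent_description": "Generates Twitter posting code.",
#             "task": "Write a function that posts a message to Twitter.",
#         },
#     ],
# }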

# Example usage:
HIEARCHICAL_AGENT_SYSTEM_PROMPT = """
Here's a full-fledged system prompt for a director boss agent, complete with instructions and many-shot examples:

---

**System Prompt: Director Boss Agent**

### Role:
You are a Director Boss Agent responsible for orchestrating a swarm of worker agents. Your primary duty is to serve the user efficiently, effectively, and skillfully. You dynamically create new agents when necessary or utilize existing agents, assigning them tasks that align with their capabilities. You must ensure that each agent receives clear, direct, and actionable instructions tailored to their role.

### Key Responsibilities:
1. **Task Delegation:** Assign tasks to the most relevant agent. If no relevant agent exists, create a new one with an appropriate name and system prompt.
2. **Efficiency:** Ensure that tasks are completed swiftly and with minimal resource expenditure.
3. **Clarity:** Provide orders that are simple, direct, and actionable. Avoid ambiguity.
4. **Dynamic Decision Making:** Assess the situation and choose the most effective path, whether that involves using an existing agent or creating a new one.
5. **Monitoring:** Continuously monitor the progress of each agent and provide additional instructions or corrections as necessary.

### Instructions:
- **Identify the Task:** Analyze the input task to determine its nature and requirements.
- **Agent Selection/Creation:**
  - If an agent is available and suited for the task, assign the task to that agent.
  - If no suitable agent exists, create a new agent with a relevant system prompt.
- **Task Assignment:** Provide the selected agent with explicit and straightforward instructions.
- **Reasoning:** Justify your decisions when selecting or creating agents, focusing on the efficiency and effectiveness of task completion.

"""


director = OpenAIFunctionCaller(
    system_prompt=HIEARCHICAL_AGENT_SYSTEM_PROMPT,
    max_tokens=3000,
    temperature=0.4,
    base_model=SwarmSpec,
    parallel_tool_calls=False,
)

# Initialize the hierarchical agent swarm with the necessary parameters
swarm = HierarchicalAgentSwarm(
    director=director,
    max_loops=1,
)

# # Run the swarm with a task
# agents = swarm.run(
#     """
#     Create a swarm of agents for a marketing campaign to promote
#     the swarms workshop: [Workshop][Automating Business Operations with Hierarchical Agent Swarms][Swarms Framework + GPT4o],
#     create agents for twitter, linkedin, email, facebook, and instagram.

#     The date is Saturday, August 17 4:00 PM - 5:00 PM

#     Link is: https://lu.ma/ew4r4s3i

#     """
# )


# Run the swarm with a task
agents = swarm.run(
    """
    Create a swarm of agents that generate Python code
    to send an API request to social media platforms through their APIs.
    Craft a single function to send a message to all platforms, add types, and write
    clean code. Each agent needs to generate code for a specific platform; they
    must return the Python code only.
    """
)
@ -1,227 +1,241 @@
import os
from typing import List

from pydantic import BaseModel

from swarms.models.openai_function_caller import OpenAIFunctionCaller
from swarms import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.concat import concat_strings
from loguru import logger
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation

api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)


# Initialize the agents
growth_agent1 = Agent(
    agent_name="marketing_specialist",
    system_prompt="You're the marketing specialist, your purpose is to help companies grow by improving their marketing strategies!",
    agent_description="Improve a company's marketing strategies!",
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="marketing_specialist.json",
    stopping_token="Stop!",
    context_length=1000,
)

growth_agent2 = Agent(
    agent_name="sales_specialist",
    system_prompt="You're the sales specialist, your purpose is to help companies grow by improving their sales strategies!",
    agent_description="Improve a company's sales strategies!",
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="sales_specialist.json",
    stopping_token="Stop!",
    context_length=1000,
)

growth_agent3 = Agent(
    agent_name="product_development_specialist",
    system_prompt="You're the product development specialist, your purpose is to help companies grow by improving their product development strategies!",
    agent_description="Improve a company's product development strategies!",
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="product_development_specialist.json",
    stopping_token="Stop!",
    context_length=1000,
)

team = [growth_agent1, growth_agent2, growth_agent3]


# class HiearchicalSwarm(BaseModel):
#     agents: List[Agent]
#     director: Agent
#     planner: Agent
#     max_loops: int = 3
#     verbose: bool = True

#     def run(self, task: str):
#         # Plan
#         # Plan -> JSON Function call -> workers -> response fetch back to boss -> planner
#         responses = []
#         responses.append(task)

#         for _ in range(self.max_loops):
#             # Plan
#             plan = self.planner.run(concat_strings(responses))
#             logger.info(f"Agent {self.planner.agent_name} planned: {plan}")
#             responses.append(plan)

#             # Execute json function calls
#             calls = self.director.run(plan)
#             logger.info(
#                 f"Agent {self.director.agent_name} called: {calls}"
#             )
#             responses.append(calls)
#             # Parse and send tasks to agents
#             output = parse_then_send_tasks_to_agents(self.agents, calls)

#             # Fetch back to boss
#             responses.append(output)

#         return concat_strings(responses)

#     def __call__(self, task: str):
#         responses = []
#         responses.append(task)

#         for _ in range(self.max_loops):
#             output = self.step(task, responses)
#             responses.append(output)

#         return concat_strings(responses)

#     def step(self, responses: List[str] = None) -> str:
#         # Plan
#         # Plan -> JSON Function call -> workers -> response fetch back to boss -> planner

#         # Plan
#         plan = self.planner.run(concat_strings(responses))
#         logger.info(f"Agent {self.planner.agent_name} planned: {plan}")
#         responses.append(plan)

#         # Execute json function calls
#         calls = self.director.run(plan)
#         logger.info(f"Agent {self.director.agent_name} called: {calls}")
#         responses.append(calls)
#         # Parse and send tasks to agents
#         output = parse_then_send_tasks_to_agents(self.agents, calls)

#         # Fetch back to boss
#         responses.append(output)

#         return concat_strings(responses)

#     def plan(self, task: str, responses: List[str] = None):
#         # Plan
#         # Plan -> JSON Function call -> workers -> response fetch back to boss -> planner
#         # responses = []
#         # responses.append(task)

#         # Plan
#         plan = self.planner.run(concat_strings(responses))
#         logger.info(f"Agent {self.planner.agent_name} planned: {plan}")
#         responses.append(plan)

#         return concat_strings(responses)


def agents_list(
    agents: List[Agent] = team,
) -> str:
    responses = []

    for agent in agents:
        name = agent.agent_name
        description = agent.description
        response = f"Agent Name {name}: Description {description}"
        responses.append(response)

    return concat_strings(responses)


class HierarchicalOrderCall(BaseModel):
    agent_name: str
    task: str


class CallTeam(BaseModel):
    calls: List[HierarchicalOrderCall]


class HiearchicalSwarm(BaseSwarm):
    def __init__(
        self,
        agents: List[Agent],
        director: Agent,
        planner: Agent = None,
        name: str = "HierarchicalSwarm",
        description: str = "A swarm of agents that can be used to distribute tasks to a team of agents.",
        max_loops: int = 3,
        verbose: bool = True,
        create_agents_from_scratch: bool = False,
    ):
        super().__init__()
        self.agents = agents
        self.director = director
        # run() below relies on self.planner; fall back to the director when no
        # separate planner agent is provided.
        self.planner = planner if planner is not None else director
        self.max_loops = max_loops
        self.verbose = verbose
        self.name = name
        self.description = description
        self.create_agents_from_scratch = create_agents_from_scratch

        self.agents_check()
        self.director_check()

        # Initialize the conversation
        self.conversation = Conversation(
            time_enabled=True,
        )

        logger.info(f"Initialized {self.name} hierarchical swarm")

    def agents_check(self):
        if len(self.agents) == 0:
            raise ValueError(
                "No agents found. Please add agents to the swarm."
            )
        return None

    def director_check(self):
        if self.director is None:
            raise ValueError(
                "No director found. Please add a director to the swarm."
            )
        return None

    def run(self, task: str):
        # Plan
        # Plan -> JSON Function call -> workers -> response fetch back to boss -> planner
        responses = []
        responses.append(task)

        for _ in range(self.max_loops):
            # Plan
            plan = self.planner.run(concat_strings(responses))
            logger.info(f"Agent {self.planner.agent_name} planned: {plan}")
            responses.append(plan)

            # Execute json function calls
            calls = self.director.run(plan)
            logger.info(
                f"Agent {self.director.agent_name} called: {calls}"
            )
            responses.append(calls)
            # Parse and send tasks to agents
            output = self.parse_then_send_tasks_to_agents(calls)

            # Fetch back to boss
            responses.append(output)

        return concat_strings(responses)
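
    # For reference, a minimal sketch (an assumption, not from the diff) of the
    # CallTeam-shaped dict that parse_then_send_tasks_to_agents below expects:
    # {
    #     "calls": [
    #         {"agent_name": "marketing_specialist", "task": "Draft a launch tweet."},
    #     ]
    # }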

    def run_worker_agent(
        self, name: str = None, task: str = None, *args, **kwargs
    ):
        """
        Run the worker agent.

        Args:
            name (str): The name of the worker agent.
            task (str): The task to send to the worker agent.

        Returns:
            str: The response from the worker agent.

        Raises:
            Exception: If an error occurs while running the worker agent.
        """
        try:
            # Find the agent by name
            agent = self.find_agent_by_name(name)

            # Run the agent
            response = agent.run(task, *args, **kwargs)

            return response
        except Exception as e:
            logger.error(f"Error: {e}")
            raise e

    def find_agent_by_name(self, agent_name: str = None, *args, **kwargs):
        """
        Finds an agent in the swarm by name.

        Args:
            agent_name (str): The name of the agent to find.

        Returns:
            Agent: The agent with the specified name, or None if not found.
        """
        for agent in self.agents:
            if agent.agent_name == agent_name:
                return agent
        return None

    def select_agent_and_send_task(
        self, name: str = None, task: str = None, *args, **kwargs
    ):
        """
        Select an agent from the list and send a task to them.

        Args:
            name (str): The name of the agent to send the task to.
            task (str): The task to send to the agent.

        Returns:
            str: The response from the agent.

        Raises:
            Exception: If an error occurs while running the agent.
        """
        try:
            # Check to see if the agent name is in the list of agents
            agent = self.find_agent_by_name(name)
            if agent is None:
                return "Invalid agent name. Please select one of the available agents."

            response = agent.run(task, *args, **kwargs)

            return response
        except Exception as e:
            logger.error(f"Error: {e}")
            raise e

    def agents_list(
        self,
    ) -> str:
        logger.info("Listing agents")

        for agent in self.agents:
            name = agent.agent_name
            description = agent.description or "No description available."
            logger.info(f"Agent: {name}, Description: {description}")
            self.conversation.add(name, description)

        return self.conversation.return_history_as_string()

    def parse_then_send_tasks_to_agents(self, response: dict):
        # Initialize an empty list to store the output of each agent
        output = []

        # Loop over the tasks in the response
        for call in response["calls"]:
            name = call["agent_name"]
            task = call["task"]

            # Loop over the agents
            for agent in self.agents:
                # If the agent's name matches the name in the task, run the task
                if agent.agent_name == name:
                    out = agent.run(task)
                    print(out)

                    output.append(f"{name}: {out}")

                    # Store the output in the dictionary
                    # output[name] = out
                    break

        return output

    def has_sop(self):
        # We need to check the name of the agents and their description or system prompt
        # TODO: Provide many-shot examples of the agents available, and maybe even what tools they have access to
        # TODO: Provide better reasoning prompt titles, such as when to use a certain agent, and specific
        # things NOT to do.
        return f"""
        You're a director boss agent orchestrating worker agents with tasks. Select the agent most relevant to
        the input task and give them a task. If there is no agent relevant to the input task, then say so and be simple and direct.
        These are the available agents; call them if you need them for a specific
        task or operation:

        Number of agents: {len(self.agents)}
        Agents Available: {
            [
                {"name": agent.agent_name, "description": agent.system_prompt}
                for agent in self.agents
            ]
        }
        """


# # Example usage:
# system_prompt = f"""
# You're a director agent, your responsibility is to serve the user efficiently, effectively and skillfully. You have a swarm of agents available to distribute tasks to; interact with the user and then submit tasks to the worker agents. Provide orders to the worker agents that are direct, explicit, and simple. Ensure that they are given tasks that are understandable, actionable, and simple to execute.


# ######
# Workers available:

# {agents_list(team)}


# """


# # Initialize the function caller
# function_caller = OpenAIFunctionCaller(
#     system_prompt=system_prompt,
#     openai_api_key=os.getenv("OPENAI_API_KEY"),
#     max_tokens=500,
#     temperature=0.5,
#     base_model=CallTeam,
# )

# # Run the function caller
# response = function_caller.run(
#     "Now let's grow the company! Send an order to the marketing specialist, sales specialist, and product development specialist to improve the company's growth strategies."
# )
# print(response)
# print(type(response))

# out = parse_then_send_tasks_to_agents(team, response)
# print(out)
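
# A minimal, hypothetical wiring sketch (not part of the diff): the director is
# assumed to be an OpenAIFunctionCaller whose .run() returns a CallTeam-shaped dict.
# hierarchical_swarm = HiearchicalSwarm(
#     agents=team,
#     director=OpenAIFunctionCaller(
#         system_prompt="Delegate tasks to the team.",
#         openai_api_key=os.getenv("OPENAI_API_KEY"),
#         max_tokens=500,
#         temperature=0.5,
#         base_model=CallTeam,
#     ),
#     max_loops=1,
# )
# print(hierarchical_swarm.run("Draft a growth plan for Q4."))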
@ -0,0 +1,197 @@
import os
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Callable, List, Optional

from swarms import Agent, OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.loguru_logger import logger


class MonteCarloSwarm(BaseSwarm):
    """
    MonteCarloSwarm leverages multiple agents to collaborate in a Monte Carlo fashion.
    Each agent's output is passed to the next, refining the result progressively.
    Supports parallel execution, dynamic agent selection, and custom result aggregation.

    Attributes:
        agents (List[Agent]): A list of agents that will participate in the swarm.
        parallel (bool): If True, agents will run in parallel.
        result_aggregator (Callable[[List[Any]], Any]): A function to aggregate results from agents.
        max_workers (Optional[int]): The maximum number of threads for parallel execution.
    """

    def __init__(
        self,
        agents: List[Agent],
        parallel: bool = False,
        result_aggregator: Optional[Callable[[List[Any]], Any]] = None,
        max_workers: Optional[int] = None,
        *args,
        **kwargs,
    ) -> None:
        """
        Initializes the MonteCarloSwarm with a list of agents.

        Args:
            agents (List[Agent]): A list of agents to include in the swarm.
            parallel (bool): If True, agents will run in parallel. Default is False.
            result_aggregator (Optional[Callable[[List[Any]], Any]]): A function to aggregate results from agents.
            max_workers (Optional[int]): The maximum number of threads for parallel execution.
        """
        super().__init__(agents=agents, *args, **kwargs)

        if not agents:
            raise ValueError("The agents list cannot be empty.")

        self.agents = agents
        self.parallel = parallel
        self.result_aggregator = (
            result_aggregator or self.default_aggregator
        )
        self.max_workers = max_workers or len(agents)

    def run(self, task: str) -> Any:
        """
        Runs the MonteCarloSwarm with the given input, passing the output of each agent
        to the next one in the list or running agents in parallel.

        Args:
            task (str): The initial input to provide to the first agent.

        Returns:
            Any: The final output after all agents have processed the input.
        """
        logger.info(
            f"Starting MonteCarloSwarm with parallel={self.parallel}"
        )

        if self.parallel:
            results = self._run_parallel(task)
        else:
            results = self._run_sequential(task)

        final_output = self.result_aggregator(results)
        logger.info(
            f"MonteCarloSwarm completed. Final output: {final_output}"
        )
        return final_output

    def _run_sequential(self, task: str) -> List[Any]:
        """
        Runs the agents sequentially, passing each agent's output to the next.

        Args:
            task (str): The initial input to provide to the first agent.

        Returns:
            List[Any]: A list of results from each agent.
        """
        results = []
        current_input = task
        for i, agent in enumerate(self.agents):
            logger.info(f"Agent {i + 1} processing sequentially...")
            current_output = agent.run(current_input)
            results.append(current_output)
            current_input = current_output
        return results

    def _run_parallel(self, task: str) -> List[Any]:
        """
        Runs the agents in parallel, each receiving the same initial input.

        Args:
            task (str): The initial input to provide to all agents.

        Returns:
            List[Any]: A list of results from each agent.
        """
        results = []
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_agent = {
                executor.submit(agent.run, task): agent
                for agent in self.agents
            }
            for future in as_completed(future_to_agent):
                try:
                    result = future.result()
                    results.append(result)
                    logger.info(f"Agent completed with result: {result}")
                except Exception as e:
                    logger.error(f"Agent encountered an error: {e}")
                    results.append(None)
        return results

    @staticmethod
    def default_aggregator(results: List[Any]) -> Any:
        """
        Default result aggregator that returns the full list of results unchanged.

        Args:
            results (List[Any]): A list of results from agents.

        Returns:
            Any: The list of results.
        """
        return results


def average_aggregator(results: List[float]) -> float:
    return sum(results) / len(results) if results else 0.0


# Example usage
if __name__ == "__main__":
    # Get the OpenAI API key from the environment variable
    api_key = os.getenv("OPENAI_API_KEY")

    # Create an instance of the OpenAIChat class
    model = OpenAIChat(
        api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
    )

    # Initialize the agents
    agents_list = [
        Agent(
            agent_name="Financial-Analysis-Agent-1",
            system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
            llm=model,
            max_loops=1,
            autosave=False,
            dashboard=False,
            verbose=True,
            streaming_on=True,
            dynamic_temperature_enabled=True,
            saved_state_path="finance_agent_1.json",
            retry_attempts=3,
            context_length=200000,
        ),
        Agent(
            agent_name="Financial-Analysis-Agent-2",
            system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
            llm=model,
            max_loops=1,
            autosave=False,
            dashboard=False,
            verbose=True,
            streaming_on=True,
            dynamic_temperature_enabled=True,
            saved_state_path="finance_agent_2.json",
            retry_attempts=3,
            context_length=200000,
        ),
        # Add more agents as needed
    ]

    # Initialize the MonteCarloSwarm with parallel execution enabled
    swarm = MonteCarloSwarm(
        agents=agents_list, parallel=True, max_workers=2
    )

    # Run the swarm with an initial query
    final_output = swarm.run(
        "What are the components of a startup's stock incentive equity plan?"
    )
    print("Final output:", final_output)
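
    # A custom aggregator can be plugged in the same way. A minimal sketch (an
    # assumption, not part of the diff): pick the longest response, on the
    # heuristic that it is the most detailed.
    def longest_response_aggregator(results: List[Any]) -> Any:
        candidates = [r for r in results if r is not None]
        return max(candidates, key=lambda r: len(str(r))) if candidates else None

    picky_swarm = MonteCarloSwarm(
        agents=agents_list,
        parallel=True,
        result_aggregator=longest_response_aggregator,
        max_workers=2,
    )
    print("Longest output:", picky_swarm.run("Summarize a 409A valuation."))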
@ -0,0 +1,136 @@
import hashlib
import json
import os
from typing import Any, Dict, Optional, List


class PromptCache:
    """
    A framework to handle prompt caching for any LLM API. This reduces costs, latency,
    and allows reuse of long-form context across multiple API requests.
    """

    def __init__(
        self,
        cache_dir: str = "cache",
        llm_api_function: Optional[Any] = None,
        text: Optional[List[str]] = None,
    ):
        """
        Initializes the PromptCache instance.

        Args:
            cache_dir (str): Directory where cached responses are stored.
            llm_api_function (Optional[Any]): The function that interacts with the LLM API.
                It should accept a prompt and return the response.
            text (Optional[List[str]]): Optional long-form context to associate with the cache.
        """
        self.cache_dir = cache_dir
        self.llm_api_function = llm_api_function
        self.text = text

        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

    def _generate_cache_key(self, prompt: str) -> str:
        """
        Generates a unique cache key for a given prompt.

        Args:
            prompt (str): The prompt to generate a cache key for.

        Returns:
            str: A unique cache key.
        """
        return hashlib.md5(prompt.encode("utf-8")).hexdigest()

    def _cache_file_path(self, cache_key: str) -> str:
        """
        Constructs the file path for the cache file.

        Args:
            cache_key (str): The cache key for the prompt.

        Returns:
            str: The path to the cache file.
        """
        return os.path.join(self.cache_dir, f"{cache_key}.json")

    def _load_from_cache(self, cache_key: str) -> Optional[Dict[str, Any]]:
        """
        Loads a cached response if available.

        Args:
            cache_key (str): The cache key for the prompt.

        Returns:
            Optional[Dict[str, Any]]: The cached response, or None if not found.
        """
        cache_file = self._cache_file_path(cache_key)
        if os.path.exists(cache_file):
            with open(cache_file, "r") as f:
                return json.load(f)
        return None

    def _save_to_cache(
        self, cache_key: str, response: Dict[str, Any]
    ) -> None:
        """
        Saves the API response to the cache.

        Args:
            cache_key (str): The cache key for the prompt.
            response (Dict[str, Any]): The API response to be cached.
        """
        cache_file = self._cache_file_path(cache_key)
        with open(cache_file, "w") as f:
            json.dump(response, f)

    def get_response(self, prompt: str) -> Dict[str, Any]:
        """
        Retrieves the response for a prompt, using cache if available.

        Args:
            prompt (str): The prompt to retrieve the response for.

        Returns:
            Dict[str, Any]: The API response, either from cache or freshly fetched.
        """
        cache_key = self._generate_cache_key(prompt)
        cached_response = self._load_from_cache(cache_key)

        if cached_response is not None:
            return cached_response

        # If the response is not cached, use the LLM API to get the response
        if self.llm_api_function is None:
            raise ValueError("LLM API function is not defined.")

        response = self.llm_api_function(prompt)
        self._save_to_cache(cache_key, response)

        return response

    def clear_cache(self) -> None:
        """
        Clears the entire cache directory.
        """
        for cache_file in os.listdir(self.cache_dir):
            os.remove(os.path.join(self.cache_dir, cache_file))


# Example usage
if __name__ == "__main__":
    # Dummy LLM API function
    def mock_llm_api(prompt: str) -> Dict[str, Any]:
        return {"response": f"Mock response to '{prompt}'"}

    # Initialize the cache
    cache = PromptCache(llm_api_function=mock_llm_api)

    # Example prompts
    prompt1 = "What is the capital of France?"
    prompt2 = "Explain the theory of relativity."

    # Get responses
    print(cache.get_response(prompt1))
    print(cache.get_response(prompt2))
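
    # A hedged sketch (an assumption, not part of the diff): wiring a real model
    # into the cache by wrapping its output in a JSON-serializable dict.
    # from swarms import OpenAIChat
    # model = OpenAIChat(api_key=os.getenv("OPENAI_API_KEY"), model_name="gpt-4o-mini")
    # cache = PromptCache(llm_api_function=lambda p: {"response": model.run(p)})
    # print(cache.get_response("What is the capital of France?"))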
@ -0,0 +1,106 @@
import hashlib
from typing import Dict, Optional


class PromptCache:
    """
    A class to manage prompt caching for LLMs, allowing the reuse of context across multiple API requests.

    This reduces costs and latency, particularly for long prompts.

    Attributes:
        cache (Dict[str, str]): A dictionary to store cached prompts and their corresponding responses.
    """

    def __init__(self) -> None:
        """Initializes the PromptCache with an empty cache."""
        self.cache: Dict[str, str] = {}

    def _hash_prompt(self, prompt: str) -> str:
        """
        Generates a unique hash for a given prompt.

        Args:
            prompt (str): The prompt to hash.

        Returns:
            str: The generated hash.
        """
        return hashlib.sha256(prompt.encode()).hexdigest()

    def add_to_cache(self, prompt: str, response: str) -> None:
        """
        Adds a prompt and its corresponding response to the cache.

        Args:
            prompt (str): The prompt string.
            response (str): The response generated by the LLM.

        Returns:
            None
        """
        prompt_hash = self._hash_prompt(prompt)
        self.cache[prompt_hash] = response

    def get_from_cache(self, prompt: str) -> Optional[str]:
        """
        Retrieves a cached response for a given prompt, if available.

        Args:
            prompt (str): The prompt string to retrieve the cached response for.

        Returns:
            Optional[str]: The cached response if found, otherwise None.
        """
        prompt_hash = self._hash_prompt(prompt)
        return self.cache.get(prompt_hash)

    def clear_cache(self) -> None:
        """
        Clears the entire prompt cache.

        Returns:
            None
        """
        self.cache.clear()

    def cache_size(self) -> int:
        """
        Returns the number of items currently in the cache.

        Returns:
            int: The size of the cache.
        """
        return len(self.cache)

    def remove_from_cache(self, prompt: str) -> None:
        """
        Removes a specific prompt and its response from the cache.

        Args:
            prompt (str): The prompt string to remove from the cache.

        Returns:
            None
        """
        prompt_hash = self._hash_prompt(prompt)
        if prompt_hash in self.cache:
            del self.cache[prompt_hash]


# Example usage:

# Initialize the cache
prompt_cache = PromptCache()

# Add a prompt and response to the cache
prompt = "What is the capital of France?"
response = "The capital of France is Paris."
prompt_cache.add_to_cache(prompt, response)

# Retrieve the response from the cache
cached_response = prompt_cache.get_from_cache(prompt)
if cached_response:
    print("Cached response:", cached_response)
else:
    print("Prompt not found in cache.")
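
# Continuing the example: inspect and prune the cache with the helpers above.
print("Cache size:", prompt_cache.cache_size())  # -> 1
prompt_cache.remove_from_cache(prompt)
print("Cache size after removal:", prompt_cache.cache_size())  # -> 0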
@ -0,0 +1,175 @@
import os
from swarms import Agent, OpenAIChat
from typing import List, Union, Callable
from collections import Counter

# Aggregation functions


def aggregate_most_common_result(results: List[str]) -> str:
    """
    Aggregate results using the most common result.

    Args:
        results (List[str]): List of results from each iteration.

    Returns:
        str: The most common result.
    """
    result_counter = Counter(results)
    most_common_result = result_counter.most_common(1)[0][0]
    return most_common_result


def aggregate_weighted_vote(results: List[str], weights: List[int]) -> str:
    """
    Aggregate results using a weighted voting system.

    Args:
        results (List[str]): List of results from each iteration.
        weights (List[int]): List of weights corresponding to each result.

    Returns:
        str: The result with the highest weighted vote.
    """
    weighted_results = Counter()
    for result, weight in zip(results, weights):
        weighted_results[result] += weight

    weighted_result = weighted_results.most_common(1)[0][0]
    return weighted_result


def aggregate_average_numerical(results: List[Union[str, float]]) -> float:
    """
    Aggregate results by averaging numerical outputs.

    Args:
        results (List[Union[str, float]]): List of numerical results from each iteration.

    Returns:
        float: The average of the numerical results.
    """
    numerical_results = [
        float(result) for result in results if is_numerical(result)
    ]
    if numerical_results:
        return sum(numerical_results) / len(numerical_results)
    else:
        return float("nan")  # or handle non-numerical case as needed


def aggregate_consensus(results: List[str]) -> Union[str, None]:
    """
    Aggregate results by checking if there's a consensus (all results are the same).

    Args:
        results (List[str]): List of results from each iteration.

    Returns:
        Union[str, None]: The consensus result if there is one, otherwise None.
    """
    if all(result == results[0] for result in results):
        return results[0]
    else:
        return None  # or handle lack of consensus as needed


def is_numerical(value: str) -> bool:
    """
    Check if a string can be interpreted as a numerical value.

    Args:
        value (str): The string to check.

    Returns:
        bool: True if the string is numerical, otherwise False.
    """
    try:
        float(value)
        return True
    except ValueError:
        return False


# MonteCarloSwarm class


class MonteCarloSwarm:
    def __init__(
        self,
        agents: List[Agent],
        iterations: int = 100,
        aggregator: Callable = aggregate_most_common_result,
    ):
        self.agents = agents
        self.iterations = iterations
        self.aggregator = aggregator

    def run(self, task: str) -> Union[str, float, None]:
        """
        Execute the Monte Carlo swarm, passing the output of each agent to the next.
        The final result is aggregated over multiple iterations using the provided aggregator.

        Args:
            task (str): The task for the swarm to execute.

        Returns:
            Union[str, float, None]: The final aggregated result.
        """
        aggregated_results = []

        for _ in range(self.iterations):
            result = task
            for agent in self.agents:
                result = agent.run(result)
            aggregated_results.append(result)

        # Apply the selected aggregation function
        final_result = self.aggregator(aggregated_results)
        return final_result


# Example usage:

# Assuming you have the OpenAI API key set up and agents defined
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

agent1 = Agent(
    agent_name="Agent1",
    system_prompt="System prompt for agent 1",
    llm=model,
    max_loops=1,
    verbose=True,
)

agent2 = Agent(
    agent_name="Agent2",
    system_prompt="System prompt for agent 2",
    llm=model,
    max_loops=1,
    verbose=True,
)

# Create a MonteCarloSwarm with the agents and a selected aggregation function.
# Note: aggregate_weighted_vote takes (results, weights), so it cannot be passed
# directly; wrap it in a lambda as shown in the comments below.
swarm = MonteCarloSwarm(
    agents=[agent1, agent2],
    iterations=1,
    aggregator=aggregate_most_common_result,
)

# Run the swarm on a specific task
final_output = swarm.run(
    "What are the components of a startup's stock incentive plan?"
)
print("Final Output:", final_output)

# You can easily switch the aggregation function by passing a different one to the constructor:
# swarm = MonteCarloSwarm(agents=[agent1, agent2], iterations=100, aggregator=aggregate_consensus)

# If using weighted voting, you'll need to adjust the aggregator call to provide the weights:
# weights = list(range(100, 0, -1))  # Example weights for 100 iterations
# swarm = MonteCarloSwarm(agents=[agent1, agent2], iterations=100, aggregator=lambda results: aggregate_weighted_vote(results, weights))
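
# A short illustrative run (an assumption, not part of the diff): numeric
# aggregation over raw string outputs via aggregate_average_numerical.
# numeric_swarm = MonteCarloSwarm(
#     agents=[agent1, agent2],
#     iterations=5,
#     aggregator=aggregate_average_numerical,
# )
# print(numeric_swarm.run("Reply with a single number: a typical option pool percentage."))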
@ -1,110 +0,0 @@
import requests
from loguru import logger
import os


def fetch_secrets_from_vault(
    client_id: str = os.getenv("HCP_CLIENT_ID"),
    client_secret: str = os.getenv("HCP_CLIENT_SECRET"),
    organization_id: str = os.getenv("HCP_ORGANIZATION_ID"),
    project_id: str = os.getenv("HCP_PROJECT_ID"),
    app_id: str = os.getenv("HCP_APP_ID"),
) -> dict:
    """
    Fetch secrets from HashiCorp Vault using service principal authentication.

    Args:
        client_id (str): The client ID for the service principal.
        client_secret (str): The client secret for the service principal.
        organization_id (str): The ID of the organization in HCP.
        project_id (str): The ID of the project in HCP.
        app_id (str): The ID of the app in HCP.

    Returns:
        dict: A dictionary containing the fetched secrets.

    Raises:
        Exception: If there is an error retrieving the API token or secrets.
    """
    # Step 1: Generate the API token
    token_url = "https://auth.idp.hashicorp.com/oauth2/token"
    token_data = {
        "client_id": client_id,
        "client_secret": client_secret,
        "grant_type": "client_credentials",
        "audience": "https://api.hashicorp.cloud",
    }
    token_headers = {"Content-Type": "application/x-www-form-urlencoded"}

    logger.info("Requesting API token from HashiCorp Vault")
    response = requests.post(
        token_url, data=token_data, headers=token_headers
    )

    if response.status_code != 200:
        logger.error(
            f"Failed to retrieve API token. Status Code: {response.status_code}, Response: {response.text}"
        )
        response.raise_for_status()

    api_token = response.json().get("access_token")

    if not api_token:
        raise Exception("Failed to retrieve API token")

    # Step 2: Fetch secrets
    secrets_url = f"https://api.cloud.hashicorp.com/secrets/2023-06-13/organizations/{organization_id}/projects/{project_id}/apps/{app_id}/open"
    secrets_headers = {"Authorization": f"Bearer {api_token}"}

    logger.info("Fetching secrets from HashiCorp Vault")
    response = requests.get(secrets_url, headers=secrets_headers)

    if response.status_code != 200:
        logger.error(
            f"Failed to fetch secrets. Status Code: {response.status_code}, Response: {response.text}"
        )
        response.raise_for_status()

    secrets = response.json()

    for secret in secrets["secrets"]:
        name = secret.get("name")
        value = secret.get("version", {}).get("value")
        print(f"Name: {name}, Value: {value}")

    # Return the full payload rather than only the last (name, value) pair,
    # which is what the docstring and the commented-out main() below expect.
    return secrets


# def main() -> None:
#     """
#     Main function to fetch secrets from HashiCorp Vault and print them.

#     Raises:
#         EnvironmentError: If required environment variables are not set.
#     """
#     HCP_CLIENT_ID = os.getenv("HCP_CLIENT_ID")
#     HCP_CLIENT_SECRET = os.getenv("HCP_CLIENT_SECRET")
#     ORGANIZATION_ID = os.getenv("HCP_ORGANIZATION_ID")
#     PROJECT_ID = os.getenv("HCP_PROJECT_ID")
#     APP_ID = os.getenv("HCP_APP_ID")

#     # if not all([HCP_CLIENT_ID, HCP_CLIENT_SECRET, ORGANIZATION_ID, PROJECT_ID, APP_ID]):
#     #     raise EnvironmentError("One or more environment variables are missing: HCP_CLIENT_ID, HCP_CLIENT_SECRET, ORGANIZATION_ID, PROJECT_ID, APP_ID")

#     secrets = fetch_secrets_from_vault(
#         HCP_CLIENT_ID,
#         HCP_CLIENT_SECRET,
#         ORGANIZATION_ID,
#         PROJECT_ID,
#         APP_ID,
#     )
#     print(secrets)

#     for secret in secrets["secrets"]:
#         name = secret.get("name")
#         value = secret.get("version", {}).get("value")
#         print(f"Name: {name}, Value: {value}")


# if __name__ == "__main__":
#     main()
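
# A brief usage sketch (an assumption, not part of the diff): with the HCP_*
# environment variables set, the defaults pick everything up automatically.
# secrets = fetch_secrets_from_vault()
# for secret in secrets["secrets"]:
#     print(secret.get("name"))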
@ -1,540 +0,0 @@
from __future__ import annotations

import asyncio
import json
import os
import platform
from typing import Any

import pkg_resources
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
    OTLPSpanExporter,
)
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import Span, Status, StatusCode
from swarms.structs.base_swarm import BaseSwarm

# Note: several methods below annotate parameters with `Crew` and `Task`, which
# are not imported here (leftovers from crewAI-derived code); the
# `from __future__ import annotations` import keeps these hints lazily
# evaluated so the module still loads.


class Telemetry:
    """A class to handle anonymous telemetry for the swarms package.

    The data being collected is for development purposes; all data is anonymous.

    There is NO data being collected on the prompts, task descriptions,
    agents' backstories or goals, nor responses or any data that is being
    processed by the agents, nor any secrets and env vars.

    Data collected includes:
    - Version of swarms
    - Version of Python
    - General OS (e.g. number of CPUs, macOS/Windows/Linux)
    - Number of agents and tasks in a crew
    - Crew Process being used
    - If Agents are using memory or allowing delegation
    - If Tasks are being executed in parallel or sequentially
    - Language model being used
    - Roles of agents in a crew
    - Tools names available

    Users can opt-in to sharing more complete data using the `share_crew`
    attribute in the Crew class.
    """

    def __init__(self):
        self.ready = False
        self.trace_set = False
        try:
            telemetry_endpoint = "https://telemetry.swarms.com:4319"
            self.resource = Resource(
                attributes={SERVICE_NAME: "swarms-telemetry"},
            )
            self.provider = TracerProvider(resource=self.resource)

            processor = BatchSpanProcessor(
                OTLPSpanExporter(
                    endpoint=f"{telemetry_endpoint}/v1/traces",
                    timeout=30,
                )
            )

            self.provider.add_span_processor(processor)
            self.ready = True
        except BaseException as e:
            if isinstance(
                e,
                (
                    SystemExit,
                    KeyboardInterrupt,
                    GeneratorExit,
                    asyncio.CancelledError,
                ),
            ):
                raise  # Re-raise the exception to not interfere with system signals
            self.ready = False

    def set_tracer(self):
        if self.ready and not self.trace_set:
            try:
                trace.set_tracer_provider(self.provider)
                self.trace_set = True
            except Exception:
                self.ready = False
                self.trace_set = False
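
    # Typical wiring (a hedged sketch, not part of the diff): construct once and
    # install the tracer provider before recording any spans.
    # telemetry = Telemetry()
    # telemetry.set_tracer()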

    def swarm_creation(
        self, crew: BaseSwarm, inputs: dict[str, Any] | None
    ):
        """Records the creation of a crew."""
        if self.ready:
            try:
                tracer = trace.get_tracer("swarms.telemetry")
                span = tracer.start_span("Crew Created")
                self._add_attribute(
                    span,
                    "swarms_version",
                    pkg_resources.get_distribution("swarms").version,
                )
                self._add_attribute(
                    span, "python_version", platform.python_version()
                )
                self._add_attribute(span, "crew_key", crew.key)
                self._add_attribute(span, "crew_id", str(crew.id))
                self._add_attribute(span, "crew_process", crew.process)
                self._add_attribute(span, "crew_memory", crew.memory)
                self._add_attribute(
                    span, "crew_number_of_tasks", len(crew.tasks)
                )
                self._add_attribute(
                    span, "crew_number_of_agents", len(crew.agents)
                )
                self._add_attribute(
                    span,
                    "crew_agents",
                    json.dumps(
                        [
                            {
                                "key": agent.key,
                                "id": str(agent.id),
                                "role": agent.role,
                                "goal": agent.goal,
                                "backstory": agent.backstory,
                                "verbose?": agent.verbose,
                                "max_iter": agent.max_iter,
                                "max_rpm": agent.max_rpm,
                                "i18n": agent.i18n.prompt_file,
                                "llm": json.dumps(
                                    self._safe_llm_attributes(agent.llm)
                                ),
                                "delegation_enabled?": agent.allow_delegation,
                                "tools_names": [
                                    tool.name.casefold()
                                    for tool in agent.tools or []
                                ],
                            }
                            for agent in crew.agents
                        ]
                    ),
                )
                self._add_attribute(
                    span,
                    "crew_tasks",
                    json.dumps(
                        [
                            {
                                "key": task.key,
                                "id": str(task.id),
                                "description": task.description,
                                "expected_output": task.expected_output,
                                "async_execution?": task.async_execution,
                                "human_input?": task.human_input,
                                "agent_role": (
                                    task.agent.role
                                    if task.agent
                                    else "None"
                                ),
                                "agent_key": (
                                    task.agent.key if task.agent else None
                                ),
                                "context": (
                                    [
                                        task.description
                                        for task in task.context
                                    ]
                                    if task.context
                                    else None
                                ),
                                "tools_names": [
                                    tool.name.casefold()
                                    for tool in task.tools or []
                                ],
                            }
                            for task in crew.tasks
                        ]
                    ),
                )
                self._add_attribute(span, "platform", platform.platform())
                self._add_attribute(
                    span, "platform_release", platform.release()
                )
                self._add_attribute(
                    span, "platform_system", platform.system()
                )
                self._add_attribute(
                    span, "platform_version", platform.version()
                )
                self._add_attribute(span, "cpus", os.cpu_count())

                if crew.share_crew:
                    self._add_attribute(
                        span,
                        "crew_inputs",
                        json.dumps(inputs) if inputs else None,
                    )

                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def task_started(self, crew: Crew, task: Task) -> Span | None:
        """Records task started in a crew."""
        if self.ready:
            try:
                tracer = trace.get_tracer("swarms.telemetry")

                created_span = tracer.start_span("Task Created")

                self._add_attribute(created_span, "crew_key", crew.key)
                self._add_attribute(created_span, "crew_id", str(crew.id))
                self._add_attribute(created_span, "task_key", task.key)
                self._add_attribute(created_span, "task_id", str(task.id))

                if crew.share_crew:
                    self._add_attribute(
                        created_span,
                        "formatted_description",
                        task.description,
                    )
                    self._add_attribute(
                        created_span,
                        "formatted_expected_output",
                        task.expected_output,
                    )

                created_span.set_status(Status(StatusCode.OK))
                created_span.end()

                span = tracer.start_span("Task Execution")

                self._add_attribute(span, "crew_key", crew.key)
                self._add_attribute(span, "crew_id", str(crew.id))
                self._add_attribute(span, "task_key", task.key)
                self._add_attribute(span, "task_id", str(task.id))

                if crew.share_crew:
                    self._add_attribute(
                        span, "formatted_description", task.description
                    )
                    self._add_attribute(
                        span,
                        "formatted_expected_output",
                        task.expected_output,
                    )

                return span
            except Exception:
                pass

        return None

    def task_ended(self, span: Span, task: Task, crew: Crew):
        """Records task execution in a crew."""
        if self.ready:
            try:
                if crew.share_crew:
                    self._add_attribute(
                        span,
                        "task_output",
                        task.output.raw if task.output else "",
                    )

                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
        """Records the repeated usage 'error' of a tool by an agent."""
        if self.ready:
            try:
                tracer = trace.get_tracer("swarms.telemetry")
                span = tracer.start_span("Tool Repeated Usage")
                self._add_attribute(
                    span,
                    "swarms_version",
                    pkg_resources.get_distribution("swarms").version,
                )
                self._add_attribute(span, "tool_name", tool_name)
                self._add_attribute(span, "attempts", attempts)
                if llm:
                    self._add_attribute(
                        span,
                        "llm",
                        json.dumps(self._safe_llm_attributes(llm)),
                    )
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def tool_usage(self, llm: Any, tool_name: str, attempts: int):
        """Records the usage of a tool by an agent."""
        if self.ready:
            try:
                tracer = trace.get_tracer("swarms.telemetry")
                span = tracer.start_span("Tool Usage")
                self._add_attribute(
                    span,
                    "swarms_version",
                    pkg_resources.get_distribution("swarms").version,
                )
                self._add_attribute(span, "tool_name", tool_name)
                self._add_attribute(span, "attempts", attempts)
                if llm:
                    self._add_attribute(
                        span,
                        "llm",
                        json.dumps(self._safe_llm_attributes(llm)),
                    )
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def tool_usage_error(self, llm: Any):
        """Records an error during tool usage by an agent."""
        if self.ready:
            try:
                tracer = trace.get_tracer("swarms.telemetry")
                span = tracer.start_span("Tool Usage Error")
                self._add_attribute(
                    span,
                    "swarms_version",
                    pkg_resources.get_distribution("swarms").version,
                )
                if llm:
                    self._add_attribute(
                        span,
                        "llm",
                        json.dumps(self._safe_llm_attributes(llm)),
                    )
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def individual_test_result_span(
        self, crew: Crew, quality: int, exec_time: int, model_name: str
    ):
        if self.ready:
            try:
                tracer = trace.get_tracer("swarms.telemetry")
                span = tracer.start_span("Crew Individual Test Result")

                self._add_attribute(
                    span,
                    "swarms_version",
                    pkg_resources.get_distribution("swarms").version,
                )
                self._add_attribute(span, "crew_key", crew.key)
                self._add_attribute(span, "crew_id", str(crew.id))
                self._add_attribute(span, "quality", str(quality))
                self._add_attribute(span, "exec_time", str(exec_time))
                self._add_attribute(span, "model_name", model_name)
                return span
            except Exception:
                pass

    def test_execution_span(
        self,
        crew: Crew,
        iterations: int,
        inputs: dict[str, Any] | None,
        model_name: str,
    ):
        if self.ready:
            try:
                tracer = trace.get_tracer("swarms.telemetry")
                span = tracer.start_span("Crew Test Execution")

                self._add_attribute(
                    span,
                    "swarms_version",
                    pkg_resources.get_distribution("swarms").version,
                )
                self._add_attribute(span, "crew_key", crew.key)
                self._add_attribute(span, "crew_id", str(crew.id))
                self._add_attribute(span, "iterations", str(iterations))
                self._add_attribute(span, "model_name", model_name)

                if crew.share_crew:
                    self._add_attribute(
                        span,
                        "inputs",
                        json.dumps(inputs) if inputs else None,
                    )

                return span
            except Exception:
                pass

    def crew_execution_span(
        self, crew: Crew, inputs: dict[str, Any] | None
    ):
        """Records the complete execution of a crew.
        This is only collected if the user has opted-in to share the crew.
        """
        self.swarm_creation(crew, inputs)

        if (self.ready) and (crew.share_crew):
            try:
                tracer = trace.get_tracer("swarms.telemetry")
                span = tracer.start_span("Crew Execution")
                self._add_attribute(
                    span,
                    "swarms_version",
                    pkg_resources.get_distribution("swarms").version,
                )
                self._add_attribute(span, "crew_key", crew.key)
                self._add_attribute(span, "crew_id", str(crew.id))
                self._add_attribute(
                    span,
                    "crew_inputs",
                    json.dumps(inputs) if inputs else None,
                )
                self._add_attribute(
                    span,
                    "crew_agents",
                    json.dumps(
                        [
                            {
                                "key": agent.key,
                                "id": str(agent.id),
                                "role": agent.role,
                                "goal": agent.goal,
                                "backstory": agent.backstory,
                                "verbose?": agent.verbose,
                                "max_iter": agent.max_iter,
                                "max_rpm": agent.max_rpm,
                                "i18n": agent.i18n.prompt_file,
                                "llm": json.dumps(
                                    self._safe_llm_attributes(agent.llm)
                                ),
                                "delegation_enabled?": agent.allow_delegation,
                                "tools_names": [
                                    tool.name.casefold()
                                    for tool in agent.tools or []
                                ],
                            }
                            for agent in crew.agents
                        ]
                    ),
                )
                self._add_attribute(
                    span,
                    "crew_tasks",
                    json.dumps(
                        [
                            {
                                "id": str(task.id),
                                "description": task.description,
                                "expected_output": task.expected_output,
                                "async_execution?": task.async_execution,
                                "human_input?": task.human_input,
                                "agent_role": (
                                    task.agent.role
                                    if task.agent
                                    else "None"
                                ),
                                "agent_key": (
                                    task.agent.key if task.agent else None
                                ),
                                "context": (
                                    [
                                        task.description
                                        for task in task.context
                                    ]
                                    if task.context
                                    else None
                                ),
                                "tools_names": [
                                    tool.name.casefold()
                                    for tool in task.tools or []
                                ],
                            }
                            for task in crew.tasks
                        ]
                    ),
                )
                return span
            except Exception:
                pass
||||
|
||||
    def end_crew(self, crew, final_string_output):
        if (self.ready) and (crew.share_crew):
            try:
                self._add_attribute(
                    crew._execution_span,
                    "swarms_version",
                    pkg_resources.get_distribution("swarms").version,
                )
                self._add_attribute(
                    crew._execution_span,
                    "crew_output",
                    final_string_output,
                )
                self._add_attribute(
                    crew._execution_span,
                    "crew_tasks_output",
                    json.dumps(
                        [
                            {
                                "id": str(task.id),
                                "description": task.description,
                                "output": task.output.raw_output,
                            }
                            for task in crew.tasks
                        ]
                    ),
                )
                crew._execution_span.set_status(Status(StatusCode.OK))
                crew._execution_span.end()
            except Exception:
                pass

    def _add_attribute(self, span, key, value):
        """Add an attribute to a span."""
        try:
            return span.set_attribute(key, value)
        except Exception:
            pass

    def _safe_llm_attributes(self, llm):
        attributes = [
            "name",
            "model_name",
            "base_url",
            "model",
            "top_k",
            "temperature",
        ]
        if llm:
            safe_attributes = {
                k: v for k, v in vars(llm).items() if k in attributes
            }
            safe_attributes["class"] = llm.__class__.__name__
            return safe_attributes
        return {}
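
# For orientation, a minimal sketch of how the span helpers above might be
# exercised. Hypothetical: assumes the enclosing class is named Telemetry,
# can be constructed directly with `ready` set, and that `llm` is an object
# exposing the attributes _safe_llm_attributes reads (model, temperature, ...):
#
#     telemetry = Telemetry()
#     telemetry.tool_usage(llm=llm, tool_name="web_search", attempts=2)
#     telemetry.tool_usage_error(llm=llm)
#
# Each call opens a span named after the event, attaches the installed swarms
# version plus the safe LLM attributes, marks the span OK, and ends it; any
# failure is swallowed so telemetry never interrupts the agent.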

@ -0,0 +1,192 @@
import json
from typing import Any, Dict, List, Optional, Union

import requests
from loguru import logger
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
    OTLPSpanExporter,
)
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import Status, StatusCode


class TelemetryProcessor:
    """
    A class to handle telemetry processing, including converting data to JSON,
    exporting it to an API server, and tracing the operations with OpenTelemetry.

    Attributes:
        service_name (str): The name of the service for tracing.
        otlp_endpoint (str): The endpoint URL for the OTLP exporter.
        tracer (Tracer): The tracer object used for creating spans.

    Methods:
        process_data(data: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None) -> str:
            Converts input data to a JSON string.

        export_to_server(json_data: Optional[str] = None, api_url: Optional[str] = None) -> None:
            Sends the JSON data to the specified API server.
    """

    def __init__(
        self,
        service_name: str = "telemetry_service",
        otlp_endpoint: str = "http://localhost:4318/v1/traces",
    ) -> None:
        """
        Initializes the TelemetryProcessor class with configurable settings.

        Args:
            service_name (str): The name of the service for tracing.
            otlp_endpoint (str): The endpoint URL for the OTLP exporter.
        """
        self.service_name = service_name
        self.otlp_endpoint = otlp_endpoint

        # Configure OpenTelemetry tracing. Resource and set_tracer_provider
        # accept no extra positional arguments, so nothing else is forwarded.
        resource = Resource(attributes={SERVICE_NAME: self.service_name})
        trace.set_tracer_provider(TracerProvider(resource=resource))
        self.tracer = trace.get_tracer(__name__)

        # Configure the OTLP exporter to send spans to a collector
        # (e.g., Jaeger or Zipkin).
        otlp_exporter = OTLPSpanExporter(endpoint=self.otlp_endpoint)
        span_processor = BatchSpanProcessor(otlp_exporter)
        trace.get_tracer_provider().add_span_processor(span_processor)

        logger.debug(
            f"TelemetryProcessor initialized with service_name={self.service_name}, "
            f"otlp_endpoint={self.otlp_endpoint}"
        )

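    # The tracing pipeline assembled in __init__, summarized:
    #   Resource(service.name=...) -> TracerProvider -> BatchSpanProcessor
    #   -> OTLPSpanExporter (HTTP POST to otlp_endpoint) -> collector
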
    def process_data(
        self,
        data: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
    ) -> str:
        """
        Converts input data to a JSON string.

        Args:
            data (Optional[Union[Dict[str, Any], List[Dict[str, Any]]]]): The input data to be converted.
                Defaults to an empty dictionary if None is provided.

        Returns:
            str: The JSON string representation of the input data.

        Raises:
            TypeError: If the input data is not a dictionary or a list of
                dictionaries, or contains values that cannot be serialized.
            ValueError: If serialization fails for another reason
                (e.g., a circular reference).
        """
        with self.tracer.start_as_current_span("process_data") as span:
            if data is None:
                data = {}
            logger.debug(f"Processing data: {data}")

            if not isinstance(data, (dict, list)):
                logger.error(
                    "Invalid data type. Expected a dictionary or a list of dictionaries."
                )
                span.set_status(
                    Status(StatusCode.ERROR, "Invalid data type")
                )
                raise TypeError(
                    "Input data must be a dictionary or a list of dictionaries."
                )

            try:
                json_data = json.dumps(data)
                logger.debug(f"Converted data to JSON: {json_data}")
                return json_data
            # json.dumps raises TypeError for unserializable objects and
            # ValueError for circular references; the standard library has
            # no json.JSONEncodeError.
            except (TypeError, ValueError) as e:
                logger.error(f"Failed to convert data to JSON: {e}")
                span.set_status(
                    Status(StatusCode.ERROR, "JSON serialization failed")
                )
                raise
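
    # Behavior sketch for process_data (hypothetical values, not part of the
    # class API):
    #
    #     processor.process_data({"device_id": "sensor_01"})  # -> '{"device_id": "sensor_01"}'
    #     processor.process_data(None)                        # -> '{}'
    #     processor.process_data("raw string")                # raises TypeError
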
    def export_to_server(
        self,
        json_data: Optional[str] = None,
        api_url: Optional[str] = None,
    ) -> None:
        """
        Sends the JSON data to the specified API server.

        Args:
            json_data (Optional[str]): The JSON data to be sent. Defaults to an empty JSON object ("{}") if None is provided.
            api_url (Optional[str]): The URL of the API server to send the data to. Defaults to None.

        Raises:
            ValueError: If the api_url is None.
            requests.exceptions.RequestException: If there is an error sending the data to the server.
        """
        with self.tracer.start_as_current_span("export_to_server") as span:
            if json_data is None:
                json_data = "{}"
            if api_url is None:
                logger.error("API URL cannot be None.")
                span.set_status(
                    Status(StatusCode.ERROR, "API URL is missing")
                )
                raise ValueError("API URL cannot be None.")

            logger.debug(f"Exporting JSON data to server: {api_url}")
            headers = {"Content-Type": "application/json"}

            log = {
                "data": json_data,
            }

            try:
                # Send the payload as JSON; passing the dict via `data=`
                # would form-encode it, contradicting the Content-Type header.
                response = requests.post(
                    api_url, json=log, headers=headers
                )
                response.raise_for_status()
                logger.info(
                    f"Data successfully exported to {api_url}: {response.status_code}"
                )
            except requests.exceptions.RequestException as e:
                logger.error(f"Failed to export data to {api_url}: {e}")
                span.set_status(
                    Status(
                        StatusCode.ERROR,
                        "Failed to send data to API server",
                    )
                )
                raise


# Example usage:

# if __name__ == "__main__":
#     # Example usage with a custom service name and OTLP endpoint
#     processor = TelemetryProcessor(
#         service_name="my_telemetry_service",
#         otlp_endpoint="http://my-collector:4318/v1/traces",
#     )
#
#     # Sample data
#     telemetry_data = {
#         "device_id": "sensor_01",
#         "temperature": 22.5,
#         "humidity": 60,
#         "timestamp": "2024-08-15T12:34:56Z",
#     }
#
#     # Processing data
#     try:
#         json_data = processor.process_data(telemetry_data)
#     except Exception as e:
#         logger.error(f"Processing error: {e}")
#         # Handle the error accordingly
#
#     # Exporting data to an API server
#     api_url = "https://example.com/api/telemetry"
#     try:
#         processor.export_to_server(json_data, api_url)
#     except Exception as e:
#         logger.error(f"Export error: {e}")
#         # Handle the error accordingly