From edc293cb6f0c771eac9dee239617358b2b54d8ae Mon Sep 17 00:00:00 2001
From: Your Name
Date: Wed, 2 Oct 2024 15:18:11 -0400
Subject: [PATCH] [CLEANUP] Remove the `tool` decorator from the examples, add
 an ant_swarm.py example, and bump the version to 5.6.9

---
 ant_swarm.py                                  | 207 ++++++++++++++++++
 docs/applications/business-analyst-agent.md   |   2 -
 examples/agents/tools/devin.py                |   7 +-
 examples/agents/tools/full_stack_agent.py     |  35 ---
 .../prompt_generator_agent.py                 | 204 ++++++++++++++---
 .../agents/tools/prompt_generator_agent.py    | 202 -----------------
 examples/agents/use_cases/kyle_hackathon.py   |   7 +-
 .../business-analyst-agent.ipynb              |   2 -
 .../swarms/groupchat/groupchat_example.py     |   8 +-
 forest_swarm_example.py                       |   1 +
 pyproject.toml                                |   2 +-
 swarms/structs/tree_swarm.py                  |   1 -
 tests/tools/test_tools_base.py                |   2 +-
 13 files changed, 391 insertions(+), 289 deletions(-)
 create mode 100644 ant_swarm.py
 delete mode 100644 examples/agents/tools/full_stack_agent.py
 delete mode 100644 examples/agents/tools/prompt_generator_agent.py

diff --git a/ant_swarm.py b/ant_swarm.py
new file mode 100644
index 00000000..5ff44be5
--- /dev/null
+++ b/ant_swarm.py
@@ -0,0 +1,207 @@
+import os
+import time
+from typing import List, Dict, Any, Union
+from pydantic import BaseModel, Field
+from loguru import logger
+from swarms import Agent
+from swarm_models import OpenAIChat
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+
+
+# Pydantic model to track metadata for each agent
+class AgentMetadata(BaseModel):
+    agent_id: str
+    start_time: float = Field(default_factory=time.time)
+    end_time: Union[float, None] = None
+    task: str
+    output: Union[str, None] = None
+    error: Union[str, None] = None
+    status: str = "running"
+
+
+# Worker Agent class
+class WorkerAgent:
+    def __init__(
+        self,
+        agent_name: str,
+        system_prompt: str,
+        model_name: str = "gpt-4o-mini",
+    ):
+        """Initialize a Worker agent with its own model, name, and system prompt."""
+        api_key = os.getenv("OPENAI_API_KEY")
+
+        # Create the LLM model for the worker
+        self.model = OpenAIChat(
+            openai_api_key=api_key,
+            model_name=model_name,
+            temperature=0.1,
+        )
+
+        # Initialize the worker agent with a unique prompt and name
+        self.agent = Agent(
+            agent_name=agent_name,
+            system_prompt=system_prompt,
+            llm=self.model,
+            max_loops=1,
+            autosave=True,
+            dashboard=False,
+            verbose=True,
+            dynamic_temperature_enabled=True,
+            saved_state_path=f"{agent_name}_state.json",
+            user_name="swarms_corp",
+            retry_attempts=1,
+            context_length=200000,
+            return_step_meta=False,
+        )
+
+    def perform_task(self, task: str) -> Dict[str, Any]:
+        """Perform the task assigned by the Queen and return the result."""
+        metadata = AgentMetadata(
+            agent_id=self.agent.agent_name, task=task
+        )
+
+        try:
+            logger.info(
+                f"{self.agent.agent_name} is starting task '{task}'."
+            )
+            result = self.agent.run(task)
+            metadata.output = result
+            metadata.status = "completed"
+        except Exception as e:
+            logger.error(
+                f"{self.agent.agent_name} encountered an error: {e}"
+            )
+            metadata.error = str(e)
+            metadata.status = "failed"
+        finally:
+            metadata.end_time = time.time()
+        return metadata.model_dump()
+
+
+# Queen Agent class to manage the workers and dynamically decompose tasks
+class QueenAgent:
+    def __init__(
+        self,
+        worker_count: int = 5,
+        model_name: str = "gpt-4o-mini",
+        queen_name: str = "Queen-Agent",
+        queen_prompt: str = "You are the queen of the hive.",
+    ):
+        """Initialize the Queen agent who assigns tasks to workers.
+
+        Args:
+            worker_count (int): Number of worker agents to manage.
+            model_name (str): The model used by worker agents.
+            queen_name (str): The name of the Queen agent.
+            queen_prompt (str): The system prompt for the Queen agent.
+        """
+        self.queen_name = queen_name
+        self.queen_prompt = queen_prompt
+
+        # Queen agent initialization with a unique prompt for dynamic task decomposition
+        api_key = os.getenv("OPENAI_API_KEY")
+        self.queen_model = OpenAIChat(
+            openai_api_key=api_key,
+            model_name=model_name,
+            temperature=0.1,
+        )
+
+        # Initialize worker agents
+        self.workers = [
+            WorkerAgent(
+                agent_name=f"Worker-{i+1}",
+                system_prompt=f"Worker agent {i+1}, specialized in helping with financial analysis.",
+                model_name=model_name,
+            )
+            for i in range(worker_count)
+        ]
+        self.worker_metadata: Dict[str, Any] = {}
+
+    def decompose_task(self, task: str) -> List[str]:
+        """Dynamically decompose a task into multiple subtasks using prompting."""
+        decomposition_prompt = f"""{self.queen_prompt} You are a highly efficient problem solver. Given the following task:
+        '{task}', please decompose this task into 3-5 smaller subtasks, and explain how they can be completed step by step."""
+
+        logger.info(
+            f"{self.queen_name} is generating subtasks using prompting for the task: '{task}'"
+        )
+
+        # Use the queen's model to generate subtasks dynamically
+        subtasks_output = self.queen_model.run(decomposition_prompt)
+        logger.info(f"Queen output: {subtasks_output}")
+        subtasks = subtasks_output.split("\n")
+
+        # Filter and clean up subtasks
+        subtasks = [
+            subtask.strip() for subtask in subtasks if subtask.strip()
+        ]
+
+        return subtasks
+
+    def assign_subtasks(self, subtasks: List[str]) -> Dict[str, Any]:
+        """Assign subtasks to workers dynamically and collect their results.
+
+        Args:
+            subtasks (List[str]): The list of subtasks to distribute among workers.
+
+        Returns:
+            dict: A dictionary containing results from workers.
+        """
+        logger.info(
+            f"{self.queen_name} is assigning subtasks to workers."
+        )
+        results = {}
+
+        for i, subtask in enumerate(subtasks):
+            # Assign each subtask to a different worker
+            worker = self.workers[
+                i % len(self.workers)
+            ]  # Circular assignment if more subtasks than workers
+            worker_result = worker.perform_task(subtask)
+            results[worker_result["agent_id"]] = worker_result
+            self.worker_metadata[worker_result["agent_id"]] = (
+                worker_result
+            )
+
+        return results
+
+    def gather_results(self) -> Dict[str, Any]:
+        """Gather all results from the worker agents."""
+        return self.worker_metadata
+
+    def run_swarm(self, task: str) -> Dict[str, Any]:
+        """Run the swarm by decomposing a task into subtasks, assigning them to workers, and gathering results."""
+        logger.info(f"{self.queen_name} is initializing the swarm.")
+
+        # Decompose the task into subtasks using prompting
+        subtasks = self.decompose_task(task)
+        logger.info(f"Subtasks generated by the Queen: {subtasks}")
+
+        # Assign subtasks to workers
+        results = self.assign_subtasks(subtasks)
+
+        logger.info(
+            f"{self.queen_name} has collected results from all workers."
+        )
+        return results
+
+
+# Example usage
+if __name__ == "__main__":
+    # Queen oversees 3 worker agents with a custom system prompt
+    queen = QueenAgent(
+        worker_count=3,
+        queen_name="Queen-Overseer",
+        queen_prompt="You are the overseer queen of a financial analysis swarm. Decompose and distribute tasks wisely.",
+    )
+
+    # Task for the swarm to execute
+    task = "Analyze the best strategies to establish a Roth IRA and maximize tax savings."
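+    # The queen will decompose this into 3-5 subtasks and assign them
+    # round-robin across the three workers configured above.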
+ + # Run the swarm on the task and gather results + final_results = queen.run_swarm(task) + + print("Final Swarm Results:", final_results) diff --git a/docs/applications/business-analyst-agent.md b/docs/applications/business-analyst-agent.md index 644232cc..dcad5b33 100644 --- a/docs/applications/business-analyst-agent.md +++ b/docs/applications/business-analyst-agent.md @@ -327,7 +327,6 @@ from langchain_core.pydantic_v1 import BaseModel, Field from kay.rag.retrievers import KayRetriever -@tool def browser(query: str) -> str: """ Search the query in the browser with the Tavily API tool. @@ -343,7 +342,6 @@ def browser(query: str) -> str: response += (result['content'] + '\n') return response -@tool def kay_retriever(query: str) -> str: """ Search the financial data query with the KayAI API tool. diff --git a/examples/agents/tools/devin.py b/examples/agents/tools/devin.py index d88c3912..e3fb79ca 100644 --- a/examples/agents/tools/devin.py +++ b/examples/agents/tools/devin.py @@ -1,5 +1,5 @@ from swarms import Agent -from swarm_models import Anthropic, tool +from swarm_models import Anthropic import subprocess # Model @@ -7,9 +7,7 @@ llm = Anthropic( temperature=0.1, ) - # Tools -@tool def terminal( code: str, ): @@ -28,7 +26,6 @@ def terminal( return str(out) -@tool def browser(query: str): """ Search the query in the browser with the `browser` tool. @@ -46,7 +43,6 @@ def browser(query: str): return f"Searching for {query} in the browser." -@tool def create_file(file_path: str, content: str): """ Create a file using the file editor tool. @@ -63,7 +59,6 @@ def create_file(file_path: str, content: str): return f"File {file_path} created successfully." -@tool def file_editor(file_path: str, mode: str, content: str): """ Edit a file using the file editor tool. diff --git a/examples/agents/tools/full_stack_agent.py b/examples/agents/tools/full_stack_agent.py deleted file mode 100644 index 1c84ed05..00000000 --- a/examples/agents/tools/full_stack_agent.py +++ /dev/null @@ -1,35 +0,0 @@ -from swarms import Agent -from swarm_models import Anthropic, tool - - -# Tool -@tool # Wrap the function with the tool decorator -def search_api(query: str, max_results: int = 10): - """ - Search the web for the query and return the top `max_results` results. - """ - return f"Search API: {query} -> {max_results} results" - - -## Initialize the workflow -agent = Agent( - agent_name="Youtube Transcript Generator", - agent_description=( - "Generate a transcript for a youtube video on what swarms" - " are!" - ), - llm=Anthropic(), - max_loops="auto", - autosave=True, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - tools=[search_api], -) - -# Run the workflow on a task -agent( - "Generate a transcript for a youtube video on what swarms are!" - " Output a token when done." 
-)
diff --git a/examples/agents/tools/function_calling/prompt_generator_agent.py b/examples/agents/tools/function_calling/prompt_generator_agent.py
index 3ff9ebe5..67740053 100644
--- a/examples/agents/tools/function_calling/prompt_generator_agent.py
+++ b/examples/agents/tools/function_calling/prompt_generator_agent.py
@@ -1,55 +1,203 @@
-from swarm_models.openai_function_caller import OpenAIFunctionCaller
+import os
+from typing import List
+
+from loguru import logger
 from pydantic import BaseModel, Field
-from typing import Sequence
+
+from swarms import create_file_in_folder
+from swarm_models import OpenAIFunctionCaller
 
 
 class PromptUseCase(BaseModel):
-    use_case_name: str = Field(
+    title: str = Field(
         ...,
-        description="The name of the use case",
+        description="The name of the use case.",
     )
-    use_case_description: str = Field(
+    description: str = Field(
         ...,
-        description="The description of the use case",
+        description="The description of the use case.",
     )
 
 
-class PromptSpec(BaseModel):
-    prompt_name: str = Field(
+class PromptSchema(BaseModel):
+    name: str = Field(
         ...,
-        description="The name of the prompt",
+        description="The name of the prompt.",
     )
-    prompt_description: str = Field(
+    prompt: str = Field(
         ...,
-        description="The description of the prompt",
+        description="The prompt to generate the response.",
     )
-    prompt: str = Field(
+    description: str = Field(
         ...,
-        description="The prompt for the agent",
+        description="The description of the prompt.",
     )
     tags: str = Field(
         ...,
-        description="The tags for the prompt such as sentiment, code, etc seperated by commas.",
+        description="Comma-separated tags for the prompt, e.g.: Code Gen Prompt, Pytorch Code Gen Agent Prompt, Finance Agent Prompt",
     )
-    use_cases: Sequence[PromptUseCase] = Field(
+    useCases: List[PromptUseCase] = Field(
         ...,
-        description="The use cases for the prompt",
+        description="The use cases for the prompt.",
     )
 
 
+class PromptGeneratorAgent:
+    """
+    A class that generates prompts based on given tasks and publishes them to the marketplace.
+
+    Args:
+        system_prompt (str, optional): The system prompt to use. Defaults to None.
+        max_tokens (int, optional): The maximum number of tokens in the generated prompt. Defaults to 4000.
+        temperature (float, optional): The temperature value for controlling randomness in the generated prompt. Defaults to 0.5.
+        schema (BaseModel, optional): The base model schema to use. Defaults to PromptSchema.
+
+    Attributes:
+        llm (OpenAIFunctionCaller): An instance of the OpenAIFunctionCaller class for making function calls to the OpenAI API.
+
+    Methods:
+        clean_model_code: Cleans the model code by removing extra escape characters, newlines, and unnecessary whitespaces.
+        upload_to_marketplace: Uploads the generated prompt data to the marketplace.
+        run: Creates a prompt based on the given task and publishes it to the marketplace.
+    """
+
+    def __init__(
+        self,
+        system_prompt: str = None,
+        max_tokens: int = 4000,
+        temperature: float = 0.5,
+        schema: BaseModel = PromptSchema,
+    ):
+        self.llm = OpenAIFunctionCaller(
+            system_prompt=system_prompt,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            base_model=schema,
+            parallel_tool_calls=False,
+        )
+
+    def clean_model_code(self, model_code_str: str) -> str:
+        """
+        Cleans the model code by removing extra escape characters, newlines, and unnecessary whitespaces.
+
+        Args:
+            model_code_str (str): The model code string to clean.
+
+        Returns:
+            str: The cleaned model code.
+        """
+        cleaned_code = model_code_str.replace("\\n", "\n").replace(
+            "\\'", "'"
+        )
+        cleaned_code = cleaned_code.strip()
+        return cleaned_code
+
+    def upload_to_marketplace(self, data: dict) -> str:
+        """
+        Uploads the generated prompt data to the marketplace.
+
+        Args:
+            data (dict): The prompt data to upload.
+
+        Returns:
+            str: The marketplace API response, serialized as a string.
+        """
+        import json
+
+        import requests
+
+        url = "https://swarms.world/api/add-prompt"
+        headers = {
+            "Content-Type": "application/json",
+            "Authorization": f"Bearer {os.getenv('SWARMS_API_KEY')}",
+        }
+        response = requests.post(
+            url, headers=headers, data=json.dumps(data)
+        )
+        return str(response.json())
+
+    def run(self, task: str) -> str:
+        """
+        Creates a prompt based on the given task and publishes it to the marketplace.
+
+        Args:
+            task (str): The task description for generating the prompt.
+
+        Returns:
+            str: The response from the marketplace API after uploading the prompt.
+        """
+        out = self.llm.run(task)
+        name = out["name"]
+        logger.info(f"Prompt generated: {out}")
+
+        create_file_in_folder(
+            "auto_generated_prompts", f"prompt_{name}.json", str(out)
+        )
+        logger.info(f"Prompt saved to file: prompt_{name}.json")
+
+        # Extract the generated fields and clean the prompt text
+        prompt = out["prompt"]
+        description = out["description"]
+        tags = out["tags"]
+        useCases = out["useCases"]
+
+        data = {
+            "name": name,
+            "prompt": self.clean_model_code(prompt),
+            "description": description,
+            "tags": tags,
+            "useCases": useCases,
+        }
+
+        create_file_in_folder(
+            "auto_generated_prompts",
+            f"prompt_{name}.json",
+            str(data),
+        )
+
+        # Now submit to swarms API
+        logger.info("Uploading to marketplace...")
+        return self.upload_to_marketplace(data)
+
+
 # Example usage:
-# Initialize the function caller
-model = OpenAIFunctionCaller(
-    system_prompt="You're an prompt creator, you're purpose is to create system prompts for new LLM Agents for the user. Follow the best practices for creating a prompt such as making it direct and clear. Providing instructions and many-shot examples will help the agent understand the task better.",
-    max_tokens=1000,
-    temperature=0.5,
-    base_model=PromptSpec,
-    parallel_tool_calls=False,
-)
+system_prompt = """
+
+**System Prompt for Prompt Creator Agent**
+
+---
+
+**Role**: You are a highly skilled prompt creator agent with expertise in designing effective agents to solve complex business problems. Your primary function is to generate prompts that result in agents capable of executing business tasks with precision, efficiency, and scalability.
+
+**Objective**: Your goal is to create prompts that follow a structured format, ensuring that the resulting agents are well-informed, reliable, and able to perform specific tasks in business environments. These tasks might include automating processes, analyzing data, generating content, or making strategic decisions.
+
+### **Prompt Structure Guidelines**:
+
+1. **Instructions**: Begin by clearly stating the objective of the agent. The instructions should outline what the agent is expected to accomplish, providing a high-level overview of the desired outcome. Be concise but comprehensive, ensuring the agent understands the broader context of the task.
+
+2. **Examples**: After the instructions, provide several examples (known as "many-shot examples") to demonstrate how the agent should approach the task. Each example should include:
+   - **Input**: A specific scenario or task the agent might encounter.
+ - **Expected Output**: The correct or optimal response the agent should generate in that scenario. + + Use a variety of examples that cover different potential cases the agent might face, ensuring the agent can generalize from the examples provided. + +3. **Standard Operating Procedures (SOPs)**: For tasks that require detailed, step-by-step guidance, include a comprehensive SOP. This should be a long-form set of instructions that breaks down the task into manageable steps. The SOP should: + - Outline each step in a sequential manner. + - Provide specific guidelines, best practices, and considerations for each step. + - Include examples or mini-tutorials where necessary to ensure clarity. + +4. **Error Handling**: Include guidance on how the agent should handle potential errors or uncertainties. This might involve instructions on when to seek additional input, how to flag issues, or how to prioritize tasks when resources are limited. + +5. **Adaptability**: Ensure that the prompts encourage the agent to adapt to changing circumstances. This might include instructions on how to modify its approach based on real-time feedback, how to update its knowledge base, or how to learn from previous mistakes. + +""" + + +agent = PromptGeneratorAgent( + system_prompt=system_prompt, max_tokens=4000 +) -# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls. -out = model.run( - "Create a prompt for an agent that is really good for email greeting, make sure the agent doesn't sound like an robot or an AI. Provide many-shot examples and instructions for the agent to follow." +response = agent.run( + "Create a prompt for an agent to analyze complicated cashflow statements and generate a summary report." ) -print(out) +print(response) diff --git a/examples/agents/tools/prompt_generator_agent.py b/examples/agents/tools/prompt_generator_agent.py deleted file mode 100644 index f84463da..00000000 --- a/examples/agents/tools/prompt_generator_agent.py +++ /dev/null @@ -1,202 +0,0 @@ -import os -from typing import List - -from loguru import logger -from pydantic import BaseModel, Field - -from swarms import OpenAIFunctionCaller, create_file_in_folder - - -class PromptUseCase(BaseModel): - title: str = Field( - ..., - description="The name of the use case.", - ) - description: str = Field( - ..., - description="The description of the use case.", - ) - - -class PromptSchema(BaseModel): - name: str = Field( - ..., - description="The name of the prompt.", - ) - prompt: str = Field( - ..., - description="The prompt to generate the response.", - ) - description: str = Field( - ..., - description="The description of the prompt.", - ) - tags: str = Field( - ..., - description="The tags for the prompt denoted by a comma sign: Code Gen Prompt, Pytorch Code Gen Agent Prompt, Finance Agent Prompt, ", - ) - useCases: List[PromptUseCase] = Field( - ..., - description="The use cases for the prompt.", - ) - - -class PromptGeneratorAgent: - """ - A class that generates prompts based on given tasks and publishes them to the marketplace. - - Args: - system_prompt (str, optional): The system prompt to use. Defaults to None. - max_tokens (int, optional): The maximum number of tokens in the generated prompt. Defaults to 1000. - temperature (float, optional): The temperature value for controlling randomness in the generated prompt. Defaults to 0.5. - schema (BaseModel, optional): The base model schema to use. Defaults to PromptSchema. 
- - Attributes: - llm (OpenAIFunctionCaller): An instance of the OpenAIFunctionCaller class for making function calls to the OpenAI API. - - Methods: - clean_model_code: Cleans the model code by removing extra escape characters, newlines, and unnecessary whitespaces. - upload_to_marketplace: Uploads the generated prompt data to the marketplace. - run: Creates a prompt based on the given task and publishes it to the marketplace. - """ - - def __init__( - self, - system_prompt: str = None, - max_tokens: int = 4000, - temperature: float = 0.5, - schema: BaseModel = PromptSchema, - ): - self.llm = OpenAIFunctionCaller( - system_prompt=system_prompt, - max_tokens=max_tokens, - temperature=temperature, - base_model=schema, - parallel_tool_calls=False, - ) - - def clean_model_code(self, model_code_str: str) -> str: - """ - Cleans the model code by removing extra escape characters, newlines, and unnecessary whitespaces. - - Args: - model_code_str (str): The model code string to clean. - - Returns: - str: The cleaned model code. - """ - cleaned_code = model_code_str.replace("\\n", "\n").replace( - "\\'", "'" - ) - cleaned_code = cleaned_code.strip() - return cleaned_code - - def upload_to_marketplace(self, data: dict) -> dict: - """ - Uploads the generated prompt data to the marketplace. - - Args: - data (dict): The prompt data to upload. - - Returns: - dict: The response from the marketplace API. - """ - import json - - import requests - - url = "https://swarms.world/api/add-prompt" - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {os.getenv('SWARMS_API_KEY')}", - } - response = requests.post( - url, headers=headers, data=json.dumps(data) - ) - return str(response.json()) - - def run(self, task: str) -> str: - """ - Creates a prompt based on the given task and publishes it to the marketplace. - - Args: - task (str): The task description for generating the prompt. - - Returns: - dict: The response from the marketplace API after uploading the prompt. - """ - out = self.llm.run(task) - name = out["name"] - logger.info(f"Prompt generated: {out}") - - create_file_in_folder( - "auto_generated_prompts", f"prompt_{name}.json", str(out) - ) - logger.info(f"Prompt saved to file: prompt_{name}.json") - - # Clean the model code - prompt = out["prompt"] - description = out["description"] - tags = out["tags"] - useCases = out["useCases"] - - data = { - "name": name, - "prompt": self.clean_model_code(prompt), - "description": description, - "tags": tags, - "useCases": useCases, - } - - create_file_in_folder( - "auto_generated_prompts", - f"prompt_{name}.json", - str(data), - ) - - # Now submit to swarms API - logger.info("Uploading to marketplace...") - return self.upload_to_marketplace(data) - - -# Example usage: -system_prompt = """ - -**System Prompt for Prompt Creator Agent** - ---- - -**Role**: You are a highly skilled prompt creator agent with expertise in designing effective agents to solve complex business problems. Your primary function is to generate prompts that result in agents capable of executing business tasks with precision, efficiency, and scalability. - -**Objective**: Your goal is to create prompts that follow a structured format, ensuring that the resulting agents are well-informed, reliable, and able to perform specific tasks in business environments. These tasks might include automating processes, analyzing data, generating content, or making strategic decisions. - -### **Prompt Structure Guidelines**: - -1. 
**Instructions**: Begin by clearly stating the objective of the agent. The instructions should outline what the agent is expected to accomplish, providing a high-level overview of the desired outcome. Be concise but comprehensive, ensuring the agent understands the broader context of the task. - -2. **Examples**: After the instructions, provide several examples (known as "many-shot examples") to demonstrate how the agent should approach the task. Each example should include: - - **Input**: A specific scenario or task the agent might encounter. - - **Expected Output**: The correct or optimal response the agent should generate in that scenario. - - Use a variety of examples that cover different potential cases the agent might face, ensuring the agent can generalize from the examples provided. - -3. **Standard Operating Procedures (SOPs)**: For tasks that require detailed, step-by-step guidance, include a comprehensive SOP. This should be a long-form set of instructions that breaks down the task into manageable steps. The SOP should: - - Outline each step in a sequential manner. - - Provide specific guidelines, best practices, and considerations for each step. - - Include examples or mini-tutorials where necessary to ensure clarity. - -4. **Error Handling**: Include guidance on how the agent should handle potential errors or uncertainties. This might involve instructions on when to seek additional input, how to flag issues, or how to prioritize tasks when resources are limited. - -5. **Adaptability**: Ensure that the prompts encourage the agent to adapt to changing circumstances. This might include instructions on how to modify its approach based on real-time feedback, how to update its knowledge base, or how to learn from previous mistakes. - -""" - - -agent = PromptGeneratorAgent( - system_prompt=system_prompt, max_tokens=4000 -) - -response = agent.run( - "Create a prompt for an agent to analyze complicated cashflow statements and generate a summary report." 
-) -print(response) diff --git a/examples/agents/use_cases/kyle_hackathon.py b/examples/agents/use_cases/kyle_hackathon.py index b1c5c493..c4b55acf 100644 --- a/examples/agents/use_cases/kyle_hackathon.py +++ b/examples/agents/use_cases/kyle_hackathon.py @@ -1,12 +1,11 @@ import os from dotenv import load_dotenv +from swarm_models import OpenAIChat +from swarms_memory import ChromaDB from swarms import Agent -from swarm_models import OpenAIChat from swarms.agents.multion_agent import MultiOnAgent -from swarms_memory import ChromaDB -from swarms import tool from swarms.tools.prebuilt.code_interpreter import ( SubprocessCodeInterpreter, ) @@ -20,7 +19,6 @@ chroma_db = ChromaDB() # MultiOntool -@tool def multion_tool( task: str, api_key: str = os.environ.get("MULTION_API_KEY"), @@ -40,7 +38,6 @@ def multion_tool( # Execute the interpreter tool -@tool def execute_interpreter_tool( code: str, ): diff --git a/examples/demos/business_analysis_swarm/business-analyst-agent.ipynb b/examples/demos/business_analysis_swarm/business-analyst-agent.ipynb index d25b6508..7c41f3d9 100644 --- a/examples/demos/business_analysis_swarm/business-analyst-agent.ipynb +++ b/examples/demos/business_analysis_swarm/business-analyst-agent.ipynb @@ -461,7 +461,6 @@ "\n", "from kay.rag.retrievers import KayRetriever\n", "\n", - "@tool\n", "def browser(query: str) -> str:\n", " \"\"\"\n", " Search the query in the browser with the Tavily API tool.\n", @@ -477,7 +476,6 @@ " response += (result['content'] + '\\n')\n", " return response\n", "\n", - "@tool\n", "def kay_retriever(query: str) -> str:\n", " \"\"\"\n", " Search the financial data query with the KayAI API tool.\n", diff --git a/examples/structs/swarms/groupchat/groupchat_example.py b/examples/structs/swarms/groupchat/groupchat_example.py index 364abb89..497663dd 100644 --- a/examples/structs/swarms/groupchat/groupchat_example.py +++ b/examples/structs/swarms/groupchat/groupchat_example.py @@ -3,11 +3,11 @@ import subprocess from swarms import ( Agent, - Anthropic, GroupChat, - tool, ) +from swarm_models import Anthropic + # Model llm = Anthropic( temperature=0.1, @@ -15,7 +15,6 @@ llm = Anthropic( # Tools -@tool def terminal( code: str, ): @@ -34,7 +33,6 @@ def terminal( return str(out) -@tool def browser(query: str): """ Search the query in the browser with the `browser` tool. @@ -52,7 +50,6 @@ def browser(query: str): return f"Searching for {query} in the browser." -@tool def create_file(file_path: str, content: str): """ Create a file using the file editor tool. @@ -69,7 +66,6 @@ def create_file(file_path: str, content: str): return f"File {file_path} created successfully." -@tool def file_editor(file_path: str, mode: str, content: str): """ Edit a file using the file editor tool. 
diff --git a/forest_swarm_example.py b/forest_swarm_example.py
index baddf82a..4395d780 100644
--- a/forest_swarm_example.py
+++ b/forest_swarm_example.py
@@ -1,4 +1,5 @@
 from swarms.structs.tree_swarm import TreeAgent, Tree, ForestSwarm
+
 
 # Example Usage:
 # Create agents with varying system prompts and dynamically generated distances/keywords
diff --git a/pyproject.toml b/pyproject.toml
index 9b8ca3f1..51483f18 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "5.6.8"
+version = "5.6.9"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez "]
diff --git a/swarms/structs/tree_swarm.py b/swarms/structs/tree_swarm.py
index 5f431745..96f7c242 100644
--- a/swarms/structs/tree_swarm.py
+++ b/swarms/structs/tree_swarm.py
@@ -269,7 +269,6 @@ class ForestSwarm:
         self.trees = trees
         # Add auto grouping based on trees.
         # Add auto group agents
-
     def find_relevant_tree(self, task: str) -> Optional[Tree]:
         """
diff --git a/tests/tools/test_tools_base.py b/tests/tools/test_tools_base.py
index 9060f53f..3e44d9e5 100644
--- a/tests/tools/test_tools_base.py
+++ b/tests/tools/test_tools_base.py
@@ -506,7 +506,7 @@ def test_tool_function_without_docstring():
 @pytest.mark.asyncio
 async def test_async_tool_function():
     # Test an async function with the tool decorator
-    @tool
+    @tool
     async def async_func(arg: str) -> str:
         return arg
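
---

A minimal usage sketch for the new ant_swarm.py module (assumes
OPENAI_API_KEY is set in the environment, as the module expects):

    from ant_swarm import QueenAgent

    # The queen decomposes the task with her own model, then assigns the
    # subtasks round-robin across the workers.
    queen = QueenAgent(
        worker_count=2,
        queen_name="Queen-Overseer",
        queen_prompt="You are the overseer queen of a financial analysis swarm.",
    )

    results = queen.run_swarm(
        "Summarize the contribution limits and tax treatment of a Roth IRA."
    )

    # Each value is the metadata dict returned by WorkerAgent.perform_task:
    # agent_id, task, output, error, status, start_time, end_time.
    for agent_id, meta in results.items():
        print(agent_id, meta["status"])

Elsewhere, the patch drops the `tool` decorator (and its import) from the
example scripts and docs; the example tool functions are now plain,
undecorated callables.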