From 9733541cc761c481d4728e354eafe1695707c28f Mon Sep 17 00:00:00 2001
From: Kye
Date: Sat, 14 Oct 2023 14:08:44 -0400
Subject: [PATCH] stacked worker agent

---
 stacked_worker.py                         |  89 +++++++++
 swarms/agents/__init__.py                 |   2 +
 .../openai_llm.py => agents/hf_agents.py} |   4 +-
 swarms/prompts/agent_prompt_generator.py  | 186 ------------------
 4 files changed, 93 insertions(+), 188 deletions(-)
 create mode 100644 stacked_worker.py
 rename swarms/{models/openai_llm.py => agents/hf_agents.py} (99%)
 delete mode 100644 swarms/prompts/agent_prompt_generator.py

diff --git a/stacked_worker.py b/stacked_worker.py
new file mode 100644
index 00000000..9f545f1f
--- /dev/null
+++ b/stacked_worker.py
@@ -0,0 +1,89 @@
+from swarms.models import OpenAIChat
+from swarms.workers import Worker
+from swarms.tools.autogpt import tool
+from swarms.agents.hf_agents import HFAgent
+from swarms.agents.omni_modal_agent import OmniModalAgent
+
+# Initialize the API key
+api_key = ""
+
+
+# Initialize the language model.
+# This model can be swapped out for Anthropic models, Hugging Face models such as Mistral, etc.
+llm = OpenAIChat(
+    openai_api_key=api_key,
+    temperature=0.5,
+)
+
+
+# Wrap a function with the @tool decorator to turn it into a tool
+@tool
+def hf_agent(task: str = None):
+    """
+    A tool that uses an OpenAI model to respond to a task by searching for a suitable model on Hugging Face.
+    It first downloads the model and then uses it.
+
+    Rules: Don't call this tool for simple tasks like generating a summary; only call it for multi-modal tasks like generating images, videos, speech, etc.
+
+    """
+    agent = HFAgent(model="text-davinci-003", api_key=api_key)
+    response = agent.run(task, text="¡Este es un API muy agradable!")
+    return response
+
+
+# Wrap a function with the @tool decorator to turn it into a tool
+@tool
+def omni_agent(task: str = None):
+    """
+    A tool that uses an OpenAI model to call Hugging Face models and guide them through a task.
+
+    Rules: Don't call this tool for simple tasks like generating a summary; only call it for multi-modal tasks like generating images, videos, or speech.
+    The following tasks are what this tool should be used for:
+
+    Tasks the omni agent is good for:
+    --------------
+    document-question-answering
+    image-captioning
+    image-question-answering
+    image-segmentation
+    speech-to-text
+    summarization
+    text-classification
+    text-question-answering
+    translation
+    huggingface-tools/text-to-image
+    huggingface-tools/text-to-video
+    text-to-speech
+    huggingface-tools/text-download
+    huggingface-tools/image-transformation
+    """
+    agent = OmniModalAgent(llm)
+    response = agent.run(task)
+    return response
+
+
+# Collect the tools in a list
+tools = [
+    hf_agent,
+    omni_agent,
+]
+
+
+# Initialize a single Worker node with the tools defined above in addition to its
+# predefined tools
+node = Worker(
+    llm=llm,
+    ai_name="Optimus Prime",
+    openai_api_key=api_key,
+    ai_role="Worker in a swarm",
+    external_tools=tools,
+    human_in_the_loop=False,
+    temperature=0.5,
+)
+
+# Specify the task
+task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
+
+# Run the node on the task
+response = node.run(task)
+
+# Print the response
+print(response)
diff --git a/swarms/agents/__init__.py b/swarms/agents/__init__.py
index 9bcb519d..4ca446ad 100644
--- a/swarms/agents/__init__.py
+++ b/swarms/agents/__init__.py
@@ -5,6 +5,8 @@
 # from swarms.agents.aot import AoTAgent
 # from swarms.agents.multi_modal_visual_agent import MultiModalAgent
 from swarms.agents.omni_modal_agent import OmniModalAgent
+from swarms.agents.hf_agents import HFAgent
+
 
 
 # utils
diff --git a/swarms/models/openai_llm.py b/swarms/agents/hf_agents.py
similarity index 99%
rename from swarms/models/openai_llm.py
rename to swarms/agents/hf_agents.py
index e0a8ae62..28c18c71 100644
--- a/swarms/models/openai_llm.py
+++ b/swarms/agents/hf_agents.py
@@ -370,7 +370,7 @@ class Agent:
         return [self.generate_one(prompt, stop) for prompt in prompts]
 
 
-class OpenAIModel(Agent):
+class HFAgent(Agent):
     """
     Agent that uses the openai API to generate code.
 
@@ -401,7 +401,7 @@ class OpenAIModel(Agent):
     Example:
 
     ```py
-    from swarms.models import OpenAI
+    from swarms.agents.hf_agents import HFAgent
 
     agent = OpenAiAgent(model="text-davinci-003", api_key=xxx)
     agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
diff --git a/swarms/prompts/agent_prompt_generator.py b/swarms/prompts/agent_prompt_generator.py
deleted file mode 100644
index 81e93a73..00000000
--- a/swarms/prompts/agent_prompt_generator.py
+++ /dev/null
@@ -1,186 +0,0 @@
-import json
-from typing import List
-
-from langchain.tools.base import BaseTool
-
-FINISH_NAME = "finish"
-
-
-class PromptGenerator:
-    """A class for generating custom prompt strings.
-
-    Does this based on constraints, commands, resources, and performance evaluations.
-    """
-
-    def __init__(self) -> None:
-        """Initialize the PromptGenerator object.
-
-        Starts with empty lists of constraints, commands, resources,
-        and performance evaluations.
-        """
-        self.constraints: List[str] = []
-        self.commands: List[BaseTool] = []
-        self.resources: List[str] = []
-        self.performance_evaluation: List[str] = []
-        self.response_format = {
-            "thoughts": {
-                "text": "thought",
-                "reasoning": "reasoning",
-                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
-                "criticism": "constructive self-criticism",
-                "speak": "thoughts summary to say to user",
-            },
-            "command": {"name": "command name", "args": {"arg name": "value"}},
-        }
-
-    def add_constraint(self, constraint: str) -> None:
-        """
-        Add a constraint to the constraints list.
-
-        Args:
-            constraint (str): The constraint to be added.
-        """
-        self.constraints.append(constraint)
-
-    def add_tool(self, tool: BaseTool) -> None:
-        self.commands.append(tool)
-
-    def _generate_command_string(self, tool: BaseTool) -> str:
-        output = f"{tool.name}: {tool.description}"
-        output += f", args json schema: {json.dumps(tool.args)}"
-        return output
-
-    def add_resource(self, resource: str) -> None:
-        """
-        Add a resource to the resources list.
-
-        Args:
-            resource (str): The resource to be added.
-        """
-        self.resources.append(resource)
-
-    def add_performance_evaluation(self, evaluation: str) -> None:
-        """
-        Add a performance evaluation item to the performance_evaluation list.
-
-        Args:
-            evaluation (str): The evaluation item to be added.
-        """
-        self.performance_evaluation.append(evaluation)
-
-    def _generate_numbered_list(self, items: list, item_type: str = "list") -> str:
-        """
-        Generate a numbered list from given items based on the item_type.
- - Args: - items (list): A list of items to be numbered. - item_type (str, optional): The type of items in the list. - Defaults to 'list'. - - Returns: - str: The formatted numbered list. - """ - if item_type == "command": - command_strings = [ - f"{i + 1}. {self._generate_command_string(item)}" - for i, item in enumerate(items) - ] - finish_description = ( - "use this to signal that you have finished all your objectives" - ) - finish_args = ( - '"response": "final response to let ' - 'people know you have finished your objectives"' - ) - finish_string = ( - f"{len(items) + 1}. {FINISH_NAME}: " - f"{finish_description}, args: {finish_args}" - ) - return "\n".join(command_strings + [finish_string]) - else: - return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) - - def generate_prompt_string(self) -> str: - """Generate a prompt string. - - Returns: - str: The generated prompt string. - """ - formatted_response_format = json.dumps(self.response_format, indent=4) - prompt_string = ( - f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n" - f"Commands:\n" - f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" - f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n" - f"Performance Evaluation:\n" - f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" - f"You should only respond in JSON format as described below " - f"\nResponse Format: \n{formatted_response_format} " - f"\nEnsure the response can be parsed by Python json.loads" - ) - - return prompt_string - - -def get_prompt(tools: List[BaseTool]) -> str: - """Generates a prompt string. - - It includes various constraints, commands, resources, and performance evaluations. - - Returns: - str: The generated prompt string. - """ - - # Initialize the PromptGenerator object - prompt_generator = PromptGenerator() - - # Add constraints to the PromptGenerator object - prompt_generator.add_constraint( - "~4000 word limit for short term memory. " - "Your short term memory is short, " - "so immediately save important information to files." - ) - prompt_generator.add_constraint( - "If you are unsure how you previously did something " - "or want to recall past events, " - "thinking about similar events will help you remember." - ) - prompt_generator.add_constraint("No user assistance") - prompt_generator.add_constraint( - 'Exclusively use the commands listed in double quotes e.g. "command name"' - ) - - # Add commands to the PromptGenerator object - for tool in tools: - prompt_generator.add_tool(tool) - - # Add resources to the PromptGenerator object - prompt_generator.add_resource( - "Internet access for searches and information gathering." - ) - prompt_generator.add_resource("Long Term memory management.") - prompt_generator.add_resource( - "GPT-3.5 powered Agents for delegation of simple tasks." - ) - prompt_generator.add_resource("File output.") - - # Add performance evaluations to the PromptGenerator object - prompt_generator.add_performance_evaluation( - "Continuously review and analyze your actions " - "to ensure you are performing to the best of your abilities." - ) - prompt_generator.add_performance_evaluation( - "Constructively self-criticize your big-picture behavior constantly." - ) - prompt_generator.add_performance_evaluation( - "Reflect on past decisions and strategies to refine your approach." - ) - prompt_generator.add_performance_evaluation( - "Every command has a cost, so be smart and efficient. " - "Aim to complete tasks in the least number of steps." 
- ) - - # Generate the prompt string - prompt_string = prompt_generator.generate_prompt_string() - - return prompt_string
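For code that imported the renamed module, this patch implies an import update. Below is a minimal before/after sketch based only on the paths, class names, and the docstring example shown in the diff; the empty `api_key` is a placeholder and the prompt text is copied from that example.

```py
# Before this patch, the class lived in swarms/models/openai_llm.py as OpenAIModel:
# from swarms.models.openai_llm import OpenAIModel

# After this patch, the same class is importable as HFAgent:
from swarms.agents.hf_agents import HFAgent

api_key = ""  # placeholder; supply a real OpenAI API key

# Usage mirrors the docstring example and stacked_worker.py above.
agent = HFAgent(model="text-davinci-003", api_key=api_key)
response = agent.run(
    "Is the following `text` (in Spanish) positive or negative?",
    text="¡Este es un API muy agradable!",
)
print(response)
```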