diff --git a/docs/corporate/data_room.md b/docs/corporate/data_room.md
index 8df244a4..0a2f6109 100644
--- a/docs/corporate/data_room.md
+++ b/docs/corporate/data_room.md
@@ -52,7 +52,6 @@ ## **Introduction**
 Swarms provides automation-as-a-service through swarms of autonomous agents that work together as a team. We enable our customers to build, deploy, and scale production-grade multi-agent applications to automate real-world tasks.
 
-
 ### **Vision**
 Our vision for 2024 is to provide the most reliable infrastructure for deploying autonomous agents into the real world through the Swarm Cloud, our premier cloud platform for the scalable deployment of Multi-Modal Autonomous Agents. The platform focuses on delivering maximum value to users by only taking a small fee when utilizing the agents for the hosted compute power needed to host the agents.
@@ -69,16 +68,26 @@ The team has thousands of hours building and optimizing autonomous agents. Leade
 Key milestones: get 80K framework users in January 2024, start contracts in target verticals, introduce commercial products in 2025 with various pricing models.
 
-
+## **Resources**
 ### **Pre-Seed Pitch Deck**
-- [Here is our pitch deck for our preseed round](https://www.figma.com/file/LlEMXZ48HTIG3S9VzdibaB/Swarm-Pitch-Deck?type=design&node-id=0%3A1&
-mode=design&t=D3023hPOz27M9RGD-1)
-
+- [Here is our pitch deck for our preseed round](https://drive.google.com/file/d/1c76gK5UIdrfN4JOSpSlvVBEOpzR9emWc/view?usp=sharing)
 ### **The Swarm Corporation Memo**
 To learn more about our mission, vision, plans for GTM, and much more please refer to the [Swarm Memo here](https://docs.google.com/document/d/1hS_nv_lFjCqLfnJBoF6ULY9roTbSgSuCkvXvSUSc7Lo/edit?usp=sharing)
 
+
+
+## **Financial Documents**
+This section is dedicated entirely to financial documents.
+
+- [Cap Table](https://docs.google.com/spreadsheets/d/1wuTWbfhYaY5Xp6nSQ9R0wDtSpwSS9coHxsjKd0UbIDc/edit?usp=sharing)
+
+- [Cashflow Prediction Sheet](https://docs.google.com/spreadsheets/d/1HQEHCIXXMHajXMl5sj8MEfcQtWfOnD7GjHtNiocpD60/edit?usp=sharing)
+
+
+------
+
 ## **Product**
 
 Swarms is an open source framework for developers in python to enable seamless, reliable, and scalable multi-agent orchestration through modularity, customization, and precision.
@@ -86,7 +95,7 @@ Swarms is an open source framework for developers in python to enable seamless,
 ### Product Growth Metrics
 
 | Name                             | Description                                                                                                     | Link                                                                                        |
-|----------------------------------|-----------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|
+|----------------------------------|-----------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|
 | Total Downloads of all time      | Total number of downloads for the product over its entire lifespan.                                            | [![Downloads](https://static.pepy.tech/badge/swarms)](https://pepy.tech/project/swarms)    |
 | Downloads this month             | Number of downloads for the product in the current month.                                                      | [![Downloads](https://static.pepy.tech/badge/swarms/month)](https://pepy.tech/project/swarms) |
 | Total Downloads this week        | Total number of downloads for the product in the current week.
| [![Downloads](https://static.pepy.tech/badge/swarms/week)](https://pepy.tech/project/swarms) | @@ -98,10 +107,4 @@ Swarms is an open source framework for developers in python to enable seamless, | Github Traffic Metrics | Metrics related to traffic, such as views and clones on Github. | [Github Traffic Metrics](https://github.com/kyegomez/swarms/graphs/traffic) | | Issues with the framework | Current open issues for the product on Github. | [![GitHub issues](https://img.shields.io/github/issues/kyegomez/swarms)](https://github.com/kyegomez/swarms/issues) | - - -## **Corporate Documents** -This section is dedicated entirely for corporate documents. - -- [Cap Table](https://docs.google.com/spreadsheets/d/1wuTWbfhYaY5Xp6nSQ9R0wDtSpwSS9coHxsjKd0UbIDc/edit?usp=sharing) - +------- \ No newline at end of file diff --git a/playground/agents/multion_agent.py b/playground/agents/multion_agent.py index a8f5175d..88e383b3 100644 --- a/playground/agents/multion_agent.py +++ b/playground/agents/multion_agent.py @@ -1,71 +1,12 @@ -import multion - -from swarms.models.base_llm import AbstractLLM +from swarms.agents.multion_agent import MultiOnAgent from swarms.structs.agent import Agent from swarms.structs.concurrent_workflow import ConcurrentWorkflow from swarms.structs.task import Task - -class MultiOnAgent(AbstractLLM): - """ - Represents a multi-on agent that performs browsing tasks. - - Args: - max_steps (int): The maximum number of steps to perform during browsing. - starting_url (str): The starting URL for browsing. - - Attributes: - max_steps (int): The maximum number of steps to perform during browsing. - starting_url (str): The starting URL for browsing. - """ - - def __init__( - self, - multion_api_key: str, - max_steps: int = 4, - starting_url: str = "https://www.google.com", - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.multion_api_key = multion_api_key - self.max_steps = max_steps - self.starting_url = starting_url - - multion.login( - use_api=True, - # multion_api_key=self.multion_api_key - *args, - **kwargs, - ) - - def run(self, task: str, *args, **kwargs): - """ - Runs a browsing task. - - Args: - task (str): The task to perform during browsing. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Returns: - dict: The response from the browsing task. 
-        """
-        response = multion.browse(
-            {
-                "cmd": task,
-                "url": self.starting_url,
-                "maxSteps": self.max_steps,
-            },
-            *args,
-            **kwargs,
-        )
-
-        return response.result, response.status, response.lastUrl
-
-
 # model
-model = MultiOnAgent(multion_api_key="")
+model = MultiOnAgent(
+    multion_api_key=""
+)
 
 # out = model.run("search for a recipe")
 agent = Agent(
@@ -86,16 +27,16 @@ task = Task(
     ),
 )
 
+
 # Swarm
 workflow = ConcurrentWorkflow(
-    max_workers=1000,
+    max_workers=21,
     autosave=True,
     print_results=True,
     return_results=True,
 )
 
+
 # Add task to workflow
 workflow.add(task)
-
-# Run workflow
 workflow.run()
diff --git a/playground/structs/kyle_hackathon.py b/playground/structs/kyle_hackathon.py
new file mode 100644
index 00000000..c66de68b
--- /dev/null
+++ b/playground/structs/kyle_hackathon.py
@@ -0,0 +1,84 @@
+import os
+
+from dotenv import load_dotenv
+from swarms import Agent, OpenAIChat
+from swarms.agents.multion_agent import MultiOnAgent
+from swarms.memory.chroma_db import ChromaDB
+from swarms.tools.tool import tool
+from swarms.utils.code_interpreter import SubprocessCodeInterpreter
+
+# Load the environment variables
+load_dotenv()
+
+
+# Memory
+chroma_db = ChromaDB()
+
+
+# MultiOn tool
+@tool
+def multion_tool(
+    task: str,
+    api_key: str = os.environ.get("MULTION_API_KEY"),
+):
+    """
+    Executes a task using the MultiOnAgent.
+
+    Args:
+        task (str): The task to be executed.
+        api_key (str, optional): The API key for the MultiOnAgent. Defaults to the value of the MULTION_API_KEY environment variable.
+
+    Returns:
+        The result of the task execution.
+    """
+    multion = MultiOnAgent(multion_api_key=api_key)
+    return multion(task)
+
+
+# Execute the interpreter tool
+@tool
+def execute_interpreter_tool(code: str):
+    """
+    Executes a single block of code using the subprocess code interpreter.
+
+    Args:
+        code (str): The code to be executed.
+
+    Returns:
+        The output produced by the interpreter.
+    """
+    interpreter = SubprocessCodeInterpreter(debug_mode=True)
+    out = interpreter.run(code)
+    return out
+
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(
+    temperature=0.5,
+    openai_api_key=api_key,
+)
+
+
+# Initialize the workflow
+agent = Agent(
+    agent_name="Research Agent",
+    agent_description="An agent that performs research tasks.",
+    system_prompt="Perform a research task.",
+    llm=llm,
+    max_loops=1,
+    dashboard=True,
+    # tools=[multion_tool, execute_interpreter_tool],
+    verbose=True,
+    long_term_memory=chroma_db,
+    stopping_token="done",
+)
+
+# Run the workflow on a task
+out = agent.run(
+    "Generate a 10,000 word blog on health and wellness, and say done"
+    " when you are done"
+)
+print(out)
diff --git a/playground/structs/message_pool_example.py b/playground/structs/message_pool_example.py
new file mode 100644
index 00000000..dca596ba
--- /dev/null
+++ b/playground/structs/message_pool_example.py
@@ -0,0 +1,19 @@
+from swarms.structs.agent import Agent
+from swarms.structs.message_pool import MessagePool
+from swarms import OpenAIChat
+
+agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
+agent2 = Agent(llm=OpenAIChat(), agent_name="agent2")
+agent3 = Agent(llm=OpenAIChat(), agent_name="agent3")
+
+moderator = Agent(agent_name="moderator")
+agents = [agent1, agent2, agent3]
+message_pool = MessagePool(
+    agents=agents, moderator=moderator, turns=5
+)
+message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
+message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
+message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
+message_pool.get_all_messages()
+message_pool.get_visible_messages(agent=agent1, turn=1)
+message_pool.get_visible_messages(agent=agent2, turn=1)
diff --git a/pyproject.toml b/pyproject.toml
index 3d3931d0..e9af9b9b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -76,6 +76,7 @@ supervision = "*"
 scikit-image = "*"
 pinecone-client = "*"
 roboflow = "*"
+multion = "*"
diff --git a/requirements.txt b/requirements.txt
index 25fffdfb..2a62f6ba 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,6 +17,7 @@ requests_mock
 pypdf==4.0.1
 accelerate==0.22.0
 loguru
+multion
 chromadb
 tensorflow
 optimum
diff --git a/swarms/agents/__init__.py b/swarms/agents/__init__.py
index b213748e..52db5534 100644
--- a/swarms/agents/__init__.py
+++ b/swarms/agents/__init__.py
@@ -16,6 +16,7 @@ from swarms.agents.stopping_conditions import (
 )
 from swarms.agents.tool_agent import ToolAgent
 from swarms.agents.worker_agent import Worker
+from swarms.agents.multion_agent import MultiOnAgent
 
 __all__ = [
     "AbstractAgent",
@@ -34,4 +35,5 @@ __all__ = [
     "check_end",
     "Worker",
     "agent_wrapper",
+    "MultiOnAgent",
 ]
diff --git a/swarms/agents/multion_agent.py b/swarms/agents/multion_agent.py
new file mode 100644
index 00000000..760f3251
--- /dev/null
+++ b/swarms/agents/multion_agent.py
@@ -0,0 +1,70 @@
+import os
+import multion
+
+from swarms.models.base_llm import AbstractLLM
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+
+# MultiOn key
+MULTION_API_KEY = os.getenv("MULTION_API_KEY")
+
+
+class MultiOnAgent(AbstractLLM):
+    """
+    Represents a MultiOn agent that performs browsing tasks.
+
+    Args:
+        multion_api_key (str): The MultiOn API key. Defaults to the MULTION_API_KEY environment variable.
+        max_steps (int): The maximum number of steps to perform during browsing.
+        starting_url (str): The starting URL for browsing.
+
+    Attributes:
+        max_steps (int): The maximum number of steps to perform during browsing.
+ starting_url (str): The starting URL for browsing. + """ + + def __init__( + self, + multion_api_key: str = MULTION_API_KEY, + max_steps: int = 4, + starting_url: str = "https://www.google.com", + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + self.multion_api_key = multion_api_key + self.max_steps = max_steps + self.starting_url = starting_url + + self.multion = multion.login( + use_api=True, + multion_api_key=str(multion_api_key), + *args, + **kwargs, + ) + + def run(self, task: str, *args, **kwargs): + """ + Runs a browsing task. + + Args: + task (str): The task to perform during browsing. + *args: Additional positional arguments. + **kwargs: Additional keyword arguments. + + Returns: + dict: The response from the browsing task. + """ + response = self.multion.browse( + { + "cmd": task, + "url": self.starting_url, + "maxSteps": self.max_steps, + }, + *args, + **kwargs, + ) + + return response.result, response.status, response.lastUrl diff --git a/swarms/agents/worker_agent.py b/swarms/agents/worker_agent.py index d254acef..6dffc483 100644 --- a/swarms/agents/worker_agent.py +++ b/swarms/agents/worker_agent.py @@ -1,12 +1,12 @@ import os -from typing import Any, List +from typing import List import faiss from langchain.docstore import InMemoryDocstore from langchain.embeddings import OpenAIEmbeddings from langchain.vectorstores import FAISS from langchain_experimental.autonomous_agents import AutoGPT - +from swarms.tools.tool import BaseTool from swarms.utils.decorators import error_decorator, timing_decorator @@ -48,7 +48,7 @@ class Worker: temperature: float = 0.5, llm=None, openai_api_key: str = None, - tools: List[Any] = None, + tools: List[BaseTool] = None, embedding_size: int = 1536, search_kwargs: dict = {"k": 8}, verbose: bool = False, @@ -165,7 +165,7 @@ class Worker: # @log_decorator @error_decorator @timing_decorator - def run(self, task: str = None, img=None, *args, **kwargs): + def run(self, task: str = None, *args, **kwargs): """ Run the autonomous agent on a given task. @@ -195,7 +195,7 @@ class Worker: - `results`: The results of the agent's processing. 
""" try: - results = self.run(task, *args, **kwargs) - return results + result = self.agent.run([task], *args, **kwargs) + return result except Exception as error: raise RuntimeError(f"Error while running agent: {error}") diff --git a/swarms/models/cog_vlm.py b/swarms/models/cog_vlm.py index 14b99b60..e456b669 100644 --- a/swarms/models/cog_vlm.py +++ b/swarms/models/cog_vlm.py @@ -227,8 +227,8 @@ class CogVLMMultiModal(BaseMultiModalModel): torch_type = torch.float16 print( - "========Use torch type as:{} with device:{}========\n\n" - .format(torch_type, device) + f"========Use torch type as:{torch_type} with" + f" device:{device}========\n\n" ) if "cuda" in device: @@ -495,7 +495,7 @@ class CogVLMMultiModal(BaseMultiModalModel): choices=[choice_data], object="chat.completion.chunk", ) - yield "{}".format(chunk.model_dump_json(exclude_unset=True)) + yield f"{chunk.model_dump_json(exclude_unset=True)}" previous_text = "" for new_response in self.generate_stream_cogvlm(params): @@ -515,9 +515,7 @@ class CogVLMMultiModal(BaseMultiModalModel): choices=[choice_data], object="chat.completion.chunk", ) - yield "{}".format( - chunk.model_dump_json(exclude_unset=True) - ) + yield f"{chunk.model_dump_json(exclude_unset=True)}" choice_data = ChatCompletionResponseStreamChoice( index=0, delta=DeltaMessage(), @@ -527,4 +525,4 @@ class CogVLMMultiModal(BaseMultiModalModel): choices=[choice_data], object="chat.completion.chunk", ) - yield "{}".format(chunk.model_dump_json(exclude_unset=True)) + yield f"{chunk.model_dump_json(exclude_unset=True)}" diff --git a/swarms/models/nougat.py b/swarms/models/nougat.py index e3d35370..8c95b9c1 100644 --- a/swarms/models/nougat.py +++ b/swarms/models/nougat.py @@ -96,9 +96,7 @@ class Nougat: # Convert the matches to a readable format cleaned_data = [ - "Date: {}, Amount: {}".format( - date, amount.replace(",", "") - ) + f"Date: {date}, Amount: {amount.replace(',', '')}" for date, amount in matches ] diff --git a/swarms/prompts/worker_prompt.py b/swarms/prompts/worker_prompt.py index 165fa058..08636516 100644 --- a/swarms/prompts/worker_prompt.py +++ b/swarms/prompts/worker_prompt.py @@ -3,12 +3,12 @@ import datetime time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") -def worker_tools_sop_promp(name: str, memory: str): +def worker_tools_sop_promp(name: str, memory: str, time=time): out = """ You are {name}, Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications. - If you have completed all your tasks, make sure to use the "finish" command. + If you have completed all your tasks, make sure to use the 'finish' command. GOALS: @@ -19,11 +19,11 @@ def worker_tools_sop_promp(name: str, memory: str): 1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files. 2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember. 3. No user assistance - 4. Exclusively use the commands listed in double quotes e.g. "command name" + 4. Exclusively use the commands listed in double quotes e.g. 'command name' Commands: - 1. finish: use this to signal that you have finished all your objectives, args: "response": "final response to let people know you have finished your objectives" + 1. 
finish: use this to signal that you have finished all your objectives, args: 'response': 'final response to let people know you have finished your objectives' Resources: @@ -42,17 +42,17 @@ def worker_tools_sop_promp(name: str, memory: str): You should only respond in JSON format as described below Response Format: { - "thoughts": { - "text": "thought", - "reasoning": "reasoning", - "plan": "- short bulleted - list that conveys - long-term plan", - "criticism": "constructive self-criticism", - "speak": "thoughts summary to say to user" + 'thoughts': { + 'text': 'thoughts', + 'reasoning': 'reasoning', + 'plan': '- short bulleted - list that conveys - long-term plan', + 'criticism': 'constructive self-criticism', + 'speak': 'thoughts summary to say to user' }, - "command": { - "name": "command name", - "args": { - "arg name": "value" + 'command': { + 'name': 'command name', + 'args': { + 'arg name': 'value' } } } @@ -62,6 +62,6 @@ def worker_tools_sop_promp(name: str, memory: str): [{memory}] Human: Determine which next command to use, and respond using the format specified above: - """.format(name=name, memory=memory, time=time) + """.format(name=name, time=time, memory=memory) return str(out) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index be5b4402..e1282e5b 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -24,8 +24,6 @@ from swarms.tools.exec_tool import execute_tool_by_name from swarms.tools.tool import BaseTool from swarms.utils.code_interpreter import SubprocessCodeInterpreter from swarms.utils.data_to_text import data_to_text - -# from swarms.utils.logger import logger from swarms.utils.parse_code import extract_code_from_markdown from swarms.utils.pdf_to_text import pdf_to_text from swarms.utils.token_count_tiktoken import limit_tokens_from_string @@ -33,6 +31,7 @@ from swarms.utils.video_to_frames import ( save_frames_as_images, video_to_frames, ) +import yaml # Utils @@ -209,6 +208,8 @@ class Agent: search_algorithm: Optional[Callable] = None, logs_to_filename: Optional[str] = None, evaluator: Optional[Callable] = None, + output_json: bool = False, + stopping_func: Optional[Callable] = None, *args, **kwargs, ): @@ -262,6 +263,8 @@ class Agent: self.search_algorithm = search_algorithm self.logs_to_filename = logs_to_filename self.evaluator = evaluator + self.output_json = output_json + self.stopping_func = stopping_func # The max_loops will be set dynamically if the dynamic_loop if self.dynamic_loops: @@ -626,6 +629,11 @@ class Agent: ) print(response) + if self.output_json: + response = extract_code_from_markdown( + response + ) + # Add the response to the history history.append(response) @@ -651,13 +659,27 @@ class Agent: "Evaluator", out ) - # Check to see if stopping token is in the output to stop the loop + # Stopping logic for agents if self.stopping_token: + # Check if the stopping token is in the response + if self.stopping_token in response: + break + + if self.stopping_condition: if self._check_stopping_condition( response - ) or parse_done_token(response): + ): + break + + if self.parse_done_token: + if parse_done_token(response): + break + + if self.stopping_func is not None: + if self.stopping_func(response) is True: break + # If the stopping condition is met then break self.step_cache.append(step) logging.info(f"Step: {step}") @@ -1043,6 +1065,22 @@ class Agent: # Get user input response = input("You: ") + def save_to_yaml(self, file_path: str) -> None: + """ + Save the agent to a YAML file + + Args: + file_path (str): The path 
to the YAML file + """ + try: + logger.info(f"Saving agent to YAML file: {file_path}") + with open(file_path, "w") as f: + yaml.dump(self.__dict__, f) + except Exception as error: + print( + colored(f"Error saving agent to YAML: {error}", "red") + ) + def save_state(self, file_path: str) -> None: """ Saves the current state of the agent to a JSON file, including the llm parameters. @@ -1075,7 +1113,6 @@ class Agent: "autosave": self.autosave, "saved_state_path": self.saved_state_path, "max_loops": self.max_loops, - # "StepCache": self.step_cache, } with open(file_path, "w") as f: diff --git a/swarms/structs/async_workflow.py b/swarms/structs/async_workflow.py index da144642..fa53c46b 100644 --- a/swarms/structs/async_workflow.py +++ b/swarms/structs/async_workflow.py @@ -43,7 +43,7 @@ class AsyncWorkflow: loop: Optional[asyncio.AbstractEventLoop] = None stopping_condition: Optional[Callable] = None - async def add(self, task: Any, tasks: List[Any]): + async def add(self, task: Any = None, tasks: List[Any] = None): """Add tasks to the workflow""" try: if tasks: diff --git a/swarms/structs/message_pool.py b/swarms/structs/message_pool.py new file mode 100644 index 00000000..37dbb19e --- /dev/null +++ b/swarms/structs/message_pool.py @@ -0,0 +1,214 @@ +import hashlib +from time import time_ns +from typing import Callable, List, Optional, Sequence, Union + +from swarms.structs.agent import Agent +from swarms.structs.base_swarm import BaseSwarm +from swarms.utils.loguru_logger import logger + + +def _hash(input: str): + """ + Hashes the input string using SHA256 algorithm. + + Args: + input (str): The string to be hashed. + + Returns: + str: The hexadecimal representation of the hash value. + """ + hex_dig = hashlib.sha256(input.encode("utf-8")).hexdigest() + return hex_dig + + +def msg_hash( + agent: Agent, content: str, turn: int, msg_type: str = "text" +): + """ + Generate a hash value for a message. + + Args: + agent (Agent): The agent sending the message. + content (str): The content of the message. + turn (int): The turn number of the message. + msg_type (str, optional): The type of the message. Defaults to "text". + + Returns: + int: The hash value of the message. + """ + time = time_ns() + return _hash( + f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:" + f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}" + ) + + +class MessagePool(BaseSwarm): + """ + A class representing a message pool for agents in a swarm. + + Attributes: + agents (Optional[Sequence[Agent]]): The list of agents in the swarm. + moderator (Optional[Agent]): The moderator agent. + turns (Optional[int]): The number of turns. + routing_function (Optional[Callable]): The routing function for message distribution. + show_names (Optional[bool]): Flag indicating whether to show agent names. + messages (List[Dict]): The list of messages in the pool. 
+ + Examples: + >>> from swarms.structs.agent import Agent + >>> from swarms.structs.message_pool import MessagePool + >>> agent1 = Agent(agent_name="agent1") + >>> agent2 = Agent(agent_name="agent2") + >>> agent3 = Agent(agent_name="agent3") + >>> moderator = Agent(agent_name="moderator") + >>> agents = [agent1, agent2, agent3] + >>> message_pool = MessagePool(agents=agents, moderator=moderator, turns=5) + >>> message_pool.add(agent=agent1, content="Hello, agent2!", turn=1) + >>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1) + >>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1) + >>> message_pool.get_all_messages() + [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}] + >>> message_pool.get_visible_messages(agent=agent1, turn=1) + [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}] + >>> message_pool.get_visible_messages(agent=agent2, turn=1) + [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}] + """ + + def __init__( + self, + agents: Optional[Sequence[Agent]] = None, + moderator: Optional[Agent] = None, + turns: Optional[int] = 5, + routing_function: Optional[Callable] = None, + show_names: Optional[bool] = False, + autosave: Optional[bool] = False, + *args, + **kwargs, + ): + super().__init__() + + self.agent = agents + self.moderator = moderator + self.turns = turns + self.routing_function = routing_function + self.show_names = show_names + self.autosave = autosave + + self.messages = [] + + logger.info("MessagePool initialized") + logger.info(f"Number of agents: {len(agents)}") + logger.info( + f"Agents: {[agent.agent_name for agent in agents]}" + ) + logger.info(f"moderator: {moderator.agent_name} is available") + logger.info(f"Number of turns: {turns}") + + def add( + self, + agent: Agent, + content: str, + turn: int, + visible_to: Union[str, List[str]] = "all", + logged: bool = True, + ): + """ + Add a message to the pool. + + Args: + agent (Agent): The agent sending the message. + content (str): The content of the message. + turn (int): The turn number. + visible_to (Union[str, List[str]], optional): The agents who can see the message. Defaults to "all". + logged (bool, optional): Flag indicating whether the message should be logged. Defaults to True. + """ + + self.messages.append( + { + "agent": agent, + "content": content, + "turn": turn, + "visible_to": visible_to, + "logged": logged, + } + ) + logger.info(f"Message added: {content}") + + def reset(self): + """ + Reset the message pool. + """ + self.messages = [] + logger.info("MessagePool reset") + + def last_turn(self): + """ + Get the last turn number. + + Returns: + int: The last turn number. 
+        """
+        if len(self.messages) == 0:
+            return 0
+        else:
+            return self.messages[-1]["turn"]
+
+    @property
+    def last_message(self):
+        """
+        Get the last message in the pool.
+
+        Returns:
+            dict: The last message.
+        """
+        if len(self.messages) == 0:
+            return None
+        else:
+            return self.messages[-1]
+
+    def get_all_messages(self):
+        """
+        Get all messages in the pool.
+
+        Returns:
+            List[Dict]: The list of all messages.
+        """
+        return self.messages
+
+    def get_visible_messages(self, agent: Agent, turn: int):
+        """
+        Get the visible messages for a given agent and turn.
+
+        Args:
+            agent (Agent): The agent.
+            turn (int): The turn number.
+
+        Returns:
+            List[Dict]: The list of visible messages.
+        """
+        # Get the messages before the current turn
+        prev_messages = [
+            message
+            for message in self.messages
+            if message["turn"] < turn
+        ]
+
+        visible_messages = []
+        for message in prev_messages:
+            if (
+                message["visible_to"] == "all"
+                or agent.agent_name in message["visible_to"]
+            ):
+                visible_messages.append(message)
+        return visible_messages
+
+    def query(self, query: str):
+        """
+        Return the messages in the pool that were sent by the moderator.
+
+        Args:
+            query (str): The query string (currently unused).
+
+        Returns:
+            List: (agent, content) pairs for the moderator's messages.
+        """
+        return [
+            (message["agent"], message["content"])
+            for message in self.messages
+            if message["agent"] == self.moderator
+        ]
diff --git a/swarms/utils/loguru_logger.py b/swarms/utils/loguru_logger.py
new file mode 100644
index 00000000..b94ff33f
--- /dev/null
+++ b/swarms/utils/loguru_logger.py
@@ -0,0 +1,12 @@
+from loguru import logger
+
+# logger.add() returns a sink id, so the file sink is registered without
+# rebinding the module-level `logger` that other modules import.
+logger.add(
+    "MessagePool.log",
+    level="INFO",
+    colorize=True,
+    format="{time} {message}",
+    backtrace=True,
+    diagnose=True,
+)
diff --git a/swarms/utils/main.py b/swarms/utils/main.py
index b94fae11..9dbd47fd 100644
--- a/swarms/utils/main.py
+++ b/swarms/utils/main.py
@@ -108,7 +108,7 @@ class Code:
         self.value = value
 
     def __str__(self):
-        return "%d" % self.value
+        return f"{int(self.value)}"
 
 
 class Color(Code):
diff --git a/tests/structs/test_message_pool.py b/tests/structs/test_message_pool.py
new file mode 100644
index 00000000..91d0c28b
--- /dev/null
+++ b/tests/structs/test_message_pool.py
@@ -0,0 +1,117 @@
+from swarms.structs.agent import Agent
+from swarms.structs.message_pool import MessagePool
+from swarms import OpenAIChat
+
+
+def test_message_pool_initialization():
+    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
+    agent2 = Agent(llm=OpenAIChat(), agent_name="agent2")
+    moderator = Agent(llm=OpenAIChat(), agent_name="moderator")
+    agents = [agent1, agent2]
+    message_pool = MessagePool(
+        agents=agents, moderator=moderator, turns=5
+    )
+
+    assert message_pool.agent == agents
+    assert message_pool.moderator == moderator
+    assert message_pool.turns == 5
+    assert message_pool.messages == []
+
+
+def test_message_pool_add():
+    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
+    message_pool = MessagePool(
+        agents=[agent1], moderator=agent1, turns=5
+    )
+    message_pool.add(agent=agent1, content="Hello, world!", turn=1)
+
+    assert message_pool.messages == [
+        {
+            "agent": agent1,
+            "content": "Hello, world!",
+            "turn": 1,
+            "visible_to": "all",
+            "logged": True,
+        }
+    ]
+
+
+def test_message_pool_reset():
+    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
+    message_pool = MessagePool(
+        agents=[agent1], moderator=agent1, turns=5
+    )
+    message_pool.add(agent=agent1, content="Hello, world!", turn=1)
+    message_pool.reset()
+
+    assert message_pool.messages == []
+
+
+def test_message_pool_last_turn():
+    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
+    message_pool = MessagePool(
+        agents=[agent1],
moderator=agent1, turns=5 + ) + message_pool.add(agent=agent1, content="Hello, world!", turn=1) + + assert message_pool.last_turn() == 1 + + +def test_message_pool_last_message(): + agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") + message_pool = MessagePool( + agents=[agent1], moderator=agent1, turns=5 + ) + message_pool.add(agent=agent1, content="Hello, world!", turn=1) + + assert message_pool.last_message == { + "agent": agent1, + "content": "Hello, world!", + "turn": 1, + "visible_to": "all", + "logged": True, + } + + +def test_message_pool_get_all_messages(): + agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") + message_pool = MessagePool( + agents=[agent1], moderator=agent1, turns=5 + ) + message_pool.add(agent=agent1, content="Hello, world!", turn=1) + + assert message_pool.get_all_messages() == [ + { + "agent": agent1, + "content": "Hello, world!", + "turn": 1, + "visible_to": "all", + "logged": True, + } + ] + + +def test_message_pool_get_visible_messages(): + agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") + agent2 = Agent(agent_name="agent2") + message_pool = MessagePool( + agents=[agent1, agent2], moderator=agent1, turns=5 + ) + message_pool.add( + agent=agent1, + content="Hello, agent2!", + turn=1, + visible_to=[agent2.agent_name], + ) + + assert message_pool.get_visible_messages( + agent=agent2, turn=2 + ) == [ + { + "agent": agent1, + "content": "Hello, agent2!", + "turn": 1, + "visible_to": [agent2.agent_name], + "logged": True, + } + ]
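
A minimal usage sketch for the new Agent options added in swarms/structs/agent.py above (output_json, stopping_func, and save_to_yaml). The OpenAIChat setup mirrors the playground examples in this patch; the agent name, prompt, and output path are illustrative assumptions rather than part of the change.

import os

from swarms import Agent, OpenAIChat


def stop_on_done(response: str) -> bool:
    # stopping_func is called on each response; returning True ends the loop.
    return "done" in response.lower()


llm = OpenAIChat(openai_api_key=os.environ.get("OPENAI_API_KEY"))

agent = Agent(
    agent_name="Sketch Agent",  # illustrative name
    llm=llm,
    max_loops=3,
    output_json=True,  # responses are passed through extract_code_from_markdown
    stopping_func=stop_on_done,  # custom stopping predicate added in this patch
)

out = agent.run("Summarize the task as JSON, then say done")
agent.save_to_yaml("sketch_agent.yaml")  # dumps the agent's attributes to a YAML file
print(out)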