From b9634f95384d2bb1299a614f7c1be5197ba4f58e Mon Sep 17 00:00:00 2001
From: Kye
Date: Tue, 3 Oct 2023 13:23:03 -0400
Subject: [PATCH] clean up godmode example

---
 README.md                         | 24 ++++++++++++++++++++----
 swarms/swarms/god_mode.py         | 29 ++++++++++++++++++++++++++++-
 swarms/swarms/scable_groupchat.py | 31 ++++++++++++++++++++++---------
 3 files changed, 70 insertions(+), 14 deletions(-)

diff --git a/README.md b/README.md
index c257b2d5..6361e721 100644
--- a/README.md
+++ b/README.md
@@ -57,15 +57,31 @@ print(response)
 ---

 ## Usage
-
+- `GodMode` is a simple class that takes in a list of LLMs and, when given a task, runs them all concurrently!
 ```python
-from swarms import HuggingFaceLLM
-hugging_face_model = HuggingFaceLLM(model_id="Voicelab/trurl-2-13b")
-generated_text = hugging_face_model.generate("In a world where AI")
+from swarms.models import Anthropic, GooglePalm, OpenAIChat
+from swarms.swarms import GodMode
+
+claude = Anthropic(anthropic_api_key="")
+palm = GooglePalm(google_api_key="")
+gpt = OpenAIChat(openai_api_key="")
+
+# Usage
+llms = [
+    claude,
+    palm,
+    gpt
+]
+
+god_mode = GodMode(llms)
+
+task = "What are the biggest risks facing humanity?"
+god_mode.print_responses(task)
 ```

+- The `Worker` is a fully featured agent:
 ```python
 from swarms import Worker

diff --git a/swarms/swarms/god_mode.py b/swarms/swarms/god_mode.py
index 00f32be7..1fd5f50e 100644
--- a/swarms/swarms/god_mode.py
+++ b/swarms/swarms/god_mode.py
@@ -4,10 +4,37 @@ from tabulate import tabulate


 class GodMode:
+    """
+    GodMode
+    -----
+
+    Architecture: one task is fanned out to all LLMs in parallel and the responses are collected.
+    How it works:
+    1. GodMode receives a task from the user.
+    2. GodMode distributes the task to all LLMs.
+    3. GodMode collects the responses from all LLMs.
+    4. GodMode prints the responses from all LLMs.
+
+    Parameters:
+    llms: list of LLMs
+
+    Methods:
+    run(task): distribute the task to all LLMs and collect their responses
+    print_responses(task): print the responses from all LLMs
+
+    Usage:
+    god_mode = GodMode(llms)
+    god_mode.run(task)
+    god_mode.print_responses(task)
+
+
+
+
+    """
     def __init__(self, llms):
         self.llms = llms

-    def run_all(self, task):
+    def run(self, task):
         with ThreadPoolExecutor() as executor:
             responses = executor.map(lambda llm: llm(task), self.llms)
         return list(responses)
diff --git a/swarms/swarms/scable_groupchat.py b/swarms/swarms/scable_groupchat.py
index f4c60c4c..61787d4f 100644
--- a/swarms/swarms/scable_groupchat.py
+++ b/swarms/swarms/scable_groupchat.py
@@ -20,8 +20,10 @@ class ScalableGroupChat:
     Worker -> ScalableGroupChat(Worker * 10)
     -> every response is embedded and placed in chroma
-    -> every response is then retrieved and sent to the worker
+    -> every response is then retrieved by querying the database and passed into the prompt of the worker
     -> every worker is then updated with the new response
+    -> every worker can communicate at any time
+    -> every worker can communicate without restrictions in parallel

     """
     def __init__(
         self,
@@ -32,21 +34,34 @@ class ScalableGroupChat:
     ):
         self.workers = []
         self.worker_count = worker_count
+        self.collection_name = collection_name
+        self.api_key = api_key

         # Create a list of Worker instances with unique names
         for i in range(worker_count):
-            self.workers.append(Worker(openai_api_key=api_key, ai_name=f"Worker-{i}"))
+            self.workers.append(
+                Worker(
+                    openai_api_key=api_key,
+                    ai_name=f"Worker-{i}"
+                )
+            )

-    def embed(self, input, api_key, model_name):
+    def embed(
+        self,
+        input,
+        model_name
+    ):
+        """Embeds an input of size N into a vector of size M."""
         openai = embedding_functions.OpenAIEmbeddingFunction(
-            api_key=api_key,
+            api_key=self.api_key,
             model_name=model_name
         )
+        embedding = openai(input)
+        return embedding

-    # @abstractmethod
     def retrieve_results(
         self,
         agent_id: int
@@ -115,8 +130,8 @@ class ScalableGroupChat:
         Allows the agents to chat with each other through the vector database

-        # Instantiate the Orchestrator with 10 agents
-        orchestrator = Orchestrator(
+        # Instantiate the ScalableGroupChat with 10 agents
+        orchestrator = ScalableGroupChat(
             llm,
             agent_list=[llm]*10,
             task_queue=[]
@@ -131,8 +146,6 @@ class ScalableGroupChat:
         message_vector = self.embed(
             message,
-            self.api_key,
-            self.model_name
         )

         # store the message in the vector database
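For context, the `run_all` → `run` rename above boils down to a simple thread-pool fan-out: the same task is sent to every LLM at once and the responses are gathered in order. Below is a minimal, self-contained sketch of that pattern; the lambda "LLMs" are hypothetical stand-ins for callable model wrappers such as `Anthropic`, `GooglePalm`, or `OpenAIChat`, and are not part of the patch itself.

```python
# Minimal sketch of the GodMode fan-out pattern shown in the patch above.
from concurrent.futures import ThreadPoolExecutor


class GodMode:
    def __init__(self, llms):
        self.llms = llms

    def run(self, task):
        # Fan the same task out to every LLM concurrently; executor.map
        # yields the responses in the same order as self.llms.
        with ThreadPoolExecutor() as executor:
            responses = executor.map(lambda llm: llm(task), self.llms)
        return list(responses)


# Hypothetical stand-in "LLMs": any callable that maps a prompt to a reply.
llms = [
    lambda task: f"[model-a] {task}",
    lambda task: f"[model-b] {task}",
]

god_mode = GodMode(llms)
print(god_mode.run("What are the biggest risks facing humanity?"))
```

A thread pool is a reasonable fit here because LLM calls are I/O-bound network requests, so the threads spend most of their time waiting on responses rather than contending for the interpreter.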