From 80171b0e4fc1c5fc11de5ec87d8e45a4b82b05e6 Mon Sep 17 00:00:00 2001 From: Kye Date: Tue, 26 Dec 2023 19:49:08 -0500 Subject: [PATCH] [README][ModelParallelizer] --- README.md | 42 +++++++++++++++++++++++++++++++ docs/swarms/swarms/godmode.md | 22 ++++++++-------- playground/swarms/godmode.py | 33 ++++++++++++++++++------ swarms/swarms/model_parallizer.py | 6 ++--- 4 files changed, 81 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 3de9b66c..b4477f4f 100644 --- a/README.md +++ b/README.md @@ -460,6 +460,48 @@ print(video_path) ``` +### `ModelParallelizer` +- Run any models concurrently to compare outputs +- Concurrent execution of various LLMs +- Plug in and play with your models + +```python +import os + +from dotenv import load_dotenv + +from swarms.models import Anthropic, Gemini, Mixtral, OpenAIChat +from swarms.swarms import ModelParallelizer + +load_dotenv() + +# API Keys +anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") +openai_api_key = os.getenv("OPENAI_API_KEY") +gemini_api_key = os.getenv("GEMINI_API_KEY") + +# Initialize the models +llm = OpenAIChat(openai_api_key=openai_api_key) +anthropic = Anthropic(anthropic_api_key=anthropic_api_key) +mixtral = Mixtral() +gemini = Gemini(gemini_api_key=gemini_api_key) + +# Initialize the parallelizer +llms = [llm, anthropic, mixtral, gemini] +parallelizer = ModelParallelizer(llms) + +# Set the task +task = "Generate a 10,000 word blog on health and wellness." 
+ +# Run the task +out = parallelizer.run(task) + +# Print the responses 1 by 1 +for i in range(len(out)): + print(f"Response from LLM {i}: {out[i]}") +``` + + ### Simple Conversational Agent - Plug in and play conversational agent with `GPT4`, `Mixytral`, or any of our models - Reliable conversational structure to hold messages together with dynamic handling for long context conversations and interactions with auto chunking diff --git a/docs/swarms/swarms/godmode.md b/docs/swarms/swarms/godmode.md index 2d903a8d..6655c954 100644 --- a/docs/swarms/swarms/godmode.md +++ b/docs/swarms/swarms/godmode.md @@ -102,8 +102,8 @@ Now that we have covered the class definition, let's delve into the functionalit One of the primary use cases of `ModelParallelizer` is to distribute a task to all registered LLMs and collect their responses. This can be achieved using the `run(task)` method. Below is an example: ```python -god_mode = ModelParallelizer(llms) -responses = god_mode.run("Translate the following English text to French: 'Hello, how are you?'") +parallelizer = ModelParallelizer(llms) +responses = parallelizer.run("Translate the following English text to French: 'Hello, how are you?'") ``` ### Printing Responses @@ -111,7 +111,7 @@ responses = god_mode.run("Translate the following English text to French: 'Hello To present the responses from all LLMs in a structured tabular format, use the `print_responses(task)` method. Example: ```python -god_mode.print_responses("Summarize the main points of 'War and Peace.'") +parallelizer.print_responses("Summarize the main points of 'War and Peace.'") ``` ### Saving Responses to a File @@ -119,7 +119,7 @@ god_mode.print_responses("Summarize the main points of 'War and Peace.'") Users can save the responses to a file using the `save_responses_to_file(filename)` method. This is useful for archiving and reviewing responses later. 
Example: ```python -god_mode.save_responses_to_file("responses.txt") +parallelizer.save_responses_to_file("responses.txt") ``` ### Task History @@ -127,7 +127,7 @@ god_mode.save_responses_to_file("responses.txt") The `ModelParallelizer` class keeps track of the task history. Developers can access the task history using the `get_task_history()` method. Example: ```python -task_history = god_mode.get_task_history() +task_history = parallelizer.get_task_history() for i, task in enumerate(task_history): print(f"Task {i + 1}: {task}") ``` @@ -186,13 +186,13 @@ worker3 = Worker( # Register the worker agents with ModelParallelizer agents = [worker1, worker2, worker3] -god_mode = ModelParallelizer(agents) +parallelizer = ModelParallelizer(agents) # Task for sentiment analysis task = "Please analyze the sentiment of the following sentence: 'This movie is amazing!'" # Print responses from all agents -god_mode.print_responses(task) +parallelizer.print_responses(task) ``` ### Example 2: Translation @@ -209,13 +209,13 @@ translator3 = OpenAIChat(model_name="translator-en-de", openai_api_key="api-key" # Register translation agents with ModelParallelizer translators = [translator1, translator2, translator3] -god_mode = ModelParallelizer(translators) +parallelizer = ModelParallelizer(translators) # Task for translation task = "Translate the following English text to French: 'Hello, how are you?'" # Print translated responses from all agents -god_mode.print_responses(task) +parallelizer.print_responses(task) ``` ### Example 3: Summarization @@ -233,13 +233,13 @@ summarizer3 = OpenAIChat(model_name="summarizer-en", openai_api_key="api-key", t # Register summarization agents with ModelParallelizer summarizers = [summarizer1, summarizer2, summarizer3] -god_mode = ModelParallelizer(summarizers) +parallelizer = ModelParallelizer(summarizers) # Task for summarization task = "Summarize the main points of the article titled 'Climate Change and Its Impact on the Environment.'" # Print 
summarized responses from all agents -god_mode.print_responses(task) +parallelizer.print_responses(task) ``` ## 7. Conclusion diff --git a/playground/swarms/godmode.py b/playground/swarms/godmode.py index 4d18ef56..8b1e690c 100644 --- a/playground/swarms/godmode.py +++ b/playground/swarms/godmode.py @@ -1,16 +1,33 @@ -from swarms.swarms import ModelParallelizer -from swarms.models import OpenAIChat +import os + +from dotenv import load_dotenv -api_key = "" +from swarms.models import Anthropic, Gemini, Mixtral, OpenAIChat +from swarms.swarms import ModelParallelizer -llm = OpenAIChat(openai_api_key=api_key) +load_dotenv() +# API Keys +anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") +openai_api_key = os.getenv("OPENAI_API_KEY") +gemini_api_key = os.getenv("GEMINI_API_KEY") -llms = [llm, llm, llm] +# Initialize the models +llm = OpenAIChat(openai_api_key=openai_api_key) +anthropic = Anthropic(anthropic_api_key=anthropic_api_key) +mixtral = Mixtral() +gemini = Gemini(gemini_api_key=gemini_api_key) -god_mode = ModelParallelizer(llms) +# Initialize the parallelizer +llms = [llm, anthropic, mixtral, gemini] +parallelizer = ModelParallelizer(llms) +# Set the task task = "Generate a 10,000 word blog on health and wellness." -out = god_mode.run(task) -god_mode.print_responses(task) +# Run the task +out = parallelizer.run(task) + +# Print the responses 1 by 1 +for i in range(len(out)): + print(f"Response from LLM {i}: {out[i]}") \ No newline at end of file diff --git a/swarms/swarms/model_parallizer.py b/swarms/swarms/model_parallizer.py index 0b087b6d..3844f5b4 100644 --- a/swarms/swarms/model_parallizer.py +++ b/swarms/swarms/model_parallizer.py @@ -31,9 +31,9 @@ class ModelParallelizer: print_responses(task): print responses from all LLMs Usage: - god_mode = ModelParallelizer(llms) - god_mode.run(task) - god_mode.print_responses(task) + parallelizer = ModelParallelizer(llms) + parallelizer.run(task) + parallelizer.print_responses(task) """