clean up godmode example

pull/58/head
Kye 1 year ago
parent e0f110fdef
commit b9634f9538

@@ -57,15 +57,31 @@ print(response)
---
## Usage
- `GodMode` is a simple class that takes in any number of LLMs and, when given a task, runs them all concurrently!
```python
from swarms import HuggingFaceLLM
hugging_face_model = HuggingFaceLLM(model_id="Voicelab/trurl-2-13b")
generated_text = hugging_face_model.generate("In a world where AI")
from swarms.models import Anthropic, GooglePalm, OpenAIChat
from swarms.swarms import GodMode
claude = Anthropic(anthropic_api_key="")
palm = GooglePalm(google_api_key="")
gpt = OpenAIChat(openai_api_key="")
# Usage
llms = [
claude,
palm,
gpt
]
god_mode = GodMode(llms)
task = f"What are the biggest risks facing humanity?"
god_mode.print_responses(task)
```
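If you want the raw responses rather than printed output, `run(task)` returns them as a list, one entry per LLM and in the same order as `llms`:
```python
# Continues from the snippet above
responses = god_mode.run(task)
for llm, response in zip(llms, responses):
    print(type(llm).__name__, "->", response)
```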
- The `Worker` is a fully featured agent:
```python
from swarms import Worker

@@ -4,10 +4,37 @@ from tabulate import tabulate
class GodMode:
"""
GodMode
-----
Architecture:
How it works:
1. GodMode receives a task from the user.
2. GodMode distributes the task to all LLMs.
3. GodMode collects the responses from all LLMs.
4. GodMode prints the responses from all LLMs.
Parameters:
llms: list of LLMs
Methods:
run(task): distribute task to all LLMs and collect responses
print_responses(task): print responses from all LLMs
Usage:
god_mode = GodMode(llms)
god_mode.run(task)
god_mode.print_responses(task)
"""
def __init__(self, llms):
self.llms = llms
def run_all(self, task):
def run(self, task):
with ThreadPoolExecutor() as executor:
responses = executor.map(lambda llm: llm(task), self.llms)
return list(responses)
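Since `GodMode` only ever calls `llm(task)`, any callable can stand in for an LLM. A minimal self-contained sketch of the same thread-pool fan-out (the lambda "models" below are placeholders, not real swarms classes):
```python
from concurrent.futures import ThreadPoolExecutor

# Placeholder "LLMs": GodMode only requires that each entry be callable on a task
llms = [
    lambda task: f"model-a: {task.upper()}",
    lambda task: f"model-b: {task[::-1]}",
]

task = "What are the biggest risks facing humanity?"
with ThreadPoolExecutor() as executor:
    # map() preserves input order, so responses[i] belongs to llms[i]
    responses = list(executor.map(lambda llm: llm(task), llms))

print(responses)
```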

@@ -20,8 +20,10 @@ class ScalableGroupChat:
Worker -> ScalableGroupChat(Worker * 10)
-> every response is embedded and placed in chroma
-> every response is then retrieved and sent to the worker
-> every response is then retrieved by querying the database and passed into the prompt of the worker
-> every worker is then updated with the new response
-> every worker can communicate at any time
-> every worker can communicate without restrictions in parallel
"""
def __init__(
@@ -32,21 +34,34 @@ class ScalableGroupChat:
):
self.workers = []
self.worker_count = worker_count
self.collection_name = collection_name
self.api_key = api_key
# Create a list of Worker instances with unique names
for i in range(worker_count):
self.workers.append(Worker(openai_api_key=api_key, ai_name=f"Worker-{i}"))
self.workers.append(
Worker(
openai_api_key=api_key,
ai_name=f"Worker-{i}"
)
)
def embed(self, input, api_key, model_name):
def embed(
self,
input,
model_name
):
"""Embeds an input of size N into a vector of size M"""
openai = embedding_functions.OpenAIEmbeddingFunction(
api_key=api_key,
api_key=self.api_key,
model_name=model_name
)
embedding = openai(input)
return embedding
# @abstractmethod
def retrieve_results(
self,
agent_id: int
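For reference, the `embed` method above delegates to Chroma's bundled OpenAI embedding function. A minimal sketch of the same call outside the class (assumes `chromadb` is installed; the key is a placeholder and `text-embedding-ada-002` is Chroma's usual default):
```python
from chromadb.utils import embedding_functions

openai_ef = embedding_functions.OpenAIEmbeddingFunction(
    api_key="sk-...",                     # placeholder OpenAI key
    model_name="text-embedding-ada-002",  # assumed default embedding model
)

# Callable on a list of texts; returns one embedding vector per text
embedding = openai_ef(["Hello from Worker-0"])
```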
@@ -115,8 +130,8 @@ class ScalableGroupChat:
Allows the agents to chat with each other through the vector database
# Instantiate the Orchestrator with 10 agents
orchestrator = Orchestrator(
# Instantiate the ScalableGroupChat with 10 agents
orchestrator = ScalableGroupChat(
llm,
agent_list=[llm]*10,
task_queue=[]
@@ -131,8 +146,6 @@ class ScalableGroupChat:
message_vector = self.embed(
message,
self.api_key,
self.model_name
)
# Store the message in the vector database
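What "store the message in the vector database" could look like with a plain Chroma collection; the collection name, id scheme, and follow-up query here are illustrative assumptions, not code from this commit:
```python
import chromadb
from chromadb.utils import embedding_functions

client = chromadb.Client()
collection = client.get_or_create_collection("swarm")  # illustrative name

openai_ef = embedding_functions.OpenAIEmbeddingFunction(api_key="sk-...")
message = "Hello from Worker-0"
message_vector = openai_ef([message])

# Store the message alongside its embedding (hypothetical id scheme)
collection.add(ids=["worker-0-msg-0"], embeddings=message_vector, documents=[message])

# Other workers can then retrieve nearby messages by querying the collection
results = collection.query(query_embeddings=message_vector, n_results=3)
print(results["documents"])
```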
