multi agent docs + playground code quality

pull/180/head
Kye 1 year ago
parent 32c476e1d2
commit 6c7de98d71

@ -12,4 +12,3 @@ flow = Flow(llm=llm, max_loops=1, dashboard=True)
# Run the workflow on a task # Run the workflow on a task
out = flow.run("Generate a 10,000 word blog on health and wellness.") out = flow.run("Generate a 10,000 word blog on health and wellness.")

@ -0,0 +1,31 @@
import os

from dotenv import load_dotenv

from swarms.models import OpenAIChat
from swarms.structs import Flow
from swarms.swarms.multi_agent_collab import MultiAgentCollaboration

# Load the OpenAI API key from the environment / a local .env file.
load_dotenv()
api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    # Fail fast with a clear message instead of a confusing auth error
    # deep inside the OpenAI client later on.
    raise EnvironmentError(
        "OPENAI_API_KEY is not set; export it or add it to a .env file."
    )

# Initialize the shared language model.
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)

# One Flow per collaborating agent; all three share the same LLM.
flow = Flow(llm=llm, max_loops=1, dashboard=True)
flow2 = Flow(llm=llm, max_loops=1, dashboard=True)
flow3 = Flow(llm=llm, max_loops=1, dashboard=True)

# Collaboration over the three agents, capped at 4 iterations.
swarm = MultiAgentCollaboration(
    agents=[flow, flow2, flow3],
    max_iters=4,
)

swarm.run("Generate a 10,000 word blog on health and wellness.")

@ -1,19 +1,24 @@
import re import re
from swarms.models.openai_models import OpenAIChat from swarms.models.openai_models import OpenAIChat
class AutoTemp: class AutoTemp:
""" """
AutoTemp is a tool for automatically selecting the best temperature setting for a given task. AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
It generates responses at different temperatures, evaluates them, and ranks them based on quality. It generates responses at different temperatures, evaluates them, and ranks them based on quality.
""" """
def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6): def __init__(
self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6
):
self.api_key = api_key self.api_key = api_key
self.default_temp = default_temp self.default_temp = default_temp
self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
self.auto_select = auto_select self.auto_select = auto_select
self.max_workers = max_workers self.max_workers = max_workers
self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp) self.llm = OpenAIChat(
openai_api_key=self.api_key, temperature=self.default_temp
)
def evaluate_output(self, output, temperature): def evaluate_output(self, output, temperature):
print(f"Evaluating output at temperature {temperature}...") print(f"Evaluating output at temperature {temperature}...")
@ -34,12 +39,16 @@ class AutoTemp:
--- ---
""" """
score_text = self.llm(eval_prompt, temperature=0.5) score_text = self.llm(eval_prompt, temperature=0.5)
score_match = re.search(r'\b\d+(\.\d)?\b', score_text) score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
return round(float(score_match.group()), 1) if score_match else 0.0 return round(float(score_match.group()), 1) if score_match else 0.0
def run(self, prompt, temperature_string): def run(self, prompt, temperature_string):
print("Starting generation process...") print("Starting generation process...")
temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()] temperature_list = [
float(temp.strip())
for temp in temperature_string.split(",")
if temp.strip()
]
outputs = {} outputs = {}
scores = {} scores = {}
for temp in temperature_list: for temp in temperature_list:

@ -11,12 +11,16 @@ openai_api_key = os.getenv("OPENAI_API_KEY")
# Define prompts for various tasks # Define prompts for various tasks
MEAL_PLAN_PROMPT = "Based on the following user preferences: dietary restrictions as vegetarian, preferred cuisines as Italian and Indian, a total caloric intake of around 2000 calories per day, and an exclusion of legumes, create a detailed weekly meal plan. Include a variety of meals for breakfast, lunch, dinner, and optional snacks." MEAL_PLAN_PROMPT = "Based on the following user preferences: dietary restrictions as vegetarian, preferred cuisines as Italian and Indian, a total caloric intake of around 2000 calories per day, and an exclusion of legumes, create a detailed weekly meal plan. Include a variety of meals for breakfast, lunch, dinner, and optional snacks."
IMAGE_ANALYSIS_PROMPT = "Identify the items in this fridge, including their quantities and condition." IMAGE_ANALYSIS_PROMPT = (
"Identify the items in this fridge, including their quantities and condition."
)
# Function to encode image to base64 # Function to encode image to base64
def encode_image(image_path): def encode_image(image_path):
with open(image_path, "rb") as image_file: with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8') return base64.b64encode(image_file.read()).decode("utf-8")
# Initialize Language Model (LLM) # Initialize Language Model (LLM)
llm = OpenAIChat( llm = OpenAIChat(
@ -24,12 +28,13 @@ llm = OpenAIChat(
max_tokens=3000, max_tokens=3000,
) )
# Function to handle vision tasks # Function to handle vision tasks
def create_vision_agent(image_path): def create_vision_agent(image_path):
base64_image = encode_image(image_path) base64_image = encode_image(image_path)
headers = { headers = {
"Content-Type": "application/json", "Content-Type": "application/json",
"Authorization": f"Bearer {openai_api_key}" "Authorization": f"Bearer {openai_api_key}",
} }
payload = { payload = {
"model": "gpt-4-vision-preview", "model": "gpt-4-vision-preview",
@ -38,28 +43,39 @@ def create_vision_agent(image_path):
"role": "user", "role": "user",
"content": [ "content": [
{"type": "text", "text": IMAGE_ANALYSIS_PROMPT}, {"type": "text", "text": IMAGE_ANALYSIS_PROMPT},
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}} {
] "type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
},
],
} }
], ],
"max_tokens": 300 "max_tokens": 300,
} }
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload) response = requests.post(
"https://api.openai.com/v1/chat/completions", headers=headers, json=payload
)
return response.json() return response.json()
# Function to generate an integrated shopping list considering meal plan and fridge contents # Function to generate an integrated shopping list considering meal plan and fridge contents
def generate_integrated_shopping_list(meal_plan_output, image_analysis, user_preferences): def generate_integrated_shopping_list(
meal_plan_output, image_analysis, user_preferences
):
# Prepare the prompt for the LLM # Prepare the prompt for the LLM
fridge_contents = image_analysis['choices'][0]['message']['content'] fridge_contents = image_analysis["choices"][0]["message"]["content"]
prompt = (f"Based on this meal plan: {meal_plan_output}, " prompt = (
f"and the following items in the fridge: {fridge_contents}, " f"Based on this meal plan: {meal_plan_output}, "
f"considering dietary preferences as vegetarian with a preference for Italian and Indian cuisines, " f"and the following items in the fridge: {fridge_contents}, "
f"generate a comprehensive shopping list that includes only the items needed.") f"considering dietary preferences as vegetarian with a preference for Italian and Indian cuisines, "
f"generate a comprehensive shopping list that includes only the items needed."
)
# Send the prompt to the LLM and return the response # Send the prompt to the LLM and return the response
response = llm(prompt) response = llm(prompt)
return response # assuming the response is a string return response # assuming the response is a string
# Define agent for meal planning # Define agent for meal planning
meal_plan_agent = Flow( meal_plan_agent = Flow(
llm=llm, llm=llm,
@ -74,19 +90,19 @@ user_preferences = {
"dietary_restrictions": "vegetarian", "dietary_restrictions": "vegetarian",
"preferred_cuisines": ["Italian", "Indian"], "preferred_cuisines": ["Italian", "Indian"],
"caloric_intake": 2000, "caloric_intake": 2000,
"other notes": "Doesn't eat legumes" "other notes": "Doesn't eat legumes",
} }
# Generate Meal Plan # Generate Meal Plan
meal_plan_output = meal_plan_agent.run( meal_plan_output = meal_plan_agent.run(f"Generate a meal plan: {user_preferences}")
f"Generate a meal plan: {user_preferences}"
)
# Vision Agent - Analyze an Image # Vision Agent - Analyze an Image
image_analysis_output = create_vision_agent("full_fridge.jpg") image_analysis_output = create_vision_agent("full_fridge.jpg")
# Generate Integrated Shopping List # Generate Integrated Shopping List
integrated_shopping_list = generate_integrated_shopping_list(meal_plan_output, image_analysis_output, user_preferences) integrated_shopping_list = generate_integrated_shopping_list(
meal_plan_output, image_analysis_output, user_preferences
)
# Print and save the outputs # Print and save the outputs
print("Meal Plan:", meal_plan_output) print("Meal Plan:", meal_plan_output)

@ -23,22 +23,6 @@ bid_parser = BidOutputParser(
) )
def select_next_speaker_director(step: int, agents, director) -> int:
    """Speaker policy that alternates between the director and its picks.

    On odd steps the director itself speaks (index 0); on even steps the
    director chooses who goes next, offset by 1 to skip past the director's
    own slot.

    NOTE(review): the previous comment claimed even steps belong to the
    director, which contradicts the code — this docstring describes what the
    code actually does.

    Args:
        step: Current turn counter.
        agents: The agent list (unused here; kept for a uniform selector
            signature).
        director: Object exposing ``select_next_speaker() -> int``.

    Returns:
        int: Index of the next speaker.
    """
    if step % 2 == 1:
        idx = 0  # director's turn
    else:
        # Director delegates; +1 skips index 0 (the director itself).
        idx = director.select_next_speaker() + 1
    return idx
# Round-robin selection policy.
def select_speaker_round_table(step: int, agents) -> int:
    """Return the index of the next speaker, cycling through *agents* in order."""
    num_agents = len(agents)
    return step % num_agents
# main # main
class MultiAgentCollaboration: class MultiAgentCollaboration:
""" """
@ -49,6 +33,15 @@ class MultiAgentCollaboration:
selection_function (callable): The function that selects the next speaker. selection_function (callable): The function that selects the next speaker.
Defaults to select_next_speaker. Defaults to select_next_speaker.
max_iters (int): The maximum number of iterations. Defaults to 10. max_iters (int): The maximum number of iterations. Defaults to 10.
autosave (bool): Whether to autosave the state of all agents. Defaults to True.
saved_file_path_name (str): The path to the saved file. Defaults to
"multi_agent_collab.json".
stopping_token (str): The token that stops the collaboration. Defaults to
"<DONE>".
results (list): The results of the collaboration. Defaults to [].
logger (logging.Logger): The logger. Defaults to logger.
logging (bool): Whether to log the collaboration. Defaults to True.
Methods: Methods:
reset: Resets the state of all agents. reset: Resets the state of all agents.
@ -62,18 +55,40 @@ class MultiAgentCollaboration:
Usage: Usage:
>>> from swarms.models import MultiAgentCollaboration
>>> from swarms.models import Flow
>>> from swarms.models import OpenAIChat >>> from swarms.models import OpenAIChat
>>> from swarms.models import Anthropic >>> from swarms.structs import Flow
>>> from swarms.swarms.multi_agent_collab import MultiAgentCollaboration
>>>
>>> # Initialize the language model
>>> llm = OpenAIChat(
>>> temperature=0.5,
>>> )
>>>
>>>
>>> ## Initialize the workflow
>>> flow = Flow(llm=llm, max_loops=1, dashboard=True)
>>>
>>> # Run the workflow on a task
>>> out = flow.run("Generate a 10,000 word blog on health and wellness.")
>>>
>>> # Initialize the multi-agent collaboration
>>> swarm = MultiAgentCollaboration(
>>> agents=[flow],
>>> max_iters=4,
>>> )
>>>
>>> # Run the multi-agent collaboration
>>> swarm.run()
>>>
>>> # Format the results of the multi-agent collaboration
>>> swarm.format_results(swarm.results)
""" """
def __init__( def __init__(
self, self,
agents: List[Flow], agents: List[Flow],
selection_function: callable = select_next_speaker_director, selection_function: callable = None,
max_iters: int = 10, max_iters: int = 10,
autosave: bool = True, autosave: bool = True,
saved_file_path_name: str = "multi_agent_collab.json", saved_file_path_name: str = "multi_agent_collab.json",
@ -165,7 +180,7 @@ class MultiAgentCollaboration:
), ),
retry_error_callback=lambda retry_state: 0, retry_error_callback=lambda retry_state: 0,
) )
def run(self): def run_director(self, task: str):
"""Runs the multi-agent collaboration.""" """Runs the multi-agent collaboration."""
n = 0 n = 0
self.reset() self.reset()
@ -179,6 +194,74 @@ class MultiAgentCollaboration:
print("\n") print("\n")
n += 1 n += 1
def select_next_speaker_roundtable(self, step: int, agents: List[Flow]) -> int:
    """Round-robin policy: return the index of the agent whose turn it is."""
    turn = step % len(agents)
    return turn
def select_next_speaker_director(step: int, agents: List[Flow], director) -> int:
    """Alternate between the director speaking and the director choosing.

    Odd steps: the director (index 0) speaks.  Even steps: the director
    selects which agent goes next; +1 offsets past the director's own slot.

    NOTE(review): the previous comment said even steps belong to the
    director, contradicting the code; documented here as the code behaves.
    Also note this is placed in the class body but takes no ``self`` —
    callers must pass all three arguments explicitly; confirm intended.

    Args:
        step: Current turn counter.
        agents: The agent list (unused; kept for a uniform selector signature).
        director: Object exposing ``select_next_speaker() -> int``.

    Returns:
        int: Index of the next speaker.
    """
    if step % 2 == 1:
        return 0  # director's turn
    # Director delegates; skip index 0 (the director itself).
    return director.select_next_speaker() + 1
# def run(self, task: str):
# """Runs the multi-agent collaboration."""
# for step in range(self.max_iters):
# speaker_idx = self.select_next_speaker_roundtable(step, self.agents)
# speaker = self.agents[speaker_idx]
# result = speaker.run(task)
# self.results.append({"agent": speaker, "response": result})
# if self.autosave:
# self.save_state()
# if result == self.stopping_token:
# break
# return self.results
# def run(self, task: str):
# for _ in range(self.max_iters):
# for step, agent, in enumerate(self.agents):
# result = agent.run(task)
# self.results.append({"agent": agent, "response": result})
# if self.autosave:
# self.save_state()
# if result == self.stopping_token:
# break
# return self.results
# def run(self, task: str):
# conversation = task
# for _ in range(self.max_iters):
# for agent in self.agents:
# result = agent.run(conversation)
# self.results.append({"agent": agent, "response": result})
# conversation = result
# if self.autosave:
# self.save()
# if result == self.stopping_token:
# break
# return self.results
def run(self, task: str):
    """Run the collaboration: each agent responds in turn for up to
    ``self.max_iters`` rounds.

    Each agent's reply is recorded in ``self.results`` and appended to the
    running conversation that seeds the next agent.  When an agent emits
    ``self.stopping_token`` the whole collaboration stops (the previous
    ``break`` only exited the inner per-agent loop, so the remaining rounds
    kept running despite the documented stopping contract).

    Args:
        task: The initial prompt that seeds the conversation.

    Returns:
        list: ``self.results`` — dicts of ``{"agent": ..., "response": ...}``.
    """
    conversation = task
    for _ in range(self.max_iters):
        for agent in self.agents:
            result = agent.run(conversation)
            self.results.append({"agent": agent, "response": result})
            conversation += result
            if self.autosave:
                self.save_state()
            if result == self.stopping_token:
                # Stop the entire collaboration, not just this round.
                return self.results
    return self.results
def format_results(self, results): def format_results(self, results):
"""Formats the results of the run method""" """Formats the results of the run method"""
formatted_results = "\n".join( formatted_results = "\n".join(

Loading…
Cancel
Save