multi agent docs + playground code quality

pull/307/head
Kye 1 year ago committed by Zack
parent eba9c7d7a4
commit 25451010d5

@ -6,7 +6,6 @@ from dotenv import load_dotenv
from swarms.models import OpenAIChat
from swarms.structs import Agent
# NOTE(review): unresolved merge-conflict markers (<<<<<<< / ======= / >>>>>>>)
# were committed here. Resolved in favor of the HEAD (Agent-based) side,
# which is the variant the rest of this example keeps.

# Load the environment variables
load_dotenv()

# Get the OpenAI API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    model_name="gpt-4",
    openai_api_key=api_key,
    max_tokens=1000,
)

## Initialize the workflow
agent = Agent(
    llm=llm,
    max_loops=1,
    dashboard=True,  # assumed from the discarded Flow(...) branch — TODO confirm
)

# Run the workflow on a task
out = agent.run("Generate a 10,000 word blog on health and wellness.")
print(out)

@ -0,0 +1,31 @@
import os
from dotenv import load_dotenv
from swarms.models import OpenAIChat
from swarms.structs import Flow
from swarms.swarms.multi_agent_collab import MultiAgentCollaboration

# Read OPENAI_API_KEY from the environment (a local .env file is honored).
load_dotenv()
api_key = os.environ.get("OPENAI_API_KEY")

# One shared language model drives every agent in the collaboration.
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)

## Initialize the workflow: three identical single-loop agents.
flow, flow2, flow3 = (
    Flow(llm=llm, max_loops=1, dashboard=True) for _ in range(3)
)

# Let the agents collaborate for up to four iterations on the task.
swarm = MultiAgentCollaboration(
    agents=[flow, flow2, flow3],
    max_iters=4,
)

swarm.run("Generate a 10,000 word blog on health and wellness.")

@ -1,19 +1,24 @@
import re
from swarms.models.openai_models import OpenAIChat
class AutoTemp:
"""
AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
It generates responses at different temperatures, evaluates them, and ranks them based on quality.
"""
def __init__(
    self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6
):
    """Set up the temperature-selection helper.

    api_key: OpenAI API key used for every generation/evaluation call.
    default_temp: temperature for the internal evaluation LLM.
    alt_temps: candidate temperatures to try; any falsy value (None, [])
        falls back to the built-in ladder 0.4 .. 1.4.
    auto_select: whether the best-scoring output is picked automatically.
    max_workers: parallelism cap for generation.
    """
    self.api_key = api_key
    self.default_temp = default_temp
    # `if alt_temps` (not `is not None`) means an empty list also falls
    # back to the defaults — presumably intentional; confirm if [] should
    # instead mean "try nothing".
    self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
    self.auto_select = auto_select
    self.max_workers = max_workers
    self.llm = OpenAIChat(
        openai_api_key=self.api_key, temperature=self.default_temp
    )
def evaluate_output(self, output, temperature):
print(f"Evaluating output at temperature {temperature}...")
@ -34,12 +39,16 @@ class AutoTemp:
---
"""
score_text = self.llm(eval_prompt, temperature=0.5)
score_match = re.search(r'\b\d+(\.\d)?\b', score_text)
score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
return round(float(score_match.group()), 1) if score_match else 0.0
def run(self, prompt, temperature_string):
print("Starting generation process...")
temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()]
temperature_list = [
float(temp.strip())
for temp in temperature_string.split(",")
if temp.strip()
]
outputs = {}
scores = {}
for temp in temperature_list:

@ -11,12 +11,16 @@ openai_api_key = os.getenv("OPENAI_API_KEY")
# Define prompts for various tasks
MEAL_PLAN_PROMPT = "Based on the following user preferences: dietary restrictions as vegetarian, preferred cuisines as Italian and Indian, a total caloric intake of around 2000 calories per day, and an exclusion of legumes, create a detailed weekly meal plan. Include a variety of meals for breakfast, lunch, dinner, and optional snacks."
IMAGE_ANALYSIS_PROMPT = "Identify the items in this fridge, including their quantities and condition."
IMAGE_ANALYSIS_PROMPT = (
"Identify the items in this fridge, including their quantities and condition."
)
# Function to encode image to base64
def encode_image(image_path):
    """Read the file at *image_path* and return its contents base64-encoded.

    The file is read as raw bytes; the base64 payload is decoded to a UTF-8
    str so it can be embedded directly in a JSON request body.
    """
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
# Initialize Language Model (LLM)
llm = OpenAIChat(
@ -24,12 +28,13 @@ llm = OpenAIChat(
max_tokens=3000,
)
# Function to handle vision tasks
def create_vision_agent(image_path):
    """Send the image at *image_path* to the GPT-4 vision endpoint.

    The image is base64-embedded as a data URL next to
    IMAGE_ANALYSIS_PROMPT and posted to the OpenAI chat-completions API.
    Returns the decoded JSON response.
    """
    base64_image = encode_image(image_path)
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}",
    }
    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": IMAGE_ANALYSIS_PROMPT},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        },
                    },
                ],
            }
        ],
        "max_tokens": 300,
    }
    # NOTE(review): no HTTP-status check; a non-200 reply will surface as an
    # unexpected JSON shape downstream.
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=headers,
        json=payload,
        timeout=60,  # avoid hanging indefinitely on a stalled connection
    )
    return response.json()
# Function to generate an integrated shopping list considering meal plan and fridge contents
def generate_integrated_shopping_list(
    meal_plan_output, image_analysis, user_preferences
):
    """Build a shopping list from the meal plan plus current fridge contents.

    meal_plan_output: text of the generated weekly meal plan.
    image_analysis: raw JSON response from the vision endpoint; the fridge
        inventory is read from choices[0].message.content.
    user_preferences: accepted for interface stability but unused here —
        the dietary preferences are currently hard-coded in the prompt.
    Returns the LLM response (assumed to be a string).
    """
    # Pull the fridge inventory out of the vision API response.
    fridge_contents = image_analysis["choices"][0]["message"]["content"]
    prompt = (
        f"Based on this meal plan: {meal_plan_output}, "
        f"and the following items in the fridge: {fridge_contents}, "
        f"considering dietary preferences as vegetarian with a preference for Italian and Indian cuisines, "
        f"generate a comprehensive shopping list that includes only the items needed."
    )
    # Send the prompt to the LLM and return the response
    response = llm(prompt)
    return response  # assuming the response is a string
# Define agent for meal planning
meal_plan_agent = Flow(
llm=llm,
@ -74,19 +90,19 @@ user_preferences = {
"dietary_restrictions": "vegetarian",
"preferred_cuisines": ["Italian", "Indian"],
"caloric_intake": 2000,
"other notes": "Doesn't eat legumes"
"other notes": "Doesn't eat legumes",
}
# Generate Meal Plan
meal_plan_output = meal_plan_agent.run(
f"Generate a meal plan: {user_preferences}"
)
meal_plan_output = meal_plan_agent.run(f"Generate a meal plan: {user_preferences}")
# Vision Agent - Analyze an Image
image_analysis_output = create_vision_agent("full_fridge.jpg")
# Generate Integrated Shopping List
integrated_shopping_list = generate_integrated_shopping_list(meal_plan_output, image_analysis_output, user_preferences)
integrated_shopping_list = generate_integrated_shopping_list(
meal_plan_output, image_analysis_output, user_preferences
)
# Print and save the outputs
print("Meal Plan:", meal_plan_output)

@ -56,7 +56,7 @@ class MultiAgentCollaboration:
Usage:
>>> from swarms.models import OpenAIChat
>>> from swarms.structs import Agent
>>> from swarms.structs import Flow
>>> from swarms.swarms.multi_agent_collab import MultiAgentCollaboration
>>>
>>> # Initialize the language model
@ -66,14 +66,14 @@ class MultiAgentCollaboration:
>>>
>>>
>>> ## Initialize the workflow
>>> agent = Agent(llm=llm, max_loops=1, dashboard=True)
>>> flow = Flow(llm=llm, max_loops=1, dashboard=True)
>>>
>>> # Run the workflow on a task
>>> out = agent.run("Generate a 10,000 word blog on health and wellness.")
>>> out = flow.run("Generate a 10,000 word blog on health and wellness.")
>>>
>>> # Initialize the multi-agent collaboration
>>> swarm = MultiAgentCollaboration(
>>> agents=[agent],
>>> agents=[flow],
>>> max_iters=4,
>>> )
>>>
@ -87,7 +87,7 @@ class MultiAgentCollaboration:
def __init__(
self,
agents: List[Agent],
agents: List[Flow],
selection_function: callable = None,
max_iters: int = 10,
autosave: bool = True,
@ -200,15 +200,11 @@ class MultiAgentCollaboration:
print("\n")
n += 1
def select_next_speaker_roundtable(self, step: int, agents: List[Flow]) -> int:
    """Round-robin speaker selection: the agent index cycles with the step."""
    return step % len(agents)
def select_next_speaker_director(
step: int, agents: List[Agent], director
) -> int:
def select_next_speaker_director(step: int, agents: List[Flow], director) -> int:
# if the step is even => director
# => director selects next speaker
if step % 2 == 1:
@ -262,9 +258,7 @@ class MultiAgentCollaboration:
for _ in range(self.max_iters):
for agent in self.agents:
result = agent.run(conversation)
self.results.append(
{"agent": agent, "response": result}
)
self.results.append({"agent": agent, "response": result})
conversation += result
if self.autosave:

Loading…
Cancel
Save