pull/437/head
Kye 9 months ago
parent 91b2291784
commit 9d28535393

@@ -1,12 +1,11 @@
 from swarms import Agent, Anthropic
-## Initialize the workflow
+# Initialize the agent
 agent = Agent(
     agent_name="Transcript Generator",
     agent_description=(
-        "Generate a transcript for a youtube video on what swarms"
-        " are!"
+        "Generate a transcript for a youtube video on what swarms" " are!"
     ),
     llm=Anthropic(),
     max_loops=3,
@@ -18,5 +17,5 @@ agent = Agent(
     interactive=True,
 )
-# Run the workflow on a task
+# Run the Agent on a task
 agent("Generate a transcript for a youtube video on what swarms are!")

@@ -2,7 +2,7 @@ from pydantic import BaseModel, Field
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from swarms import ToolAgent
-from swarms.utils.json_utils import base_model_to_json
+from swarms.tools.json_utils import base_model_to_json
 # Model name
 model_name = "CohereForAI/c4ai-command-r-v01-4bit"
@@ -28,9 +28,7 @@ class APIExampleRequestSchema(BaseModel):
     headers: dict = Field(
         ..., description="The headers for the example request"
     )
-    body: dict = Field(
-        ..., description="The body of the example request"
-    )
+    body: dict = Field(..., description="The body of the example request")
     response: dict = Field(
         ...,
         description="The expected response of the example request",

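The import hunks in this commit only move base_model_to_json from swarms.utils to swarms.tools; its role is unchanged. As a minimal sketch of the intended round-trip, assuming the function turns a Pydantic model class into a JSON-schema string (the ExampleSchema fields below are illustrative, not taken from the repo):

    from pydantic import BaseModel, Field
    from swarms.tools.json_utils import base_model_to_json

    class ExampleSchema(BaseModel):
        # Hypothetical fields, mirroring the Schema classes in these hunks
        name: str = Field(..., title="Name of the person")
        age: int = Field(..., title="Age of the person")

    # Presumably returns a JSON-schema string that a ToolAgent can be
    # constrained to decode against (see the ToolAgent examples below).
    tool_schema = base_model_to_json(ExampleSchema)
    print(tool_schema)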
@@ -14,8 +14,7 @@ def search_api(query: str, max_results: int = 10):
 agent = Agent(
     agent_name="Youtube Transcript Generator",
     agent_description=(
-        "Generate a transcript for a youtube video on what swarms"
-        " are!"
+        "Generate a transcript for a youtube video on what swarms" " are!"
     ),
     llm=Anthropic(),
     max_loops="auto",

@@ -2,7 +2,7 @@ from pydantic import BaseModel, Field
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from swarms import ToolAgent
-from swarms.utils.json_utils import base_model_to_json
+from swarms.tools.json_utils import base_model_to_json
 # Model name
 model_name = "ai21labs/Jamba-v0.1"
@@ -28,9 +28,7 @@ class APIExampleRequestSchema(BaseModel):
     headers: dict = Field(
         ..., description="The headers for the example request"
     )
-    body: dict = Field(
-        ..., description="The body of the example request"
-    )
+    body: dict = Field(..., description="The body of the example request")
     response: dict = Field(
         ...,
         description="The expected response of the example request",

@@ -4,9 +4,7 @@ load_dict = {"ImageCaptioning": "cuda"}
 node = MultiModalAgent(load_dict)
-text = node.run_text(
-    "What is your name? Generate a picture of yourself"
-)
+text = node.run_text("What is your name? Generate a picture of yourself")
 img = node.run_img("/image1", "What is this image about?")

@@ -2,7 +2,7 @@ from pydantic import BaseModel, Field
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from swarms import ToolAgent
-from swarms.utils.json_utils import base_model_to_json
+from swarms.tools.json_utils import base_model_to_json
 # Load the pre-trained model and tokenizer
 model = AutoModelForCausalLM.from_pretrained(
@@ -17,9 +17,7 @@ tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
 class Schema(BaseModel):
     name: str = Field(..., title="Name of the person")
     agent: int = Field(..., title="Age of the person")
-    is_student: bool = Field(
-        ..., title="Whether the person is a student"
-    )
+    is_student: bool = Field(..., title="Whether the person is a student")
     courses: list[str] = Field(
         ..., title="List of courses the person is taking"
     )
@@ -29,9 +27,7 @@ class Schema(BaseModel):
 tool_schema = base_model_to_json(Schema)
 # Define the task to generate a person's information
-task = (
-    "Generate a person's information based on the following schema:"
-)
+task = "Generate a person's information based on the following schema:"
 # Create an instance of the ToolAgent class
 agent = ToolAgent(

@@ -4,7 +4,7 @@ from dotenv import load_dotenv
 from pydantic import BaseModel, Field
 from swarms import OpenAIChat, ToolAgent
-from swarms.utils.json_utils import base_model_to_json
+from swarms.tools.json_utils import base_model_to_json
 # Load the environment variables
 load_dotenv()
@@ -19,9 +19,7 @@ chat = OpenAIChat(
 class Schema(BaseModel):
     name: str = Field(..., title="Name of the person")
     agent: int = Field(..., title="Age of the person")
-    is_student: bool = Field(
-        ..., title="Whether the person is a student"
-    )
+    is_student: bool = Field(..., title="Whether the person is a student")
     courses: list[str] = Field(
         ..., title="List of courses the person is taking"
     )
@@ -31,9 +29,7 @@ class Schema(BaseModel):
 tool_schema = base_model_to_json(Schema)
 # Define the task to generate a person's information
-task = (
-    "Generate a person's information based on the following schema:"
-)
+task = "Generate a person's information based on the following schema:"
 # Create an instance of the ToolAgent class
 agent = ToolAgent(

@@ -34,9 +34,7 @@ def text_to_video(task: str):
     step = 4  # Options: [1,2,4,8]
     repo = "ByteDance/AnimateDiff-Lightning"
     ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
-    base = (  # Choose your favorite base model.
-        "emilianJR/epiCRealism"
-    )
+    base = "emilianJR/epiCRealism"  # Choose your favorite base model.
     adapter = MotionAdapter().to(device, dtype)
     adapter.load_state_dict(

@@ -61,16 +61,12 @@ class ProductAdConceptGenerator:
             "in an ice cave setting",
             "in a serene and calm landscape",
         ]
-        self.contexts = [
-            "high realism product ad (extremely creative)"
-        ]
+        self.contexts = ["high realism product ad (extremely creative)"]
     def generate_concept(self):
         theme = random.choice(self.themes)
         context = random.choice(self.contexts)
-        return (
-            f"{theme} inside a {style} {self.product_name}, {context}"
-        )
+        return f"{theme} inside a {style} {self.product_name}, {context}"
 # User input

@@ -31,9 +31,7 @@ def test_find_most_similar_podcasts():
     graph = create_graph()
     weight_edges(graph)
     user_list = create_user_list()
-    most_similar_podcasts = find_most_similar_podcasts(
-        graph, user_list
-    )
+    most_similar_podcasts = find_most_similar_podcasts(graph, user_list)
     assert isinstance(most_similar_podcasts, list)

@@ -45,9 +45,7 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
     ) as executor:
         futures = []
         for i, (fn, args, kwargs) in enumerate(callable_functions):
-            futures.append(
-                executor.submit(worker, fn, args, kwargs, i)
-            )
+            futures.append(executor.submit(worker, fn, args, kwargs, i))
         # Wait for all threads to complete
         concurrent.futures.wait(futures)
@@ -56,9 +54,7 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
 # Adjusting the function to extract specific column values
-def extract_and_create_agents(
-    csv_file_path: str, target_columns: list
-):
+def extract_and_create_agents(csv_file_path: str, target_columns: list):
     """
     Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
     creates an Agent for each, and adds it to the swarm network.
@@ -138,8 +134,7 @@ def extract_and_create_agents(
             # Log the agent
             logger.info(
-                f"Agent created: {agent_name} with long term"
-                " memory"
+                f"Agent created: {agent_name} with long term" " memory"
             )
             agents.append(agent)

@@ -16,9 +16,7 @@ def test_pass():
 def test_invalid_sports():
     assert (
-        vocal.generate_video(
-            "I just ate some delicious tacos", "tacos"
-        )
+        vocal.generate_video("I just ate some delicious tacos", "tacos")
        == "Invalid sports entered!! Please enter a valid sport."
    )

@@ -51,6 +51,4 @@ algorithmic_psuedocode_agent = paper_summarizer_agent.run(
     "Focus on creating the algorithmic pseudocode for the novel"
     f" method in this paper: {paper}"
 )
-pytorch_code = paper_implementor_agent.run(
-    algorithmic_psuedocode_agent
-)
+pytorch_code = paper_implementor_agent.run(algorithmic_psuedocode_agent)

@@ -55,9 +55,7 @@ class AutoBlogGenSwarm:
     ):
         self.llm = llm()
         self.topic_selection_task = topic_selection_task
-        self.topic_selection_agent_prompt = (
-            topic_selection_agent_prompt
-        )
+        self.topic_selection_agent_prompt = topic_selection_agent_prompt
         self.objective = objective
         self.iterations = iterations
         self.max_retries = max_retries
@@ -93,9 +91,7 @@ class AutoBlogGenSwarm:
     def step(self):
         """Steps through the task"""
-        topic_selection_agent = self.llm(
-            self.topic_selection_agent_prompt
-        )
+        topic_selection_agent = self.llm(self.topic_selection_agent_prompt)
         topic_selection_agent = self.print_beautifully(
             "Topic Selection Agent", topic_selection_agent
         )
@@ -105,9 +101,7 @@ class AutoBlogGenSwarm:
         # Agent that reviews the draft
         review_agent = self.llm(self.get_review_prompt(draft_blog))
-        review_agent = self.print_beautifully(
-            "Review Agent", review_agent
-        )
+        review_agent = self.print_beautifully("Review Agent", review_agent)
         # Agent that publishes on social media
         distribution_agent = self.llm(

@@ -48,11 +48,7 @@ class AutoTemp:
         """
         score_text = self.llm(eval_prompt, temperature=0.5)
         score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
-        return (
-            round(float(score_match.group()), 1)
-            if score_match
-            else 0.0
-        )
+        return round(float(score_match.group()), 1) if score_match else 0.0
     def run(self, prompt, temperature_string):
         print("Starting generation process...")

@@ -56,15 +56,11 @@ class BlogGen:
         )
         chosen_topic = topic_output.split("\n")[0]
-        print(
-            colored("Selected topic: " + chosen_topic, "yellow")
-        )
+        print(colored("Selected topic: " + chosen_topic, "yellow"))
         # Initial draft generation with AutoTemp
-        initial_draft_prompt = (
-            self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
-                "{{CHOSEN_TOPIC}}", chosen_topic
-            )
-        )
+        initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
+            "{{CHOSEN_TOPIC}}", chosen_topic
+        )
         auto_temp_output = self.auto_temp.run(
             initial_draft_prompt, self.temperature_range

@@ -12,9 +12,7 @@ api_key = os.getenv("OPENAI_API_KEY")
 stability_api_key = os.getenv("STABILITY_API_KEY")
 # Initialize language model
-llm = OpenAIChat(
-    openai_api_key=api_key, temperature=0.5, max_tokens=3000
-)
+llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)
 # User preferences (can be dynamically set in a real application)
 user_preferences = {
@@ -30,9 +28,7 @@ curriculum_prompt = edu_prompts.CURRICULUM_DESIGN_PROMPT.format(
 interactive_prompt = edu_prompts.INTERACTIVE_LEARNING_PROMPT.format(
     **user_preferences
 )
-sample_prompt = edu_prompts.SAMPLE_TEST_PROMPT.format(
-    **user_preferences
-)
+sample_prompt = edu_prompts.SAMPLE_TEST_PROMPT.format(**user_preferences)
 image_prompt = edu_prompts.IMAGE_GENERATION_PROMPT.format(
     **user_preferences
 )
@@ -49,9 +45,7 @@ workflow = SequentialWorkflow(max_loops=1)
 # Add tasks to workflow with personalized prompts
 workflow.add(curriculum_agent, "Generate a curriculum")
-workflow.add(
-    interactive_learning_agent, "Generate an interactive lesson"
-)
+workflow.add(interactive_learning_agent, "Generate an interactive lesson")
 workflow.add(sample_lesson_agent, "Generate a practice test")
 # Execute the workflow for text-based tasks

@@ -11,9 +11,7 @@ from swarms.structs import Agent
 load_dotenv()
-FEATURE = (
-    "Implement an all-new signup system in typescript using supabase"
-)
+FEATURE = "Implement an all-new signup system in typescript using supabase"
 CODEBASE = """
 import React, { useState } from 'react';
@@ -68,9 +66,7 @@ feature_implementer_backend = Agent(
 )
 # Create another agent for a different task
-tester_agent = Agent(
-    llm=llm, max_loops=1, sop=TEST_SOP, autosave=True
-)
+tester_agent = Agent(llm=llm, max_loops=1, sop=TEST_SOP, autosave=True)
 # Create another agent for a different task
 documenting_agent = Agent(

@@ -44,9 +44,7 @@ class Idea2Image(Agent):
             print(f"Generated image at: {img}")
             analysis = (
-                self.vision_api.run(img, current_prompt)
-                if img
-                else None
+                self.vision_api.run(img, current_prompt) if img else None
             )
             if analysis:
                 current_prompt += (
@@ -147,9 +145,7 @@ gpt_api = OpenAIChat(openai_api_key=openai_api_key)
 # Define the modified Idea2Image class here
 # Streamlit UI layout
-st.title(
-    "Explore the infinite Multi-Modal Idea Space with Idea2Image"
-)
+st.title("Explore the infinite Multi-Modal Idea Space with Idea2Image")
 user_prompt = st.text_input("Prompt for image generation:")
 num_iterations = st.number_input(
     "Enter the number of iterations for image improvement:",
@@ -168,9 +164,7 @@ if st.button("Generate Image"):
         user_prompt, num_iterations, run_folder
     )
-    for i, (enriched_prompt, img_path, analysis) in enumerate(
-        results
-    ):
+    for i, (enriched_prompt, img_path, analysis) in enumerate(results):
         st.write(f"Iteration {i+1}:")
         st.write("Enriched Prompt:", enriched_prompt)
         if img_path:

@@ -96,9 +96,7 @@ for _ in range(max_iterations):
     # Evaluate the image by passing the file path
     score = evaluate_img(llm, task, img_path)
     print(
-        colored(
-            f"Evaluated Image Score: {score} for {img_path}", "cyan"
-        )
+        colored(f"Evaluated Image Score: {score} for {img_path}", "cyan")
     )
     # Update the best score and image path if necessary

@@ -77,9 +77,7 @@ def generate_integrated_shopping_list(
     meal_plan_output, image_analysis, user_preferences
 ):
     # Prepare the prompt for the LLM
-    fridge_contents = image_analysis["choices"][0]["message"][
-        "content"
-    ]
+    fridge_contents = image_analysis["choices"][0]["message"]["content"]
     prompt = (
         f"Based on this meal plan: {meal_plan_output}, and the"
         f" following items in the fridge: {fridge_contents},"
@@ -131,9 +129,7 @@ print("Integrated Shopping List:", integrated_shopping_list)
 with open("nutrition_output.txt", "w") as file:
     file.write("Meal Plan:\n" + meal_plan_output + "\n\n")
     file.write(
-        "Integrated Shopping List:\n"
-        + integrated_shopping_list
-        + "\n"
+        "Integrated Shopping List:\n" + integrated_shopping_list + "\n"
     )
 print("Outputs have been saved to nutrition_output.txt")

@@ -42,9 +42,7 @@ def get_review_prompt(article):
     return prompt
-def social_media_prompt(
-    article: str, goal: str = "Clicks and engagement"
-):
+def social_media_prompt(article: str, goal: str = "Clicks and engagement"):
     prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
         "{{ARTICLE}}", article
     ).replace("{{GOAL}}", goal)

@@ -24,9 +24,7 @@ async def handle_websocket(websocket, path):
             # Broadcast the message to all other users in the public group chats.
             for other_websocket in public_group_chats:
                 if other_websocket != websocket:
-                    await other_websocket.send(
-                        f"{username}: {message}"
-                    )
+                    await other_websocket.send(f"{username}: {message}")
     finally:
         # Remove the user from the list of public group chats.
         public_group_chats.remove(websocket)

@@ -48,9 +48,7 @@ def generate_conversation(characters, topic):
 # Generate the conversation
-conversation = generate_conversation(
-    character_names, conversation_topic
-)
+conversation = generate_conversation(character_names, conversation_topic)
 # Play the conversation
 for line in conversation:

@@ -48,9 +48,7 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
     ) as executor:
         futures = []
         for i, (fn, args, kwargs) in enumerate(callable_functions):
-            futures.append(
-                executor.submit(worker, fn, args, kwargs, i)
-            )
+            futures.append(executor.submit(worker, fn, args, kwargs, i))
         # Wait for all threads to complete
         concurrent.futures.wait(futures)
@@ -59,9 +57,7 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
 # Adjusting the function to extract specific column values
-def extract_and_create_agents(
-    csv_file_path: str, target_columns: list
-):
+def extract_and_create_agents(csv_file_path: str, target_columns: list):
     """
     Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
     creates an Agent for each, and adds it to the swarm network.

@@ -31,9 +31,7 @@ llm = GPT4VisionAPI(openai_api_key=api_key, max_tokens=2000)
 assembly_line = (
     "playground/demos/swarm_of_mma_manufacturing/assembly_line.jpg"
 )
-red_robots = (
-    "playground/demos/swarm_of_mma_manufacturing/red_robots.jpg"
-)
+red_robots = "playground/demos/swarm_of_mma_manufacturing/red_robots.jpg"
 robots = "playground/demos/swarm_of_mma_manufacturing/robots.jpg"
 tesla_assembly_line = (
     "playground/demos/swarm_of_mma_manufacturing/tesla_assembly.jpg"
@@ -127,31 +125,19 @@ health_check = health_security_agent.run(
 print(
-    colored(
-        "--------------- Productivity agents initializing...", "green"
-    )
+    colored("--------------- Productivity agents initializing...", "green")
 )
 # Add the third task to the productivity_check_agent
 productivity_check = productivity_check_agent.run(
     health_check, assembly_line
 )
-print(
-    colored(
-        "--------------- Security agents initializing...", "green"
-    )
-)
+print(colored("--------------- Security agents initializing...", "green"))
 # Add the fourth task to the security_check_agent
-security_check = security_check_agent.run(
-    productivity_check, red_robots
-)
+security_check = security_check_agent.run(productivity_check, red_robots)
-print(
-    colored(
-        "--------------- Efficiency agents initializing...", "cyan"
-    )
-)
+print(colored("--------------- Efficiency agents initializing...", "cyan"))
 # Add the fifth task to the efficiency_check_agent
 efficiency_check = efficiency_check_agent.run(
     security_check, tesla_assembly_line

@@ -12,9 +12,7 @@ api_key = os.getenv("OPENAI_API_KEY")
 stability_api_key = os.getenv("STABILITY_API_KEY")
 # Initialize language model
-llm = OpenAIChat(
-    openai_api_key=api_key, temperature=0.5, max_tokens=3000
-)
+llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)
 # Initialize Vision model
 vision_api = GPT4VisionAPI(api_key=api_key)
@@ -51,17 +49,13 @@ workflow = SequentialWorkflow(max_loops=1)
 # Add tasks to workflow with personalized prompts
 workflow.add(architecture_analysis_agent, "Architecture Analysis")
-workflow.add(
-    infrastructure_evaluation_agent, "Infrastructure Evaluation"
-)
+workflow.add(infrastructure_evaluation_agent, "Infrastructure Evaluation")
 workflow.add(traffic_flow_analysis_agent, "Traffic Flow Analysis")
 workflow.add(
     environmental_impact_assessment_agent,
     "Environmental Impact Assessment",
 )
-workflow.add(
-    public_space_utilization_agent, "Public Space Utilization"
-)
+workflow.add(public_space_utilization_agent, "Public Space Utilization")
 workflow.add(
     socioeconomic_impact_analysis_agent,
     "Socioeconomic Impact Analysis",

@@ -8,9 +8,7 @@ model = QwenVLMultiModal(
 )
 # Run the model
-response = model(
-    "Hello, how are you?", "https://example.com/image.jpg"
-)
+response = model("Hello, how are you?", "https://example.com/image.jpg")
 # Print the response
 print(response)

@@ -3,9 +3,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 from swarms import ToolAgent
 # Load the pre-trained model and tokenizer
-model = AutoModelForCausalLM.from_pretrained(
-    "databricks/dolly-v2-12b"
-)
+model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
 tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
 # Define a JSON schema for person's information
@@ -20,9 +18,7 @@ json_schema = {
 }
 # Define the task to generate a person's information
-task = (
-    "Generate a person's information based on the following schema:"
-)
+task = "Generate a person's information based on the following schema:"
 # Create an instance of the ToolAgent class
 agent = ToolAgent(

@@ -8,9 +8,7 @@ model = QwenVLMultiModal(
 )
 # Run the model
-response = model(
-    "Hello, how are you?", "https://example.com/image.jpg"
-)
+response = model("Hello, how are you?", "https://example.com/image.jpg")
 # Print the response
 print(response)

@@ -7,6 +7,4 @@ model = TogetherLLM(
 )
 # Run the model
-model.run(
-    "Generate a blog post about the best way to make money online."
-)
+model.run("Generate a blog post about the best way to make money online.")

@@ -35,9 +35,7 @@ class DialogueAgent:
             [
                 self.system_message,
                 HumanMessage(
-                    content="\n".join(
-                        self.message_history + [self.prefix]
-                    )
+                    content="\n".join(self.message_history + [self.prefix])
                 ),
             ]
         )
@@ -76,9 +74,7 @@ class DialogueSimulator:
     def step(self) -> tuple[str, str]:
         # 1. choose the next speaker
-        speaker_idx = self.select_next_speaker(
-            self._step, self.agents
-        )
+        speaker_idx = self.select_next_speaker(self._step, self.agents)
         speaker = self.agents[speaker_idx]
         # 2. next speaker sends message
@@ -116,9 +112,7 @@ class BiddingDialogueAgent(DialogueAgent):
             message_history="\n".join(self.message_history),
             recent_message=self.message_history[-1],
         )
-        bid_string = self.model(
-            [SystemMessage(content=prompt)]
-        ).content
+        bid_string = self.model([SystemMessage(content=prompt)]).content
         return bid_string
@@ -140,10 +134,12 @@ player_descriptor_system_message = SystemMessage(
 def generate_character_description(character_name):
     character_specifier_prompt = [
         player_descriptor_system_message,
-        HumanMessage(content=f"""{game_description}
+        HumanMessage(
+            content=f"""{game_description}
 Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities.
 Speak directly to {character_name}.
-Do not add anything else."""),
+Do not add anything else."""
+        ),
     ]
     character_description = ChatOpenAI(temperature=1.0)(
         character_specifier_prompt
@@ -161,10 +157,9 @@ Your goal is to be as creative as possible and make the voters think you are the
 """
-def generate_character_system_message(
-    character_name, character_header
-):
-    return SystemMessage(content=f"""{character_header}
+def generate_character_system_message(character_name, character_header):
+    return SystemMessage(
+        content=f"""{character_header}
 You will speak in the style of {character_name}, and exaggerate their personality.
 You will come up with creative ideas related to {topic}.
 Do not say the same things over and over again.
@@ -176,7 +171,8 @@ Speak only from the perspective of {character_name}.
 Stop speaking the moment you finish speaking from your perspective.
 Never forget to keep your response to {word_limit} words!
 Do not add anything else.
-""")
+"""
+    )
 character_descriptions = [
@@ -190,9 +186,7 @@ character_headers = [
     )
 ]
 character_system_messages = [
-    generate_character_system_message(
-        character_name, character_headers
-    )
+    generate_character_system_message(character_name, character_headers)
     for character_name, character_headers in zip(
         character_names, character_headers
     )
@@ -261,7 +255,8 @@ for character_name, bidding_template in zip(
 topic_specifier_prompt = [
     SystemMessage(content="You can make a task more specific."),
-    HumanMessage(content=f"""{game_description}
+    HumanMessage(
+        content=f"""{game_description}
 You are the debate moderator.
 Please make the debate topic more specific.
@@ -269,7 +264,8 @@ topic_specifier_prompt = [
 Be creative and imaginative.
 Please reply with the specified topic in {word_limit} words or less.
 Speak directly to the presidential candidates: {*character_names,}.
-Do not add anything else."""),
+Do not add anything else."""
+    ),
 ]
 specified_topic = ChatOpenAI(temperature=1.0)(
     topic_specifier_prompt
@@ -298,9 +294,7 @@ def ask_for_bid(agent) -> str:
     return bid
-def select_next_speaker(
-    step: int, agents: List[DialogueAgent]
-) -> int:
+def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
     bids = []
     for agent in agents:
         bid = ask_for_bid(agent)

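The select_next_speaker hunk above ends before the selection logic, but its shape is clear: collect one bid per agent, then return the index of the winner. A plausible completion, assuming highest-bid-wins with ties broken toward the earliest agent (both are assumptions, since the rest of the function is not shown here):

    from typing import List

    def select_next_speaker_sketch(step: int, bids: List[int]) -> int:
        # Assumption: the highest bidder speaks next; max() keeps the
        # first index on ties, so earlier agents win tie-breaks here.
        return max(range(len(bids)), key=lambda i: bids[i])

    print(select_next_speaker_sketch(0, [3, 7, 7, 1]))  # 1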
@@ -44,7 +44,5 @@ manager = Agent(
 agents = [flow1, flow2, flow3]
 group_chat = GroupChat(agents=agents, messages=[], max_round=10)
-chat_manager = GroupChatManager(
-    groupchat=group_chat, selector=manager
-)
+chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
 chat_history = chat_manager("Write me a riddle")

@@ -6,7 +6,7 @@ from swarms import Agent, OpenAIChat
 from swarms.agents.multion_agent import MultiOnAgent
 from swarms.memory.chroma_db import ChromaDB
 from swarms.tools.tool import tool
-from swarms.utils.code_interpreter import SubprocessCodeInterpreter
+from swarms.tools.code_interpreter import SubprocessCodeInterpreter
 # Load the environment variables
 load_dotenv()

@@ -8,9 +8,7 @@ agent3 = Agent(llm=OpenAIChat(), agent_name="agent3")
 moderator = Agent(agent_name="moderator")
 agents = [agent1, agent2, agent3]
-message_pool = MessagePool(
-    agents=agents, moderator=moderator, turns=5
-)
+message_pool = MessagePool(agents=agents, moderator=moderator, turns=5)
 message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
 message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
 message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)

@@ -7,9 +7,7 @@ node = Worker(
 # Instantiate the Orchestrator with 10 agents
-orchestrator = Orchestrator(
-    node, agent_list=[node] * 10, task_queue=[]
-)
+orchestrator = Orchestrator(node, agent_list=[node] * 10, task_queue=[])
 # Agent 7 sends a message to Agent 9
 orchestrator.chat(

@@ -21,9 +21,7 @@ json_schema = {
 }
 # Define the task to generate a person's information
-task = (
-    "Generate a person's information based on the following schema:"
-)
+task = "Generate a person's information based on the following schema:"
 # Create an instance of the ToolAgent class
 agent = ToolAgent(

@@ -100,9 +100,7 @@ class PythonDocumentationSwarm:
             with open(file_path, "w") as file:
                 file.write(doc_content)
-            logger.info(
-                f"Documentation generated for {item.__name__}."
-            )
+            logger.info(f"Documentation generated for {item.__name__}.")
         except Exception as e:
             logger.error(
                 f"Error processing documentation for {item.__name__}."
@@ -130,8 +128,7 @@ class PythonDocumentationSwarm:
                 thread.join()
             logger.info(
-                "Documentation generated in 'swarms.structs'"
-                " directory."
+                "Documentation generated in 'swarms.structs'" " directory."
             )
         except Exception as e:
             logger.error("Error running documentation process.")
@@ -143,8 +140,7 @@ class PythonDocumentationSwarm:
                 executor.map(self.process_documentation, python_items)
             logger.info(
-                "Documentation generated in 'swarms.structs'"
-                " directory."
+                "Documentation generated in 'swarms.structs'" " directory."
             )
         except Exception as e:
             logger.error("Error running documentation process.")

@@ -4,7 +4,7 @@ B -> W1, W2, W3
 """
 from typing import List, Optional
 from pydantic import BaseModel, Field
-from swarms.utils.json_utils import str_to_json
+from swarms.tools.json_utils import str_to_json
 class HierarchicalSwarm(BaseModel):

@@ -35,7 +35,6 @@ agent = Agent(
 )
 out = agent.run(
-    "Use the search api to find the best restaurants in New York"
-    " City."
+    "Use the search api to find the best restaurants in New York" " City."
 )
 print(out)

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
 [tool.poetry]
 name = "swarms"
-version = "4.8.2"
+version = "4.8.4"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -35,25 +35,22 @@ python = ">=3.9,<4.0"
 torch = ">=2.1.1,<3.0"
 transformers = ">= 4.39.0, <5.0.0"
 asyncio = ">=3.4.3,<4.0"
-einops = "0.7.0"
-langchain-core = "0.1.33"
 langchain-community = "0.0.29"
 langchain-experimental = "0.0.55"
 backoff = "2.2.1"
 toml = "*"
 pypdf = "4.1.0"
-httpx = "0.24.1"
 ratelimit = "2.2.1"
 loguru = "0.7.2"
 pydantic = "2.6.4"
 tenacity = "8.2.3"
 Pillow = "10.2.0"
-rich = "13.5.2"
 psutil = "*"
 sentry-sdk = "*"
 python-dotenv = "*"
 accelerate = "0.28.0"
 opencv-python = "^4.9.0.80"
-yaml = "*"
 [tool.poetry.group.lint.dependencies]
 black = "^23.1.0"
@@ -71,7 +68,7 @@ pandas = "^2.2.2"
 fastapi = "^0.110.1"
 [tool.ruff]
-line-length = 128
+line-length = 75
 [tool.ruff.lint]
 select = ["E", "F", "W", "I", "UP"]
@@ -84,6 +81,21 @@ preview = true
 "swarms/prompts/**.py" = ["E501"]
 [tool.black]
-line-length = 70
-target-version = ['py38']
-preview = true
+target-version = ["py38"]
+line-length = 75
+include = '\.pyi?$'
+exclude = '''
+/(
+    \.git
+  | \.hg
+  | \.mypy_cache
+  | \.tox
+  | \.venv
+  | _build
+  | buck-out
+  | build
+  | dist
+  | docs
+)/
+'''

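This pyproject change drives most of the Python hunks in this commit: ruff's line length drops from 128 to 75 while black's rises from 70 to 75, so calls that previously had to wrap at 70 columns now fit on one line. A small sketch of the effect using black's format_str API (the snippet is lifted from one of the hunks above; exact wrapping at width 70 may differ slightly from the committed code):

    import black

    src = (
        'response = model(\n'
        '    "Hello, how are you?", "https://example.com/image.jpg"\n'
        ')\n'
    )

    # At 70 columns the call must stay wrapped; at 75 it collapses to
    # one line, which is exactly the shape of the diffs in this commit.
    print(black.format_str(src, mode=black.Mode(line_length=70)))
    print(black.format_str(src, mode=black.Mode(line_length=75)))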
@@ -2,8 +2,6 @@
 torch>=2.1.1,<3.0
 transformers==4.39.0
 asyncio>=3.4.3,<4.0
-einops==0.7.0
-langchain-core==0.1.33
 langchain-community==0.0.29
 langchain-experimental==0.0.55
 backoff==2.2.1

@@ -52,9 +52,7 @@ def main():
     # Gathering all functions from the swarms.utils module
     functions = [
         obj
-        for name, obj in inspect.getmembers(
-            sys.modules["swarms.utils"]
-        )
+        for name, obj in inspect.getmembers(sys.modules["swarms.utils"])
         if inspect.isfunction(obj)
     ]

@@ -57,9 +57,7 @@ def process_documentation(
     with open(file_path, "w") as file:
         file.write(doc_content)
-    print(
-        f"Processed documentation for {item.__name__}. at {file_path}"
-    )
+    print(f"Processed documentation for {item.__name__}. at {file_path}")
 def main(module: str = "docs/swarms/structs"):

@@ -68,9 +68,7 @@ def create_test(cls):
     # Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content)
     processed_content = model(
-        TEST_WRITER_SOP_PROMPT(
-            input_content, "swarms", "swarms.memory"
-        )
+        TEST_WRITER_SOP_PROMPT(input_content, "swarms", "swarms.memory")
     )
     processed_content = extract_code_from_markdown(processed_content)

@@ -57,9 +57,7 @@ def main():
     # Gathering all functions from the swarms.utils module
     functions = [
         obj
-        for name, obj in inspect.getmembers(
-            sys.modules["swarms.utils"]
-        )
+        for name, obj in inspect.getmembers(sys.modules["swarms.utils"])
         if inspect.isfunction(obj)
     ]

@@ -22,9 +22,7 @@ def generate_file_list(directory, output_file):
             # Remove the file extension
             file_name, _ = os.path.splitext(file)
             # Write the file name and path to the output file
-            f.write(
-                f'- {file_name}: "swarms/utils/{file_path}"\n'
-            )
+            f.write(f'- {file_name}: "swarms/utils/{file_path}"\n')
 # Use the function to generate the file list

@@ -13,18 +13,15 @@ def get_package_versions(requirements_path, output_path):
     for requirement in requirements:
         # Skip empty lines and comments
-        if (
-            requirement.strip() == ""
-            or requirement.strip().startswith("#")
-        ):
+        if requirement.strip() == "" or requirement.strip().startswith(
+            "#"
+        ):
             continue
         # Extract package name
         package_name = requirement.split("==")[0].strip()
         try:
-            version = pkg_resources.get_distribution(
-                package_name
-            ).version
+            version = pkg_resources.get_distribution(package_name).version
             package_versions.append(f"{package_name}=={version}")
         except pkg_resources.DistributionNotFound:
             package_versions.append(f"{package_name}: not installed")

@@ -147,7 +147,5 @@ class ToolAgent(Agent):
             )
         except Exception as error:
-            logger.error(
-                f"Error running {self.name} for task: {task}"
-            )
+            logger.error(f"Error running {self.name} for task: {task}")
             raise error

@@ -69,15 +69,11 @@ def cosine_similarity_top_k(
     score_array = cosine_similarity(X, Y)
     score_threshold = score_threshold or -1.0
     score_array[score_array < score_threshold] = 0
-    top_k = min(
-        top_k or len(score_array), np.count_nonzero(score_array)
-    )
-    top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[
-        -top_k:
-    ]
-    top_k_idxs = top_k_idxs[
-        np.argsort(score_array.ravel()[top_k_idxs])
-    ][::-1]
+    top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
+    top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
+    top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][
+        ::-1
+    ]
     ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
     scores = score_array.ravel()[top_k_idxs].tolist()
     return list(zip(*ret_idxs)), scores  # type: ignore

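The collapsed lines above are pure formatting, but the top-k trick they contain is easy to miss: np.argpartition finds the k largest scores in the flattened matrix without a full sort, and the follow-up argsort orders only those k. A small self-contained sketch of the same pattern (the array values are made up):

    import numpy as np

    scores = np.array([[0.1, 0.9], [0.7, 0.3]])
    top_k = 3

    # Partition the flattened array so the k largest values sit at the
    # end, then sort just those k indices in descending score order.
    idxs = np.argpartition(scores, -top_k, axis=None)[-top_k:]
    idxs = idxs[np.argsort(scores.ravel()[idxs])][::-1]

    print(np.unravel_index(idxs, scores.shape))  # row/col pairs of top k
    print(scores.ravel()[idxs].tolist())         # [0.9, 0.7, 0.3]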
@@ -44,9 +44,7 @@ class DictSharedMemory:
         entry_id = str(uuid.uuid4())
         data = {}
         epoch = datetime.datetime.utcfromtimestamp(0)
-        epoch = (
-            datetime.datetime.utcnow() - epoch
-        ).total_seconds()
+        epoch = (datetime.datetime.utcnow() - epoch).total_seconds()
         data[entry_id] = {
             "agent": agent_id,
             "epoch": epoch,

@@ -170,9 +170,7 @@ class LangchainChromaVectorMemory(AbstractVectorDatabase):
             )
             texts = [text.page_content for text in texts]
         elif type == "cos":
-            texts = self.db.similarity_search_with_score(
-                query=query, k=k
-            )
+            texts = self.db.similarity_search_with_score(query=query, k=k)
             texts = [
                 text[0].page_content
                 for text in texts

@@ -34,9 +34,7 @@ class PostgresDB(AbstractVectorDatabase):
             table_name (str): The name of the table in the database.
         """
-        self.engine = create_engine(
-            connection_string, *args, **kwargs
-        )
+        self.engine = create_engine(connection_string, *args, **kwargs)
         self.table_name = table_name
         self.VectorModel = self._create_vector_model()

@@ -123,9 +123,7 @@ class PineconeDB(AbstractVectorDatabase):
         Returns:
             str: _description_
         """
-        vector_id = (
-            vector_id if vector_id else str_to_hash(str(vector))
-        )
+        vector_id = vector_id if vector_id else str_to_hash(str(vector))
         params = {"namespace": namespace} | kwargs

@@ -40,9 +40,7 @@ class ShortTermMemory(BaseStructure):
         self.medium_term_memory = []
         self.lock = threading.Lock()
-    def add(
-        self, role: str = None, message: str = None, *args, **kwargs
-    ):
+    def add(self, role: str = None, message: str = None, *args, **kwargs):
         """Add a message to the short term memory.
         Args:
@@ -160,9 +158,7 @@ class ShortTermMemory(BaseStructure):
                 with open(filename, "w") as f:
                     json.dump(
                         {
-                            "short_term_memory": (
-                                self.short_term_memory
-                            ),
+                            "short_term_memory": (self.short_term_memory),
                             "medium_term_memory": (
                                 self.medium_term_memory
                             ),
@@ -184,9 +180,7 @@ class ShortTermMemory(BaseStructure):
             with self.lock:
                 with open(filename) as f:
                     data = json.load(f)
-                    self.short_term_memory = data.get(
-                        "short_term_memory", []
-                    )
+                    self.short_term_memory = data.get("short_term_memory", [])
                     self.medium_term_memory = data.get(
                         "medium_term_memory", []
                     )

@@ -5,9 +5,7 @@ from swarms.memory.base_vectordb import AbstractVectorDatabase
 try:
     import sqlite3
 except ImportError:
-    raise ImportError(
-        "Please install sqlite3 to use the SQLiteDB class."
-    )
+    raise ImportError("Please install sqlite3 to use the SQLiteDB class.")
 class SQLiteDB(AbstractVectorDatabase):

@@ -126,9 +126,7 @@ class WeaviateDB(AbstractVectorDatabase):
             print(f"Error adding object: {error}")
             raise
-    def query(
-        self, collection_name: str, query: str, limit: int = 10
-    ):
+    def query(self, collection_name: str, query: str, limit: int = 10):
         """Query objects from a specified collection.
         Args:

@@ -25,9 +25,7 @@ class BaseEmbeddingModel(
     tokenizer: Callable = None
     chunker: Callable = None
-    def embed_text_artifact(
-        self, artifact: TextArtifact
-    ) -> list[float]:
+    def embed_text_artifact(self, artifact: TextArtifact) -> list[float]:
         return self.embed_string(artifact.to_text())
     def embed_string(self, string: str) -> list[float]:

@@ -154,15 +154,11 @@ class AbstractLLM(ABC):
         Returns:
             _type_: _description_
         """
-        return await asyncio.gather(
-            *(self.arun(task) for task in tasks)
-        )
+        return await asyncio.gather(*(self.arun(task) for task in tasks))
     def chat(self, task: str, history: str = "") -> str:
         """Chat with the model"""
-        complete_task = (
-            task + " | " + history
-        )  # Delimiter for clarity
+        complete_task = task + " | " + history  # Delimiter for clarity
         return self.run(complete_task)
     def __call__(self, task: str) -> str:
@@ -209,9 +205,7 @@ class AbstractLLM(ABC):
     def log_event(self, message: str):
         """Log an event."""
-        logging.info(
-            f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {message}"
-        )
+        logging.info(f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {message}")
     def save_checkpoint(self, checkpoint_dir: str = "checkpoints"):
         """Save the model state."""

@@ -135,9 +135,7 @@ class BaseMultiModalModel:
             image_pil = Image.open(BytesIO(response.content))
             return image_pil
         except requests.RequestException as error:
-            print(
-                f"Error fetching image from {img} and error: {error}"
-            )
+            print(f"Error fetching image from {img} and error: {error}")
             return None
     def encode_img(self, img: str):
@@ -190,9 +188,7 @@ class BaseMultiModalModel:
         """Clear the chat history"""
         self.chat_history = []
-    def run_many(
-        self, tasks: List[str], imgs: List[str], *args, **kwargs
-    ):
+    def run_many(self, tasks: List[str], imgs: List[str], *args, **kwargs):
         """
         Run the model on multiple tasks and images all at once using concurrent
@@ -206,18 +202,14 @@ class BaseMultiModalModel:
         """
         # Instantiate the thread pool executor
-        with ThreadPoolExecutor(
-            max_workers=self.max_workers
-        ) as executor:
+        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
             results = executor.map(self.run, tasks, imgs)
         # Print the results for debugging
         for result in results:
             print(result)
-    def run_batch(
-        self, tasks_images: List[Tuple[str, str]]
-    ) -> List[str]:
+    def run_batch(self, tasks_images: List[Tuple[str, str]]) -> List[str]:
         """Process a batch of tasks and images"""
         with concurrent.futures.ThreadPoolExecutor() as executor:
             futures = [
@@ -244,9 +236,7 @@ class BaseMultiModalModel:
         """Process a batch of tasks and images asynchronously with retries"""
         loop = asyncio.get_event_loop()
         futures = [
-            loop.run_in_executor(
-                None, self.run_with_retries, task, img
-            )
+            loop.run_in_executor(None, self.run_with_retries, task, img)
             for task, img in tasks_images
         ]
         return await asyncio.gather(*futures)
@@ -264,9 +254,7 @@ class BaseMultiModalModel:
                 print(f"Error with the request {error}")
                 continue
-    def run_batch_with_retries(
-        self, tasks_images: List[Tuple[str, str]]
-    ):
+    def run_batch_with_retries(self, tasks_images: List[Tuple[str, str]]):
         """Run the model with retries"""
         for i in range(self.retries):
             try:

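The run_many / run_batch methods reformatted above share one pattern: fan a list of (task, image) pairs out over a thread pool and collect results in input order. A minimal standalone sketch of that pattern with a stubbed run (the names here are illustrative, not the swarms API):

    from concurrent.futures import ThreadPoolExecutor
    from typing import List, Tuple

    def run(task: str, img: str) -> str:
        # Stand-in for a model call; the real method hits an LLM or
        # vision API.
        return f"{task} -> {img}"

    def run_batch(
        tasks_images: List[Tuple[str, str]], max_workers: int = 5
    ) -> List[str]:
        tasks = [t for t, _ in tasks_images]
        imgs = [i for _, i in tasks_images]
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # executor.map preserves input order in its results.
            return list(executor.map(run, tasks, imgs))

    print(run_batch([("caption", "a.jpg"), ("classify", "b.jpg")]))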
@@ -299,9 +299,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
         """
         messages = params["messages"]
         temperature = float(params.get("temperature", 1.0))
-        repetition_penalty = float(
-            params.get("repetition_penalty", 1.0)
-        )
+        repetition_penalty = float(params.get("repetition_penalty", 1.0))
         top_p = float(params.get("top_p", 1.0))
         max_new_tokens = int(params.get("max_tokens", 256))
         query, history, image_list = self.process_history_and_images(
@@ -318,9 +316,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
         )
         inputs = {
             "input_ids": (
-                input_by_model["input_ids"]
-                .unsqueeze(0)
-                .to(self.device)
+                input_by_model["input_ids"].unsqueeze(0).to(self.device)
             ),
             "token_type_ids": (
                 input_by_model["token_type_ids"]
@@ -379,9 +375,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
             "text": generated_text,
             "usage": {
                 "prompt_tokens": input_echo_len,
-                "completion_tokens": (
-                    total_len - input_echo_len
-                ),
+                "completion_tokens": (total_len - input_echo_len),
                 "total_tokens": total_len,
             },
         }
@@ -437,9 +431,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
             for item in content:
                 if isinstance(item, ImageUrlContent):
                     image_url = item.image_url.url
-                    if image_url.startswith(
-                        "data:image/jpeg;base64,"
-                    ):
+                    if image_url.startswith("data:image/jpeg;base64,"):
                         base64_encoded_image = image_url.split(
                             "data:image/jpeg;base64,"
                         )[1]
@@ -471,9 +463,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
                         text_content,
                     )
                 else:
-                    raise AssertionError(
-                        "assistant reply before user"
-                    )
+                    raise AssertionError("assistant reply before user")
             else:
                 raise AssertionError(f"unrecognized role: {role}")

@@ -199,9 +199,7 @@ class Dalle3:
             with open(full_path, "wb") as file:
                 file.write(response.content)
         else:
-            raise ValueError(
-                f"Failed to download image from {img_url}"
-            )
+            raise ValueError(f"Failed to download image from {img_url}")
     def create_variations(self, img: str):
         """
@@ -249,9 +247,7 @@ class Dalle3:
                     "red",
                 )
             )
-            print(
-                colored(f"Error running Dalle3: {error.error}", "red")
-            )
+            print(colored(f"Error running Dalle3: {error.error}", "red"))
             raise error
     def print_dashboard(self):
@@ -310,9 +306,7 @@ class Dalle3:
                 executor.submit(self, task): task for task in tasks
             }
             results = []
-            for future in concurrent.futures.as_completed(
-                future_to_task
-            ):
+            for future in concurrent.futures.as_completed(future_to_task):
                 task = future_to_task[future]
                 try:
                     img = future.result()
@@ -359,9 +353,7 @@ class Dalle3:
         """Str method for the Dalle3 class"""
         return f"Dalle3(image_url={self.image_url})"
-    @backoff.on_exception(
-        backoff.expo, Exception, max_tries=max_retries
-    )
+    @backoff.on_exception(backoff.expo, Exception, max_tries=max_retries)
     def rate_limited_call(self, task: str):
         """Rate limited call to the Dalle3 API"""
         return self.__call__(task)

@@ -70,9 +70,7 @@ class DistilWhisperModel:
     def __init__(self, model_id="distil-whisper/distil-large-v2"):
         self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
         self.torch_dtype = (
-            torch.float16
-            if torch.cuda.is_available()
-            else torch.float32
+            torch.float16 if torch.cuda.is_available() else torch.float32
         )
         self.model_id = model_id
         self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
@@ -112,9 +110,7 @@ class DistilWhisperModel:
         :return: The transcribed text.
         """
         loop = asyncio.get_event_loop()
-        return await loop.run_in_executor(
-            None, self.transcribe, inputs
-        )
+        return await loop.run_in_executor(None, self.transcribe, inputs)

     def real_time_transcribe(self, audio_file_path, chunk_duration=5):
         """
@@ -138,9 +134,7 @@ class DistilWhisperModel:
         sample_rate = audio_input.sampling_rate
         len(audio_input.array) / sample_rate
         chunks = [
-            audio_input.array[
-                i : i + sample_rate * chunk_duration
-            ]
+            audio_input.array[i : i + sample_rate * chunk_duration]
             for i in range(
                 0,
                 len(audio_input.array),
@@ -149,9 +143,7 @@ class DistilWhisperModel:
         ]

         print(
-            colored(
-                "Starting real-time transcription...", "green"
-            )
+            colored("Starting real-time transcription...", "green")
         )

         for i, chunk in enumerate(chunks):
@@ -162,8 +154,8 @@ class DistilWhisperModel:
                 return_tensors="pt",
                 padding=True,
             )
-            processed_inputs = (
-                processed_inputs.input_values.to(self.device)
-            )
+            processed_inputs = processed_inputs.input_values.to(
+                self.device
+            )

             # Generate transcription for the chunk
@@ -174,9 +166,7 @@ class DistilWhisperModel:
             # Print the chunk's transcription
             print(
-                colored(
-                    f"Chunk {i+1}/{len(chunks)}: ", "yellow"
-                )
+                colored(f"Chunk {i+1}/{len(chunks)}: ", "yellow")
                 + transcription
             )
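For reference, the run_in_executor call above is the standard way to run a blocking function from async code without stalling the event loop. A minimal self-contained sketch of the pattern (function and file names are illustrative):

import asyncio
import time


def blocking_transcribe(path: str) -> str:
    time.sleep(1)  # stands in for a slow model inference call
    return f"transcript of {path}"


async def transcribe_async(path: str) -> str:
    loop = asyncio.get_running_loop()
    # None selects the default ThreadPoolExecutor; the coroutine suspends
    # while the blocking call runs on a worker thread.
    return await loop.run_in_executor(None, blocking_transcribe, path)


print(asyncio.run(transcribe_async("clip.wav")))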

@@ -112,9 +112,7 @@ class Gemini(BaseMultiModalModel):
         )

         # Initialize the model
-        self.model = genai.GenerativeModel(
-            model_name, *args, **kwargs
-        )
+        self.model = genai.GenerativeModel(model_name, *args, **kwargs)

         # Check for the key
         if self.gemini_api_key is None:
@@ -211,9 +209,7 @@ class Gemini(BaseMultiModalModel):
                 raise ValueError("Please provide a Gemini API key")

             # Load the image
-            img = [
-                {"mime_type": type, "data": Path(img).read_bytes()}
-            ]
+            img = [{"mime_type": type, "data": Path(img).read_bytes()}]
         except Exception as error:
             print(f"Error processing image: {error}")

@@ -42,9 +42,7 @@ class GPT4VSAM(BaseMultiModalModel):
         self.device = device
         self.return_related_marks = return_related_marks

-        self.sam = SegmentAnythingMarkGenerator(
-            device, *args, **kwargs
-        )
+        self.sam = SegmentAnythingMarkGenerator(device, *args, **kwargs)
         self.visualizer = MarkVisualizer(*args, **kwargs)

     def load_img(self, img: str) -> Any:

@@ -15,8 +15,7 @@ try:
     import cv2
 except ImportError:
     print(
-        "OpenCV not installed. Please install OpenCV to use this"
-        " model."
+        "OpenCV not installed. Please install OpenCV to use this" " model."
     )
     raise ImportError
@@ -248,9 +247,7 @@ class GPT4VisionAPI(BaseMultiModalModel):
             if not success:
                 break
             _, buffer = cv2.imencode(".jpg", frame)
-            base64_frames.append(
-                base64.b64encode(buffer).decode("utf-8")
-            )
+            base64_frames.append(base64.b64encode(buffer).decode("utf-8"))

         video.release()
         print(len(base64_frames), "frames read.")
@@ -433,9 +430,7 @@ class GPT4VisionAPI(BaseMultiModalModel):
     def health_check(self):
         """Health check for the GPT4Vision model"""
         try:
-            response = requests.get(
-                "https://api.openai.com/v1/engines"
-            )
+            response = requests.get("https://api.openai.com/v1/engines")
             return response.status_code == 200
         except requests.RequestException as error:
             print(f"Health check failed: {error}")

@@ -203,9 +203,7 @@ class HuggingfaceLLM(AbstractLLM):
             results = list(executor.map(self.run, tasks))
         return results

-    def run_batch(
-        self, tasks_images: List[Tuple[str, str]]
-    ) -> List[str]:
+    def run_batch(self, tasks_images: List[Tuple[str, str]]) -> List[str]:
         """Process a batch of tasks and images"""
         with concurrent.futures.ThreadPoolExecutor() as executor:
             futures = [

@@ -77,9 +77,7 @@ class Idefics(BaseMultiModalModel):
     def __init__(
         self,
-        model_name: Optional[
-            str
-        ] = "HuggingFaceM4/idefics-9b-instruct",
+        model_name: Optional[str] = "HuggingFaceM4/idefics-9b-instruct",
         device: Callable = autodetect_device,
         torch_dtype=torch.bfloat16,
         max_length: int = 100,

@@ -87,8 +87,8 @@ class Kosmos(BaseMultiModalModel):
             skip_special_tokens=True,
         )[0]

-        processed_text, entities = (
-            self.processor.post_process_generation(generated_texts)
+        processed_text, entities = self.processor.post_process_generation(
+            generated_texts
         )

         return processed_text, entities
@@ -189,9 +189,7 @@ class Kosmos(BaseMultiModalModel):
             )
             # draw bbox
             # random color
-            color = tuple(
-                np.random.randint(0, 255, size=3).tolist()
-            )
+            color = tuple(np.random.randint(0, 255, size=3).tolist())
             new_image = cv2.rectangle(
                 new_image,
                 (orig_x1, orig_y1),
@@ -210,9 +208,7 @@ class Kosmos(BaseMultiModalModel):
             if (
                 y1
-                < text_height
-                + text_offset_original
-                + 2 * text_spaces
+                < text_height + text_offset_original + 2 * text_spaces
             ):
                 y1 = (
                     orig_y1

@@ -115,12 +115,10 @@ class MedicalSAM:
         if len(box_torch.shape) == 2:
             box_torch = box_torch[:, None, :]

-        sparse_embeddings, dense_embeddings = (
-            self.model.prompt_encoder(
-                points=None,
-                boxes=box_torch,
-                masks=None,
-            )
-        )
+        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
+            points=None,
+            boxes=box_torch,
+            masks=None,
+        )

         low_res_logits, _ = self.model.mask_decoder(

@@ -74,9 +74,9 @@ class Mistral(AbstractLLM):
         """Run the model on a given task."""
         try:
-            model_inputs = self.tokenizer(
-                [task], return_tensors="pt"
-            ).to(self.device)
+            model_inputs = self.tokenizer([task], return_tensors="pt").to(
+                self.device
+            )
             generated_ids = self.model.generate(
                 **model_inputs,
                 max_length=self.max_length,
@@ -85,9 +85,7 @@ class Mistral(AbstractLLM):
                 max_new_tokens=self.max_length,
                 **kwargs,
             )
-            output_text = self.tokenizer.batch_decode(generated_ids)[
-                0
-            ]
+            output_text = self.tokenizer.batch_decode(generated_ids)[0]
             return output_text
         except Exception as e:
             raise ValueError(f"Error running the model: {str(e)}")

@@ -146,9 +146,7 @@ class MPT7B:
         self, prompts: list, temperature: float = 1.0
     ) -> list:
         """Batch generate text"""
-        self.logger.info(
-            f"Generating text for {len(prompts)} prompts..."
-        )
+        self.logger.info(f"Generating text for {len(prompts)} prompts...")
         results = []
         with torch.autocast("cuda", dtype=torch.bfloat16):
             for prompt in prompts:

@@ -53,9 +53,7 @@ def _create_retry_decorator(
             | retry_if_exception_type(llm.error.APIError)
             | retry_if_exception_type(llm.error.APIConnectionError)
             | retry_if_exception_type(llm.error.RateLimitError)
-            | retry_if_exception_type(
-                llm.error.ServiceUnavailableError
-            )
+            | retry_if_exception_type(llm.error.ServiceUnavailableError)
         ),
         before_sleep=before_sleep_log(logger, logging.WARNING),
     )
@@ -79,9 +77,7 @@ def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
             | retry_if_exception_type(llm.error.APIError)
             | retry_if_exception_type(llm.error.APIConnectionError)
             | retry_if_exception_type(llm.error.RateLimitError)
-            | retry_if_exception_type(
-                llm.error.ServiceUnavailableError
-            )
+            | retry_if_exception_type(llm.error.ServiceUnavailableError)
         ),
         before_sleep=before_sleep_log(logger, logging.WARNING),
     )
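Both decorators above are built with tenacity, which composes a wait strategy, a stop condition, and a predicate over exception types. A minimal sketch of an equivalent decorator (the exception classes here are illustrative stand-ins):

from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)


class TransientError(Exception):
    pass


class RateLimitError(Exception):
    pass


@retry(
    reraise=True,
    stop=stop_after_attempt(6),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=(
        retry_if_exception_type(TransientError)
        | retry_if_exception_type(RateLimitError)
    ),
)
def embed_once(text: str) -> list[float]:
    # Stand-in for the real API call; retried only on the listed errors.
    return [0.0]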
@@ -102,15 +98,11 @@ def _check_response(response: dict) -> dict:
     if any(len(d["embedding"]) == 1 for d in response["data"]):
         import llm

-        raise llm.error.APIError(
-            "OpenAI API returned an empty embedding"
-        )
+        raise llm.error.APIError("OpenAI API returned an empty embedding")
     return response


-def embed_with_retry(
-    embeddings: OpenAIEmbeddings, **kwargs: Any
-) -> Any:
+def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
     """Use tenacity to retry the embedding call."""
     retry_decorator = _create_retry_decorator(embeddings)
@@ -181,7 +173,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
     client: Any = None  #: :meta private:
     model: str = "text-embedding-ada-002"
-    deployment: str = model  # to support Azure OpenAI Service custom deployment names
+    deployment: str = (
+        model  # to support Azure OpenAI Service custom deployment names
+    )
     openai_api_version: str | None = None
     # to support Azure OpenAI Service custom endpoints
     openai_api_base: str | None = None
@@ -194,9 +188,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
     openai_api_key: str | None = None
     openai_organization: str | None = None
     allowed_special: Literal["all"] | set[str] = set()
-    disallowed_special: Literal["all"] | set[str] | Sequence[
-        str
-    ] = "all"
+    disallowed_special: Literal["all"] | set[str] | Sequence[str] = "all"
     chunk_size: int = 1000
     """Maximum number of texts to embed in each batch"""
     max_retries: int = 6
@@ -228,9 +220,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         extra = values.get("model_kwargs", {})
         for field_name in list(values):
             if field_name in extra:
-                raise ValueError(
-                    f"Found {field_name} supplied twice."
-                )
+                raise ValueError(f"Found {field_name} supplied twice.")
             if field_name not in all_required_field_names:
                 warnings.warn(
                     f"""WARNING! {field_name} is not default parameter.
@@ -339,9 +329,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         engine: str,
         chunk_size: int | None = None,
     ) -> list[list[float]]:
-        embeddings: list[list[float]] = [
-            [] for _ in range(len(texts))
-        ]
+        embeddings: list[list[float]] = [[] for _ in range(len(texts))]
         try:
             import tiktoken
         except ImportError:
@@ -358,8 +346,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
             encoding = tiktoken.encoding_for_model(model_name)
         except KeyError:
             logger.warning(
-                "Warning: model not found. Using cl100k_base"
-                " encoding."
+                "Warning: model not found. Using cl100k_base" " encoding."
             )
             model = "cl100k_base"
             encoding = tiktoken.get_encoding(model)
@@ -374,9 +361,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(
-                    token[j : j + self.embedding_ctx_length]
-                )
+                tokens.append(token[j : j + self.embedding_ctx_length])
                 indices.append(i)

         batched_embeddings: list[list[float]] = []
@@ -402,9 +387,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 r["embedding"] for r in response["data"]
             )

-        results: list[list[list[float]]] = [
-            [] for _ in range(len(texts))
-        ]
+        results: list[list[list[float]]] = [[] for _ in range(len(texts))]
         num_tokens_in_batch: list[list[int]] = [
             [] for _ in range(len(texts))
         ]
@@ -424,9 +407,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 average = np.average(
                     _result, axis=0, weights=num_tokens_in_batch[i]
                 )
-                embeddings[i] = (
-                    average / np.linalg.norm(average)
-                ).tolist()
+                embeddings[i] = (average / np.linalg.norm(average)).tolist()

         return embeddings
@@ -439,9 +420,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         engine: str,
         chunk_size: int | None = None,
     ) -> list[list[float]]:
-        embeddings: list[list[float]] = [
-            [] for _ in range(len(texts))
-        ]
+        embeddings: list[list[float]] = [[] for _ in range(len(texts))]
         try:
             import tiktoken
         except ImportError:
@@ -458,8 +437,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
             encoding = tiktoken.encoding_for_model(model_name)
         except KeyError:
             logger.warning(
-                "Warning: model not found. Using cl100k_base"
-                " encoding."
+                "Warning: model not found. Using cl100k_base" " encoding."
             )
             model = "cl100k_base"
             encoding = tiktoken.get_encoding(model)
@@ -474,9 +452,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(
-                    token[j : j + self.embedding_ctx_length]
-                )
+                tokens.append(token[j : j + self.embedding_ctx_length])
                 indices.append(i)

         batched_embeddings: list[list[float]] = []
@@ -491,9 +467,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 r["embedding"] for r in response["data"]
             )

-        results: list[list[list[float]]] = [
-            [] for _ in range(len(texts))
-        ]
+        results: list[list[list[float]]] = [[] for _ in range(len(texts))]
         num_tokens_in_batch: list[list[int]] = [
             [] for _ in range(len(texts))
         ]
@@ -515,9 +489,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 average = np.average(
                     _result, axis=0, weights=num_tokens_in_batch[i]
                 )
-                embeddings[i] = (
-                    average / np.linalg.norm(average)
-                ).tolist()
+                embeddings[i] = (average / np.linalg.norm(average)).tolist()

         return embeddings
@@ -536,9 +508,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         """
         # NOTE: to keep things simple, we assume the list may contain texts longer
         # than the maximum context and use length-safe embedding function.
-        return self._get_len_safe_embeddings(
-            texts, engine=self.deployment
-        )
+        return self._get_len_safe_embeddings(texts, engine=self.deployment)

     async def aembed_documents(
         self, texts: list[str], chunk_size: int | None = 0
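The _get_len_safe_embeddings hunks above implement a chunk-and-average scheme: texts longer than the model context are split into token windows, each window is embedded separately, and the windows are recombined as a token-count-weighted average that is then L2-normalized. A simplified sketch of that recombination (embed_window stands in for the real API call):

import numpy as np


def embed_window(window: list[int]) -> np.ndarray:
    # Illustrative stand-in for a real embeddings API call.
    rng = np.random.default_rng(len(window))
    return rng.standard_normal(8)


def embed_long_text(tokens: list[int], ctx_len: int = 8191) -> list[float]:
    # Split into context-sized windows, embed each, then combine as a
    # token-count-weighted average, re-normalized to unit length.
    windows = [tokens[j : j + ctx_len] for j in range(0, len(tokens), ctx_len)]
    vectors = [embed_window(w) for w in windows]
    weights = [len(w) for w in windows]
    avg = np.average(vectors, axis=0, weights=weights)
    return (avg / np.linalg.norm(avg)).tolist()


print(embed_long_text(list(range(20000))))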

@@ -129,14 +129,9 @@ class GooglePalm(BaseLLM, BaseModel):
             values["temperature"] is not None
             and not 0 <= values["temperature"] <= 1
         ):
-            raise ValueError(
-                "temperature must be in the range [0.0, 1.0]"
-            )
+            raise ValueError("temperature must be in the range [0.0, 1.0]")

-        if (
-            values["top_p"] is not None
-            and not 0 <= values["top_p"] <= 1
-        ):
+        if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
             raise ValueError("top_p must be in the range [0.0, 1.0]")

         if values["top_k"] is not None and values["top_k"] <= 0:
@@ -146,9 +141,7 @@ class GooglePalm(BaseLLM, BaseModel):
             values["max_output_tokens"] is not None
             and values["max_output_tokens"] <= 0
         ):
-            raise ValueError(
-                "max_output_tokens must be greater than zero"
-            )
+            raise ValueError("max_output_tokens must be greater than zero")

         return values
@@ -177,12 +170,8 @@ class GooglePalm(BaseLLM, BaseModel):
             prompt_generations = []
             for candidate in completion.candidates:
                 raw_text = candidate["output"]
-                stripped_text = _strip_erroneous_leading_spaces(
-                    raw_text
-                )
-                prompt_generations.append(
-                    Generation(text=stripped_text)
-                )
+                stripped_text = _strip_erroneous_leading_spaces(raw_text)
+                prompt_generations.append(Generation(text=stripped_text))
             generations.append(prompt_generations)
         return LLMResult(generations=generations)

@@ -139,6 +139,4 @@ class QwenVLMultiModal(BaseMultiModalModel):
             )
             return response, history
         except Exception as e:
-            raise Exception(
-                "An error occurred during the chat."
-            ) from e
+            raise Exception("An error occurred during the chat.") from e

@@ -143,9 +143,7 @@ class SamplingParams:
         self.logprobs = logprobs
         self.prompt_logprobs = prompt_logprobs
         self.skip_special_tokens = skip_special_tokens
-        self.spaces_between_special_tokens = (
-            spaces_between_special_tokens
-        )
+        self.spaces_between_special_tokens = spaces_between_special_tokens
         self.logits_processors = logits_processors
         self.include_stop_str_in_output = include_stop_str_in_output
         self._verify_args()
@@ -189,31 +187,23 @@ class SamplingParams:
                 f" {self.temperature}."
             )
         if not 0.0 < self.top_p <= 1.0:
-            raise ValueError(
-                f"top_p must be in (0, 1], got {self.top_p}."
-            )
+            raise ValueError(f"top_p must be in (0, 1], got {self.top_p}.")
         if self.top_k < -1 or self.top_k == 0:
             raise ValueError(
                 "top_k must be -1 (disable), or at least 1, "
                 f"got {self.top_k}."
             )
         if not 0.0 <= self.min_p <= 1.0:
-            raise ValueError(
-                f"min_p must be in [0, 1], got {self.min_p}."
-            )
+            raise ValueError(f"min_p must be in [0, 1], got {self.min_p}.")
         if self.max_tokens is not None and self.max_tokens < 1:
             raise ValueError(
-                "max_tokens must be at least 1, got"
-                f" {self.max_tokens}."
+                "max_tokens must be at least 1, got" f" {self.max_tokens}."
             )
         if self.logprobs is not None and self.logprobs < 0:
             raise ValueError(
                 f"logprobs must be non-negative, got {self.logprobs}."
             )
-        if (
-            self.prompt_logprobs is not None
-            and self.prompt_logprobs < 0
-        ):
+        if self.prompt_logprobs is not None and self.prompt_logprobs < 0:
             raise ValueError(
                 "prompt_logprobs must be non-negative, got "
                 f"{self.prompt_logprobs}."
@@ -230,13 +220,9 @@ class SamplingParams:
                 "temperature must be 0 when using beam search."
             )
         if self.top_p < 1.0 - _SAMPLING_EPS:
-            raise ValueError(
-                "top_p must be 1 when using beam search."
-            )
+            raise ValueError("top_p must be 1 when using beam search.")
         if self.top_k != -1:
-            raise ValueError(
-                "top_k must be -1 when using beam search."
-            )
+            raise ValueError("top_k must be -1 when using beam search.")
         if self.early_stopping not in [True, False, "never"]:
             raise ValueError(
                 "early_stopping must be True, False, or 'never', "

@@ -88,15 +88,11 @@ class SpeechT5:
         self.model_name = model_name
         self.vocoder_name = vocoder_name
         self.dataset_name = dataset_name
-        self.processor = SpeechT5Processor.from_pretrained(
-            self.model_name
-        )
+        self.processor = SpeechT5Processor.from_pretrained(self.model_name)
         self.model = SpeechT5ForTextToSpeech.from_pretrained(
             self.model_name
         )
-        self.vocoder = SpeechT5HifiGan.from_pretrained(
-            self.vocoder_name
-        )
+        self.vocoder = SpeechT5HifiGan.from_pretrained(self.vocoder_name)
         self.embeddings_dataset = load_dataset(
             self.dataset_name, split="validation"
         )
@@ -121,9 +117,7 @@ class SpeechT5:
     def set_model(self, model_name: str):
         """Set the model to a new model."""
         self.model_name = model_name
-        self.processor = SpeechT5Processor.from_pretrained(
-            self.model_name
-        )
+        self.processor = SpeechT5Processor.from_pretrained(self.model_name)
         self.model = SpeechT5ForTextToSpeech.from_pretrained(
             self.model_name
         )
@@ -131,9 +125,7 @@ class SpeechT5:
     def set_vocoder(self, vocoder_name):
         """Set the vocoder to a new vocoder."""
         self.vocoder_name = vocoder_name
-        self.vocoder = SpeechT5HifiGan.from_pretrained(
-            self.vocoder_name
-        )
+        self.vocoder = SpeechT5HifiGan.from_pretrained(self.vocoder_name)

     def set_embeddings_dataset(self, dataset_name):
         """Set the embeddings dataset to a new dataset."""

@@ -127,9 +127,7 @@ class SSD1B:
         if task in self.cache:
             return self.cache[task]
         try:
-            img = self.pipe(
-                prompt=task, neg_prompt=neg_prompt
-            ).images[0]
+            img = self.pipe(prompt=task, neg_prompt=neg_prompt).images[0]

             # Generate a unique filename for the image
             img_name = f"{uuid.uuid4()}.{self.image_format}"
@@ -223,9 +221,7 @@ class SSD1B:
                 executor.submit(self, task): task for task in tasks
             }
             results = []
-            for future in concurrent.futures.as_completed(
-                future_to_task
-            ):
+            for future in concurrent.futures.as_completed(future_to_task):
                 task = future_to_task[future]
                 try:
                     img = future.result()
@@ -272,9 +268,7 @@ class SSD1B:
         """Str method for the SSD1B class"""
         return f"SSD1B(image_url={self.image_url})"

-    @backoff.on_exception(
-        backoff.expo, Exception, max_tries=max_retries
-    )
+    @backoff.on_exception(backoff.expo, Exception, max_tries=max_retries)
     def rate_limited_call(self, task: str):
         """Rate limited call to the SSD1B API"""
         return self.__call__(task)
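The batched-generation hunks here (and the matching ones in Dalle3 above) share one fan-out pattern: submit every task to a ThreadPoolExecutor, keep a future-to-task map, and harvest with as_completed so a failure can be traced back to the task that caused it. A minimal sketch:

import concurrent.futures


def run_one(task: str) -> str:
    return task.upper()  # stands in for an image-generation call


tasks = ["a red cube", "a blue sphere"]
with concurrent.futures.ThreadPoolExecutor() as executor:
    future_to_task = {executor.submit(run_one, t): t for t in tasks}
    results = []
    for future in concurrent.futures.as_completed(future_to_task):
        task = future_to_task[future]  # recover the originating task
        try:
            results.append(future.result())
        except Exception as error:
            print(f"{task!r} failed: {error}")
print(results)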

@@ -120,9 +120,7 @@ class TogetherLLM(AbstractLLM):
             out = response.json()
             content = (
-                out["choices"][0]
-                .get("message", {})
-                .get("content", None)
+                out["choices"][0].get("message", {}).get("content", None)
             )
             if self.streaming_enabled:
                 content = self.stream_response(content)

@@ -15,9 +15,7 @@ class UltralyticsModel(BaseMultiModalModel):
         **kwargs: Arbitrary keyword arguments.
     """

-    def __init__(
-        self, model_name: str = "yolov8n.pt", *args, **kwargs
-    ):
+    def __init__(self, model_name: str = "yolov8n.pt", *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.model_name = model_name

@@ -78,9 +78,7 @@ class WizardLLMStoryTeller:
             bnb_config = BitsAndBytesConfig(**quantization_config)

         try:
-            self.tokenizer = AutoTokenizer.from_pretrained(
-                self.model_id
-            )
+            self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
             self.model = AutoModelForCausalLM.from_pretrained(
                 self.model_id, quantization_config=bnb_config
             )

@@ -78,9 +78,7 @@ class YarnMistral128:
             bnb_config = BitsAndBytesConfig(**quantization_config)

         try:
-            self.tokenizer = AutoTokenizer.from_pretrained(
-                self.model_id
-            )
+            self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
             self.model = AutoModelForCausalLM.from_pretrained(
                 self.model_id,
                 quantization_config=bnb_config,

@@ -87,9 +87,7 @@ class Yi34B200k:
             top_k=self.top_k,
             top_p=self.top_p,
         )
-        return self.tokenizer.decode(
-            outputs[0], skip_special_tokens=True
-        )
+        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)


 # # Example usage

@@ -91,8 +91,7 @@ def format_vision_prompt(objective, previous_action):
     """
     if previous_action:
         previous_action = (
-            "Here was the previous action you took:"
-            f" {previous_action}"
+            "Here was the previous action you took:" f" {previous_action}"
         )
     else:
         previous_action = ""

@@ -19,7 +19,7 @@ from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
 )
 from swarms.structs.conversation import Conversation
 from swarms.tools.tool import BaseTool
-from swarms.utils.code_interpreter import SubprocessCodeInterpreter
+from swarms.tools.code_interpreter import SubprocessCodeInterpreter
 from swarms.utils.data_to_text import data_to_text
 from swarms.utils.parse_code import extract_code_from_markdown
 from swarms.utils.pdf_to_text import pdf_to_text
@@ -326,9 +326,7 @@ class Agent:
                 )
             else:
-                tools_prompt = tool_usage_worker_prompt(
-                    tools=self.tools
-                )
+                tools_prompt = tool_usage_worker_prompt(tools=self.tools)

                 # Append the tools prompt to the short_term_memory
                 self.short_memory.add(
@@ -354,9 +352,7 @@ class Agent:
                 f"{self.agent_name}.log",
                 level="INFO",
                 colorize=True,
-                format=(
-                    "<green>{time}</green> <level>{message}</level>"
-                ),
+                format=("<green>{time}</green> <level>{message}</level>"),
                 backtrace=True,
                 diagnose=True,
             )
@@ -403,9 +399,7 @@ class Agent:
                 self.llm.temperature = 0.7
         except Exception as error:
             print(
-                colored(
-                    f"Error dynamically changing temperature: {error}"
-                )
+                colored(f"Error dynamically changing temperature: {error}")
             )

     def format_prompt(self, template, **kwargs: Any) -> str:
@@ -418,24 +412,16 @@ class Agent:
             logger.info(f"Adding task to memory: {task}")
             self.short_memory.add(f"{self.user_name}: {task}")
         except Exception as error:
-            print(
-                colored(
-                    f"Error adding task to memory: {error}", "red"
-                )
-            )
+            print(colored(f"Error adding task to memory: {error}", "red"))

     def add_message_to_memory(self, message: str):
         """Add the message to the memory"""
         try:
             logger.info(f"Adding message to memory: {message}")
-            self.short_memory.add(
-                role=self.agent_name, content=message
-            )
+            self.short_memory.add(role=self.agent_name, content=message)
         except Exception as error:
             print(
-                colored(
-                    f"Error adding message to memory: {error}", "red"
-                )
+                colored(f"Error adding message to memory: {error}", "red")
             )

     def add_message_to_memory_and_truncate(self, message: str):
@@ -549,9 +535,7 @@ class Agent:
         history = [f"{user_name}: {task}"]
         return history

-    def _dynamic_prompt_setup(
-        self, dynamic_prompt: str, task: str
-    ) -> str:
+    def _dynamic_prompt_setup(self, dynamic_prompt: str, task: str) -> str:
         """_dynamic_prompt_setup summary

         Args:
@@ -561,9 +545,7 @@ class Agent:
         Returns:
             str: _description_
         """
-        dynamic_prompt = (
-            dynamic_prompt or self.construct_dynamic_prompt()
-        )
+        dynamic_prompt = dynamic_prompt or self.construct_dynamic_prompt()
         combined_prompt = f"{dynamic_prompt}\n{task}"
         return combined_prompt
@@ -581,9 +563,7 @@ class Agent:
             self.activate_autonomous_agent()

             if task:
-                self.short_memory.add(
-                    role=self.user_name, content=task
-                )
+                self.short_memory.add(role=self.user_name, content=task)

             loop_count = 0
             response = None
@@ -600,9 +580,7 @@ class Agent:
                 if self.dynamic_temperature_enabled:
                     self.dynamic_temperature()

-                task_prompt = (
-                    self.short_memory.return_history_as_string()
-                )
+                task_prompt = self.short_memory.return_history_as_string()

                 attempt = 0
                 success = False
@@ -621,9 +599,7 @@ class Agent:
                         if self.tools:
                             # Extract code from markdown
-                            response = extract_code_from_markdown(
-                                response
-                            )
+                            response = extract_code_from_markdown(response)

                             # Execute the tool by name
                             execute_tool_by_name(
@@ -634,15 +610,13 @@ class Agent:
                         if self.code_interpreter:
                             # Extract code from markdown
-                            extracted_code = (
-                                extract_code_from_markdown(response)
+                            extracted_code = extract_code_from_markdown(
+                                response
                             )

                             # Execute the code
                             # execution = execute_command(extracted_code)
-                            execution = CodeExecutor().run(
-                                extracted_code
-                            )
+                            execution = CodeExecutor().run(extracted_code)

                             # Add the execution to the memory
                             self.short_memory.add(
@@ -658,9 +632,7 @@ class Agent:
                             )

                         if self.evaluator:
-                            evaluated_response = self.evaluator(
-                                response
-                            )
+                            evaluated_response = self.evaluator(response)
                             print(
                                 "Evaluated Response:"
                                 f" {evaluated_response}"
@@ -672,9 +644,7 @@ class Agent:
                         # Sentiment analysis
                         if self.sentiment_analyzer:
-                            sentiment = self.sentiment_analyzer(
-                                response
-                            )
+                            sentiment = self.sentiment_analyzer(response)
                             print(f"Sentiment: {sentiment}")

                             if sentiment > self.sentiment_threshold:
@@ -726,9 +696,8 @@ class Agent:
                     and self._check_stopping_condition(response)
                 ):
                     break
-                elif (
-                    self.stopping_func is not None
-                    and self.stopping_func(response)
+                elif self.stopping_func is not None and self.stopping_func(
+                    response
                 ):
                     break
@@ -826,9 +795,7 @@ class Agent:
         context = f"""
             System: This reminds you of these events from your past: [{ltr}]
         """
-        return self.short_memory.add(
-            role=self.agent_name, content=context
-        )
+        return self.short_memory.add(role=self.agent_name, content=context)

     def add_memory(self, message: str):
         """Add a memory to the agent
@@ -840,9 +807,7 @@ class Agent:
             _type_: _description_
         """
         logger.info(f"Adding memory: {message}")
-        return self.short_memory.add(
-            role=self.agent_name, content=message
-        )
+        return self.short_memory.add(role=self.agent_name, content=message)

     async def run_concurrent(self, tasks: List[str], **kwargs):
         """
@@ -889,9 +854,7 @@ class Agent:
                 json.dump(self.short_memory, f)
             # print(f"Saved agent history to {file_path}")
         except Exception as error:
-            print(
-                colored(f"Error saving agent history: {error}", "red")
-            )
+            print(colored(f"Error saving agent history: {error}", "red"))

     def load(self, file_path: str):
         """
@@ -916,23 +879,11 @@ class Agent:
         Prints the entire history and memory of the agent.
        Each message is colored and formatted for better readability.
         """
-        print(
-            colored(
-                "Agent History and Memory", "cyan", attrs=["bold"]
-            )
-        )
-        print(
-            colored(
-                "========================", "cyan", attrs=["bold"]
-            )
-        )
-        for loop_index, history in enumerate(
-            self.short_memory, start=1
-        ):
+        print(colored("Agent History and Memory", "cyan", attrs=["bold"]))
+        print(colored("========================", "cyan", attrs=["bold"]))
+        for loop_index, history in enumerate(self.short_memory, start=1):
             print(
-                colored(
-                    f"\nLoop {loop_index}:", "yellow", attrs=["bold"]
-                )
+                colored(f"\nLoop {loop_index}:", "yellow", attrs=["bold"])
             )
             for message in history:
                 speaker, _, message_text = message.partition(": ")
@@ -943,8 +894,7 @@ class Agent:
                     )
                 else:
                     print(
-                        colored(f"{speaker}:", "blue")
-                        + f" {message_text}"
+                        colored(f"{speaker}:", "blue") + f" {message_text}"
                     )
             print(colored("------------------------", "cyan"))
         print(colored("End of Agent History", "cyan", attrs=["bold"]))
@@ -975,9 +925,7 @@ class Agent:
             self.short_memory.add(
                 role=self.agent_name, content=response
             )
-            self.short_memory.add(
-                role=self.user_name, content=task
-            )
+            self.short_memory.add(role=self.user_name, content=task)
         else:
             self.short_memory.add(
                 role=self.agent_name, content=response
@@ -1054,9 +1002,7 @@ class Agent:
         Apply the response filters to the response
         """
-        logger.info(
-            f"Applying response filters to response: {response}"
-        )
+        logger.info(f"Applying response filters to response: {response}")
         for word in self.response_filters:
             response = response.replace(word, "[FILTERED]")
         return response
@@ -1096,9 +1042,7 @@ class Agent:
             with open(file_path, "w") as f:
                 yaml.dump(self.__dict__, f)
         except Exception as error:
-            print(
-                colored(f"Error saving agent to YAML: {error}", "red")
-            )
+            print(colored(f"Error saving agent to YAML: {error}", "red"))

     def save_state(self, file_path: str) -> None:
         """
@@ -1126,9 +1070,7 @@ class Agent:
                 "retry_interval": self.retry_interval,
                 "interactive": self.interactive,
                 "dashboard": self.dashboard,
-                "dynamic_temperature": (
-                    self.dynamic_temperature_enabled
-                ),
+                "dynamic_temperature": (self.dynamic_temperature_enabled),
                 "autosave": self.autosave,
                 "saved_state_path": self.saved_state_path,
                 "max_loops": self.max_loops,
@@ -1137,14 +1079,10 @@ class Agent:
             with open(file_path, "w") as f:
                 json.dump(state, f, indent=4)

-            saved = colored(
-                f"Saved agent state to: {file_path}", "green"
-            )
+            saved = colored(f"Saved agent state to: {file_path}", "green")
             print(saved)
         except Exception as error:
-            print(
-                colored(f"Error saving agent state: {error}", "red")
-            )
+            print(colored(f"Error saving agent state: {error}", "red"))

     def state_to_str(self):
         """Transform the JSON into a string"""
@@ -1163,9 +1101,7 @@ class Agent:
                 "retry_interval": self.retry_interval,
                 "interactive": self.interactive,
                 "dashboard": self.dashboard,
-                "dynamic_temperature": (
-                    self.dynamic_temperature_enabled
-                ),
+                "dynamic_temperature": (self.dynamic_temperature_enabled),
                 "autosave": self.autosave,
                 "saved_state_path": self.saved_state_path,
                 "max_loops": self.max_loops,
@@ -1214,9 +1150,7 @@ class Agent:
             print(f"Agent state loaded from {file_path}")
         except Exception as error:
-            print(
-                colored(f"Error loading agent state: {error}", "red")
-            )
+            print(colored(f"Error loading agent state: {error}", "red"))

     def retry_on_failure(
         self,
@@ -1232,9 +1166,7 @@ class Agent:
             try:
                 return function()
             except Exception as error:
-                logging.error(
-                    f"Error generating response: {error}"
-                )
+                logging.error(f"Error generating response: {error}")
                 attempt += 1
                 time.sleep(retry_delay)
         raise Exception("All retry attempts failed")
@@ -1320,9 +1252,7 @@ class Agent:
             for doc in docs:
                 data = data_to_text(doc)

-            return self.short_memory.add(
-                role=self.user_name, content=data
-            )
+            return self.short_memory.add(role=self.user_name, content=data)
         except Exception as error:
             print(colored(f"Error ingesting docs: {error}", "red"))
@@ -1338,9 +1268,7 @@ class Agent:
         try:
             logger.info(f"Ingesting pdf: {pdf}")
             text = pdf_to_text(pdf)
-            return self.short_memory.add(
-                role=self.user_name, content=text
-            )
+            return self.short_memory.add(role=self.user_name, content=text)
         except Exception as error:
             print(colored(f"Error ingesting pdf: {error}", "red"))
@@ -1361,11 +1289,7 @@ class Agent:
             message = f"{agent_name}: {message}"
             return self.run(message, *args, **kwargs)
         except Exception as error:
-            print(
-                colored(
-                    f"Error sending agent message: {error}", "red"
-                )
-            )
+            print(colored(f"Error sending agent message: {error}", "red"))

     def truncate_history(self):
         """
@@ -1407,9 +1331,7 @@ class Agent:
             for file in files:
                 text = data_to_text(file)

-            return self.short_memory.add(
-                role=self.user_name, content=text
-            )
+            return self.short_memory.add(role=self.user_name, content=text)
         except Exception as error:
             print(
                 colored(

@@ -117,9 +117,7 @@ class AgentRearrange(BaseSwarm):
             return None
         task_to_run = specific_tasks.get(dest_agent_name, task)
         if self.custom_prompt:
-            out = dest_agent.run(
-                f"{task_to_run} {self.custom_prompt}"
-            )
+            out = dest_agent.run(f"{task_to_run} {self.custom_prompt}")
         else:
             out = dest_agent.run(f"{task_to_run} (from {source})")
         return out
@@ -138,9 +136,7 @@ class AgentRearrange(BaseSwarm):
                 results.append(result)
             else:
                 for destination in destinations:
-                    task = specific_tasks.get(
-                        destination, default_task
-                    )
+                    task = specific_tasks.get(destination, default_task)
                     destination_agent = self.self_find_agent_by_name(
                         destination
                     )
@@ -156,9 +152,7 @@ class AgentRearrange(BaseSwarm):
         **specific_tasks,
     ):
         self.flows.clear()  # Reset previous flows
-        results = self.process_flows(
-            pattern, default_task, specific_tasks
-        )
+        results = self.process_flows(pattern, default_task, specific_tasks)
         return results

@@ -67,9 +67,7 @@ class AsyncWorkflow:
         except Exception as error:
             logger.error(f"[ERROR][AsyncWorkflow] {error}")

-    async def delete(
-        self, task: Any = None, tasks: List[Task] = None
-    ):
+    async def delete(self, task: Any = None, tasks: List[Task] = None):
         """Delete a task from the workflow"""
         try:
             if task:

@@ -140,9 +140,7 @@ class AutoSwarmRouter(BaseSwarm):
             if self.name in self.swarm_dict:
                 # If a match is found then send the task to the swarm
-                out = self.swarm_dict[self.name].run(
-                    task, *args, **kwargs
-                )
+                out = self.swarm_dict[self.name].run(task, *args, **kwargs)

                 if self.custom_postprocess:
                     # If custom postprocess function is provided then run it
@@ -151,9 +149,7 @@ class AutoSwarmRouter(BaseSwarm):
                 return out

             # If no match is found then return None
-            raise ValueError(
-                f"Swarm with name {self.name} not found."
-            )
+            raise ValueError(f"Swarm with name {self.name} not found.")
         except Exception as e:
             logger.error(f"Error: {e}")
             raise e

@@ -155,9 +155,7 @@ class AutoScaler(BaseStructure):
                 for _ in range(new_agents_counts):
                     self.agents_pool.append(self.agents[0]())
         except Exception as error:
-            print(
-                f"Error scaling up: {error} try again with a new task"
-            )
+            print(f"Error scaling up: {error} try again with a new task")

     def scale_down(self):
         """scale down"""
@@ -169,13 +167,10 @@ class AutoScaler(BaseStructure):
                     del self.agents_pool[-1]  # remove last agent
         except Exception as error:
             print(
-                f"Error scaling down: {error} try again with a new"
-                " task"
+                f"Error scaling down: {error} try again with a new" " task"
             )

-    def run(
-        self, agent_id, task: Optional[str] = None, *args, **kwargs
-    ):
+    def run(self, agent_id, task: Optional[str] = None, *args, **kwargs):
         """Run agent the task on the agent id

         Args:
@@ -203,11 +198,7 @@ class AutoScaler(BaseStructure):
                 sleep(60)  # check minute
                 pending_tasks = self.task_queue.qsize()
                 active_agents = sum(
-                    [
-                        1
-                        for agent in self.agents_pool
-                        if agent.is_busy()
-                    ]
+                    [1 for agent in self.agents_pool if agent.is_busy()]
                )

                 if (
@@ -246,17 +237,13 @@ class AutoScaler(BaseStructure):
                     if available_agent:
                         available_agent.run(task)
         except Exception as error:
-            print(
-                f"Error starting: {error} try again with a new task"
-            )
+            print(f"Error starting: {error} try again with a new task")

     def check_agent_health(self):
         """Checks the health of each agent and replaces unhealthy agents."""
         for i, agent in enumerate(self.agents_pool):
             if not agent.is_healthy():
-                logging.warning(
-                    f"Replacing unhealthy agent at index {i}"
-                )
+                logging.warning(f"Replacing unhealthy agent at index {i}")
                 self.agents_pool[i] = self.agent()

     def balance_load(self):
@@ -273,9 +260,7 @@ class AutoScaler(BaseStructure):
                 " task"
             )

-    def set_scaling_strategy(
-        self, strategy: Callable[[int, int], int]
-    ):
+    def set_scaling_strategy(self, strategy: Callable[[int, int], int]):
         """Set a custom scaling strategy."""
         self.custom_scale_strategy = strategy

@@ -187,9 +187,7 @@ class BaseStructure(BaseModel):
     async def run_async(self, *args, **kwargs):
         """Run the structure asynchronously."""
         loop = asyncio.get_event_loop()
-        return await loop.run_in_executor(
-            None, self.run, *args, **kwargs
-        )
+        return await loop.run_in_executor(None, self.run, *args, **kwargs)

     async def save_metadata_async(self, metadata: Dict[str, Any]):
         """Save metadata to file asynchronously.
@@ -222,9 +220,7 @@ class BaseStructure(BaseModel):
             None, self.log_error, error_message
         )

-    async def save_artifact_async(
-        self, artifact: Any, artifact_name: str
-    ):
+    async def save_artifact_async(self, artifact: Any, artifact_name: str):
         """Save artifact to file asynchronously.

         Args:
@@ -266,9 +262,7 @@ class BaseStructure(BaseModel):
             None, self.log_event, event, event_type
         )

-    async def asave_to_file(
-        self, data: Any, file: str, *args, **kwargs
-    ):
+    async def asave_to_file(self, data: Any, file: str, *args, **kwargs):
         """Save data to file asynchronously.

         Args:
@@ -357,8 +351,7 @@ class BaseStructure(BaseModel):
         """
         with ThreadPoolExecutor(max_workers=batch_size) as executor:
             futures = [
-                executor.submit(self.run, data)
-                for data in batched_data
+                executor.submit(self.run, data) for data in batched_data
             ]
             return [future.result() for future in futures]
@@ -418,9 +411,7 @@ class BaseStructure(BaseModel):
             _type_: _description_
         """
         self.monitor_resources()
-        return self.run_batched(
-            batched_data, batch_size, *args, **kwargs
-        )
+        return self.run_batched(batched_data, batch_size, *args, **kwargs)


 # x = BaseStructure()

@@ -361,9 +361,7 @@ class BaseSwarm(ABC):
             task (Optional[str], optional): _description_. Defaults to None.
         """
         loop = asyncio.get_event_loop()
-        result = loop.run_until_complete(
-            self.arun(task, *args, **kwargs)
-        )
+        result = loop.run_until_complete(self.arun(task, *args, **kwargs))
         return result

     def run_batch_async(self, tasks: List[str], *args, **kwargs):
@@ -533,9 +531,7 @@ class BaseSwarm(ABC):
             Agent: Instance of Agent representing the retrieved Agent, or None if not found.
         """

-    def join_swarm(
-        self, from_entity: Agent | Agent, to_entity: Agent
-    ):
+    def join_swarm(self, from_entity: Agent | Agent, to_entity: Agent):
         """
         Add a relationship between a Swarm and an Agent or other Swarm to the registry.

@@ -68,9 +68,7 @@ class BaseWorkflow(BaseStructure):
         elif tasks:
             self.task_pool.extend(tasks)
         else:
-            raise ValueError(
-                "You must provide a task or a list of tasks"
-            )
+            raise ValueError("You must provide a task or a list of tasks")

     def add_agent(self, agent: Agent, *args, **kwargs):
         return self.agent_pool(agent)
@@ -122,23 +120,17 @@ class BaseWorkflow(BaseStructure):
             Dict[str, Any]: The results of each task in the workflow
         """
         try:
-            return {
-                task.description: task.result for task in self.tasks
-            }
+            return {task.description: task.result for task in self.tasks}
         except Exception as error:
             print(
-                colored(
-                    f"Error getting task results: {error}", "red"
-                ),
+                colored(f"Error getting task results: {error}", "red"),
             )

     def remove_task(self, task: str) -> None:
         """Remove tasks from sequential workflow"""
         try:
             self.tasks = [
-                task
-                for task in self.tasks
-                if task.description != task
+                task for task in self.tasks if task.description != task
             ]
         except Exception as error:
             print(
@@ -177,9 +169,7 @@ class BaseWorkflow(BaseStructure):
                     task.kwargs.update(updates)
                     break
             else:
-                raise ValueError(
-                    f"Task {task} not found in workflow."
-                )
+                raise ValueError(f"Task {task} not found in workflow.")
         except Exception as error:
             print(
                 colored(
@@ -214,9 +204,7 @@ class BaseWorkflow(BaseStructure):
                     self.tasks.remove(task)
                     break
             else:
-                raise ValueError(
-                    f"Task {task} not found in workflow."
-                )
+                raise ValueError(f"Task {task} not found in workflow.")
         except Exception as error:
             print(
                 colored(
@@ -299,9 +287,7 @@ class BaseWorkflow(BaseStructure):
                 )
             )

-    def load_workflow_state(
-        self, filepath: str = None, **kwargs
-    ) -> None:
+    def load_workflow_state(self, filepath: str = None, **kwargs) -> None:
         """
         Loads the workflow state from a json file and restores the workflow state.

@@ -102,15 +102,11 @@ class BlocksList(BaseStructure):
         return [block for block in self.blocks if block.id == id]

     def get_by_parent(self, parent: str):
-        return [
-            block for block in self.blocks if block.parent == parent
-        ]
+        return [block for block in self.blocks if block.parent == parent]

     def get_by_parent_id(self, parent_id: str):
         return [
-            block
-            for block in self.blocks
-            if block.parent_id == parent_id
+            block for block in self.blocks if block.parent_id == parent_id
         ]

     def get_by_parent_name(self, parent_name: str):

@@ -16,9 +16,7 @@ class Company:
     shared_instructions: str = None
     ceo: Optional[Agent] = None
     agents: List[Agent] = field(default_factory=list)
-    agent_interactions: Dict[str, List[str]] = field(
-        default_factory=dict
-    )
+    agent_interactions: Dict[str, List[str]] = field(default_factory=dict)
     history: Conversation = field(default_factory=Conversation)

     def __post_init__(self):
@@ -46,9 +44,7 @@ class Company:
             self.agents.append(agent)

         except Exception as error:
-            logger.error(
-                f"[ERROR][CLASS: Company][METHOD: add] {error}"
-            )
+            logger.error(f"[ERROR][CLASS: Company][METHOD: add] {error}")
             raise error

     def get(self, agent_name: str) -> Agent:
@@ -73,9 +69,7 @@ class Company:
                 " company."
             )
         except Exception as error:
-            logger.error(
-                f"[ERROR][CLASS: Company][METHOD: get] {error}"
-            )
+            logger.error(f"[ERROR][CLASS: Company][METHOD: get] {error}")
             raise error

     def remove(self, agent: Agent) -> None:
@@ -118,9 +112,7 @@ class Company:
                 elif isinstance(node, list):
                     for agent in node:
                         if not isinstance(agent, Agent):
-                            raise ValueError(
-                                "Invalid agent in org chart"
-                            )
+                            raise ValueError("Invalid agent in org chart")
                         self.add(agent)

                     for i, agent in enumerate(node):
@@ -153,9 +145,7 @@ class Company:
         """
         if agent1.ai_name not in self.agents_interactions:
             self.agents_interactions[agent1.ai_name] = []
-        self.agents_interactions[agent1.ai_name].append(
-            agent2.ai_name
-        )
+        self.agents_interactions[agent1.ai_name].append(agent2.ai_name)

     def run(self):
         """

@@ -35,9 +35,7 @@ class ConcurrentWorkflow(BaseStructure):
     max_loops: int = 1
     max_workers: int = 5
     autosave: bool = False
-    saved_state_filepath: Optional[str] = (
-        "runs/concurrent_workflow.json"
-    )
+    saved_state_filepath: Optional[str] = "runs/concurrent_workflow.json"
     print_results: bool = False
     return_results: bool = False
     use_processes: bool = False
@@ -89,9 +87,7 @@ class ConcurrentWorkflow(BaseStructure):
             }
             results = []

-            for future in concurrent.futures.as_completed(
-                futures
-            ):
+            for future in concurrent.futures.as_completed(futures):
                 task = futures[future]
                 try:
                     result = future.result()

@@ -339,9 +339,7 @@ class Conversation(BaseStructure):
     def update_from_database(self, *args, **kwargs):
         """Update the conversation history from the database"""
-        self.database.update(
-            "conversation", self.conversation_history
-        )
+        self.database.update("conversation", self.conversation_history)

     def get_from_database(self, *args, **kwargs):
         """Get the conversation history from the database"""

@@ -140,9 +140,7 @@ class Debate:
         self.affirmative.system_prompt(
             self.save_file["player_meta_prompt"]
         )
-        self.negative.system_prompt(
-            self.save_file["player_meta_prompt"]
-        )
+        self.negative.system_prompt(self.save_file["player_meta_prompt"])
         self.moderator.system_prompt(
             self.save_file["moderator_meta_prompt"]
         )
@@ -191,14 +189,10 @@ class Debate:
     def save_file_to_json(self, id):
         now = datetime.now()
         current_time = now.strftime("%Y-%m-%d_%H:%M:%S")
-        save_file_path = os.path.join(
-            self.save_file_dir, f"{id}.json"
-        )
+        save_file_path = os.path.join(self.save_file_dir, f"{id}.json")

         self.save_file["end_time"] = current_time
-        json_str = json.dumps(
-            self.save_file, ensure_ascii=False, indent=4
-        )
+        json_str = json.dumps(self.save_file, ensure_ascii=False, indent=4)
         with open(save_file_path, "w") as f:
             f.write(json_str)

@@ -126,15 +126,11 @@ class GraphWorkflow(BaseStructure):
         if from_node in self.graph:
             for condition_value, to_node in edge_dict.items():
                 if to_node in self.graph:
-                    self.graph[from_node]["edges"][
-                        to_node
-                    ] = condition
+                    self.graph[from_node]["edges"][to_node] = condition
                 else:
                     raise ValueError("Node does not exist in graph")
         else:
-            raise ValueError(
-                f"Node {from_node} does not exist in graph"
-            )
+            raise ValueError(f"Node {from_node} does not exist in graph")

     def run(self):
         """
@@ -160,9 +156,7 @@ class GraphWorkflow(BaseStructure):
             ValueError: _description_
         """
         if node_name not in self.graph:
-            raise ValueError(
-                f"Node {node_name} does not exist in graph"
-            )
+            raise ValueError(f"Node {node_name} does not exist in graph")

     def _check_nodes_exist(self, from_node, to_node):
         """

@@ -51,8 +51,7 @@ class GroupChat:
     def next_agent(self, agent: Agent) -> Agent:
         """Return the next agent in the list."""
         return self.agents[
-            (self.agent_names.index(agent.name) + 1)
-            % len(self.agents)
+            (self.agent_names.index(agent.name) + 1) % len(self.agents)
         ]

     def select_speaker_msg(self):
@@ -122,9 +121,7 @@ class GroupChat:
         """
         formatted_messages = []
         for message in messages:
-            formatted_message = (
-                f"'{message['role']}:{message['content']}"
-            )
+            formatted_message = f"'{message['role']}:{message['content']}"
             formatted_messages.append(formatted_message)
         return "\n".join(formatted_messages)

@@ -165,9 +165,7 @@ class MajorityVoting:
         # If autosave is enabled, save the conversation to a file
         if self.autosave:
-            create_file(
-                str(self.conversation), "majority_voting.json"
-            )
+            create_file(str(self.conversation), "majority_voting.json")

         # Log the agents
         logger.info("Initializing majority voting system")
@@ -224,9 +222,7 @@ class MajorityVoting:
         # If an output parser is provided, parse the responses
         if self.output_parser is not None:
-            majority_vote = self.output_parser(
-                responses, *args, **kwargs
-            )
+            majority_vote = self.output_parser(responses, *args, **kwargs)
         else:
             majority_vote = majority_voting(responses)
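The majority_voting(responses) fallback above picks the most common answer among the agents' responses. One conventional way to implement such a helper (a sketch; the library's actual function may differ):

from collections import Counter


def majority_voting(responses: list[str]) -> str:
    # most_common(1) yields [(answer, count)] for the top answer;
    # ties resolve to the answer counted first.
    return Counter(responses).most_common(1)[0][0]


print(majority_voting(["A", "B", "A"]))  # -> "A"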
