pull/437/head
Kye 9 months ago
parent 91b2291784
commit 9d28535393

@ -1,12 +1,11 @@
from swarms import Agent, Anthropic
## Initialize the workflow
# Initialize the agent
agent = Agent(
agent_name="Transcript Generator",
agent_description=(
"Generate a transcript for a youtube video on what swarms"
" are!"
"Generate a transcript for a youtube video on what swarms" " are!"
),
llm=Anthropic(),
max_loops=3,
@ -18,5 +17,5 @@ agent = Agent(
interactive=True,
)
# Run the workflow on a task
# Run the Agent on a task
agent("Generate a transcript for a youtube video on what swarms are!")

@ -2,7 +2,7 @@ from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
from swarms.utils.json_utils import base_model_to_json
from swarms.tools.json_utils import base_model_to_json
# Model name
model_name = "CohereForAI/c4ai-command-r-v01-4bit"
@ -28,9 +28,7 @@ class APIExampleRequestSchema(BaseModel):
headers: dict = Field(
..., description="The headers for the example request"
)
body: dict = Field(
..., description="The body of the example request"
)
body: dict = Field(..., description="The body of the example request")
response: dict = Field(
...,
description="The expected response of the example request",

@ -14,8 +14,7 @@ def search_api(query: str, max_results: int = 10):
agent = Agent(
agent_name="Youtube Transcript Generator",
agent_description=(
"Generate a transcript for a youtube video on what swarms"
" are!"
"Generate a transcript for a youtube video on what swarms" " are!"
),
llm=Anthropic(),
max_loops="auto",

@ -2,7 +2,7 @@ from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
from swarms.utils.json_utils import base_model_to_json
from swarms.tools.json_utils import base_model_to_json
# Model name
model_name = "ai21labs/Jamba-v0.1"
@ -28,9 +28,7 @@ class APIExampleRequestSchema(BaseModel):
headers: dict = Field(
..., description="The headers for the example request"
)
body: dict = Field(
..., description="The body of the example request"
)
body: dict = Field(..., description="The body of the example request")
response: dict = Field(
...,
description="The expected response of the example request",

@ -4,9 +4,7 @@ load_dict = {"ImageCaptioning": "cuda"}
node = MultiModalAgent(load_dict)
text = node.run_text(
"What is your name? Generate a picture of yourself"
)
text = node.run_text("What is your name? Generate a picture of yourself")
img = node.run_img("/image1", "What is this image about?")

@ -2,7 +2,7 @@ from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
from swarms.utils.json_utils import base_model_to_json
from swarms.tools.json_utils import base_model_to_json
# Load the pre-trained model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
@ -17,9 +17,7 @@ tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
class Schema(BaseModel):
name: str = Field(..., title="Name of the person")
agent: int = Field(..., title="Age of the person")
is_student: bool = Field(
..., title="Whether the person is a student"
)
is_student: bool = Field(..., title="Whether the person is a student")
courses: list[str] = Field(
..., title="List of courses the person is taking"
)
@ -29,9 +27,7 @@ class Schema(BaseModel):
tool_schema = base_model_to_json(Schema)
# Define the task to generate a person's information
task = (
"Generate a person's information based on the following schema:"
)
task = "Generate a person's information based on the following schema:"
# Create an instance of the ToolAgent class
agent = ToolAgent(

@ -4,7 +4,7 @@ from dotenv import load_dotenv
from pydantic import BaseModel, Field
from swarms import OpenAIChat, ToolAgent
from swarms.utils.json_utils import base_model_to_json
from swarms.tools.json_utils import base_model_to_json
# Load the environment variables
load_dotenv()
@ -19,9 +19,7 @@ chat = OpenAIChat(
class Schema(BaseModel):
name: str = Field(..., title="Name of the person")
agent: int = Field(..., title="Age of the person")
is_student: bool = Field(
..., title="Whether the person is a student"
)
is_student: bool = Field(..., title="Whether the person is a student")
courses: list[str] = Field(
..., title="List of courses the person is taking"
)
@ -31,9 +29,7 @@ class Schema(BaseModel):
tool_schema = base_model_to_json(Schema)
# Define the task to generate a person's information
task = (
"Generate a person's information based on the following schema:"
)
task = "Generate a person's information based on the following schema:"
# Create an instance of the ToolAgent class
agent = ToolAgent(

@ -34,9 +34,7 @@ def text_to_video(task: str):
step = 4 # Options: [1,2,4,8]
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
base = ( # Choose your favorite base model.
"emilianJR/epiCRealism"
)
base = "emilianJR/epiCRealism" # Choose your favorite base model.
adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(

@ -61,16 +61,12 @@ class ProductAdConceptGenerator:
"in an ice cave setting",
"in a serene and calm landscape",
]
self.contexts = [
"high realism product ad (extremely creative)"
]
self.contexts = ["high realism product ad (extremely creative)"]
def generate_concept(self):
theme = random.choice(self.themes)
context = random.choice(self.contexts)
return (
f"{theme} inside a {style} {self.product_name}, {context}"
)
return f"{theme} inside a {style} {self.product_name}, {context}"
# User input

@ -31,9 +31,7 @@ def test_find_most_similar_podcasts():
graph = create_graph()
weight_edges(graph)
user_list = create_user_list()
most_similar_podcasts = find_most_similar_podcasts(
graph, user_list
)
most_similar_podcasts = find_most_similar_podcasts(graph, user_list)
assert isinstance(most_similar_podcasts, list)

@ -45,9 +45,7 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
) as executor:
futures = []
for i, (fn, args, kwargs) in enumerate(callable_functions):
futures.append(
executor.submit(worker, fn, args, kwargs, i)
)
futures.append(executor.submit(worker, fn, args, kwargs, i))
# Wait for all threads to complete
concurrent.futures.wait(futures)
@ -56,9 +54,7 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
# Adjusting the function to extract specific column values
def extract_and_create_agents(
csv_file_path: str, target_columns: list
):
def extract_and_create_agents(csv_file_path: str, target_columns: list):
"""
Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
creates an Agent for each, and adds it to the swarm network.
@ -138,8 +134,7 @@ def extract_and_create_agents(
# Log the agent
logger.info(
f"Agent created: {agent_name} with long term"
" memory"
f"Agent created: {agent_name} with long term" " memory"
)
agents.append(agent)

@ -16,9 +16,7 @@ def test_pass():
def test_invalid_sports():
assert (
vocal.generate_video(
"I just ate some delicious tacos", "tacos"
)
vocal.generate_video("I just ate some delicious tacos", "tacos")
== "Invalid sports entered!! Please enter a valid sport."
)

@ -51,6 +51,4 @@ algorithmic_psuedocode_agent = paper_summarizer_agent.run(
"Focus on creating the algorithmic pseudocode for the novel"
f" method in this paper: {paper}"
)
pytorch_code = paper_implementor_agent.run(
algorithmic_psuedocode_agent
)
pytorch_code = paper_implementor_agent.run(algorithmic_psuedocode_agent)

@ -55,9 +55,7 @@ class AutoBlogGenSwarm:
):
self.llm = llm()
self.topic_selection_task = topic_selection_task
self.topic_selection_agent_prompt = (
topic_selection_agent_prompt
)
self.topic_selection_agent_prompt = topic_selection_agent_prompt
self.objective = objective
self.iterations = iterations
self.max_retries = max_retries
@ -93,9 +91,7 @@ class AutoBlogGenSwarm:
def step(self):
"""Steps through the task"""
topic_selection_agent = self.llm(
self.topic_selection_agent_prompt
)
topic_selection_agent = self.llm(self.topic_selection_agent_prompt)
topic_selection_agent = self.print_beautifully(
"Topic Selection Agent", topic_selection_agent
)
@ -105,9 +101,7 @@ class AutoBlogGenSwarm:
# Agent that reviews the draft
review_agent = self.llm(self.get_review_prompt(draft_blog))
review_agent = self.print_beautifully(
"Review Agent", review_agent
)
review_agent = self.print_beautifully("Review Agent", review_agent)
# Agent that publishes on social media
distribution_agent = self.llm(

@ -48,11 +48,7 @@ class AutoTemp:
"""
score_text = self.llm(eval_prompt, temperature=0.5)
score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
return (
round(float(score_match.group()), 1)
if score_match
else 0.0
)
return round(float(score_match.group()), 1) if score_match else 0.0
def run(self, prompt, temperature_string):
print("Starting generation process...")

@ -56,16 +56,12 @@ class BlogGen:
)
chosen_topic = topic_output.split("\n")[0]
print(
colored("Selected topic: " + chosen_topic, "yellow")
)
print(colored("Selected topic: " + chosen_topic, "yellow"))
# Initial draft generation with AutoTemp
initial_draft_prompt = (
self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
"{{CHOSEN_TOPIC}}", chosen_topic
)
)
auto_temp_output = self.auto_temp.run(
initial_draft_prompt, self.temperature_range
)

@ -12,9 +12,7 @@ api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABILITY_API_KEY")
# Initialize language model
llm = OpenAIChat(
openai_api_key=api_key, temperature=0.5, max_tokens=3000
)
llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)
# User preferences (can be dynamically set in a real application)
user_preferences = {
@ -30,9 +28,7 @@ curriculum_prompt = edu_prompts.CURRICULUM_DESIGN_PROMPT.format(
interactive_prompt = edu_prompts.INTERACTIVE_LEARNING_PROMPT.format(
**user_preferences
)
sample_prompt = edu_prompts.SAMPLE_TEST_PROMPT.format(
**user_preferences
)
sample_prompt = edu_prompts.SAMPLE_TEST_PROMPT.format(**user_preferences)
image_prompt = edu_prompts.IMAGE_GENERATION_PROMPT.format(
**user_preferences
)
@ -49,9 +45,7 @@ workflow = SequentialWorkflow(max_loops=1)
# Add tasks to workflow with personalized prompts
workflow.add(curriculum_agent, "Generate a curriculum")
workflow.add(
interactive_learning_agent, "Generate an interactive lesson"
)
workflow.add(interactive_learning_agent, "Generate an interactive lesson")
workflow.add(sample_lesson_agent, "Generate a practice test")
# Execute the workflow for text-based tasks

@ -11,9 +11,7 @@ from swarms.structs import Agent
load_dotenv()
FEATURE = (
"Implement an all-new signup system in typescript using supabase"
)
FEATURE = "Implement an all-new signup system in typescript using supabase"
CODEBASE = """
import React, { useState } from 'react';
@ -68,9 +66,7 @@ feature_implementer_backend = Agent(
)
# Create another agent for a different task
tester_agent = Agent(
llm=llm, max_loops=1, sop=TEST_SOP, autosave=True
)
tester_agent = Agent(llm=llm, max_loops=1, sop=TEST_SOP, autosave=True)
# Create another agent for a different task
documenting_agent = Agent(

@ -44,9 +44,7 @@ class Idea2Image(Agent):
print(f"Generated image at: {img}")
analysis = (
self.vision_api.run(img, current_prompt)
if img
else None
self.vision_api.run(img, current_prompt) if img else None
)
if analysis:
current_prompt += (
@ -147,9 +145,7 @@ gpt_api = OpenAIChat(openai_api_key=openai_api_key)
# Define the modified Idea2Image class here
# Streamlit UI layout
st.title(
"Explore the infinite Multi-Modal Idea Space with Idea2Image"
)
st.title("Explore the infinite Multi-Modal Idea Space with Idea2Image")
user_prompt = st.text_input("Prompt for image generation:")
num_iterations = st.number_input(
"Enter the number of iterations for image improvement:",
@ -168,9 +164,7 @@ if st.button("Generate Image"):
user_prompt, num_iterations, run_folder
)
for i, (enriched_prompt, img_path, analysis) in enumerate(
results
):
for i, (enriched_prompt, img_path, analysis) in enumerate(results):
st.write(f"Iteration {i+1}:")
st.write("Enriched Prompt:", enriched_prompt)
if img_path:

@ -96,9 +96,7 @@ for _ in range(max_iterations):
# Evaluate the image by passing the file path
score = evaluate_img(llm, task, img_path)
print(
colored(
f"Evaluated Image Score: {score} for {img_path}", "cyan"
)
colored(f"Evaluated Image Score: {score} for {img_path}", "cyan")
)
# Update the best score and image path if necessary

@ -77,9 +77,7 @@ def generate_integrated_shopping_list(
meal_plan_output, image_analysis, user_preferences
):
# Prepare the prompt for the LLM
fridge_contents = image_analysis["choices"][0]["message"][
"content"
]
fridge_contents = image_analysis["choices"][0]["message"]["content"]
prompt = (
f"Based on this meal plan: {meal_plan_output}, and the"
f" following items in the fridge: {fridge_contents},"
@ -131,9 +129,7 @@ print("Integrated Shopping List:", integrated_shopping_list)
with open("nutrition_output.txt", "w") as file:
file.write("Meal Plan:\n" + meal_plan_output + "\n\n")
file.write(
"Integrated Shopping List:\n"
+ integrated_shopping_list
+ "\n"
"Integrated Shopping List:\n" + integrated_shopping_list + "\n"
)
print("Outputs have been saved to nutrition_output.txt")

@ -42,9 +42,7 @@ def get_review_prompt(article):
return prompt
def social_media_prompt(
article: str, goal: str = "Clicks and engagement"
):
def social_media_prompt(article: str, goal: str = "Clicks and engagement"):
prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
"{{ARTICLE}}", article
).replace("{{GOAL}}", goal)

@ -24,9 +24,7 @@ async def handle_websocket(websocket, path):
# Broadcast the message to all other users in the public group chats.
for other_websocket in public_group_chats:
if other_websocket != websocket:
await other_websocket.send(
f"{username}: {message}"
)
await other_websocket.send(f"{username}: {message}")
finally:
# Remove the user from the list of public group chats.
public_group_chats.remove(websocket)

@ -48,9 +48,7 @@ def generate_conversation(characters, topic):
# Generate the conversation
conversation = generate_conversation(
character_names, conversation_topic
)
conversation = generate_conversation(character_names, conversation_topic)
# Play the conversation
for line in conversation:

@ -48,9 +48,7 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
) as executor:
futures = []
for i, (fn, args, kwargs) in enumerate(callable_functions):
futures.append(
executor.submit(worker, fn, args, kwargs, i)
)
futures.append(executor.submit(worker, fn, args, kwargs, i))
# Wait for all threads to complete
concurrent.futures.wait(futures)
@ -59,9 +57,7 @@ def execute_concurrently(callable_functions: callable, max_workers=5):
# Adjusting the function to extract specific column values
def extract_and_create_agents(
csv_file_path: str, target_columns: list
):
def extract_and_create_agents(csv_file_path: str, target_columns: list):
"""
Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
creates an Agent for each, and adds it to the swarm network.

@ -31,9 +31,7 @@ llm = GPT4VisionAPI(openai_api_key=api_key, max_tokens=2000)
assembly_line = (
"playground/demos/swarm_of_mma_manufacturing/assembly_line.jpg"
)
red_robots = (
"playground/demos/swarm_of_mma_manufacturing/red_robots.jpg"
)
red_robots = "playground/demos/swarm_of_mma_manufacturing/red_robots.jpg"
robots = "playground/demos/swarm_of_mma_manufacturing/robots.jpg"
tesla_assembly_line = (
"playground/demos/swarm_of_mma_manufacturing/tesla_assembly.jpg"
@ -127,31 +125,19 @@ health_check = health_security_agent.run(
print(
colored(
"--------------- Productivity agents initializing...", "green"
)
colored("--------------- Productivity agents initializing...", "green")
)
# Add the third task to the productivity_check_agent
productivity_check = productivity_check_agent.run(
health_check, assembly_line
)
print(
colored(
"--------------- Security agents initializing...", "green"
)
)
print(colored("--------------- Security agents initializing...", "green"))
# Add the fourth task to the security_check_agent
security_check = security_check_agent.run(
productivity_check, red_robots
)
security_check = security_check_agent.run(productivity_check, red_robots)
print(
colored(
"--------------- Efficiency agents initializing...", "cyan"
)
)
print(colored("--------------- Efficiency agents initializing...", "cyan"))
# Add the fifth task to the efficiency_check_agent
efficiency_check = efficiency_check_agent.run(
security_check, tesla_assembly_line

@ -12,9 +12,7 @@ api_key = os.getenv("OPENAI_API_KEY")
stability_api_key = os.getenv("STABILITY_API_KEY")
# Initialize language model
llm = OpenAIChat(
openai_api_key=api_key, temperature=0.5, max_tokens=3000
)
llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)
# Initialize Vision model
vision_api = GPT4VisionAPI(api_key=api_key)
@ -51,17 +49,13 @@ workflow = SequentialWorkflow(max_loops=1)
# Add tasks to workflow with personalized prompts
workflow.add(architecture_analysis_agent, "Architecture Analysis")
workflow.add(
infrastructure_evaluation_agent, "Infrastructure Evaluation"
)
workflow.add(infrastructure_evaluation_agent, "Infrastructure Evaluation")
workflow.add(traffic_flow_analysis_agent, "Traffic Flow Analysis")
workflow.add(
environmental_impact_assessment_agent,
"Environmental Impact Assessment",
)
workflow.add(
public_space_utilization_agent, "Public Space Utilization"
)
workflow.add(public_space_utilization_agent, "Public Space Utilization")
workflow.add(
socioeconomic_impact_analysis_agent,
"Socioeconomic Impact Analysis",

@ -8,9 +8,7 @@ model = QwenVLMultiModal(
)
# Run the model
response = model(
"Hello, how are you?", "https://example.com/image.jpg"
)
response = model("Hello, how are you?", "https://example.com/image.jpg")
# Print the response
print(response)

@ -3,9 +3,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
# Load the pre-trained model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
"databricks/dolly-v2-12b"
)
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
# Define a JSON schema for person's information
@ -20,9 +18,7 @@ json_schema = {
}
# Define the task to generate a person's information
task = (
"Generate a person's information based on the following schema:"
)
task = "Generate a person's information based on the following schema:"
# Create an instance of the ToolAgent class
agent = ToolAgent(

@ -8,9 +8,7 @@ model = QwenVLMultiModal(
)
# Run the model
response = model(
"Hello, how are you?", "https://example.com/image.jpg"
)
response = model("Hello, how are you?", "https://example.com/image.jpg")
# Print the response
print(response)

@ -7,6 +7,4 @@ model = TogetherLLM(
)
# Run the model
model.run(
"Generate a blog post about the best way to make money online."
)
model.run("Generate a blog post about the best way to make money online.")

@ -35,9 +35,7 @@ class DialogueAgent:
[
self.system_message,
HumanMessage(
content="\n".join(
self.message_history + [self.prefix]
)
content="\n".join(self.message_history + [self.prefix])
),
]
)
@ -76,9 +74,7 @@ class DialogueSimulator:
def step(self) -> tuple[str, str]:
# 1. choose the next speaker
speaker_idx = self.select_next_speaker(
self._step, self.agents
)
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
# 2. next speaker sends message
@ -116,9 +112,7 @@ class BiddingDialogueAgent(DialogueAgent):
message_history="\n".join(self.message_history),
recent_message=self.message_history[-1],
)
bid_string = self.model(
[SystemMessage(content=prompt)]
).content
bid_string = self.model([SystemMessage(content=prompt)]).content
return bid_string
@ -140,10 +134,12 @@ player_descriptor_system_message = SystemMessage(
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(content=f"""{game_description}
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities.
Speak directly to {character_name}.
Do not add anything else."""),
Do not add anything else."""
),
]
character_description = ChatOpenAI(temperature=1.0)(
character_specifier_prompt
@ -161,10 +157,9 @@ Your goal is to be as creative as possible and make the voters think you are the
"""
def generate_character_system_message(
character_name, character_header
):
return SystemMessage(content=f"""{character_header}
def generate_character_system_message(character_name, character_header):
return SystemMessage(
content=f"""{character_header}
You will speak in the style of {character_name}, and exaggerate their personality.
You will come up with creative ideas related to {topic}.
Do not say the same things over and over again.
@ -176,7 +171,8 @@ Speak only from the perspective of {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
""")
"""
)
character_descriptions = [
@ -190,9 +186,7 @@ character_headers = [
)
]
character_system_messages = [
generate_character_system_message(
character_name, character_headers
)
generate_character_system_message(character_name, character_headers)
for character_name, character_headers in zip(
character_names, character_headers
)
@ -261,7 +255,8 @@ for character_name, bidding_template in zip(
topic_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(content=f"""{game_description}
HumanMessage(
content=f"""{game_description}
You are the debate moderator.
Please make the debate topic more specific.
@ -269,7 +264,8 @@ topic_specifier_prompt = [
Be creative and imaginative.
Please reply with the specified topic in {word_limit} words or less.
Speak directly to the presidential candidates: {*character_names,}.
Do not add anything else."""),
Do not add anything else."""
),
]
specified_topic = ChatOpenAI(temperature=1.0)(
topic_specifier_prompt
@ -298,9 +294,7 @@ def ask_for_bid(agent) -> str:
return bid
def select_next_speaker(
step: int, agents: List[DialogueAgent]
) -> int:
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
bids = []
for agent in agents:
bid = ask_for_bid(agent)

@ -44,7 +44,5 @@ manager = Agent(
agents = [flow1, flow2, flow3]
group_chat = GroupChat(agents=agents, messages=[], max_round=10)
chat_manager = GroupChatManager(
groupchat=group_chat, selector=manager
)
chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
chat_history = chat_manager("Write me a riddle")

@ -6,7 +6,7 @@ from swarms import Agent, OpenAIChat
from swarms.agents.multion_agent import MultiOnAgent
from swarms.memory.chroma_db import ChromaDB
from swarms.tools.tool import tool
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.tools.code_interpreter import SubprocessCodeInterpreter
# Load the environment variables
load_dotenv()

@ -8,9 +8,7 @@ agent3 = Agent(llm=OpenAIChat(), agent_name="agent3")
moderator = Agent(agent_name="moderator")
agents = [agent1, agent2, agent3]
message_pool = MessagePool(
agents=agents, moderator=moderator, turns=5
)
message_pool = MessagePool(agents=agents, moderator=moderator, turns=5)
message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)

@ -7,9 +7,7 @@ node = Worker(
# Instantiate the Orchestrator with 10 agents
orchestrator = Orchestrator(
node, agent_list=[node] * 10, task_queue=[]
)
orchestrator = Orchestrator(node, agent_list=[node] * 10, task_queue=[])
# Agent 7 sends a message to Agent 9
orchestrator.chat(

@ -21,9 +21,7 @@ json_schema = {
}
# Define the task to generate a person's information
task = (
"Generate a person's information based on the following schema:"
)
task = "Generate a person's information based on the following schema:"
# Create an instance of the ToolAgent class
agent = ToolAgent(

@ -100,9 +100,7 @@ class PythonDocumentationSwarm:
with open(file_path, "w") as file:
file.write(doc_content)
logger.info(
f"Documentation generated for {item.__name__}."
)
logger.info(f"Documentation generated for {item.__name__}.")
except Exception as e:
logger.error(
f"Error processing documentation for {item.__name__}."
@ -130,8 +128,7 @@ class PythonDocumentationSwarm:
thread.join()
logger.info(
"Documentation generated in 'swarms.structs'"
" directory."
"Documentation generated in 'swarms.structs'" " directory."
)
except Exception as e:
logger.error("Error running documentation process.")
@ -143,8 +140,7 @@ class PythonDocumentationSwarm:
executor.map(self.process_documentation, python_items)
logger.info(
"Documentation generated in 'swarms.structs'"
" directory."
"Documentation generated in 'swarms.structs'" " directory."
)
except Exception as e:
logger.error("Error running documentation process.")

@ -4,7 +4,7 @@ B -> W1, W2, W3
"""
from typing import List, Optional
from pydantic import BaseModel, Field
from swarms.utils.json_utils import str_to_json
from swarms.tools.json_utils import str_to_json
class HierarchicalSwarm(BaseModel):

@ -35,7 +35,6 @@ agent = Agent(
)
out = agent.run(
"Use the search api to find the best restaurants in New York"
" City."
"Use the search api to find the best restaurants in New York" " City."
)
print(out)

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "4.8.2"
version = "4.8.4"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@ -35,25 +35,22 @@ python = ">=3.9,<4.0"
torch = ">=2.1.1,<3.0"
transformers = ">= 4.39.0, <5.0.0"
asyncio = ">=3.4.3,<4.0"
einops = "0.7.0"
langchain-core = "0.1.33"
langchain-community = "0.0.29"
langchain-experimental = "0.0.55"
backoff = "2.2.1"
toml = "*"
pypdf = "4.1.0"
httpx = "0.24.1"
ratelimit = "2.2.1"
loguru = "0.7.2"
pydantic = "2.6.4"
tenacity = "8.2.3"
Pillow = "10.2.0"
rich = "13.5.2"
psutil = "*"
sentry-sdk = "*"
python-dotenv = "*"
accelerate = "0.28.0"
opencv-python = "^4.9.0.80"
yaml = "*"
[tool.poetry.group.lint.dependencies]
black = "^23.1.0"
@ -71,7 +68,7 @@ pandas = "^2.2.2"
fastapi = "^0.110.1"
[tool.ruff]
line-length = 128
line-length = 75
[tool.ruff.lint]
select = ["E", "F", "W", "I", "UP"]
@ -84,6 +81,21 @@ preview = true
"swarms/prompts/**.py" = ["E501"]
[tool.black]
line-length = 70
target-version = ['py38']
preview = true
target-version = ["py38"]
line-length = 75
include = '\.pyi?$'
exclude = '''
/(
\.git
| \.hg
| \.mypy_cache
| \.tox
| \.venv
| _build
| buck-out
| build
| dist
| docs
)/
'''

@ -2,8 +2,6 @@
torch>=2.1.1,<3.0
transformers==4.39.0
asyncio>=3.4.3,<4.0
einops==0.7.0
langchain-core==0.1.33
langchain-community==0.0.29
langchain-experimental==0.0.55
backoff==2.2.1

@ -52,9 +52,7 @@ def main():
# Gathering all functions from the swarms.utils module
functions = [
obj
for name, obj in inspect.getmembers(
sys.modules["swarms.utils"]
)
for name, obj in inspect.getmembers(sys.modules["swarms.utils"])
if inspect.isfunction(obj)
]

@ -57,9 +57,7 @@ def process_documentation(
with open(file_path, "w") as file:
file.write(doc_content)
print(
f"Processed documentation for {item.__name__}. at {file_path}"
)
print(f"Processed documentation for {item.__name__}. at {file_path}")
def main(module: str = "docs/swarms/structs"):

@ -68,9 +68,7 @@ def create_test(cls):
# Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content)
processed_content = model(
TEST_WRITER_SOP_PROMPT(
input_content, "swarms", "swarms.memory"
)
TEST_WRITER_SOP_PROMPT(input_content, "swarms", "swarms.memory")
)
processed_content = extract_code_from_markdown(processed_content)

@ -57,9 +57,7 @@ def main():
# Gathering all functions from the swarms.utils module
functions = [
obj
for name, obj in inspect.getmembers(
sys.modules["swarms.utils"]
)
for name, obj in inspect.getmembers(sys.modules["swarms.utils"])
if inspect.isfunction(obj)
]

@ -22,9 +22,7 @@ def generate_file_list(directory, output_file):
# Remove the file extension
file_name, _ = os.path.splitext(file)
# Write the file name and path to the output file
f.write(
f'- {file_name}: "swarms/utils/{file_path}"\n'
)
f.write(f'- {file_name}: "swarms/utils/{file_path}"\n')
# Use the function to generate the file list

@ -13,18 +13,15 @@ def get_package_versions(requirements_path, output_path):
for requirement in requirements:
# Skip empty lines and comments
if (
requirement.strip() == ""
or requirement.strip().startswith("#")
if requirement.strip() == "" or requirement.strip().startswith(
"#"
):
continue
# Extract package name
package_name = requirement.split("==")[0].strip()
try:
version = pkg_resources.get_distribution(
package_name
).version
version = pkg_resources.get_distribution(package_name).version
package_versions.append(f"{package_name}=={version}")
except pkg_resources.DistributionNotFound:
package_versions.append(f"{package_name}: not installed")

@ -147,7 +147,5 @@ class ToolAgent(Agent):
)
except Exception as error:
logger.error(
f"Error running {self.name} for task: {task}"
)
logger.error(f"Error running {self.name} for task: {task}")
raise error

@ -69,15 +69,11 @@ def cosine_similarity_top_k(
score_array = cosine_similarity(X, Y)
score_threshold = score_threshold or -1.0
score_array[score_array < score_threshold] = 0
top_k = min(
top_k or len(score_array), np.count_nonzero(score_array)
)
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[
-top_k:
top_k = min(top_k or len(score_array), np.count_nonzero(score_array))
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][
::-1
]
top_k_idxs = top_k_idxs[
np.argsort(score_array.ravel()[top_k_idxs])
][::-1]
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
return list(zip(*ret_idxs)), scores # type: ignore
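
As a quick illustration of the top-k selection used in cosine_similarity_top_k above, here is a self-contained sketch over a hypothetical 2x3 score matrix (the values are made up for the example): argpartition gathers the k largest entries of the flattened array, the argsort slice orders just those k entries in descending order, and unravel_index maps the flat positions back to (row, column) pairs.

import numpy as np

score_array = np.array([[0.1, 0.9, 0.4], [0.7, 0.2, 0.8]])  # hypothetical scores
top_k = 3

# Flat indices of the k largest scores (in no particular order yet)
top_k_idxs = np.argpartition(score_array, -top_k, axis=None)[-top_k:]
# Sort those k flat indices by score, descending
top_k_idxs = top_k_idxs[np.argsort(score_array.ravel()[top_k_idxs])][::-1]
# Recover (row, col) coordinates and the matching scores
ret_idxs = np.unravel_index(top_k_idxs, score_array.shape)
scores = score_array.ravel()[top_k_idxs].tolist()
print(list(zip(*ret_idxs)), scores)  # best hits (0, 1), (1, 2), (1, 0) with scores 0.9, 0.8, 0.7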

@ -44,9 +44,7 @@ class DictSharedMemory:
entry_id = str(uuid.uuid4())
data = {}
epoch = datetime.datetime.utcfromtimestamp(0)
epoch = (
datetime.datetime.utcnow() - epoch
).total_seconds()
epoch = (datetime.datetime.utcnow() - epoch).total_seconds()
data[entry_id] = {
"agent": agent_id,
"epoch": epoch,

@ -170,9 +170,7 @@ class LangchainChromaVectorMemory(AbstractVectorDatabase):
)
texts = [text.page_content for text in texts]
elif type == "cos":
texts = self.db.similarity_search_with_score(
query=query, k=k
)
texts = self.db.similarity_search_with_score(query=query, k=k)
texts = [
text[0].page_content
for text in texts

@ -34,9 +34,7 @@ class PostgresDB(AbstractVectorDatabase):
table_name (str): The name of the table in the database.
"""
self.engine = create_engine(
connection_string, *args, **kwargs
)
self.engine = create_engine(connection_string, *args, **kwargs)
self.table_name = table_name
self.VectorModel = self._create_vector_model()

@ -123,9 +123,7 @@ class PineconeDB(AbstractVectorDatabase):
Returns:
str: _description_
"""
vector_id = (
vector_id if vector_id else str_to_hash(str(vector))
)
vector_id = vector_id if vector_id else str_to_hash(str(vector))
params = {"namespace": namespace} | kwargs

@ -40,9 +40,7 @@ class ShortTermMemory(BaseStructure):
self.medium_term_memory = []
self.lock = threading.Lock()
def add(
self, role: str = None, message: str = None, *args, **kwargs
):
def add(self, role: str = None, message: str = None, *args, **kwargs):
"""Add a message to the short term memory.
Args:
@ -160,9 +158,7 @@ class ShortTermMemory(BaseStructure):
with open(filename, "w") as f:
json.dump(
{
"short_term_memory": (
self.short_term_memory
),
"short_term_memory": (self.short_term_memory),
"medium_term_memory": (
self.medium_term_memory
),
@ -184,9 +180,7 @@ class ShortTermMemory(BaseStructure):
with self.lock:
with open(filename) as f:
data = json.load(f)
self.short_term_memory = data.get(
"short_term_memory", []
)
self.short_term_memory = data.get("short_term_memory", [])
self.medium_term_memory = data.get(
"medium_term_memory", []
)

@ -5,9 +5,7 @@ from swarms.memory.base_vectordb import AbstractVectorDatabase
try:
import sqlite3
except ImportError:
raise ImportError(
"Please install sqlite3 to use the SQLiteDB class."
)
raise ImportError("Please install sqlite3 to use the SQLiteDB class.")
class SQLiteDB(AbstractVectorDatabase):

@ -126,9 +126,7 @@ class WeaviateDB(AbstractVectorDatabase):
print(f"Error adding object: {error}")
raise
def query(
self, collection_name: str, query: str, limit: int = 10
):
def query(self, collection_name: str, query: str, limit: int = 10):
"""Query objects from a specified collection.
Args:

@ -25,9 +25,7 @@ class BaseEmbeddingModel(
tokenizer: Callable = None
chunker: Callable = None
def embed_text_artifact(
self, artifact: TextArtifact
) -> list[float]:
def embed_text_artifact(self, artifact: TextArtifact) -> list[float]:
return self.embed_string(artifact.to_text())
def embed_string(self, string: str) -> list[float]:

@ -154,15 +154,11 @@ class AbstractLLM(ABC):
Returns:
_type_: _description_
"""
return await asyncio.gather(
*(self.arun(task) for task in tasks)
)
return await asyncio.gather(*(self.arun(task) for task in tasks))
def chat(self, task: str, history: str = "") -> str:
"""Chat with the model"""
complete_task = (
task + " | " + history
) # Delimiter for clarity
complete_task = task + " | " + history # Delimiter for clarity
return self.run(complete_task)
def __call__(self, task: str) -> str:
@ -209,9 +205,7 @@ class AbstractLLM(ABC):
def log_event(self, message: str):
"""Log an event."""
logging.info(
f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {message}"
)
logging.info(f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {message}")
def save_checkpoint(self, checkpoint_dir: str = "checkpoints"):
"""Save the model state."""

@ -135,9 +135,7 @@ class BaseMultiModalModel:
image_pil = Image.open(BytesIO(response.content))
return image_pil
except requests.RequestException as error:
print(
f"Error fetching image from {img} and error: {error}"
)
print(f"Error fetching image from {img} and error: {error}")
return None
def encode_img(self, img: str):
@ -190,9 +188,7 @@ class BaseMultiModalModel:
"""Clear the chat history"""
self.chat_history = []
def run_many(
self, tasks: List[str], imgs: List[str], *args, **kwargs
):
def run_many(self, tasks: List[str], imgs: List[str], *args, **kwargs):
"""
Run the model on multiple tasks and images all at once using concurrent
@ -206,18 +202,14 @@ class BaseMultiModalModel:
"""
# Instantiate the thread pool executor
with ThreadPoolExecutor(
max_workers=self.max_workers
) as executor:
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
results = executor.map(self.run, tasks, imgs)
# Print the results for debugging
for result in results:
print(result)
def run_batch(
self, tasks_images: List[Tuple[str, str]]
) -> List[str]:
def run_batch(self, tasks_images: List[Tuple[str, str]]) -> List[str]:
"""Process a batch of tasks and images"""
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [
@ -244,9 +236,7 @@ class BaseMultiModalModel:
"""Process a batch of tasks and images asynchronously with retries"""
loop = asyncio.get_event_loop()
futures = [
loop.run_in_executor(
None, self.run_with_retries, task, img
)
loop.run_in_executor(None, self.run_with_retries, task, img)
for task, img in tasks_images
]
return await asyncio.gather(*futures)
@ -264,9 +254,7 @@ class BaseMultiModalModel:
print(f"Error with the request {error}")
continue
def run_batch_with_retries(
self, tasks_images: List[Tuple[str, str]]
):
def run_batch_with_retries(self, tasks_images: List[Tuple[str, str]]):
"""Run the model with retries"""
for i in range(self.retries):
try:

@ -299,9 +299,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
"""
messages = params["messages"]
temperature = float(params.get("temperature", 1.0))
repetition_penalty = float(
params.get("repetition_penalty", 1.0)
)
repetition_penalty = float(params.get("repetition_penalty", 1.0))
top_p = float(params.get("top_p", 1.0))
max_new_tokens = int(params.get("max_tokens", 256))
query, history, image_list = self.process_history_and_images(
@ -318,9 +316,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
)
inputs = {
"input_ids": (
input_by_model["input_ids"]
.unsqueeze(0)
.to(self.device)
input_by_model["input_ids"].unsqueeze(0).to(self.device)
),
"token_type_ids": (
input_by_model["token_type_ids"]
@ -379,9 +375,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
"text": generated_text,
"usage": {
"prompt_tokens": input_echo_len,
"completion_tokens": (
total_len - input_echo_len
),
"completion_tokens": (total_len - input_echo_len),
"total_tokens": total_len,
},
}
@ -437,9 +431,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
for item in content:
if isinstance(item, ImageUrlContent):
image_url = item.image_url.url
if image_url.startswith(
"data:image/jpeg;base64,"
):
if image_url.startswith("data:image/jpeg;base64,"):
base64_encoded_image = image_url.split(
"data:image/jpeg;base64,"
)[1]
@ -471,9 +463,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
text_content,
)
else:
raise AssertionError(
"assistant reply before user"
)
raise AssertionError("assistant reply before user")
else:
raise AssertionError(f"unrecognized role: {role}")

@ -199,9 +199,7 @@ class Dalle3:
with open(full_path, "wb") as file:
file.write(response.content)
else:
raise ValueError(
f"Failed to download image from {img_url}"
)
raise ValueError(f"Failed to download image from {img_url}")
def create_variations(self, img: str):
"""
@ -249,9 +247,7 @@ class Dalle3:
"red",
)
)
print(
colored(f"Error running Dalle3: {error.error}", "red")
)
print(colored(f"Error running Dalle3: {error.error}", "red"))
raise error
def print_dashboard(self):
@ -310,9 +306,7 @@ class Dalle3:
executor.submit(self, task): task for task in tasks
}
results = []
for future in concurrent.futures.as_completed(
future_to_task
):
for future in concurrent.futures.as_completed(future_to_task):
task = future_to_task[future]
try:
img = future.result()
@ -359,9 +353,7 @@ class Dalle3:
"""Str method for the Dalle3 class"""
return f"Dalle3(image_url={self.image_url})"
@backoff.on_exception(
backoff.expo, Exception, max_tries=max_retries
)
@backoff.on_exception(backoff.expo, Exception, max_tries=max_retries)
def rate_limited_call(self, task: str):
"""Rate limited call to the Dalle3 API"""
return self.__call__(task)

@ -70,9 +70,7 @@ class DistilWhisperModel:
def __init__(self, model_id="distil-whisper/distil-large-v2"):
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
self.torch_dtype = (
torch.float16
if torch.cuda.is_available()
else torch.float32
torch.float16 if torch.cuda.is_available() else torch.float32
)
self.model_id = model_id
self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
@ -112,9 +110,7 @@ class DistilWhisperModel:
:return: The transcribed text.
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None, self.transcribe, inputs
)
return await loop.run_in_executor(None, self.transcribe, inputs)
def real_time_transcribe(self, audio_file_path, chunk_duration=5):
"""
@ -138,9 +134,7 @@ class DistilWhisperModel:
sample_rate = audio_input.sampling_rate
len(audio_input.array) / sample_rate
chunks = [
audio_input.array[
i : i + sample_rate * chunk_duration
]
audio_input.array[i : i + sample_rate * chunk_duration]
for i in range(
0,
len(audio_input.array),
@ -149,9 +143,7 @@ class DistilWhisperModel:
]
print(
colored(
"Starting real-time transcription...", "green"
)
colored("Starting real-time transcription...", "green")
)
for i, chunk in enumerate(chunks):
@ -162,8 +154,8 @@ class DistilWhisperModel:
return_tensors="pt",
padding=True,
)
processed_inputs = (
processed_inputs.input_values.to(self.device)
processed_inputs = processed_inputs.input_values.to(
self.device
)
# Generate transcription for the chunk
@ -174,9 +166,7 @@ class DistilWhisperModel:
# Print the chunk's transcription
print(
colored(
f"Chunk {i+1}/{len(chunks)}: ", "yellow"
)
colored(f"Chunk {i+1}/{len(chunks)}: ", "yellow")
+ transcription
)
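
To make the truncated chunking expression in the DistilWhisperModel hunk above concrete, here is a tiny sketch with toy numbers (the sample rate, chunk duration, and array are illustrative, and the step argument cut off in the hunk is assumed to equal the window size): the audio array is sliced into consecutive windows of sample_rate * chunk_duration samples.

import numpy as np

sample_rate = 4        # samples per second (toy value)
chunk_duration = 2     # seconds per chunk (toy value)
audio = np.arange(10)  # stand-in for audio_input.array

chunks = [
    audio[i : i + sample_rate * chunk_duration]
    for i in range(0, len(audio), sample_rate * chunk_duration)
]
print([c.tolist() for c in chunks])  # [[0, 1, 2, 3, 4, 5, 6, 7], [8, 9]]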

@ -112,9 +112,7 @@ class Gemini(BaseMultiModalModel):
)
# Initialize the model
self.model = genai.GenerativeModel(
model_name, *args, **kwargs
)
self.model = genai.GenerativeModel(model_name, *args, **kwargs)
# Check for the key
if self.gemini_api_key is None:
@ -211,9 +209,7 @@ class Gemini(BaseMultiModalModel):
raise ValueError("Please provide a Gemini API key")
# Load the image
img = [
{"mime_type": type, "data": Path(img).read_bytes()}
]
img = [{"mime_type": type, "data": Path(img).read_bytes()}]
except Exception as error:
print(f"Error processing image: {error}")

@ -42,9 +42,7 @@ class GPT4VSAM(BaseMultiModalModel):
self.device = device
self.return_related_marks = return_related_marks
self.sam = SegmentAnythingMarkGenerator(
device, *args, **kwargs
)
self.sam = SegmentAnythingMarkGenerator(device, *args, **kwargs)
self.visualizer = MarkVisualizer(*args, **kwargs)
def load_img(self, img: str) -> Any:

@ -15,8 +15,7 @@ try:
import cv2
except ImportError:
print(
"OpenCV not installed. Please install OpenCV to use this"
" model."
"OpenCV not installed. Please install OpenCV to use this" " model."
)
raise ImportError
@ -248,9 +247,7 @@ class GPT4VisionAPI(BaseMultiModalModel):
if not success:
break
_, buffer = cv2.imencode(".jpg", frame)
base64_frames.append(
base64.b64encode(buffer).decode("utf-8")
)
base64_frames.append(base64.b64encode(buffer).decode("utf-8"))
video.release()
print(len(base64_frames), "frames read.")
@ -433,9 +430,7 @@ class GPT4VisionAPI(BaseMultiModalModel):
def health_check(self):
"""Health check for the GPT4Vision model"""
try:
response = requests.get(
"https://api.openai.com/v1/engines"
)
response = requests.get("https://api.openai.com/v1/engines")
return response.status_code == 200
except requests.RequestException as error:
print(f"Health check failed: {error}")

@ -203,9 +203,7 @@ class HuggingfaceLLM(AbstractLLM):
results = list(executor.map(self.run, tasks))
return results
def run_batch(
self, tasks_images: List[Tuple[str, str]]
) -> List[str]:
def run_batch(self, tasks_images: List[Tuple[str, str]]) -> List[str]:
"""Process a batch of tasks and images"""
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [

@ -77,9 +77,7 @@ class Idefics(BaseMultiModalModel):
def __init__(
self,
model_name: Optional[
str
] = "HuggingFaceM4/idefics-9b-instruct",
model_name: Optional[str] = "HuggingFaceM4/idefics-9b-instruct",
device: Callable = autodetect_device,
torch_dtype=torch.bfloat16,
max_length: int = 100,

@ -87,8 +87,8 @@ class Kosmos(BaseMultiModalModel):
skip_special_tokens=True,
)[0]
processed_text, entities = (
self.processor.post_process_generation(generated_texts)
processed_text, entities = self.processor.post_process_generation(
generated_texts
)
return processed_text, entities
@ -189,9 +189,7 @@ class Kosmos(BaseMultiModalModel):
)
# draw bbox
# random color
color = tuple(
np.random.randint(0, 255, size=3).tolist()
)
color = tuple(np.random.randint(0, 255, size=3).tolist())
new_image = cv2.rectangle(
new_image,
(orig_x1, orig_y1),
@ -210,9 +208,7 @@ class Kosmos(BaseMultiModalModel):
if (
y1
< text_height
+ text_offset_original
+ 2 * text_spaces
< text_height + text_offset_original + 2 * text_spaces
):
y1 = (
orig_y1

@ -115,13 +115,11 @@ class MedicalSAM:
if len(box_torch.shape) == 2:
box_torch = box_torch[:, None, :]
sparse_embeddings, dense_embeddings = (
self.model.prompt_encoder(
sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
points=None,
boxes=box_torch,
masks=None,
)
)
low_res_logits, _ = self.model.mask_decoder(
image_embeddings=img,

@ -74,9 +74,9 @@ class Mistral(AbstractLLM):
"""Run the model on a given task."""
try:
model_inputs = self.tokenizer(
[task], return_tensors="pt"
).to(self.device)
model_inputs = self.tokenizer([task], return_tensors="pt").to(
self.device
)
generated_ids = self.model.generate(
**model_inputs,
max_length=self.max_length,
@ -85,9 +85,7 @@ class Mistral(AbstractLLM):
max_new_tokens=self.max_length,
**kwargs,
)
output_text = self.tokenizer.batch_decode(generated_ids)[
0
]
output_text = self.tokenizer.batch_decode(generated_ids)[0]
return output_text
except Exception as e:
raise ValueError(f"Error running the model: {str(e)}")

@ -146,9 +146,7 @@ class MPT7B:
self, prompts: list, temperature: float = 1.0
) -> list:
"""Batch generate text"""
self.logger.info(
f"Generating text for {len(prompts)} prompts..."
)
self.logger.info(f"Generating text for {len(prompts)} prompts...")
results = []
with torch.autocast("cuda", dtype=torch.bfloat16):
for prompt in prompts:

@ -53,9 +53,7 @@ def _create_retry_decorator(
| retry_if_exception_type(llm.error.APIError)
| retry_if_exception_type(llm.error.APIConnectionError)
| retry_if_exception_type(llm.error.RateLimitError)
| retry_if_exception_type(
llm.error.ServiceUnavailableError
)
| retry_if_exception_type(llm.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
@ -79,9 +77,7 @@ def _async_retry_decorator(embeddings: OpenAIEmbeddings) -> Any:
| retry_if_exception_type(llm.error.APIError)
| retry_if_exception_type(llm.error.APIConnectionError)
| retry_if_exception_type(llm.error.RateLimitError)
| retry_if_exception_type(
llm.error.ServiceUnavailableError
)
| retry_if_exception_type(llm.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
@ -102,15 +98,11 @@ def _check_response(response: dict) -> dict:
if any(len(d["embedding"]) == 1 for d in response["data"]):
import llm
raise llm.error.APIError(
"OpenAI API returned an empty embedding"
)
raise llm.error.APIError("OpenAI API returned an empty embedding")
return response
def embed_with_retry(
embeddings: OpenAIEmbeddings, **kwargs: Any
) -> Any:
def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@ -181,7 +173,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
client: Any = None #: :meta private:
model: str = "text-embedding-ada-002"
deployment: str = model # to support Azure OpenAI Service custom deployment names
deployment: str = (
model # to support Azure OpenAI Service custom deployment names
)
openai_api_version: str | None = None
# to support Azure OpenAI Service custom endpoints
openai_api_base: str | None = None
@ -194,9 +188,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
openai_api_key: str | None = None
openai_organization: str | None = None
allowed_special: Literal["all"] | set[str] = set()
disallowed_special: Literal["all"] | set[str] | Sequence[
str
] = "all"
disallowed_special: Literal["all"] | set[str] | Sequence[str] = "all"
chunk_size: int = 1000
"""Maximum number of texts to embed in each batch"""
max_retries: int = 6
@ -228,9 +220,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(
f"Found {field_name} supplied twice."
)
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
@ -339,9 +329,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
engine: str,
chunk_size: int | None = None,
) -> list[list[float]]:
embeddings: list[list[float]] = [
[] for _ in range(len(texts))
]
embeddings: list[list[float]] = [[] for _ in range(len(texts))]
try:
import tiktoken
except ImportError:
@ -358,8 +346,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning(
"Warning: model not found. Using cl100k_base"
" encoding."
"Warning: model not found. Using cl100k_base" " encoding."
)
model = "cl100k_base"
encoding = tiktoken.get_encoding(model)
@ -374,9 +361,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(
token[j : j + self.embedding_ctx_length]
)
tokens.append(token[j : j + self.embedding_ctx_length])
indices.append(i)
batched_embeddings: list[list[float]] = []
@ -402,9 +387,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
r["embedding"] for r in response["data"]
)
results: list[list[list[float]]] = [
[] for _ in range(len(texts))
]
results: list[list[list[float]]] = [[] for _ in range(len(texts))]
num_tokens_in_batch: list[list[int]] = [
[] for _ in range(len(texts))
]
@ -424,9 +407,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
average = np.average(
_result, axis=0, weights=num_tokens_in_batch[i]
)
embeddings[i] = (
average / np.linalg.norm(average)
).tolist()
embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings
@ -439,9 +420,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
engine: str,
chunk_size: int | None = None,
) -> list[list[float]]:
embeddings: list[list[float]] = [
[] for _ in range(len(texts))
]
embeddings: list[list[float]] = [[] for _ in range(len(texts))]
try:
import tiktoken
except ImportError:
@ -458,8 +437,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning(
"Warning: model not found. Using cl100k_base"
" encoding."
"Warning: model not found. Using cl100k_base" " encoding."
)
model = "cl100k_base"
encoding = tiktoken.get_encoding(model)
@ -474,9 +452,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(
token[j : j + self.embedding_ctx_length]
)
tokens.append(token[j : j + self.embedding_ctx_length])
indices.append(i)
batched_embeddings: list[list[float]] = []
@ -491,9 +467,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
r["embedding"] for r in response["data"]
)
results: list[list[list[float]]] = [
[] for _ in range(len(texts))
]
results: list[list[list[float]]] = [[] for _ in range(len(texts))]
num_tokens_in_batch: list[list[int]] = [
[] for _ in range(len(texts))
]
@ -515,9 +489,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
average = np.average(
_result, axis=0, weights=num_tokens_in_batch[i]
)
embeddings[i] = (
average / np.linalg.norm(average)
).tolist()
embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings
@ -536,9 +508,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
"""
# NOTE: to keep things simple, we assume the list may contain texts longer
# than the maximum context and use length-safe embedding function.
return self._get_len_safe_embeddings(
texts, engine=self.deployment
)
return self._get_len_safe_embeddings(texts, engine=self.deployment)
async def aembed_documents(
self, texts: list[str], chunk_size: int | None = 0

@ -129,14 +129,9 @@ class GooglePalm(BaseLLM, BaseModel):
values["temperature"] is not None
and not 0 <= values["temperature"] <= 1
):
raise ValueError(
"temperature must be in the range [0.0, 1.0]"
)
raise ValueError("temperature must be in the range [0.0, 1.0]")
if (
values["top_p"] is not None
and not 0 <= values["top_p"] <= 1
):
if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
raise ValueError("top_p must be in the range [0.0, 1.0]")
if values["top_k"] is not None and values["top_k"] <= 0:
@ -146,9 +141,7 @@ class GooglePalm(BaseLLM, BaseModel):
values["max_output_tokens"] is not None
and values["max_output_tokens"] <= 0
):
raise ValueError(
"max_output_tokens must be greater than zero"
)
raise ValueError("max_output_tokens must be greater than zero")
return values
@ -177,12 +170,8 @@ class GooglePalm(BaseLLM, BaseModel):
prompt_generations = []
for candidate in completion.candidates:
raw_text = candidate["output"]
stripped_text = _strip_erroneous_leading_spaces(
raw_text
)
prompt_generations.append(
Generation(text=stripped_text)
)
stripped_text = _strip_erroneous_leading_spaces(raw_text)
prompt_generations.append(Generation(text=stripped_text))
generations.append(prompt_generations)
return LLMResult(generations=generations)

@ -139,6 +139,4 @@ class QwenVLMultiModal(BaseMultiModalModel):
)
return response, history
except Exception as e:
raise Exception(
"An error occurred during the chat."
) from e
raise Exception("An error occurred during the chat.") from e

@ -143,9 +143,7 @@ class SamplingParams:
self.logprobs = logprobs
self.prompt_logprobs = prompt_logprobs
self.skip_special_tokens = skip_special_tokens
self.spaces_between_special_tokens = (
spaces_between_special_tokens
)
self.spaces_between_special_tokens = spaces_between_special_tokens
self.logits_processors = logits_processors
self.include_stop_str_in_output = include_stop_str_in_output
self._verify_args()
@ -189,31 +187,23 @@ class SamplingParams:
f" {self.temperature}."
)
if not 0.0 < self.top_p <= 1.0:
raise ValueError(
f"top_p must be in (0, 1], got {self.top_p}."
)
raise ValueError(f"top_p must be in (0, 1], got {self.top_p}.")
if self.top_k < -1 or self.top_k == 0:
raise ValueError(
"top_k must be -1 (disable), or at least 1, "
f"got {self.top_k}."
)
if not 0.0 <= self.min_p <= 1.0:
raise ValueError(
f"min_p must be in [0, 1], got {self.min_p}."
)
raise ValueError(f"min_p must be in [0, 1], got {self.min_p}.")
if self.max_tokens is not None and self.max_tokens < 1:
raise ValueError(
"max_tokens must be at least 1, got"
f" {self.max_tokens}."
"max_tokens must be at least 1, got" f" {self.max_tokens}."
)
if self.logprobs is not None and self.logprobs < 0:
raise ValueError(
f"logprobs must be non-negative, got {self.logprobs}."
)
if (
self.prompt_logprobs is not None
and self.prompt_logprobs < 0
):
if self.prompt_logprobs is not None and self.prompt_logprobs < 0:
raise ValueError(
"prompt_logprobs must be non-negative, got "
f"{self.prompt_logprobs}."
@ -230,13 +220,9 @@ class SamplingParams:
"temperature must be 0 when using beam search."
)
if self.top_p < 1.0 - _SAMPLING_EPS:
raise ValueError(
"top_p must be 1 when using beam search."
)
raise ValueError("top_p must be 1 when using beam search.")
if self.top_k != -1:
raise ValueError(
"top_k must be -1 when using beam search."
)
raise ValueError("top_k must be -1 when using beam search.")
if self.early_stopping not in [True, False, "never"]:
raise ValueError(
"early_stopping must be True, False, or 'never', "

@ -88,15 +88,11 @@ class SpeechT5:
self.model_name = model_name
self.vocoder_name = vocoder_name
self.dataset_name = dataset_name
self.processor = SpeechT5Processor.from_pretrained(
self.model_name
)
self.processor = SpeechT5Processor.from_pretrained(self.model_name)
self.model = SpeechT5ForTextToSpeech.from_pretrained(
self.model_name
)
self.vocoder = SpeechT5HifiGan.from_pretrained(
self.vocoder_name
)
self.vocoder = SpeechT5HifiGan.from_pretrained(self.vocoder_name)
self.embeddings_dataset = load_dataset(
self.dataset_name, split="validation"
)
@ -121,9 +117,7 @@ class SpeechT5:
def set_model(self, model_name: str):
"""Set the model to a new model."""
self.model_name = model_name
self.processor = SpeechT5Processor.from_pretrained(
self.model_name
)
self.processor = SpeechT5Processor.from_pretrained(self.model_name)
self.model = SpeechT5ForTextToSpeech.from_pretrained(
self.model_name
)
@ -131,9 +125,7 @@ class SpeechT5:
def set_vocoder(self, vocoder_name):
"""Set the vocoder to a new vocoder."""
self.vocoder_name = vocoder_name
self.vocoder = SpeechT5HifiGan.from_pretrained(
self.vocoder_name
)
self.vocoder = SpeechT5HifiGan.from_pretrained(self.vocoder_name)
def set_embeddings_dataset(self, dataset_name):
"""Set the embeddings dataset to a new dataset."""

@ -127,9 +127,7 @@ class SSD1B:
if task in self.cache:
return self.cache[task]
try:
img = self.pipe(
prompt=task, neg_prompt=neg_prompt
).images[0]
img = self.pipe(prompt=task, neg_prompt=neg_prompt).images[0]
# Generate a unique filename for the image
img_name = f"{uuid.uuid4()}.{self.image_format}"
@ -223,9 +221,7 @@ class SSD1B:
executor.submit(self, task): task for task in tasks
}
results = []
for future in concurrent.futures.as_completed(
future_to_task
):
for future in concurrent.futures.as_completed(future_to_task):
task = future_to_task[future]
try:
img = future.result()
@ -272,9 +268,7 @@ class SSD1B:
"""Str method for the SSD1B class"""
return f"SSD1B(image_url={self.image_url})"
@backoff.on_exception(
backoff.expo, Exception, max_tries=max_retries
)
@backoff.on_exception(backoff.expo, Exception, max_tries=max_retries)
def rate_limited_call(self, task: str):
"""Rate limited call to the SSD1B API"""
return self.__call__(task)

@ -120,9 +120,7 @@ class TogetherLLM(AbstractLLM):
out = response.json()
content = (
out["choices"][0]
.get("message", {})
.get("content", None)
out["choices"][0].get("message", {}).get("content", None)
)
if self.streaming_enabled:
content = self.stream_response(content)
@ -15,9 +15,7 @@ class UltralyticsModel(BaseMultiModalModel):
**kwargs: Arbitrary keyword arguments.
"""
def __init__(
self, model_name: str = "yolov8n.pt", *args, **kwargs
):
def __init__(self, model_name: str = "yolov8n.pt", *args, **kwargs):
super().__init__(*args, **kwargs)
self.model_name = model_name
@ -78,9 +78,7 @@ class WizardLLMStoryTeller:
bnb_config = BitsAndBytesConfig(**quantization_config)
try:
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_id
)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id, quantization_config=bnb_config
)
@ -78,9 +78,7 @@ class YarnMistral128:
bnb_config = BitsAndBytesConfig(**quantization_config)
try:
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_id
)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.model = AutoModelForCausalLM.from_pretrained(
self.model_id,
quantization_config=bnb_config,
@ -87,9 +87,7 @@ class Yi34B200k:
top_k=self.top_k,
top_p=self.top_p,
)
return self.tokenizer.decode(
outputs[0], skip_special_tokens=True
)
return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
# # Example usage
@ -91,8 +91,7 @@ def format_vision_prompt(objective, previous_action):
"""
if previous_action:
previous_action = (
"Here was the previous action you took:"
f" {previous_action}"
"Here was the previous action you took:" f" {previous_action}"
)
else:
previous_action = ""
@ -19,7 +19,7 @@ from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
)
from swarms.structs.conversation import Conversation
from swarms.tools.tool import BaseTool
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.tools.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.data_to_text import data_to_text
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text
@ -326,9 +326,7 @@ class Agent:
)
else:
tools_prompt = tool_usage_worker_prompt(
tools=self.tools
)
tools_prompt = tool_usage_worker_prompt(tools=self.tools)
# Append the tools prompt to the short_term_memory
self.short_memory.add(
@ -354,9 +352,7 @@ class Agent:
f"{self.agent_name}.log",
level="INFO",
colorize=True,
format=(
"<green>{time}</green> <level>{message}</level>"
),
format=("<green>{time}</green> <level>{message}</level>"),
backtrace=True,
diagnose=True,
)
@ -403,9 +399,7 @@ class Agent:
self.llm.temperature = 0.7
except Exception as error:
print(
colored(
f"Error dynamically changing temperature: {error}"
)
colored(f"Error dynamically changing temperature: {error}")
)
def format_prompt(self, template, **kwargs: Any) -> str:
@ -418,24 +412,16 @@ class Agent:
logger.info(f"Adding task to memory: {task}")
self.short_memory.add(f"{self.user_name}: {task}")
except Exception as error:
print(
colored(
f"Error adding task to memory: {error}", "red"
)
)
print(colored(f"Error adding task to memory: {error}", "red"))
def add_message_to_memory(self, message: str):
"""Add the message to the memory"""
try:
logger.info(f"Adding message to memory: {message}")
self.short_memory.add(
role=self.agent_name, content=message
)
self.short_memory.add(role=self.agent_name, content=message)
except Exception as error:
print(
colored(
f"Error adding message to memory: {error}", "red"
)
colored(f"Error adding message to memory: {error}", "red")
)
def add_message_to_memory_and_truncate(self, message: str):
@ -549,9 +535,7 @@ class Agent:
history = [f"{user_name}: {task}"]
return history
def _dynamic_prompt_setup(
self, dynamic_prompt: str, task: str
) -> str:
def _dynamic_prompt_setup(self, dynamic_prompt: str, task: str) -> str:
"""_dynamic_prompt_setup summary
Args:
@ -561,9 +545,7 @@ class Agent:
Returns:
str: _description_
"""
dynamic_prompt = (
dynamic_prompt or self.construct_dynamic_prompt()
)
dynamic_prompt = dynamic_prompt or self.construct_dynamic_prompt()
combined_prompt = f"{dynamic_prompt}\n{task}"
return combined_prompt
@ -581,9 +563,7 @@ class Agent:
self.activate_autonomous_agent()
if task:
self.short_memory.add(
role=self.user_name, content=task
)
self.short_memory.add(role=self.user_name, content=task)
loop_count = 0
response = None
@ -600,9 +580,7 @@ class Agent:
if self.dynamic_temperature_enabled:
self.dynamic_temperature()
task_prompt = (
self.short_memory.return_history_as_string()
)
task_prompt = self.short_memory.return_history_as_string()
attempt = 0
success = False
@ -621,9 +599,7 @@ class Agent:
if self.tools:
# Extract code from markdown
response = extract_code_from_markdown(
response
)
response = extract_code_from_markdown(response)
# Execute the tool by name
execute_tool_by_name(
@ -634,15 +610,13 @@ class Agent:
if self.code_interpreter:
# Extract code from markdown
extracted_code = (
extract_code_from_markdown(response)
extracted_code = extract_code_from_markdown(
response
)
# Execute the code
# execution = execute_command(extracted_code)
execution = CodeExecutor().run(
extracted_code
)
execution = CodeExecutor().run(extracted_code)
# Add the execution to the memory
self.short_memory.add(
@ -658,9 +632,7 @@ class Agent:
)
if self.evaluator:
evaluated_response = self.evaluator(
response
)
evaluated_response = self.evaluator(response)
print(
"Evaluated Response:"
f" {evaluated_response}"
@ -672,9 +644,7 @@ class Agent:
# Sentiment analysis
if self.sentiment_analyzer:
sentiment = self.sentiment_analyzer(
response
)
sentiment = self.sentiment_analyzer(response)
print(f"Sentiment: {sentiment}")
if sentiment > self.sentiment_threshold:
@ -726,9 +696,8 @@ class Agent:
and self._check_stopping_condition(response)
):
break
elif (
self.stopping_func is not None
and self.stopping_func(response)
elif self.stopping_func is not None and self.stopping_func(
response
):
break
@ -826,9 +795,7 @@ class Agent:
context = f"""
System: This reminds you of these events from your past: [{ltr}]
"""
return self.short_memory.add(
role=self.agent_name, content=context
)
return self.short_memory.add(role=self.agent_name, content=context)
def add_memory(self, message: str):
"""Add a memory to the agent
@ -840,9 +807,7 @@ class Agent:
_type_: _description_
"""
logger.info(f"Adding memory: {message}")
return self.short_memory.add(
role=self.agent_name, content=message
)
return self.short_memory.add(role=self.agent_name, content=message)
async def run_concurrent(self, tasks: List[str], **kwargs):
"""
@ -889,9 +854,7 @@ class Agent:
json.dump(self.short_memory, f)
# print(f"Saved agent history to {file_path}")
except Exception as error:
print(
colored(f"Error saving agent history: {error}", "red")
)
print(colored(f"Error saving agent history: {error}", "red"))
def load(self, file_path: str):
"""
@ -916,23 +879,11 @@ class Agent:
Prints the entire history and memory of the agent.
Each message is colored and formatted for better readability.
"""
print(colored("Agent History and Memory", "cyan", attrs=["bold"]))
print(colored("========================", "cyan", attrs=["bold"]))
for loop_index, history in enumerate(self.short_memory, start=1):
print(
colored(
"Agent History and Memory", "cyan", attrs=["bold"]
)
)
print(
colored(
"========================", "cyan", attrs=["bold"]
)
)
for loop_index, history in enumerate(
self.short_memory, start=1
):
print(
colored(
f"\nLoop {loop_index}:", "yellow", attrs=["bold"]
)
colored(f"\nLoop {loop_index}:", "yellow", attrs=["bold"])
)
for message in history:
speaker, _, message_text = message.partition(": ")
@ -943,8 +894,7 @@ class Agent:
)
else:
print(
colored(f"{speaker}:", "blue")
+ f" {message_text}"
colored(f"{speaker}:", "blue") + f" {message_text}"
)
print(colored("------------------------", "cyan"))
print(colored("End of Agent History", "cyan", attrs=["bold"]))
@ -975,9 +925,7 @@ class Agent:
self.short_memory.add(
role=self.agent_name, content=response
)
self.short_memory.add(
role=self.user_name, content=task
)
self.short_memory.add(role=self.user_name, content=task)
else:
self.short_memory.add(
role=self.agent_name, content=response
@ -1054,9 +1002,7 @@ class Agent:
Apply the response filters to the response
"""
logger.info(
f"Applying response filters to response: {response}"
)
logger.info(f"Applying response filters to response: {response}")
for word in self.response_filters:
response = response.replace(word, "[FILTERED]")
return response
@ -1096,9 +1042,7 @@ class Agent:
with open(file_path, "w") as f:
yaml.dump(self.__dict__, f)
except Exception as error:
print(
colored(f"Error saving agent to YAML: {error}", "red")
)
print(colored(f"Error saving agent to YAML: {error}", "red"))
def save_state(self, file_path: str) -> None:
"""
@ -1126,9 +1070,7 @@ class Agent:
"retry_interval": self.retry_interval,
"interactive": self.interactive,
"dashboard": self.dashboard,
"dynamic_temperature": (
self.dynamic_temperature_enabled
),
"dynamic_temperature": (self.dynamic_temperature_enabled),
"autosave": self.autosave,
"saved_state_path": self.saved_state_path,
"max_loops": self.max_loops,
@ -1137,14 +1079,10 @@ class Agent:
with open(file_path, "w") as f:
json.dump(state, f, indent=4)
saved = colored(
f"Saved agent state to: {file_path}", "green"
)
saved = colored(f"Saved agent state to: {file_path}", "green")
print(saved)
except Exception as error:
print(
colored(f"Error saving agent state: {error}", "red")
)
print(colored(f"Error saving agent state: {error}", "red"))
def state_to_str(self):
"""Transform the JSON into a string"""
@ -1163,9 +1101,7 @@ class Agent:
"retry_interval": self.retry_interval,
"interactive": self.interactive,
"dashboard": self.dashboard,
"dynamic_temperature": (
self.dynamic_temperature_enabled
),
"dynamic_temperature": (self.dynamic_temperature_enabled),
"autosave": self.autosave,
"saved_state_path": self.saved_state_path,
"max_loops": self.max_loops,
@ -1214,9 +1150,7 @@ class Agent:
print(f"Agent state loaded from {file_path}")
except Exception as error:
print(
colored(f"Error loading agent state: {error}", "red")
)
print(colored(f"Error loading agent state: {error}", "red"))
def retry_on_failure(
self,
@ -1232,9 +1166,7 @@ class Agent:
try:
return function()
except Exception as error:
logging.error(
f"Error generating response: {error}"
)
logging.error(f"Error generating response: {error}")
attempt += 1
time.sleep(retry_delay)
raise Exception("All retry attempts failed")
@ -1320,9 +1252,7 @@ class Agent:
for doc in docs:
data = data_to_text(doc)
return self.short_memory.add(
role=self.user_name, content=data
)
return self.short_memory.add(role=self.user_name, content=data)
except Exception as error:
print(colored(f"Error ingesting docs: {error}", "red"))
@ -1338,9 +1268,7 @@ class Agent:
try:
logger.info(f"Ingesting pdf: {pdf}")
text = pdf_to_text(pdf)
return self.short_memory.add(
role=self.user_name, content=text
)
return self.short_memory.add(role=self.user_name, content=text)
except Exception as error:
print(colored(f"Error ingesting pdf: {error}", "red"))
@ -1361,11 +1289,7 @@ class Agent:
message = f"{agent_name}: {message}"
return self.run(message, *args, **kwargs)
except Exception as error:
print(
colored(
f"Error sending agent message: {error}", "red"
)
)
print(colored(f"Error sending agent message: {error}", "red"))
def truncate_history(self):
"""
@ -1407,9 +1331,7 @@ class Agent:
for file in files:
text = data_to_text(file)
return self.short_memory.add(
role=self.user_name, content=text
)
return self.short_memory.add(role=self.user_name, content=text)
except Exception as error:
print(
colored(
@ -117,9 +117,7 @@ class AgentRearrange(BaseSwarm):
return None
task_to_run = specific_tasks.get(dest_agent_name, task)
if self.custom_prompt:
out = dest_agent.run(
f"{task_to_run} {self.custom_prompt}"
)
out = dest_agent.run(f"{task_to_run} {self.custom_prompt}")
else:
out = dest_agent.run(f"{task_to_run} (from {source})")
return out
@ -138,9 +136,7 @@ class AgentRearrange(BaseSwarm):
results.append(result)
else:
for destination in destinations:
task = specific_tasks.get(
destination, default_task
)
task = specific_tasks.get(destination, default_task)
destination_agent = self.self_find_agent_by_name(
destination
)
@ -156,9 +152,7 @@ class AgentRearrange(BaseSwarm):
**specific_tasks,
):
self.flows.clear() # Reset previous flows
results = self.process_flows(
pattern, default_task, specific_tasks
)
results = self.process_flows(pattern, default_task, specific_tasks)
return results
@ -67,9 +67,7 @@ class AsyncWorkflow:
except Exception as error:
logger.error(f"[ERROR][AsyncWorkflow] {error}")
async def delete(
self, task: Any = None, tasks: List[Task] = None
):
async def delete(self, task: Any = None, tasks: List[Task] = None):
"""Delete a task from the workflow"""
try:
if task:
@ -140,9 +140,7 @@ class AutoSwarmRouter(BaseSwarm):
if self.name in self.swarm_dict:
# If a match is found then send the task to the swarm
out = self.swarm_dict[self.name].run(
task, *args, **kwargs
)
out = self.swarm_dict[self.name].run(task, *args, **kwargs)
if self.custom_postprocess:
# If custom postprocess function is provided then run it
@ -151,9 +149,7 @@ class AutoSwarmRouter(BaseSwarm):
return out
# If no match is found then return None
raise ValueError(
f"Swarm with name {self.name} not found."
)
raise ValueError(f"Swarm with name {self.name} not found.")
except Exception as e:
logger.error(f"Error: {e}")
raise e
@ -155,9 +155,7 @@ class AutoScaler(BaseStructure):
for _ in range(new_agents_counts):
self.agents_pool.append(self.agents[0]())
except Exception as error:
print(
f"Error scaling up: {error} try again with a new task"
)
print(f"Error scaling up: {error} try again with a new task")
def scale_down(self):
"""scale down"""
@ -169,13 +167,10 @@ class AutoScaler(BaseStructure):
del self.agents_pool[-1] # remove last agent
except Exception as error:
print(
f"Error scaling down: {error} try again with a new"
" task"
f"Error scaling down: {error} try again with a new" " task"
)
def run(
self, agent_id, task: Optional[str] = None, *args, **kwargs
):
def run(self, agent_id, task: Optional[str] = None, *args, **kwargs):
"""Run agent the task on the agent id
Args:
@ -203,11 +198,7 @@ class AutoScaler(BaseStructure):
sleep(60) # check minute
pending_tasks = self.task_queue.qsize()
active_agents = sum(
[
1
for agent in self.agents_pool
if agent.is_busy()
]
[1 for agent in self.agents_pool if agent.is_busy()]
)
if (
@ -246,17 +237,13 @@ class AutoScaler(BaseStructure):
if available_agent:
available_agent.run(task)
except Exception as error:
print(
f"Error starting: {error} try again with a new task"
)
print(f"Error starting: {error} try again with a new task")
def check_agent_health(self):
"""Checks the health of each agent and replaces unhealthy agents."""
for i, agent in enumerate(self.agents_pool):
if not agent.is_healthy():
logging.warning(
f"Replacing unhealthy agent at index {i}"
)
logging.warning(f"Replacing unhealthy agent at index {i}")
self.agents_pool[i] = self.agent()
def balance_load(self):
@ -273,9 +260,7 @@ class AutoScaler(BaseStructure):
" task"
)
def set_scaling_strategy(
self, strategy: Callable[[int, int], int]
):
def set_scaling_strategy(self, strategy: Callable[[int, int], int]):
"""Set a custom scaling strategy."""
self.custom_scale_strategy = strategy
@ -187,9 +187,7 @@ class BaseStructure(BaseModel):
async def run_async(self, *args, **kwargs):
"""Run the structure asynchronously."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None, self.run, *args, **kwargs
)
return await loop.run_in_executor(None, self.run, *args, **kwargs)
async def save_metadata_async(self, metadata: Dict[str, Any]):
"""Save metadata to file asynchronously.
@ -222,9 +220,7 @@ class BaseStructure(BaseModel):
None, self.log_error, error_message
)
async def save_artifact_async(
self, artifact: Any, artifact_name: str
):
async def save_artifact_async(self, artifact: Any, artifact_name: str):
"""Save artifact to file asynchronously.
Args:
@ -266,9 +262,7 @@ class BaseStructure(BaseModel):
None, self.log_event, event, event_type
)
async def asave_to_file(
self, data: Any, file: str, *args, **kwargs
):
async def asave_to_file(self, data: Any, file: str, *args, **kwargs):
"""Save data to file asynchronously.
Args:
@ -357,8 +351,7 @@ class BaseStructure(BaseModel):
"""
with ThreadPoolExecutor(max_workers=batch_size) as executor:
futures = [
executor.submit(self.run, data)
for data in batched_data
executor.submit(self.run, data) for data in batched_data
]
return [future.result() for future in futures]
@ -418,9 +411,7 @@ class BaseStructure(BaseModel):
_type_: _description_
"""
self.monitor_resources()
return self.run_batched(
batched_data, batch_size, *args, **kwargs
)
return self.run_batched(batched_data, batch_size, *args, **kwargs)
# x = BaseStructure()
@ -361,9 +361,7 @@ class BaseSwarm(ABC):
task (Optional[str], optional): _description_. Defaults to None.
"""
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
self.arun(task, *args, **kwargs)
)
result = loop.run_until_complete(self.arun(task, *args, **kwargs))
return result
def run_batch_async(self, tasks: List[str], *args, **kwargs):
@ -533,9 +531,7 @@ class BaseSwarm(ABC):
Agent: Instance of Agent representing the retrieved Agent, or None if not found.
"""
def join_swarm(
self, from_entity: Agent | Agent, to_entity: Agent
):
def join_swarm(self, from_entity: Agent | Agent, to_entity: Agent):
"""
Add a relationship between a Swarm and an Agent or other Swarm to the registry.
@ -68,9 +68,7 @@ class BaseWorkflow(BaseStructure):
elif tasks:
self.task_pool.extend(tasks)
else:
raise ValueError(
"You must provide a task or a list of tasks"
)
raise ValueError("You must provide a task or a list of tasks")
def add_agent(self, agent: Agent, *args, **kwargs):
return self.agent_pool(agent)
@ -122,23 +120,17 @@ class BaseWorkflow(BaseStructure):
Dict[str, Any]: The results of each task in the workflow
"""
try:
return {
task.description: task.result for task in self.tasks
}
return {task.description: task.result for task in self.tasks}
except Exception as error:
print(
colored(
f"Error getting task results: {error}", "red"
),
colored(f"Error getting task results: {error}", "red"),
)
def remove_task(self, task: str) -> None:
"""Remove tasks from sequential workflow"""
try:
self.tasks = [
task
for task in self.tasks
if task.description != task
task for task in self.tasks if task.description != task
]
except Exception as error:
print(
@ -177,9 +169,7 @@ class BaseWorkflow(BaseStructure):
task.kwargs.update(updates)
break
else:
raise ValueError(
f"Task {task} not found in workflow."
)
raise ValueError(f"Task {task} not found in workflow.")
except Exception as error:
print(
colored(
@ -214,9 +204,7 @@ class BaseWorkflow(BaseStructure):
self.tasks.remove(task)
break
else:
raise ValueError(
f"Task {task} not found in workflow."
)
raise ValueError(f"Task {task} not found in workflow.")
except Exception as error:
print(
colored(
@ -299,9 +287,7 @@ class BaseWorkflow(BaseStructure):
)
)
def load_workflow_state(
self, filepath: str = None, **kwargs
) -> None:
def load_workflow_state(self, filepath: str = None, **kwargs) -> None:
"""
Loads the workflow state from a json file and restores the workflow state.
@ -102,15 +102,11 @@ class BlocksList(BaseStructure):
return [block for block in self.blocks if block.id == id]
def get_by_parent(self, parent: str):
return [
block for block in self.blocks if block.parent == parent
]
return [block for block in self.blocks if block.parent == parent]
def get_by_parent_id(self, parent_id: str):
return [
block
for block in self.blocks
if block.parent_id == parent_id
block for block in self.blocks if block.parent_id == parent_id
]
def get_by_parent_name(self, parent_name: str):
@ -16,9 +16,7 @@ class Company:
shared_instructions: str = None
ceo: Optional[Agent] = None
agents: List[Agent] = field(default_factory=list)
agent_interactions: Dict[str, List[str]] = field(
default_factory=dict
)
agent_interactions: Dict[str, List[str]] = field(default_factory=dict)
history: Conversation = field(default_factory=Conversation)
def __post_init__(self):
@ -46,9 +44,7 @@ class Company:
self.agents.append(agent)
except Exception as error:
logger.error(
f"[ERROR][CLASS: Company][METHOD: add] {error}"
)
logger.error(f"[ERROR][CLASS: Company][METHOD: add] {error}")
raise error
def get(self, agent_name: str) -> Agent:
@ -73,9 +69,7 @@ class Company:
" company."
)
except Exception as error:
logger.error(
f"[ERROR][CLASS: Company][METHOD: get] {error}"
)
logger.error(f"[ERROR][CLASS: Company][METHOD: get] {error}")
raise error
def remove(self, agent: Agent) -> None:
@ -118,9 +112,7 @@ class Company:
elif isinstance(node, list):
for agent in node:
if not isinstance(agent, Agent):
raise ValueError(
"Invalid agent in org chart"
)
raise ValueError("Invalid agent in org chart")
self.add(agent)
for i, agent in enumerate(node):
@ -153,9 +145,7 @@ class Company:
"""
if agent1.ai_name not in self.agents_interactions:
self.agents_interactions[agent1.ai_name] = []
self.agents_interactions[agent1.ai_name].append(
agent2.ai_name
)
self.agents_interactions[agent1.ai_name].append(agent2.ai_name)
def run(self):
"""
@ -35,9 +35,7 @@ class ConcurrentWorkflow(BaseStructure):
max_loops: int = 1
max_workers: int = 5
autosave: bool = False
saved_state_filepath: Optional[str] = (
"runs/concurrent_workflow.json"
)
saved_state_filepath: Optional[str] = "runs/concurrent_workflow.json"
print_results: bool = False
return_results: bool = False
use_processes: bool = False
@ -89,9 +87,7 @@ class ConcurrentWorkflow(BaseStructure):
}
results = []
for future in concurrent.futures.as_completed(
futures
):
for future in concurrent.futures.as_completed(futures):
task = futures[future]
try:
result = future.result()
@ -339,9 +339,7 @@ class Conversation(BaseStructure):
def update_from_database(self, *args, **kwargs):
"""Update the conversation history from the database"""
self.database.update(
"conversation", self.conversation_history
)
self.database.update("conversation", self.conversation_history)
def get_from_database(self, *args, **kwargs):
"""Get the conversation history from the database"""
@ -140,9 +140,7 @@ class Debate:
self.affirmative.system_prompt(
self.save_file["player_meta_prompt"]
)
self.negative.system_prompt(
self.save_file["player_meta_prompt"]
)
self.negative.system_prompt(self.save_file["player_meta_prompt"])
self.moderator.system_prompt(
self.save_file["moderator_meta_prompt"]
)
@ -191,14 +189,10 @@ class Debate:
def save_file_to_json(self, id):
now = datetime.now()
current_time = now.strftime("%Y-%m-%d_%H:%M:%S")
save_file_path = os.path.join(
self.save_file_dir, f"{id}.json"
)
save_file_path = os.path.join(self.save_file_dir, f"{id}.json")
self.save_file["end_time"] = current_time
json_str = json.dumps(
self.save_file, ensure_ascii=False, indent=4
)
json_str = json.dumps(self.save_file, ensure_ascii=False, indent=4)
with open(save_file_path, "w") as f:
f.write(json_str)
@ -126,15 +126,11 @@ class GraphWorkflow(BaseStructure):
if from_node in self.graph:
for condition_value, to_node in edge_dict.items():
if to_node in self.graph:
self.graph[from_node]["edges"][
to_node
] = condition
self.graph[from_node]["edges"][to_node] = condition
else:
raise ValueError("Node does not exist in graph")
else:
raise ValueError(
f"Node {from_node} does not exist in graph"
)
raise ValueError(f"Node {from_node} does not exist in graph")
def run(self):
"""
@ -160,9 +156,7 @@ class GraphWorkflow(BaseStructure):
ValueError: _description_
"""
if node_name not in self.graph:
raise ValueError(
f"Node {node_name} does not exist in graph"
)
raise ValueError(f"Node {node_name} does not exist in graph")
def _check_nodes_exist(self, from_node, to_node):
"""
@ -51,8 +51,7 @@ class GroupChat:
def next_agent(self, agent: Agent) -> Agent:
"""Return the next agent in the list."""
return self.agents[
(self.agent_names.index(agent.name) + 1)
% len(self.agents)
(self.agent_names.index(agent.name) + 1) % len(self.agents)
]
def select_speaker_msg(self):
@ -122,9 +121,7 @@ class GroupChat:
"""
formatted_messages = []
for message in messages:
formatted_message = (
f"'{message['role']}:{message['content']}"
)
formatted_message = f"'{message['role']}:{message['content']}"
formatted_messages.append(formatted_message)
return "\n".join(formatted_messages)
@ -165,9 +165,7 @@ class MajorityVoting:
# If autosave is enabled, save the conversation to a file
if self.autosave:
create_file(
str(self.conversation), "majority_voting.json"
)
create_file(str(self.conversation), "majority_voting.json")
# Log the agents
logger.info("Initializing majority voting system")
@ -224,9 +222,7 @@ class MajorityVoting:
# If an output parser is provided, parse the responses
if self.output_parser is not None:
majority_vote = self.output_parser(
responses, *args, **kwargs
)
majority_vote = self.output_parser(responses, *args, **kwargs)
else:
majority_vote = majority_voting(responses)