pull/439/head
Kye 9 months ago
parent 4fbd2281f4
commit d572cafce6

@@ -147,7 +147,6 @@ nav:
     - Foundational Structures:
       - Agent: "swarms/structs/agent.md"
       - basestructure: "swarms/structs/basestructure.md"
-      - artifactupload: "swarms/structs/artifactupload.md"
       - taskinput: "swarms/structs/taskinput.md"
       - stepinput: "swarms/structs/stepinput.md"
       - artifact: "swarms/structs/artifact.md"

@@ -1,17 +1,8 @@
-![Swarming banner icon](images/swarmslogobanner.png)
-
-<div align="center">
+# Swarms
 
 Orchestrate swarms of agents for production-grade applications.
 
-[![GitHub issues](https://img.shields.io/github/issues/kyegomez/swarms)](https://github.com/kyegomez/swarms/issues) [![GitHub forks](https://img.shields.io/github/forks/kyegomez/swarms)](https://github.com/kyegomez/swarms/network) [![GitHub stars](https://img.shields.io/github/stars/kyegomez/swarms)](https://github.com/kyegomez/swarms/stargazers) [![GitHub license](https://img.shields.io/github/license/kyegomez/swarms)](https://github.com/kyegomez/swarms/blob/main/LICENSE)[![GitHub star chart](https://img.shields.io/github/stars/kyegomez/swarms?style=social)](https://star-history.com/#kyegomez/swarms)[![Dependency Status](https://img.shields.io/librariesio/github/kyegomez/swarms)](https://libraries.io/github/kyegomez/swarms) [![Downloads](https://static.pepy.tech/badge/swarms/month)](https://pepy.tech/project/swarms)
-[![Join the Agora discord](https://img.shields.io/discord/1110910277110743103?label=Discord&logo=discord&logoColor=white&style=plastic&color=d7b023)![Share on Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Share%20%40kyegomez/swarms)](https://twitter.com/intent/tweet?text=Check%20out%20this%20amazing%20AI%20project:%20&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms) [![Share on Facebook](https://img.shields.io/badge/Share-%20facebook-blue)](https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms) [![Share on LinkedIn](https://img.shields.io/badge/Share-%20linkedin-blue)](https://www.linkedin.com/shareArticle?mini=true&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&title=&summary=&source=)
-[![Share on Reddit](https://img.shields.io/badge/-Share%20on%20Reddit-orange)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&title=Swarms%20-%20the%20future%20of%20AI) [![Share on Hacker News](https://img.shields.io/badge/-Share%20on%20Hacker%20News-orange)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&t=Swarms%20-%20the%20future%20of%20AI) [![Share on Pinterest](https://img.shields.io/badge/-Share%20on%20Pinterest-red)](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&media=https%3A%2F%2Fexample.com%2Fimage.jpg&description=Swarms%20-%20the%20future%20of%20AI) [![Share on WhatsApp](https://img.shields.io/badge/-Share%20on%20WhatsApp-green)](https://api.whatsapp.com/send?text=Check%20out%20Swarms%20-%20the%20future%20of%20AI%20%23swarms%20%23AI%0A%0Ahttps%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms)
-
-</div>
 
 Individual agents face five significant challenges that hinder their deployment in production: short memory, single-task threading, hallucinations, high cost, and lack of collaboration. Multi-agent collaboration offers a solution to all these issues. Swarms provides simple, reliable, and agile tools to create your own Swarm tailored to your specific needs. Currently, Swarms is being used in production by RBC, John Deere, and many AI startups. For more information on the unparalleled benefits of multi-agent collaboration, check out this GitHub repository for research papers or schedule a call with me!
 
 ----

@@ -20,7 +20,9 @@ agent = Agent(
 )
 
 # Run the Agent on a task
-out = agent.run("Generate a transcript for a youtube video on what swarms are!")
+out = agent.run(
+    "Generate a transcript for a youtube video on what swarms are!"
+)
 print(out)
 
 # Save the state
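For context, the hunk above only re-wraps the agent.run(...) call from the README quickstart. A minimal, self-contained version of that snippet might look like the sketch below; the OpenAIChat model choice and the max_loops=1 constructor argument are assumptions for illustration, since only the run call appears in this hunk.

# Minimal sketch of the quickstart around this hunk (model and constructor
# arguments are assumed for illustration, not taken from the diff).
from swarms import Agent, OpenAIChat

llm = OpenAIChat()  # assumes an OpenAI API key is configured in the environment

agent = Agent(llm=llm, max_loops=1)

# Run the Agent on a task
out = agent.run(
    "Generate a transcript for a youtube video on what swarms are!"
)
print(out)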

@@ -0,0 +1,41 @@
+from swarms import Anthropic, Agent, SequentialWorkflow
+
+
+# Initialize the language model (Anthropic in this example)
+llm = Anthropic()
+
+# Initialize agents for individual tasks
+agent1 = Agent(
+    agent_name="Blog generator", llm=llm, max_loops=1, dashboard=False
+)
+agent2 = Agent(
+    agent_name="summarizer", llm=llm, max_loops=1, dashboard=False
+)
+
+# Create the Sequential workflow
+workflow = SequentialWorkflow(
+    max_loops=1, objective="Create a full blog and then summarize it"
+)
+
+# Add tasks to the workflow
+workflow.add(
+    "Generate a 10,000 word blog on health and wellness.", agent1
+)  # the first agent executes this task
+workflow.add(
+    "Summarize the generated blog", agent2
+)  # then the next agent handles this task
+
+# Run the workflow
+out = workflow.run()
+print(f"{out}")

@@ -81,4 +81,4 @@ __all__ = [
     "VideoModality",
     "TogetherLLM",
     "Vilt",
 ]

@@ -2,4 +2,4 @@ from langchain_community.embeddings.openai import OpenAIEmbeddings
 
 __all__ = [
     "OpenAIEmbeddings",
 ]

@@ -2,4 +2,4 @@ from langchain_community.llms.google_palm import GooglePalm
 
 __all__ = [
     "GooglePalm",
 ]

@@ -11,35 +11,54 @@ from langchain.llms.openai import OpenAI  # , OpenAIChat, AzureOpenAI
 from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
 from langchain.llms.replicate import Replicate
 
 
 class AnthropicChat(Anthropic):
     def __call__(self, *args, **kwargs):
         return self.invoke(*args, **kwargs)
 
+    def run(self, *args, **kwargs):
+        return self.invoke(*args, **kwargs)
+
 
 class CohereChat(Cohere):
     def __call__(self, *args, **kwargs):
         return self.invoke(*args, **kwargs)
 
+    def run(self, *args, **kwargs):
+        return self.invoke(*args, **kwargs)
+
 
 class MosaicMLChat(MosaicML):
     def __call__(self, *args, **kwargs):
         return self.invoke(*args, **kwargs)
 
+    def run(self, *args, **kwargs):
+        return self.invoke(*args, **kwargs)
+
 
 class OpenAILLM(OpenAI):
     def __call__(self, *args, **kwargs):
         return self.invoke(*args, **kwargs)
 
+    def run(self, *args, **kwargs):
+        return self.invoke(*args, **kwargs)
+
 
 class ReplicateChat(Replicate):
     def __call__(self, *args, **kwargs):
         return self.invoke(*args, **kwargs)
 
+    def run(self, *args, **kwargs):
+        return self.invoke(*args, **kwargs)
+
 
 class AzureOpenAILLM(AzureChatOpenAI):
     def __call__(self, *args, **kwargs):
         return self.invoke(*args, **kwargs)
 
+    def run(self, *args, **kwargs):
+        return self.invoke(*args, **kwargs)
+
 
 class OpenAIChatLLM(OpenAIChat):
     def __init__(self, *args, **kwargs):
@@ -48,7 +67,13 @@ class OpenAIChatLLM(OpenAIChat):
     def __call__(self, *args, **kwargs):
         return self.invoke(*args, **kwargs)
 
+    def run(self, *args, **kwargs):
+        return self.invoke(*args, **kwargs)
+
 
 class OctoAIChat(OctoAIEndpoint):
     def __call__(self, *args, **kwargs):
         return self.invoke(*args, **kwargs)
 
+    def run(self, *args, **kwargs):
+        return self.invoke(*args, **kwargs)
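Every addition in the two hunks above is the same thin alias: each LangChain-backed class gains a run method next to __call__, and both simply forward to invoke, so Swarms structures can drive any of these models through one uniform interface. A minimal sketch of the pattern in isolation, using LangChain's FakeListLLM purely as a stand-in so the example runs without API keys (the stand-in model is an assumption, not part of this diff):

from langchain_community.llms.fake import FakeListLLM


class FakeChat(FakeListLLM):
    # Same alias pattern as the classes above: both entry points delegate to invoke()
    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)


llm = FakeChat(responses=["hello from the swarm"])
print(llm.run("ping"))  # callers that expect .run() work
print(llm("ping"))      # callers that expect plain call syntax work too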

@@ -69,6 +69,12 @@ class Conversation(BaseStructure):
         save_filepath: str = None,
         tokenizer: Any = None,
         context_length: int = 8192,
+        rules: str = None,
+        custom_rules_prompt: str = None,
+        user: str = "User:",
+        auto_save: bool = True,
+        save_as_yaml: bool = True,
+        save_as_json: bool = False,
         *args,
         **kwargs,
     ):
@@ -81,11 +87,23 @@ class Conversation(BaseStructure):
         self.conversation_history = []
         self.tokenizer = tokenizer
         self.context_length = context_length
+        self.rules = rules
+        self.custom_rules_prompt = custom_rules_prompt
+        self.user = user
+        self.auto_save = auto_save
+        self.save_as_yaml = save_as_yaml
+        self.save_as_json = save_as_json
 
         # If system prompt is not None, add it to the conversation history
         if self.system_prompt is not None:
             self.add("System: ", self.system_prompt)
 
+        if self.rules is not None:
+            self.add(user, rules)
+
+        if custom_rules_prompt is not None:
+            self.add(user, custom_rules_prompt)
+
         # If tokenizer then truncate
         if tokenizer is not None:
             self.truncate_memory_with_tokenizer()
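The effect of the new parameters is that a Conversation can be fully seeded at construction time: the system prompt is added first, then rules, then custom_rules_prompt, each under the configured user role, before any real messages arrive. A rough usage sketch under that assumption (the system_prompt argument and return_history_as_string call are taken from elsewhere in the codebase, not from this hunk):

from swarms.structs.conversation import Conversation

convo = Conversation(
    system_prompt="You are a helpful research assistant.",
    rules="Always cite your sources.",               # stored and added to the history
    custom_rules_prompt="Answer in bullet points.",  # appended right after the rules
    user="User:",
)

# The pre-seeded history: system prompt, rules, then the custom rules prompt.
print(convo.return_history_as_string())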

@@ -1,203 +1,134 @@
-from dataclasses import dataclass
-from typing import Any, Dict, List, Optional
-from termcolor import colored
-
-# from swarms.utils.logger import logger
+from dataclasses import dataclass, field
+from typing import List, Optional
+
 from swarms.structs.agent import Agent
 from swarms.structs.conversation import Conversation
-from swarms.structs.task import Task
 from swarms.utils.loguru_logger import logger
+from swarms.utils.try_except_wrapper import try_except_wrapper
 
 
-# SequentialWorkflow class definition using dataclasses
 @dataclass
 class SequentialWorkflow:
-    """
-    SequentialWorkflow class for running a sequence of task_pool using N number of autonomous agents.
-
-    Args:
-        max_loops (int): The maximum number of times to run the workflow.
-        dashboard (bool): Whether to display the dashboard for the workflow.
-
-    Attributes:
-        task_pool (List[Task]): The list of task_pool to execute.
-        max_loops (int): The maximum number of times to run the workflow.
-        dashboard (bool): Whether to display the dashboard for the workflow.
-
-    Examples:
-    >>> from swarms.models import OpenAIChat
-    >>> from swarms.structs import SequentialWorkflow
-    >>> llm = OpenAIChat(openai_api_key="")
-    >>> workflow = SequentialWorkflow(max_loops=1)
-    >>> workflow.add("What's the weather in miami", llm)
-    >>> workflow.add("Create a report on these metrics", llm)
-    >>> workflow.run()
-    >>> workflow.task_pool
-    """
-
-    name: str = None
+    name: str = "Sequential Workflow"
     description: str = None
-    task_pool: List[Task] = None
+    objective: str = None
     max_loops: int = 1
     autosave: bool = False
     saved_state_filepath: Optional[str] = "sequential_workflow_state.json"
     restore_state_filepath: Optional[str] = None
     dashboard: bool = False
-    agents: List[Agent] = None
+    agent_pool: List[Agent] = field(default_factory=list)
+    # task_pool: List[str] = field(
+    #     default_factory=list
+    # )  # List to store tasks
 
     def __post_init__(self):
         self.conversation = Conversation(
-            system_prompt=f"Objective: {self.description}",
             time_enabled=True,
             autosave=True,
         )
 
-        # Logging
-        logger.info("Number of agents activated:")
-        if self.agents:
-            logger.info(f"Agents: {len(self.agents)}")
-        else:
-            logger.info("No agents activated.")
-
-        if self.task_pool:
-            logger.info(f"Task Pool Size: {len(self.task_pool)}")
-        else:
-            logger.info("Task Pool is empty.")
-
-    def add(
-        self,
-        task: Optional[Task] = None,
-        tasks: Optional[List[Task]] = None,
-        *args,
-        **kwargs,
-    ) -> None:
-        """
-        Add a task to the workflow.
-
-        Args:
-            agent (Union[Callable, Agent]): The model or agent to execute the task.
-            task (str): The task description or the initial input for the Agent.
-            *args: Additional arguments to pass to the task execution.
-            **kwargs: Additional keyword arguments to pass to the task execution.
-        """
-        for agent in self.agents:
-            out = agent(str(self.description))
-            self.conversation.add(agent.agent_name, out)
-            prompt = self.conversation.return_history_as_string()
-            out = agent(prompt)
-
-        return out
+        # If objective exists then set it
+        if self.objective is not None:
+            self.conversation.system_prompt = self.objective
+
+    def workflow_bootup(self):
+        logger.info(f"{self.name} is activating...")
+
+        for agent in self.agent_pool:
+            logger.info(f"Agent {agent.agent_name} Activated")
+
+    @try_except_wrapper
+    def add(self, task: str, agent: Agent, *args, **kwargs):
+        self.agent_pool.append(agent)
+        # self.task_pool.append(
+        #     task
+        # )  # Store tasks corresponding to each agent
+
+        return self.conversation.add(
+            role=agent.agent_name, content=task, *args, **kwargs
+        )
 
     def reset_workflow(self) -> None:
-        """Resets the workflow by clearing the results of each task."""
-        try:
-            for task in self.task_pool:
-                task.result = None
-            logger.info(
-                f"[INFO][SequentialWorkflow] Reset task {task} in"
-                " workflow"
-            )
-        except Exception as error:
-            logger.error(
-                colored(f"Error resetting workflow: {error}", "red"),
-            )
-
-    def get_task_results(self) -> Dict[str, Any]:
-        """
-        Returns the results of each task in the workflow.
-
-        Returns:
-            Dict[str, Any]: The results of each task in the workflow
-        """
-        try:
-            return {
-                task.description: task.result for task in self.task_pool
-            }
-        except Exception as error:
-            logger.error(
-                colored(f"Error getting task results: {error}", "red"),
-            )
-
-    def remove_task(self, task: Task) -> None:
-        """Remove task_pool from sequential workflow"""
-        try:
-            self.task_pool.remove(task)
-            logger.info(
-                f"[INFO][SequentialWorkflow] Removed task {task} from"
-                " workflow"
-            )
-        except Exception as error:
-            logger.error(
-                colored(
-                    f"Error removing task from workflow: {error}",
-                    "red",
-                ),
-            )
-
-    def run(self) -> None:
-        """
-        Run the workflow.
-
-        Raises:
-            ValueError: If an Agent instance is used as a task and the 'task' argument is not provided.
-        """
+        self.conversation = {}
+
+    # @try_except_wrapper
+    # WITH TASK POOL
+    # def run(self):
+    #     if not self.agent_pool:
+    #         raise ValueError("No agents have been added to the workflow.")
+
+    #     self.workflow_bootup()
+    #     loops = 0
+    #     prompt = None  # Initialize prompt to None; will be updated with the output of each agent
+    #     while loops < self.max_loops:
+    #         for i, agent in enumerate(self.agent_pool):
+    #             task = (
+    #                 self.task_pool[i] if prompt is None else prompt
+    #             )  # Use initial task or the output from the previous agent
+    #             logger.info(
+    #                 f"Agent: {agent.agent_name} {i+1} is executing the task"
+    #             )
+    #             logger.info("\n")
+    #             output = agent.run(task)
+    #             if output is None:
+    #                 logger.error(
+    #                     f"Agent {i+1} returned None for task: {task}"
+    #                 )
+    #                 raise ValueError(f"Agent {i+1} returned None.")
+    #             self.conversation.add(agent.agent_name, output)
+    #             prompt = output  # Update prompt with current agent's output to pass to the next agent
+    #             # logger.info(f"Prompt: {prompt}")
+    #         loops += 1
+    #     return self.conversation.return_history_as_string()
+
+    @try_except_wrapper
+    def run(self):
+        if not self.agent_pool:
+            raise ValueError("No agents have been added to the workflow.")
+
         self.workflow_bootup()
         loops = 0
         while loops < self.max_loops:
-            for i, agent in enumerate(self.agents):
-                logger.info(f"Agent {i+1} is executing the task.")
-                out = agent(self.description)
-                self.conversation.add(agent.agent_name, str(out))
-                prompt = self.conversation.return_history_as_string()
-                print(prompt)
-                print("Next agent...........")
-                out = agent(prompt)
-
-        return out
-
-        # try:
-        #     self.workflow_bootup()
-        #     loops = 0
-        #     while loops < self.max_loops:
-        #         for i in range(len(self.task_pool)):
-        #             task = self.task_pool[i]
-        #             # Check if the current task can be executed
-        #             if task.result is None:
-        #                 # Get the inputs for the current task
-        #                 task.context(task)
-        #                 result = task.execute()
-
-        #                 # Pass the inputs to the next task
-        #                 if i < len(self.task_pool) - 1:
-        #                     next_task = self.task_pool[i + 1]
-        #                     next_task.description = result
-
-        #                 # Execute the current task
-        #                 task.execute()
-
-        #                 # Autosave the workflow state
-        #                 if self.autosave:
-        #                     self.save_workflow_state(
-        #                         "sequential_workflow_state.json"
-        #                     )
-
-        #         self.workflow_shutdown()
-        #         loops += 1
-        # except Exception as e:
-        #     logger.error(
-        #         colored(
-        #             (
-        #                 "Error initializing the Sequential workflow:"
-        #                 f" {e} try optimizing your inputs like the"
-        #                 " agent class and task description"
-        #             ),
-        #             "red",
-        #             attrs=["bold", "underline"],
-        #         )
-        #     )
+            previous_output = None  # Initialize to None; will hold the output of the previous agent
+            for i, agent in enumerate(self.agent_pool):
+                # Fetch the last task specific to this agent from the conversation history
+                tasks_for_agent = [
+                    msg["content"]
+                    for msg in self.conversation.conversation_history
+                    if msg["role"] == agent.agent_name
+                ]
+                task = tasks_for_agent[-1] if tasks_for_agent else None
+
+                if task is None and previous_output is not None:
+                    # If no specific task for this agent, use the output from the previous agent
+                    task = previous_output
+
+                if task is None:
+                    # If no initial task is found, and there's no previous output, log error and skip this agent
+                    logger.error(
+                        f"No initial task found for agent {agent.agent_name}, and no previous output to use."
+                    )
+                    continue
+
+                logger.info(
+                    f" \n Agent {i+1} ({agent.agent_name}) is executing the task: {task} \n"
+                )
+
+                # Space the log
+
+                output = agent.run(task)
+                if output is None:
+                    logger.error(
+                        f"Agent {agent.agent_name} returned None for task: {task}"
+                    )
+                    raise ValueError(
+                        f"Agent {agent.agent_name} returned None."
+                    )
+
+                # Update the conversation history with the new output using agent's role
+                self.conversation.add(
+                    role=agent.agent_name, content=output
+                )
+                previous_output = output  # Update the previous_output to pass to the next agent
+
+            loops += 1
+        return self.conversation.return_history_as_string()
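The core of the rewritten run() is its task-resolution rule: an agent's task is the most recent message stored under its own role (whatever was registered for it via add()), and if none exists the previous agent's output is used instead. The standalone sketch below isolates that rule, with plain dicts standing in for conversation history entries; it mirrors the loop above but is not the library's own code:

from typing import Dict, List, Optional


def resolve_task(
    history: List[Dict[str, str]],
    agent_name: str,
    previous_output: Optional[str],
) -> Optional[str]:
    # Last message recorded under this agent's role, if any
    tasks_for_agent = [
        msg["content"] for msg in history if msg["role"] == agent_name
    ]
    if tasks_for_agent:
        return tasks_for_agent[-1]
    # Otherwise chain the previous agent's output (may still be None)
    return previous_output


history = [{"role": "Blog generator", "content": "Write a blog on health."}]
print(resolve_task(history, "Blog generator", None))         # -> the stored task
print(resolve_task(history, "summarizer", "draft text..."))  # -> "draft text..."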

@@ -1,4 +1,4 @@
-from swarms.utils.logger import logger
+from swarms.utils.loguru_logger import logger
 
 
 def try_except_wrapper(func, verbose: bool = False):
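This hunk only swaps the logger import; the decorator itself is what wraps add() and run() in the workflow above. As a rough illustration of what a decorator with this signature might do (a sketch under that assumption, not the library's actual body):

import functools

from loguru import logger


def try_except_wrapper(func, verbose: bool = False):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as error:
            # Report the failure instead of letting it crash the caller
            if verbose:
                logger.exception(f"{func.__name__} failed: {error}")
            else:
                logger.error(f"{func.__name__} failed: {error}")
            return None

    return wrapper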
