pull/439/head
Kye 9 months ago
parent 4fbd2281f4
commit d572cafce6

@ -147,7 +147,6 @@ nav:
- Foundational Structures:
- Agent: "swarms/structs/agent.md"
- basestructure: "swarms/structs/basestructure.md"
- artifactupload: "swarms/structs/artifactupload.md"
- taskinput: "swarms/structs/taskinput.md"
- stepinput: "swarms/structs/stepinput.md"
- artifact: "swarms/structs/artifact.md"

@ -1,17 +1,8 @@
![Swarming banner icon](images/swarmslogobanner.png)
<div align="center">
# Swarms
Orchestrate swarms of agents for production-grade applications.
[![GitHub issues](https://img.shields.io/github/issues/kyegomez/swarms)](https://github.com/kyegomez/swarms/issues) [![GitHub forks](https://img.shields.io/github/forks/kyegomez/swarms)](https://github.com/kyegomez/swarms/network) [![GitHub stars](https://img.shields.io/github/stars/kyegomez/swarms)](https://github.com/kyegomez/swarms/stargazers) [![GitHub license](https://img.shields.io/github/license/kyegomez/swarms)](https://github.com/kyegomez/swarms/blob/main/LICENSE)[![GitHub star chart](https://img.shields.io/github/stars/kyegomez/swarms?style=social)](https://star-history.com/#kyegomez/swarms)[![Dependency Status](https://img.shields.io/librariesio/github/kyegomez/swarms)](https://libraries.io/github/kyegomez/swarms) [![Downloads](https://static.pepy.tech/badge/swarms/month)](https://pepy.tech/project/swarms)
[![Join the Agora discord](https://img.shields.io/discord/1110910277110743103?label=Discord&logo=discord&logoColor=white&style=plastic&color=d7b023)![Share on Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Share%20%40kyegomez/swarms)](https://twitter.com/intent/tweet?text=Check%20out%20this%20amazing%20AI%20project:%20&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms) [![Share on Facebook](https://img.shields.io/badge/Share-%20facebook-blue)](https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms) [![Share on LinkedIn](https://img.shields.io/badge/Share-%20linkedin-blue)](https://www.linkedin.com/shareArticle?mini=true&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&title=&summary=&source=)
[![Share on Reddit](https://img.shields.io/badge/-Share%20on%20Reddit-orange)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&title=Swarms%20-%20the%20future%20of%20AI) [![Share on Hacker News](https://img.shields.io/badge/-Share%20on%20Hacker%20News-orange)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&t=Swarms%20-%20the%20future%20of%20AI) [![Share on Pinterest](https://img.shields.io/badge/-Share%20on%20Pinterest-red)](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&media=https%3A%2F%2Fexample.com%2Fimage.jpg&description=Swarms%20-%20the%20future%20of%20AI) [![Share on WhatsApp](https://img.shields.io/badge/-Share%20on%20WhatsApp-green)](https://api.whatsapp.com/send?text=Check%20out%20Swarms%20-%20the%20future%20of%20AI%20%23swarms%20%23AI%0A%0Ahttps%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms)
</div>
Individual agents face five significant challenges that hinder their deployment in production: short memory, single-task threading, hallucinations, high cost, and lack of collaboration. Multi-agent collaboration addresses all of these issues. Swarms provides simple, reliable, and agile tools to create your own Swarm tailored to your specific needs. Swarms is currently used in production by RBC, John Deere, and many AI startups. For more on the benefits of multi-agent collaboration, check out this GitHub repository of research papers or schedule a call with me!
----

@ -20,7 +20,9 @@ agent = Agent(
)
# Run the Agent on a task
out = agent.run("Generate a transcript for a youtube video on what swarms are!")
out = agent.run(
    "Generate a transcript for a youtube video on what swarms are!"
)
print(out)
# Save the state

@ -0,0 +1,41 @@
from swarms import Anthropic, Agent, SequentialWorkflow

# Initialize the language model (Anthropic)
llm = Anthropic()

# Initialize agents for the individual tasks
agent1 = Agent(
    agent_name="Blog generator", llm=llm, max_loops=1, dashboard=False
)
agent2 = Agent(
    agent_name="summarizer", llm=llm, max_loops=1, dashboard=False
)

# Create the sequential workflow
workflow = SequentialWorkflow(
    max_loops=1, objective="Create a full blog and then summarize it"
)

# Add tasks to the workflow
workflow.add(
    "Generate a 10,000 word blog on health and wellness.", agent1
)  # this task is executed first
workflow.add(
    "Summarize the generated blog", agent2
)  # then the summarizer agent handles this task

# Run the workflow and print the result
out = workflow.run()
print(f"{out}")

@ -81,4 +81,4 @@ __all__ = [
"VideoModality",
"TogetherLLM",
"Vilt",
]
]

@ -2,4 +2,4 @@ from langchain_community.embeddings.openai import OpenAIEmbeddings
__all__ = [
"OpenAIEmbeddings",
]
]

@ -2,4 +2,4 @@ from langchain_community.llms.google_palm import GooglePalm
__all__ = [
"GooglePalm",
]
]

@ -11,35 +11,54 @@ from langchain.llms.openai import OpenAI # , OpenAIChat, AzureOpenAI
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
from langchain.llms.replicate import Replicate
class AnthropicChat(Anthropic):
    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)


class CohereChat(Cohere):
    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)


class MosaicMLChat(MosaicML):
    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)


class OpenAILLM(OpenAI):
    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)


class ReplicateChat(Replicate):
    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)


class AzureOpenAILLM(AzureChatOpenAI):
    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)


class OpenAIChatLLM(OpenAIChat):
    def __init__(self, *args, **kwargs):
@ -48,7 +67,13 @@ class OpenAIChatLLM(OpenAIChat):
    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)


class OctoAIChat(OctoAIEndpoint):
    def __call__(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.invoke(*args, **kwargs)
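Each of these thin subclasses routes both `__call__` and `run` to LangChain's `invoke`, so Swarms agents can treat any of them as a plain callable or call `.run()` interchangeably. A minimal usage sketch, assuming an `ANTHROPIC_API_KEY` is set in the environment and that `AnthropicChat` is importable from this module (the import path shown here is an assumption, not part of this diff):

from swarms.models.popular_llms import AnthropicChat  # assumed module path

llm = AnthropicChat()

# Both spellings hit the same LangChain `invoke` under the hood.
print(llm("Give one sentence on why agent swarms help with long-running tasks."))
print(llm.run("Give one sentence on why agent swarms help with long-running tasks."))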

@ -69,6 +69,12 @@ class Conversation(BaseStructure):
        save_filepath: str = None,
        tokenizer: Any = None,
        context_length: int = 8192,
        rules: str = None,
        custom_rules_prompt: str = None,
        user: str = "User:",
        auto_save: bool = True,
        save_as_yaml: bool = True,
        save_as_json: bool = False,
        *args,
        **kwargs,
    ):
@ -81,11 +87,23 @@ class Conversation(BaseStructure):
        self.conversation_history = []
        self.tokenizer = tokenizer
        self.context_length = context_length
        self.rules = rules
        self.custom_rules_prompt = custom_rules_prompt
        self.user = user
        self.auto_save = auto_save
        self.save_as_yaml = save_as_yaml
        self.save_as_json = save_as_json

        # If a system prompt is provided, add it to the conversation history
        if self.system_prompt is not None:
            self.add("System: ", self.system_prompt)

        if self.rules is not None:
            self.add(user, rules)

        if custom_rules_prompt is not None:
            self.add(user, custom_rules_prompt)

        # If a tokenizer is provided, truncate the stored memory
        if tokenizer is not None:
            self.truncate_memory_with_tokenizer()
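A minimal construction sketch for the new parameters (names are taken from the hunk above; the import path and the `system_prompt` argument are assumed from surrounding code that this hunk does not show):

from swarms.structs.conversation import Conversation

convo = Conversation(
    system_prompt="You are a concise research assistant.",
    rules="Cite sources. Keep every reply under 200 words.",
    user="Researcher:",
    context_length=8192,
)

# The system prompt and rules are seeded into the history at construction time,
# so the first real turn already carries the ground rules.
convo.add("Researcher:", "Summarize recent work on swarm robotics.")
print(convo.return_history_as_string())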

@ -1,203 +1,134 @@
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from termcolor import colored
# from swarms.utils.logger import logger
from dataclasses import dataclass, field
from typing import List, Optional
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.structs.task import Task
from swarms.utils.loguru_logger import logger
from swarms.utils.try_except_wrapper import try_except_wrapper
# SequentialWorkflow class definition using dataclasses
@dataclass
class SequentialWorkflow:
"""
SequentialWorkflow class for running a sequence of tasks through a pool of autonomous agents, one agent after another.
Args:
max_loops (int): The maximum number of times to run the workflow.
dashboard (bool): Whether to display the dashboard for the workflow.
Attributes:
task_pool (List[Task]): The list of tasks to execute.
max_loops (int): The maximum number of times to run the workflow.
dashboard (bool): Whether to display the dashboard for the workflow.
Examples:
>>> from swarms.models import OpenAIChat
>>> from swarms.structs import SequentialWorkflow
>>> llm = OpenAIChat(openai_api_key="")
>>> workflow = SequentialWorkflow(max_loops=1)
>>> workflow.add("What's the weather in miami", llm)
>>> workflow.add("Create a report on these metrics", llm)
>>> workflow.run()
>>> workflow.task_pool
"""
name: str = None
name: str = "Sequential Workflow"
description: str = None
task_pool: List[Task] = None
objective: str = None
max_loops: int = 1
autosave: bool = False
saved_state_filepath: Optional[str] = "sequential_workflow_state.json"
restore_state_filepath: Optional[str] = None
dashboard: bool = False
agents: List[Agent] = None
agent_pool: List[Agent] = field(default_factory=list)
# task_pool: List[str] = field(
# default_factory=list
# ) # List to store tasks
def __post_init__(self):
self.conversation = Conversation(
system_prompt=f"Objective: {self.description}",
time_enabled=True,
autosave=True,
)
# Logging
logger.info("Number of agents activated:")
if self.agents:
logger.info(f"Agents: {len(self.agents)}")
else:
logger.info("No agents activated.")
if self.task_pool:
logger.info(f"Task Pool Size: {len(self.task_pool)}")
else:
logger.info("Task Pool is empty.")
def add(
self,
task: Optional[Task] = None,
tasks: Optional[List[Task]] = None,
*args,
**kwargs,
) -> None:
"""
Add a task to the workflow.
Args:
agent (Union[Callable, Agent]): The model or agent to execute the task.
task (str): The task description or the initial input for the Agent.
*args: Additional arguments to pass to the task execution.
**kwargs: Additional keyword arguments to pass to the task execution.
"""
for agent in self.agents:
out = agent(str(self.description))
self.conversation.add(agent.agent_name, out)
prompt = self.conversation.return_history_as_string()
out = agent(prompt)
return out
# If objective exists then set it
if self.objective is not None:
self.conversation.system_prompt = self.objective
def workflow_bootup(self):
logger.info(f"{self.name} is activating...")
for agent in self.agent_pool:
logger.info(f"Agent {agent.agent_name} Activated")
@try_except_wrapper
def add(self, task: str, agent: Agent, *args, **kwargs):
self.agent_pool.append(agent)
# self.task_pool.append(
# task
# ) # Store tasks corresponding to each agent
return self.conversation.add(
role=agent.agent_name, content=task, *args, **kwargs
)
def reset_workflow(self) -> None:
"""Resets the workflow by clearing the results of each task."""
try:
for task in self.task_pool:
task.result = None
logger.info(
f"[INFO][SequentialWorkflow] Reset task {task} in"
" workflow"
)
except Exception as error:
logger.error(
colored(f"Error resetting workflow: {error}", "red"),
)
def get_task_results(self) -> Dict[str, Any]:
"""
Returns the results of each task in the workflow.
Returns:
Dict[str, Any]: The results of each task in the workflow
"""
try:
return {
task.description: task.result for task in self.task_pool
}
except Exception as error:
logger.error(
colored(f"Error getting task results: {error}", "red"),
)
def remove_task(self, task: Task) -> None:
"""Remove task_pool from sequential workflow"""
try:
self.task_pool.remove(task)
logger.info(
f"[INFO][SequentialWorkflow] Removed task {task} from"
" workflow"
)
except Exception as error:
logger.error(
colored(
f"Error removing task from workflow: {error}",
"red",
),
)
def run(self) -> None:
"""
Run the workflow.
Raises:
ValueError: If an Agent instance is used as a task and the 'task' argument is not provided.
"""
self.conversation = {}
# @try_except_wrapper
# WITH TASK POOL
# def run(self):
# if not self.agent_pool:
# raise ValueError("No agents have been added to the workflow.")
# self.workflow_bootup()
# loops = 0
# prompt = None # Initialize prompt to None; will be updated with the output of each agent
# while loops < self.max_loops:
# for i, agent in enumerate(self.agent_pool):
# task = (
# self.task_pool[i] if prompt is None else prompt
# ) # Use initial task or the output from the previous agent
# logger.info(
# f"Agent: {agent.agent_name} {i+1} is executing the task"
# )
# logger.info("\n")
# output = agent.run(task)
# if output is None:
# logger.error(
# f"Agent {i+1} returned None for task: {task}"
# )
# raise ValueError(f"Agent {i+1} returned None.")
# self.conversation.add(agent.agent_name, output)
# prompt = output # Update prompt with current agent's output to pass to the next agent
# logger.info(f"Prompt: {prompt}")
# loops += 1
# return self.conversation.return_history_as_string()
@try_except_wrapper
def run(self):
if not self.agent_pool:
raise ValueError("No agents have been added to the workflow.")
self.workflow_bootup()
loops = 0
while loops < self.max_loops:
for i, agent in enumerate(self.agents):
logger.info(f"Agent {i+1} is executing the task.")
out = agent(self.description)
self.conversation.add(agent.agent_name, str(out))
prompt = self.conversation.return_history_as_string()
print(prompt)
print("Next agent...........")
out = agent(prompt)
return out
# try:
# self.workflow_bootup()
# loops = 0
# while loops < self.max_loops:
# for i in range(len(self.task_pool)):
# task = self.task_pool[i]
# # Check if the current task can be executed
# if task.result is None:
# # Get the inputs for the current task
# task.context(task)
# result = task.execute()
# # Pass the inputs to the next task
# if i < len(self.task_pool) - 1:
# next_task = self.task_pool[i + 1]
# next_task.description = result
# # Execute the current task
# task.execute()
# # Autosave the workflow state
# if self.autosave:
# self.save_workflow_state(
# "sequential_workflow_state.json"
# )
# self.workflow_shutdown()
# loops += 1
# except Exception as e:
# logger.error(
# colored(
# (
# "Error initializing the Sequential workflow:"
# f" {e} try optimizing your inputs like the"
# " agent class and task description"
# ),
# "red",
# attrs=["bold", "underline"],
# )
# )
    previous_output = None  # Initialize to None; will hold the output of the previous agent
    for i, agent in enumerate(self.agent_pool):
        # Fetch the last task specific to this agent from the conversation history
        tasks_for_agent = [
            msg["content"]
            for msg in self.conversation.conversation_history
            if msg["role"] == agent.agent_name
        ]
        task = tasks_for_agent[-1] if tasks_for_agent else None

        if task is None and previous_output is not None:
            # If no specific task for this agent, use the output from the previous agent
            task = previous_output

        if task is None:
            # If no initial task is found, and there's no previous output, log error and skip this agent
            logger.error(
                f"No initial task found for agent {agent.agent_name}, and no previous output to use."
            )
            continue

        # Space the log
        logger.info(
            f" \n Agent {i+1} ({agent.agent_name}) is executing the task: {task} \n"
        )

        output = agent.run(task)
        if output is None:
            logger.error(
                f"Agent {agent.agent_name} returned None for task: {task}"
            )
            raise ValueError(
                f"Agent {agent.agent_name} returned None."
            )

        # Update the conversation history with the new output using agent's role
        self.conversation.add(
            role=agent.agent_name, content=output
        )
        previous_output = output  # Update the previous_output to pass to the next agent

    loops += 1
return self.conversation.return_history_as_string()
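To make the hand-off rule in the loop above easy to see at a glance, here is a dependency-free restatement: each agent runs the last task recorded under its own role, and falls back to the previous agent's output when nothing was seeded for it. The stub agent below is hypothetical and only mirrors the `agent_name` / `run()` surface the loop relies on; it is a sketch of the logic, not the library's implementation.

from typing import List, Optional


class StubAgent:
    def __init__(self, agent_name: str):
        self.agent_name = agent_name

    def run(self, task: str) -> str:
        return f"[{self.agent_name}] {task}"


def run_sequence(agent_pool: List[StubAgent], history: List[dict]) -> List[dict]:
    previous_output: Optional[str] = None
    for agent in agent_pool:
        # Last message recorded under this agent's role, if any
        seeded = [m["content"] for m in history if m["role"] == agent.agent_name]
        task = seeded[-1] if seeded else previous_output
        if task is None:
            continue  # nothing seeded and nothing to chain from; skip this agent
        output = agent.run(task)
        history.append({"role": agent.agent_name, "content": output})
        previous_output = output
    return history


# Only the first agent has a seeded task; the second chains from the first's output.
history = [{"role": "outliner", "content": "Draft an outline on swarm intelligence"}]
for msg in run_sequence([StubAgent("outliner"), StubAgent("writer")], history):
    print(f"{msg['role']}: {msg['content']}")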

@ -1,4 +1,4 @@
from swarms.utils.logger import logger
from swarms.utils.loguru_logger import logger
def try_except_wrapper(func, verbose: bool = False):
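This hunk only swaps the wrapper's logging over to loguru. For context, a minimal usage sketch in the style of the `@try_except_wrapper` decorations on `SequentialWorkflow.add`/`run` earlier in this diff; the exact catch-and-log behaviour is an assumption, since the wrapper's body is not shown here:

from swarms.utils.try_except_wrapper import try_except_wrapper


@try_except_wrapper
def flaky_step(x: int) -> int:
    return 10 // x


flaky_step(0)  # the ZeroDivisionError is presumably caught and logged via loguru rather than raised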
