parent 035e5b81e6
commit ad58b388ea
@@ -0,0 +1,17 @@
# Available Models

```markdown

| Model Name              | Description                                                                                    | Input Price         | Output Price        | Use Cases                                                                |
|--------------------------|------------------------------------------------------------------------------------------------|---------------------|---------------------|--------------------------------------------------------------------------|
| **Llama3-70b**           | Llama 3 is an auto-regressive language model that uses an optimized transformer architecture.   | $0.80/1M Tokens     | $1.60/1M Tokens     | General natural language processing tasks.                               |
| **Llava-Internlm2-20b**  | LLaVA model fine-tuned from InternLM2-Chat-20B and CLIP-ViT-Large-patch14-336.                   | Contact for pricing | Contact for pricing | Enhanced language understanding integrated with visual processing.       |
| **Llama-3-Giraffe-70B**  | Abacus.AI's long-context ("longer-necked") variant of Llama 3 70B.                               | $1/1M Tokens        | $2/1M Tokens        | Extensive natural language tasks with a focus on depth and efficiency.   |
| **Qwen-vl**              | Qwen VL for real-world multi-modal function calling.                                             | $5/1M Tokens        | $10/1M Tokens       | Multi-modal interactions and function handling in complex environments.  |
| **XComposer2-4khd-7b**   | One of the highest performing VLMs (Vision Language Models).                                     | $4/1M Tokens        | $8/1M Tokens        | High-resolution image processing and understanding.                      |
| **Llava-Llama-3**        | Llama 3 with multi-modal processing.                                                             | $5/1M Tokens        | $10/1M Tokens       | Advanced multi-modal scenarios involving language and image processing.  |
| **cogvlm-chat-17b**      | Multimodal model designed to understand and reason about visual elements in images.             | $5/1M Tokens        | $10/1M Tokens       | Image-based chatbots and interactive systems.                            |
```


## What models should we add?

[Book a call with us to learn more about your needs:](https://calendly.com/swarm-corp/30min)
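If these models are served behind an OpenAI-compatible chat endpoint (an assumption; the table above does not name a client), a request could look like the sketch below. The base URL, API key, and exact model identifier are placeholders, not confirmed values.

```python
# Minimal sketch, assuming an OpenAI-compatible endpoint hosts the models above.
# The base_url, api_key, and model id are placeholders.
from openai import OpenAI

client = OpenAI(
    base_url="https://api.example-host.com/v1",  # hypothetical endpoint
    api_key="YOUR_API_KEY",
)

response = client.chat.completions.create(
    model="Llama3-70b",  # a model name from the table above
    messages=[{"role": "user", "content": "Explain what an agent swarm is."}],
)
print(response.choices[0].message.content)
```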
@@ -0,0 +1,68 @@
from pydantic import BaseModel, Field

from swarms import Agent
from swarms.models.popular_llms import Anthropic
from swarms.tools.openai_tool_creator_decorator import tool


# Define the search API tool
@tool
def search_api(query: str) -> str:
    """
    This tool searches the web for information about COVID-19 symptoms.
    """
    return f"Search API tool called with query: {query}"


print(search_api("COVID-19 symptoms"))


# Define the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(..., title="Whether the person is a student")
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )


# Create an example instance of the schema
tool_schema = Schema(
    name="Tool Name",
    age=1,
    is_student=True,
    courses=["Course1", "Course2"],
)

# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"

# Initialize the agent
agent = Agent(
    agent_name="WeatherMan Agent",
    # Set the tool schema to the BaseModel instance -- this is the key difference
    tool_schema=tool_schema,
    llm=Anthropic(),
    max_loops=3,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    tools=[],  # or list of tools
    verbose=True,
    interactive=True,
    # Set the output type to the tool schema which is a BaseModel
    output_type=tool_schema,  # or dict, or str
    metadata_output_type="json",
    # List of schemas that the agent can handle
    list_tool_schemas=[tool_schema],
    function_calling_format_type="OpenAI",
    function_calling_type="json",  # or soon yaml
    execute_tool=True,
)

# Run the agent to generate the person's information
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")
@@ -1,41 +1,33 @@
from swarms import Agent, SequentialWorkflow, Anthropic


# Initialize the language model
llm = Anthropic()


# Initialize agents for individual tasks
agent1 = Agent(
    agent_name="Blog generator",
    system_prompt="Generate a blog post like Stephen King",
    llm=llm,
    max_loops=1,
    dashboard=False,
    tools=[],
)
agent2 = Agent(
    agent_name="summarizer",
    system_prompt="Summarize the blog post",
    llm=llm,
    max_loops=1,
    dashboard=False,
    tools=[],
)


# Create the Sequential workflow
workflow = SequentialWorkflow(
    agents=[agent1, agent2], max_loops=1, verbose=False
)

# Run the workflow
out = workflow.run(
    "Generate a blog post on how swarms of agents can help businesses grow."
)
print(out)
@@ -1,214 +0,0 @@
import json
from typing import List

from swarms.tools.tool import BaseTool

FINISH_NAME = "finish"


class SchemaGenerator:
    """A class for generating custom prompt strings.

    Does this based on constraints, commands, resources, and performance evaluations.

    Attributes:
        constraints (List[str]): A list of constraints.
        commands (List[BaseTool]): A list of commands.
        resources (List[str]): A list of resources.
        performance_evaluation (List[str]): A list of performance evaluations.
        response_format (dict): A dictionary of the response format.

    Examples:
        >>> schema_generator = SchemaGenerator()
        >>> schema_generator.add_constraint("No user assistance")
        >>> schema_generator.add_resource("Internet access for searches and information gathering.")
        >>> schema_generator.add_performance_evaluation("Continuously review and analyze your actions to ensure you are performing to the best of your abilities.")
        >>> prompt_string = schema_generator.generate_prompt_string()
        >>> print(prompt_string)
    """

    def __init__(self) -> None:
        """Initialize the SchemaGenerator object.

        Starts with empty lists of constraints, commands, resources,
        and performance evaluations.
        """
        self.constraints: List[str] = []
        self.commands: List[BaseTool] = []
        self.resources: List[str] = []
        self.performance_evaluation: List[str] = []
        self.response_format = {
            "thoughts": {
                "text": "thought",
                "reasoning": "reasoning",
                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
                "criticism": "constructive self-criticism",
                "speak": "thoughts summary to say to user",
            },
            "command": {
                "name": "command name",
                "args": {"arg name": "value"},
            },
        }

    def add_constraint(self, constraint: str) -> None:
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_tool(self, tool: BaseTool) -> None:
        self.commands.append(tool)

    def _generate_command_string(self, tool: BaseTool) -> str:
        output = f"{tool.name}: {tool.description}"
        output += f", args json schema: {json.dumps(tool.args)}"
        return output

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation: str) -> None:
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(
        self, items: list, item_type: str = "list"
    ) -> str:
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list.
                Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == "command":
            command_strings = [
                f"{i + 1}. {self._generate_command_string(item)}"
                for i, item in enumerate(items)
            ]
            finish_description = (
                "use this to signal that you have finished all your objectives"
            )
            finish_args = (
                '"response": "final response to let '
                'people know you have finished your objectives"'
            )
            finish_string = (
                f"{len(items) + 1}. {FINISH_NAME}: "
                f"{finish_description}, args: {finish_args}"
            )
            return "\n".join(command_strings + [finish_string])
        else:
            return "\n".join(
                f"{i + 1}. {item}" for i, item in enumerate(items)
            )

    def generate_prompt_string(self) -> str:
        """Generate a prompt string.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        prompt_string = (
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            f"Commands:\n{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            f"Performance Evaluation:\n{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            "You should only respond in JSON format as described below"
            " \nResponse Format:"
            f" \n{formatted_response_format} \nEnsure the response"
            " can be parsed by Python json.loads"
        )

        return prompt_string


def get_prompt(tools: List[BaseTool]) -> str:
    """Generates a prompt string.

    It includes various constraints, commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
    """
    # Initialize the SchemaGenerator object
    schema_generator = SchemaGenerator()

    # Add constraints to the SchemaGenerator object
    schema_generator.add_constraint(
        "~4000 word limit for short term memory. "
        "Your short term memory is short, "
        "so immediately save important information to files."
    )
    schema_generator.add_constraint(
        "If you are unsure how you previously did something "
        "or want to recall past events, "
        "thinking about similar events will help you remember."
    )
    schema_generator.add_constraint("No user assistance")
    schema_generator.add_constraint(
        'Exclusively use the commands listed in double quotes e.g. "command name"'
    )

    # Add commands to the SchemaGenerator object
    for tool in tools:
        schema_generator.add_tool(tool)

    # Add resources to the SchemaGenerator object
    schema_generator.add_resource(
        "Internet access for searches and information gathering."
    )
    schema_generator.add_resource("Long Term memory management.")
    schema_generator.add_resource(
        "GPT-3.5 powered Agents for delegation of simple tasks."
    )
    schema_generator.add_resource("File output.")

    # Add performance evaluations to the SchemaGenerator object
    schema_generator.add_performance_evaluation(
        "Continuously review and analyze your actions "
        "to ensure you are performing to the best of your abilities."
    )
    schema_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
    )
    schema_generator.add_performance_evaluation(
        "Reflect on past decisions and strategies to refine your approach."
    )
    schema_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. "
        "Aim to complete tasks in the least number of steps."
    )

    # Generate the prompt string
    prompt_string = schema_generator.generate_prompt_string()

    return prompt_string
@@ -1,231 +0,0 @@
import logging
from collections import defaultdict
from typing import Callable, Sequence
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm


# Assuming the existence of an appropriate Agent class and logger setup
class AgentRearrange(BaseSwarm):
    def __init__(
        self,
        agents: Sequence[Agent] = None,
        verbose: bool = False,
        custom_prompt: str = None,
        callbacks: Sequence[Callable] = None,
        *args,
        **kwargs,
    ):
        super().__init__()
        if not all(isinstance(agent, Agent) for agent in agents):
            raise ValueError(
                "All elements must be instances of the Agent class."
            )
        self.agents = agents
        self.verbose = verbose
        self.custom_prompt = custom_prompt
        self.callbacks = callbacks if callbacks is not None else []
        self.flows = defaultdict(list)

    def parse_pattern(self, pattern: str):
        """
        Parse the interaction pattern to set up task flows, supporting both sequential
        and concurrent executions within the same pattern.
        """
        try:
            self.flows.clear()  # Ensure flows are reset each time pattern is parsed
            # Split pattern into potentially concurrent flows
            concurrent_flows = pattern.split(",")
            for flow in concurrent_flows:
                # Trim whitespace and identify sequential parts within each concurrent flow
                parts = [part.strip() for part in flow.split("->")]
                if len(parts) > 1:
                    # Link each part sequentially to the next as source -> destination
                    for i in range(len(parts) - 1):
                        source = parts[i]
                        destination = parts[i + 1]
                        # Validate and add each sequential link
                        if source not in [
                            agent.agent_name for agent in self.agents
                        ]:
                            logging.error(f"Source agent {source} not found.")
                            return False
                        if destination not in [
                            agent.agent_name for agent in self.agents
                        ]:
                            logging.error(
                                f"Destination agent {destination} not found."
                            )
                            return False
                        self.flows[source].append(destination)
                else:
                    # Handle single agent case if needed
                    self.flows[parts[0]] = []

            return True
        except Exception as e:
            logging.error(f"Error parsing pattern: {e}")
            return False

    def self_find_agent_by_name(self, name: str):
        for agent in self.agents:
            if agent.agent_name == name:
                return agent
        return None

    def agent_exists(self, name: str):
        for agent in self.agents:
            if agent.agent_name == name:
                return True

        return False

    def parse_concurrent_flow(
        self,
        flow: str,
    ):
        sequential_agents = flow.split("->")
        for i, source_name in enumerate(sequential_agents[:-1]):
            destination_name = sequential_agents[i + 1].strip()
            self.parse_sequential_flow(
                source_name.strip(), destination_name
            )

    def parse_sequential_flow(
        self,
        source: str,
        destination: str,
    ):
        if not self.self_find_agent_by_name(
            source
        ) or not self.self_find_agent_by_name(destination):
            return False
        self.flows[source].append(destination)

    def execute_task(
        self,
        dest_agent_name: str,
        source: str,
        task: str,
        specific_tasks: dict,
    ):
        dest_agent = self.self_find_agent_by_name(dest_agent_name)
        if not dest_agent:
            return None
        task_to_run = specific_tasks.get(dest_agent_name, task)
        if self.custom_prompt:
            out = dest_agent.run(f"{task_to_run} {self.custom_prompt}")
        else:
            out = dest_agent.run(f"{task_to_run} (from {source})")
        return out

    def process_flows(self, pattern, default_task, specific_tasks):
        if not self.parse_pattern(pattern):
            return None

        results = []
        for source, destinations in self.flows.items():
            if not destinations:
                task = specific_tasks.get(source, default_task)
                source_agent = self.self_find_agent_by_name(source)
                if source_agent:
                    result = source_agent.run(task)
                    results.append(result)
            else:
                for destination in destinations:
                    task = specific_tasks.get(destination, default_task)
                    destination_agent = self.self_find_agent_by_name(destination)
                    if destination_agent:
                        result = destination_agent.run(task)
                        results.append(result)
        return results

    def __call__(
        self,
        pattern: str = None,
        default_task: str = None,
        **specific_tasks,
    ):
        self.flows.clear()  # Reset previous flows
        results = self.process_flows(pattern, default_task, specific_tasks)
        return results


# ## Initialize the workflow
# agent = Agent(
#     agent_name="t",
#     agent_description="Generate a transcript for a youtube video on what swarms are!",
#     system_prompt="Generate a transcript for a youtube video on what swarms are!",
#     llm=Anthropic(),
#     max_loops=1,
#     autosave=True,
#     dashboard=False,
#     streaming_on=True,
#     verbose=True,
#     stopping_token="<DONE>",
# )

# agent2 = Agent(
#     agent_name="t1",
#     agent_description="Generate a transcript for a youtube video on what swarms are!",
#     llm=Anthropic(),
#     max_loops=1,
#     system_prompt="Summarize the transcript",
#     autosave=True,
#     dashboard=False,
#     streaming_on=True,
#     verbose=True,
#     stopping_token="<DONE>",
# )

# agent3 = Agent(
#     agent_name="t2",
#     agent_description="Generate a transcript for a youtube video on what swarms are!",
#     llm=Anthropic(),
#     max_loops=1,
#     system_prompt="Finalize the transcript",
#     autosave=True,
#     dashboard=False,
#     streaming_on=True,
#     verbose=True,
#     stopping_token="<DONE>",
# )


# # Rearrange the agents
# rearrange = AgentRearrange(
#     agents=[agent, agent2, agent3],
#     verbose=True,
#     # custom_prompt="Summarize the transcript",
# )

# # Run the workflow on a task
# results = rearrange(
#     # pattern="t -> t1, t2 -> t2",
#     pattern="t -> t1 -> t2",
#     default_task="Generate a transcript for a YouTube video on what swarms are!",
#     t="Generate a transcript for a YouTube video on what swarms are!",
#     # t2="Summarize the transcript",
#     # t3="Finalize the transcript",
# )
# # print(results)
@@ -1,148 +1,223 @@
from swarms import Agent
from typing import List
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.loguru_logger import logger


class AgentRearrange(BaseSwarm):
    """
    A class representing a swarm of agents for rearranging tasks.

    Attributes:
        agents (dict): A dictionary of agents, where the key is the agent's name and the value is the agent object.
        flow (str): The flow pattern of the tasks.

    Methods:
        __init__(agents: List[Agent] = None, flow: str = None): Initializes the AgentRearrange object.
        add_agent(agent: Agent): Adds an agent to the swarm.
        remove_agent(agent_name: str): Removes an agent from the swarm.
        add_agents(agents: List[Agent]): Adds multiple agents to the swarm.
        validate_flow(): Validates the flow pattern.
        run(task): Runs the swarm to rearrange the tasks.
    """

    def __init__(
        self,
        agents: List[Agent] = None,
        flow: str = None,
        max_loops: int = 1,
        verbose: bool = True,
    ):
        """
        Initializes the AgentRearrange object.

        Args:
            agents (List[Agent], optional): A list of Agent objects. Defaults to None.
            flow (str, optional): The flow pattern of the tasks. Defaults to None.
        """
        self.agents = {agent.name: agent for agent in agents}
        self.flow = flow
        self.verbose = verbose
        self.max_loops = max_loops

        if verbose is True:
            logger.add("agent_rearrange.log")

    def add_agent(self, agent: Agent):
        """
        Adds an agent to the swarm.

        Args:
            agent (Agent): The agent to be added.
        """
        logger.info(f"Adding agent {agent.name} to the swarm.")
        self.agents[agent.name] = agent

    def remove_agent(self, agent_name: str):
        """
        Removes an agent from the swarm.

        Args:
            agent_name (str): The name of the agent to be removed.
        """
        del self.agents[agent_name]

    def add_agents(self, agents: List[Agent]):
        """
        Adds multiple agents to the swarm.

        Args:
            agents (List[Agent]): A list of Agent objects.
        """
        for agent in agents:
            self.agents[agent.name] = agent

    def validate_flow(self):
        """
        Validates the flow pattern.

        Raises:
            ValueError: If the flow pattern is incorrectly formatted or contains duplicate agent names.

        Returns:
            bool: True if the flow pattern is valid.
        """
        if "->" not in self.flow:
            raise ValueError(
                "Flow must include '->' to denote the direction of the task."
            )

        agents_in_flow = []
        tasks = self.flow.split("->")
        for task in tasks:
            agent_names = [name.strip() for name in task.split(",")]
            for agent_name in agent_names:
                if agent_name not in self.agents:
                    raise ValueError(
                        f"Agent '{agent_name}' is not registered."
                    )
                agents_in_flow.append(agent_name)

        if len(set(agents_in_flow)) != len(agents_in_flow):
            raise ValueError(
                "Duplicate agent names in the flow are not allowed."
            )

        print("Flow is valid.")
        return True

    def run(self, task: str, *args, **kwargs):
        """
        Runs the swarm to rearrange the tasks.

        Args:
            task: The initial task to be processed.

        Returns:
            str: The final processed task.
        """
        if not self.validate_flow():
            return "Invalid flow configuration."

        tasks = self.flow.split("->")
        current_task = task

        for task in tasks:
            agent_names = [name.strip() for name in task.split(",")]
            if len(agent_names) > 1:
                # Parallel processing
                logger.info(f"Running agents in parallel: {agent_names}")
                results = []
                for agent_name in agent_names:
                    agent = self.agents[agent_name]
                    result = agent.run(current_task, *args, **kwargs)
                    results.append(result)
                current_task = "; ".join(results)
            else:
                # Sequential processing
                logger.info(f"Running agents sequentially: {agent_names}")
                agent = self.agents[agent_names[0]]
                current_task = agent.run(current_task, *args, **kwargs)

        return current_task


def rearrange(
    agents: List[Agent], flow: str, task: str = None, *args, **kwargs
):
    """
    Rearranges the given list of agents based on the specified flow.

    Parameters:
        agents (List[Agent]): The list of agents to be rearranged.
        flow (str): The flow used for rearranging the agents.
        task (str, optional): The task to be performed during rearrangement. Defaults to None.
        *args: Additional positional arguments.
        **kwargs: Additional keyword arguments.

    Returns:
        The result of running the agent system with the specified task.

    Example:
        agents = [agent1, agent2, agent3]
        flow = "agent1 -> agent2, agent3"
        task = "Perform a task"
        rearrange(agents, flow, task)
    """
    agent_system = AgentRearrange(
        agents=agents, flow=flow, *args, **kwargs
    )
    return agent_system.run(task, *args, **kwargs)


# # Initialize the director agent
# director = Agent(
#     agent_name="Director",
#     system_prompt="Directs the tasks for the workers",
#     llm=Anthropic(),
#     max_loops=1,
#     dashboard=False,
#     streaming_on=True,
#     verbose=True,
#     stopping_token="<DONE>",
#     state_save_file_type="json",
#     saved_state_path="director.json",
# )

# # Initialize worker 1
# worker1 = Agent(
#     agent_name="Worker1",
#     system_prompt="Generates a transcript for a youtube video on what swarms are",
#     llm=Anthropic(),
#     max_loops=1,
#     dashboard=False,
#     streaming_on=True,
#     verbose=True,
#     stopping_token="<DONE>",
#     state_save_file_type="json",
#     saved_state_path="worker1.json",
# )

# # Initialize worker 2
# worker2 = Agent(
#     agent_name="Worker2",
#     system_prompt="Summarizes the transcript generated by Worker1",
#     llm=Anthropic(),
#     max_loops=1,
#     dashboard=False,
#     streaming_on=True,
#     verbose=True,
#     stopping_token="<DONE>",
#     state_save_file_type="json",
#     saved_state_path="worker2.json",
# )


# flow = "Director -> Worker1 -> Worker2"
# agent_system = AgentRearrange(
#     agents=[director, worker1, worker2], flow=flow
# )
# # Run the system
# output = agent_system.run(
#     "Create a format to express and communicate swarms of llms in a structured manner for youtube"
# )
@@ -1,107 +1,91 @@
import time
import json

from swarms.utils.loguru_logger import logger
from swarms.structs.base_workflow import BaseWorkflow
from pydantic import BaseModel, Field
from typing import List, Dict
from swarms.structs.agent import Agent


class StepSequentialWorkflow(BaseModel):
    agent_names: List[str] = Field(
        ..., description="List of agent names to include in the workflow."
    )
    max_loops: int = Field(
        1, description="Maximum number of loops to run the workflow."
    )
    verbose: bool = Field(
        False, description="Whether to log debug information."
    )
    steps: Dict = Field(
        ...,
        description="Dictionary of steps for the workflow with each agent and its parameters.",
    )
    time: str = Field(
        time.strftime("%Y-%m-%d %H:%M:%S"),
        description="Time of the workflow.",
    )


# Define a class to handle the sequential workflow
class SequentialWorkflow(BaseWorkflow):
    def __init__(
        self,
        agents: List[Agent] = None,
        max_loops: int = 2,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        """
        Initializes a SequentialWorkflow with a list of agents.

        :param agents: List of agents to include in the workflow.
        """
        self.agents = agents
        self.max_loops = max_loops

        if verbose:
            logger.add("sequential_workflow.log", level="DEBUG")

        if not self.agents:
            raise ValueError("No agents provided for workflow")

        if not self.max_loops:
            self.max_loops = 1

        # Log all the agents in the workflow
        logger.info(
            f"Initialized SequentialWorkflow with agents: {json.dumps([str(agent.agent_name) for agent in self.agents])}"
        )

    def run(self, task: str, *args, **kwargs):
        """
        Run the workflow starting with an initial task.

        :param task: The task to start the workflow.
        """
        logger.info(f"Starting workflow with task: {task}")
        current_output = task
        for agent in self.agents:
            count = 0
            while count < self.max_loops:
                try:
                    logger.info(f"Running agent {agent.agent_name}")
                    current_output = agent.run(
                        current_output, *args, **kwargs
                    )
                    print(current_output)
                    count += 1
                    logger.debug(
                        f"Agent {agent.agent_name} completed loop {count} "
                    )  # Log partial output for brevity
                except Exception as e:
                    logger.error(
                        f"Error occurred while running agent {agent.agent_name}: {str(e)}"
                    )
                    raise
            logger.info(f"Finished running agent {agent.agent_name}")
        logger.info("Finished running workflow")
        return current_output
@@ -1,106 +0,0 @@
import json
from typing import List, Optional

from pydantic import model_validator, BaseModel, Field, Json

from swarms.structs.agent import Agent
from swarms.structs.task import Task


class Team(BaseModel):
    """
    Class that represents a group of agents, how they should work together and
    their tasks.

    Attributes:
        tasks (Optional[List[Task]]): List of tasks.
        agents (Optional[List[Agent]]): List of agents in this Team.
        architecture (str): Architecture that the Team will follow. Default is "sequential".
        verbose (bool): Verbose mode for the Agent Execution. Default is False.
        config (Optional[Json]): Configuration of the Team. Default is None.
    """

    tasks: Optional[List[Task]] = Field(None, description="List of tasks")
    agents: Optional[List[Agent]] = Field(
        None, description="List of agents in this Team."
    )
    architecture = Field(
        description="architecture that the Team will follow.",
        default="sequential",
    )
    verbose: bool = Field(
        description="Verbose mode for the Agent Execution",
        default=False,
    )
    config: Optional[Json] = Field(
        description="Configuration of the Team.", default=None
    )

    @model_validator(mode="before")
    @classmethod
    def check_config(_cls, values):
        if not values.get("config") and (
            not values.get("agents") and not values.get("tasks")
        ):
            raise ValueError(
                "Either agents and task need to be set or config."
            )

        if values.get("config"):
            config = json.loads(values.get("config"))
            if not config.get("agents") or not config.get("tasks"):
                raise ValueError("Config should have agents and tasks.")

            values["agents"] = [
                Agent(**agent) for agent in config["agents"]
            ]

            tasks = []
            for task in config["tasks"]:
                task_agent = [
                    agt
                    for agt in values["agents"]
                    if agt.role == task["agent"]
                ][0]
                del task["agent"]
                tasks.append(Task(**task, agent=task_agent))

            values["tasks"] = tasks
        return values

    def run(self) -> str:
        """
        Kickoff the Team to work on its tasks.

        Returns:
            output (List[str]): Output of the Team for each task.
        """
        if self.architecture == "sequential":
            return self.__sequential_loop()

    def __sequential_loop(self) -> str:
        """
        Loop that executes the sequential architecture.

        Returns:
            output (str): Output of the Team.
        """
        task_outcome = None
        for task in self.tasks:
            # Add delegation tools to the task if the agent allows it
            # if task.agent.allow_delegation:
            #     tools = AgentTools(agents=self.agents).tools()
            #     task.tools += tools

            self.__log(f"\nWorking Agent: {task.agent.role}")
            self.__log(f"Starting Task: {task.description} ...")

            task_outcome = task.execute(task_outcome)

            self.__log(f"Task output: {task_outcome}")

        return task_outcome

    def __log(self, message):
        if self.verbose:
            print(message)
@@ -0,0 +1,379 @@
import json
from pydantic import BaseModel
from swarms.utils.loguru_logger import logger
from swarms.tools.py_func_to_openai_func_str import (
    get_openai_function_schema_from_func,
    load_basemodels_if_needed,
)
from swarms.tools.openai_tool_creator_decorator import openai_tool_executor
from typing import Callable, Optional, Any, Dict, List
from swarms.tools.pydantic_to_json import (
    base_model_to_openai_function,
    multi_base_model_to_openai_function,
    function_to_str,
    functions_to_str,
)
from swarms.tools.function_util import process_tool_docs
from typing import Union

ToolType = Union[BaseModel, Dict[str, Any], Callable[..., Any]]


class BaseTool(BaseModel):
    """
    Base class for tools in the swarms package.

    Attributes:
        verbose (bool): Flag indicating whether to enable verbose mode.
        functions (List[Callable[..., Any]]): List of functions associated with the tool.
        base_models (List[type[BaseModel]]): List of base models associated with the tool.

    Methods:
        func_to_dict(function: Callable[..., Any], name: Optional[str] = None, description: str) -> Dict[str, Any]:
            Converts a function to a dictionary representation.

        load_params_from_func_for_pybasemodel(func: Callable[..., Any], *args: Any, **kwargs: Any) -> Callable[..., Any]:
            Loads parameters from a function for a Pydantic BaseModel.

        base_model_to_dict(pydantic_type: type[BaseModel], output_str: bool = False, *args: Any, **kwargs: Any) -> dict[str, Any]:
            Converts a Pydantic BaseModel to a dictionary representation.

        multi_base_models_to_dict(pydantic_types: List[type[BaseModel]], *args: Any, **kwargs: Any) -> dict[str, Any]:
            Converts multiple Pydantic BaseModels to a dictionary representation.

        dict_to_str(dict: dict[str, Any]) -> str:
            Converts a dictionary to a string representation.

        multi_dict_to_str(dicts: list[dict[str, Any]]) -> str:
            Converts multiple dictionaries to a string representation.

        get_docs_from_callable(item) -> Any:
            Retrieves documentation from a callable item.
    """

    verbose: bool = False
    functions: List[Callable[..., Any]] = []
    base_models: List[type[BaseModel]] = []
    autocheck: bool = False
    auto_execute_tool: Optional[bool] = False

    def func_to_dict(
        function: Callable[..., Any],
        *,
        name: Optional[str] = None,
        description: str,
    ) -> Dict[str, Any]:
        try:
            return get_openai_function_schema_from_func(
                function=function,
                name=name,
                description=description,
            )
        except Exception as e:
            logger.error(f"An error occurred in func_to_dict: {e}")
            logger.error("Please check the function and ensure it is valid.")
            logger.error("If the issue persists, please seek further assistance.")
            raise

    def load_params_from_func_for_pybasemodel(
        func: Callable[..., Any],
        *args: Any,
        **kwargs: Any,
    ) -> Callable[..., Any]:
        try:
            return load_basemodels_if_needed(func, *args, **kwargs)
        except Exception as e:
            logger.error(
                f"An error occurred in load_params_from_func_for_pybasemodel: {e}"
            )
            logger.error("Please check the function and ensure it is valid.")
            logger.error("If the issue persists, please seek further assistance.")
            raise

    def base_model_to_dict(
        pydantic_type: type[BaseModel],
        output_str: bool = False,
        *args: Any,
        **kwargs: Any,
    ) -> dict[str, Any]:
        try:
            return base_model_to_openai_function(
                pydantic_type, output_str, *args, **kwargs
            )
        except Exception as e:
            logger.error(f"An error occurred in base_model_to_dict: {e}")
            logger.error("Please check the Pydantic type and ensure it is valid.")
            logger.error("If the issue persists, please seek further assistance.")
            raise

    def multi_base_models_to_dict(
        pydantic_types: List[type[BaseModel]],
        *args: Any,
        **kwargs: Any,
    ) -> dict[str, Any]:
        try:
            return multi_base_model_to_openai_function(
                pydantic_types, *args, **kwargs
            )
        except Exception as e:
            logger.error(f"An error occurred in multi_base_models_to_dict: {e}")
            logger.error("Please check the Pydantic types and ensure they are valid.")
            logger.error("If the issue persists, please seek further assistance.")
            raise

    def dict_to_str(
        dict: dict[str, Any],
    ) -> str:
        try:
            return function_to_str(dict)
        except Exception as e:
            logger.error(f"An error occurred in dict_to_str: {e}")
            logger.error("Please check the dictionary and ensure it is valid.")
            logger.error("If the issue persists, please seek further assistance.")
            raise

    def multi_dict_to_str(
        dicts: list[dict[str, Any]],
    ) -> str:
        try:
            return functions_to_str(dicts)
        except Exception as e:
            logger.error(f"An error occurred in multi_dict_to_str: {e}")
            logger.error("Please check the dictionaries and ensure they are valid.")
            logger.error("If the issue persists, please seek further assistance.")
            raise

    def get_docs_from_callable(item):
        try:
            return process_tool_docs(item)
        except Exception as e:
            logger.error(f"An error occurred in get_docs: {e}")
            logger.error("Please check the item and ensure it is valid.")
            logger.error("If the issue persists, please seek further assistance.")
            raise

    def execute_tool(
        self,
        tools: List[Dict[str, Any]],
        function_map: Dict[str, Callable],
        *args: Any,
        **kwargs: Any,
    ) -> Callable:
        try:
            return openai_tool_executor(
                tools, function_map, self.verbose, *args, **kwargs
            )
        except Exception as e:
            logger.error(f"An error occurred in execute_tool: {e}")
            logger.error("Please check the tools and function map and ensure they are valid.")
            logger.error("If the issue persists, please seek further assistance.")
            raise

    def detect_tool_input_type(input):
        if isinstance(input, BaseModel):
            return "Pydantic"
        elif isinstance(input, dict):
            return "Dictionary"
        elif callable(input):
            return "Function"
        else:
            return "Unknown"

    def dynamic_run(self, input) -> str:
        """
        Executes the dynamic run based on the input type.

        Args:
            input: The input to be processed.

        Returns:
            str: The result of the dynamic run.

        Raises:
            None
        """
        tool_input_type = self.detect_tool_input_type(input)
        if tool_input_type == "Pydantic":
            function_str = base_model_to_openai_function(input)
        elif tool_input_type == "Dictionary":
            function_str = function_to_str(input)
        elif tool_input_type == "Function":
            function_str = get_openai_function_schema_from_func(input)
        else:
            return "Unknown tool input type"

        if self.auto_execute_tool:
            if tool_input_type == "Function":
                # Add the function to the functions list
                self.functions.append(input)

            # Create a function map from the functions list
            function_map = {func.__name__: func for func in self.functions}

            # Execute the tool
            return self.execute_tool(
                tools=[function_str], function_map=function_map
            )
        else:
            return function_str

    def execute_tool_by_name(
        tools: List[Dict[str, Any]],
        tool_name: str,
        function_map: Dict[str, Callable],
    ) -> Any:
        """
        Search for a tool by name and execute it.

        Args:
            tools (List[Dict[str, Any]]): A list of tools. Each tool is a dictionary that includes a 'name' key.
            tool_name (str): The name of the tool to execute.
            function_map (Dict[str, Callable]): A dictionary that maps tool names to functions.

        Returns:
            The result of executing the tool.

        Raises:
            ValueError: If the tool with the specified name is not found.
            TypeError: If the tool name is not mapped to a function in the function map.
        """
        # Search for the tool by name
        tool = next(
            (tool for tool in tools if tool.get("name") == tool_name), None
        )

        # If the tool is not found, raise an error
        if tool is None:
            raise ValueError(f"Tool '{tool_name}' not found")

        # Get the function associated with the tool
        func = function_map.get(tool_name)

        # If the function is not found, raise an error
        if func is None:
            raise TypeError(
                f"Tool '{tool_name}' is not mapped to a function"
            )

        # Execute the tool
        return func(**tool.get("parameters", {}))

    def execute_tool_from_text(
        text: str = None, function_map: Dict[str, Callable] = None
    ) -> Any:
        """
        Convert a JSON-formatted string into a tool dictionary and execute the tool.

        Args:
            text (str): A JSON-formatted string that represents a tool. The string should be convertible into a dictionary that includes a 'name' key and a 'parameters' key.
            function_map (Dict[str, Callable]): A dictionary that maps tool names to functions.

        Returns:
            The result of executing the tool.

        Raises:
            ValueError: If the tool with the specified name is not found.
            TypeError: If the tool name is not mapped to a function in the function map.
        """
        # Convert the text into a dictionary
        tool = json.loads(text)

        # Get the tool name and parameters from the dictionary
        tool_name = tool.get("name")
        tool_params = tool.get("parameters", {})

        # Get the function associated with the tool
        func = function_map.get(tool_name)

        # If the function is not found, raise an error
        if func is None:
            raise TypeError(
                f"Tool '{tool_name}' is not mapped to a function"
            )

        # Execute the tool
        return func(**tool_params)


# # Example function definitions and mappings
# def get_current_weather(location, unit='celsius'):
#     return f"Weather in {location} is likely sunny and 75° {unit.title()}"

# def add(a, b):
#     return a + b

# # Example tool configurations
# tools = [
#     {
#         "type": "function",
#         "function": {
#             "name": "get_current_weather",
#             "parameters": {
#                 "properties": {
#                     "location": "San Francisco, CA",
#                     "unit": "fahrenheit",
#                 },
#             },
#         },
#     },
#     {
#         "type": "function",
#         "function": {
#             "name": "add",
#             "parameters": {
#                 "properties": {
#                     "a": 1,
#                     "b": 2,
#                 },
#             },
#         },
#     }
# ]

# function_map = {
#     "get_current_weather": get_current_weather,
#     "add": add,
# }

# # Creating and executing the advanced executor
# tool_executor = BaseTool(verbose=True).execute_tool(tools, function_map)

# try:
#     results = tool_executor()
#     print(results)  # Outputs results from both functions
# except Exception as e:
#     print(f"Error: {e}")
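# Usage sketch for execute_tool_from_text: it expects a JSON string with "name"
# and "parameters" keys plus a function map, mirroring the commented example
# above. The add function here is purely illustrative.
# call = json.dumps({"name": "add", "parameters": {"a": 1, "b": 2}})
# result = BaseTool.execute_tool_from_text(text=call, function_map={"add": add})
# print(result)  # -> 3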
@@ -0,0 +1,58 @@
import pytest
from agent_rearrange import AgentRearrange


# Mocking the Agent class
class MockAgent:
    def __init__(self, agent_name):
        self.agent_name = agent_name

    def run(self, task):
        return f"Running {task}"


# Test for AgentRearrange class
class TestAgentRearrange:
    @pytest.fixture
    def agent_rearrange(self):
        agents = [MockAgent("agent1"), MockAgent("agent2")]
        return AgentRearrange(agents=agents)

    def test_parse_pattern(self, agent_rearrange):
        assert agent_rearrange.parse_pattern("agent1->agent2") is True
        assert agent_rearrange.parse_pattern("agent3->agent4") is False

    def test_self_find_agent_by_name(self, agent_rearrange):
        assert (
            agent_rearrange.self_find_agent_by_name("agent1").agent_name
            == "agent1"
        )
        assert agent_rearrange.self_find_agent_by_name("agent3") is None

    def test_agent_exists(self, agent_rearrange):
        assert agent_rearrange.agent_exists("agent1") is True
        assert agent_rearrange.agent_exists("agent3") is False

    def test_parse_concurrent_flow(self, agent_rearrange):
        agent_rearrange.parse_concurrent_flow("agent1->agent2")
        assert "agent2" in agent_rearrange.flows["agent1"]

    def test_parse_sequential_flow(self, agent_rearrange):
        agent_rearrange.parse_sequential_flow("agent1", "agent2")
        assert "agent2" in agent_rearrange.flows["agent1"]

    def test_execute_task(self, agent_rearrange):
        assert (
            agent_rearrange.execute_task("agent1", "agent2", "task1", {})
            == "Running task1 (from agent2)"
        )

    def test_process_flows(self, agent_rearrange):
        assert agent_rearrange.process_flows(
            "agent1->agent2", "task1", {}
        ) == ["Running task1"]

    def test_call(self, agent_rearrange):
        assert agent_rearrange(
            pattern="agent1->agent2", default_task="task1"
        ) == ["Running task1"]
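# To run this suite (assuming pytest is installed and the AgentRearrange module
# is importable as agent_rearrange), an invocation would look something like:
#     pytest -q test_agent_rearrange.py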