parent 9cbbdc9a10
commit 5728bff63a
@@ -0,0 +1,54 @@
from pydantic import BaseModel, Field

from swarms import Anthropic
from swarms import Agent


# Initialize the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(..., title="Whether the person is a student")
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )


# Instantiate the schema with example values
tool_schema = Schema(
    name="Tool Name",
    age=1,
    is_student=True,
    courses=["Course1", "Course2"],
)

# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"

# Initialize the agent
agent = Agent(
    agent_name="Person Information Generator",
    system_prompt=(
        "Generate a person's information based on the following schema:"
    ),
    # Set the tool schema -- this is the key difference
    tool_schema=tool_schema,
    llm=Anthropic(),
    max_loops=3,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    interactive=True,
    # Set the output type to the tool schema, which is a BaseModel instance
    output_type=tool_schema,  # or dict, or str
    metadata_output_type="json",
    # List of schemas that the agent can handle
    list_tool_schemas=[tool_schema],
    function_calling_format_type="OpenAI",
    function_calling_type="json",  # or soon yaml
)

# Run the agent to generate the person's information
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")
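For reference, a hedged sketch of what a filled-in Schema instance looks like as structured output; the concrete values below are made up for illustration and assume the Schema model defined in the hunk above:

# Hypothetical example values -- Schema is the pydantic model defined above
person = Schema(
    name="Jane Doe",
    age=27,
    is_student=True,
    courses=["Biology", "Chemistry"],
)
print(person.model_dump_json(indent=2))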
@@ -0,0 +1,98 @@
swarms

pip install swarms

swarms is the most pythonic way of writing cognitive systems. Leveraging pydantic models as output schemas, combined with langchain in the backend, allows for seamless integration of LLMs into your apps. It uses OpenAI Functions or LlamaCpp grammars (JSON-schema mode) for efficient structured output. In the backend it compiles the swarms syntax into langchain runnables, so you can easily invoke, stream, or batch process your pipelines (a short runnable sketch follows the example below).
from pydantic import BaseModel, Field

from swarms import Anthropic
from swarms import Agent


# Initialize the schema for the person's information
class Schema(BaseModel):
    name: str = Field(..., title="Name of the person")
    age: int = Field(..., title="Age of the person")
    is_student: bool = Field(..., title="Whether the person is a student")
    courses: list[str] = Field(
        ..., title="List of courses the person is taking"
    )


# Instantiate the schema with example values
tool_schema = Schema(
    name="Tool Name",
    age=1,
    is_student=True,
    courses=["Course1", "Course2"],
)

# Define the task to generate a person's information
task = "Generate a person's information based on the following schema:"

# Initialize the agent
agent = Agent(
    agent_name="Person Information Generator",
    system_prompt=(
        "Generate a person's information based on the following schema:"
    ),
    # Set the tool schema -- this is the key difference
    tool_schema=tool_schema,
    llm=Anthropic(),
    max_loops=3,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    interactive=True,
    # Set the output type to the tool schema, which is a BaseModel instance
    output_type=tool_schema,  # or dict, or str
    metadata_output_type="json",
    # List of schemas that the agent can handle
    list_tool_schemas=[tool_schema],
    function_calling_format_type="OpenAI",
    function_calling_type="json",  # or soon yaml
)

# Run the agent to generate the person's information
generated_data = agent.run(task)

# Print the generated data
print(f"Generated data: {generated_data}")

Features

🐍 pythonic
🔀 easy swap between openai or local models
🔄 dynamic output types (pydantic models, or primitives)
👁️ vision llm support
🧠 langchain_core as backend
📝 jinja templating for prompts
🏗️ reliable structured output
🔁 auto retry parsing (sketched below)
🔧 langsmith support
🔄 sync, async, streaming, parallel, fallbacks
📦 gguf download from huggingface
✅ type hints for all functions and mypy support
🗣️ chat router component
🧩 composable with langchain LCEL
🛠️ easy error handling
🚦 enums and literal support
📐 custom parsing types
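A minimal sketch of what the "auto retry parsing" feature boils down to, assuming a hypothetical llm_call callable (not swarms API): validate the raw model output against the pydantic schema and retry on failure.

from pydantic import BaseModel, ValidationError

def parse_with_retry(
    llm_call,  # hypothetical: returns raw JSON text from the model
    schema: type[BaseModel],
    max_retries: int = 3,
) -> BaseModel:
    last_error = None
    for _ in range(max_retries):
        raw = llm_call()
        try:
            # pydantic v2: parse and validate the JSON string in one step
            return schema.model_validate_json(raw)
        except ValidationError as error:
            last_error = error  # malformed output -- retry
    raise last_error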

Documentation

Check out the docs here 👈

We also highly recommend running the examples in the ./examples folder.
Contribution

Want to contribute? Thanks, that's great! For more information, check out the Contributing Guide. Please run the dev setup to get started:

git clone https://github.com/kyegomez/swarms.git && cd swarms

./dev_setup.sh

About

⛓️ build cognitive systems, pythonic
@@ -0,0 +1,7 @@
from swarms import AutoSwarm

# Initialize the swarm
swarm = AutoSwarm("kyegomez/myswarm")

# Run the swarm
swarm.run("Generate a 10,000 word blog on health and wellness.")
@@ -1,329 +0,0 @@
import logging
import queue
import threading
from time import sleep
from typing import Callable, Dict, List, Optional

from termcolor import colored

from swarms.structs.agent import Agent
from swarms.structs.base_structure import BaseStructure
from swarms.utils.decorators import (
    error_decorator,
    log_decorator,
    timing_decorator,
)


class AutoScaler(BaseStructure):
    """
    AutoScaler class

    The AutoScaler class is responsible for managing the agents pool
    and the task queue. It also monitors the health of the agents and
    scales the pool up or down based on the number of pending tasks
    and the current load of the agents.

    Args:
        initial_agents (Optional[int], optional): Initial number of
            agents to start with. Defaults to 10.
        scale_up_factor (int, optional): Factor by which to scale up
            the agents pool. Defaults to 1.
        idle_threshold (float, optional): Threshold for scaling down
            the agents pool. Defaults to 0.2.
        busy_threshold (float, optional): Threshold for scaling up
            the agents pool. Defaults to 0.7.
        agents (List[Agent], optional): List of agents to use in the
            pool. Defaults to None.
        autoscale (Optional[bool], optional): Whether to autoscale
            the agents pool. Defaults to True.
        min_agents (Optional[int], optional): Minimum number of
            agents to keep in the pool. Defaults to 10.
        max_agents (Optional[int], optional): Maximum number of
            agents to keep in the pool. Defaults to 100.
        custom_scale_strategy (Optional[Callable], optional): Custom
            scaling strategy to use. Defaults to None.

    Methods:
        add_task: Add tasks to queue
        scale_up: Add more agents
        scale_down: Scale down the agents pool
        run: Run the task on the agent with the given id
        monitor_and_scale: Monitor and scale
        start: Start scaling
        check_agent_health: Checks the health of each agent and
            replaces unhealthy agents.
        balance_load: Distributes tasks among agents based on their
            current load.
        set_scaling_strategy: Set a custom scaling strategy.
        execute_scaling_strategy: Execute the custom scaling strategy
            if defined.
        report_agent_metrics: Collects and reports metrics from each
            agent.
        report: Reports the current state of the autoscaler.
        print_dashboard: Prints a dashboard of the current state of
            the autoscaler.

    Examples:
    >>> import os
    >>> from dotenv import load_dotenv
    >>> # Import the OpenAIChat model and the Agent struct
    >>> from swarms.models import OpenAIChat
    >>> from swarms.structs import Agent
    >>> from swarms.structs.autoscaler import AutoScaler
    >>> # Load the environment variables
    >>> load_dotenv()
    >>> # Get the API key from the environment
    >>> api_key = os.environ.get("OPENAI_API_KEY")
    >>> # Initialize the language model
    >>> llm = OpenAIChat(
    ...     temperature=0.5,
    ...     openai_api_key=api_key,
    ... )
    >>> ## Initialize the workflow
    >>> agent = Agent(llm=llm, max_loops=1, dashboard=True)
    >>> # Load the autoscaler
    >>> autoscaler = AutoScaler(
    ...     initial_agents=2,
    ...     scale_up_factor=1,
    ...     idle_threshold=0.2,
    ...     busy_threshold=0.7,
    ...     agents=[agent],
    ...     autoscale=True,
    ...     min_agents=1,
    ...     max_agents=5,
    ...     custom_scale_strategy=None,
    ... )
    >>> print(autoscaler)
    >>> # Run the workflow on a task
    >>> out = autoscaler.run(agent.id, "Generate a 10,000 word blog on health and wellness.")
    >>> print(out)

    """

    @log_decorator
    @error_decorator
    @timing_decorator
    def __init__(
        self,
        initial_agents: Optional[int] = 10,
        scale_up_factor: int = 1,
        idle_threshold: float = 0.2,
        busy_threshold: float = 0.7,
        agents: List[Agent] = None,
        autoscale: Optional[bool] = True,
        min_agents: Optional[int] = 10,
        max_agents: Optional[int] = 100,
        custom_scale_strategy: Optional[Callable] = None,
        *args,
        **kwargs,
    ):
        self.initial_agents = initial_agents
        self.agents_pool = agents or [
            agents[0]() for _ in range(initial_agents)
        ]
        self.task_queue = queue.Queue()
        self.scale_up_factor = scale_up_factor
        self.idle_threshold = idle_threshold
        self.busy_threshold = busy_threshold
        self.lock = threading.Lock()
        self.agents = agents
        self.autoscale = autoscale
        self.min_agents = min_agents
        self.max_agents = max_agents
        self.custom_scale_strategy = custom_scale_strategy

    def add_task(self, task):
        """Add tasks to queue"""
        try:
            self.task_queue.put(task)
        except Exception as error:
            print(
                f"Error adding task to queue: {error} try again with"
                " a new task"
            )

    @log_decorator
    @error_decorator
    @timing_decorator
    def scale_up(self):
        """Add more agents"""
        try:
            with self.lock:
                new_agents_counts = (
                    len(self.agents_pool) * self.scale_up_factor
                )
                for _ in range(new_agents_counts):
                    self.agents_pool.append(self.agents[0]())
        except Exception as error:
            print(f"Error scaling up: {error} try again with a new task")

    def scale_down(self):
        """Scale down the agents pool"""
        try:
            with self.lock:
                if (
                    len(self.agents_pool) > 10
                ):  # ensure a minimum of 10 agents
                    del self.agents_pool[-1]  # remove last agent
        except Exception as error:
            print(
                f"Error scaling down: {error} try again with a new" " task"
            )

    def run(self, agent_id, task: Optional[str] = None, *args, **kwargs):
        """Run the task on the agent with the given id

        Args:
            agent_id: ID of the agent to run the task on.
            task (str, optional): The task to run. Defaults to None.

        Raises:
            ValueError: If no agent with the given ID is found.

        Returns:
            The result of the agent's run method.
        """
        for agent in self.agents_pool:
            if agent.id == agent_id:
                return agent.run(task, *args, **kwargs)
        raise ValueError(f"No agent found with ID {agent_id}")

    @log_decorator
    @error_decorator
    @timing_decorator
    def monitor_and_scale(self):
        """Monitor and scale"""
        try:
            while True:
                sleep(60)  # check every minute
                pending_tasks = self.task_queue.qsize()
                active_agents = sum(
                    [1 for agent in self.agents_pool if agent.is_busy()]
                )

                if (
                    pending_tasks / len(self.agents_pool)
                    > self.busy_threshold
                ):
                    self.scale_up()
                elif (
                    active_agents / len(self.agents_pool)
                    < self.idle_threshold
                ):
                    self.scale_down()
        except Exception as error:
            print(
                f"Error monitoring and scaling: {error} try again"
                " with a new task"
            )

    @log_decorator
    @error_decorator
    @timing_decorator
    def start(self):
        """Start scaling"""
        try:
            monitor_thread = threading.Thread(
                target=self.monitor_and_scale
            )
            monitor_thread.start()

            while True:
                task = self.task_queue.get()
                if task:
                    available_agent = next(
                        iter(self.agents_pool), None
                    )
                    if available_agent:
                        available_agent.run(task)
        except Exception as error:
            print(f"Error starting: {error} try again with a new task")

    def check_agent_health(self):
        """Checks the health of each agent and replaces unhealthy agents."""
        for i, agent in enumerate(self.agents_pool):
            if not agent.is_healthy():
                logging.warning(f"Replacing unhealthy agent at index {i}")
                self.agents_pool[i] = self.agents[0]()

    def balance_load(self):
        """Distributes tasks among agents based on their current load."""
        try:
            while not self.task_queue.empty():
                for agent in self.agents_pool:
                    if agent.can_accept_task():
                        task = self.task_queue.get()
                        agent.run(task)
        except Exception as error:
            print(
                f"Error balancing load: {error} try again with a new"
                " task"
            )

    def set_scaling_strategy(self, strategy: Callable[[int, int], int]):
        """Set a custom scaling strategy."""
        self.custom_scale_strategy = strategy

    def execute_scaling_strategy(self):
        """Execute the custom scaling strategy if defined."""
        try:
            if self.custom_scale_strategy is not None:
                scale_amount = self.custom_scale_strategy(
                    self.task_queue.qsize(), len(self.agents_pool)
                )
                if scale_amount > 0:
                    for _ in range(scale_amount):
                        self.agents_pool.append(self.agents[0]())
                elif scale_amount < 0:
                    for _ in range(abs(scale_amount)):
                        if len(self.agents_pool) > 10:
                            del self.agents_pool[-1]
        except Exception as error:
            print(
                f"Error executing scaling strategy: {error} try again"
                " with a new task"
            )

    def report_agent_metrics(self) -> Dict[str, List[float]]:
        """Collects and reports metrics from each agent."""
        metrics = {
            "completion_time": [],
            "success_rate": [],
            "error_rate": [],
        }
        for agent in self.agents_pool:
            agent_metrics = agent.get_metrics()
            for key in metrics.keys():
                metrics[key].append(agent_metrics.get(key, 0.0))
        return metrics

    def report(self):
        """Reports the current state of the autoscaler."""
        self.check_agent_health()
        self.balance_load()
        self.execute_scaling_strategy()
        metrics = self.report_agent_metrics()
        print(metrics)

    def print_dashboard(self):
        """Prints a dashboard of the current state of the autoscaler."""
        print(
            colored(
                f"""

                Autoscaler Dashboard
                --------------------
                Agents: {len(self.agents_pool)}
                Initial Agents: {self.initial_agents}
                self.scale_up_factor: {self.scale_up_factor}
                self.idle_threshold: {self.idle_threshold}
                self.busy_threshold: {self.busy_threshold}
                self.task_queue.qsize(): {self.task_queue.qsize()}
                self.task_queue.empty(): {self.task_queue.empty()}
                self.task_queue.full(): {self.task_queue.full()}
                self.task_queue.maxsize: {self.task_queue.maxsize}

                """,
                "cyan",
            )
        )
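Given the custom_scale_strategy hook above (a Callable[[int, int], int] per set_scaling_strategy), a minimal illustrative strategy could look like the sketch below; the function name and thresholds are made up for illustration:

# Hypothetical custom strategy: positive return adds agents, negative removes.
def queue_pressure_strategy(pending_tasks: int, num_agents: int) -> int:
    tasks_per_agent = pending_tasks / max(num_agents, 1)
    if tasks_per_agent > 2:    # backlogged: add one agent
        return 1
    if tasks_per_agent < 0.5:  # mostly idle: drop one agent
        return -1
    return 0                   # load is in the comfortable band

# autoscaler.set_scaling_strategy(queue_pressure_strategy)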
@@ -1,25 +0,0 @@
from dataclasses import dataclass


@dataclass
class BlockDevice:
    """
    Represents a block device.

    Attributes:
        device (str): The device name.
        cluster (str): The cluster name.
        description (str): A description of the block device.
    """

    def __init__(self, device: str, cluster: str, description: str):
        self.device = device
        self.cluster = cluster
        self.description = description

    def __str__(self):
        return (
            f"BlockDevice(device={self.device},"
            f" cluster={self.cluster},"
            f" description={self.description})"
        )
@@ -1,70 +0,0 @@
import multiprocessing as mp
from typing import List, Optional

from swarms.structs.base_structure import BaseStructure


class LoadBalancer(BaseStructure):
    """
    A load balancer class that distributes tasks among multiple workers.

    Args:
        num_workers (int): The number of worker processes to create.
        agents (Optional[List]): A list of agents to assign to the load balancer.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Attributes:
        num_workers (int): The number of worker processes.
        agents (Optional[List]): A list of agents assigned to the load balancer.
        tasks (List): A list of tasks to be executed.
        results (List): A list of results from the executed tasks.
        workers (List): A list of worker processes.

    Methods:
        add_task: Add a task to the load balancer.

    """

    def __init__(
        self,
        num_workers: int = 1,
        agents: Optional[List] = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.num_workers = num_workers
        self.agents = agents
        self.tasks = []
        self.results = []
        self.workers = []
        self._init_workers()

    def _init_workers(self):
        for i in range(self.num_workers):
            worker = mp.Process(target=self._worker)
            worker.start()
            self.workers.append(worker)

    def _worker(self):
        while True:
            task = self._get_task()
            if task is None:
                break
            result = self._run_task(task)
            self._add_result(result)

    def _get_task(self):
        if len(self.tasks) == 0:
            return None
        return self.tasks.pop(0)

    def _run_task(self, task):
        return task()

    def _add_result(self, result):
        self.results.append(result)

    def add_task(self, task):
        self.tasks.append(task)
@@ -1,176 +0,0 @@
from dataclasses import asdict
from typing import List

import networkx as nx
import redis
from redis.commands.graph import Graph, Node

from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm


class SwarmRelationship:
    JOINED = "joined"


class RedisSwarmRegistry(BaseSwarm):
    """
    Initialize the SwarmRedisRegistry object.

    Args:
        host (str): The hostname or IP address of the Redis server. Default is "localhost".
        port (int): The port number of the Redis server. Default is 6379.
        db: The Redis database number. Default is 0.
        graph_name (str): The name of the RedisGraph graph. Default is "swarm_registry".
    """

    def __init__(
        self,
        host: str = "localhost",
        port: int = 6379,
        db=0,
        graph_name: str = "swarm_registry",
    ):
        self.redis = redis.StrictRedis(
            host=host, port=port, db=db, decode_responses=True
        )
        self.redis_graph = Graph(self.redis, graph_name)
        self.graph = nx.DiGraph()

    def _entity_to_node(self, entity: Agent) -> Node:
        """
        Converts an Agent or Swarm object to a Node object.

        Args:
            entity (Agent): The Agent or Swarm object to convert.

        Returns:
            Node: The converted Node object.
        """
        return Node(
            node_id=entity.id,
            alias=entity.agent_name,
            label=entity.agent_description,
            properties=asdict(entity),
        )

    def _add_node(self, node: Agent):
        """
        Adds a node to the graph.

        Args:
            node (Agent): The Agent or Swarm node to add.
        """
        self.graph.add_node(node.id)
        if isinstance(node, Agent):
            self.add_agent_entry(node)

    def _add_edge(self, from_node: Node, to_node: Node, relationship):
        """
        Adds an edge between two nodes in the graph.

        Args:
            from_node (Node): The source node of the edge.
            to_node (Node): The target node of the edge.
            relationship: The relationship type between the nodes.
        """
        match_query = (
            f"MATCH (a:{from_node.label}),(b:{to_node.label}) WHERE"
            f" a.id = {from_node.id} AND b.id = {to_node.id}"
        )

        query = f"""
        {match_query}
        CREATE (a)-[r:{relationship}]->(b) RETURN r
        """.replace(
            "\n", ""
        )

        self.redis_graph.query(query)

    def add_swarm_entry(self, swarm: Agent):
        """
        Adds a swarm entry to the graph.

        Args:
            swarm (Agent): The swarm object to add.
        """
        node = self._entity_to_node(swarm)
        self._persist_node(node)

    def add_agent_entry(self, agent: Agent):
        """
        Adds an agent entry to the graph.

        Args:
            agent (Agent): The agent object to add.
        """
        node = self._entity_to_node(agent)
        self._persist_node(node)

    def join_swarm(
        self,
        from_entity: Agent,
        to_entity: Agent,
    ):
        """
        Adds an edge between two nodes in the graph.

        Args:
            from_entity (Agent): The source entity of the edge.
            to_entity (Agent): The target entity of the edge.

        Returns:
            Any: The result of adding the edge.
        """
        from_node = self._entity_to_node(from_entity)
        to_node = self._entity_to_node(to_entity)

        return self._add_edge(from_node, to_node, SwarmRelationship.JOINED)

    def _persist_node(self, node: Node):
        """
        Persists a node in the graph.

        Args:
            node (Node): The node to persist.
        """
        query = f"CREATE {node}"
        self.redis_graph.query(query)

    def retrieve_swarm_information(self, swarm_id: int) -> Agent:
        """
        Retrieves swarm information from the registry.

        Args:
            swarm_id (int): The ID of the swarm to retrieve.

        Returns:
            Agent: The retrieved swarm information as an Agent object.
        """
        swarm_key = f"swarm:{swarm_id}"
        swarm_data = self.redis.hgetall(swarm_key)
        if swarm_data:
            # Parse the swarm_data and return an instance of Agent;
            # the retrieved data populates the Agent attributes
            return Agent(**swarm_data)
        return None

    def retrieve_joined_agents(self) -> List[Agent]:
        """
        Retrieves a list of joined agents from the registry.

        Returns:
            List[Agent]: The retrieved joined agents as a list of Agent objects.
        """
        agent_data = self.redis_graph.query(
            "MATCH (a:agent)-[:joined]->(b:manager) RETURN a"
        )
        if agent_data:
            # Parse each record and return a list of Agent instances
            return [Agent(**record) for record in agent_data]
        return None
@@ -0,0 +1,134 @@
from typing import Any, List

from docstring_parser import parse
from pydantic import BaseModel


def _remove_a_key(d: dict, remove_key: str) -> None:
    """Remove a key from a dictionary recursively"""
    if isinstance(d, dict):
        for key in list(d.keys()):
            if key == remove_key and "type" in d.keys():
                del d[key]
            else:
                _remove_a_key(d[key], remove_key)


def pydantic_to_functions(
    pydantic_type: type[BaseModel],
) -> dict[str, Any]:
    """
    Convert a Pydantic model to a dictionary representation of functions.

    Args:
        pydantic_type (type[BaseModel]): The Pydantic model type to convert.

    Returns:
        dict[str, Any]: A dictionary representation of the functions.

    """
    schema = pydantic_type.model_json_schema()

    docstring = parse(pydantic_type.__doc__ or "")
    parameters = {
        k: v
        for k, v in schema.items()
        if k not in ("title", "description")
    }

    for param in docstring.params:
        if (name := param.arg_name) in parameters["properties"] and (
            description := param.description
        ):
            if "description" not in parameters["properties"][name]:
                parameters["properties"][name]["description"] = description

    parameters["type"] = "object"

    if "description" not in schema:
        if docstring.short_description:
            schema["description"] = docstring.short_description
        else:
            schema["description"] = (
                f"Correctly extracted `{pydantic_type.__name__.lower()}` with all "
                f"the required parameters with correct types"
            )

    _remove_a_key(parameters, "title")
    _remove_a_key(parameters, "additionalProperties")

    return {
        "function_call": {
            "name": pydantic_type.__name__.lower(),
        },
        "functions": [
            {
                "name": pydantic_type.__name__.lower(),
                "description": schema["description"],
                "parameters": parameters,
            },
        ],
    }


def multi_pydantic_to_functions(
    pydantic_types: List[type[BaseModel]] = None,
) -> dict[str, Any]:
    """
    Converts multiple Pydantic types to a dictionary of functions.

    Args:
        pydantic_types (List[type[BaseModel]]): A list of Pydantic types to convert.

    Returns:
        dict[str, Any]: A dictionary containing the converted functions.

    """
    functions: list[dict[str, Any]] = [
        pydantic_to_functions(pydantic_type)["functions"][0]
        for pydantic_type in pydantic_types
    ]

    return {
        "function_call": "auto",
        "functions": functions,
    }


def function_to_str(function: dict[str, Any]) -> str:
    """
    Convert a function dictionary to a string representation.

    Args:
        function (dict[str, Any]): The function dictionary to convert.

    Returns:
        str: The string representation of the function.

    """
    function_str = f"Function: {function['name']}\n"
    function_str += f"Description: {function['description']}\n"
    function_str += "Parameters:\n"

    for param, details in function["parameters"]["properties"].items():
        function_str += f"  {param} ({details['type']}): {details.get('description', '')}\n"

    return function_str


def functions_to_str(functions: list[dict[str, Any]]) -> str:
    """
    Convert a list of function dictionaries to a string representation.

    Args:
        functions (list[dict[str, Any]]): The list of function dictionaries to convert.

    Returns:
        str: The string representation of the functions.

    """
    functions_str = ""
    for function in functions:
        functions_str += function_to_str(function) + "\n"

    return functions_str
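A hedged usage sketch for pydantic_to_functions above; the Weather model is a made-up example, not part of this diff:

from pydantic import BaseModel, Field

class Weather(BaseModel):
    """Get the current weather for a city."""

    city: str = Field(..., description="Name of the city")
    unit: str = Field("celsius", description="Temperature unit")

payload = pydantic_to_functions(Weather)
print(payload["functions"][0]["name"])        # -> "weather"
print(payload["functions"][0]["parameters"])  # JSON-schema style parameters dict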