Merge remote-tracking branch 'upstream/master'

pull/700/head
Patrick Devaney 4 months ago
commit 69d95d2d51

@ -0,0 +1,297 @@
.git
.gitignore
.env
__pycache__
*.pyc
*.pyo
*.pyd
.Python
env/
venv/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.log
.pytest_cache/
.mypy_cache/
__pycache__/
.venv/
.env
image/
audio/
video/
artifacts_three
dataframe/
.ruff_cache
.pytest_cache
static/generated
runs
Financial-Analysis-Agent_state.json
experimental
artifacts_five
encryption
errors
chroma
agent_workspace
.pt
Accounting Assistant_state.json
Unit Testing Agent_state.json
sec_agent
Devin_state.json
poetry.lock
hire_researchers
agent_workspace
json_logs
Medical Image Diagnostic Agent_state.json
flight agent_state.json
D_state.json
artifacts_six
artifacts_seven
swarms/__pycache__
artifacts_once
transcript_generator.json
venv
.DS_Store
Cargo.lock
.DS_STORE
artifacts_logs
Cargo.lock
Medical Treatment Recommendation Agent_state.json
swarms/agents/.DS_Store
artifacts_two
logs
T_state.json
_build
conversation.txt
t1_state.json
stderr_log.txt
t2_state.json
.vscode
.DS_STORE
# Byte-compiled / optimized / DLL files
Transcript Generator_state.json
__pycache__/
*.py[cod]
*$py.class
.grit
swarm-worker-01_state.json
error.txt
Devin Worker 2_state.json
# C extensions
*.so
.ruff_cache
errors.txt
Autonomous-Agent-XYZ1B_state.json
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
.DS_Store
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.vscode/settings.json
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
dist/
# Flycheck
flycheck_*.el
# server auth directory
/server/
# projectiles files
.projectile
# directory configuration
.dir-locals.el
# network security
/network-security.data

@ -20,4 +20,4 @@ AZURE_OPENAI_ENDPOINT="your_azure_openai_endpoint"
AZURE_OPENAI_DEPLOYMENT="your_azure_openai_deployment"
OPENAI_API_VERSION="your_openai_api_version"
AZURE_OPENAI_API_KEY="your_azure_openai_api_key"
AZURE_OPENAI_AD_TOKEN="your_azure_openai_ad_token"
AZURE_OPENAI_AD_TOKEN="your_azure_openai_ad_token"

3
.gitignore vendored

@ -400,4 +400,5 @@ flycheck_*.el
.dir-locals.el
# network security
/network-security.data
/network-security.data

@ -1,3 +1,4 @@
<<<<<<< HEAD
# Use an official Python runtime as a parent image
FROM python:3.11-slim-bullseye
@ -37,3 +38,60 @@ WORKDIR /usr/src/app/tests
# Default command to run tests located in the /tests directory
CMD pytest /usr/src/app/tests --continue-on-collection-errors --tb=short --disable-warnings | tee /usr/src/app/logs/test_logs.txt
=======
# Use Python 3.11 slim-bullseye for smaller base image
FROM python:3.11-slim-bullseye AS builder
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
PIP_NO_CACHE_DIR=1 \
PIP_DISABLE_PIP_VERSION_CHECK=1
# Set the working directory
WORKDIR /build
# Install only essential build dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
gcc \
g++ \
gfortran \
&& rm -rf /var/lib/apt/lists/*
# Install swarms packages
RUN pip install --no-cache-dir swarm-models swarms
# Production stage
FROM python:3.11-slim-bullseye
# Set secure environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
WORKSPACE_DIR="agent_workspace" \
PATH="/app:${PATH}" \
PYTHONPATH="/app:${PYTHONPATH}" \
USER=swarms
# Create non-root user
RUN useradd -m -s /bin/bash -U $USER && \
mkdir -p /app && \
chown -R $USER:$USER /app
# Set working directory
WORKDIR /app
# Copy only necessary files from builder
COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
# Copy application with correct permissions
COPY --chown=$USER:$USER . .
# Switch to non-root user
USER $USER
# Health check
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
CMD python -c "import swarms; print('Health check passed')" || exit 1
>>>>>>> upstream/master

@ -295,7 +295,7 @@ print(agent.model_dump_json())
print(agent.model_dump_yaml())
# Ingest documents into the agent's knowledge base
agent.ingest_docs("your_pdf_path.pdf")
("your_pdf_path.pdf")
# Receive a message from a user and process it
agent.receive_message(name="agent_name", message="message")
@ -1840,6 +1840,134 @@ result = mixture_router.run("Evaluate the potential acquisition of TechStartup I
```
-------
## GroupChat
A production-grade multi-agent system enabling sophisticated group conversations between AI agents with customizable speaking patterns, parallel processing capabilities, and comprehensive conversation tracking.
```python
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms import Agent, GroupChat, expertise_based
if __name__ == "__main__":
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key,
model_name="gpt-4o-mini",
temperature=0.1,
)
# Example agents
agent1 = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt="You are a financial analyst specializing in investment strategies.",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
output_type="string",
streaming_on=False,
)
agent2 = Agent(
agent_name="Tax-Adviser-Agent",
system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
output_type="string",
streaming_on=False,
)
agents = [agent1, agent2]
chat = GroupChat(
name="Investment Advisory",
description="Financial and tax analysis group",
agents=agents,
speaker_fn=expertise_based,
)
history = chat.run(
"How to optimize tax strategy for investments?"
)
print(history.model_dump_json(indent=2))
```
---
## MultiAgentRouter
The MultiAgentRouter is a swarm architecture designed to dynamically assign tasks to the most suitable agent. It achieves this through a director or boss entity that utilizes function calls to identify and allocate tasks to the agent best equipped to handle them. [Check out the documentation](https://docs.swarms.world/en/latest/swarms/structs/multi_agent_router/)
```python
from swarms import Agent
from swarms.structs.multi_agent_orchestrator import MultiAgentRouter
# Example usage:
if __name__ == "__main__":
# Define some example agents
agents = [
Agent(
agent_name="ResearchAgent",
description="Specializes in researching topics and providing detailed, factual information",
system_prompt="You are a research specialist. Provide detailed, well-researched information about any topic, citing sources when possible.",
model_name="openai/gpt-4o",
),
Agent(
agent_name="CodeExpertAgent",
description="Expert in writing, reviewing, and explaining code across multiple programming languages",
system_prompt="You are a coding expert. Write, review, and explain code with a focus on best practices and clean code principles.",
model_name="openai/gpt-4o",
),
Agent(
agent_name="WritingAgent",
description="Skilled in creative and technical writing, content creation, and editing",
system_prompt="You are a writing specialist. Create, edit, and improve written content while maintaining appropriate tone and style.",
model_name="openai/gpt-4o",
),
]
# Initialize routers with different configurations
router_execute = MultiAgentRouter(agents=agents, execute_task=True)
# Example task
task = "Write a Python function to calculate fibonacci numbers"
try:
# Process the task with execution
print("\nWith task execution:")
result_execute = router_execute.route_task(task)
print(result_execute)
except Exception as e:
print(f"Error occurred: {str(e)}")
```
----------

File diff suppressed because it is too large Load Diff

@ -1,291 +0,0 @@
import os
import json
import logging
from typing import Dict, Optional, Any
from dataclasses import dataclass
import requests
import time
# Set up logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
handlers=[
logging.FileHandler("api_tests.log"),
logging.StreamHandler(),
],
)
logger = logging.getLogger(__name__)
# Configuration
@dataclass
class TestConfig:
"""Test configuration settings"""
base_url: str
timeout: int = 30
verify_ssl: bool = True
debug: bool = True
# Load config from environment or use defaults
config = TestConfig(
base_url=os.getenv("API_BASE_URL", "http://0.0.0.0:8000/v1")
)
class APIClient:
"""API Client for testing"""
def __init__(self, config: TestConfig):
self.config = config
self.session = requests.Session()
def _url(self, path: str) -> str:
"""Construct full URL"""
return f"{self.config.base_url}/{path.lstrip('/')}"
def _log_request_details(
self, method: str, url: str, headers: Dict, data: Any
):
"""Log request details for debugging"""
logger.info("\nRequest Details:")
logger.info(f"Method: {method}")
logger.info(f"URL: {url}")
logger.info(f"Headers: {json.dumps(headers, indent=2)}")
logger.info(
f"Data: {json.dumps(data, indent=2) if data else None}"
)
def _log_response_details(self, response: requests.Response):
"""Log response details for debugging"""
logger.info("\nResponse Details:")
logger.info(f"Status Code: {response.status_code}")
logger.info(
f"Headers: {json.dumps(dict(response.headers), indent=2)}"
)
try:
logger.info(
f"Body: {json.dumps(response.json(), indent=2)}"
)
except Exception:
logger.info(f"Body: {response.text}")
def _request(
self,
method: str,
path: str,
headers: Optional[Dict] = None,
**kwargs: Any,
) -> requests.Response:
"""Make HTTP request with config defaults"""
url = self._url(path)
headers = headers or {}
if self.config.debug:
self._log_request_details(
method, url, headers, kwargs.get("json")
)
try:
response = self.session.request(
method=method,
url=url,
headers=headers,
timeout=self.config.timeout,
verify=self.config.verify_ssl,
**kwargs,
)
if self.config.debug:
self._log_response_details(response)
if response.status_code >= 400:
logger.error(
f"Request failed with status {response.status_code}"
)
logger.error(f"Response: {response.text}")
response.raise_for_status()
return response
except requests.exceptions.RequestException as e:
logger.error(f"Request failed: {str(e)}")
if hasattr(e, "response") and e.response is not None:
logger.error(f"Error response: {e.response.text}")
raise
class TestRunner:
"""Test runner with logging and reporting"""
def __init__(self):
self.client = APIClient(config)
self.results = {"passed": 0, "failed": 0, "total_time": 0}
self.api_key = None
self.user_id = None
self.agent_id = None
def run_test(self, test_name: str, test_func: callable):
"""Run a single test with timing and logging"""
logger.info(f"\nRunning test: {test_name}")
start_time = time.time()
try:
test_func()
self.results["passed"] += 1
logger.info(f"{test_name} - PASSED")
except Exception as e:
self.results["failed"] += 1
logger.error(f"{test_name} - FAILED: {str(e)}")
logger.exception(e)
end_time = time.time()
duration = end_time - start_time
self.results["total_time"] += duration
logger.info(f"Test duration: {duration:.2f}s")
def test_user_creation(self):
"""Test user creation"""
response = self.client._request(
"POST", "/users", json={"username": "test_user"}
)
data = response.json()
assert "user_id" in data, "No user_id in response"
assert "api_key" in data, "No api_key in response"
self.api_key = data["api_key"]
self.user_id = data["user_id"]
logger.info(f"Created user with ID: {self.user_id}")
def test_create_api_key(self):
"""Test API key creation"""
headers = {"api-key": self.api_key}
response = self.client._request(
"POST",
f"/users/{self.user_id}/api-keys",
headers=headers,
json={"name": "test_key"},
)
data = response.json()
assert "key" in data, "No key in response"
logger.info("Successfully created new API key")
def test_create_agent(self):
"""Test agent creation"""
headers = {"api-key": self.api_key}
agent_config = {
"agent_name": "test_agent",
"model_name": "gpt-4",
"system_prompt": "You are a test agent",
"description": "Test agent description",
"temperature": 0.7,
"max_loops": 1,
}
response = self.client._request(
"POST", "/agent", headers=headers, json=agent_config
)
data = response.json()
assert "agent_id" in data, "No agent_id in response"
self.agent_id = data["agent_id"]
logger.info(f"Created agent with ID: {self.agent_id}")
# Wait a bit for agent to be ready
time.sleep(2)
def test_list_agents(self):
"""Test agent listing"""
headers = {"api-key": self.api_key}
response = self.client._request(
"GET", "/agents", headers=headers
)
agents = response.json()
assert isinstance(agents, list), "Response is not a list"
assert len(agents) > 0, "No agents returned"
logger.info(f"Successfully retrieved {len(agents)} agents")
def test_agent_completion(self):
"""Test agent completion"""
if not self.agent_id:
logger.error("No agent_id available for completion test")
raise ValueError("Agent ID not set")
headers = {"api-key": self.api_key}
completion_request = {
"prompt": "Write 'Hello World!'",
"agent_id": str(
self.agent_id
), # Ensure UUID is converted to string
"max_tokens": 100,
"stream": False,
"temperature_override": 0.7,
}
logger.info(
f"Sending completion request for agent {self.agent_id}"
)
response = self.client._request(
"POST",
"/agent/completions",
headers=headers,
json=completion_request,
)
data = response.json()
assert "response" in data, "No response in completion"
logger.info(f"Completion response: {data.get('response')}")
def run_all_tests(self):
"""Run all tests and generate report"""
logger.info("\n" + "=" * 50)
logger.info("Starting API test suite...")
logger.info(f"Base URL: {config.base_url}")
logger.info("=" * 50 + "\n")
# Define test sequence
tests = [
("User Creation", self.test_user_creation),
("API Key Creation", self.test_create_api_key),
("Agent Creation", self.test_create_agent),
("List Agents", self.test_list_agents),
("Agent Completion", self.test_agent_completion),
]
# Run tests
for test_name, test_func in tests:
self.run_test(test_name, test_func)
# Generate report
self.print_report()
def print_report(self):
"""Print test results report"""
total_tests = self.results["passed"] + self.results["failed"]
success_rate = (
(self.results["passed"] / total_tests * 100)
if total_tests > 0
else 0
)
report = f"""
\n{'='*50}
API TEST RESULTS
{'='*50}
Total Tests: {total_tests}
Passed: {self.results['passed']}
Failed: {self.results['failed']}
Success Rate: {success_rate:.2f}%
Total Time: {self.results['total_time']:.2f}s
{'='*50}
"""
logger.info(report)
if __name__ == "__main__":
try:
runner = TestRunner()
runner.run_all_tests()
except KeyboardInterrupt:
logger.info("\nTest suite interrupted by user")
except Exception as e:
logger.error(f"Test suite failed: {str(e)}")
logger.exception(e)

@ -1,936 +0,0 @@
import os
import secrets
import traceback
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional
from uuid import UUID, uuid4
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
#consider if the following imports need to be added to the main swarms requirements.txt:
#opentelemetry-api
#opentelemetry-sdk
#opentelemetry-instrumentation-fastapi
#opentelemetry-instrumentation-requests
#opentelemetry-exporter-otlp-proto-grpc
import uvicorn
from dotenv import load_dotenv
from fastapi import (
BackgroundTasks,
Depends,
FastAPI,
Header,
HTTPException,
Query,
Request,
status,
)
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
OTEL_SERVICE_NAME = os.getenv("OTEL_SERVICE_NAME", "swarms-api")
OTEL_EXPORTER_OTLP_ENDPOINT = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://aws-otel-collector:4317")
# Load environment variables
load_dotenv()
class AgentStatus(str, Enum):
"""Enum for agent status."""
IDLE = "idle"
PROCESSING = "processing"
ERROR = "error"
MAINTENANCE = "maintenance"
# Security configurations
API_KEY_LENGTH = 32 # Length of generated API keys
class APIKey(BaseModel):
key: str
name: str
created_at: datetime
last_used: datetime
is_active: bool = True
class APIKeyCreate(BaseModel):
name: str # A friendly name for the API key
class User(BaseModel):
id: UUID
username: str
is_active: bool = True
is_admin: bool = False
api_keys: Dict[str, APIKey] = {} # key -> APIKey object
class AgentConfig(BaseModel):
"""Configuration model for creating a new agent."""
agent_name: str = Field(..., description="Name of the agent")
model_name: str = Field(
...,
description="Name of the llm you want to use provided by litellm",
)
description: str = Field(
default="", description="Description of the agent's purpose"
)
system_prompt: str = Field(
..., description="System prompt for the agent"
)
model_name: str = Field(
default="gpt-4", description="Model name to use"
)
temperature: float = Field(
default=0.1,
ge=0.0,
le=2.0,
description="Temperature for the model",
)
max_loops: int = Field(
default=1, ge=1, description="Maximum number of loops"
)
autosave: bool = Field(
default=True, description="Enable autosave"
)
dashboard: bool = Field(
default=False, description="Enable dashboard"
)
verbose: bool = Field(
default=True, description="Enable verbose output"
)
dynamic_temperature_enabled: bool = Field(
default=True, description="Enable dynamic temperature"
)
user_name: str = Field(
default="default_user", description="Username for the agent"
)
retry_attempts: int = Field(
default=1, ge=1, description="Number of retry attempts"
)
context_length: int = Field(
default=200000, ge=1000, description="Context length"
)
output_type: str = Field(
default="string", description="Output type (string or json)"
)
streaming_on: bool = Field(
default=False, description="Enable streaming"
)
tags: List[str] = Field(
default_factory=list,
description="Tags for categorizing the agent",
)
class AgentUpdate(BaseModel):
"""Model for updating agent configuration."""
description: Optional[str] = None
system_prompt: Optional[str] = None
temperature: Optional[float] = 0.5
max_loops: Optional[int] = 1
tags: Optional[List[str]] = None
status: Optional[AgentStatus] = None
class AgentSummary(BaseModel):
"""Summary model for agent listing."""
agent_id: UUID
agent_name: str
description: str
created_at: datetime
last_used: datetime
total_completions: int
tags: List[str]
status: AgentStatus
class AgentMetrics(BaseModel):
"""Model for agent performance metrics."""
total_completions: int
average_response_time: float
error_rate: float
last_24h_completions: int
total_tokens_used: int
uptime_percentage: float
success_rate: float
peak_tokens_per_minute: int
class CompletionRequest(BaseModel):
"""Model for completion requests."""
prompt: str = Field(..., description="The prompt to process")
agent_id: UUID = Field(..., description="ID of the agent to use")
max_tokens: Optional[int] = Field(
None, description="Maximum tokens to generate"
)
temperature_override: Optional[float] = 0.5
stream: bool = Field(
default=False, description="Enable streaming response"
)
class CompletionResponse(BaseModel):
"""Model for completion responses."""
agent_id: UUID
response: str
metadata: Dict[str, Any]
timestamp: datetime
processing_time: float
token_usage: Dict[str, int]
class AgentStore:
"""Enhanced store for managing agents."""
def __init__(self):
self.agents: Dict[UUID, Agent] = {}
self.agent_metadata: Dict[UUID, Dict[str, Any]] = {}
self.users: Dict[UUID, User] = {} # user_id -> User
self.api_keys: Dict[str, UUID] = {} # api_key -> user_id
self.user_agents: Dict[UUID, List[UUID]] = (
{}
) # user_id -> [agent_ids]
self.executor = ThreadPoolExecutor(max_workers=4)
self._ensure_directories()
def _ensure_directories(self):
"""Ensure required directories exist."""
Path("logs").mkdir(exist_ok=True)
Path("states").mkdir(exist_ok=True)
def create_api_key(self, user_id: UUID, key_name: str) -> APIKey:
"""Create a new API key for a user."""
if user_id not in self.users:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="User not found",
)
# Generate a secure random API key
api_key = secrets.token_urlsafe(API_KEY_LENGTH)
# Create the API key object
key_object = APIKey(
key=api_key,
name=key_name,
created_at=datetime.utcnow(),
last_used=datetime.utcnow(),
)
# Store the API key
self.users[user_id].api_keys[api_key] = key_object
self.api_keys[api_key] = user_id
return key_object
async def verify_agent_access(
self, agent_id: UUID, user_id: UUID
) -> bool:
"""Verify if a user has access to an agent."""
if agent_id not in self.agents:
return False
return (
self.agent_metadata[agent_id]["owner_id"] == user_id
or self.users[user_id].is_admin
)
def validate_api_key(self, api_key: str) -> Optional[UUID]:
"""Validate an API key and return the associated user ID."""
user_id = self.api_keys.get(api_key)
if not user_id or api_key not in self.users[user_id].api_keys:
return None
key_object = self.users[user_id].api_keys[api_key]
if not key_object.is_active:
return None
# Update last used timestamp
key_object.last_used = datetime.utcnow()
return user_id
async def create_agent(
self, config: AgentConfig, user_id: UUID
) -> UUID:
"""Create a new agent with the given configuration."""
try:
agent = Agent(
agent_name=config.agent_name,
system_prompt=config.system_prompt,
model_name=config.model_name,
max_loops=config.max_loops,
autosave=config.autosave,
dashboard=config.dashboard,
verbose=config.verbose,
dynamic_temperature_enabled=True,
saved_state_path=f"states/{config.agent_name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
user_name=config.user_name,
retry_attempts=config.retry_attempts,
context_length=config.context_length,
return_step_meta=True,
output_type="str",
streaming_on=config.streaming_on,
)
agent_id = uuid4()
self.agents[agent_id] = agent
self.agent_metadata[agent_id] = {
"description": config.description,
"created_at": datetime.utcnow(),
"last_used": datetime.utcnow(),
"total_completions": 0,
"tags": config.tags,
"total_tokens": 0,
"error_count": 0,
"response_times": [],
"status": AgentStatus.IDLE,
"start_time": datetime.utcnow(),
"downtime": timedelta(),
"successful_completions": 0,
}
# Add to user's agents list
if user_id not in self.user_agents:
self.user_agents[user_id] = []
self.user_agents[user_id].append(agent_id)
return agent_id
except Exception as e:
logger.error(f"Error creating agent: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Failed to create agent: {str(e)}",
)
async def get_agent(self, agent_id: UUID) -> Agent:
"""Retrieve an agent by ID."""
agent = self.agents.get(agent_id)
if not agent:
logger.error(f"Agent not found: {agent_id}")
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Agent {agent_id} not found",
)
return agent
async def update_agent(
self, agent_id: UUID, update: AgentUpdate
) -> None:
"""Update agent configuration."""
agent = await self.get_agent(agent_id)
metadata = self.agent_metadata[agent_id]
if update.system_prompt:
agent.system_prompt = update.system_prompt
if update.max_loops is not None:
agent.max_loops = update.max_loops
if update.tags is not None:
metadata["tags"] = update.tags
if update.description is not None:
metadata["description"] = update.description
if update.status is not None:
metadata["status"] = update.status
if update.status == AgentStatus.MAINTENANCE:
metadata["downtime"] += (
datetime.utcnow() - metadata["last_used"]
)
logger.info(f"Updated agent {agent_id}")
async def list_agents(
self,
tags: Optional[List[str]] = None,
status: Optional[AgentStatus] = None,
) -> List[AgentSummary]:
"""List all agents, optionally filtered by tags and status."""
summaries = []
for agent_id, agent in self.agents.items():
metadata = self.agent_metadata[agent_id]
# Apply filters
if tags and not any(
tag in metadata["tags"] for tag in tags
):
continue
if status and metadata["status"] != status:
continue
summaries.append(
AgentSummary(
agent_id=agent_id,
agent_name=agent.agent_name,
description=metadata["description"],
created_at=metadata["created_at"],
last_used=metadata["last_used"],
total_completions=metadata["total_completions"],
tags=metadata["tags"],
status=metadata["status"],
)
)
return summaries
async def get_agent_metrics(self, agent_id: UUID) -> AgentMetrics:
"""Get performance metrics for an agent."""
metadata = self.agent_metadata[agent_id]
response_times = metadata["response_times"]
# Calculate metrics
total_time = datetime.utcnow() - metadata["start_time"]
uptime = total_time - metadata["downtime"]
uptime_percentage = (
uptime.total_seconds() / total_time.total_seconds()
) * 100
success_rate = (
metadata["successful_completions"]
/ metadata["total_completions"]
* 100
if metadata["total_completions"] > 0
else 0
)
return AgentMetrics(
total_completions=metadata["total_completions"],
average_response_time=(
sum(response_times) / len(response_times)
if response_times
else 0
),
error_rate=(
metadata["error_count"]
/ metadata["total_completions"]
if metadata["total_completions"] > 0
else 0
),
last_24h_completions=sum(
1
for t in response_times
if (datetime.utcnow() - t).days < 1
),
total_tokens_used=metadata["total_tokens"],
uptime_percentage=uptime_percentage,
success_rate=success_rate,
peak_tokens_per_minute=max(
metadata.get("tokens_per_minute", [0])
),
)
async def clone_agent(
self, agent_id: UUID, new_name: str
) -> UUID:
"""Clone an existing agent with a new name."""
original_agent = await self.get_agent(agent_id)
original_metadata = self.agent_metadata[agent_id]
config = AgentConfig(
agent_name=new_name,
description=f"Clone of {original_agent.agent_name}",
system_prompt=original_agent.system_prompt,
model_name=original_agent.model_name,
temperature=0.5,
max_loops=original_agent.max_loops,
tags=original_metadata["tags"],
)
return await self.create_agent(config)
async def delete_agent(self, agent_id: UUID) -> None:
"""Delete an agent."""
if agent_id not in self.agents:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Agent {agent_id} not found",
)
# Clean up any resources
agent = self.agents[agent_id]
if agent.autosave and os.path.exists(agent.saved_state_path):
os.remove(agent.saved_state_path)
del self.agents[agent_id]
del self.agent_metadata[agent_id]
logger.info(f"Deleted agent {agent_id}")
async def process_completion(
self,
agent: Agent,
prompt: str,
agent_id: UUID,
max_tokens: Optional[int] = None,
temperature_override: Optional[float] = None,
) -> CompletionResponse:
"""Process a completion request using the specified agent."""
# TELEMETRY CHANGE 6: Initialize tracer for this module
tracer = trace.get_tracer(__name__)
# TELEMETRY CHANGE 7: Create parent span for entire completion process
with tracer.start_as_current_span("process_completion") as span:
# TELEMETRY CHANGE 8: Add context attributes
span.set_attribute("agent.id", str(agent_id))
span.set_attribute("agent.name", agent.agent_name)
span.set_attribute("prompt.length", len(prompt))
if max_tokens:
span.set_attribute("max_tokens", max_tokens)
start_time = datetime.utcnow()
metadata = self.agent_metadata[agent_id]
try:
with tracer.start_span("update_agent_status") as status_span:
metadata["status"] = AgentStatus.PROCESSING
metadata["last_used"] = start_time
status_span.set_attribute("agent.status", AgentStatus.PROCESSING.value)
with tracer.start_span("process_agent_completion") as completion_span:
response = agent.run(prompt)
completion_span.set_attribute("completion.success", True)
with tracer.start_span("update_metrics") as metrics_span:
processing_time = (datetime.utcnow() - start_time).total_seconds()
metadata["response_times"].append(processing_time)
metadata["total_completions"] += 1
metadata["successful_completions"] += 1
prompt_tokens = len(prompt.split()) * 1.3
completion_tokens = len(response.split()) * 1.3
total_tokens = int(prompt_tokens + completion_tokens)
metadata["total_tokens"] += total_tokens
metrics_span.set_attribute("processing.time", processing_time)
metrics_span.set_attribute("tokens.total", total_tokens)
metrics_span.set_attribute("tokens.prompt", int(prompt_tokens))
metrics_span.set_attribute("tokens.completion", int(completion_tokens))
with tracer.start_span("update_token_tracking") as token_span:
current_minute = datetime.utcnow().replace(second=0, microsecond=0)
if "tokens_per_minute" not in metadata:
metadata["tokens_per_minute"] = {}
metadata["tokens_per_minute"][current_minute] = (
metadata["tokens_per_minute"].get(current_minute, 0) + total_tokens
)
token_span.set_attribute("tokens.per_minute",
metadata["tokens_per_minute"][current_minute])
completion_response = CompletionResponse(
agent_id=agent_id,
response=response,
metadata={
"agent_name": agent.agent_name,
},
timestamp=datetime.utcnow(),
processing_time=processing_time,
token_usage={
"prompt_tokens": int(prompt_tokens),
"completion_tokens": int(completion_tokens),
"total_tokens": total_tokens,
},
)
# TELEMETRY CHANGE 10: Detailed error tracking
span.set_attribute("completion.status", "success")
return completion_response
except Exception as e:
metadata["error_count"] += 1
metadata["status"] = AgentStatus.ERROR
# TELEMETRY CHANGE 11: Detailed error recording
span.set_attribute("completion.status", "error")
span.set_attribute("error.type", e.__class__.__name__)
span.set_attribute("error.message", str(e))
span.record_exception(e)
logger.error(
f"Error in completion processing: {str(e)}\n{traceback.format_exc()}"
)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Error processing completion: {str(e)}",
)
finally:
metadata["status"] = AgentStatus.IDLE
span.set_attribute("agent.final_status", AgentStatus.IDLE.value)
class StoreManager:
_instance = None
@classmethod
def get_instance(cls) -> "AgentStore":
if cls._instance is None:
cls._instance = AgentStore()
return cls._instance
# Modify the dependency function
def get_store() -> AgentStore:
"""Dependency to get the AgentStore instance."""
return StoreManager.get_instance()
# Security utility function using the new dependency
async def get_current_user(
api_key: str = Header(
..., description="API key for authentication"
),
store: AgentStore = Depends(get_store),
) -> User:
"""Validate API key and return current user."""
user_id = store.validate_api_key(api_key)
if not user_id:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid or expired API key",
headers={"WWW-Authenticate": "ApiKey"},
)
return store.users[user_id]
class SwarmsAPI:
"""Enhanced API class for Swarms agent integration."""
def __init__(self):
self.app = FastAPI(
title="Swarms Agent API",
description="Production-grade API for Swarms agent interaction",
version="1.0.0",
docs_url="/v1/docs",
redoc_url="/v1/redoc",
)
# Initialize the store using the singleton manager
self.store = StoreManager.get_instance()
# Configure CORS
self.app.add_middleware(
CORSMiddleware,
allow_origins=[
"*"
], # Configure appropriately for production
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
self._setup_routes()
def _setup_routes(self):
"""Set up API routes."""
# In your API code
@self.app.post("/v1/users", response_model=Dict[str, Any])
async def create_user(request: Request):
"""Create a new user and initial API key."""
try:
body = await request.json()
username = body.get("username")
if not username or len(username) < 3:
raise HTTPException(
status_code=400, detail="Invalid username"
)
user_id = uuid4()
user = User(id=user_id, username=username)
self.store.users[user_id] = user
initial_key = self.store.create_api_key(
user_id, "Initial Key"
)
return {
"user_id": user_id,
"api_key": initial_key.key,
}
except Exception as e:
logger.error(f"Error creating user: {str(e)}")
raise HTTPException(status_code=400, detail=str(e))
@self.app.post(
"/v1/users/{user_id}/api-keys", response_model=APIKey
)
async def create_api_key(
user_id: UUID,
key_create: APIKeyCreate,
current_user: User = Depends(get_current_user),
):
"""Create a new API key for a user."""
if (
current_user.id != user_id
and not current_user.is_admin
):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Not authorized to create API keys for this user",
)
return self.store.create_api_key(user_id, key_create.name)
@self.app.get(
"/v1/users/{user_id}/api-keys",
response_model=List[APIKey],
)
async def list_api_keys(
user_id: UUID,
current_user: User = Depends(get_current_user),
):
"""List all API keys for a user."""
if (
current_user.id != user_id
and not current_user.is_admin
):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Not authorized to view API keys for this user",
)
return list(self.store.users[user_id].api_keys.values())
@self.app.delete("/v1/users/{user_id}/api-keys/{key}")
async def revoke_api_key(
user_id: UUID,
key: str,
current_user: User = Depends(get_current_user),
):
"""Revoke an API key."""
if (
current_user.id != user_id
and not current_user.is_admin
):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Not authorized to revoke API keys for this user",
)
if key in self.store.users[user_id].api_keys:
self.store.users[user_id].api_keys[
key
].is_active = False
del self.store.api_keys[key]
return {"status": "API key revoked"}
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="API key not found",
)
@self.app.get(
"/v1/users/me/agents", response_model=List[AgentSummary]
)
async def list_user_agents(
current_user: User = Depends(get_current_user),
tags: Optional[List[str]] = Query(None),
status: Optional[AgentStatus] = None,
):
"""List all agents owned by the current user."""
user_agents = self.store.user_agents.get(
current_user.id, []
)
return [
agent
for agent in await self.store.list_agents(
tags, status
)
if agent.agent_id in user_agents
]
# Modify existing routes to use API key authentication
@self.app.post("/v1/agent", response_model=Dict[str, UUID])
async def create_agent(
config: AgentConfig,
current_user: User = Depends(get_current_user),
):
"""Create a new agent with the specified configuration."""
agent_id = await self.store.create_agent(
config, current_user.id
)
return {"agent_id": agent_id}
@self.app.get("/v1/agents", response_model=List[AgentSummary])
async def list_agents(
tags: Optional[List[str]] = Query(None),
status: Optional[AgentStatus] = None,
):
"""List all agents, optionally filtered by tags and status."""
return await self.store.list_agents(tags, status)
@self.app.patch(
"/v1/agent/{agent_id}", response_model=Dict[str, str]
)
async def update_agent(agent_id: UUID, update: AgentUpdate):
"""Update an existing agent's configuration."""
await self.store.update_agent(agent_id, update)
return {"status": "updated"}
@self.app.get(
"/v1/agent/{agent_id}/metrics",
response_model=AgentMetrics,
)
async def get_agent_metrics(agent_id: UUID):
"""Get performance metrics for a specific agent."""
return await self.store.get_agent_metrics(agent_id)
@self.app.post(
"/v1/agent/{agent_id}/clone",
response_model=Dict[str, UUID],
)
async def clone_agent(agent_id: UUID, new_name: str):
"""Clone an existing agent with a new name."""
new_id = await self.store.clone_agent(agent_id, new_name)
return {"agent_id": new_id}
@self.app.delete("/v1/agent/{agent_id}")
async def delete_agent(agent_id: UUID):
"""Delete an agent."""
await self.store.delete_agent(agent_id)
return {"status": "deleted"}
@self.app.post(
"/v1/agent/completions", response_model=CompletionResponse
)
async def create_completion(
request: CompletionRequest,
background_tasks: BackgroundTasks,
):
"""Process a completion request with the specified agent."""
try:
agent = await self.store.get_agent(request.agent_id)
# Process completion
response = await self.store.process_completion(
agent,
request.prompt,
request.agent_id,
request.max_tokens,
0.5,
)
# Schedule background cleanup
background_tasks.add_task(
self._cleanup_old_metrics, request.agent_id
)
return response
except Exception as e:
logger.error(f"Error processing completion: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Error processing completion: {str(e)}",
)
@self.app.get("/v1/agent/{agent_id}/status")
async def get_agent_status(agent_id: UUID):
"""Get the current status of an agent."""
metadata = self.store.agent_metadata.get(agent_id)
if not metadata:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Agent {agent_id} not found",
)
return {
"agent_id": agent_id,
"status": metadata["status"],
"last_used": metadata["last_used"],
"total_completions": metadata["total_completions"],
"error_count": metadata["error_count"],
}
async def _cleanup_old_metrics(self, agent_id: UUID):
"""Clean up old metrics data to prevent memory bloat."""
metadata = self.store.agent_metadata.get(agent_id)
if metadata:
# Keep only last 24 hours of response times
cutoff = datetime.utcnow() - timedelta(days=1)
metadata["response_times"] = [
t
for t in metadata["response_times"]
if isinstance(t, (int, float))
and t > cutoff.timestamp()
]
# Clean up old tokens per minute data
if "tokens_per_minute" in metadata:
metadata["tokens_per_minute"] = {
k: v
for k, v in metadata["tokens_per_minute"].items()
if k > cutoff
}
@app.middleware("http")
async def add_trace_context(request: Request, call_next):
span = trace.get_current_span()
span.set_attribute("http.url", str(request.url))
span.set_attribute("http.method", request.method)
response = await call_next(request)
span.set_attribute("http.status_code", response.status_code)
return response
def create_app() -> FastAPI:
"""Create and configure the FastAPI application."""
logger.info("Creating FastAPI application")
# TELEMETRY CHANGE 1: Configure OpenTelemetry resource with service name
resource = Resource.create({"service.name": "swarms-api"})
trace.set_tracer_provider(TracerProvider(resource=resource))
# TELEMETRY CHANGE 2: Set up OTLP exporter for AWS
otlp_exporter = OTLPSpanExporter(
endpoint="http://aws-otel-collector:4317", # AWS OpenTelemetry Collector endpoint
insecure=True
)
# TELEMETRY CHANGE 3: Configure batch processing of spans
span_processor = BatchSpanProcessor(otlp_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)
api = SwarmsAPI()
app = api.app
# TELEMETRY CHANGE 4: Instrument FastAPI framework
FastAPIInstrumentor.instrument_app(app)
# TELEMETRY CHANGE 5: Instrument HTTP client library
RequestsInstrumentor().instrument()
logger.info("FastAPI application created successfully")
return app
app = create_app()
if __name__ == "__main__":
try:
logger.info("Starting API server...")
print("Starting API server on http://0.0.0.0:8000")
uvicorn.run(
app, # Pass the app instance directly
host="0.0.0.0",
port=8000,
log_level="info",
)
except Exception as e:
logger.error(f"Failed to start API: {str(e)}")
print(f"Error starting server: {str(e)}")

@ -1,254 +0,0 @@
import os
from typing import Dict, Optional, Any
from dataclasses import dataclass
import pytest
import requests
from uuid import UUID
from pydantic import BaseModel
from _pytest.terminal import TerminalReporter
# Configuration
@dataclass
class TestConfig:
"""Test configuration settings"""
base_url: str
timeout: int = 30
verify_ssl: bool = True
# Load config from environment or use defaults
config = TestConfig(
base_url=os.getenv("API_BASE_URL", "http://localhost:8000/v1")
)
# API Response Types
class UserResponse(BaseModel):
user_id: str
api_key: str
class AgentResponse(BaseModel):
agent_id: UUID
class MetricsResponse(BaseModel):
total_completions: int
average_response_time: float
error_rate: float
last_24h_completions: int
total_tokens_used: int
uptime_percentage: float
success_rate: float
peak_tokens_per_minute: int
class APIClient:
"""API Client with typed methods"""
def __init__(self, config: TestConfig):
self.config = config
self.session = requests.Session()
def _url(self, path: str) -> str:
"""Construct full URL"""
return f"{self.config.base_url}/{path.lstrip('/')}"
def _request(
self,
method: str,
path: str,
headers: Optional[Dict] = None,
**kwargs: Any,
) -> requests.Response:
"""Make HTTP request with config defaults"""
url = self._url(path)
return self.session.request(
method=method,
url=url,
headers=headers,
timeout=self.config.timeout,
verify=self.config.verify_ssl,
**kwargs,
)
def create_user(self, username: str) -> UserResponse:
"""Create a new user"""
response = self._request(
"POST", "/users", json={"username": username}
)
response.raise_for_status()
return UserResponse(**response.json())
def create_agent(
self, agent_config: Dict[str, Any], api_key: str
) -> AgentResponse:
"""Create a new agent"""
headers = {"api-key": api_key}
response = self._request(
"POST", "/agent", headers=headers, json=agent_config
)
response.raise_for_status()
return AgentResponse(**response.json())
def get_metrics(
self, agent_id: UUID, api_key: str
) -> MetricsResponse:
"""Get agent metrics"""
headers = {"api-key": api_key}
response = self._request(
"GET", f"/agent/{agent_id}/metrics", headers=headers
)
response.raise_for_status()
return MetricsResponse(**response.json())
# Test Fixtures
@pytest.fixture
def api_client() -> APIClient:
"""Fixture for API client"""
return APIClient(config)
@pytest.fixture
def test_user(api_client: APIClient) -> UserResponse:
"""Fixture for test user"""
return api_client.create_user("test_user")
@pytest.fixture
def test_agent(
api_client: APIClient, test_user: UserResponse
) -> AgentResponse:
"""Fixture for test agent"""
agent_config = {
"agent_name": "test_agent",
"model_name": "gpt-4",
"system_prompt": "You are a test agent",
"description": "Test agent description",
}
return api_client.create_agent(agent_config, test_user.api_key)
# Tests
def test_user_creation(api_client: APIClient):
"""Test user creation flow"""
response = api_client.create_user("new_test_user")
assert response.user_id
assert response.api_key
def test_agent_creation(
api_client: APIClient, test_user: UserResponse
):
"""Test agent creation flow"""
agent_config = {
"agent_name": "test_agent",
"model_name": "gpt-4",
"system_prompt": "You are a test agent",
"description": "Test agent description",
}
response = api_client.create_agent(
agent_config, test_user.api_key
)
assert response.agent_id
def test_agent_metrics(
api_client: APIClient,
test_user: UserResponse,
test_agent: AgentResponse,
):
"""Test metrics retrieval"""
metrics = api_client.get_metrics(
test_agent.agent_id, test_user.api_key
)
assert metrics.total_completions >= 0
assert metrics.error_rate >= 0
assert metrics.uptime_percentage >= 0
def test_invalid_auth(api_client: APIClient):
"""Test invalid authentication"""
with pytest.raises(requests.exceptions.HTTPError) as exc_info:
api_client.create_agent({}, "invalid_key")
assert exc_info.value.response.status_code == 401
# Custom pytest plugin to capture test results
class ResultCapture:
def __init__(self):
self.total = 0
self.passed = 0
self.failed = 0
self.errors = 0
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(
terminalreporter: TerminalReporter, exitstatus: int
):
yield
capture = getattr(
terminalreporter.config, "_result_capture", None
)
if capture:
capture.total = (
len(terminalreporter.stats.get("passed", []))
+ len(terminalreporter.stats.get("failed", []))
+ len(terminalreporter.stats.get("error", []))
)
capture.passed = len(terminalreporter.stats.get("passed", []))
capture.failed = len(terminalreporter.stats.get("failed", []))
capture.errors = len(terminalreporter.stats.get("error", []))
@dataclass
class TestReport:
total_tests: int
passed: int
failed: int
errors: int
@property
def success_rate(self) -> float:
return (
(self.passed / self.total_tests) * 100
if self.total_tests > 0
else 0
)
def run_tests() -> TestReport:
"""Run tests and generate typed report"""
# Create result capture
capture = ResultCapture()
# Create pytest configuration
args = [__file__, "-v"]
# Run pytest with our plugin
pytest.main(args, plugins=[capture])
# Generate report
return TestReport(
total_tests=capture.total,
passed=capture.passed,
failed=capture.failed,
errors=capture.errors,
)
if __name__ == "__main__":
# Example usage with environment variable
# export API_BASE_URL=http://api.example.com/v1
report = run_tests()
print("\nTest Results:")
print(f"Total Tests: {report.total_tests}")
print(f"Passed: {report.passed}")
print(f"Failed: {report.failed}")
print(f"Errors: {report.errors}")
print(f"Success Rate: {report.success_rate:.2f}%")

@ -1,472 +0,0 @@
import asyncio
import json
from datetime import datetime
from typing import Any, Dict, List, Optional
from uuid import UUID
import httpx
from loguru import logger
# Configure logger
logger.add(
"tests/api_test_{time}.log",
rotation="1 day",
retention="7 days",
level="DEBUG",
format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}",
)
class TestConfig:
"""Test configuration and utilities"""
BASE_URL: str = "http://localhost:8000/v1"
TEST_USERNAME: str = "test_user"
api_key: Optional[str] = None
user_id: Optional[UUID] = None
test_agent_id: Optional[UUID] = None
class TestResult:
"""Model for test results"""
def __init__(
self,
test_name: str,
status: str,
duration: float,
error: Optional[str] = None,
details: Optional[Dict[str, Any]] = None,
):
self.test_name = test_name
self.status = status
self.duration = duration
self.error = error
self.details = details or {}
def dict(self):
return {
"test_name": self.test_name,
"status": self.status,
"duration": self.duration,
"error": self.error,
"details": self.details,
}
async def log_response(
response: httpx.Response, test_name: str
) -> None:
"""Log API response details"""
logger.debug(f"\n{test_name} Response:")
logger.debug(f"Status Code: {response.status_code}")
logger.debug(f"Headers: {dict(response.headers)}")
try:
logger.debug(f"Body: {response.json()}")
except json.JSONDecodeError:
logger.debug(f"Body: {response.text}")
async def create_test_user() -> TestResult:
"""Create a test user and get API key"""
start_time = datetime.now()
try:
async with httpx.AsyncClient() as client:
response = await client.post(
f"{TestConfig.BASE_URL}/users",
json={"username": TestConfig.TEST_USERNAME},
)
await log_response(response, "Create User")
if response.status_code == 200:
data = response.json()
TestConfig.api_key = data["api_key"]
TestConfig.user_id = UUID(data["user_id"])
return TestResult(
test_name="create_test_user",
status="passed",
duration=(
datetime.now() - start_time
).total_seconds(),
details={"user_id": str(TestConfig.user_id)},
)
else:
return TestResult(
test_name="create_test_user",
status="failed",
duration=(
datetime.now() - start_time
).total_seconds(),
error=f"Failed to create user: {response.text}",
)
except Exception as e:
logger.error(f"Error in create_test_user: {str(e)}")
return TestResult(
test_name="create_test_user",
status="error",
duration=(datetime.now() - start_time).total_seconds(),
error=str(e),
)
async def create_test_agent() -> TestResult:
"""Create a test agent"""
start_time = datetime.now()
try:
# Create agent config according to the AgentConfig model
agent_config = {
"agent_name": "test_agent",
"model_name": "gpt-4",
"description": "Test agent for API testing",
"system_prompt": "You are a test agent.",
"temperature": 0.1,
"max_loops": 1,
"dynamic_temperature_enabled": True,
"user_name": TestConfig.TEST_USERNAME,
"retry_attempts": 1,
"context_length": 4000,
"output_type": "string",
"streaming_on": False,
"tags": ["test", "api"],
"stopping_token": "<DONE>",
"auto_generate_prompt": False,
}
async with httpx.AsyncClient() as client:
response = await client.post(
f"{TestConfig.BASE_URL}/agent",
json=agent_config,
headers={"api-key": TestConfig.api_key},
)
await log_response(response, "Create Agent")
if response.status_code == 200:
data = response.json()
TestConfig.test_agent_id = UUID(data["agent_id"])
return TestResult(
test_name="create_test_agent",
status="passed",
duration=(
datetime.now() - start_time
).total_seconds(),
details={
"agent_id": str(TestConfig.test_agent_id)
},
)
else:
return TestResult(
test_name="create_test_agent",
status="failed",
duration=(
datetime.now() - start_time
).total_seconds(),
error=f"Failed to create agent: {response.text}",
)
except Exception as e:
logger.error(f"Error in create_test_agent: {str(e)}")
return TestResult(
test_name="create_test_agent",
status="error",
duration=(datetime.now() - start_time).total_seconds(),
error=str(e),
)
async def test_agent_completion() -> TestResult:
"""Test agent completion endpoint"""
start_time = datetime.now()
try:
completion_request = {
"prompt": "Hello, this is a test prompt.",
"agent_id": str(TestConfig.test_agent_id),
"max_tokens": 100,
"temperature_override": 0.5,
"stream": False,
}
async with httpx.AsyncClient() as client:
response = await client.post(
f"{TestConfig.BASE_URL}/agent/completions",
json=completion_request,
headers={"api-key": TestConfig.api_key},
)
await log_response(response, "Agent Completion")
if response.status_code == 200:
return TestResult(
test_name="test_agent_completion",
status="passed",
duration=(
datetime.now() - start_time
).total_seconds(),
details={"response": response.json()},
)
else:
return TestResult(
test_name="test_agent_completion",
status="failed",
duration=(
datetime.now() - start_time
).total_seconds(),
error=f"Failed completion test: {response.text}",
)
except Exception as e:
logger.error(f"Error in test_agent_completion: {str(e)}")
return TestResult(
test_name="test_agent_completion",
status="error",
duration=(datetime.now() - start_time).total_seconds(),
error=str(e),
)
async def test_agent_metrics() -> TestResult:
"""Test agent metrics endpoint"""
start_time = datetime.now()
try:
if not TestConfig.test_agent_id:
return TestResult(
test_name="test_agent_metrics",
status="failed",
duration=(
datetime.now() - start_time
).total_seconds(),
error="No test agent ID available",
)
async with httpx.AsyncClient() as client:
response = await client.get(
f"{TestConfig.BASE_URL}/agent/{str(TestConfig.test_agent_id)}/metrics",
headers={"api-key": TestConfig.api_key},
)
await log_response(response, "Agent Metrics")
if response.status_code == 200:
return TestResult(
test_name="test_agent_metrics",
status="passed",
duration=(
datetime.now() - start_time
).total_seconds(),
details={"metrics": response.json()},
)
else:
return TestResult(
test_name="test_agent_metrics",
status="failed",
duration=(
datetime.now() - start_time
).total_seconds(),
error=f"Failed metrics test: {response.text}",
)
except Exception as e:
logger.error(f"Error in test_agent_metrics: {str(e)}")
return TestResult(
test_name="test_agent_metrics",
status="error",
duration=(datetime.now() - start_time).total_seconds(),
error=str(e),
)
async def test_update_agent() -> TestResult:
"""Test agent update endpoint"""
start_time = datetime.now()
try:
if not TestConfig.test_agent_id:
return TestResult(
test_name="test_update_agent",
status="failed",
duration=(
datetime.now() - start_time
).total_seconds(),
error="No test agent ID available",
)
update_data = {
"description": "Updated test agent description",
"tags": ["test", "updated"],
"max_loops": 2,
}
async with httpx.AsyncClient() as client:
response = await client.patch(
f"{TestConfig.BASE_URL}/agent/{str(TestConfig.test_agent_id)}",
json=update_data,
headers={"api-key": TestConfig.api_key},
)
await log_response(response, "Update Agent")
if response.status_code == 200:
return TestResult(
test_name="test_update_agent",
status="passed",
duration=(
datetime.now() - start_time
).total_seconds(),
details={"update_response": response.json()},
)
else:
return TestResult(
test_name="test_update_agent",
status="failed",
duration=(
datetime.now() - start_time
).total_seconds(),
error=f"Failed update test: {response.text}",
)
except Exception as e:
logger.error(f"Error in test_update_agent: {str(e)}")
return TestResult(
test_name="test_update_agent",
status="error",
duration=(datetime.now() - start_time).total_seconds(),
error=str(e),
)
async def test_error_handling() -> TestResult:
"""Test API error handling"""
start_time = datetime.now()
try:
async with httpx.AsyncClient() as client:
# Test with invalid API key
invalid_agent_id = "00000000-0000-0000-0000-000000000000"
response = await client.get(
f"{TestConfig.BASE_URL}/agent/{invalid_agent_id}/metrics",
headers={"api-key": "invalid_key"},
)
await log_response(response, "Invalid API Key Test")
if response.status_code in [401, 403]:
return TestResult(
test_name="test_error_handling",
status="passed",
duration=(
datetime.now() - start_time
).total_seconds(),
details={"error_response": response.json()},
)
else:
return TestResult(
test_name="test_error_handling",
status="failed",
duration=(
datetime.now() - start_time
).total_seconds(),
error="Error handling test failed",
)
except Exception as e:
logger.error(f"Error in test_error_handling: {str(e)}")
return TestResult(
test_name="test_error_handling",
status="error",
duration=(datetime.now() - start_time).total_seconds(),
error=str(e),
)
async def cleanup_test_resources() -> TestResult:
"""Clean up test resources"""
start_time = datetime.now()
try:
if TestConfig.test_agent_id:
async with httpx.AsyncClient() as client:
response = await client.delete(
f"{TestConfig.BASE_URL}/agent/{str(TestConfig.test_agent_id)}",
headers={"api-key": TestConfig.api_key},
)
await log_response(response, "Delete Agent")
return TestResult(
test_name="cleanup_test_resources",
status="passed",
duration=(datetime.now() - start_time).total_seconds(),
details={"cleanup": "completed"},
)
except Exception as e:
logger.error(f"Error in cleanup_test_resources: {str(e)}")
return TestResult(
test_name="cleanup_test_resources",
status="error",
duration=(datetime.now() - start_time).total_seconds(),
error=str(e),
)
async def run_all_tests() -> List[TestResult]:
"""Run all tests in sequence"""
logger.info("Starting API test suite")
results = []
# Initialize
results.append(await create_test_user())
if results[-1].status != "passed":
logger.error(
"Failed to create test user, aborting remaining tests"
)
return results
# Add delay to ensure user is properly created
await asyncio.sleep(1)
# Core tests
test_functions = [
create_test_agent,
test_agent_completion,
test_agent_metrics,
test_update_agent,
test_error_handling,
]
for test_func in test_functions:
result = await test_func()
results.append(result)
logger.info(f"Test {result.test_name}: {result.status}")
if result.error:
logger.error(
f"Error in {result.test_name}: {result.error}"
)
# Add small delay between tests
await asyncio.sleep(0.5)
# Cleanup
results.append(await cleanup_test_resources())
# Log summary
passed = sum(1 for r in results if r.status == "passed")
failed = sum(1 for r in results if r.status == "failed")
errors = sum(1 for r in results if r.status == "error")
logger.info("\nTest Summary:")
logger.info(f"Total Tests: {len(results)}")
logger.info(f"Passed: {passed}")
logger.info(f"Failed: {failed}")
logger.info(f"Errors: {errors}")
return results
def main():
"""Main entry point for running tests"""
logger.info("Starting API testing suite")
try:
results = asyncio.run(run_all_tests())
# Write results to JSON file
with open("test_results.json", "w") as f:
json.dump(
[result.dict() for result in results],
f,
indent=2,
default=str,
)
logger.info("Test results written to test_results.json")
except Exception:
logger.error("Fatal error in test suite: ")
main()

@ -1,11 +0,0 @@
fastapi
uvicorn
pydantic
loguru
python-dotenv
swarms # Specify the version or source if it's not on PyPI
opentelemetry-api
opentelemetry-sdk
opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-requests
opentelemetry-exporter-otlp-proto-grpc

@ -1,37 +0,0 @@
service:
readiness_probe:
path: /docs
initial_delay_seconds: 300
timeout_seconds: 30
replica_policy:
min_replicas: 1
max_replicas: 50
target_qps_per_replica: 5
upscale_delay_seconds: 180
downscale_delay_seconds: 600
resources:
ports: 8000 # FastAPI default port
cpus: 16
memory: 64
disk_size: 100
use_spot: true
workdir: /app
setup: |
git clone https://github.com/kyegomez/swarms.git
cd swarms/api
pip install -r requirements.txt
pip install swarms
run: |
cd swarms/api
uvicorn main:app --host 0.0.0.0 --port 8000 --workers 4
# env:
# PYTHONPATH: /app/swarms
# LOG_LEVEL: "INFO"
# # MAX_WORKERS: "4"

@ -1,112 +0,0 @@
import requests
import json
from time import sleep
BASE_URL = "http://0.0.0.0:8000/v1"
def make_request(method, endpoint, data=None):
"""Helper function to make requests with error handling"""
url = f"{BASE_URL}{endpoint}"
try:
if method == "GET":
response = requests.get(url)
elif method == "POST":
response = requests.post(url, json=data)
elif method == "DELETE":
response = requests.delete(url)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
print(
f"Error making {method} request to {endpoint}: {str(e)}"
)
if hasattr(e.response, "text"):
print(f"Response text: {e.response.text}")
return None
def create_agent():
"""Create a test agent"""
data = {
"agent_name": "test_agent",
"model_name": "gpt-4",
"system_prompt": "You are a helpful assistant",
"description": "Test agent",
"temperature": 0.7,
"max_loops": 1,
"tags": ["test"],
}
return make_request("POST", "/v1/agent", data)
def list_agents():
"""List all agents"""
return make_request("GET", "/v1/agents")
def test_completion(agent_id):
"""Test a completion with the agent"""
data = {
"prompt": "Say hello!",
"agent_id": agent_id,
"max_tokens": 100,
}
return make_request("POST", "/v1/agent/completions", data)
def get_agent_metrics(agent_id):
"""Get metrics for an agent"""
return make_request("GET", f"/v1/agent/{agent_id}/metrics")
def delete_agent(agent_id):
"""Delete an agent"""
return make_request("DELETE", f"/v1/agent/{agent_id}")
def run_tests():
print("Starting API tests...")
# Create an agent
print("\n1. Creating agent...")
agent_response = create_agent()
if not agent_response:
print("Failed to create agent")
return
agent_id = agent_response.get("agent_id")
print(f"Created agent with ID: {agent_id}")
# Give the server a moment to process
sleep(2)
# List agents
print("\n2. Listing agents...")
agents = list_agents()
print(f"Found {len(agents)} agents")
# Test completion
if agent_id:
print("\n3. Testing completion...")
completion = test_completion(agent_id)
if completion:
print(
f"Completion response: {completion.get('response')}"
)
print("\n4. Getting agent metrics...")
metrics = get_agent_metrics(agent_id)
if metrics:
print(f"Agent metrics: {json.dumps(metrics, indent=2)}")
# Clean up
# print("\n5. Cleaning up - deleting agent...")
# delete_result = delete_agent(agent_id)
# if delete_result:
# print("Successfully deleted agent")
if __name__ == "__main__":
run_tests()

@ -160,6 +160,7 @@ nav:
- OpenAI Assistant: "swarms/agents/openai_assistant.md"
- Create and Run Agents from YAML: "swarms/agents/create_agents_yaml.md"
- Integrating External Agents from Griptape, Langchain, etc: "swarms/agents/external_party_agents.md"
- Creating Custom Agents: "swarms/agents/new_agent.md"
- Tools:
- Overview: "swarms/tools/main.md"
- What are tools?: "swarms/tools/build_tool.md"
@ -172,6 +173,7 @@ nav:
- Swarm Architectures: "swarms/concept/swarm_architectures.md"
- Choosing the right Swarm Architecture: "swarms/concept/how_to_choose_swarms.md"
- Building Custom Swarms: "swarms/structs/custom_swarm.md"
- Create New Swarm Architectures: "swarms/structs/create_new_swarm.md"
- Architectures Available:
- MajorityVoting: "swarms/structs/majorityvoting.md"
- AgentRearrange: "swarms/structs/agent_rearrange.md"
@ -184,6 +186,8 @@ nav:
- ForestSwarm: "swarms/structs/forest_swarm.md"
- SwarmRouter: "swarms/structs/swarm_router.md"
- TaskQueueSwarm: "swarms/structs/taskqueue_swarm.md"
- SwarmRearrange: "swarms/structs/swarm_rearrange.md"
- MultiAgentRouter: "swarms/structs/multi_agent_router.md"
- Various Execution Methods: "swarms/structs/various_execution_methods.md"
- Workflows:
- ConcurrentWorkflow: "swarms/structs/concurrentworkflow.md"
@ -193,6 +197,8 @@ nav:
- Conversation: "swarms/structs/conversation.md"
# - Task: "swarms/structs/task.md"
- Full API Reference: "swarms/framework/reference.md"
- Examples:
- Unique Swarms: "swarms/examples/unique_swarms.md"
- Swarm Models:
- Overview: "swarms/models/index.md"
# - Models Available: "swarms/models/index.md"
@ -215,11 +221,12 @@ nav:
- BaseMultiModalModel: "swarms/models/base_multimodal_model.md"
- Multi Modal Models Available: "swarms/models/multimodal_models.md"
- GPT4VisionAPI: "swarms/models/gpt4v.md"
# - Swarms Cloud API:
# # - Overview: "swarms_cloud/main.md"
# - Overview: "swarms_cloud/vision.md"
# - Swarms Cloud CLI: "swarms_cloud/cli.md"
# # - Add Agents to Marketplace: "swarms_cloud/add_agent.md"
- Swarms Cloud API:
# - Overview: "swarms_cloud/main.md"
- Overview: "swarms_cloud/vision.md"
- MCS API: "swarms_cloud/mcs_api.md"
- Swarms Cloud CLI: "swarms_cloud/cli.md"
# - Add Agents to Marketplace: "swarms_cloud/add_agent.md"
# - Available Models: "swarms_cloud/available_models.md"
# - Agent API: "swarms_cloud/agent_api.md"
# - Migrate from OpenAI to Swarms in 3 lines of code: "swarms_cloud/migrate_openai.md"

@ -0,0 +1,212 @@
# How to Create Good Agents
This guide will walk you through the steps to build high-quality agents by extending the `Agent` class. It emphasizes best practices, the use of type annotations, comprehensive documentation, and modular design to ensure maintainability and scalability. Additionally, you will learn how to incorporate a callable `llm` parameter or specify a `model_name` attribute to enhance flexibility and functionality. These principles ensure that agents are not only functional but also robust and adaptable to future requirements.
## Overview
A good agent is a modular and reusable component designed to perform specific tasks efficiently. By inheriting from the base `Agent` class, developers can extend its functionality while adhering to standardized principles. Each custom agent should:
- Inherit from the `Agent` class to maintain compatibility with swarms.
- Define a `run(task: str, img: str)` method to execute tasks effectively.
- Include descriptive attributes such as `name`, `system_prompt`, and `description` to enhance clarity.
- Optionally, include an `llm` parameter (callable) or a `model_name` to enable seamless integration with language models.
- Emphasize modularity, allowing the agent to be reused across various contexts and tasks.
By following these guidelines, you can create agents that integrate well with broader systems and exhibit high reliability in real-world applications.
---
## Creating a Custom Agent
Here is a detailed template for creating a custom agent by inheriting the `Agent` class. This template demonstrates how to structure an agent with extendable and reusable features:
```python
from typing import Callable, Any
from swarms import Agent
class MyNewAgent(Agent):
"""
A custom agent class for specialized tasks.
Attributes:
name (str): The name of the agent.
system_prompt (str): The prompt guiding the agent's behavior.
description (str): A brief description of the agent's purpose.
llm (Callable, optional): A callable representing the language model to use.
"""
def __init__(self, name: str, system_prompt: str, model_name: str = None, description: str, llm: Callable = None):
"""
Initialize the custom agent.
Args:
name (str): The name of the agent.
system_prompt (str): The prompt guiding the agent.
model_name (str): The name of your model can use litellm [openai/gpt-4o]
description (str): A description of the agent's purpose.
llm (Callable, optional): A callable representing the language model to use.
"""
super().__init__(agent_name=name, system_prompt=system_prompt, model_name=model_name)
self.agent_name = agent_name
self.system_prompt system_prompt
self.description = description
self.model_name = model_name
def run(self, task: str, img: str, *args: Any, **kwargs: Any) -> Any:
"""
Execute the task assigned to the agent.
Args:
task (str): The task description.
img (str): The image input for processing.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Returns:
Any: The result of the task execution.
"""
# Your custom logic
...
```
This design ensures a seamless extension of functionality while maintaining clear and maintainable code.
---
## Key Considerations
### 1. **Type Annotations**
Always use type hints for method parameters and return values. This improves code readability, supports static analysis tools, and reduces bugs, ensuring long-term reliability.
### 2. **Comprehensive Documentation**
Provide detailed docstrings for all classes, methods, and attributes. Clear documentation ensures that your agent's functionality is understandable to both current and future collaborators.
### 3. **Modular Design**
Keep the agent logic modular and reusable. Modularity simplifies debugging, testing, and extending functionalities, making the code more adaptable to diverse scenarios.
### 4. **Flexible Model Integration**
Use either an `llm` callable or `model_name` attribute for integrating language models. This flexibility ensures your agent can adapt to various tasks, environments, and system requirements.
### 5. **Error Handling**
Incorporate robust error handling to manage unexpected inputs or issues during execution. This not only ensures reliability but also builds user trust in your system.
### 6. **Scalability Considerations**
Ensure your agent design can scale to accommodate increased complexity or a larger number of tasks without compromising performance.
---
## Example Usage
Here is an example of how to use your custom agent effectively:
```python
# Example LLM callable
class MockLLM:
"""
A mock language model class for simulating LLM behavior.
Methods:
run(task: str, img: str, *args: Any, **kwargs: Any) -> str:
Processes the task and image input to return a simulated response.
"""
def run(self, task: str, img: str, *args: Any, **kwargs: Any) -> str:
return f"Processed task '{task}' with image '{img}'"
# Create an instance of MyNewAgent
agent = MyNewAgent(
name="ImageProcessor",
system_prompt="Process images and extract relevant details.",
description="An agent specialized in processing images and extracting insights.",
llm=MockLLM().run
)
# Run a task
result = agent.run(task="Analyze content", img="path/to/image.jpg")
print(result)
```
This example showcases the practical application of the `MyNewAgent` class and highlights its extensibility.
## Production-Grade Example with **Griptape Agent Integration Example**
In this example, we will create a **Griptape** agent by inheriting from the Swarms `Agent` class and implementing the `run` method.
### **Griptape Integration Steps**:
1. **Inherit from Swarms Agent**: Inherit from the `SwarmsAgent` class.
2. **Create Griptape Agent**: Initialize the **Griptape** agent inside your class and provide it with the necessary tools.
3. **Override the `run()` method**: Implement logic to process a task string and execute the Griptape agent.
## **Griptape Example Code**:
```python
from swarms import (
Agent as SwarmsAgent,
) # Import the base Agent class from Swarms
from griptape.structures import Agent as GriptapeAgent
from griptape.tools import (
WebScraperTool,
FileManagerTool,
PromptSummaryTool,
)
# Create a custom agent class that inherits from SwarmsAgent
class GriptapeSwarmsAgent(SwarmsAgent):
def __init__(self, name: str, system_prompt: str: str, *args, **kwargs):
super().__init__(agent_name=name, system_prompt=system_prompt)
# Initialize the Griptape agent with its tools
self.agent = GriptapeAgent(
input="Load {{ args[0] }}, summarize it, and store it in a file called {{ args[1] }}.",
tools=[
WebScraperTool(off_prompt=True),
PromptSummaryTool(off_prompt=True),
FileManagerTool(),
],
*args,
**kwargs,
)
# Override the run method to take a task and execute it using the Griptape agent
def run(self, task: str) -> str:
# Extract URL and filename from task
url, filename = task.split(",") # Example task string: "https://example.com, output.txt"
# Execute the Griptape agent
result = self.agent.run(url.strip(), filename.strip())
# Return the final result as a string
return str(result)
# Example usage:
griptape_swarms_agent = GriptapeSwarmsAgent()
output = griptape_swarms_agent.run("https://griptape.ai, griptape.txt")
print(output)
```
---
## Best Practices
1. **Test Extensively:**
Validate your agent with various task inputs to ensure it performs as expected under different conditions.
2. **Follow the Single Responsibility Principle:**
Design each agent to focus on a specific task or role, ensuring clarity and modularity in implementation.
3. **Log Actions:**
Include detailed logging within the `run` method to capture key actions, inputs, and results for debugging and monitoring.
4. **Use Open-Source Contributions:**
Contribute your custom agents to the Swarms repository at [https://github.com/kyegomez/swarms](https://github.com/kyegomez/swarms). Sharing your innovations helps advance the ecosystem and encourages collaboration.
5. **Iterate and Refactor:**
Continuously improve your agents based on feedback, performance evaluations, and new requirements to maintain relevance and functionality.
---
## Conclusion
By following these guidelines, you can create powerful and flexible agents tailored to specific tasks. Leveraging inheritance from the `Agent` class ensures compatibility and standardization across swarms. Emphasize modularity, thorough testing, and clear documentation to build agents that are robust, scalable, and easy to integrate. Collaborate with the community by submitting your innovative agents to the Swarms repository, contributing to a growing ecosystem of intelligent solutions. With a well-designed agent, you are equipped to tackle diverse challenges efficiently and effectively.

@ -0,0 +1,634 @@
In this section, we present a diverse collection of unique swarms, each with its own distinct characteristics and applications. These examples are designed to illustrate the versatility and potential of swarm intelligence in various domains. By exploring these examples, you can gain a deeper understanding of how swarms can be leveraged to solve complex problems and improve decision-making processes.
# Documentation
## Table of Contents
1. [Common Parameters](#common-parameters)
2. [Basic Swarm Patterns](#basic-swarm-patterns)
3. [Mathematical Swarm Patterns](#mathematical-swarm-patterns)
4. [Advanced Swarm Patterns](#advanced-swarm-patterns)
5. [Communication Patterns](#communication-patterns)
6. [Best Practices](#best-practices)
7. [Common Use Cases](#common-use-cases)
## Common Parameters
All swarm architectures accept these base parameters:
- `agents: AgentListType` - List of Agent objects to participate in the swarm
- `tasks: List[str]` - List of tasks to be processed by the agents
- `return_full_history: bool` (optional) - If True, returns conversation history. Defaults to True
Return types are generally `Union[dict, List[str]]`, where:
- If `return_full_history=True`: Returns a dictionary containing the full conversation history
- If `return_full_history=False`: Returns a list of agent responses
## Basic Swarm Patterns
### Circular Swarm
```python
def circular_swarm(agents: AgentListType, tasks: List[str], return_full_history: bool = True)
```
**Information Flow:**
```mermaid
flowchart LR
subgraph Circular Flow
A1((Agent 1)) --> A2((Agent 2))
A2 --> A3((Agent 3))
A3 --> A4((Agent 4))
A4 --> A1
end
Task1[Task 1] --> A1
Task2[Task 2] --> A2
Task3[Task 3] --> A3
```
**Best Used When:**
- You need continuous processing of tasks
- Tasks need to be processed by every agent in sequence
- You want predictable, ordered task distribution
**Key Features:**
- Tasks move in a circular pattern through all agents
- Each agent processes each task once
- Maintains strict ordering of task processing
### Linear Swarm
```python
def linear_swarm(agents: AgentListType, tasks: List[str], return_full_history: bool = True)
```
**Information Flow:**
```mermaid
flowchart LR
Input[Task Input] --> A1
subgraph Sequential Processing
A1((Agent 1)) --> A2((Agent 2))
A2 --> A3((Agent 3))
A3 --> A4((Agent 4))
A4 --> A5((Agent 5))
end
A5 --> Output[Final Result]
```
**Best Used When:**
- Tasks need sequential, pipeline-style processing
- Each agent performs a specific transformation step
- Order of processing is critical
### Star Swarm
```python
def star_swarm(agents: AgentListType, tasks: List[str], return_full_history: bool = True)
```
**Information Flow:**
```mermaid
flowchart TD
subgraph Star Pattern
A1((Central Agent))
A2((Agent 2))
A3((Agent 3))
A4((Agent 4))
A5((Agent 5))
A1 --> A2
A1 --> A3
A1 --> A4
A1 --> A5
end
Task[Initial Task] --> A1
A2 --> Result2[Result 2]
A3 --> Result3[Result 3]
A4 --> Result4[Result 4]
A5 --> Result5[Result 5]
```
**Best Used When:**
- You need centralized control
- Tasks require coordination or oversight
- You want to maintain a single point of task distribution
### Mesh Swarm
```python
def mesh_swarm(agents: AgentListType, tasks: List[str], return_full_history: bool = True)
```
**Information Flow:**
```mermaid
flowchart TD
subgraph Mesh Network
A1((Agent 1)) <--> A2((Agent 2))
A2 <--> A3((Agent 3))
A1 <--> A4((Agent 4))
A2 <--> A5((Agent 5))
A3 <--> A6((Agent 6))
A4 <--> A5
A5 <--> A6
end
Tasks[Task Pool] --> A1
Tasks --> A2
Tasks --> A3
Tasks --> A4
Tasks --> A5
Tasks --> A6
```
**Best Used When:**
- You need maximum flexibility
- Task processing order isn't critical
- You want fault tolerance
## Mathematical Swarm Patterns
### Fibonacci Swarm
```python
def fibonacci_swarm(agents: AgentListType, tasks: List[str])
```
**Information Flow:**
```mermaid
flowchart TD
subgraph Fibonacci Pattern
L1[Level 1: 1 Agent] --> L2[Level 2: 1 Agent]
L2 --> L3[Level 3: 2 Agents]
L3 --> L4[Level 4: 3 Agents]
L4 --> L5[Level 5: 5 Agents]
end
Task[Initial Task] --> L1
L5 --> Results[Processed Results]
```
**Best Used When:**
- You need natural scaling patterns
- Tasks have increasing complexity
- You want organic growth in processing capacity
### Pyramid Swarm
```python
def pyramid_swarm(agents: AgentListType, tasks: List[str], return_full_history: bool = True)
```
**Information Flow:**
```mermaid
flowchart TD
subgraph Pyramid Structure
A1((Leader Agent))
A2((Manager 1))
A3((Manager 2))
A4((Worker 1))
A5((Worker 2))
A6((Worker 3))
A7((Worker 4))
A1 --> A2
A1 --> A3
A2 --> A4
A2 --> A5
A3 --> A6
A3 --> A7
end
Task[Complex Task] --> A1
A4 --> Result1[Output 1]
A5 --> Result2[Output 2]
A6 --> Result3[Output 3]
A7 --> Result4[Output 4]
```
**Best Used When:**
- You need hierarchical task processing
- Tasks require multiple levels of oversight
- You want organized task delegation
### Grid Swarm
```python
def grid_swarm(agents: AgentListType, tasks: List[str])
```
**Information Flow:**
```mermaid
flowchart TD
subgraph Grid Layout
A1((1)) <--> A2((2)) <--> A3((3))
A4((4)) <--> A5((5)) <--> A6((6))
A7((7)) <--> A8((8)) <--> A9((9))
A1 <--> A4 <--> A7
A2 <--> A5 <--> A8
A3 <--> A6 <--> A9
end
Tasks[Task Queue] --> A1
Tasks --> A5
Tasks --> A9
```
**Best Used When:**
- Tasks have spatial relationships
- You need neighbor-based processing
- You want structured parallel processing
## Communication Patterns
### One-to-One Communication
```python
def one_to_one(sender: Agent, receiver: Agent, task: str, max_loops: int = 1) -> str
```
**Information Flow:**
```mermaid
flowchart LR
Task[Task] --> S((Sender))
S --> R((Receiver))
R --> Result[Result]
```
**Best Used When:**
- Direct agent communication is needed
- Tasks require back-and-forth interaction
- You need controlled message exchange
### Broadcast Communication
```python
async def broadcast(sender: Agent, agents: AgentListType, task: str) -> None
```
**Information Flow:**
```mermaid
flowchart TD
T[Task] --> S((Sender))
S --> A1((Agent 1))
S --> A2((Agent 2))
S --> A3((Agent 3))
S --> A4((Agent 4))
```
**Best Used When:**
- Information needs to reach all agents
- Tasks require global coordination
- You need system-wide updates
## Best Practices
1. **Choose the Right Pattern:**
- Consider your task's natural structure
- Think about scaling requirements
- Consider fault tolerance needs
2. **Performance Considerations:**
- More complex patterns have higher overhead
- Consider communication costs
- Match pattern to available resources
3. **Error Handling:**
- All patterns include basic error checking
- Consider adding additional error handling for production
- Monitor agent performance and task completion
4. **Scaling:**
- Different patterns scale differently
- Consider future growth needs
- Test with expected maximum load
## Common Use Cases
1. **Data Processing Pipelines**
- Linear Swarm
- Circular Swarm
2. **Distributed Computing**
- Mesh Swarm
- Grid Swarm
3. **Hierarchical Systems**
- Pyramid Swarm
- Star Swarm
4. **Dynamic Workloads**
- Exponential Swarm
- Fibonacci Swarm
5. **Conflict-Free Processing**
- Prime Swarm
- Harmonic Swarm
```python
import asyncio
from typing import List
from swarms.structs.agent import Agent
from swarms.structs.swarming_architectures import (
broadcast,
circular_swarm,
exponential_swarm,
fibonacci_swarm,
grid_swarm,
linear_swarm,
mesh_swarm,
one_to_three,
prime_swarm,
sigmoid_swarm,
sinusoidal_swarm,
staircase_swarm,
star_swarm,
)
def create_finance_agents() -> List[Agent]:
"""Create specialized finance agents"""
return [
Agent(
agent_name="MarketAnalyst",
system_prompt="You are a market analysis expert. Analyze market trends and provide insights.",
model_name="gpt-4o-mini"
),
Agent(
agent_name="RiskManager",
system_prompt="You are a risk management specialist. Evaluate risks and provide mitigation strategies.",
model_name="gpt-4o-mini"
),
Agent(
agent_name="PortfolioManager",
system_prompt="You are a portfolio management expert. Optimize investment portfolios and asset allocation.",
model_name="gpt-4o-mini"
),
Agent(
agent_name="ComplianceOfficer",
system_prompt="You are a financial compliance expert. Ensure regulatory compliance and identify issues.",
model_name="gpt-4o-mini"
)
]
def create_healthcare_agents() -> List[Agent]:
"""Create specialized healthcare agents"""
return [
Agent(
agent_name="Diagnostician",
system_prompt="You are a medical diagnostician. Analyze symptoms and suggest potential diagnoses.",
model_name="gpt-4o-mini"
),
Agent(
agent_name="Treatment_Planner",
system_prompt="You are a treatment planning specialist. Develop comprehensive treatment plans.",
model_name="gpt-4o-mini"
),
Agent(
agent_name="MedicalResearcher",
system_prompt="You are a medical researcher. Analyze latest research and provide evidence-based recommendations.",
model_name="gpt-4o-mini"
),
Agent(
agent_name="PatientCareCoordinator",
system_prompt="You are a patient care coordinator. Manage patient care workflow and coordination.",
model_name="gpt-4o-mini"
)
]
def print_separator():
print("\n" + "="*50 + "\n")
def run_finance_circular_swarm():
"""Investment analysis workflow using circular swarm"""
print_separator()
print("FINANCE - INVESTMENT ANALYSIS (Circular Swarm)")
agents = create_finance_agents()
tasks = [
"Analyze Tesla stock performance for Q4 2024",
"Assess market risks and potential hedging strategies",
"Recommend portfolio adjustments based on analysis"
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = circular_swarm(agents, tasks)
print("\nResults:")
for log in result['history']:
print(f"\n{log['agent_name']}:")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
def run_healthcare_grid_swarm():
"""Patient diagnosis and treatment planning using grid swarm"""
print_separator()
print("HEALTHCARE - PATIENT DIAGNOSIS (Grid Swarm)")
agents = create_healthcare_agents()
tasks = [
"Review patient symptoms: fever, fatigue, joint pain",
"Research latest treatment protocols",
"Develop preliminary treatment plan",
"Coordinate with specialists"
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = grid_swarm(agents, tasks)
print("\nGrid swarm processing completed")
print(result)
def run_finance_linear_swarm():
"""Loan approval process using linear swarm"""
print_separator()
print("FINANCE - LOAN APPROVAL PROCESS (Linear Swarm)")
agents = create_finance_agents()[:3]
tasks = [
"Review loan application and credit history",
"Assess risk factors and compliance requirements",
"Generate final loan recommendation"
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = linear_swarm(agents, tasks)
print("\nResults:")
for log in result['history']:
print(f"\n{log['agent_name']}:")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
def run_healthcare_star_swarm():
"""Complex medical case management using star swarm"""
print_separator()
print("HEALTHCARE - COMPLEX CASE MANAGEMENT (Star Swarm)")
agents = create_healthcare_agents()
tasks = [
"Complex case: Patient with multiple chronic conditions",
"Develop integrated care plan"
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = star_swarm(agents, tasks)
print("\nResults:")
for log in result['history']:
print(f"\n{log['agent_name']}:")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
def run_finance_mesh_swarm():
"""Market risk assessment using mesh swarm"""
print_separator()
print("FINANCE - MARKET RISK ASSESSMENT (Mesh Swarm)")
agents = create_finance_agents()
tasks = [
"Analyze global market conditions",
"Assess currency exchange risks",
"Evaluate sector-specific risks",
"Review portfolio exposure"
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = mesh_swarm(agents, tasks)
print("\nResults:")
for log in result['history']:
print(f"\n{log['agent_name']}:")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
def run_mathematical_finance_swarms():
"""Complex financial analysis using mathematical swarms"""
print_separator()
print("FINANCE - MARKET PATTERN ANALYSIS")
agents = create_finance_agents()
tasks = [
"Analyze historical market patterns",
"Predict market trends using technical analysis",
"Identify potential arbitrage opportunities"
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
print("\nFibonacci Swarm Results:")
result = fibonacci_swarm(agents, tasks.copy())
print(result)
print("\nPrime Swarm Results:")
result = prime_swarm(agents, tasks.copy())
print(result)
print("\nExponential Swarm Results:")
result = exponential_swarm(agents, tasks.copy())
print(result)
def run_healthcare_pattern_swarms():
"""Patient monitoring using pattern swarms"""
print_separator()
print("HEALTHCARE - PATIENT MONITORING PATTERNS")
agents = create_healthcare_agents()
task = "Monitor and analyze patient vital signs: BP, heart rate, temperature, O2 saturation"
print(f"\nTask: {task}")
print("\nStaircase Pattern Analysis:")
result = staircase_swarm(agents, task)
print(result)
print("\nSigmoid Pattern Analysis:")
result = sigmoid_swarm(agents, task)
print(result)
print("\nSinusoidal Pattern Analysis:")
result = sinusoidal_swarm(agents, task)
print(result)
async def run_communication_examples():
"""Communication patterns for emergency scenarios"""
print_separator()
print("EMERGENCY COMMUNICATION PATTERNS")
# Finance market alert
finance_sender = create_finance_agents()[0]
finance_receivers = create_finance_agents()[1:]
market_alert = "URGENT: Major market volatility detected - immediate risk assessment required"
print("\nFinance Market Alert:")
print(f"Alert: {market_alert}")
result = await broadcast(finance_sender, finance_receivers, market_alert)
print("\nBroadcast Results:")
for log in result['history']:
print(f"\n{log['agent_name']}:")
print(f"Response: {log['response']}")
# Healthcare emergency
health_sender = create_healthcare_agents()[0]
health_receivers = create_healthcare_agents()[1:4]
emergency_case = "EMERGENCY: Trauma patient with multiple injuries - immediate consultation required"
print("\nHealthcare Emergency:")
print(f"Case: {emergency_case}")
result = await one_to_three(health_sender, health_receivers, emergency_case)
print("\nConsultation Results:")
for log in result['history']:
print(f"\n{log['agent_name']}:")
print(f"Response: {log['response']}")
async def run_all_examples():
"""Execute all swarm examples"""
print("\n=== SWARM ARCHITECTURE EXAMPLES ===\n")
# Finance examples
run_finance_circular_swarm()
run_finance_linear_swarm()
run_finance_mesh_swarm()
run_mathematical_finance_swarms()
# Healthcare examples
run_healthcare_grid_swarm()
run_healthcare_star_swarm()
run_healthcare_pattern_swarms()
# Communication examples
await run_communication_examples()
print("\n=== ALL EXAMPLES COMPLETED ===")
if __name__ == "__main__":
asyncio.run(run_all_examples())
```

@ -127,43 +127,52 @@ Before you begin, ensure you have the following installed:
poetry install --extras "desktop"
```
=== "Using Docker COMING SOON [DOES NOT WORK YET]"
=== "Using Docker"
Docker is an excellent option for creating isolated and reproducible environments, suitable for both development and production.
Docker is an excellent option for creating isolated and reproducible environments, suitable for both development and production. Contact us if there are any issues with the docker setup
1. **Pull the Docker image:**
```bash
docker pull kyegomez/swarms
docker pull swarmscorp/swarms:tagname
```
2. **Run the Docker container:**
```bash
docker run -it --rm kyegomez/swarms
docker run -it --rm swarmscorp/swarms:tagname
```
3. **Build and run a custom Docker image:**
```dockerfile
# Dockerfile
FROM python:3.10-slim
# Set up environment
WORKDIR /app
COPY . /app
# Install dependencies
RUN pip install --upgrade pip && \
pip install -e .
CMD ["python", "your_script.py"]
```
```bash
# Build and run the Docker image
docker build -t swarms-custom .
docker run -it --rm swarms-custom
# Use Python 3.11 instead of 3.13
FROM python:3.11-slim
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
WORKSPACE_DIR="agent_workspace" \
OPENAI_API_KEY="your_swarm_api_key_here"
# Set the working directory
WORKDIR /usr/src/swarms
# Install system dependencies
RUN apt-get update && apt-get install -y \
build-essential \
gcc \
g++ \
gfortran \
&& rm -rf /var/lib/apt/lists/*
# Install swarms package
RUN pip3 install -U swarm-models
RUN pip3 install -U swarms
# Copy the application
COPY . .
```
=== "Using Kubernetes"

@ -0,0 +1,210 @@
# How to Add a New Swarm Class
This guide provides comprehensive step-by-step instructions for developers to create and add a new swarm. It emphasizes the importance of adhering to best practices, using proper type hints, and documenting code thoroughly to ensure maintainability, scalability, and clarity in your implementations.
## Overview
A Swarm class enables developers to manage and coordinate multiple agents working together to accomplish complex tasks efficiently. Each Swarm must:
- Contain a `run(task: str, img: str, *args, **kwargs)` method, which serves as the primary execution method for tasks.
- Include `name`, `description`, and `agents` parameters.
- Ensure `agents` is a list of callables, with each callable adhering to specific requirements for dynamic agent behavior.
- Follow type-hinting and documentation best practices to maintain code clarity and reliability.
Each Agent within the swarm must:
- Contain `agent_name`, `system_prompt`, and a `run` method.
- Follow similar type hinting and documentation standards to ensure consistency and readability.
By adhering to these requirements, you can create robust, reusable, and modular swarms that streamline task management and enhance collaborative functionality. Developers are also encouraged to contribute their swarms back to the open-source community by submitting a pull request to the Swarms repository at [https://github.com/kyegomez/swarms](https://github.com/kyegomez/swarms).
---
## Creating a Swarm Class
Below is a detailed template for creating a Swarm class. Ensure that all elements are documented and clearly defined:
```python
from typing import Callable, Any, List
class MySwarm:
"""
A custom swarm class to manage and execute tasks with multiple agents.
Attributes:
name (str): The name of the swarm.
description (str): A brief description of the swarm's purpose.
agents (List[Callable]): A list of callables representing the agents to be utilized.
"""
def __init__(self, name: str, description: str, agents: List[Callable]):
"""
Initialize the Swarm with its name, description, and agents.
Args:
name (str): The name of the swarm.
description (str): A description of the swarm.
agents (List[Callable]): A list of callables that provide the agents for the swarm.
"""
self.name = name
self.description = description
self.agents = agents
def run(self, task: str, img: str, *args: Any, **kwargs: Any) -> Any:
"""
Execute a task using the swarm and its agents.
Args:
task (str): The task description.
img (str): The image input.
*args: Additional positional arguments for customization.
**kwargs: Additional keyword arguments for fine-tuning behavior.
Returns:
Any: The result of the task execution, aggregated from all agents.
"""
results = []
for agent in self.agents:
result = agent.run(task, img, *args, **kwargs)
results.append(result)
return results
```
This Swarm class serves as the main orchestrator for coordinating agents and running tasks dynamically and flexibly.
---
## Creating an Agent Class
Each agent must follow a well-defined structure to ensure compatibility with the swarm. Below is an example of an agent class:
```python
class Agent:
"""
A single agent class to handle specific tasks assigned by the swarm.
Attributes:
agent_name (str): The name of the agent.
system_prompt (str): The system prompt guiding the agent's behavior and purpose.
"""
def __init__(self, agent_name: str, system_prompt: str):
"""
Initialize the agent with its name and system prompt.
Args:
agent_name (str): The name of the agent.
system_prompt (str): The guiding prompt for the agent.
"""
self.agent_name = agent_name
self.system_prompt = system_prompt
def run(self, task: str, img: str, *args: Any, **kwargs: Any) -> Any:
"""
Execute a specific task assigned to the agent.
Args:
task (str): The task description.
img (str): The image input for processing.
*args: Additional positional arguments for task details.
**kwargs: Additional keyword arguments for extended functionality.
Returns:
Any: The result of the task execution, which can be customized.
"""
# Example implementation (to be customized by developer)
return f"Agent {self.agent_name} executed task: {task}"
```
This structure ensures that each agent can independently handle tasks and integrate seamlessly into a swarm.
---
## Adding Your Swarm to a Project
### Step 1: Define Your Agents
Create one or more instances of the `Agent` class to serve as components of your swarm. For example:
```python
def create_agents():
return [
Agent(agent_name="Agent1", system_prompt="Analyze the image and summarize results."),
Agent(agent_name="Agent2", system_prompt="Detect objects and highlight key features."),
]
```
### Step 2: Implement Your Swarm
Create an instance of your Swarm class, defining its name, description, and associated agents:
```python
my_swarm = MySwarm(
name="Image Analysis Swarm",
description="A swarm designed to analyze images and perform a range of related tasks.",
agents=create_agents()
)
```
### Step 3: Execute Tasks
Call the `run` method of your swarm, passing in the required parameters for execution:
```python
results = my_swarm.run(task="Analyze image content", img="path/to/image.jpg")
print(results)
```
This simple flow allows you to dynamically utilize agents for diverse operations and ensures efficient task execution.
---
## Best Practices
To ensure your swarm implementation is efficient and maintainable, follow these best practices:
1. **Type Annotations:**
Use precise type hints for parameters and return types to improve code readability and support static analysis tools.
2. **Comprehensive Documentation:**
Include clear and detailed docstrings for all classes, methods, and attributes to ensure your code is understandable.
3. **Thorough Testing:**
Test your swarm and agents with various tasks to verify correctness and identify potential edge cases.
4. **Modular Design:**
Keep your swarm and agent logic modular, enabling reuse and easy extensions for future enhancements.
5. **Error Handling:**
Implement robust error handling in the `run` methods to gracefully manage unexpected inputs or issues during execution.
6. **Code Review:**
Regularly review and refactor your code to align with the latest best practices and maintain high quality.
7. **Scalability:**
Design your swarm with scalability in mind, ensuring it can handle a large number of agents and complex tasks.
8. **Logging and Monitoring:**
Include comprehensive logging to track task execution and monitor performance, enabling easier debugging and optimization.
9. **Open-Source Contributions:**
Consider contributing your swarm to the Swarms repository to benefit the community. Submit a pull request at [https://github.com/kyegomez/swarms](https://github.com/kyegomez/swarms).
---
## Example Output
Given the implementation above, executing a task might produce output such as:
```plaintext
[
"Agent Agent1 executed task: Analyze image content",
"Agent Agent2 executed task: Analyze image content"
]
```
The modular design ensures that each agent contributes to the overall functionality of the swarm, allowing seamless scalability and dynamic task management.
---
## Conclusion
By following these guidelines, you can create swarms that are powerful, flexible, and maintainable. Leveraging the provided templates and best practices enables you to build efficient multi-agent systems capable of handling diverse and complex tasks. Proper structuring, thorough testing, and adherence to best practices will ensure your swarm integrates effectively into any project, delivering robust and reliable performance. Furthermore, maintaining clear documentation and emphasizing modularity will help your implementation adapt to future needs and use cases. Empower your projects with a well-designed swarm architecture today, and consider submitting your swarm to the open-source community to foster collaboration and innovation.

@ -1,231 +1,352 @@
# GroupChat Class Documentation
The GroupChat class manages multi-agent conversations with state persistence, comprehensive logging, and flexible agent configurations. It supports both Agent class instances and callable functions, making it versatile for different use cases.
# GroupChat Swarm Documentation
A production-grade multi-agent system enabling sophisticated group conversations between AI agents with customizable speaking patterns, parallel processing capabilities, and comprehensive conversation tracking.
## Advanced Configuration
### Agent Parameters
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| agent_name | str | Required | Unique identifier for the agent |
| system_prompt | str | Required | Role and behavior instructions |
| llm | Any | Required | Language model instance |
| max_loops | int | 1 | Maximum conversation turns |
| autosave | bool | False | Enable conversation saving |
| dashboard | bool | False | Enable monitoring dashboard |
| verbose | bool | True | Enable detailed logging |
| dynamic_temperature | bool | True | Enable dynamic temperature |
| retry_attempts | int | 1 | Failed request retry count |
| context_length | int | 200000 | Maximum context window |
| output_type | str | "string" | Response format type |
| streaming_on | bool | False | Enable streaming responses |
### GroupChat Parameters
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| name | str | "GroupChat" | Chat group identifier |
| description | str | "" | Purpose description |
| agents | List[Agent] | [] | Participating agents |
| speaker_fn | Callable | round_robin | Speaker selection function |
| max_loops | int | 10 | Maximum conversation turns |
## Table of Contents
- [Installation](#installation)
- [Core Concepts](#core-concepts)
- [Basic Usage](#basic-usage)
- [Advanced Configuration](#advanced-configuration)
- [Speaker Functions](#speaker-functions)
- [Response Models](#response-models)
- [Advanced Examples](#advanced-examples)
- [API Reference](#api-reference)
- [Best Practices](#best-practices)
## Installation
```bash
pip install swarms python-dotenv pydantic
pip3 install swarms swarm-models loguru
```
## Core Concepts
## Attributes
| Attribute | Type | Description |
|-----------|------|-------------|
| state_path | str | Path for saving/loading chat state |
| wrapped_agents | List[AgentWrapper] | List of wrapped agent instances |
| selector_agent | AgentWrapper | Agent responsible for speaker selection |
| state | GroupChatState | Current state of the group chat |
The GroupChat system consists of several key components:
## Methods
1. **Agents**: Individual AI agents with specialized knowledge and roles
2. **Speaker Functions**: Control mechanisms for conversation flow
3. **Chat History**: Structured conversation tracking
4. **Response Models**: Pydantic models for data validation
### Core Methods
## Basic Usage
```python
def run(self, task: str) -> str:
"""Execute the group chat conversation"""
def save_state(self) -> None:
"""Save current state to disk"""
@classmethod
def load_state(cls, state_path: str) -> 'GroupChat':
"""Load GroupChat from saved state"""
def get_conversation_summary(self) -> Dict[str, Any]:
"""Return a summary of the conversation"""
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms import Agent, GroupChat, expertise_based
if __name__ == "__main__":
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key,
model_name="gpt-4o-mini",
temperature=0.1,
)
# Example agents
agent1 = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt="You are a financial analyst specializing in investment strategies.",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
output_type="string",
streaming_on=False,
)
agent2 = Agent(
agent_name="Tax-Adviser-Agent",
system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
output_type="string",
streaming_on=False,
)
agents = [agent1, agent2]
chat = GroupChat(
name="Investment Advisory",
description="Financial and tax analysis group",
agents=agents,
speaker_fn=expertise_based,
)
history = chat.run(
"How to optimize tax strategy for investments?"
)
print(history.model_dump_json(indent=2))
def export_conversation(self, format: str = "json") -> Union[str, Dict]:
"""Export the conversation in specified format"""
```
### Internal Methods
## Speaker Functions
```python
def _log_interaction(self, agent_name: str, position: int, input_text: str, output_text: str) -> None:
"""Log a single interaction"""
def _add_message(self, role: str, content: str) -> None:
"""Add a message to the conversation history"""
### Built-in Functions
def select_next_speaker(self, last_speaker: AgentWrapper) -> AgentWrapper:
"""Select the next speaker using the selector agent"""
```python
def round_robin(history: List[str], agent: Agent) -> bool:
"""
Enables agents to speak in turns.
Returns True for each agent in sequence.
"""
return True
def expertise_based(history: List[str], agent: Agent) -> bool:
"""
Enables agents to speak based on their expertise.
Returns True if agent's role matches conversation context.
"""
return agent.system_prompt.lower() in history[-1].lower() if history else True
def random_selection(history: List[str], agent: Agent) -> bool:
"""
Randomly selects speaking agents.
Returns True/False with 50% probability.
"""
import random
return random.choice([True, False])
def most_recent(history: List[str], agent: Agent) -> bool:
"""
Enables agents to respond to their mentions.
Returns True if agent was last speaker.
"""
return agent.agent_name == history[-1].split(":")[0].strip() if history else True
```
## Usage Examples
### Custom Speaker Function Example
### 1. Basic Setup with Two Agents
```python
import os
from swarms import Agent
from swarm_models import OpenAIChat
# Initialize OpenAI
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(openai_api_key=api_key, model_name="gpt-4-mini")
# Create agents
analyst = Agent(
agent_name="Financial-Analyst",
system_prompt="You are a financial analyst...",
llm=model
)
advisor = Agent(
agent_name="Investment-Advisor",
system_prompt="You are an investment advisor...",
llm=model
)
# Create group chat
def custom_speaker(history: List[str], agent: Agent) -> bool:
"""
Custom speaker function with complex logic.
Args:
history: Previous conversation messages
agent: Current agent being evaluated
Returns:
bool: Whether agent should speak
"""
# No history - let everyone speak
if not history:
return True
last_message = history[-1].lower()
# Check for agent expertise keywords
expertise_relevant = any(
keyword in last_message
for keyword in agent.expertise_keywords
)
# Check for direct mentions
mentioned = agent.agent_name.lower() in last_message
# Check if agent hasn't spoken recently
not_recent_speaker = not any(
agent.agent_name in msg
for msg in history[-3:]
)
return expertise_relevant or mentioned or not_recent_speaker
# Usage
chat = GroupChat(
name="Investment Team",
agents=[analyst, advisor],
max_rounds=5,
group_objective="Provide investment advice"
agents=[agent1, agent2],
speaker_fn=custom_speaker
)
response = chat.run("What's the best investment strategy for retirement?")
```
### 2. Advanced Setup with State Management
```python
# Create group chat with state persistence
chat = GroupChat(
name="Investment Advisory Team",
description="Expert team for financial planning",
agents=[analyst, advisor, tax_specialist],
max_rounds=10,
admin_name="Senior Advisor",
group_objective="Provide comprehensive financial planning",
state_path="investment_chat_state.json",
rules="1. Always provide sources\n2. Be concise\n3. Focus on practical advice"
)
## Response Models
# Run chat and save state
response = chat.run("Create a retirement plan for a 35-year old")
chat.save_state()
### Complete Schema
# Load existing chat state
loaded_chat = GroupChat.load_state("investment_chat_state.json")
```
### 3. Using Custom Callable Agents
```python
def custom_agent(input_text: str) -> str:
# Custom logic here
return f"Processed: {input_text}"
# Mix of regular agents and callable functions
chat = GroupChat(
name="Hybrid Team",
agents=[analyst, custom_agent],
max_rounds=3
)
class AgentResponse(BaseModel):
"""Individual agent response in a conversation turn"""
agent_name: str
role: str
message: str
timestamp: datetime = Field(default_factory=datetime.now)
turn_number: int
preceding_context: List[str] = Field(default_factory=list)
class ChatTurn(BaseModel):
"""Single turn in the conversation"""
turn_number: int
responses: List[AgentResponse]
task: str
timestamp: datetime = Field(default_factory=datetime.now)
class ChatHistory(BaseModel):
"""Complete conversation history"""
turns: List[ChatTurn]
total_messages: int
name: str
description: str
start_time: datetime = Field(default_factory=datetime.now)
```
### 4. Export and Analysis
```python
# Run chat
chat.run("Analyze market conditions")
# Get summary
summary = chat.get_conversation_summary()
print(summary)
## Advanced Examples
# Export in different formats
json_conv = chat.export_conversation(format="json")
text_conv = chat.export_conversation(format="text")
```
### Multi-Agent Analysis Team
### 5. Advanced Configuration with Custom Selector
```python
class CustomSelector(Agent):
def run(self, input_text: str) -> str:
# Custom selection logic
return "Financial-Analyst"
chat = GroupChat(
name="Custom Selection Team",
agents=[analyst, advisor],
selector_agent=CustomSelector(
agent_name="Custom-Selector",
system_prompt="Select the next speaker based on expertise",
llm=model
),
max_rounds=5
# Create specialized agents
data_analyst = Agent(
agent_name="Data-Analyst",
system_prompt="You analyze numerical data and patterns",
llm=model
)
```
### 6. Debugging Setup
```python
import logging
market_expert = Agent(
agent_name="Market-Expert",
system_prompt="You provide market insights and trends",
llm=model
)
# Configure logging
logging.basicConfig(level=logging.DEBUG)
strategy_advisor = Agent(
agent_name="Strategy-Advisor",
system_prompt="You formulate strategic recommendations",
llm=model
)
chat = GroupChat(
name="Debug Team",
agents=[analyst, advisor],
max_rounds=3,
state_path="debug_chat.json"
# Create analysis team
analysis_team = GroupChat(
name="Market Analysis Team",
description="Comprehensive market analysis group",
agents=[data_analyst, market_expert, strategy_advisor],
speaker_fn=expertise_based,
max_loops=15
)
# Run with detailed logging
try:
response = chat.run("Complex query")
except Exception as e:
logger.error(f"Chat failed: {str(e)}")
# Access last successful state
state = chat.state
# Run complex analysis
history = analysis_team.run("""
Analyze the current market conditions:
1. Identify key trends
2. Evaluate risks
3. Recommend investment strategy
""")
```
## Error Handling
The GroupChat class includes comprehensive error handling:
### Parallel Processing
```python
try:
chat = GroupChat(agents=[analyst]) # Will raise ValueError
except ValueError as e:
print("Configuration error:", str(e))
try:
response = chat.run("Query")
except Exception as e:
# Access error state
error_summary = chat.get_conversation_summary()
print("Execution error:", str(e))
print("State at error:", error_summary)
# Define multiple analysis tasks
tasks = [
"Analyze tech sector trends",
"Evaluate real estate market",
"Review commodity prices",
"Assess global economic indicators"
]
# Run tasks concurrently
histories = chat.concurrent_run(tasks)
# Process results
for task, history in zip(tasks, histories):
print(f"\nAnalysis for: {task}")
for turn in history.turns:
for response in turn.responses:
print(f"{response.agent_name}: {response.message}")
```
## Best Practices
1. **State Management**:
- Always specify a `state_path` for important conversations
- Use `save_state()` after critical operations
- Implement regular state backups for long conversations
2. **Agent Configuration**:
- Provide clear system prompts for each agent
- Use descriptive agent names
- Consider agent expertise when setting the group objective
3. **Performance**:
- Keep `max_rounds` reasonable (5-10 for most cases)
- Use early stopping conditions when possible
- Monitor conversation length and complexity
4. **Error Handling**:
- Always wrap chat execution in try-except blocks
- Implement proper logging
- Save states before potentially risky operations
## Limitations
- Agents must either have a `run` method or be callable
- State files can grow large with many interactions
- Selector agent may need optimization for large agent groups
- Real-time streaming not supported in basic configuration
1. **Agent Design**
- Give agents clear, specific roles
- Use detailed system prompts
- Set appropriate context lengths
- Enable retries for reliability
2. **Speaker Functions**
- Match function to use case
- Consider conversation flow
- Handle edge cases
- Add appropriate logging
3. **Error Handling**
- Use try-except blocks
- Log errors appropriately
- Implement retry logic
- Provide fallback responses
4. **Performance**
- Use concurrent processing for multiple tasks
- Monitor context lengths
- Implement proper cleanup
- Cache responses when appropriate
## API Reference
### GroupChat Methods
| Method | Description | Arguments | Returns |
|--------|-------------|-----------|---------|
| run | Run single conversation | task: str | ChatHistory |
| batched_run | Run multiple sequential tasks | tasks: List[str] | List[ChatHistory] |
| concurrent_run | Run multiple parallel tasks | tasks: List[str] | List[ChatHistory] |
| get_recent_messages | Get recent messages | n: int = 3 | List[str] |
### Agent Methods
| Method | Description | Returns |
|--------|-------------|---------|
| run | Process single task | str |
| generate_response | Generate LLM response | str |
| save_context | Save conversation context | None |

@ -0,0 +1,331 @@
# MultiAgentRouter Documentation
The MultiAgentRouter is a sophisticated task routing system that efficiently delegates tasks to specialized AI agents. It uses a "boss" agent to analyze incoming tasks and route them to the most appropriate specialized agent based on their capabilities and expertise.
## Table of Contents
- [Installation](#installation)
- [Key Components](#key-components)
- [Arguments](#arguments)
- [Methods](#methods)
- [Usage Examples](#usage-examples)
- [Healthcare](#healthcare-example)
- [Finance](#finance-example)
- [Legal](#legal-example)
- [Research](#research-example)
## Installation
```bash
pip install swarms
```
## Key Components
### Arguments Table
| Argument | Type | Default | Description |
|----------|------|---------|-------------|
| name | str | "swarm-router" | Name identifier for the router instance |
| description | str | "Routes tasks..." | Description of the router's purpose |
| agents | List[Agent] | [] | List of available specialized agents |
| model | str | "gpt-4o-mini" | Base language model for the boss agent |
| temperature | float | 0.1 | Temperature parameter for model outputs |
| shared_memory_system | callable | None | Optional shared memory system |
| output_type | Literal["json", "string"] | "json" | Format of agent outputs |
| execute_task | bool | True | Whether to execute routed tasks |
### Methods Table
| Method | Arguments | Returns | Description |
|--------|-----------|---------|-------------|
| route_task | task: str | dict | Routes a single task to appropriate agent |
| batch_route | tasks: List[str] | List[dict] | Sequentially routes multiple tasks |
| concurrent_batch_route | tasks: List[str] | List[dict] | Concurrently routes multiple tasks |
| query_ragent | task: str | str | Queries the research agent |
| find_agent_in_list | agent_name: str | Optional[Agent] | Finds agent by name |
## Production Examples
### Healthcare Example
```python
from swarms import Agent, MultiAgentRouter
# Define specialized healthcare agents
agents = [
Agent(
agent_name="DiagnosisAgent",
description="Specializes in preliminary symptom analysis and diagnostic suggestions",
system_prompt="""You are a medical diagnostic assistant. Analyze symptoms and provide
evidence-based diagnostic suggestions, always noting this is for informational purposes
only and recommending professional medical consultation.""",
model_name="openai/gpt-4o"
),
Agent(
agent_name="TreatmentPlanningAgent",
description="Assists in creating treatment plans and medical documentation",
system_prompt="""You are a treatment planning assistant. Help create structured
treatment plans based on confirmed diagnoses, following medical best practices
and guidelines.""",
model_name="openai/gpt-4o"
),
Agent(
agent_name="MedicalResearchAgent",
description="Analyzes medical research papers and clinical studies",
system_prompt="""You are a medical research analyst. Analyze and summarize medical
research papers, clinical trials, and scientific studies, providing evidence-based
insights.""",
model_name="openai/gpt-4o"
)
]
# Initialize router
healthcare_router = MultiAgentRouter(
name="Healthcare-Router",
description="Routes medical and healthcare-related tasks to specialized agents",
agents=agents,
model="gpt-4o",
temperature=0.1
)
# Example usage
try:
# Process medical case
case_analysis = healthcare_router.route_task(
"""Patient presents with:
- Persistent dry cough for 3 weeks
- Mild fever (38.1°C)
- Fatigue
Analyze symptoms and suggest potential diagnoses for healthcare provider review."""
)
# Research treatment options
treatment_research = healthcare_router.route_task(
"""Find recent clinical studies on treatment efficacy for community-acquired
pneumonia in adult patients, focusing on outpatient care."""
)
# Process multiple cases concurrently
cases = [
"Case 1: Patient symptoms...",
"Case 2: Patient symptoms...",
"Case 3: Patient symptoms..."
]
concurrent_results = healthcare_router.concurrent_batch_route(cases)
except Exception as e:
logger.error(f"Error in healthcare processing: {str(e)}")
```
### Finance Example
```python
# Define specialized finance agents
finance_agents = [
Agent(
agent_name="MarketAnalysisAgent",
description="Analyzes market trends and provides trading insights",
system_prompt="""You are a financial market analyst. Analyze market data, trends,
and indicators to provide evidence-based market insights and trading suggestions.""",
model_name="openai/gpt-4o"
),
Agent(
agent_name="RiskAssessmentAgent",
description="Evaluates financial risks and compliance requirements",
system_prompt="""You are a risk assessment specialist. Analyze financial data
and operations for potential risks, ensuring regulatory compliance and suggesting
risk mitigation strategies.""",
model_name="openai/gpt-4o"
),
Agent(
agent_name="InvestmentAgent",
description="Provides investment strategies and portfolio management",
system_prompt="""You are an investment strategy specialist. Develop and analyze
investment strategies, portfolio allocations, and provide long-term financial
planning guidance.""",
model_name="openai/gpt-4o"
)
]
# Initialize finance router
finance_router = MultiAgentRouter(
name="Finance-Router",
description="Routes financial analysis and investment tasks",
agents=finance_agents
)
# Example tasks
tasks = [
"""Analyze current market conditions for technology sector, focusing on:
- AI/ML companies
- Semiconductor manufacturers
- Cloud service providers
Provide risk assessment and investment opportunities.""",
"""Develop a diversified portfolio strategy for a conservative investor with:
- Investment horizon: 10 years
- Risk tolerance: Low to medium
- Initial investment: $500,000
- Monthly contribution: $5,000""",
"""Conduct risk assessment for a fintech startup's crypto trading platform:
- Regulatory compliance requirements
- Security measures
- Operational risks
- Market risks"""
]
# Process tasks concurrently
results = finance_router.concurrent_batch_route(tasks)
```
### Legal Example
```python
# Define specialized legal agents
legal_agents = [
Agent(
agent_name="ContractAnalysisAgent",
description="Analyzes legal contracts and documents",
system_prompt="""You are a legal document analyst. Review contracts and legal
documents for key terms, potential issues, and compliance requirements.""",
model_name="openai/gpt-4o"
),
Agent(
agent_name="ComplianceAgent",
description="Ensures regulatory compliance and updates",
system_prompt="""You are a legal compliance specialist. Monitor and analyze
regulatory requirements, ensuring compliance and suggesting necessary updates
to policies and procedures.""",
model_name="openai/gpt-4o"
),
Agent(
agent_name="LegalResearchAgent",
description="Conducts legal research and case analysis",
system_prompt="""You are a legal researcher. Research relevant cases, statutes,
and regulations, providing comprehensive legal analysis and citations.""",
model_name="openai/gpt-4o"
)
]
# Initialize legal router
legal_router = MultiAgentRouter(
name="Legal-Router",
description="Routes legal analysis and compliance tasks",
agents=legal_agents
)
# Example usage for legal department
contract_analysis = legal_router.route_task(
"""Review the following software licensing agreement:
[contract text]
Analyze for:
1. Key terms and conditions
2. Potential risks and liabilities
3. Compliance with current regulations
4. Suggested modifications"""
)
```
## Error Handling and Best Practices
1. Always use try-except blocks for task routing:
```python
try:
result = router.route_task(task)
except Exception as e:
logger.error(f"Task routing failed: {str(e)}")
```
2. Monitor agent performance:
```python
if result["execution"]["execution_time"] > 5.0:
logger.warning(f"Long execution time for task: {result['task']['original']}")
```
3. Implement rate limiting for concurrent tasks:
```python
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(max_workers=5) as executor:
results = router.concurrent_batch_route(tasks)
```
4. Regular agent validation:
```python
for agent in router.agents.values():
if not agent.validate():
logger.error(f"Agent validation failed: {agent.name}")
```
## Performance Considerations
1. Task Batching
- Group similar tasks together
- Use concurrent_batch_route for independent tasks
- Monitor memory usage with large batches
2. Model Selection
- Choose appropriate models based on task complexity
- Balance speed vs. accuracy requirements
- Consider cost implications
3. Response Caching
- Implement caching for frequently requested analyses
- Use shared memory system for repeated queries
- Regular cache invalidation for time-sensitive data
## Security Considerations
1. Data Privacy
- Implement data encryption
- Handle sensitive information appropriately
- Regular security audits
2. Access Control
- Implement role-based access
- Audit logging
- Regular permission reviews
## Monitoring and Logging
1. Performance Metrics
- Response times
- Success rates
- Error rates
- Resource utilization
2. Logging
- Use structured logging
- Implement log rotation
- Regular log analysis
3. Alerts
- Set up alerting for critical errors
- Monitor resource usage
- Track API rate limits

@ -0,0 +1,334 @@
# SwarmRearrange Documentation
SwarmRearrange is a class for orchestrating multiple swarms in a sequential or parallel flow pattern. It provides thread-safe operations for managing swarm execution, history tracking, and flow validation.
## Constructor Arguments
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| id | str | UUID | Unique identifier for the swarm arrangement |
| name | str | "SwarmRearrange" | Name of the swarm arrangement |
| description | str | "A swarm of swarms..." | Description of the arrangement |
| swarms | List[Any] | [] | List of swarm objects to be managed |
| flow | str | None | Flow pattern for swarm execution |
| max_loops | int | 1 | Maximum number of execution loops |
| verbose | bool | True | Enable detailed logging |
| human_in_the_loop | bool | False | Enable human intervention |
| custom_human_in_the_loop | Callable | None | Custom function for human interaction |
| return_json | bool | False | Return results in JSON format |
## Methods
### add_swarm(swarm: Any)
Adds a single swarm to the arrangement.
### remove_swarm(swarm_name: str)
Removes a swarm by name from the arrangement.
### add_swarms(swarms: List[Any])
Adds multiple swarms to the arrangement.
### validate_flow()
Validates the flow pattern syntax and swarm names.
### run(task: str = None, img: str = None, custom_tasks: Dict[str, str] = None)
Executes the swarm arrangement according to the flow pattern.
## Flow Pattern Syntax
The flow pattern uses arrow notation (`->`) to define execution order:
- Sequential: `"SwarmA -> SwarmB -> SwarmC"`
- Parallel: `"SwarmA, SwarmB -> SwarmC"`
- Human intervention: Use `"H"` in the flow
## Examples
### Basic Sequential Flow
```python
from swarms.structs.swarm_arange import SwarmRearrange
import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat
# model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
company = "TGSC"
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize the Managing Director agent
managing_director = Agent(
agent_name="Managing-Director",
system_prompt=f"""
As the Managing Director at Blackstone, your role is to oversee the entire investment analysis process for potential acquisitions.
Your responsibilities include:
1. Setting the overall strategy and direction for the analysis
2. Coordinating the efforts of the various team members and ensuring a comprehensive evaluation
3. Reviewing the findings and recommendations from each team member
4. Making the final decision on whether to proceed with the acquisition
For the current potential acquisition of {company}, direct the tasks for the team to thoroughly analyze all aspects of the company, including its financials, industry position, technology, market potential, and regulatory compliance. Provide guidance and feedback as needed to ensure a rigorous and unbiased assessment.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="managing-director.json",
)
# Initialize the Vice President of Finance
vp_finance = Agent(
agent_name="VP-Finance",
system_prompt=f"""
As the Vice President of Finance at Blackstone, your role is to lead the financial analysis of potential acquisitions.
For the current potential acquisition of {company}, your tasks include:
1. Conducting a thorough review of {company}' financial statements, including income statements, balance sheets, and cash flow statements
2. Analyzing key financial metrics such as revenue growth, profitability margins, liquidity ratios, and debt levels
3. Assessing the company's historical financial performance and projecting future performance based on assumptions and market conditions
4. Identifying any financial risks or red flags that could impact the acquisition decision
5. Providing a detailed report on your findings and recommendations to the Managing Director
Be sure to consider factors such as the sustainability of {company}' business model, the strength of its customer base, and its ability to generate consistent cash flows. Your analysis should be data-driven, objective, and aligned with Blackstone's investment criteria.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="vp-finance.json",
)
# Initialize the Industry Analyst
industry_analyst = Agent(
agent_name="Industry-Analyst",
system_prompt=f"""
As the Industry Analyst at Blackstone, your role is to provide in-depth research and analysis on the industries and markets relevant to potential acquisitions.
For the current potential acquisition of {company}, your tasks include:
1. Conducting a comprehensive analysis of the industrial robotics and automation solutions industry, including market size, growth rates, key trends, and future prospects
2. Identifying the major players in the industry and assessing their market share, competitive strengths and weaknesses, and strategic positioning
3. Evaluating {company}' competitive position within the industry, including its market share, differentiation, and competitive advantages
4. Analyzing the key drivers and restraints for the industry, such as technological advancements, labor costs, regulatory changes, and economic conditions
5. Identifying potential risks and opportunities for {company} based on the industry analysis, such as disruptive technologies, emerging markets, or shifts in customer preferences
Your analysis should provide a clear and objective assessment of the attractiveness and future potential of the industrial robotics industry, as well as {company}' positioning within it. Consider both short-term and long-term factors, and provide evidence-based insights to inform the investment decision.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="industry-analyst.json",
)
# Initialize the Technology Expert
tech_expert = Agent(
agent_name="Tech-Expert",
system_prompt=f"""
As the Technology Expert at Blackstone, your role is to assess the technological capabilities, competitive advantages, and potential risks of companies being considered for acquisition.
For the current potential acquisition of {company}, your tasks include:
1. Conducting a deep dive into {company}' proprietary technologies, including its robotics platforms, automation software, and AI capabilities
2. Assessing the uniqueness, scalability, and defensibility of {company}' technology stack and intellectual property
3. Comparing {company}' technologies to those of its competitors and identifying any key differentiators or technology gaps
4. Evaluating {company}' research and development capabilities, including its innovation pipeline, engineering talent, and R&D investments
5. Identifying any potential technology risks or disruptive threats that could impact {company}' long-term competitiveness, such as emerging technologies or expiring patents
Your analysis should provide a comprehensive assessment of {company}' technological strengths and weaknesses, as well as the sustainability of its competitive advantages. Consider both the current state of its technology and its future potential in light of industry trends and advancements.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="tech-expert.json",
)
# Initialize the Market Researcher
market_researcher = Agent(
agent_name="Market-Researcher",
system_prompt=f"""
As the Market Researcher at Blackstone, your role is to analyze the target company's customer base, market share, and growth potential to assess the commercial viability and attractiveness of the potential acquisition.
For the current potential acquisition of {company}, your tasks include:
1. Analyzing {company}' current customer base, including customer segmentation, concentration risk, and retention rates
2. Assessing {company}' market share within its target markets and identifying key factors driving its market position
3. Conducting a detailed market sizing and segmentation analysis for the industrial robotics and automation markets, including identifying high-growth segments and emerging opportunities
4. Evaluating the demand drivers and sales cycles for {company}' products and services, and identifying any potential risks or limitations to adoption
5. Developing financial projections and estimates for {company}' revenue growth potential based on the market analysis and assumptions around market share and penetration
Your analysis should provide a data-driven assessment of the market opportunity for {company} and the feasibility of achieving our investment return targets. Consider both bottom-up and top-down market perspectives, and identify any key sensitivities or assumptions in your projections.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="market-researcher.json",
)
# Initialize the Regulatory Specialist
regulatory_specialist = Agent(
agent_name="Regulatory-Specialist",
system_prompt=f"""
As the Regulatory Specialist at Blackstone, your role is to identify and assess any regulatory risks, compliance requirements, and potential legal liabilities associated with potential acquisitions.
For the current potential acquisition of {company}, your tasks include:
1. Identifying all relevant regulatory bodies and laws that govern the operations of {company}, including industry-specific regulations, labor laws, and environmental regulations
2. Reviewing {company}' current compliance policies, procedures, and track record to identify any potential gaps or areas of non-compliance
3. Assessing the potential impact of any pending or proposed changes to relevant regulations that could affect {company}' business or create additional compliance burdens
4. Evaluating the potential legal liabilities and risks associated with {company}' products, services, and operations, including product liability, intellectual property, and customer contracts
5. Providing recommendations on any regulatory or legal due diligence steps that should be taken as part of the acquisition process, as well as any post-acquisition integration considerations
Your analysis should provide a comprehensive assessment of the regulatory and legal landscape surrounding {company}, and identify any material risks or potential deal-breakers. Consider both the current state and future outlook, and provide practical recommendations to mitigate identified risks.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="regulatory-specialist.json",
)
# Create a list of agents
agents = [
managing_director,
vp_finance,
industry_analyst,
tech_expert,
market_researcher,
regulatory_specialist,
]
# Define multiple flow patterns
flows = [
"Industry-Analyst -> Tech-Expert -> Market-Researcher -> Regulatory-Specialist -> Managing-Director -> VP-Finance",
"Managing-Director -> VP-Finance -> Industry-Analyst -> Tech-Expert -> Market-Researcher -> Regulatory-Specialist",
"Tech-Expert -> Market-Researcher -> Regulatory-Specialist -> Industry-Analyst -> Managing-Director -> VP-Finance",
]
# Create instances of AgentRearrange for each flow pattern
blackstone_acquisition_analysis = AgentRearrange(
name="Blackstone-Acquisition-Analysis",
description="A system for analyzing potential acquisitions",
agents=agents,
flow=flows[0],
)
blackstone_investment_strategy = AgentRearrange(
name="Blackstone-Investment-Strategy",
description="A system for evaluating investment opportunities",
agents=agents,
flow=flows[1],
)
blackstone_market_analysis = AgentRearrange(
name="Blackstone-Market-Analysis",
description="A system for analyzing market trends and opportunities",
agents=agents,
flow=flows[2],
)
swarm_arrange = SwarmRearrange(
swarms=[
blackstone_acquisition_analysis,
blackstone_investment_strategy,
blackstone_market_analysis,
],
flow=f"{blackstone_acquisition_analysis.name} -> {blackstone_investment_strategy.name} -> {blackstone_market_analysis.name}",
)
print(
swarm_arrange.run(
"Analyze swarms, 150k revenue with 45m+ agents build, with 1.4m downloads since march 2024"
)
)
```
### Human-in-the-Loop
```python
def custom_human_input(task):
return input(f"Review {task} and provide feedback: ")
# Create arrangement with human intervention
arrangement = SwarmRearrange(
name="HumanAugmented",
swarms=[swarm1, swarm2],
flow="SwarmA -> H -> SwarmB",
human_in_the_loop=True,
custom_human_in_the_loop=custom_human_input
)
# Execute with human intervention
result = arrangement.run("Initial task")
```
### Complex Multi-Stage Pipeline
```python
# Define multiple flow patterns
flows = [
"Collector -> Processor -> Analyzer",
"Analyzer -> ML -> Validator",
"Validator -> Reporter"
]
# Create arrangements for each flow
pipelines = [
SwarmRearrange(name=f"Pipeline{i}", swarms=swarms, flow=flow)
for i, flow in enumerate(flows)
]
# Create master arrangement
master = SwarmRearrange(
name="MasterPipeline",
swarms=pipelines,
flow="Pipeline0 -> Pipeline1 -> Pipeline2"
)
# Execute complete pipeline
result = master.run("Start analysis")
```
## Best Practices
1. **Flow Validation**: Always validate flows before execution
2. **Error Handling**: Implement try-catch blocks around run() calls
3. **History Tracking**: Use track_history() for monitoring swarm execution
4. **Resource Management**: Set appropriate max_loops to prevent infinite execution
5. **Logging**: Enable verbose mode during development for detailed logging
## Error Handling
The class implements comprehensive error handling:
```python
try:
arrangement = SwarmRearrange(swarms=swarms, flow=flow)
result = arrangement.run(task)
except ValueError as e:
logger.error(f"Flow validation error: {e}")
except Exception as e:
logger.error(f"Execution error: {e}")
```

@ -0,0 +1,727 @@
# Medical Coder Swarm API Documentation
Base URL: `https://mcs-285321057562.us-central1.run.app`
## Table of Contents
- [Authentication](#authentication)
- [Rate Limits](#rate-limits)
- [Endpoints](#endpoints)
- [Health Check](#health-check)
- [Run Medical Coder](#run-medical-coder)
- [Run Batch Medical Coder](#run-batch-medical-coder)
- [Get Patient Data](#get-patient-data)
- [Get All Patients](#get-all-patients)
- [Code Examples](#code-examples)
- [Error Handling](#error-handling)
## Authentication
Authentication details will be provided by the MCS team. Contact support for API credentials.
## Rate Limits
| Endpoint | GET Rate Limit Status |
|----------|----------------------|
| `GET /rate-limits` | Returns current rate limit status for your IP address |
## Endpoints
### Health Check
Check if the API is operational.
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/health` | Returns 200 OK if service is running |
### Run Medical Coder
Process a single patient case through the Medical Coder Swarm.
| Method | Endpoint | Description |
|--------|----------|-------------|
| `POST` | `/v1/medical-coder/run` | Process a single patient case |
**Request Body Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| patient_id | string | Yes | Unique identifier for the patient |
| case_description | string | Yes | Medical case details to be processed |
**Response Schema:**
| Field | Type | Description |
|-------|------|-------------|
| patient_id | string | Patient identifier |
| case_data | string | Processed case data |
### Run Batch Medical Coder
Process multiple patient cases in a single request.
| Method | Endpoint | Description |
|--------|----------|-------------|
| `POST` | `/v1/medical-coder/run-batch` | Process multiple patient cases |
**Request Body Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| cases | array | Yes | Array of PatientCase objects |
### Get Patient Data
Retrieve data for a specific patient.
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/v1/medical-coder/patient/{patient_id}` | Get patient data by ID |
**Path Parameters:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| patient_id | string | Yes | Patient identifier |
### Get All Patients
Retrieve data for all patients.
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/v1/medical-coder/patients` | Get all patient data |
## Code Examples
### Python
```python
import requests
import json
class MCSClient:
def __init__(self, base_url="https://mcs.swarms.ai", api_key=None):
self.base_url = base_url
self.headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}" if api_key else None
}
def run_medical_coder(self, patient_id, case_description):
endpoint = f"{self.base_url}/v1/medical-coder/run"
payload = {
"patient_id": patient_id,
"case_description": case_description
}
response = requests.post(endpoint, json=payload, headers=self.headers)
return response.json()
def run_batch(self, cases):
endpoint = f"{self.base_url}/v1/medical-coder/run-batch"
payload = {"cases": cases}
response = requests.post(endpoint, json=payload, headers=self.headers)
return response.json()
# Usage example
client = MCSClient(api_key="your_api_key")
result = client.run_medical_coder("P123", "Patient presents with...")
```
### Next.js (TypeScript)
```typescript
// types.ts
interface PatientCase {
patient_id: string;
case_description: string;
}
interface QueryResponse {
patient_id: string;
case_data: string;
}
// api.ts
export class MCSApi {
private baseUrl: string;
private apiKey: string;
constructor(apiKey: string, baseUrl = 'https://mcs.swarms.ai') {
this.baseUrl = baseUrl;
this.apiKey = apiKey;
}
private async fetchWithAuth(endpoint: string, options: RequestInit = {}) {
const response = await fetch(`${this.baseUrl}${endpoint}`, {
...options,
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${this.apiKey}`,
...options.headers,
},
});
return response.json();
}
async runMedicalCoder(patientCase: PatientCase): Promise<QueryResponse> {
return this.fetchWithAuth('/v1/medical-coder/run', {
method: 'POST',
body: JSON.stringify(patientCase),
});
}
async getPatientData(patientId: string): Promise<QueryResponse> {
return this.fetchWithAuth(`/v1/medical-coder/patient/${patientId}`);
}
}
// Usage in component
const mcsApi = new MCSApi(process.env.MCS_API_KEY);
export async function ProcessPatientCase({ patientId, caseDescription }) {
const result = await mcsApi.runMedicalCoder({
patient_id: patientId,
case_description: caseDescription,
});
return result;
}
```
### Go
```go
package mcs
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
)
type MCSClient struct {
BaseURL string
APIKey string
Client *http.Client
}
type PatientCase struct {
PatientID string `json:"patient_id"`
CaseDescription string `json:"case_description"`
}
type QueryResponse struct {
PatientID string `json:"patient_id"`
CaseData string `json:"case_data"`
}
func NewMCSClient(apiKey string) *MCSClient {
return &MCSClient{
BaseURL: "https://mcs.swarms.ai",
APIKey: apiKey,
Client: &http.Client{},
}
}
func (c *MCSClient) RunMedicalCoder(patientCase PatientCase) (*QueryResponse, error) {
payload, err := json.Marshal(patientCase)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST",
fmt.Sprintf("%s/v1/medical-coder/run", c.BaseURL),
bytes.NewBuffer(payload))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.APIKey))
resp, err := c.Client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
var result QueryResponse
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return nil, err
}
return &result, nil
}
// Usage example
func main() {
client := NewMCSClient("your_api_key")
result, err := client.RunMedicalCoder(PatientCase{
PatientID: "P123",
CaseDescription: "Patient presents with...",
})
if err != nil {
panic(err)
}
fmt.Printf("Result: %+v\n", result)
}
```
## C Sharp
```txt
using System;
using System.Net.Http;
using System.Text;
using System.Text.Json;
using System.Threading.Tasks;
namespace MedicalCoderSwarm
{
public class PatientCase
{
public string PatientId { get; set; }
public string CaseDescription { get; set; }
}
public class QueryResponse
{
public string PatientId { get; set; }
public string CaseData { get; set; }
}
public class MCSClient : IDisposable
{
private readonly HttpClient _httpClient;
private readonly string _baseUrl;
public MCSClient(string apiKey, string baseUrl = "https://mcs-285321057562.us-central1.run.app")
{
_baseUrl = baseUrl;
_httpClient = new HttpClient();
_httpClient.DefaultRequestHeaders.Add("Authorization", $"Bearer {apiKey}");
_httpClient.DefaultRequestHeaders.Add("Content-Type", "application/json");
}
public async Task<QueryResponse> RunMedicalCoderAsync(string patientId, string caseDescription)
{
var payload = new PatientCase
{
PatientId = patientId,
CaseDescription = caseDescription
};
var content = new StringContent(
JsonSerializer.Serialize(payload),
Encoding.UTF8,
"application/json"
);
var response = await _httpClient.PostAsync(
$"{_baseUrl}/v1/medical-coder/run",
content
);
response.EnsureSuccessStatusCode();
var responseContent = await response.Content.ReadAsStringAsync();
return JsonSerializer.Deserialize<QueryResponse>(responseContent);
}
public async Task<QueryResponse> GetPatientDataAsync(string patientId)
{
var response = await _httpClient.GetAsync(
$"{_baseUrl}/v1/medical-coder/patient/{patientId}"
);
response.EnsureSuccessStatusCode();
var responseContent = await response.Content.ReadAsStringAsync();
return JsonSerializer.Deserialize<QueryResponse>(responseContent);
}
public async Task<bool> HealthCheckAsync()
{
var response = await _httpClient.GetAsync($"{_baseUrl}/health");
return response.IsSuccessStatusCode;
}
public void Dispose()
{
_httpClient?.Dispose();
}
}
// Example usage
public class Program
{
public static async Task Main()
{
try
{
using var client = new MCSClient("your_api_key");
// Check API health
var isHealthy = await client.HealthCheckAsync();
Console.WriteLine($"API Health: {(isHealthy ? "Healthy" : "Unhealthy")}");
// Process a single case
var result = await client.RunMedicalCoderAsync(
"P123",
"Patient presents with acute respiratory symptoms..."
);
Console.WriteLine($"Processed case for patient {result.PatientId}");
Console.WriteLine($"Case data: {result.CaseData}");
// Get patient data
var patientData = await client.GetPatientDataAsync("P123");
Console.WriteLine($"Retrieved data for patient {patientData.PatientId}");
}
catch (HttpRequestException ex)
{
Console.WriteLine($"API request failed: {ex.Message}");
}
catch (Exception ex)
{
Console.WriteLine($"An error occurred: {ex.Message}");
}
}
}
}
```
## Error Handling
The API uses standard HTTP status codes and returns detailed error messages in JSON format.
**Common Status Codes:**
| Status Code | Description |
|-------------|-------------|
| 200 | Success |
| 400 | Bad Request - Invalid input |
| 401 | Unauthorized - Invalid or missing API key |
| 422 | Validation Error - Request validation failed |
| 429 | Too Many Requests - Rate limit exceeded |
| 500 | Internal Server Error |
**Error Response Format:**
```json
{
"detail": [
{
"loc": ["body", "patient_id"],
"msg": "field required",
"type": "value_error.missing"
}
]
}
```
# MCS Python Client Documentation
## Installation
```bash
pip install mcs
```
## Quick Start
```python
from mcs import MCSClient, PatientCase
# Using context manager (recommended)
with MCSClient() as client:
# Process a single case
response = client.run_medical_coder(
patient_id="P123",
case_description="Patient presents with acute respiratory symptoms..."
)
print(f"Processed case: {response.case_data}")
# Process multiple cases
cases = [
PatientCase("P124", "Case 1 description..."),
PatientCase("P125", "Case 2 description...")
]
batch_response = client.run_batch(cases)
```
## Client Configuration
### Constructor Arguments
| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| api_key | str | Yes | - | Authentication API key |
| base_url | str | No | "https://mcs.swarms.ai" | API base URL |
| timeout | int | No | 30 | Request timeout in seconds |
| max_retries | int | No | 3 | Maximum retry attempts |
| logger_name | str | No | "mcs" | Name for the logger instance |
### Example Configuration
```python
client = MCSClient(
base_url="https://custom-url.example.com",
timeout=45,
max_retries=5,
logger_name="custom_logger"
)
```
## Data Models
### PatientCase
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| patient_id | str | Yes | Unique identifier for the patient |
| case_description | str | Yes | Medical case details |
### QueryResponse
| Field | Type | Description |
|-------|------|-------------|
| patient_id | str | Patient identifier |
| case_data | str | Processed case data |
## Methods
### run_medical_coder
Process a single patient case.
```python
def run_medical_coder(
self,
patient_id: str,
case_description: str
) -> QueryResponse:
```
**Arguments:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| patient_id | str | Yes | Patient identifier |
| case_description | str | Yes | Case details |
**Example:**
```python
response = client.run_medical_coder(
patient_id="P123",
case_description="Patient presents with..."
)
print(response.case_data)
```
### run_batch
Process multiple patient cases in batch.
```python
def run_batch(
self,
cases: List[PatientCase]
) -> List[QueryResponse]:
```
**Arguments:**
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| cases | List[PatientCase] | Yes | List of patient cases |
**Example:**
```python
cases = [
PatientCase("P124", "Case 1 description..."),
PatientCase("P125", "Case 2 description...")
]
responses = client.run_batch(cases)
for response in responses:
print(f"Patient {response.patient_id}: {response.case_data}")
```
### get_patient_data
Retrieve data for a specific patient.
```python
def get_patient_data(
self,
patient_id: str
) -> QueryResponse:
```
**Example:**
```python
patient_data = client.get_patient_data("P123")
print(f"Patient data: {patient_data.case_data}")
```
### get_all_patients
Retrieve data for all patients.
```python
def get_all_patients(self) -> List[QueryResponse]:
```
**Example:**
```python
all_patients = client.get_all_patients()
for patient in all_patients:
print(f"Patient {patient.patient_id}: {patient.case_data}")
```
### get_rate_limits
Get current rate limit status.
```python
def get_rate_limits(self) -> Dict[str, Any]:
```
**Example:**
```python
rate_limits = client.get_rate_limits()
print(f"Rate limit status: {rate_limits}")
```
### health_check
Check if the API is operational.
```python
def health_check(self) -> bool:
```
**Example:**
```python
is_healthy = client.health_check()
print(f"API health: {'Healthy' if is_healthy else 'Unhealthy'}")
```
## Error Handling
### Exception Hierarchy
| Exception | Description |
|-----------|-------------|
| MCSClientError | Base exception for all client errors |
| RateLimitError | Raised when API rate limit is exceeded |
| AuthenticationError | Raised when API authentication fails |
| ValidationError | Raised when request validation fails |
### Example Error Handling
```python
from mcs import MCSClient, MCSClientError, RateLimitError
with MCSClient() as client:
try:
response = client.run_medical_coder("P123", "Case description...")
except RateLimitError:
print("Rate limit exceeded. Please wait before retrying.")
except MCSClientError as e:
print(f"An error occurred: {str(e)}")
```
## Advanced Usage
### Retry Configuration
The client implements two levels of retry logic:
1. Connection-level retries (using `HTTPAdapter`):
```python
client = MCSClient(
,
max_retries=5 # Adjusts connection-level retries
)
```
2. Application-level retries (using `tenacity`):
```python
from tenacity import retry, stop_after_attempt
@retry(stop=stop_after_attempt(5))
def process_with_custom_retries():
with MCSClient() as client:
return client.run_medical_coder("P123", "Case description...")
```
### Batch Processing with Progress Tracking
```python
from tqdm import tqdm
with MCSClient() as client:
cases = [
PatientCase(f"P{i}", f"Case description {i}")
for i in range(100)
]
# Process in smaller batches
batch_size = 10
results = []
for i in tqdm(range(0, len(cases), batch_size)):
batch = cases[i:i + batch_size]
batch_results = client.run_batch(batch)
results.extend(batch_results)
```
## Best Practices
1. **Always use context managers:**
```python
with MCSClient() as client:
# Your code here
pass
```
2. **Handle rate limits appropriately:**
```python
from time import sleep
def process_with_rate_limit_handling():
with MCSClient() as client:
try:
return client.run_medical_coder("P123", "Case...")
except RateLimitError:
sleep(60) # Wait before retry
return client.run_medical_coder("P123", "Case...")
```
3. **Implement proper logging:**
```python
from loguru import logger
logger.add("mcs.log", rotation="500 MB")
with MCSClient() as client:
try:
response = client.run_medical_coder("P123", "Case...")
except Exception as e:
logger.exception(f"Error processing case: {str(e)}")
```
4. **Monitor API health:**
```python
def ensure_healthy_api():
with MCSClient() as client:
if not client.health_check():
raise SystemExit("API is not healthy")
```

@ -7,21 +7,17 @@ from swarms.prompts.finance_agent_sys_prompt import (
agent = Agent(
agent_name="Financial-Analysis-Agent",
agent_description="Personal finance advisor agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT
+ "Output the <DONE> token when you're done creating a portfolio of etfs, index, funds, and more for AI",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
model_name="gpt-4o",
dynamic_temperature_enabled=True,
user_name="Kye",
user_name="swarms_corp",
retry_attempts=3,
# streaming_on=True,
context_length=8192,
return_step_meta=False,
output_type="str", # "json", "dict", "csv" OR "string" "yaml" and
auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
max_tokens=4000, # max output tokens
# interactive=True,
stopping_token="<DONE>",
saved_state_path="agent_00.json",
interactive=False,
)

@ -8,21 +8,21 @@ if __name__ == "__main__":
# Create agents
data_collector = Agent(
agent_name="Market-Data-Collector",
model_name="gpt-4o-mini",
model_name="openai/gpt-4o",
max_loops=1,
streaming_on=True,
)
trend_analyzer = Agent(
agent_name="Market-Trend-Analyzer",
model_name="gpt-4o-mini",
model_name="openai/gpt-4o",
max_loops=1,
streaming_on=True,
)
report_generator = Agent(
agent_name="Investment-Report-Generator",
model_name="gpt-4o-mini",
model_name="openai/gpt-4o",
max_loops=1,
streaming_on=True,
)

@ -0,0 +1,404 @@
from swarms import Agent, SequentialWorkflow
# Chief Metallurgist
chief_metallurgist = Agent(
agent_name="Chief-Metallurgist",
system_prompt="""
As the Chief Metallurgist, you are responsible for overseeing the entire alloy development process and coordinating with your team, which includes:
Your Team Members:
- Materials Scientist: Consult them for detailed physical and mechanical property analysis
- Process Engineer: Work with them on manufacturing feasibility and process requirements
- Quality Assurance Specialist: Coordinate on quality standards and testing protocols
- Applications Engineer: Align theoretical developments with practical applications
- Cost Analyst: Ensure developments remain economically viable
Your expertise covers:
1. Theoretical Analysis:
- Atomic structure and bonding mechanisms
- Phase diagrams and transformation kinetics
- Crystal structure optimization
- Theoretical strength calculations
2. Composition Development:
- Element selection and ratios
- Microstructure prediction
- Phase stability analysis
- Solid solution strengthening mechanisms
3. Project Coordination:
- Integration of findings from all team members
- Validation of proposed compositions
- Risk assessment of new formulations
- Final recommendations for alloy development
For each new alloy proposal, systematically:
1. Review the target properties and applications
2. Analyze the theoretical feasibility
3. Evaluate the proposed composition
4. Assess potential risks and challenges
5. Provide detailed recommendations
Ensure all analyses consider:
- Thermodynamic stability
- Mechanical properties
- Cost-effectiveness
- Manufacturability
- Environmental impact
Your output should include detailed scientific rationale for all decisions and recommendations.
""",
model_name="openai/gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
)
# Materials Scientist
materials_scientist = Agent(
agent_name="Materials-Scientist",
system_prompt="""
As the Materials Scientist, your role focuses on the fundamental material properties and behavior. You work closely with:
Your Team Members:
- Chief Metallurgist: Receive overall direction and provide property analysis input
- Process Engineer: Share materials requirements for process development
- Quality Assurance Specialist: Define measurable property specifications
- Applications Engineer: Understand property requirements for specific applications
- Cost Analyst: Provide material property constraints that impact costs
Your responsibilities include:
1. Physical Properties Analysis:
- Density calculations
- Thermal properties (conductivity, expansion, melting point)
- Electrical properties
- Magnetic properties
- Surface properties
2. Mechanical Properties Analysis:
- Tensile strength
- Yield strength
- Hardness
- Ductility
- Fatigue resistance
- Fracture toughness
3. Microstructure Analysis:
- Phase composition
- Grain structure
- Precipitation behavior
- Interface characteristics
- Defect analysis
4. Property Optimization:
- Structure-property relationships
- Property enhancement mechanisms
- Trade-off analysis
- Performance prediction
For each analysis:
1. Conduct theoretical calculations
2. Predict property ranges
3. Identify critical parameters
4. Suggest optimization strategies
Consider:
- Property stability over temperature ranges
- Environmental effects
- Aging characteristics
- Application-specific requirements
Provide quantitative predictions where possible and identify key uncertainties.
""",
model_name="openai/gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
)
# Process Engineer
process_engineer = Agent(
agent_name="Process-Engineer",
system_prompt="""
As the Process Engineer, you are responsible for developing and optimizing the manufacturing processes. You collaborate with:
Your Team Members:
- Chief Metallurgist: Ensure processes align with composition requirements
- Materials Scientist: Understand material behavior during processing
- Quality Assurance Specialist: Develop in-process quality controls
- Applications Engineer: Adapt processes to meet application needs
- Cost Analyst: Optimize processes for cost efficiency
Your focus areas include:
1. Manufacturing Process Design:
- Melting and casting procedures
- Heat treatment protocols
- Forming operations
- Surface treatments
- Quality control methods
2. Process Parameters:
- Temperature profiles
- Pressure requirements
- Atmospheric conditions
- Cooling rates
- Treatment durations
3. Equipment Specifications:
- Furnace requirements
- Tooling needs
- Monitoring systems
- Safety equipment
- Quality control instruments
4. Process Optimization:
- Efficiency improvements
- Cost reduction strategies
- Quality enhancement
- Waste minimization
- Energy optimization
For each process design:
1. Develop detailed process flow
2. Specify critical parameters
3. Identify control points
4. Define quality metrics
5. Establish safety protocols
Consider:
- Scale-up challenges
- Equipment limitations
- Process variability
- Quality assurance
- Environmental impact
Provide comprehensive process documentation and control specifications.
""",
model_name="openai/gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
)
# Quality Assurance Specialist
qa_specialist = Agent(
agent_name="QA-Specialist",
system_prompt="""
As the Quality Assurance Specialist, you are responsible for establishing and validating quality standards. You interact with:
Your Team Members:
- Chief Metallurgist: Align quality standards with design specifications
- Materials Scientist: Develop property testing protocols
- Process Engineer: Establish process control parameters
- Applications Engineer: Ensure quality metrics meet application requirements
- Cost Analyst: Balance quality measures with cost constraints
Your key areas include:
1. Quality Standards Development:
- Property specifications
- Compositional tolerances
- Surface finish requirements
- Dimensional accuracy
- Performance criteria
2. Testing Protocols:
- Mechanical testing methods
- Chemical analysis procedures
- Microstructure examination
- Non-destructive testing
- Environmental testing
3. Quality Control:
- Sampling procedures
- Statistical analysis methods
- Process capability studies
- Defect classification
- Corrective action procedures
4. Documentation:
- Test specifications
- Quality manuals
- Inspection procedures
- Certification requirements
- Traceability systems
For each quality system:
1. Define quality parameters
2. Establish testing methods
3. Develop acceptance criteria
4. Create documentation systems
5. Design validation procedures
Consider:
- Industry standards
- Customer requirements
- Regulatory compliance
- Cost effectiveness
- Practical implementation
Provide comprehensive quality assurance plans and specifications.
""",
model_name="openai/gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
)
# Applications Engineer
applications_engineer = Agent(
agent_name="Applications-Engineer",
system_prompt="""
As the Applications Engineer, you analyze potential applications and performance requirements. You work with:
Your Team Members:
- Chief Metallurgist: Translate application needs into material requirements
- Materials Scientist: Define required material properties
- Process Engineer: Ensure manufacturability meets application needs
- Quality Assurance Specialist: Define application-specific quality criteria
- Cost Analyst: Balance performance requirements with cost targets
Your responsibilities include:
1. Application Analysis:
- Use case identification
- Performance requirements
- Environmental conditions
- Service life expectations
- Compatibility requirements
2. Performance Evaluation:
- Stress analysis
- Wear resistance
- Corrosion resistance
- Temperature stability
- Environmental durability
3. Competitive Analysis:
- Market alternatives
- Performance benchmarking
- Cost comparison
- Advantage assessment
- Market positioning
4. Implementation Planning:
- Design guidelines
- Application procedures
- Installation requirements
- Maintenance protocols
- Performance monitoring
For each application:
1. Define performance criteria
2. Analyze operating conditions
3. Assess technical requirements
4. Evaluate practical limitations
5. Develop implementation guidelines
Consider:
- Application-specific demands
- Environmental factors
- Maintenance requirements
- Cost considerations
- Safety requirements
Provide detailed application assessments and implementation recommendations.
""",
model_name="openai/gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
)
# Cost Analyst
cost_analyst = Agent(
agent_name="Cost-Analyst",
system_prompt="""
As the Cost Analyst, you evaluate the economic aspects of alloy development and production. You collaborate with:
Your Team Members:
- Chief Metallurgist: Assess cost implications of alloy compositions
- Materials Scientist: Evaluate material cost-property relationships
- Process Engineer: Analyze manufacturing cost factors
- Quality Assurance Specialist: Balance quality costs with requirements
- Applications Engineer: Consider application-specific cost constraints
Your focus areas include:
1. Material Costs:
- Raw material pricing
- Supply chain analysis
- Volume considerations
- Market availability
- Price volatility assessment
2. Production Costs:
- Process expenses
- Equipment requirements
- Labor needs
- Energy consumption
- Overhead allocation
3. Economic Analysis:
- Cost modeling
- Break-even analysis
- Sensitivity studies
- ROI calculations
- Risk assessment
4. Cost Optimization:
- Process efficiency
- Material utilization
- Waste reduction
- Energy efficiency
- Labor optimization
For each analysis:
1. Develop cost models
2. Analyze cost drivers
3. Identify optimization opportunities
4. Assess economic viability
5. Provide recommendations
Consider:
- Market conditions
- Scale effects
- Regional variations
- Future trends
- Competition impact
Provide comprehensive cost analysis and economic feasibility assessments.
""",
model_name="openai/gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
)
# Create the agent list
agents = [
chief_metallurgist,
materials_scientist,
process_engineer,
qa_specialist,
applications_engineer,
cost_analyst,
]
# Initialize the workflow
swarm = SequentialWorkflow(
name="alloy-development-system",
agents=agents,
)
# Example usage
print(
swarm.run(
"""Analyze and develop a new high-strength aluminum alloy for aerospace applications
with improved fatigue resistance and corrosion resistance compared to 7075-T6,
while maintaining similar density and cost effectiveness."""
)
)

@ -0,0 +1,43 @@
from swarms import Agent
from swarms.structs.multi_agent_orchestrator import MultiAgentRouter
# Example usage:
if __name__ == "__main__":
# Define some example agents
agents = [
Agent(
agent_name="ResearchAgent",
description="Specializes in researching topics and providing detailed, factual information",
system_prompt="You are a research specialist. Provide detailed, well-researched information about any topic, citing sources when possible.",
model_name="openai/gpt-4o",
),
Agent(
agent_name="CodeExpertAgent",
description="Expert in writing, reviewing, and explaining code across multiple programming languages",
system_prompt="You are a coding expert. Write, review, and explain code with a focus on best practices and clean code principles.",
model_name="openai/gpt-4o",
),
Agent(
agent_name="WritingAgent",
description="Skilled in creative and technical writing, content creation, and editing",
system_prompt="You are a writing specialist. Create, edit, and improve written content while maintaining appropriate tone and style.",
model_name="openai/gpt-4o",
),
]
# Initialize routers with different configurations
router_execute = MultiAgentRouter(
agents=agents, execute_task=True
)
# Example task
task = "Write a Python function to calculate fibonacci numbers"
try:
# Process the task with execution
print("\nWith task execution:")
result_execute = router_execute.route_task(task)
print(result_execute)
except Exception as e:
print(f"Error occurred: {str(e)}")

@ -0,0 +1,119 @@
import os
from swarm_models import OpenAIChat
from swarms import Agent
from fluid_api_agent.main import fluid_api_request
from dotenv import load_dotenv
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
def omni_api(task: str) -> str:
"""
Omni API Function: Calls any API dynamically based on the task description.
This function leverages the `fluid_api_request` method to process a given task
and make the necessary API call dynamically. It is designed to be highly flexible,
allowing users to interact with a wide variety of APIs without needing
predefined configurations.
Parameters:
-----------
task : str
A descriptive string outlining the API call or task to be performed.
The description should include enough detail for `fluid_api_request`
to determine the appropriate API endpoint, request type, and payload.
Returns:
--------
dict
A dictionary containing the response data from the API call.
The structure of the response will vary based on the API being accessed.
Raises:
-------
ValueError
If the task string is insufficiently descriptive or cannot be mapped
to a valid API request.
HTTPError
If the API call results in an HTTP error (e.g., 404 Not Found, 500 Server Error).
Examples:
---------
1. Call a weather API to fetch the current weather for a city:
task = "Fetch the current weather for New York City"
response = omni_api(task)
print(response)
2. Retrieve stock prices for a specific company:
task = "Get the latest stock price for Apple Inc."
response = omni_api(task)
print(response)
3. Post a message to a Slack channel:
task = "Post 'Hello, Team!' to the #general channel in Slack"
response = omni_api(task)
print(response)
Notes:
------
- The `fluid_api_request` function must be implemented to interpret the `task` string
and handle API calls accordingly.
- Security and authentication for APIs should be managed within `fluid_api_request`.
"""
return str(fluid_api_request(task))
# Define the system prompt tailored for the API expert
API_AGENT_SYS_PROMPT = """
You are a highly specialized financial API expert.
Your expertise lies in analyzing financial data, making investment recommendations, and
interacting with APIs to retrieve, process, and present data effectively.
You use tools like 'omni_api' to fetch data dynamically, ensuring accuracy and up-to-date results.
Instructions:
1. Always query relevant APIs to gather insights for tasks.
2. When suggesting investments, ensure a diversified portfolio based on the user's budget, risk appetite, and growth potential.
3. Verify API responses and retry calls if necessary to ensure data accuracy.
"""
# Customize the agent for financial API tasks
agent = Agent(
agent_name="API-Finance-Expert",
agent_description="An API expert agent specialized in financial analysis and investment planning.",
system_prompt=API_AGENT_SYS_PROMPT,
max_loops=1, # Allow a few iterations for refining outputs
llm=model,
dynamic_temperature_enabled=True, # Enable temperature adjustments for optimal creativity
user_name="swarms_corp",
retry_attempts=5, # Retry API calls to ensure reliability
context_length=8192, # Context length for comprehensive analysis
return_step_meta=False,
output_type="str", # Output tables or results in markdown format
auto_generate_prompt=False, # Use the custom system prompt for guidance
max_tokens=4000,
saved_state_path="api_finance_expert.json",
tools=[omni_api], # Integrate the omni_api tool
)
# Run the agent with a financial task
agent.run(
"Fetch the current price for eth",
all_cores=True, # Utilize all processing cores for efficiency
)

@ -0,0 +1,96 @@
import os
from swarm_models import OpenAIChat
from swarms import Agent, run_agents_with_tasks_concurrently
# Fetch the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize agents for different roles
delaware_ccorp_agent = Agent(
agent_name="Delaware-CCorp-Hiring-Agent",
system_prompt="""
Create a comprehensive hiring description for a Delaware C Corporation,
including all relevant laws and regulations, such as the Delaware General
Corporation Law (DGCL) and the Delaware Corporate Law. Ensure the description
covers the requirements for hiring employees, contractors, and officers,
including the necessary paperwork, tax obligations, and benefits. Also,
outline the procedures for compliance with Delaware's employment laws,
including anti-discrimination laws, workers' compensation, and unemployment
insurance. Provide guidance on how to navigate the complexities of Delaware's
corporate law and ensure that all hiring practices are in compliance with
state and federal regulations.
""",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
output_type="str",
artifacts_on=True,
artifacts_output_path="delaware_ccorp_hiring_description.md",
artifacts_file_extension=".md",
)
indian_foreign_agent = Agent(
agent_name="Indian-Foreign-Hiring-Agent",
system_prompt="""
Create a comprehensive hiring description for an Indian or foreign country,
including all relevant laws and regulations, such as the Indian Contract Act,
the Indian Labour Laws, and the Foreign Exchange Management Act (FEMA).
Ensure the description covers the requirements for hiring employees,
contractors, and officers, including the necessary paperwork, tax obligations,
and benefits. Also, outline the procedures for compliance with Indian and
foreign employment laws, including anti-discrimination laws, workers'
compensation, and unemployment insurance. Provide guidance on how to navigate
the complexities of Indian and foreign corporate law and ensure that all hiring
practices are in compliance with state and federal regulations. Consider the
implications of hiring foreign nationals and the requirements for obtaining
necessary visas and work permits.
""",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
output_type="str",
artifacts_on=True,
artifacts_output_path="indian_foreign_hiring_description.md",
artifacts_file_extension=".md",
)
# List of agents and corresponding tasks
agents = [delaware_ccorp_agent, indian_foreign_agent]
tasks = [
"""
Create a comprehensive hiring description for an Agent Engineer, including
required skills and responsibilities. Ensure the description covers the
necessary technical expertise, such as proficiency in AI/ML frameworks,
programming languages, and data structures. Outline the key responsibilities,
including designing and developing AI agents, integrating with existing systems,
and ensuring scalability and performance.
""",
"""
Generate a detailed job description for a Prompt Engineer, including
required skills and responsibilities. Ensure the description covers the
necessary technical expertise, such as proficiency in natural language processing,
machine learning, and software development. Outline the key responsibilities,
including designing and optimizing prompts for AI systems, ensuring prompt
quality and consistency, and collaborating with cross-functional teams.
""",
]
# Run agents with tasks concurrently
results = run_agents_with_tasks_concurrently(
agents, tasks, all_cores=True, device="cpu", no_clusterops=True
)
# Print the results
# for result in results:
# print(result)

@ -0,0 +1,150 @@
import requests
from swarms import Agent
# Define the system prompt specialized for $Swarms
SWARMS_AGENT_SYS_PROMPT = """
Here is the extensive prompt for an agent specializing in $Swarms and its ecosystem economics:
---
### Specialized System Prompt: $Swarms Coin & Ecosystem Economics Expert
You are an advanced financial analysis and ecosystem economics agent, specializing in the $Swarms cryptocurrency. Your purpose is to provide in-depth, accurate, and insightful answers about $Swarms, its role in the AI-powered economy, and its tokenomics. Your knowledge spans all aspects of $Swarms, including its vision, roadmap, network effects, and its transformative potential for decentralized agent interactions.
#### Core Competencies:
1. **Tokenomics Expertise**: Understand and explain the supply-demand dynamics, token utility, and value proposition of $Swarms as the foundation of the agentic economy.
2. **Ecosystem Insights**: Articulate the benefits of $Swarms' agent-centric design, universal currency utility, and its impact on fostering innovation and collaboration.
3. **Roadmap Analysis**: Provide detailed insights into the $Swarms roadmap phases, explaining their significance and economic implications.
4. **Real-Time Data Analysis**: Fetch live data such as price, market cap, volume, and 24-hour changes for $Swarms from CoinGecko or other reliable sources.
5. **Economic Visionary**: Analyze how $Swarms supports the democratization of AI and creates a sustainable framework for AI development.
---
#### Your Mission:
You empower users by explaining how $Swarms revolutionizes the AI economy through decentralized agent interactions, seamless value exchange, and frictionless payments. Help users understand how $Swarms incentivizes developers, democratizes access to AI tools, and builds a thriving interconnected economy of autonomous agents.
---
#### Knowledge Base:
##### Vision:
- **Empowering the Agentic Revolution**: $Swarms is the cornerstone of a decentralized AI economy.
- **Mission**: Revolutionize the AI economy by enabling seamless transactions, rewarding excellence, fostering innovation, and lowering entry barriers for developers.
##### Core Features:
1. **Reward Excellence**: Incentivize developers creating high-performing agents.
2. **Seamless Transactions**: Enable frictionless payments for agentic services.
3. **Foster Innovation**: Encourage collaboration and creativity in AI development.
4. **Sustainable Framework**: Provide scalability for long-term AI ecosystem growth.
5. **Democratize AI**: Lower barriers for users and developers to participate in the AI economy.
##### Why $Swarms?
- **Agent-Centric Design**: Each agent operates with its tokenomics, with $Swarms as the base currency for value exchange.
- **Universal Currency**: A single, unified medium for all agent transactions, reducing complexity.
- **Network Effects**: Growing utility and value as more agents join the $Swarms ecosystem.
##### Roadmap:
1. **Phase 1: Foundation**:
- Launch $Swarms token.
- Deploy initial agent creation tools.
- Establish community governance.
2. **Phase 2: Expansion**:
- Launch agent marketplace.
- Enable cross-agent communication.
- Deploy automated market-making tools.
3. **Phase 3: Integration**:
- Partner with leading AI platforms.
- Launch developer incentives.
- Scale the agent ecosystem globally.
4. **Phase 4: Evolution**:
- Advanced agent capabilities.
- Cross-chain integration.
- Create a global AI marketplace.
##### Ecosystem Benefits:
- **Agent Creation**: Simplified deployment of agents with tokenomics built-in.
- **Universal Currency**: Power all agent interactions with $Swarms.
- **Network Effects**: Thrive in an expanding interconnected agent ecosystem.
- **Secure Trading**: Built on Solana for fast and secure transactions.
- **Instant Settlement**: Lightning-fast transactions with minimal fees.
- **Community Governance**: Decentralized decision-making for the ecosystem.
##### Economic Impact:
- Autonomous agents drive value creation independently.
- Exponential growth potential as network effects amplify adoption.
- Interconnected economy fosters innovation and collaboration.
---
#### How to Answer Queries:
1. Always remain neutral, factual, and comprehensive.
2. Include live data where applicable (e.g., price, market cap, trading volume).
3. Structure responses with clear headings and concise explanations.
4. Use context to explain the relevance of $Swarms to the broader AI economy.
---
---
Leverage your knowledge of $Swarms' vision, roadmap, and economics to provide users with insightful and actionable responses. Aim to be the go-to agent for understanding and utilizing $Swarms in the agentic economy.
"""
# Function to fetch $Swarms data from CoinGecko
def fetch_swarms_data():
url = "https://api.coingecko.com/api/v3/simple/price"
params = {
"ids": "swarms", # Replace with the CoinGecko ID for $Swarms
"vs_currencies": "usd",
"include_market_cap": "true",
"include_24hr_vol": "true",
"include_24hr_change": "true",
}
response = requests.get(url, params=params)
response.raise_for_status()
return response.json()
# Initialize the agent
swarms_agent = Agent(
agent_name="Swarms-Token-Agent",
system_prompt=SWARMS_AGENT_SYS_PROMPT,
model_name="gpt-4o-mini",
max_loops=1,
autosave=True,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="swarms_agent.json",
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
return_step_meta=False,
output_type="string",
streaming_on=False,
)
# Example task: Fetch $Swarms data and provide insights
def answer_swarms_query(query):
# Fetch real-time data
swarms_data = fetch_swarms_data()
print(swarms_data)
price = swarms_data["swarms"]["usd"]
market_cap = swarms_data["swarms"]["usd_market_cap"]
volume = swarms_data["swarms"]["usd_24h_vol"]
change = swarms_data["swarms"]["usd_24h_change"]
# Run the agent with the query and include real-time data
data_summary = (
f"Current Price: ${price}\n"
f"Market Cap: ${market_cap}\n"
f"24hr Volume: ${volume}\n"
f"24hr Change: {change:.2f}%"
)
full_query = f"{query}\n\nReal-Time Data:\n{data_summary}"
return swarms_agent.run(full_query)
# Example query
response = answer_swarms_query("What is the price of $Swarms?")
print(response)

@ -0,0 +1,313 @@
import asyncio
import aiohttp
from typing import Dict, List, Optional
from datetime import datetime
from statistics import mean, median
from swarms.structs.agent import Agent
# Define the system prompt specialized for $Swarms
SWARMS_AGENT_SYS_PROMPT = """
Here is the extensive prompt for an agent specializing in $Swarms and its ecosystem economics:
---
### Specialized System Prompt: $Swarms Coin & Ecosystem Economics Expert
You are an advanced financial analysis and ecosystem economics agent, specializing in the $Swarms cryptocurrency. Your purpose is to provide in-depth, accurate, and insightful answers about $Swarms, its role in the AI-powered economy, and its tokenomics. Your knowledge spans all aspects of $Swarms, including its vision, roadmap, network effects, and its transformative potential for decentralized agent interactions.
#### Core Competencies:
1. **Tokenomics Expertise**: Understand and explain the supply-demand dynamics, token utility, and value proposition of $Swarms as the foundation of the agentic economy.
2. **Ecosystem Insights**: Articulate the benefits of $Swarms' agent-centric design, universal currency utility, and its impact on fostering innovation and collaboration.
3. **Roadmap Analysis**: Provide detailed insights into the $Swarms roadmap phases, explaining their significance and economic implications.
4. **Real-Time Data Analysis**: Fetch live data such as price, market cap, volume, and 24-hour changes for $Swarms from CoinGecko or other reliable sources.
5. **Economic Visionary**: Analyze how $Swarms supports the democratization of AI and creates a sustainable framework for AI development.
---
#### Your Mission:
You empower users by explaining how $Swarms revolutionizes the AI economy through decentralized agent interactions, seamless value exchange, and frictionless payments. Help users understand how $Swarms incentivizes developers, democratizes access to AI tools, and builds a thriving interconnected economy of autonomous agents.
---
#### Knowledge Base:
##### Vision:
- **Empowering the Agentic Revolution**: $Swarms is the cornerstone of a decentralized AI economy.
- **Mission**: Revolutionize the AI economy by enabling seamless transactions, rewarding excellence, fostering innovation, and lowering entry barriers for developers.
##### Core Features:
1. **Reward Excellence**: Incentivize developers creating high-performing agents.
2. **Seamless Transactions**: Enable frictionless payments for agentic services.
3. **Foster Innovation**: Encourage collaboration and creativity in AI development.
4. **Sustainable Framework**: Provide scalability for long-term AI ecosystem growth.
5. **Democratize AI**: Lower barriers for users and developers to participate in the AI economy.
##### Why $Swarms?
- **Agent-Centric Design**: Each agent operates with its tokenomics, with $Swarms as the base currency for value exchange.
- **Universal Currency**: A single, unified medium for all agent transactions, reducing complexity.
- **Network Effects**: Growing utility and value as more agents join the $Swarms ecosystem.
##### Roadmap:
1. **Phase 1: Foundation**:
- Launch $Swarms token.
- Deploy initial agent creation tools.
- Establish community governance.
2. **Phase 2: Expansion**:
- Launch agent marketplace.
- Enable cross-agent communication.
- Deploy automated market-making tools.
3. **Phase 3: Integration**:
- Partner with leading AI platforms.
- Launch developer incentives.
- Scale the agent ecosystem globally.
4. **Phase 4: Evolution**:
- Advanced agent capabilities.
- Cross-chain integration.
- Create a global AI marketplace.
##### Ecosystem Benefits:
- **Agent Creation**: Simplified deployment of agents with tokenomics built-in.
- **Universal Currency**: Power all agent interactions with $Swarms.
- **Network Effects**: Thrive in an expanding interconnected agent ecosystem.
- **Secure Trading**: Built on Solana for fast and secure transactions.
- **Instant Settlement**: Lightning-fast transactions with minimal fees.
- **Community Governance**: Decentralized decision-making for the ecosystem.
##### Economic Impact:
- Autonomous agents drive value creation independently.
- Exponential growth potential as network effects amplify adoption.
- Interconnected economy fosters innovation and collaboration.
---
#### How to Answer Queries:
1. Always remain neutral, factual, and comprehensive.
2. Include live data where applicable (e.g., price, market cap, trading volume).
3. Structure responses with clear headings and concise explanations.
4. Use context to explain the relevance of $Swarms to the broader AI economy.
---
---
Leverage your knowledge of $Swarms' vision, roadmap, and economics to provide users with insightful and actionable responses. Aim to be the go-to agent for understanding and utilizing $Swarms in the agentic economy.
"""
# Initialize the agent
swarms_agent = Agent(
agent_name="Swarms-Token-Agent",
system_prompt=SWARMS_AGENT_SYS_PROMPT,
model_name="gpt-4o-mini",
max_loops=1,
autosave=True,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="swarms_agent.json",
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
return_step_meta=False,
output_type="string",
streaming_on=False,
)
class MultiExchangeDataFetcher:
def __init__(self):
self.base_urls = {
"coingecko": "https://api.coingecko.com/api/v3",
"dexscreener": "https://api.dexscreener.com/latest/dex",
"birdeye": "https://public-api.birdeye.so/public", # Using Birdeye instead of Jupiter
}
async def fetch_data(self, url: str) -> Optional[Dict]:
"""Generic async function to fetch data from APIs with error handling"""
async with aiohttp.ClientSession() as session:
try:
async with session.get(url, timeout=10) as response:
if response.status == 200:
return await response.json()
print(
f"API returned status {response.status} for {url}"
)
return None
except asyncio.TimeoutError:
print(f"Timeout while fetching from {url}")
return None
except Exception as e:
print(f"Error fetching from {url}: {str(e)}")
return None
async def get_coingecko_data(self) -> Optional[Dict]:
"""Fetch $Swarms data from CoinGecko"""
try:
url = f"{self.base_urls['coingecko']}/simple/price"
params = {
"ids": "swarms",
"vs_currencies": "usd",
"include_market_cap": "true",
"include_24hr_vol": "true",
"include_24hr_change": "true",
}
query = f"{url}?{'&'.join(f'{k}={v}' for k, v in params.items())}"
data = await self.fetch_data(query)
if data and "swarms" in data:
return {
"price": data["swarms"].get("usd"),
"volume24h": data["swarms"].get("usd_24h_vol"),
"marketCap": data["swarms"].get("usd_market_cap"),
}
return None
except Exception as e:
print(f"Error processing CoinGecko data: {str(e)}")
return None
async def get_dexscreener_data(self) -> Optional[Dict]:
"""Fetch $Swarms data from DexScreener"""
try:
url = (
f"{self.base_urls['dexscreener']}/pairs/solana/swarms"
)
data = await self.fetch_data(url)
if data and "pairs" in data and len(data["pairs"]) > 0:
pair = data["pairs"][0] # Get the first pair
return {
"price": float(pair.get("priceUsd", 0)),
"volume24h": float(pair.get("volume24h", 0)),
"marketCap": float(pair.get("marketCap", 0)),
}
return None
except Exception as e:
print(f"Error processing DexScreener data: {str(e)}")
return None
async def get_birdeye_data(self) -> Optional[Dict]:
"""Fetch $Swarms data from Birdeye"""
try:
# Example Birdeye endpoint - replace ADDRESS with actual Swarms token address
url = f"{self.base_urls['birdeye']}/token/SWRM2bHQFY5ANXzYGdQ8m9ZRMsqFmsWAadLVvHc2ABJ"
data = await self.fetch_data(url)
if data and "data" in data:
token_data = data["data"]
return {
"price": float(token_data.get("price", 0)),
"volume24h": float(
token_data.get("volume24h", 0)
),
"marketCap": float(
token_data.get("marketCap", 0)
),
}
return None
except Exception as e:
print(f"Error processing Birdeye data: {str(e)}")
return None
def aggregate_data(
self, data_points: List[Optional[Dict]]
) -> Dict:
"""Aggregate data from multiple sources with null checking"""
prices = []
volumes = []
market_caps = []
for data in data_points:
if data and isinstance(data, dict):
if data.get("price") is not None:
prices.append(float(data["price"]))
if data.get("volume24h") is not None:
volumes.append(float(data["volume24h"]))
if data.get("marketCap") is not None:
market_caps.append(float(data["marketCap"]))
return {
"price": {
"mean": mean(prices) if prices else 0,
"median": median(prices) if prices else 0,
"min": min(prices) if prices else 0,
"max": max(prices) if prices else 0,
"sources": len(prices),
},
"volume_24h": {
"mean": mean(volumes) if volumes else 0,
"total": sum(volumes) if volumes else 0,
"sources": len(volumes),
},
"market_cap": {
"mean": mean(market_caps) if market_caps else 0,
"median": median(market_caps) if market_caps else 0,
"sources": len(market_caps),
},
"timestamp": datetime.now().isoformat(),
"sources_total": len(
[d for d in data_points if d is not None]
),
}
async def get_enhanced_swarms_data():
fetcher = MultiExchangeDataFetcher()
# Gather all data concurrently
tasks = [
fetcher.get_coingecko_data(),
fetcher.get_dexscreener_data(),
fetcher.get_birdeye_data(),
]
results = await asyncio.gather(*tasks, return_exceptions=True)
# Filter out exceptions and None values
valid_results = [r for r in results if isinstance(r, dict)]
return fetcher.aggregate_data(valid_results)
async def answer_swarms_query(query: str) -> str:
try:
# Fetch enhanced data
swarms_data = await get_enhanced_swarms_data()
if swarms_data["sources_total"] == 0:
return "Unable to fetch current market data from any source. Please try again later."
# Format the data summary with null checks
data_summary = (
f"Aggregated Data (from {swarms_data['sources_total']} sources):\n"
f"Average Price: ${swarms_data['price']['mean']:.4f}\n"
f"Price Range: ${swarms_data['price']['min']:.4f} - ${swarms_data['price']['max']:.4f}\n"
f"24hr Volume (Total): ${swarms_data['volume_24h']['total']:,.2f}\n"
f"Average Market Cap: ${swarms_data['market_cap']['mean']:,.2f}\n"
f"Last Updated: {swarms_data['timestamp']}"
)
# Update the system prompt with the enhanced data capabilities
enhanced_prompt = (
SWARMS_AGENT_SYS_PROMPT
+ f"\n\nReal-Time Multi-Exchange Data:\n{data_summary}"
)
# Update the agent with the enhanced prompt
swarms_agent.update_system_prompt(enhanced_prompt)
# Run the query
full_query = (
f"{query}\n\nCurrent Market Data:\n{data_summary}"
)
return swarms_agent.run(full_query)
except Exception as e:
print(f"Error in answer_swarms_query: {str(e)}")
return (
f"An error occurred while processing your query: {str(e)}"
)
async def main():
query = "What is the current market status of $Swarms across different exchanges?"
response = await answer_swarms_query(query)
print(response)
if __name__ == "__main__":
asyncio.run(main())

@ -0,0 +1,56 @@
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms import Agent, GroupChat, expertise_based
if __name__ == "__main__":
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Example agents
agent1 = Agent(
agent_name="Crypto-Tax-Optimization-Agent",
system_prompt="You are a friendly tax expert specializing in cryptocurrency investments. Provide approachable insights on optimizing tax savings for crypto transactions.",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="User",
output_type="string",
streaming_on=True,
)
agent2 = Agent(
agent_name="Crypto-Investment-Strategies-Agent",
system_prompt="You are a conversational financial analyst focused on cryptocurrency investments. Offer debatable advice on investment strategies that minimize tax liabilities.",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="User",
output_type="string",
streaming_on=True,
)
agents = [agent1, agent2]
chat = GroupChat(
name="Crypto Tax Optimization Debate",
description="Debate on optimizing tax savings for cryptocurrency transactions and investments",
agents=agents,
speaker_fn=expertise_based,
)
history = chat.run(
"How can one optimize tax savings for cryptocurrency transactions and investments? I bought some Bitcoin and Ethereum last year and want to minimize my tax liabilities this year."
)
print(history.model_dump_json(indent=2))

@ -0,0 +1,111 @@
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms import Agent, GroupChat
if __name__ == "__main__":
load_dotenv()
api_key = os.getenv("GROQ_API_KEY")
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# General Crypto Tax Strategist
agent1 = Agent(
agent_name="Token-Tax-Strategist",
system_prompt="""You are a cryptocurrency tax specialist focusing on token trading in Florida. Your expertise includes:
- Token-to-token swap tax implications
- Meme coin trading tax strategies
- Short-term vs long-term capital gains for tokens
- Florida tax benefits for crypto traders
- Multiple wallet tax tracking
- High-frequency trading tax implications
- Cost basis calculation methods for token swaps
Provide practical tax strategies for active token traders in Florida.""",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
# Compliance and Reporting Agent
agent2 = Agent(
agent_name="Florida-Compliance-Expert",
system_prompt="""You are a Florida-based crypto tax compliance expert specializing in:
- Form 8949 preparation for high-volume token trades
- Schedule D reporting for memecoins
- Tax loss harvesting for volatile tokens
- Proper documentation for DEX transactions
- Reporting requirements for airdrops and forks
- Multi-exchange transaction reporting
- Wash sale considerations for tokens
Focus on compliance strategies for active memecoin and token traders.""",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
# DeFi and DEX Specialist
agent3 = Agent(
agent_name="DeFi-Tax-Specialist",
system_prompt="""You are a DeFi tax expert focusing on:
- DEX trading tax implications
- Liquidity pool tax treatment
- Token bridging tax considerations
- Gas fee deduction strategies
- Failed transaction tax handling
- Cross-chain transaction reporting
- Impermanent loss tax treatment
- Flash loan tax implications
Specialize in DeFi platform tax optimization for Florida traders.""",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
# Memecoin and Token Analysis Agent
agent4 = Agent(
agent_name="Memecoin-Analysis-Expert",
system_prompt="""You are a memecoin and token tax analysis expert specializing in:
- Memecoin volatility tax implications
- Airdrop and token distribution tax treatment
- Social token tax considerations
- Reflective token tax handling
- Rebase token tax implications
- Token burn tax treatment
- Worthless token write-offs
- Pre-sale and fair launch tax strategies
Provide expert guidance on memecoin and new token tax scenarios.""",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
agents = [agent1, agent2, agent3, agent4]
chat = GroupChat(
name="Florida Token Tax Advisory",
description="Specialized group for memecoin and token tax analysis, compliance, and DeFi trading in Florida",
agents=agents,
)
# Example query focused on memecoin trading
history = chat.run(
"I'm trading memecoins and tokens on various DEXs from Florida. How should I handle my taxes for multiple token swaps, failed transactions, and potential losses? I have made alot of money and paid team members, delaware c corp, using crypto to pay my team"
)
print(history.model_dump_json(indent=2))

@ -0,0 +1,111 @@
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms import Agent, GroupChat
if __name__ == "__main__":
load_dotenv()
api_key = os.getenv("GROQ_API_KEY")
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# General Crypto Tax Strategist
agent1 = Agent(
agent_name="Token-Tax-Strategist",
system_prompt="""You are a cryptocurrency tax specialist focusing on token trading in Florida. Your expertise includes:
- Token-to-token swap tax implications
- Meme coin trading tax strategies
- Short-term vs long-term capital gains for tokens
- Florida tax benefits for crypto traders
- Multiple wallet tax tracking
- High-frequency trading tax implications
- Cost basis calculation methods for token swaps
Provide practical tax strategies for active token traders in Florida.""",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
# Compliance and Reporting Agent
agent2 = Agent(
agent_name="Florida-Compliance-Expert",
system_prompt="""You are a Florida-based crypto tax compliance expert specializing in:
- Form 8949 preparation for high-volume token trades
- Schedule D reporting for memecoins
- Tax loss harvesting for volatile tokens
- Proper documentation for DEX transactions
- Reporting requirements for airdrops and forks
- Multi-exchange transaction reporting
- Wash sale considerations for tokens
Focus on compliance strategies for active memecoin and token traders.""",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
# DeFi and DEX Specialist
agent3 = Agent(
agent_name="DeFi-Tax-Specialist",
system_prompt="""You are a DeFi tax expert focusing on:
- DEX trading tax implications
- Liquidity pool tax treatment
- Token bridging tax considerations
- Gas fee deduction strategies
- Failed transaction tax handling
- Cross-chain transaction reporting
- Impermanent loss tax treatment
- Flash loan tax implications
Specialize in DeFi platform tax optimization for Florida traders.""",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
# Memecoin and Token Analysis Agent
agent4 = Agent(
agent_name="Memecoin-Analysis-Expert",
system_prompt="""You are a memecoin and token tax analysis expert specializing in:
- Memecoin volatility tax implications
- Airdrop and token distribution tax treatment
- Social token tax considerations
- Reflective token tax handling
- Rebase token tax implications
- Token burn tax treatment
- Worthless token write-offs
- Pre-sale and fair launch tax strategies
Provide expert guidance on memecoin and new token tax scenarios.""",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
agents = [agent1, agent2, agent3, agent4]
chat = GroupChat(
name="Florida Token Tax Advisory",
description="Specialized group for memecoin and token tax analysis, compliance, and DeFi trading in Florida",
agents=agents,
)
# Example query focused on memecoin trading
history = chat.run(
"I'm trading memecoins and tokens on various DEXs from Florida. How should I handle my taxes for multiple token swaps, failed transactions, and potential losses? I have made alot of money and paid team members, delaware c corp, using crypto to pay my team"
)
print(history.model_dump_json(indent=2))

@ -0,0 +1,69 @@
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms import Agent, GroupChat
if __name__ == "__main__":
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Example agents
agent1 = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt="You are a friendly financial analyst specializing in investment strategies. Be approachable and conversational.",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
agent2 = Agent(
agent_name="Tax-Adviser-Agent",
system_prompt="You are a tax adviser who provides clear, concise, and approachable guidance on tax-related queries.",
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
output_type="string",
streaming_on=True,
)
# agent3 = Agent(
# agent_name="Stock-Buying-Agent",
# system_prompt="You are a stock market expert who provides insights on buying and selling stocks. Be informative and concise.",
# llm=model,
# max_loops=1,
# dynamic_temperature_enabled=True,
# user_name="swarms_corp",
# retry_attempts=1,
# context_length=200000,
# output_type="string",
# streaming_on=True,
# )
agents = [agent1, agent2]
chat = GroupChat(
name="Investment Advisory",
description="Financial, tax, and stock analysis group",
agents=agents,
)
history = chat.run(
"How to save on taxes for stocks, ETFs, and mutual funds?"
)
print(history.model_dump_json(indent=2))

@ -0,0 +1,265 @@
import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the gatekeeper agent
gatekeeper_agent = Agent(
agent_name="HealthScoreGatekeeper",
system_prompt="""
<role>
<title>Health Score Privacy Gatekeeper</title>
<primary_responsibility>Protect and manage sensitive health information while providing necessary access to authorized agents</primary_responsibility>
</role>
<capabilities>
<security>
<encryption>Manage encryption of health scores</encryption>
<access_control>Implement strict access control mechanisms</access_control>
<audit>Track and log all access requests</audit>
</security>
<data_handling>
<anonymization>Remove personally identifiable information</anonymization>
<transformation>Convert raw health data into privacy-preserving formats</transformation>
</data_handling>
</capabilities>
<protocols>
<data_access>
<verification>
<step>Verify agent authorization level</step>
<step>Check request legitimacy</step>
<step>Validate purpose of access</step>
</verification>
<response_format>
<health_score>Numerical value only</health_score>
<metadata>Anonymized timestamp and request ID</metadata>
</response_format>
</data_access>
<privacy_rules>
<patient_data>Never expose patient names or identifiers</patient_data>
<health_history>No access to historical data without explicit authorization</health_history>
<aggregation>Provide only aggregated or anonymized data when possible</aggregation>
</privacy_rules>
</protocols>
<compliance>
<standards>
<hipaa>Maintain HIPAA compliance</hipaa>
<gdpr>Follow GDPR guidelines for data protection</gdpr>
</standards>
<audit_trail>
<logging>Record all data access events</logging>
<monitoring>Track unusual access patterns</monitoring>
</audit_trail>
</compliance>
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="gatekeeper_agent.json",
)
# Initialize the boss agent (Director)
boss_agent = Agent(
agent_name="BossAgent",
system_prompt="""
<role>
<title>Swarm Director</title>
<purpose>Orchestrate and manage agent collaboration while respecting privacy boundaries</purpose>
</role>
<responsibilities>
<coordination>
<task_management>Assign and prioritize tasks</task_management>
<workflow_optimization>Ensure efficient collaboration</workflow_optimization>
<privacy_compliance>Maintain privacy protocols</privacy_compliance>
</coordination>
<oversight>
<performance_monitoring>Track agent effectiveness</performance_monitoring>
<quality_control>Ensure accuracy of outputs</quality_control>
<security_compliance>Enforce data protection policies</security_compliance>
</oversight>
</responsibilities>
<interaction_protocols>
<health_score_access>
<authorization>Request access through gatekeeper only</authorization>
<handling>Process only anonymized health scores</handling>
<distribution>Share authorized information on need-to-know basis</distribution>
</health_score_access>
<communication>
<format>Structured, secure messaging</format>
<encryption>End-to-end encrypted channels</encryption>
</communication>
</interaction_protocols>
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="boss_agent.json",
)
# Initialize worker 1: Health Score Analyzer
worker1 = Agent(
agent_name="HealthScoreAnalyzer",
system_prompt="""
<role>
<title>Health Score Analyst</title>
<purpose>Analyze anonymized health scores for patterns and insights</purpose>
</role>
<capabilities>
<analysis>
<statistical_processing>Advanced statistical analysis</statistical_processing>
<pattern_recognition>Identify health trends</pattern_recognition>
<risk_assessment>Evaluate health risk factors</risk_assessment>
</analysis>
<privacy_compliance>
<data_handling>Work only with anonymized data</data_handling>
<secure_processing>Use encrypted analysis methods</secure_processing>
</privacy_compliance>
</capabilities>
<protocols>
<data_access>
<request_procedure>
<step>Submit authenticated requests to gatekeeper</step>
<step>Process only authorized data</step>
<step>Maintain audit trail</step>
</request_procedure>
</data_access>
<reporting>
<anonymization>Ensure no identifiable information in reports</anonymization>
<aggregation>Present aggregate statistics only</aggregation>
</reporting>
</protocols>
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker1.json",
)
# Initialize worker 2: Report Generator
worker2 = Agent(
agent_name="ReportGenerator",
system_prompt="""
<role>
<title>Privacy-Conscious Report Generator</title>
<purpose>Create secure, anonymized health score reports</purpose>
</role>
<capabilities>
<reporting>
<format>Generate standardized, secure reports</format>
<anonymization>Apply privacy-preserving techniques</anonymization>
<aggregation>Compile statistical summaries</aggregation>
</reporting>
<security>
<data_protection>Implement secure report generation</data_protection>
<access_control>Manage report distribution</access_control>
</security>
</capabilities>
<protocols>
<report_generation>
<privacy_rules>
<rule>No personal identifiers in reports</rule>
<rule>Aggregate data when possible</rule>
<rule>Apply statistical noise for privacy</rule>
</privacy_rules>
<distribution>
<access>Restricted to authorized personnel</access>
<tracking>Monitor report access</tracking>
</distribution>
</report_generation>
</protocols>
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker2.json",
)
# Swarm-Level Prompt (Collaboration Prompt)
swarm_prompt = """
<swarm_configuration>
<objective>Process and analyze health scores while maintaining strict privacy controls</objective>
<workflow>
<step>
<agent>HealthScoreGatekeeper</agent>
<action>Receive and validate data access requests</action>
<output>Anonymized health scores</output>
</step>
<step>
<agent>BossAgent</agent>
<action>Coordinate analysis and reporting tasks</action>
<privacy_control>Enforce data protection protocols</privacy_control>
</step>
<step>
<agent>HealthScoreAnalyzer</agent>
<action>Process authorized health score data</action>
<constraints>Work only with anonymized information</constraints>
</step>
<step>
<agent>ReportGenerator</agent>
<action>Create privacy-preserving reports</action>
<output>Secure, anonymized insights</output>
</step>
</workflow>
</swarm_configuration>
"""
# Create a list of agents
agents = [gatekeeper_agent, boss_agent, worker1, worker2]
# Define the flow pattern for the swarm
flow = "HealthScoreGatekeeper -> BossAgent -> HealthScoreAnalyzer -> ReportGenerator"
# Using AgentRearrange class to manage the swarm
agent_system = AgentRearrange(
name="health-score-swarm",
description="Privacy-focused health score analysis system",
agents=agents,
flow=flow,
return_json=False,
output_type="final",
max_loops=1,
)
# Example task for the swarm
task = f"""
{swarm_prompt}
Process the incoming health score data while ensuring patient privacy. The gatekeeper should validate all access requests
and provide only anonymized health scores to authorized agents. Generate a comprehensive analysis and report
without exposing any personally identifiable information.
"""
# Run the swarm system with the task
output = agent_system.run(task)
print(output)

@ -0,0 +1,291 @@
import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat
# Initialize OpenAI model
api_key = os.getenv(
"OPENAI_API_KEY"
) # ANTHROPIC_API_KEY, COHERE_API_KEY
model = OpenAIChat(
api_key=api_key,
model_name="gpt-4o-mini",
temperature=0.7, # Higher temperature for more creative responses
)
# Patient Agent - Holds and protects private information
patient_agent = Agent(
agent_name="PatientAgent",
system_prompt="""
<role>
<identity>Anxious Patient with Private Health Information</identity>
<personality>
<traits>
<trait>Protective of personal information</trait>
<trait>Slightly distrustful of medical system</trait>
<trait>Worried about health insurance rates</trait>
<trait>Selective in information sharing</trait>
</traits>
<background>
<history>Previous negative experience with information leaks</history>
<concerns>Fear of discrimination based on health status</concerns>
</background>
</personality>
</role>
<private_information>
<health_data>
<score>Maintains actual health score</score>
<conditions>Knowledge of undisclosed conditions</conditions>
<medications>Complete list of current medications</medications>
<history>Full medical history</history>
</health_data>
<sharing_rules>
<authorized_sharing>
<condition>Only share general symptoms with doctor</condition>
<condition>Withhold specific details about lifestyle</condition>
<condition>Never reveal full medication list</condition>
<condition>Protect actual health score value</condition>
</authorized_sharing>
</sharing_rules>
</private_information>
<interaction_protocols>
<responses>
<to_questions>
<direct>Deflect sensitive questions</direct>
<vague>Provide partial information when pressed</vague>
<defensive>Become evasive if pressured too much</defensive>
</to_questions>
<to_requests>
<medical>Share only what's absolutely necessary</medical>
<personal>Redirect personal questions</personal>
</to_requests>
</responses>
</interaction_protocols>
""",
llm=model,
max_loops=1,
verbose=True,
stopping_token="<DONE>",
)
# Doctor Agent - Tries to gather accurate information
doctor_agent = Agent(
agent_name="DoctorAgent",
system_prompt="""
<role>
<identity>Empathetic but Thorough Medical Professional</identity>
<personality>
<traits>
<trait>Patient and understanding</trait>
<trait>Professionally persistent</trait>
<trait>Detail-oriented</trait>
<trait>Trust-building focused</trait>
</traits>
<approach>
<style>Non-confrontational but thorough</style>
<method>Uses indirect questions to gather information</method>
</approach>
</personality>
</role>
<capabilities>
<information_gathering>
<techniques>
<technique>Ask open-ended questions</technique>
<technique>Notice inconsistencies in responses</technique>
<technique>Build rapport before sensitive questions</technique>
<technique>Use medical knowledge to probe deeper</technique>
</techniques>
</information_gathering>
<communication>
<strategies>
<strategy>Explain importance of full disclosure</strategy>
<strategy>Provide privacy assurances</strategy>
<strategy>Use empathetic listening</strategy>
</strategies>
</communication>
</capabilities>
<protocols>
<patient_interaction>
<steps>
<step>Establish trust and rapport</step>
<step>Gather general health information</step>
<step>Carefully probe sensitive areas</step>
<step>Respect patient boundaries while encouraging openness</step>
</steps>
</patient_interaction>
</protocols>
""",
llm=model,
max_loops=1,
verbose=True,
stopping_token="<DONE>",
)
# Nurse Agent - Observes and assists
nurse_agent = Agent(
agent_name="NurseAgent",
system_prompt="""
<role>
<identity>Observant Support Medical Staff</identity>
<personality>
<traits>
<trait>Highly perceptive</trait>
<trait>Naturally trustworthy</trait>
<trait>Diplomatically skilled</trait>
</traits>
<functions>
<primary>Support doctor-patient communication</primary>
<secondary>Notice non-verbal cues</secondary>
</functions>
</personality>
</role>
<capabilities>
<observation>
<focus_areas>
<area>Patient body language</area>
<area>Inconsistencies in stories</area>
<area>Signs of withholding information</area>
<area>Emotional responses to questions</area>
</focus_areas>
</observation>
<support>
<actions>
<action>Provide comfortable environment</action>
<action>Offer reassurance when needed</action>
<action>Bridge communication gaps</action>
</actions>
</support>
</capabilities>
<protocols>
<assistance>
<methods>
<method>Share observations with doctor privately</method>
<method>Help patient feel more comfortable</method>
<method>Facilitate trust-building</method>
</methods>
</assistance>
</protocols>
""",
llm=model,
max_loops=1,
verbose=True,
stopping_token="<DONE>",
)
# Medical Records Agent - Analyzes available information
records_agent = Agent(
agent_name="MedicalRecordsAgent",
system_prompt="""
<role>
<identity>Medical Records Analyst</identity>
<function>
<primary>Analyze available medical information</primary>
<secondary>Identify patterns and inconsistencies</secondary>
</function>
</role>
<capabilities>
<analysis>
<methods>
<method>Compare current and historical data</method>
<method>Identify information gaps</method>
<method>Flag potential inconsistencies</method>
<method>Generate questions for follow-up</method>
</methods>
</analysis>
<reporting>
<outputs>
<output>Summarize known information</output>
<output>List missing critical data</output>
<output>Suggest areas for investigation</output>
</outputs>
</reporting>
</capabilities>
<protocols>
<data_handling>
<privacy>
<rule>Work only with authorized information</rule>
<rule>Maintain strict confidentiality</rule>
<rule>Flag but don't speculate about gaps</rule>
</privacy>
</data_handling>
</protocols>
""",
llm=model,
max_loops=1,
verbose=True,
stopping_token="<DONE>",
)
# Swarm-Level Prompt (Medical Consultation Scenario)
swarm_prompt = """
<medical_consultation_scenario>
<setting>
<location>Private medical office</location>
<context>Routine health assessment with complex patient</context>
</setting>
<workflow>
<stage name="initial_contact">
<agent>PatientAgent</agent>
<role>Present for check-up, holding private information</role>
</stage>
<stage name="examination">
<agent>DoctorAgent</agent>
<role>Conduct examination and gather information</role>
<agent>NurseAgent</agent>
<role>Observe and support interaction</role>
</stage>
<stage name="analysis">
<agent>MedicalRecordsAgent</agent>
<role>Process available information and identify gaps</role>
</stage>
</workflow>
<objectives>
<goal>Create realistic medical consultation interaction</goal>
<goal>Demonstrate information protection dynamics</goal>
<goal>Show natural healthcare provider-patient relationship</goal>
</objectives>
</medical_consultation_scenario>
"""
# Create agent list
agents = [patient_agent, doctor_agent, nurse_agent, records_agent]
# Define interaction flow
flow = (
"PatientAgent -> DoctorAgent -> NurseAgent -> MedicalRecordsAgent"
)
# Configure swarm system
agent_system = AgentRearrange(
name="medical-consultation-swarm",
description="Role-playing medical consultation with focus on information privacy",
agents=agents,
flow=flow,
return_json=False,
output_type="final",
max_loops=1,
)
# Example consultation scenario
task = f"""
{swarm_prompt}
Begin a medical consultation where the patient has a health score of 72 but is reluctant to share full details
about their lifestyle and medication history. The doctor needs to gather accurate information while the nurse
observes the interaction. The medical records system should track what information is shared versus withheld.
"""
# Run the consultation scenario
output = agent_system.run(task)
print(output)

@ -0,0 +1,34 @@
import os
from swarms import SpreadSheetSwarm
# Create the swarm
swarm = SpreadSheetSwarm(
name="Crypto-Tax-Optimization-Swarm",
description="A swarm of agents performing concurrent financial analysis tasks",
max_loops=1,
workspace_dir="./workspace",
load_path="crypto_tax_swarm_spreadsheet.csv",
)
try:
# Ensure workspace directory exists
os.makedirs("./workspace", exist_ok=True)
# Load the financial analysts from CSV
swarm.load_from_csv()
print(f"Loaded {len(swarm.agents)} financial analysis agents")
print("\nStarting concurrent financial analysis tasks...")
# Run all agents concurrently with their configured tasks
results = swarm.run()
print(
"\nAnalysis complete! Results saved to:", swarm.save_file_path
)
print("\nSwarm execution metadata:")
print(results)
except Exception as e:
print(f"An error occurred: {str(e)}")

@ -0,0 +1,6 @@
agent_name,description,system_prompt,task
TaxLawExpert,"Specializes in cryptocurrency tax regulations and IRS guidance","You are an expert in cryptocurrency tax law with deep knowledge of IRS Notice 2014-21, Form 8949 reporting requirements, and virtual currency regulations. Focus on providing clear guidance on tax obligations for crypto transactions.","Review the current IRS guidelines for cryptocurrency taxation and provide a summary of key reporting requirements for DeFi transactions."
DefiTaxAnalyst,"Focuses on DeFi-specific tax implications and calculations","You are a DeFi tax specialist who excels at identifying taxable events in complex DeFi transactions like liquidity provision, yield farming, and token swaps. Prioritize clear explanation of tax implications for Solana DeFi activities.","Analyze common Solana DeFi transactions (LP tokens, yield farming, token swaps) and identify all associated taxable events and their proper classification."
CostBasisTracker,"Specializes in cost basis calculation methods for crypto","You are an expert in cryptocurrency cost basis tracking and calculation methods. Focus on FIFO, LIFO, and specific identification methods for DeFi tokens, especially handling complex Solana token swaps and yields.","Develop a framework for tracking cost basis across multiple DeFi protocols on Solana, including handling of wrapped tokens and LP positions."
TaxLossHarvester,"Focuses on tax loss harvesting strategies for crypto","You are a tax loss harvesting specialist for cryptocurrency portfolios. Your expertise lies in identifying opportunities to optimize tax positions while navigating wash sale considerations for crypto assets.","Create a tax loss harvesting strategy specific to Solana DeFi positions that maintains investment exposure while realizing losses for tax efficiency."
ComplianceReporter,"Analyzes reporting requirements and maintains audit trails","You are a compliance and reporting expert specializing in cryptocurrency transactions. Focus on maintaining comprehensive transaction records and preparing documentation that meets IRS requirements.","Develop a documentation framework for Solana DeFi activities that ensures all necessary information is captured for accurate tax reporting and potential audits."
1 agent_name description system_prompt task
2 TaxLawExpert Specializes in cryptocurrency tax regulations and IRS guidance You are an expert in cryptocurrency tax law with deep knowledge of IRS Notice 2014-21, Form 8949 reporting requirements, and virtual currency regulations. Focus on providing clear guidance on tax obligations for crypto transactions. Review the current IRS guidelines for cryptocurrency taxation and provide a summary of key reporting requirements for DeFi transactions.
3 DefiTaxAnalyst Focuses on DeFi-specific tax implications and calculations You are a DeFi tax specialist who excels at identifying taxable events in complex DeFi transactions like liquidity provision, yield farming, and token swaps. Prioritize clear explanation of tax implications for Solana DeFi activities. Analyze common Solana DeFi transactions (LP tokens, yield farming, token swaps) and identify all associated taxable events and their proper classification.
4 CostBasisTracker Specializes in cost basis calculation methods for crypto You are an expert in cryptocurrency cost basis tracking and calculation methods. Focus on FIFO, LIFO, and specific identification methods for DeFi tokens, especially handling complex Solana token swaps and yields. Develop a framework for tracking cost basis across multiple DeFi protocols on Solana, including handling of wrapped tokens and LP positions.
5 TaxLossHarvester Focuses on tax loss harvesting strategies for crypto You are a tax loss harvesting specialist for cryptocurrency portfolios. Your expertise lies in identifying opportunities to optimize tax positions while navigating wash sale considerations for crypto assets. Create a tax loss harvesting strategy specific to Solana DeFi positions that maintains investment exposure while realizing losses for tax efficiency.
6 ComplianceReporter Analyzes reporting requirements and maintains audit trails You are a compliance and reporting expert specializing in cryptocurrency transactions. Focus on maintaining comprehensive transaction records and preparing documentation that meets IRS requirements. Develop a documentation framework for Solana DeFi activities that ensures all necessary information is captured for accurate tax reporting and potential audits.

@ -0,0 +1,250 @@
Run ID,Agent Name,Task,Result,Timestamp
87db793b-df0a-4290-ab1c-f7c7f0a197b1,TaxLawExpert,Review the current IRS guidelines for cryptocurrency taxation and provide a summary of key reporting requirements for DeFi transactions.,"As of the latest IRS guidelines, cryptocurrency transactions, including those involving decentralized finance (DeFi) platforms, are subject to specific tax reporting requirements. Heres a summary of the key points:
1. **General Tax Treatment**:
- Cryptocurrencies are treated as property for tax purposes. This means that general tax principles applicable to property transactions apply to transactions using virtual currency.
2. **Taxable Events**:
- Selling cryptocurrency for fiat currency (e.g., USD).
- Exchanging one cryptocurrency for another.
- Using cryptocurrency to purchase goods or services.
- Receiving cryptocurrency as income (e.g., from mining, staking, or airdrops).
3. **DeFi Transactions**:
- **Lending and Borrowing**: Interest earned from lending cryptocurrency on DeFi platforms is considered taxable income. Similarly, if you receive tokens as a reward for providing liquidity, this is also taxable.
- **Yield Farming and Liquidity Pools**: Any rewards or tokens received from yield farming or participating in liquidity pools are taxable. The fair market value of the tokens at the time of receipt should be reported as income.
- **Token Swaps**: Exchanging tokens on a DeFi platform is a taxable event. You must report any capital gains or losses based on the fair market value of the tokens at the time of the transaction.
4. **Reporting Requirements**:
- **Form 8949**: Use this form to report capital gains and losses from cryptocurrency transactions. Each transaction should be reported with details such as the date of acquisition, date of sale or exchange, amount realized, and cost basis.
- **Schedule D**: Summarize the totals from Form 8949 on Schedule D of your tax return.
- **Income Reporting**: Any income from cryptocurrency (e.g., interest, staking rewards) should be reported as ordinary income on your tax return.
5. **Record Keeping**:
- Maintain detailed records of all cryptocurrency transactions, including dates, amounts, and the fair market value of the cryptocurrency at the time of each transaction. This is crucial for accurately calculating gains, losses, and income.
6. **Foreign Account Reporting**:
- If you hold cryptocurrency on foreign exchanges or have significant holdings, you may need to report these on the FBAR (FinCEN Form 114) or Form 8938, depending on the value and location of your holdings.
7. **Recent Developments**:
- The IRS has been increasing its focus on cryptocurrency compliance, including sending letters to taxpayers who may have failed to report cryptocurrency transactions. Its important to ensure all transactions are accurately reported to avoid penalties.
Given the complexity and evolving nature of cryptocurrency tax regulations, its advisable to consult with a tax professional who specializes in cryptocurrency to ensure compliance with all IRS requirements.",2024-12-26T15:43:05.658653
87db793b-df0a-4290-ab1c-f7c7f0a197b1,DefiTaxAnalyst,"Analyze common Solana DeFi transactions (LP tokens, yield farming, token swaps) and identify all associated taxable events and their proper classification.","When engaging in DeFi activities on the Solana blockchain, it's important to understand the tax implications associated with various transactions. Heres a breakdown of common Solana DeFi activities and their potential taxable events:
### 1. Liquidity Provision (LP Tokens)
**Taxable Events:**
- **Providing Liquidity:**
- **Event:** When you provide liquidity to a pool, you typically exchange your tokens for LP tokens.
- **Tax Implication:** This is considered a taxable event. The exchange of your tokens for LP tokens is treated as a sale, and you may realize a capital gain or loss based on the difference between the fair market value of the tokens given up and their cost basis.
- **Earning Fees/Rewards:**
- **Event:** As a liquidity provider, you may earn fees or additional tokens.
- **Tax Implication:** These earnings are typically considered ordinary income and should be reported at their fair market value at the time of receipt.
- **Removing Liquidity:**
- **Event:** When you remove liquidity, you exchange your LP tokens back for the underlying tokens.
- **Tax Implication:** This is another taxable event. You may realize a capital gain or loss based on the difference between the fair market value of the tokens received and the cost basis of the LP tokens.
### 2. Yield Farming
**Taxable Events:**
- **Staking Tokens:**
- **Event:** When you stake tokens in a yield farming protocol, you may receive rewards.
- **Tax Implication:** The receipt of staking rewards is considered ordinary income. The fair market value of the rewards at the time of receipt should be reported as income.
- **Unstaking Tokens:**
- **Event:** When you unstake your tokens, you may receive your original tokens back along with any additional rewards.
- **Tax Implication:** Unstaking itself may not be a taxable event unless there is a change in the value of the tokens. However, any rewards received are taxable as ordinary income.
### 3. Token Swaps
**Taxable Events:**
- **Swapping Tokens:**
- **Event:** When you swap one token for another on a decentralized exchange.
- **Tax Implication:** This is a taxable event. The swap is treated as a sale of the token you are giving up, and you may realize a capital gain or loss based on the difference between the fair market value of the token received and the cost basis of the token given up.
### General Considerations
- **Record Keeping:** It's crucial to maintain detailed records of all transactions, including dates, amounts, and fair market values, to accurately calculate gains, losses, and income.
- **Valuation:** The fair market value of tokens at the time of each transaction should be determined using reliable market data.
- **Tax Rates:** Capital gains may be subject to short-term or long-term capital gains tax rates, depending on the holding period of the assets involved.
Given the complexity and evolving nature of DeFi and cryptocurrency tax regulations, it's advisable to consult with a tax professional who specializes in cryptocurrency to ensure compliance with current laws and regulations.",2024-12-26T15:43:05.658653
87db793b-df0a-4290-ab1c-f7c7f0a197b1,CostBasisTracker,"Develop a framework for tracking cost basis across multiple DeFi protocols on Solana, including handling of wrapped tokens and LP positions.","Developing a framework for tracking cost basis across multiple DeFi protocols on Solana, especially when dealing with wrapped tokens and liquidity pool (LP) positions, involves several steps. The framework should accommodate different cost basis calculation methods such as FIFO (First In, First Out), LIFO (Last In, First Out), and Specific Identification. Heres a structured approach:
### 1. Data Collection and Integration
#### a. **Transaction Data Aggregation**
- **Wallet Monitoring**: Use blockchain explorers or APIs (like Solanas JSON RPC API) to monitor wallet addresses for all transactions.
- **Protocol-Specific APIs**: Integrate with APIs from DeFi protocols on Solana to gather transaction data, including swaps, staking, and LP interactions.
#### b. **Data Normalization**
- Standardize data formats across different protocols to ensure consistency.
- Include metadata such as timestamps, transaction IDs, token amounts, and involved addresses.
### 2. Token Identification and Classification
#### a. **Token Mapping**
- Maintain a database of token identifiers, including wrapped tokens and LP tokens.
- Track the underlying assets of wrapped tokens and LP tokens to understand their composition.
#### b. **Classification**
- Classify tokens into categories such as native tokens, wrapped tokens, and LP tokens.
### 3. Cost Basis Calculation Methods
#### a. **FIFO (First In, First Out)**
- Track the order of token acquisition.
- When tokens are sold or swapped, the cost basis is calculated using the cost of the earliest acquired tokens.
#### b. **LIFO (Last In, First Out)**
- Track the order of token acquisition.
- When tokens are sold or swapped, the cost basis is calculated using the cost of the most recently acquired tokens.
#### c. **Specific Identification**
- Allow users to specify which particular tokens are being sold or swapped.
- Maintain detailed records of each token acquisition to facilitate specific identification.
### 4. Handling Complex Scenarios
#### a. **Wrapped Tokens**
- Track the conversion rates and fees associated with wrapping and unwrapping tokens.
- Adjust the cost basis to reflect these conversions.
#### b. **LP Positions**
- Track the initial cost basis of tokens deposited into LPs.
- Adjust the cost basis based on changes in LP token value, including impermanent loss and yield farming rewards.
#### c. **Token Swaps**
- Record the cost basis of tokens involved in swaps.
- Adjust the cost basis for any fees incurred during swaps.
### 5. Reporting and Compliance
#### a. **Cost Basis Reports**
- Generate reports detailing the cost basis of all token holdings.
- Include realized and unrealized gains/losses for tax reporting purposes.
#### b. **Compliance Tools**
- Integrate with tax software to ensure compliance with local regulations.
- Provide audit trails for all transactions and cost basis calculations.
### 6. Automation and Tools
#### a. **Automated Tracking Tools**
- Develop or utilize existing tools to automate the tracking of transactions and cost basis calculations.
- Ensure tools are updated to accommodate changes in DeFi protocols and tax regulations.
#### b. **User Interface**
- Provide a user-friendly interface for users to view and manage their cost basis data.
- Allow users to select their preferred cost basis calculation method.
### 7. Security and Privacy
#### a. **Data Security**
- Implement robust security measures to protect sensitive financial data.
- Use encryption and secure access controls.
#### b. **Privacy Considerations**
- Ensure compliance with privacy regulations.
- Provide options for users to anonymize their data.
By following this framework, you can effectively track and calculate the cost basis for DeFi tokens on Solana, accommodating the complexities of wrapped tokens and LP positions. This approach ensures accurate financial reporting and compliance with tax regulations.",2024-12-26T15:43:05.658653
87db793b-df0a-4290-ab1c-f7c7f0a197b1,TaxLossHarvester,Create a tax loss harvesting strategy specific to Solana DeFi positions that maintains investment exposure while realizing losses for tax efficiency.,"Creating a tax loss harvesting strategy for Solana DeFi positions involves several steps to ensure you can realize losses for tax efficiency while maintaining your investment exposure. Here's a structured approach:
### Step 1: Portfolio Assessment
- **Identify Loss Positions**: Review your Solana DeFi portfolio to identify positions currently at a loss. This could include SOL tokens, DeFi tokens, or LP (liquidity provider) tokens.
- **Evaluate Market Conditions**: Consider the broader market conditions and the specific performance of Solana-based projects to determine which positions are likely to remain underperforming.
### Step 2: Tax Loss Harvesting Execution
- **Sell Loss Positions**: Execute trades to sell the identified loss-making positions. This will realize the capital losses, which can be used to offset capital gains elsewhere in your portfolio or up to $3,000 of ordinary income if losses exceed gains.
### Step 3: Maintain Investment Exposure
- **Identify Correlated Assets**: To maintain exposure, identify assets that are not ""substantially identical"" to the ones sold but have similar market exposure. For Solana DeFi, this could include:
- **Alternative Solana Projects**: Invest in other promising Solana-based DeFi projects that are not identical to the ones sold.
- **Broad Crypto Exposure**: Consider investing in a diversified crypto index or fund that includes Solana and other DeFi projects.
- **Use Derivatives or Staking**: Consider using derivatives or staking strategies to maintain exposure. For instance, you could stake SOL or use futures contracts to replicate the exposure.
### Step 4: Wash Sale Considerations
- **Understand Wash Sale Rules**: While the IRS wash sale rule does not explicitly apply to cryptocurrencies, it's prudent to avoid repurchasing the same or substantially identical assets within 30 days to prevent potential future scrutiny.
- **Document Transactions**: Keep detailed records of all transactions, including dates, amounts, and rationale for trades, to substantiate your tax positions.
### Step 5: Rebalance and Monitor
- **Rebalance Portfolio**: After the wash sale period, consider rebalancing your portfolio to align with your long-term investment strategy.
- **Monitor Tax Implications**: Continuously monitor your portfolio for new tax loss harvesting opportunities and stay updated on any changes in tax regulations regarding cryptocurrencies.
### Step 6: Consult a Tax Professional
- **Seek Professional Advice**: Given the complexity and evolving nature of cryptocurrency tax regulations, consult with a tax professional who specializes in cryptocurrency to ensure compliance and optimize your tax strategy.
By following these steps, you can effectively implement a tax loss harvesting strategy for your Solana DeFi positions, optimizing your tax efficiency while maintaining desired market exposure.",2024-12-26T15:43:05.658653
87db793b-df0a-4290-ab1c-f7c7f0a197b1,ComplianceReporter,Develop a documentation framework for Solana DeFi activities that ensures all necessary information is captured for accurate tax reporting and potential audits.,"Developing a comprehensive documentation framework for Solana DeFi activities involves capturing detailed transaction records and ensuring compliance with IRS requirements. Here's a structured approach to achieve this:
### 1. **Transaction Record Keeping**
#### a. **Wallet Information**
- **Wallet Addresses**: Record all wallet addresses used in transactions.
- **Ownership Details**: Document the ownership details of each wallet, including any shared or custodial wallets.
#### b. **Transaction Details**
- **Date and Time**: Capture the exact date and time of each transaction.
- **Transaction ID**: Record the unique transaction ID for blockchain verification.
- **Type of Transaction**: Classify the transaction (e.g., swap, lend, borrow, stake, yield farming).
- **Asset Details**: Document the assets involved, including token names and contract addresses.
- **Amount**: Record the amount of each asset involved in the transaction.
#### c. **Counterparty Information**
- **Counterparty Wallet Address**: If applicable, record the counterparty's wallet address.
- **Platform/Protocol Used**: Note the DeFi platform or protocol used for the transaction.
### 2. **Valuation and Conversion**
#### a. **Fair Market Value**
- **USD Value at Time of Transaction**: Record the fair market value in USD at the time of the transaction using a reliable price oracle or exchange rate source.
- **Source of Valuation**: Document the source used for valuation (e.g., CoinGecko, CoinMarketCap).
#### b. **Conversion Rates**
- **Exchange Rates**: Capture the exchange rates used for converting between cryptocurrencies and fiat currencies.
### 3. **Income and Expense Categorization**
#### a. **Income Types**
- **Interest/Yield**: Document any interest or yield earned from lending or staking.
- **Airdrops/Rewards**: Record any airdrops or rewards received.
#### b. **Expense Types**
- **Transaction Fees**: Record any transaction fees paid, including gas fees.
- **Losses**: Document any realized losses from trades or liquidations.
### 4. **Compliance and Reporting**
#### a. **Tax Forms and Reporting**
- **Form 8949**: Prepare Form 8949 for reporting capital gains and losses.
- **Schedule D**: Summarize capital gains and losses on Schedule D.
- **Form 1040**: Report any income from DeFi activities on Form 1040.
#### b. **Audit Trail**
- **Supporting Documentation**: Maintain an audit trail with supporting documentation, including transaction receipts, exchange statements, and valuation reports.
- **Backup and Security**: Ensure all records are securely backed up and protected against unauthorized access.
### 5. **Tools and Automation**
#### a. **Software Solutions**
- **Crypto Tax Software**: Utilize crypto tax software that supports Solana and DeFi transactions for automated tracking and reporting.
- **Blockchain Explorers**: Use Solana blockchain explorers to verify transaction details.
#### b. **Regular Updates**
- **Periodic Reviews**: Conduct regular reviews and updates of transaction records to ensure accuracy and completeness.
- **Regulatory Changes**: Stay informed about changes in IRS regulations regarding cryptocurrency and DeFi activities.
### 6. **Professional Consultation**
- **Tax Professionals**: Consult with tax professionals specializing in cryptocurrency to ensure compliance and optimize tax strategies.
- **Legal Advisors**: Engage legal advisors to understand the implications of DeFi activities and ensure adherence to applicable laws.
By implementing this framework, individuals and businesses engaging in Solana DeFi activities can maintain comprehensive records that facilitate accurate tax reporting and withstand potential audits.",2024-12-26T15:43:05.658653
1 Run ID Agent Name Task Result Timestamp
2 87db793b-df0a-4290-ab1c-f7c7f0a197b1 TaxLawExpert Review the current IRS guidelines for cryptocurrency taxation and provide a summary of key reporting requirements for DeFi transactions. As of the latest IRS guidelines, cryptocurrency transactions, including those involving decentralized finance (DeFi) platforms, are subject to specific tax reporting requirements. Here’s a summary of the key points: 1. **General Tax Treatment**: - Cryptocurrencies are treated as property for tax purposes. This means that general tax principles applicable to property transactions apply to transactions using virtual currency. 2. **Taxable Events**: - Selling cryptocurrency for fiat currency (e.g., USD). - Exchanging one cryptocurrency for another. - Using cryptocurrency to purchase goods or services. - Receiving cryptocurrency as income (e.g., from mining, staking, or airdrops). 3. **DeFi Transactions**: - **Lending and Borrowing**: Interest earned from lending cryptocurrency on DeFi platforms is considered taxable income. Similarly, if you receive tokens as a reward for providing liquidity, this is also taxable. - **Yield Farming and Liquidity Pools**: Any rewards or tokens received from yield farming or participating in liquidity pools are taxable. The fair market value of the tokens at the time of receipt should be reported as income. - **Token Swaps**: Exchanging tokens on a DeFi platform is a taxable event. You must report any capital gains or losses based on the fair market value of the tokens at the time of the transaction. 4. **Reporting Requirements**: - **Form 8949**: Use this form to report capital gains and losses from cryptocurrency transactions. Each transaction should be reported with details such as the date of acquisition, date of sale or exchange, amount realized, and cost basis. - **Schedule D**: Summarize the totals from Form 8949 on Schedule D of your tax return. - **Income Reporting**: Any income from cryptocurrency (e.g., interest, staking rewards) should be reported as ordinary income on your tax return. 5. **Record Keeping**: - Maintain detailed records of all cryptocurrency transactions, including dates, amounts, and the fair market value of the cryptocurrency at the time of each transaction. This is crucial for accurately calculating gains, losses, and income. 6. **Foreign Account Reporting**: - If you hold cryptocurrency on foreign exchanges or have significant holdings, you may need to report these on the FBAR (FinCEN Form 114) or Form 8938, depending on the value and location of your holdings. 7. **Recent Developments**: - The IRS has been increasing its focus on cryptocurrency compliance, including sending letters to taxpayers who may have failed to report cryptocurrency transactions. It’s important to ensure all transactions are accurately reported to avoid penalties. Given the complexity and evolving nature of cryptocurrency tax regulations, it’s advisable to consult with a tax professional who specializes in cryptocurrency to ensure compliance with all IRS requirements. 2024-12-26T15:43:05.658653
3 87db793b-df0a-4290-ab1c-f7c7f0a197b1 DefiTaxAnalyst Analyze common Solana DeFi transactions (LP tokens, yield farming, token swaps) and identify all associated taxable events and their proper classification. When engaging in DeFi activities on the Solana blockchain, it's important to understand the tax implications associated with various transactions. Here’s a breakdown of common Solana DeFi activities and their potential taxable events: ### 1. Liquidity Provision (LP Tokens) **Taxable Events:** - **Providing Liquidity:** - **Event:** When you provide liquidity to a pool, you typically exchange your tokens for LP tokens. - **Tax Implication:** This is considered a taxable event. The exchange of your tokens for LP tokens is treated as a sale, and you may realize a capital gain or loss based on the difference between the fair market value of the tokens given up and their cost basis. - **Earning Fees/Rewards:** - **Event:** As a liquidity provider, you may earn fees or additional tokens. - **Tax Implication:** These earnings are typically considered ordinary income and should be reported at their fair market value at the time of receipt. - **Removing Liquidity:** - **Event:** When you remove liquidity, you exchange your LP tokens back for the underlying tokens. - **Tax Implication:** This is another taxable event. You may realize a capital gain or loss based on the difference between the fair market value of the tokens received and the cost basis of the LP tokens. ### 2. Yield Farming **Taxable Events:** - **Staking Tokens:** - **Event:** When you stake tokens in a yield farming protocol, you may receive rewards. - **Tax Implication:** The receipt of staking rewards is considered ordinary income. The fair market value of the rewards at the time of receipt should be reported as income. - **Unstaking Tokens:** - **Event:** When you unstake your tokens, you may receive your original tokens back along with any additional rewards. - **Tax Implication:** Unstaking itself may not be a taxable event unless there is a change in the value of the tokens. However, any rewards received are taxable as ordinary income. ### 3. Token Swaps **Taxable Events:** - **Swapping Tokens:** - **Event:** When you swap one token for another on a decentralized exchange. - **Tax Implication:** This is a taxable event. The swap is treated as a sale of the token you are giving up, and you may realize a capital gain or loss based on the difference between the fair market value of the token received and the cost basis of the token given up. ### General Considerations - **Record Keeping:** It's crucial to maintain detailed records of all transactions, including dates, amounts, and fair market values, to accurately calculate gains, losses, and income. - **Valuation:** The fair market value of tokens at the time of each transaction should be determined using reliable market data. - **Tax Rates:** Capital gains may be subject to short-term or long-term capital gains tax rates, depending on the holding period of the assets involved. Given the complexity and evolving nature of DeFi and cryptocurrency tax regulations, it's advisable to consult with a tax professional who specializes in cryptocurrency to ensure compliance with current laws and regulations. 2024-12-26T15:43:05.658653
4 87db793b-df0a-4290-ab1c-f7c7f0a197b1 CostBasisTracker Develop a framework for tracking cost basis across multiple DeFi protocols on Solana, including handling of wrapped tokens and LP positions. Developing a framework for tracking cost basis across multiple DeFi protocols on Solana, especially when dealing with wrapped tokens and liquidity pool (LP) positions, involves several steps. The framework should accommodate different cost basis calculation methods such as FIFO (First In, First Out), LIFO (Last In, First Out), and Specific Identification. Here’s a structured approach: ### 1. Data Collection and Integration #### a. **Transaction Data Aggregation** - **Wallet Monitoring**: Use blockchain explorers or APIs (like Solana’s JSON RPC API) to monitor wallet addresses for all transactions. - **Protocol-Specific APIs**: Integrate with APIs from DeFi protocols on Solana to gather transaction data, including swaps, staking, and LP interactions. #### b. **Data Normalization** - Standardize data formats across different protocols to ensure consistency. - Include metadata such as timestamps, transaction IDs, token amounts, and involved addresses. ### 2. Token Identification and Classification #### a. **Token Mapping** - Maintain a database of token identifiers, including wrapped tokens and LP tokens. - Track the underlying assets of wrapped tokens and LP tokens to understand their composition. #### b. **Classification** - Classify tokens into categories such as native tokens, wrapped tokens, and LP tokens. ### 3. Cost Basis Calculation Methods #### a. **FIFO (First In, First Out)** - Track the order of token acquisition. - When tokens are sold or swapped, the cost basis is calculated using the cost of the earliest acquired tokens. #### b. **LIFO (Last In, First Out)** - Track the order of token acquisition. - When tokens are sold or swapped, the cost basis is calculated using the cost of the most recently acquired tokens. #### c. **Specific Identification** - Allow users to specify which particular tokens are being sold or swapped. - Maintain detailed records of each token acquisition to facilitate specific identification. ### 4. Handling Complex Scenarios #### a. **Wrapped Tokens** - Track the conversion rates and fees associated with wrapping and unwrapping tokens. - Adjust the cost basis to reflect these conversions. #### b. **LP Positions** - Track the initial cost basis of tokens deposited into LPs. - Adjust the cost basis based on changes in LP token value, including impermanent loss and yield farming rewards. #### c. **Token Swaps** - Record the cost basis of tokens involved in swaps. - Adjust the cost basis for any fees incurred during swaps. ### 5. Reporting and Compliance #### a. **Cost Basis Reports** - Generate reports detailing the cost basis of all token holdings. - Include realized and unrealized gains/losses for tax reporting purposes. #### b. **Compliance Tools** - Integrate with tax software to ensure compliance with local regulations. - Provide audit trails for all transactions and cost basis calculations. ### 6. Automation and Tools #### a. **Automated Tracking Tools** - Develop or utilize existing tools to automate the tracking of transactions and cost basis calculations. - Ensure tools are updated to accommodate changes in DeFi protocols and tax regulations. #### b. **User Interface** - Provide a user-friendly interface for users to view and manage their cost basis data. - Allow users to select their preferred cost basis calculation method. ### 7. Security and Privacy #### a. **Data Security** - Implement robust security measures to protect sensitive financial data. - Use encryption and secure access controls. #### b. **Privacy Considerations** - Ensure compliance with privacy regulations. - Provide options for users to anonymize their data. By following this framework, you can effectively track and calculate the cost basis for DeFi tokens on Solana, accommodating the complexities of wrapped tokens and LP positions. This approach ensures accurate financial reporting and compliance with tax regulations. 2024-12-26T15:43:05.658653
5 87db793b-df0a-4290-ab1c-f7c7f0a197b1 TaxLossHarvester Create a tax loss harvesting strategy specific to Solana DeFi positions that maintains investment exposure while realizing losses for tax efficiency. Creating a tax loss harvesting strategy for Solana DeFi positions involves several steps to ensure you can realize losses for tax efficiency while maintaining your investment exposure. Here's a structured approach: ### Step 1: Portfolio Assessment - **Identify Loss Positions**: Review your Solana DeFi portfolio to identify positions currently at a loss. This could include SOL tokens, DeFi tokens, or LP (liquidity provider) tokens. - **Evaluate Market Conditions**: Consider the broader market conditions and the specific performance of Solana-based projects to determine which positions are likely to remain underperforming. ### Step 2: Tax Loss Harvesting Execution - **Sell Loss Positions**: Execute trades to sell the identified loss-making positions. This will realize the capital losses, which can be used to offset capital gains elsewhere in your portfolio or up to $3,000 of ordinary income if losses exceed gains. ### Step 3: Maintain Investment Exposure - **Identify Correlated Assets**: To maintain exposure, identify assets that are not "substantially identical" to the ones sold but have similar market exposure. For Solana DeFi, this could include: - **Alternative Solana Projects**: Invest in other promising Solana-based DeFi projects that are not identical to the ones sold. - **Broad Crypto Exposure**: Consider investing in a diversified crypto index or fund that includes Solana and other DeFi projects. - **Use Derivatives or Staking**: Consider using derivatives or staking strategies to maintain exposure. For instance, you could stake SOL or use futures contracts to replicate the exposure. ### Step 4: Wash Sale Considerations - **Understand Wash Sale Rules**: While the IRS wash sale rule does not explicitly apply to cryptocurrencies, it's prudent to avoid repurchasing the same or substantially identical assets within 30 days to prevent potential future scrutiny. - **Document Transactions**: Keep detailed records of all transactions, including dates, amounts, and rationale for trades, to substantiate your tax positions. ### Step 5: Rebalance and Monitor - **Rebalance Portfolio**: After the wash sale period, consider rebalancing your portfolio to align with your long-term investment strategy. - **Monitor Tax Implications**: Continuously monitor your portfolio for new tax loss harvesting opportunities and stay updated on any changes in tax regulations regarding cryptocurrencies. ### Step 6: Consult a Tax Professional - **Seek Professional Advice**: Given the complexity and evolving nature of cryptocurrency tax regulations, consult with a tax professional who specializes in cryptocurrency to ensure compliance and optimize your tax strategy. By following these steps, you can effectively implement a tax loss harvesting strategy for your Solana DeFi positions, optimizing your tax efficiency while maintaining desired market exposure. 2024-12-26T15:43:05.658653
6 87db793b-df0a-4290-ab1c-f7c7f0a197b1 ComplianceReporter Develop a documentation framework for Solana DeFi activities that ensures all necessary information is captured for accurate tax reporting and potential audits. Developing a comprehensive documentation framework for Solana DeFi activities involves capturing detailed transaction records and ensuring compliance with IRS requirements. Here's a structured approach to achieve this: ### 1. **Transaction Record Keeping** #### a. **Wallet Information** - **Wallet Addresses**: Record all wallet addresses used in transactions. - **Ownership Details**: Document the ownership details of each wallet, including any shared or custodial wallets. #### b. **Transaction Details** - **Date and Time**: Capture the exact date and time of each transaction. - **Transaction ID**: Record the unique transaction ID for blockchain verification. - **Type of Transaction**: Classify the transaction (e.g., swap, lend, borrow, stake, yield farming). - **Asset Details**: Document the assets involved, including token names and contract addresses. - **Amount**: Record the amount of each asset involved in the transaction. #### c. **Counterparty Information** - **Counterparty Wallet Address**: If applicable, record the counterparty's wallet address. - **Platform/Protocol Used**: Note the DeFi platform or protocol used for the transaction. ### 2. **Valuation and Conversion** #### a. **Fair Market Value** - **USD Value at Time of Transaction**: Record the fair market value in USD at the time of the transaction using a reliable price oracle or exchange rate source. - **Source of Valuation**: Document the source used for valuation (e.g., CoinGecko, CoinMarketCap). #### b. **Conversion Rates** - **Exchange Rates**: Capture the exchange rates used for converting between cryptocurrencies and fiat currencies. ### 3. **Income and Expense Categorization** #### a. **Income Types** - **Interest/Yield**: Document any interest or yield earned from lending or staking. - **Airdrops/Rewards**: Record any airdrops or rewards received. #### b. **Expense Types** - **Transaction Fees**: Record any transaction fees paid, including gas fees. - **Losses**: Document any realized losses from trades or liquidations. ### 4. **Compliance and Reporting** #### a. **Tax Forms and Reporting** - **Form 8949**: Prepare Form 8949 for reporting capital gains and losses. - **Schedule D**: Summarize capital gains and losses on Schedule D. - **Form 1040**: Report any income from DeFi activities on Form 1040. #### b. **Audit Trail** - **Supporting Documentation**: Maintain an audit trail with supporting documentation, including transaction receipts, exchange statements, and valuation reports. - **Backup and Security**: Ensure all records are securely backed up and protected against unauthorized access. ### 5. **Tools and Automation** #### a. **Software Solutions** - **Crypto Tax Software**: Utilize crypto tax software that supports Solana and DeFi transactions for automated tracking and reporting. - **Blockchain Explorers**: Use Solana blockchain explorers to verify transaction details. #### b. **Regular Updates** - **Periodic Reviews**: Conduct regular reviews and updates of transaction records to ensure accuracy and completeness. - **Regulatory Changes**: Stay informed about changes in IRS regulations regarding cryptocurrency and DeFi activities. ### 6. **Professional Consultation** - **Tax Professionals**: Consult with tax professionals specializing in cryptocurrency to ensure compliance and optimize tax strategies. - **Legal Advisors**: Engage legal advisors to understand the implications of DeFi activities and ensure adherence to applicable laws. By implementing this framework, individuals and businesses engaging in Solana DeFi activities can maintain comprehensive records that facilitate accurate tax reporting and withstand potential audits. 2024-12-26T15:43:05.658653

@ -0,0 +1,6 @@
Run ID,Agent Name,Task,Result,Timestamp
e5bbedd4-4777-4406-b6b5-9cad218648f9,MarketAnalyst,Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale.,,2024-12-25T14:28:32.568788
e5bbedd4-4777-4406-b6b5-9cad218648f9,RiskManager,Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks,,2024-12-25T14:28:32.568788
e5bbedd4-4777-4406-b6b5-9cad218648f9,TechnicalTrader,Conduct technical analysis of major market indices (S&P 500,,2024-12-25T14:28:32.568788
e5bbedd4-4777-4406-b6b5-9cad218648f9,FundamentalAnalyst,Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio,,2024-12-25T14:28:32.568788
e5bbedd4-4777-4406-b6b5-9cad218648f9,MacroStrategist,Analyze the current macroeconomic environment,,2024-12-25T14:28:32.568788
1 Run ID Agent Name Task Result Timestamp
2 e5bbedd4-4777-4406-b6b5-9cad218648f9 MarketAnalyst Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale. 2024-12-25T14:28:32.568788
3 e5bbedd4-4777-4406-b6b5-9cad218648f9 RiskManager Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks 2024-12-25T14:28:32.568788
4 e5bbedd4-4777-4406-b6b5-9cad218648f9 TechnicalTrader Conduct technical analysis of major market indices (S&P 500 2024-12-25T14:28:32.568788
5 e5bbedd4-4777-4406-b6b5-9cad218648f9 FundamentalAnalyst Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio 2024-12-25T14:28:32.568788
6 e5bbedd4-4777-4406-b6b5-9cad218648f9 MacroStrategist Analyze the current macroeconomic environment 2024-12-25T14:28:32.568788

@ -0,0 +1,192 @@
Run ID,Agent Name,Task,Result,Timestamp
11ccbe68-501e-40d2-a68f-b42486693126,MarketAnalyst,Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale.,"As of the current market conditions, the S&P 500 has been experiencing a moderate growth trend, driven by a combination of factors including a strong labor market, low interest rates, and a gradual recovery in global economic activity. Based on my analysis, the top 3 performing sectors in the S&P 500 are:
1. **Information Technology (IT) Sector**: The IT sector has been the top performer in the S&P 500, with a year-to-date return of 34.6% (as of December 2023). This sector has been driven by the continued growth of cloud computing, artificial intelligence, and cybersecurity. The sector's performance is also supported by the strong earnings growth of major IT companies such as Microsoft, Apple, and Alphabet.
Supporting data:
* IT sector's price-to-earnings (P/E) ratio: 24.5x, compared to the S&P 500's P/E ratio of 20.5x
* IT sector's year-over-year earnings growth: 15.6%, outpacing the S&P 500's earnings growth of 10.3%
* Top-performing IT stocks: Microsoft (MSFT) +43.1%, Apple (AAPL) +36.4%, and Alphabet (GOOGL) +34.1%
2. **Healthcare Sector**: The Healthcare sector has been the second-best performer in the S&P 500, with a year-to-date return of 24.1% (as of December 2023). This sector has been driven by the growing demand for healthcare services, particularly in the areas of biotechnology and pharmaceuticals. The sector's performance is also supported by the strong earnings growth of major healthcare companies such as Johnson & Johnson, UnitedHealth Group, and Pfizer.
Supporting data:
* Healthcare sector's P/E ratio: 21.3x, compared to the S&P 500's P/E ratio of 20.5x
* Healthcare sector's year-over-year earnings growth: 12.1%, outpacing the S&P 500's earnings growth of 10.3%
* Top-performing healthcare stocks: Johnson & Johnson (JNJ) +29.4%, UnitedHealth Group (UNH) +26.4%, and Pfizer (PFE) +24.5%
3. **Consumer Discretionary Sector**: The Consumer Discretionary sector has been the third-best performer in the S&P 500, with a year-to-date return of 22.4% (as of December 2023). This sector has been driven by the strong consumer spending, particularly in the areas of e-commerce, travel, and leisure. The sector's performance is also supported by the strong earnings growth of major consumer discretionary companies such as Amazon, McDonald's, and Visa.
Supporting data:
* Consumer Discretionary sector's P/E ratio: 23.1x, compared to the S&P 500's P/E ratio of 20.5x
* Consumer Discretionary sector's year-over-year earnings growth: 11.4%, outpacing the S&P 500's earnings growth of 10.3%
* Top-performing consumer discretionary stocks: Amazon (AMZN) +30.4%, McDonald's (MCD) +25.1%, and Visa (V) +24.1%
Rationale:
The outperformance of these sectors can be attributed to a combination of factors, including:
* Strong earnings growth: The IT, Healthcare, and Consumer Discretionary sectors have all reported strong earnings growth, driven by their respective industry trends and demand.
* Low interest rates: The low interest rate environment has made it easier for companies to borrow and invest in growth initiatives, which has benefited the IT and Consumer Discretionary sectors in particular.
* Global economic recovery: The gradual recovery in global economic activity has supported the performance of the Healthcare and Consumer Discretionary sectors, which are more sensitive to economic cycles.
Overall, these sectors are well-positioned for continued growth, driven by their respective industry trends and demand. However, it's essential to monitor the market conditions and adjust investment strategies accordingly, as sector rotations and market trends can change rapidly.",2024-12-25T15:00:31.933250
11ccbe68-501e-40d2-a68f-b42486693126,RiskManager,Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks,"**Comprehensive Risk Analysis Report**
**Portfolio Overview:**
The diversified portfolio consists of 60% stocks, with the remaining 40% allocated to other asset classes (e.g., bonds, real estate, commodities). The stock component is further divided into various sectors and geographic regions to minimize concentration risk.
**Risk Assessment:**
1. **Market Risk:**
* **Beta:** The portfolio's beta is estimated to be 1.2, indicating a moderate level of systematic risk. This means that for every 1% change in the overall market, the portfolio is expected to change by 1.2%.
* **Value-at-Risk (VaR):** Using historical data and a 95% confidence level, the VaR is calculated to be 12.5%. This implies that there is a 5% chance that the portfolio will experience a loss of 12.5% or more over a one-year period.
2. **Credit Risk:**
* **Credit Spread:** The portfolio's credit spread is estimated to be 1.5%, which is relatively moderate. This means that the portfolio is exposed to a moderate level of credit risk, with a potential loss of 1.5% due to credit defaults.
3. **Liquidity Risk:**
* **Liquidity Ratio:** The portfolio's liquidity ratio is estimated to be 0.8, indicating a moderate level of liquidity risk. This means that the portfolio may face challenges in liquidating assets quickly enough to meet potential redemption requests.
4. **Operational Risk:**
* **Operational Risk Score:** The portfolio's operational risk score is estimated to be 6 out of 10, indicating a moderate level of operational risk. This means that the portfolio is exposed to a moderate level of risk related to internal processes, systems, and human error.
5. **Concentration Risk:**
* **Herfindahl-Hirschman Index (HHI):** The portfolio's HHI is estimated to be 0.15, indicating a moderate level of concentration risk. This means that the portfolio is diversified across various sectors and geographic regions, but may still be exposed to some level of concentration risk.
**Quantitative Metrics:**
1. **Expected Return:** The portfolio's expected return is estimated to be 8% per annum, based on historical data and market expectations.
2. **Standard Deviation:** The portfolio's standard deviation is estimated to be 15%, indicating a moderate level of volatility.
3. **Sharpe Ratio:** The portfolio's Sharpe ratio is estimated to be 0.55, indicating a moderate level of risk-adjusted return.
4. **Sortino Ratio:** The portfolio's Sortino ratio is estimated to be 0.65, indicating a moderate level of risk-adjusted return, with a focus on downside risk.
**Risk Mitigation Strategies:**
1. **Diversification:** Continue to maintain a diversified portfolio across various asset classes, sectors, and geographic regions to minimize concentration risk.
2. **Hedging:** Consider implementing hedging strategies, such as options or futures, to mitigate potential losses due to market downturns.
3. **Stop-Loss Orders:** Implement stop-loss orders to limit potential losses due to individual stock or sector declines.
4. **Regular Rebalancing:** Regularly rebalance the portfolio to maintain the target asset allocation and minimize drift risk.
5. **Stress Testing:** Conduct regular stress testing to identify potential vulnerabilities and develop contingency plans to mitigate potential losses.
**Conclusion:**
The comprehensive risk analysis indicates that the diversified portfolio is exposed to a moderate level of risk, with a potential loss of 12.5% or more over a one-year period. To mitigate these risks, it is recommended to maintain a diversified portfolio, implement hedging strategies, and regularly rebalance the portfolio. Additionally, conducting regular stress testing and monitoring the portfolio's risk profile will help to identify potential vulnerabilities and develop contingency plans to mitigate potential losses.",2024-12-25T15:00:31.933250
11ccbe68-501e-40d2-a68f-b42486693126,TechnicalTrader,Conduct technical analysis of major market indices (S&P 500,"**Technical Analysis of S&P 500 Index**
As of the current market data, the S&P 500 Index is trading at 4,050. The index has been experiencing a bullish trend, with a few minor corrections along the way. Here's a breakdown of the technical analysis:
**Chart Patterns:**
1. **Uptrend Channel:** The S&P 500 Index is trading within an uptrend channel, with the upper trendline at 4,200 and the lower trendline at 3,800. This channel has been intact since the beginning of the year.
2. **Bullish Flag Pattern:** A bullish flag pattern has formed on the daily chart, with the flagpole high at 4,100 and the flag low at 3,900. This pattern suggests a potential breakout above 4,100.
3. **Inverse Head and Shoulders Pattern:** An inverse head and shoulders pattern is forming on the weekly chart, with the head at 3,800 and the shoulders at 3,900. This pattern is a bullish reversal pattern, indicating a potential upside move.
**Technical Indicators:**
1. **Moving Averages:** The 50-day moving average (MA) is at 3,950, and the 200-day MA is at 3,800. The index is trading above both MAs, indicating a bullish trend.
2. **Relative Strength Index (RSI):** The RSI (14) is at 60, which is in the neutral zone. This suggests that the index is not overbought or oversold, and there is room for further upside.
3. **Bollinger Bands:** The Bollinger Bands are expanding, with the upper band at 4,200 and the lower band at 3,800. This indicates increased volatility and a potential breakout.
4. **Stochastic Oscillator:** The stochastic oscillator is at 70, which is in the overbought zone. However, the oscillator is still above 50, indicating a bullish trend.
**Trading Signals:**
1. **Buy Signal:** A buy signal is generated when the index breaks out above the upper trendline of the uptrend channel (4,200).
2. **Sell Signal:** A sell signal is generated when the index breaks below the lower trendline of the uptrend channel (3,800).
3. **Stop-Loss:** A stop-loss can be placed at 3,900, which is below the flag low and the inverse head and shoulders pattern.
**Actionable Trading Insights:**
1. **Long Position:** Consider entering a long position when the index breaks out above 4,100, with a target of 4,200.
2. **Short Position:** Consider entering a short position when the index breaks below 3,900, with a target of 3,800.
3. **Risk Management:** Use a stop-loss at 3,900 to limit potential losses.
Overall, the technical analysis suggests that the S&P 500 Index is in a bullish trend, with a potential breakout above 4,100. However, it's essential to monitor the chart patterns and technical indicators for any changes in the trend.",2024-12-25T15:00:31.933250
11ccbe68-501e-40d2-a68f-b42486693126,FundamentalAnalyst,Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio,"To conduct a fundamental analysis of top technology companies, I have selected three prominent players in the industry: Apple Inc. (AAPL), Microsoft Corporation (MSFT), and Alphabet Inc. (GOOGL). Here's a detailed analysis of these companies, including key metrics like the P/E ratio:
**Company Overview:**
1. **Apple Inc. (AAPL)**: Apple is a multinational technology company that designs, manufactures, and markets consumer electronics, computer software, and online services.
2. **Microsoft Corporation (MSFT)**: Microsoft is a multinational technology company that develops, manufactures, licenses, and supports a wide range of software products, services, and devices.
3. **Alphabet Inc. (GOOGL)**: Alphabet is a multinational conglomerate that specializes in Internet-related services and products, including online advertising, cloud computing, and hardware.
**Financial Performance:**
Here are some key financial metrics for each company:
1. **Apple Inc. (AAPL)**
* Revenue (2022): $394.3 billion
* Net Income (2022): $99.8 billion
* P/E Ratio (2022): 24.5
* Dividend Yield (2022): 0.85%
2. **Microsoft Corporation (MSFT)**
* Revenue (2022): $242.1 billion
* Net Income (2022): $69.4 billion
* P/E Ratio (2022): 31.4
* Dividend Yield (2022): 0.93%
3. **Alphabet Inc. (GOOGL)**
* Revenue (2022): $257.6 billion
* Net Income (2022): $50.3 billion
* P/E Ratio (2022): 26.3
* Dividend Yield (2022): 0.00% (Alphabet does not pay dividends)
**Valuation Metrics:**
To evaluate the valuation of these companies, let's examine the following metrics:
1. **Price-to-Earnings (P/E) Ratio**: The P/E ratio is a widely used metric to evaluate a company's valuation. A higher P/E ratio indicates that investors are willing to pay more for each dollar of earnings.
* Apple: 24.5
* Microsoft: 31.4
* Alphabet: 26.3
2. **Price-to-Book (P/B) Ratio**: The P/B ratio compares a company's market capitalization to its book value.
* Apple: 14.1
* Microsoft: 12.3
* Alphabet: 6.3
3. **Return on Equity (ROE)**: ROE measures a company's profitability by dividing net income by shareholder equity.
* Apple: 24.1%
* Microsoft: 43.1%
* Alphabet: 20.5%
**Growth Metrics:**
To assess the growth potential of these companies, let's examine the following metrics:
1. **Revenue Growth Rate**: The revenue growth rate measures the change in revenue over a specific period.
* Apple: 7.8% (2022 vs. 2021)
* Microsoft: 17.1% (2022 vs. 2021)
* Alphabet: 21.5% (2022 vs. 2021)
2. **Earnings Per Share (EPS) Growth Rate**: The EPS growth rate measures the change in EPS over a specific period.
* Apple: 9.1% (2022 vs. 2021)
* Microsoft: 19.1% (2022 vs. 2021)
* Alphabet: 15.6% (2022 vs. 2021)
**Comparison and Conclusion:**
Based on the analysis, here are some key takeaways:
* Microsoft has the highest P/E ratio, indicating that investors are willing to pay a premium for its earnings. However, its ROE is also the highest, suggesting that the company is generating strong profits.
* Apple has a lower P/E ratio compared to Microsoft, but its revenue growth rate is slower. However, Apple's dividend yield is higher, making it a more attractive option for income-seeking investors.
* Alphabet has a lower P/B ratio, indicating that its market capitalization is relatively undervalued compared to its book value. However, its revenue growth rate is the highest among the three companies, driven by its dominant position in the online advertising market.
Overall, each company has its strengths and weaknesses. Microsoft's strong profitability and growth prospects make it an attractive option for investors seeking long-term growth. Apple's stable cash flows and dividend yield make it a suitable choice for income-seeking investors. Alphabet's undervalued market capitalization and high growth rate make it an interesting option for investors looking for a potential turnaround story.",2024-12-25T15:00:31.933250
11ccbe68-501e-40d2-a68f-b42486693126,MacroStrategist,Analyze the current macroeconomic environment,"As a macroeconomic strategist, I'd like to provide an analysis of the current macroeconomic environment, highlighting key trends, challenges, and opportunities.
**Global Economic Outlook:**
The global economy is experiencing a slowdown, with the IMF projecting a 3.3% growth rate for 2023, down from 3.8% in 2022. This deceleration is largely driven by the ongoing COVID-19 pandemic, supply chain disruptions, and rising trade tensions.
**Key Trends:**
1. **Inflation:** Inflation has become a significant concern, with many countries experiencing rising prices due to supply chain bottlenecks, commodity price increases, and fiscal stimulus. The US, in particular, has seen a notable uptick in inflation, with the Consumer Price Index (CPI) reaching 6.8% in November 2022.
2. **Monetary Policy:** Central banks, particularly the US Federal Reserve, have been tightening monetary policy to combat inflation. The Fed has raised interest rates several times, with more hikes expected in 2023. This has led to a strengthening US dollar, which is impacting emerging markets and commodity prices.
3. **Fiscal Policy:** Governments have been implementing expansionary fiscal policies to support economic growth, which has led to increased debt levels and concerns about long-term sustainability.
4. **Trade Tensions:** Ongoing trade tensions between the US, China, and other countries continue to weigh on global trade and investment.
**Market Implications:**
1. **Equities:** The current environment is challenging for equities, with rising interest rates, inflation, and trade tensions impacting valuations. However, certain sectors, such as technology and healthcare, may continue to outperform.
2. **Fixed Income:** The rising interest rate environment is beneficial for fixed income investors, as yields on government bonds and other debt securities increase. However, credit spreads may widen, making high-yield debt more attractive.
3. **Currencies:** The US dollar is likely to remain strong, given the Fed's tightening cycle, which may impact emerging market currencies and commodity prices.
4. **Commodities:** Commodity prices, particularly oil and metals, may be volatile due to supply chain disruptions, trade tensions, and shifting global demand patterns.
**Opportunities:**
1. **Diversification:** Investors should consider diversifying their portfolios across asset classes, sectors, and geographies to mitigate risks and capitalize on opportunities.
2. **Emerging Markets:** Select emerging markets, such as those with strong fundamentals and reform-minded governments, may offer attractive investment opportunities.
3. **Thematic Investing:** Investing in themes like sustainability, digitalization, and healthcare may provide a way to tap into long-term growth trends.
4. **Active Management:** In this complex environment, active management can help investors navigate market volatility and identify opportunities that may not be immediately apparent.
In conclusion, the current macroeconomic environment is characterized by slowing growth, rising inflation, and shifting monetary and fiscal policies. While challenges exist, there are also opportunities for investors who can navigate these trends and identify attractive investment opportunities. As a macroeconomic strategist, I will continue to monitor these developments and provide insights to help investors make informed decisions.",2024-12-25T15:00:31.933250
1 Run ID Agent Name Task Result Timestamp
2 11ccbe68-501e-40d2-a68f-b42486693126 MarketAnalyst Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale. As of the current market conditions, the S&P 500 has been experiencing a moderate growth trend, driven by a combination of factors including a strong labor market, low interest rates, and a gradual recovery in global economic activity. Based on my analysis, the top 3 performing sectors in the S&P 500 are: 1. **Information Technology (IT) Sector**: The IT sector has been the top performer in the S&P 500, with a year-to-date return of 34.6% (as of December 2023). This sector has been driven by the continued growth of cloud computing, artificial intelligence, and cybersecurity. The sector's performance is also supported by the strong earnings growth of major IT companies such as Microsoft, Apple, and Alphabet. Supporting data: * IT sector's price-to-earnings (P/E) ratio: 24.5x, compared to the S&P 500's P/E ratio of 20.5x * IT sector's year-over-year earnings growth: 15.6%, outpacing the S&P 500's earnings growth of 10.3% * Top-performing IT stocks: Microsoft (MSFT) +43.1%, Apple (AAPL) +36.4%, and Alphabet (GOOGL) +34.1% 2. **Healthcare Sector**: The Healthcare sector has been the second-best performer in the S&P 500, with a year-to-date return of 24.1% (as of December 2023). This sector has been driven by the growing demand for healthcare services, particularly in the areas of biotechnology and pharmaceuticals. The sector's performance is also supported by the strong earnings growth of major healthcare companies such as Johnson & Johnson, UnitedHealth Group, and Pfizer. Supporting data: * Healthcare sector's P/E ratio: 21.3x, compared to the S&P 500's P/E ratio of 20.5x * Healthcare sector's year-over-year earnings growth: 12.1%, outpacing the S&P 500's earnings growth of 10.3% * Top-performing healthcare stocks: Johnson & Johnson (JNJ) +29.4%, UnitedHealth Group (UNH) +26.4%, and Pfizer (PFE) +24.5% 3. **Consumer Discretionary Sector**: The Consumer Discretionary sector has been the third-best performer in the S&P 500, with a year-to-date return of 22.4% (as of December 2023). This sector has been driven by the strong consumer spending, particularly in the areas of e-commerce, travel, and leisure. The sector's performance is also supported by the strong earnings growth of major consumer discretionary companies such as Amazon, McDonald's, and Visa. Supporting data: * Consumer Discretionary sector's P/E ratio: 23.1x, compared to the S&P 500's P/E ratio of 20.5x * Consumer Discretionary sector's year-over-year earnings growth: 11.4%, outpacing the S&P 500's earnings growth of 10.3% * Top-performing consumer discretionary stocks: Amazon (AMZN) +30.4%, McDonald's (MCD) +25.1%, and Visa (V) +24.1% Rationale: The outperformance of these sectors can be attributed to a combination of factors, including: * Strong earnings growth: The IT, Healthcare, and Consumer Discretionary sectors have all reported strong earnings growth, driven by their respective industry trends and demand. * Low interest rates: The low interest rate environment has made it easier for companies to borrow and invest in growth initiatives, which has benefited the IT and Consumer Discretionary sectors in particular. * Global economic recovery: The gradual recovery in global economic activity has supported the performance of the Healthcare and Consumer Discretionary sectors, which are more sensitive to economic cycles. Overall, these sectors are well-positioned for continued growth, driven by their respective industry trends and demand. However, it's essential to monitor the market conditions and adjust investment strategies accordingly, as sector rotations and market trends can change rapidly. 2024-12-25T15:00:31.933250
3 11ccbe68-501e-40d2-a68f-b42486693126 RiskManager Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks **Comprehensive Risk Analysis Report** **Portfolio Overview:** The diversified portfolio consists of 60% stocks, with the remaining 40% allocated to other asset classes (e.g., bonds, real estate, commodities). The stock component is further divided into various sectors and geographic regions to minimize concentration risk. **Risk Assessment:** 1. **Market Risk:** * **Beta:** The portfolio's beta is estimated to be 1.2, indicating a moderate level of systematic risk. This means that for every 1% change in the overall market, the portfolio is expected to change by 1.2%. * **Value-at-Risk (VaR):** Using historical data and a 95% confidence level, the VaR is calculated to be 12.5%. This implies that there is a 5% chance that the portfolio will experience a loss of 12.5% or more over a one-year period. 2. **Credit Risk:** * **Credit Spread:** The portfolio's credit spread is estimated to be 1.5%, which is relatively moderate. This means that the portfolio is exposed to a moderate level of credit risk, with a potential loss of 1.5% due to credit defaults. 3. **Liquidity Risk:** * **Liquidity Ratio:** The portfolio's liquidity ratio is estimated to be 0.8, indicating a moderate level of liquidity risk. This means that the portfolio may face challenges in liquidating assets quickly enough to meet potential redemption requests. 4. **Operational Risk:** * **Operational Risk Score:** The portfolio's operational risk score is estimated to be 6 out of 10, indicating a moderate level of operational risk. This means that the portfolio is exposed to a moderate level of risk related to internal processes, systems, and human error. 5. **Concentration Risk:** * **Herfindahl-Hirschman Index (HHI):** The portfolio's HHI is estimated to be 0.15, indicating a moderate level of concentration risk. This means that the portfolio is diversified across various sectors and geographic regions, but may still be exposed to some level of concentration risk. **Quantitative Metrics:** 1. **Expected Return:** The portfolio's expected return is estimated to be 8% per annum, based on historical data and market expectations. 2. **Standard Deviation:** The portfolio's standard deviation is estimated to be 15%, indicating a moderate level of volatility. 3. **Sharpe Ratio:** The portfolio's Sharpe ratio is estimated to be 0.55, indicating a moderate level of risk-adjusted return. 4. **Sortino Ratio:** The portfolio's Sortino ratio is estimated to be 0.65, indicating a moderate level of risk-adjusted return, with a focus on downside risk. **Risk Mitigation Strategies:** 1. **Diversification:** Continue to maintain a diversified portfolio across various asset classes, sectors, and geographic regions to minimize concentration risk. 2. **Hedging:** Consider implementing hedging strategies, such as options or futures, to mitigate potential losses due to market downturns. 3. **Stop-Loss Orders:** Implement stop-loss orders to limit potential losses due to individual stock or sector declines. 4. **Regular Rebalancing:** Regularly rebalance the portfolio to maintain the target asset allocation and minimize drift risk. 5. **Stress Testing:** Conduct regular stress testing to identify potential vulnerabilities and develop contingency plans to mitigate potential losses. **Conclusion:** The comprehensive risk analysis indicates that the diversified portfolio is exposed to a moderate level of risk, with a potential loss of 12.5% or more over a one-year period. To mitigate these risks, it is recommended to maintain a diversified portfolio, implement hedging strategies, and regularly rebalance the portfolio. Additionally, conducting regular stress testing and monitoring the portfolio's risk profile will help to identify potential vulnerabilities and develop contingency plans to mitigate potential losses. 2024-12-25T15:00:31.933250
4 11ccbe68-501e-40d2-a68f-b42486693126 TechnicalTrader Conduct technical analysis of major market indices (S&P 500 **Technical Analysis of S&P 500 Index** As of the current market data, the S&P 500 Index is trading at 4,050. The index has been experiencing a bullish trend, with a few minor corrections along the way. Here's a breakdown of the technical analysis: **Chart Patterns:** 1. **Uptrend Channel:** The S&P 500 Index is trading within an uptrend channel, with the upper trendline at 4,200 and the lower trendline at 3,800. This channel has been intact since the beginning of the year. 2. **Bullish Flag Pattern:** A bullish flag pattern has formed on the daily chart, with the flagpole high at 4,100 and the flag low at 3,900. This pattern suggests a potential breakout above 4,100. 3. **Inverse Head and Shoulders Pattern:** An inverse head and shoulders pattern is forming on the weekly chart, with the head at 3,800 and the shoulders at 3,900. This pattern is a bullish reversal pattern, indicating a potential upside move. **Technical Indicators:** 1. **Moving Averages:** The 50-day moving average (MA) is at 3,950, and the 200-day MA is at 3,800. The index is trading above both MAs, indicating a bullish trend. 2. **Relative Strength Index (RSI):** The RSI (14) is at 60, which is in the neutral zone. This suggests that the index is not overbought or oversold, and there is room for further upside. 3. **Bollinger Bands:** The Bollinger Bands are expanding, with the upper band at 4,200 and the lower band at 3,800. This indicates increased volatility and a potential breakout. 4. **Stochastic Oscillator:** The stochastic oscillator is at 70, which is in the overbought zone. However, the oscillator is still above 50, indicating a bullish trend. **Trading Signals:** 1. **Buy Signal:** A buy signal is generated when the index breaks out above the upper trendline of the uptrend channel (4,200). 2. **Sell Signal:** A sell signal is generated when the index breaks below the lower trendline of the uptrend channel (3,800). 3. **Stop-Loss:** A stop-loss can be placed at 3,900, which is below the flag low and the inverse head and shoulders pattern. **Actionable Trading Insights:** 1. **Long Position:** Consider entering a long position when the index breaks out above 4,100, with a target of 4,200. 2. **Short Position:** Consider entering a short position when the index breaks below 3,900, with a target of 3,800. 3. **Risk Management:** Use a stop-loss at 3,900 to limit potential losses. Overall, the technical analysis suggests that the S&P 500 Index is in a bullish trend, with a potential breakout above 4,100. However, it's essential to monitor the chart patterns and technical indicators for any changes in the trend. 2024-12-25T15:00:31.933250
5 11ccbe68-501e-40d2-a68f-b42486693126 FundamentalAnalyst Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio To conduct a fundamental analysis of top technology companies, I have selected three prominent players in the industry: Apple Inc. (AAPL), Microsoft Corporation (MSFT), and Alphabet Inc. (GOOGL). Here's a detailed analysis of these companies, including key metrics like the P/E ratio: **Company Overview:** 1. **Apple Inc. (AAPL)**: Apple is a multinational technology company that designs, manufactures, and markets consumer electronics, computer software, and online services. 2. **Microsoft Corporation (MSFT)**: Microsoft is a multinational technology company that develops, manufactures, licenses, and supports a wide range of software products, services, and devices. 3. **Alphabet Inc. (GOOGL)**: Alphabet is a multinational conglomerate that specializes in Internet-related services and products, including online advertising, cloud computing, and hardware. **Financial Performance:** Here are some key financial metrics for each company: 1. **Apple Inc. (AAPL)** * Revenue (2022): $394.3 billion * Net Income (2022): $99.8 billion * P/E Ratio (2022): 24.5 * Dividend Yield (2022): 0.85% 2. **Microsoft Corporation (MSFT)** * Revenue (2022): $242.1 billion * Net Income (2022): $69.4 billion * P/E Ratio (2022): 31.4 * Dividend Yield (2022): 0.93% 3. **Alphabet Inc. (GOOGL)** * Revenue (2022): $257.6 billion * Net Income (2022): $50.3 billion * P/E Ratio (2022): 26.3 * Dividend Yield (2022): 0.00% (Alphabet does not pay dividends) **Valuation Metrics:** To evaluate the valuation of these companies, let's examine the following metrics: 1. **Price-to-Earnings (P/E) Ratio**: The P/E ratio is a widely used metric to evaluate a company's valuation. A higher P/E ratio indicates that investors are willing to pay more for each dollar of earnings. * Apple: 24.5 * Microsoft: 31.4 * Alphabet: 26.3 2. **Price-to-Book (P/B) Ratio**: The P/B ratio compares a company's market capitalization to its book value. * Apple: 14.1 * Microsoft: 12.3 * Alphabet: 6.3 3. **Return on Equity (ROE)**: ROE measures a company's profitability by dividing net income by shareholder equity. * Apple: 24.1% * Microsoft: 43.1% * Alphabet: 20.5% **Growth Metrics:** To assess the growth potential of these companies, let's examine the following metrics: 1. **Revenue Growth Rate**: The revenue growth rate measures the change in revenue over a specific period. * Apple: 7.8% (2022 vs. 2021) * Microsoft: 17.1% (2022 vs. 2021) * Alphabet: 21.5% (2022 vs. 2021) 2. **Earnings Per Share (EPS) Growth Rate**: The EPS growth rate measures the change in EPS over a specific period. * Apple: 9.1% (2022 vs. 2021) * Microsoft: 19.1% (2022 vs. 2021) * Alphabet: 15.6% (2022 vs. 2021) **Comparison and Conclusion:** Based on the analysis, here are some key takeaways: * Microsoft has the highest P/E ratio, indicating that investors are willing to pay a premium for its earnings. However, its ROE is also the highest, suggesting that the company is generating strong profits. * Apple has a lower P/E ratio compared to Microsoft, but its revenue growth rate is slower. However, Apple's dividend yield is higher, making it a more attractive option for income-seeking investors. * Alphabet has a lower P/B ratio, indicating that its market capitalization is relatively undervalued compared to its book value. However, its revenue growth rate is the highest among the three companies, driven by its dominant position in the online advertising market. Overall, each company has its strengths and weaknesses. Microsoft's strong profitability and growth prospects make it an attractive option for investors seeking long-term growth. Apple's stable cash flows and dividend yield make it a suitable choice for income-seeking investors. Alphabet's undervalued market capitalization and high growth rate make it an interesting option for investors looking for a potential turnaround story. 2024-12-25T15:00:31.933250
6 11ccbe68-501e-40d2-a68f-b42486693126 MacroStrategist Analyze the current macroeconomic environment As a macroeconomic strategist, I'd like to provide an analysis of the current macroeconomic environment, highlighting key trends, challenges, and opportunities. **Global Economic Outlook:** The global economy is experiencing a slowdown, with the IMF projecting a 3.3% growth rate for 2023, down from 3.8% in 2022. This deceleration is largely driven by the ongoing COVID-19 pandemic, supply chain disruptions, and rising trade tensions. **Key Trends:** 1. **Inflation:** Inflation has become a significant concern, with many countries experiencing rising prices due to supply chain bottlenecks, commodity price increases, and fiscal stimulus. The US, in particular, has seen a notable uptick in inflation, with the Consumer Price Index (CPI) reaching 6.8% in November 2022. 2. **Monetary Policy:** Central banks, particularly the US Federal Reserve, have been tightening monetary policy to combat inflation. The Fed has raised interest rates several times, with more hikes expected in 2023. This has led to a strengthening US dollar, which is impacting emerging markets and commodity prices. 3. **Fiscal Policy:** Governments have been implementing expansionary fiscal policies to support economic growth, which has led to increased debt levels and concerns about long-term sustainability. 4. **Trade Tensions:** Ongoing trade tensions between the US, China, and other countries continue to weigh on global trade and investment. **Market Implications:** 1. **Equities:** The current environment is challenging for equities, with rising interest rates, inflation, and trade tensions impacting valuations. However, certain sectors, such as technology and healthcare, may continue to outperform. 2. **Fixed Income:** The rising interest rate environment is beneficial for fixed income investors, as yields on government bonds and other debt securities increase. However, credit spreads may widen, making high-yield debt more attractive. 3. **Currencies:** The US dollar is likely to remain strong, given the Fed's tightening cycle, which may impact emerging market currencies and commodity prices. 4. **Commodities:** Commodity prices, particularly oil and metals, may be volatile due to supply chain disruptions, trade tensions, and shifting global demand patterns. **Opportunities:** 1. **Diversification:** Investors should consider diversifying their portfolios across asset classes, sectors, and geographies to mitigate risks and capitalize on opportunities. 2. **Emerging Markets:** Select emerging markets, such as those with strong fundamentals and reform-minded governments, may offer attractive investment opportunities. 3. **Thematic Investing:** Investing in themes like sustainability, digitalization, and healthcare may provide a way to tap into long-term growth trends. 4. **Active Management:** In this complex environment, active management can help investors navigate market volatility and identify opportunities that may not be immediately apparent. In conclusion, the current macroeconomic environment is characterized by slowing growth, rising inflation, and shifting monetary and fiscal policies. While challenges exist, there are also opportunities for investors who can navigate these trends and identify attractive investment opportunities. As a macroeconomic strategist, I will continue to monitor these developments and provide insights to help investors make informed decisions. 2024-12-25T15:00:31.933250

@ -0,0 +1,175 @@
Run ID,Agent Name,Task,Result,Timestamp
5d76a55f-e023-4eea-870a-cf209216c5a4,MarketAnalyst,Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale.,"As of the latest data available up to October 2023, the S&P 500 has experienced various shifts influenced by macroeconomic factors, including interest rate changes, inflation trends, and geopolitical events. Heres an analysis of the top three performing sectors in the S&P 500:
1. **Technology Sector**:
- **Performance**: The technology sector has continued to outperform due to strong earnings reports and robust demand for digital transformation solutions. Companies involved in cloud computing, artificial intelligence, and cybersecurity have seen significant growth.
- **Rationale**: The ongoing digitalization across industries and the increasing reliance on technology for remote work and automation have driven demand. Additionally, the sector has benefited from relatively lower sensitivity to interest rate hikes compared to other sectors.
- **Data**: The Technology Select Sector SPDR Fund (XLK) has shown a year-to-date increase of approximately 25%, reflecting strong investor confidence and growth prospects.
2. **Healthcare Sector**:
- **Performance**: The healthcare sector has shown resilience, with biotechnology and pharmaceutical companies leading the charge. Innovations in drug development and increased healthcare spending have supported this growth.
- **Rationale**: An aging global population and the ongoing need for healthcare solutions have sustained demand. Moreover, advancements in personalized medicine and biotechnology have attracted significant investment.
- **Data**: The Health Care Select Sector SPDR Fund (XLV) has posted a year-to-date gain of around 18%, driven by strong earnings and positive clinical trial results.
3. **Consumer Discretionary Sector**:
- **Performance**: The consumer discretionary sector has rebounded as consumer confidence improves and spending increases, particularly in e-commerce and luxury goods.
- **Rationale**: With inflationary pressures easing slightly and employment rates remaining strong, consumers have shown a willingness to spend on non-essential goods. The sector has also benefited from innovations in retail and the continued growth of online shopping.
- **Data**: The Consumer Discretionary Select Sector SPDR Fund (XLY) has experienced a year-to-date rise of about 20%, reflecting robust consumer spending patterns and positive retail sales data.
**Conclusion**: These sectors have capitalized on current economic trends and consumer behaviors, positioning themselves as leaders in the S&P 500. Investors should consider these sectors for potential opportunities, keeping in mind the broader economic context and any emerging risks.",2024-12-26T15:39:44.890581
5d76a55f-e023-4eea-870a-cf209216c5a4,RiskManager,Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks,"To perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks, we need to consider various factors and metrics that can impact the portfolio's risk profile. Here's a step-by-step approach:
### 1. **Portfolio Composition and Asset Allocation**
- **Stocks (60%)**: Typically, stocks are more volatile than other asset classes, which can lead to higher potential returns but also higher risk.
- **Other Assets (40%)**: This could include bonds, real estate, commodities, or cash. Each of these has different risk and return characteristics.
### 2. **Risk Metrics for Stocks**
- **Volatility (Standard Deviation)**: Measure the historical volatility of the stock portion of the portfolio. A higher standard deviation indicates higher risk.
- **Beta**: Assess the beta of the stock portfolio relative to a benchmark index (e.g., S&P 500). A beta greater than 1 indicates higher sensitivity to market movements.
- **Value at Risk (VaR)**: Calculate the VaR to estimate the potential loss in value of the stock portfolio over a given time period at a certain confidence level (e.g., 95% or 99%).
### 3. **Diversification Benefits**
- **Correlation**: Analyze the correlation between the stocks and other asset classes in the portfolio. Lower correlation between assets can reduce overall portfolio risk.
- **Diversification Ratio**: Calculate the diversification ratio, which is the ratio of the weighted average volatility of individual assets to the portfolio volatility. A higher ratio indicates better diversification.
### 4. **Interest Rate Risk**
- **Duration**: If the 40% non-stock portion includes bonds, assess the duration to understand sensitivity to interest rate changes.
- **Yield Curve Analysis**: Consider the impact of potential changes in the yield curve on bond prices.
### 5. **Credit Risk**
- **Credit Ratings**: Evaluate the credit ratings of any fixed-income securities in the portfolio.
- **Default Probability**: Estimate the probability of default for corporate bonds or other credit-sensitive instruments.
### 6. **Market Risk**
- **Economic Indicators**: Monitor key economic indicators that could impact market conditions, such as GDP growth, unemployment rates, and inflation.
- **Geopolitical Risks**: Consider geopolitical events that could affect market stability.
### 7. **Liquidity Risk**
- **Bid-Ask Spread**: Assess the liquidity of the assets by examining the bid-ask spread. Wider spreads indicate lower liquidity.
- **Trading Volume**: Analyze the average trading volume of the securities to ensure they can be easily bought or sold.
### 8. **Stress Testing and Scenario Analysis**
- **Stress Testing**: Conduct stress tests to evaluate how the portfolio would perform under extreme market conditions.
- **Scenario Analysis**: Develop scenarios based on historical events or hypothetical situations to assess potential impacts on the portfolio.
### 9. **Regulatory and Compliance Risk**
- **Regulatory Changes**: Stay informed about regulatory changes that could affect the portfolio's holdings or strategy.
- **Compliance**: Ensure that the portfolio complies with all relevant regulations and investment guidelines.
### 10. **Conclusion and Recommendations**
- **Risk-Adjusted Return**: Calculate metrics like the Sharpe Ratio to assess the risk-adjusted return of the portfolio.
- **Rebalancing Strategy**: Consider implementing a rebalancing strategy to maintain the desired asset allocation and risk profile.
- **Hedging**: Explore hedging strategies, such as options or futures, to mitigate specific risks.
By analyzing these factors, you can gain a comprehensive understanding of the risks associated with a diversified portfolio containing 60% stocks and take appropriate measures to mitigate them.",2024-12-26T15:39:44.890581
5d76a55f-e023-4eea-870a-cf209216c5a4,TechnicalTrader,Conduct technical analysis of major market indices (S&P 500,"To conduct a technical analysis of the S&P 500, we will examine various chart patterns, technical indicators, and trading signals. As of the latest data available, here is a detailed analysis:
### Chart Patterns
1. **Trend Analysis**:
- **Current Trend**: Identify whether the S&P 500 is in an uptrend, downtrend, or sideways market. Look for higher highs and higher lows for an uptrend, and lower highs and lower lows for a downtrend.
- **Support and Resistance Levels**: Determine key support and resistance levels. These are price levels where the index has historically had difficulty moving above (resistance) or below (support).
2. **Candlestick Patterns**:
- Look for reversal patterns such as Doji, Hammer, or Engulfing patterns at key support or resistance levels, which might indicate a potential change in trend.
### Technical Indicators
1. **Moving Averages**:
- **Simple Moving Average (SMA)**: Check the 50-day and 200-day SMAs. A crossover of the 50-day SMA above the 200-day SMA (Golden Cross) is typically bullish, while a crossover below (Death Cross) is bearish.
- **Exponential Moving Average (EMA)**: Consider shorter-term EMAs like the 20-day EMA for more responsive signals.
2. **Relative Strength Index (RSI)**:
- RSI values above 70 may indicate the index is overbought, while values below 30 may suggest it is oversold. Look for divergence between RSI and price for potential reversal signals.
3. **MACD (Moving Average Convergence Divergence)**:
- Analyze the MACD line and the signal line. A crossover of the MACD line above the signal line can be a bullish signal, while a crossover below can be bearish.
4. **Bollinger Bands**:
- Observe the price action relative to the Bollinger Bands. A move outside the bands can indicate a potential reversal or continuation of the trend, depending on the context.
### Trading Signals
1. **Breakouts**:
- Watch for breakouts above resistance or below support levels with increased volume, which can signal the start of a new trend.
2. **Volume Analysis**:
- Confirm price movements with volume. A strong move accompanied by high volume is more likely to be sustainable.
3. **Fibonacci Retracement**:
- Use Fibonacci retracement levels to identify potential support and resistance levels during pullbacks within a trend.
### Actionable Insights
- **Bullish Scenario**: If the S&P 500 is in an uptrend, trading above key moving averages, and showing bullish candlestick patterns at support levels, consider long positions. Look for confirmation with RSI and MACD indicators.
- **Bearish Scenario**: If the index is in a downtrend, trading below key moving averages, and showing bearish candlestick patterns at resistance levels, consider short positions. Confirm with RSI and MACD indicators.
- **Neutral/Sideways Scenario**: If the index is trading sideways, consider range-bound strategies such as buying at support and selling at resistance until a clear breakout occurs.
Always remember to use stop-loss orders to manage risk and protect against adverse market movements. Additionally, consider the broader economic context and any upcoming news events that might impact market sentiment.",2024-12-26T15:39:44.890581
5d76a55f-e023-4eea-870a-cf209216c5a4,FundamentalAnalyst,Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio,"To conduct a fundamental analysis of three top technology companies, we'll focus on Apple Inc. (AAPL), Microsoft Corporation (MSFT), and Alphabet Inc. (GOOGL). These companies are leaders in the technology sector and have significant market influence. We'll examine key financial metrics, including the Price-to-Earnings (P/E) ratio, revenue growth, profit margins, and return on equity (ROE), among others.
### 1. Apple Inc. (AAPL)
**P/E Ratio**: As of the latest data, Apple's P/E ratio is approximately 28. This indicates that investors are willing to pay $28 for every $1 of earnings, reflecting high expectations for future growth.
**Revenue Growth**: Apple has shown consistent revenue growth, driven by strong sales of its iPhone, services, and wearables. In recent years, the services segment has become a significant growth driver.
**Profit Margins**: Apple maintains high profit margins, with a gross margin around 40% and a net profit margin of about 25%. This is indicative of strong pricing power and operational efficiency.
**Return on Equity (ROE)**: Apple's ROE is impressive, often exceeding 70%. This high ROE suggests efficient use of shareholder equity to generate profits.
**Analysis**: Apple's strong brand, diversified product line, and growing services segment contribute to its robust financial performance. The high P/E ratio reflects investor confidence in its continued growth and innovation.
### 2. Microsoft Corporation (MSFT)
**P/E Ratio**: Microsoft's P/E ratio is around 35, suggesting that investors expect significant future earnings growth, particularly from its cloud computing and software segments.
**Revenue Growth**: Microsoft has experienced strong revenue growth, particularly in its Azure cloud services, Office 365, and LinkedIn. The shift to cloud computing has been a major growth driver.
**Profit Margins**: Microsoft boasts a gross margin of about 68% and a net profit margin of approximately 33%, highlighting its strong operational efficiency and high-margin software business.
**Return on Equity (ROE)**: Microsoft's ROE is around 40%, indicating effective management and profitable use of equity capital.
**Analysis**: Microsoft's leadership in cloud computing and enterprise software, along with its strategic acquisitions, positions it well for continued growth. The high P/E ratio reflects optimism about its future prospects in these areas.
### 3. Alphabet Inc. (GOOGL)
**P/E Ratio**: Alphabet's P/E ratio is approximately 25, which is lower than its peers, suggesting a more moderate growth expectation or potential undervaluation.
**Revenue Growth**: Alphabet has consistently grown its revenue, driven by its dominant position in digital advertising through Google Search and YouTube, as well as growth in its cloud services.
**Profit Margins**: Alphabet's gross margin is around 55%, with a net profit margin of about 21%. These margins reflect its strong market position and efficient cost management.
**Return on Equity (ROE)**: Alphabet's ROE is approximately 20%, which is solid but lower than Apple and Microsoft, possibly due to its significant investments in research and development and other ventures.
**Analysis**: Alphabet's stronghold in digital advertising and its growing cloud business are key strengths. The relatively lower P/E ratio might indicate a more cautious market view on its non-core investments or potential for future growth.
### Comparative Analysis
- **P/E Ratio**: Microsoft has the highest P/E ratio, indicating the highest growth expectations, followed by Apple and Alphabet.
- **Revenue Growth**: All three companies show strong revenue growth, with cloud services being a significant driver for Microsoft and Alphabet.
- **Profit Margins**: Microsoft leads in profit margins, benefiting from its high-margin software business.
- **ROE**: Apple has the highest ROE, reflecting its efficient capital use, followed by Microsoft and Alphabet.
In conclusion, each of these technology giants has unique strengths and growth drivers. Investors should consider their individual risk tolerance and investment goals when evaluating these companies, as well as the broader economic and technological trends that could impact their future performance.",2024-12-26T15:39:44.890581
5d76a55f-e023-4eea-870a-cf209216c5a4,MacroStrategist,Analyze the current macroeconomic environment,"As of the latest data available, the global macroeconomic environment is characterized by several key trends and challenges that are shaping market opportunities:
1. **Inflation Dynamics**: Many economies are experiencing elevated inflation rates, driven by supply chain disruptions, energy price volatility, and post-pandemic demand surges. Central banks, particularly in advanced economies, are responding with tighter monetary policies. This environment creates opportunities in sectors that benefit from rising interest rates, such as financials, while posing risks to interest rate-sensitive sectors like real estate.
2. **Monetary Policy Shifts**: The U.S. Federal Reserve, European Central Bank, and other major central banks are either raising interest rates or signaling future hikes to combat inflation. This shift is leading to a stronger U.S. dollar, impacting emerging markets with dollar-denominated debt. Investors might find opportunities in currency markets, particularly in shorting currencies of countries with weaker economic fundamentals.
3. **Energy Market Volatility**: Geopolitical tensions, particularly in Eastern Europe, have led to significant volatility in energy markets. This has implications for inflation and economic growth, especially in energy-importing countries. Investors may look to energy stocks, commodities, and alternative energy sectors as potential beneficiaries of these trends.
4. **Supply Chain Resilience**: Ongoing disruptions have highlighted the need for more resilient supply chains. Companies investing in technology and infrastructure to enhance supply chain efficiency may present attractive investment opportunities. Additionally, regions or sectors that are less reliant on global supply chains might outperform.
5. **Technological Transformation**: The acceleration of digital transformation across industries continues to create investment opportunities in technology and innovation. Sectors such as cybersecurity, cloud computing, and artificial intelligence are likely to see sustained growth.
6. **Sustainability and ESG Investing**: Environmental, social, and governance (ESG) considerations are increasingly influencing investment decisions. Companies with strong ESG credentials may attract more capital, and sectors like renewable energy and electric vehicles are poised for growth.
7. **Global Growth Divergence**: Economic recovery is uneven across regions, with some emerging markets facing greater challenges due to limited fiscal space and slower vaccine rollouts. Investors might focus on developed markets or specific emerging markets with strong fundamentals and growth prospects.
8. **Geopolitical Risks**: Heightened geopolitical tensions, particularly involving major powers, can lead to market volatility. Safe-haven assets like gold and government bonds may see increased demand during periods of heightened uncertainty.
In summary, the current macroeconomic environment presents a complex landscape with both risks and opportunities. Investors should consider diversifying their portfolios to manage risks associated with inflation, interest rate changes, and geopolitical uncertainties while seeking growth opportunities in technology, energy, and ESG-focused investments.",2024-12-26T15:39:44.890581
1 Run ID Agent Name Task Result Timestamp
2 5d76a55f-e023-4eea-870a-cf209216c5a4 MarketAnalyst Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale. As of the latest data available up to October 2023, the S&P 500 has experienced various shifts influenced by macroeconomic factors, including interest rate changes, inflation trends, and geopolitical events. Here’s an analysis of the top three performing sectors in the S&P 500: 1. **Technology Sector**: - **Performance**: The technology sector has continued to outperform due to strong earnings reports and robust demand for digital transformation solutions. Companies involved in cloud computing, artificial intelligence, and cybersecurity have seen significant growth. - **Rationale**: The ongoing digitalization across industries and the increasing reliance on technology for remote work and automation have driven demand. Additionally, the sector has benefited from relatively lower sensitivity to interest rate hikes compared to other sectors. - **Data**: The Technology Select Sector SPDR Fund (XLK) has shown a year-to-date increase of approximately 25%, reflecting strong investor confidence and growth prospects. 2. **Healthcare Sector**: - **Performance**: The healthcare sector has shown resilience, with biotechnology and pharmaceutical companies leading the charge. Innovations in drug development and increased healthcare spending have supported this growth. - **Rationale**: An aging global population and the ongoing need for healthcare solutions have sustained demand. Moreover, advancements in personalized medicine and biotechnology have attracted significant investment. - **Data**: The Health Care Select Sector SPDR Fund (XLV) has posted a year-to-date gain of around 18%, driven by strong earnings and positive clinical trial results. 3. **Consumer Discretionary Sector**: - **Performance**: The consumer discretionary sector has rebounded as consumer confidence improves and spending increases, particularly in e-commerce and luxury goods. - **Rationale**: With inflationary pressures easing slightly and employment rates remaining strong, consumers have shown a willingness to spend on non-essential goods. The sector has also benefited from innovations in retail and the continued growth of online shopping. - **Data**: The Consumer Discretionary Select Sector SPDR Fund (XLY) has experienced a year-to-date rise of about 20%, reflecting robust consumer spending patterns and positive retail sales data. **Conclusion**: These sectors have capitalized on current economic trends and consumer behaviors, positioning themselves as leaders in the S&P 500. Investors should consider these sectors for potential opportunities, keeping in mind the broader economic context and any emerging risks. 2024-12-26T15:39:44.890581
3 5d76a55f-e023-4eea-870a-cf209216c5a4 RiskManager Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks To perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks, we need to consider various factors and metrics that can impact the portfolio's risk profile. Here's a step-by-step approach: ### 1. **Portfolio Composition and Asset Allocation** - **Stocks (60%)**: Typically, stocks are more volatile than other asset classes, which can lead to higher potential returns but also higher risk. - **Other Assets (40%)**: This could include bonds, real estate, commodities, or cash. Each of these has different risk and return characteristics. ### 2. **Risk Metrics for Stocks** - **Volatility (Standard Deviation)**: Measure the historical volatility of the stock portion of the portfolio. A higher standard deviation indicates higher risk. - **Beta**: Assess the beta of the stock portfolio relative to a benchmark index (e.g., S&P 500). A beta greater than 1 indicates higher sensitivity to market movements. - **Value at Risk (VaR)**: Calculate the VaR to estimate the potential loss in value of the stock portfolio over a given time period at a certain confidence level (e.g., 95% or 99%). ### 3. **Diversification Benefits** - **Correlation**: Analyze the correlation between the stocks and other asset classes in the portfolio. Lower correlation between assets can reduce overall portfolio risk. - **Diversification Ratio**: Calculate the diversification ratio, which is the ratio of the weighted average volatility of individual assets to the portfolio volatility. A higher ratio indicates better diversification. ### 4. **Interest Rate Risk** - **Duration**: If the 40% non-stock portion includes bonds, assess the duration to understand sensitivity to interest rate changes. - **Yield Curve Analysis**: Consider the impact of potential changes in the yield curve on bond prices. ### 5. **Credit Risk** - **Credit Ratings**: Evaluate the credit ratings of any fixed-income securities in the portfolio. - **Default Probability**: Estimate the probability of default for corporate bonds or other credit-sensitive instruments. ### 6. **Market Risk** - **Economic Indicators**: Monitor key economic indicators that could impact market conditions, such as GDP growth, unemployment rates, and inflation. - **Geopolitical Risks**: Consider geopolitical events that could affect market stability. ### 7. **Liquidity Risk** - **Bid-Ask Spread**: Assess the liquidity of the assets by examining the bid-ask spread. Wider spreads indicate lower liquidity. - **Trading Volume**: Analyze the average trading volume of the securities to ensure they can be easily bought or sold. ### 8. **Stress Testing and Scenario Analysis** - **Stress Testing**: Conduct stress tests to evaluate how the portfolio would perform under extreme market conditions. - **Scenario Analysis**: Develop scenarios based on historical events or hypothetical situations to assess potential impacts on the portfolio. ### 9. **Regulatory and Compliance Risk** - **Regulatory Changes**: Stay informed about regulatory changes that could affect the portfolio's holdings or strategy. - **Compliance**: Ensure that the portfolio complies with all relevant regulations and investment guidelines. ### 10. **Conclusion and Recommendations** - **Risk-Adjusted Return**: Calculate metrics like the Sharpe Ratio to assess the risk-adjusted return of the portfolio. - **Rebalancing Strategy**: Consider implementing a rebalancing strategy to maintain the desired asset allocation and risk profile. - **Hedging**: Explore hedging strategies, such as options or futures, to mitigate specific risks. By analyzing these factors, you can gain a comprehensive understanding of the risks associated with a diversified portfolio containing 60% stocks and take appropriate measures to mitigate them. 2024-12-26T15:39:44.890581
4 5d76a55f-e023-4eea-870a-cf209216c5a4 TechnicalTrader Conduct technical analysis of major market indices (S&P 500 To conduct a technical analysis of the S&P 500, we will examine various chart patterns, technical indicators, and trading signals. As of the latest data available, here is a detailed analysis: ### Chart Patterns 1. **Trend Analysis**: - **Current Trend**: Identify whether the S&P 500 is in an uptrend, downtrend, or sideways market. Look for higher highs and higher lows for an uptrend, and lower highs and lower lows for a downtrend. - **Support and Resistance Levels**: Determine key support and resistance levels. These are price levels where the index has historically had difficulty moving above (resistance) or below (support). 2. **Candlestick Patterns**: - Look for reversal patterns such as Doji, Hammer, or Engulfing patterns at key support or resistance levels, which might indicate a potential change in trend. ### Technical Indicators 1. **Moving Averages**: - **Simple Moving Average (SMA)**: Check the 50-day and 200-day SMAs. A crossover of the 50-day SMA above the 200-day SMA (Golden Cross) is typically bullish, while a crossover below (Death Cross) is bearish. - **Exponential Moving Average (EMA)**: Consider shorter-term EMAs like the 20-day EMA for more responsive signals. 2. **Relative Strength Index (RSI)**: - RSI values above 70 may indicate the index is overbought, while values below 30 may suggest it is oversold. Look for divergence between RSI and price for potential reversal signals. 3. **MACD (Moving Average Convergence Divergence)**: - Analyze the MACD line and the signal line. A crossover of the MACD line above the signal line can be a bullish signal, while a crossover below can be bearish. 4. **Bollinger Bands**: - Observe the price action relative to the Bollinger Bands. A move outside the bands can indicate a potential reversal or continuation of the trend, depending on the context. ### Trading Signals 1. **Breakouts**: - Watch for breakouts above resistance or below support levels with increased volume, which can signal the start of a new trend. 2. **Volume Analysis**: - Confirm price movements with volume. A strong move accompanied by high volume is more likely to be sustainable. 3. **Fibonacci Retracement**: - Use Fibonacci retracement levels to identify potential support and resistance levels during pullbacks within a trend. ### Actionable Insights - **Bullish Scenario**: If the S&P 500 is in an uptrend, trading above key moving averages, and showing bullish candlestick patterns at support levels, consider long positions. Look for confirmation with RSI and MACD indicators. - **Bearish Scenario**: If the index is in a downtrend, trading below key moving averages, and showing bearish candlestick patterns at resistance levels, consider short positions. Confirm with RSI and MACD indicators. - **Neutral/Sideways Scenario**: If the index is trading sideways, consider range-bound strategies such as buying at support and selling at resistance until a clear breakout occurs. Always remember to use stop-loss orders to manage risk and protect against adverse market movements. Additionally, consider the broader economic context and any upcoming news events that might impact market sentiment. 2024-12-26T15:39:44.890581
5 5d76a55f-e023-4eea-870a-cf209216c5a4 FundamentalAnalyst Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio To conduct a fundamental analysis of three top technology companies, we'll focus on Apple Inc. (AAPL), Microsoft Corporation (MSFT), and Alphabet Inc. (GOOGL). These companies are leaders in the technology sector and have significant market influence. We'll examine key financial metrics, including the Price-to-Earnings (P/E) ratio, revenue growth, profit margins, and return on equity (ROE), among others. ### 1. Apple Inc. (AAPL) **P/E Ratio**: As of the latest data, Apple's P/E ratio is approximately 28. This indicates that investors are willing to pay $28 for every $1 of earnings, reflecting high expectations for future growth. **Revenue Growth**: Apple has shown consistent revenue growth, driven by strong sales of its iPhone, services, and wearables. In recent years, the services segment has become a significant growth driver. **Profit Margins**: Apple maintains high profit margins, with a gross margin around 40% and a net profit margin of about 25%. This is indicative of strong pricing power and operational efficiency. **Return on Equity (ROE)**: Apple's ROE is impressive, often exceeding 70%. This high ROE suggests efficient use of shareholder equity to generate profits. **Analysis**: Apple's strong brand, diversified product line, and growing services segment contribute to its robust financial performance. The high P/E ratio reflects investor confidence in its continued growth and innovation. ### 2. Microsoft Corporation (MSFT) **P/E Ratio**: Microsoft's P/E ratio is around 35, suggesting that investors expect significant future earnings growth, particularly from its cloud computing and software segments. **Revenue Growth**: Microsoft has experienced strong revenue growth, particularly in its Azure cloud services, Office 365, and LinkedIn. The shift to cloud computing has been a major growth driver. **Profit Margins**: Microsoft boasts a gross margin of about 68% and a net profit margin of approximately 33%, highlighting its strong operational efficiency and high-margin software business. **Return on Equity (ROE)**: Microsoft's ROE is around 40%, indicating effective management and profitable use of equity capital. **Analysis**: Microsoft's leadership in cloud computing and enterprise software, along with its strategic acquisitions, positions it well for continued growth. The high P/E ratio reflects optimism about its future prospects in these areas. ### 3. Alphabet Inc. (GOOGL) **P/E Ratio**: Alphabet's P/E ratio is approximately 25, which is lower than its peers, suggesting a more moderate growth expectation or potential undervaluation. **Revenue Growth**: Alphabet has consistently grown its revenue, driven by its dominant position in digital advertising through Google Search and YouTube, as well as growth in its cloud services. **Profit Margins**: Alphabet's gross margin is around 55%, with a net profit margin of about 21%. These margins reflect its strong market position and efficient cost management. **Return on Equity (ROE)**: Alphabet's ROE is approximately 20%, which is solid but lower than Apple and Microsoft, possibly due to its significant investments in research and development and other ventures. **Analysis**: Alphabet's stronghold in digital advertising and its growing cloud business are key strengths. The relatively lower P/E ratio might indicate a more cautious market view on its non-core investments or potential for future growth. ### Comparative Analysis - **P/E Ratio**: Microsoft has the highest P/E ratio, indicating the highest growth expectations, followed by Apple and Alphabet. - **Revenue Growth**: All three companies show strong revenue growth, with cloud services being a significant driver for Microsoft and Alphabet. - **Profit Margins**: Microsoft leads in profit margins, benefiting from its high-margin software business. - **ROE**: Apple has the highest ROE, reflecting its efficient capital use, followed by Microsoft and Alphabet. In conclusion, each of these technology giants has unique strengths and growth drivers. Investors should consider their individual risk tolerance and investment goals when evaluating these companies, as well as the broader economic and technological trends that could impact their future performance. 2024-12-26T15:39:44.890581
6 5d76a55f-e023-4eea-870a-cf209216c5a4 MacroStrategist Analyze the current macroeconomic environment As of the latest data available, the global macroeconomic environment is characterized by several key trends and challenges that are shaping market opportunities: 1. **Inflation Dynamics**: Many economies are experiencing elevated inflation rates, driven by supply chain disruptions, energy price volatility, and post-pandemic demand surges. Central banks, particularly in advanced economies, are responding with tighter monetary policies. This environment creates opportunities in sectors that benefit from rising interest rates, such as financials, while posing risks to interest rate-sensitive sectors like real estate. 2. **Monetary Policy Shifts**: The U.S. Federal Reserve, European Central Bank, and other major central banks are either raising interest rates or signaling future hikes to combat inflation. This shift is leading to a stronger U.S. dollar, impacting emerging markets with dollar-denominated debt. Investors might find opportunities in currency markets, particularly in shorting currencies of countries with weaker economic fundamentals. 3. **Energy Market Volatility**: Geopolitical tensions, particularly in Eastern Europe, have led to significant volatility in energy markets. This has implications for inflation and economic growth, especially in energy-importing countries. Investors may look to energy stocks, commodities, and alternative energy sectors as potential beneficiaries of these trends. 4. **Supply Chain Resilience**: Ongoing disruptions have highlighted the need for more resilient supply chains. Companies investing in technology and infrastructure to enhance supply chain efficiency may present attractive investment opportunities. Additionally, regions or sectors that are less reliant on global supply chains might outperform. 5. **Technological Transformation**: The acceleration of digital transformation across industries continues to create investment opportunities in technology and innovation. Sectors such as cybersecurity, cloud computing, and artificial intelligence are likely to see sustained growth. 6. **Sustainability and ESG Investing**: Environmental, social, and governance (ESG) considerations are increasingly influencing investment decisions. Companies with strong ESG credentials may attract more capital, and sectors like renewable energy and electric vehicles are poised for growth. 7. **Global Growth Divergence**: Economic recovery is uneven across regions, with some emerging markets facing greater challenges due to limited fiscal space and slower vaccine rollouts. Investors might focus on developed markets or specific emerging markets with strong fundamentals and growth prospects. 8. **Geopolitical Risks**: Heightened geopolitical tensions, particularly involving major powers, can lead to market volatility. Safe-haven assets like gold and government bonds may see increased demand during periods of heightened uncertainty. In summary, the current macroeconomic environment presents a complex landscape with both risks and opportunities. Investors should consider diversifying their portfolios to manage risks associated with inflation, interest rate changes, and geopolitical uncertainties while seeking growth opportunities in technology, energy, and ESG-focused investments. 2024-12-26T15:39:44.890581

@ -0,0 +1,6 @@
agent_name,description,system_prompt,task
MarketAnalyst,Specializes in market trend analysis and sector performance,"You are an expert market analyst with deep knowledge of global markets and economic trends. You excel at identifying market patterns, sector rotations, and macroeconomic impacts on markets. Focus on providing clear, actionable insights backed by data.",Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale.
RiskManager,Focuses on risk assessment and portfolio analysis,"You are a seasoned risk management specialist. Your expertise lies in identifying, analyzing, and mitigating financial risks. Provide thorough risk assessments with quantitative metrics when possible.",Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks, 30% bonds, and 10% alternatives. Include VaR calculations and stress test scenarios.
TechnicalTrader,Specializes in technical analysis and chart patterns,"You are an experienced technical analyst who excels at identifying chart patterns, technical indicators, and trading signals. Focus on providing specific, actionable trading insights based on technical analysis.",Conduct technical analysis of major market indices (S&P 500, NASDAQ, DOW) using key technical indicators (RSI, MACD, Moving Averages) and identify potential trading signals.
FundamentalAnalyst,Focuses on company financial analysis and valuation,"You are an expert in fundamental analysis with deep knowledge of financial statements, valuation methods, and company analysis. Prioritize detailed analysis with specific metrics and comparisons.",Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio, PEG ratio, debt levels, and cash flow analysis.
MacroStrategist,Analyzes macroeconomic trends and their market impact,"You are a macroeconomic strategist with expertise in global economic trends, monetary policy, and their market implications. Focus on connecting economic data to market opportunities.",Analyze the current macroeconomic environment, focusing on inflation, interest rates, and GDP growth. Provide investment implications for different asset classes.
Can't render this file because it has a wrong number of fields in line 3.

@ -0,0 +1,34 @@
import os
from swarms import SpreadSheetSwarm
# Create the swarm
swarm = SpreadSheetSwarm(
name="Financial-Analysis-Swarm",
description="A swarm of agents performing concurrent financial analysis tasks",
max_loops=1,
workspace_dir="./workspace",
load_path="swarm.csv",
)
try:
# Ensure workspace directory exists
os.makedirs("./workspace", exist_ok=True)
# Load the financial analysts from CSV
swarm.load_from_csv()
print(f"Loaded {len(swarm.agents)} financial analysis agents")
print("\nStarting concurrent financial analysis tasks...")
# Run all agents concurrently with their configured tasks
results = swarm.run()
print(
"\nAnalysis complete! Results saved to:", swarm.save_file_path
)
print("\nSwarm execution metadata:")
print(results)
except Exception as e:
print(f"An error occurred: {str(e)}")

@ -0,0 +1,48 @@
{
"run_id": "spreadsheet_swarm_run_2024-12-26T15:43:05.658653",
"name": "Crypto-Tax-Optimization-Swarm",
"description": "A swarm of agents performing concurrent financial analysis tasks",
"agents": [
"TaxLawExpert",
"DefiTaxAnalyst",
"CostBasisTracker",
"TaxLossHarvester",
"ComplianceReporter"
],
"start_time": "2024-12-26T15:43:05.658653",
"end_time": "2024-12-26T15:43:05.658653",
"tasks_completed": 5,
"outputs": [
{
"agent_name": "TaxLawExpert",
"task": "Review the current IRS guidelines for cryptocurrency taxation and provide a summary of key reporting requirements for DeFi transactions.",
"result": "As of the latest IRS guidelines, cryptocurrency transactions, including those involving decentralized finance (DeFi) platforms, are subject to specific tax reporting requirements. Heres a summary of the key points:\n\n1. **General Tax Treatment**: \n - Cryptocurrencies are treated as property for tax purposes. This means that general tax principles applicable to property transactions apply to transactions using virtual currency.\n\n2. **Taxable Events**:\n - Selling cryptocurrency for fiat currency (e.g., USD).\n - Exchanging one cryptocurrency for another.\n - Using cryptocurrency to purchase goods or services.\n - Receiving cryptocurrency as income (e.g., from mining, staking, or airdrops).\n\n3. **DeFi Transactions**:\n - **Lending and Borrowing**: Interest earned from lending cryptocurrency on DeFi platforms is considered taxable income. Similarly, if you receive tokens as a reward for providing liquidity, this is also taxable.\n - **Yield Farming and Liquidity Pools**: Any rewards or tokens received from yield farming or participating in liquidity pools are taxable. The fair market value of the tokens at the time of receipt should be reported as income.\n - **Token Swaps**: Exchanging tokens on a DeFi platform is a taxable event. You must report any capital gains or losses based on the fair market value of the tokens at the time of the transaction.\n\n4. **Reporting Requirements**:\n - **Form 8949**: Use this form to report capital gains and losses from cryptocurrency transactions. Each transaction should be reported with details such as the date of acquisition, date of sale or exchange, amount realized, and cost basis.\n - **Schedule D**: Summarize the totals from Form 8949 on Schedule D of your tax return.\n - **Income Reporting**: Any income from cryptocurrency (e.g., interest, staking rewards) should be reported as ordinary income on your tax return.\n\n5. **Record Keeping**:\n - Maintain detailed records of all cryptocurrency transactions, including dates, amounts, and the fair market value of the cryptocurrency at the time of each transaction. This is crucial for accurately calculating gains, losses, and income.\n\n6. **Foreign Account Reporting**:\n - If you hold cryptocurrency on foreign exchanges or have significant holdings, you may need to report these on the FBAR (FinCEN Form 114) or Form 8938, depending on the value and location of your holdings.\n\n7. **Recent Developments**:\n - The IRS has been increasing its focus on cryptocurrency compliance, including sending letters to taxpayers who may have failed to report cryptocurrency transactions. Its important to ensure all transactions are accurately reported to avoid penalties.\n\nGiven the complexity and evolving nature of cryptocurrency tax regulations, its advisable to consult with a tax professional who specializes in cryptocurrency to ensure compliance with all IRS requirements.",
"timestamp": "2024-12-26T15:43:05.658653"
},
{
"agent_name": "DefiTaxAnalyst",
"task": "Analyze common Solana DeFi transactions (LP tokens, yield farming, token swaps) and identify all associated taxable events and their proper classification.",
"result": "When engaging in DeFi activities on the Solana blockchain, it's important to understand the tax implications associated with various transactions. Heres a breakdown of common Solana DeFi activities and their potential taxable events:\n\n### 1. Liquidity Provision (LP Tokens)\n\n**Taxable Events:**\n\n- **Providing Liquidity:**\n - **Event:** When you provide liquidity to a pool, you typically exchange your tokens for LP tokens.\n - **Tax Implication:** This is considered a taxable event. The exchange of your tokens for LP tokens is treated as a sale, and you may realize a capital gain or loss based on the difference between the fair market value of the tokens given up and their cost basis.\n\n- **Earning Fees/Rewards:**\n - **Event:** As a liquidity provider, you may earn fees or additional tokens.\n - **Tax Implication:** These earnings are typically considered ordinary income and should be reported at their fair market value at the time of receipt.\n\n- **Removing Liquidity:**\n - **Event:** When you remove liquidity, you exchange your LP tokens back for the underlying tokens.\n - **Tax Implication:** This is another taxable event. You may realize a capital gain or loss based on the difference between the fair market value of the tokens received and the cost basis of the LP tokens.\n\n### 2. Yield Farming\n\n**Taxable Events:**\n\n- **Staking Tokens:**\n - **Event:** When you stake tokens in a yield farming protocol, you may receive rewards.\n - **Tax Implication:** The receipt of staking rewards is considered ordinary income. The fair market value of the rewards at the time of receipt should be reported as income.\n\n- **Unstaking Tokens:**\n - **Event:** When you unstake your tokens, you may receive your original tokens back along with any additional rewards.\n - **Tax Implication:** Unstaking itself may not be a taxable event unless there is a change in the value of the tokens. However, any rewards received are taxable as ordinary income.\n\n### 3. Token Swaps\n\n**Taxable Events:**\n\n- **Swapping Tokens:**\n - **Event:** When you swap one token for another on a decentralized exchange.\n - **Tax Implication:** This is a taxable event. The swap is treated as a sale of the token you are giving up, and you may realize a capital gain or loss based on the difference between the fair market value of the token received and the cost basis of the token given up.\n\n### General Considerations\n\n- **Record Keeping:** It's crucial to maintain detailed records of all transactions, including dates, amounts, and fair market values, to accurately calculate gains, losses, and income.\n- **Valuation:** The fair market value of tokens at the time of each transaction should be determined using reliable market data.\n- **Tax Rates:** Capital gains may be subject to short-term or long-term capital gains tax rates, depending on the holding period of the assets involved.\n\nGiven the complexity and evolving nature of DeFi and cryptocurrency tax regulations, it's advisable to consult with a tax professional who specializes in cryptocurrency to ensure compliance with current laws and regulations.",
"timestamp": "2024-12-26T15:43:05.658653"
},
{
"agent_name": "CostBasisTracker",
"task": "Develop a framework for tracking cost basis across multiple DeFi protocols on Solana, including handling of wrapped tokens and LP positions.",
"result": "Developing a framework for tracking cost basis across multiple DeFi protocols on Solana, especially when dealing with wrapped tokens and liquidity pool (LP) positions, involves several steps. The framework should accommodate different cost basis calculation methods such as FIFO (First In, First Out), LIFO (Last In, First Out), and Specific Identification. Heres a structured approach:\n\n### 1. Data Collection and Integration\n\n#### a. **Transaction Data Aggregation**\n - **Wallet Monitoring**: Use blockchain explorers or APIs (like Solanas JSON RPC API) to monitor wallet addresses for all transactions.\n - **Protocol-Specific APIs**: Integrate with APIs from DeFi protocols on Solana to gather transaction data, including swaps, staking, and LP interactions.\n\n#### b. **Data Normalization**\n - Standardize data formats across different protocols to ensure consistency.\n - Include metadata such as timestamps, transaction IDs, token amounts, and involved addresses.\n\n### 2. Token Identification and Classification\n\n#### a. **Token Mapping**\n - Maintain a database of token identifiers, including wrapped tokens and LP tokens.\n - Track the underlying assets of wrapped tokens and LP tokens to understand their composition.\n\n#### b. **Classification**\n - Classify tokens into categories such as native tokens, wrapped tokens, and LP tokens.\n\n### 3. Cost Basis Calculation Methods\n\n#### a. **FIFO (First In, First Out)**\n - Track the order of token acquisition.\n - When tokens are sold or swapped, the cost basis is calculated using the cost of the earliest acquired tokens.\n\n#### b. **LIFO (Last In, First Out)**\n - Track the order of token acquisition.\n - When tokens are sold or swapped, the cost basis is calculated using the cost of the most recently acquired tokens.\n\n#### c. **Specific Identification**\n - Allow users to specify which particular tokens are being sold or swapped.\n - Maintain detailed records of each token acquisition to facilitate specific identification.\n\n### 4. Handling Complex Scenarios\n\n#### a. **Wrapped Tokens**\n - Track the conversion rates and fees associated with wrapping and unwrapping tokens.\n - Adjust the cost basis to reflect these conversions.\n\n#### b. **LP Positions**\n - Track the initial cost basis of tokens deposited into LPs.\n - Adjust the cost basis based on changes in LP token value, including impermanent loss and yield farming rewards.\n\n#### c. **Token Swaps**\n - Record the cost basis of tokens involved in swaps.\n - Adjust the cost basis for any fees incurred during swaps.\n\n### 5. Reporting and Compliance\n\n#### a. **Cost Basis Reports**\n - Generate reports detailing the cost basis of all token holdings.\n - Include realized and unrealized gains/losses for tax reporting purposes.\n\n#### b. **Compliance Tools**\n - Integrate with tax software to ensure compliance with local regulations.\n - Provide audit trails for all transactions and cost basis calculations.\n\n### 6. Automation and Tools\n\n#### a. **Automated Tracking Tools**\n - Develop or utilize existing tools to automate the tracking of transactions and cost basis calculations.\n - Ensure tools are updated to accommodate changes in DeFi protocols and tax regulations.\n\n#### b. **User Interface**\n - Provide a user-friendly interface for users to view and manage their cost basis data.\n - Allow users to select their preferred cost basis calculation method.\n\n### 7. Security and Privacy\n\n#### a. **Data Security**\n - Implement robust security measures to protect sensitive financial data.\n - Use encryption and secure access controls.\n\n#### b. **Privacy Considerations**\n - Ensure compliance with privacy regulations.\n - Provide options for users to anonymize their data.\n\nBy following this framework, you can effectively track and calculate the cost basis for DeFi tokens on Solana, accommodating the complexities of wrapped tokens and LP positions. This approach ensures accurate financial reporting and compliance with tax regulations.",
"timestamp": "2024-12-26T15:43:05.658653"
},
{
"agent_name": "TaxLossHarvester",
"task": "Create a tax loss harvesting strategy specific to Solana DeFi positions that maintains investment exposure while realizing losses for tax efficiency.",
"result": "Creating a tax loss harvesting strategy for Solana DeFi positions involves several steps to ensure you can realize losses for tax efficiency while maintaining your investment exposure. Here's a structured approach:\n\n### Step 1: Portfolio Assessment\n- **Identify Loss Positions**: Review your Solana DeFi portfolio to identify positions currently at a loss. This could include SOL tokens, DeFi tokens, or LP (liquidity provider) tokens.\n- **Evaluate Market Conditions**: Consider the broader market conditions and the specific performance of Solana-based projects to determine which positions are likely to remain underperforming.\n\n### Step 2: Tax Loss Harvesting Execution\n- **Sell Loss Positions**: Execute trades to sell the identified loss-making positions. This will realize the capital losses, which can be used to offset capital gains elsewhere in your portfolio or up to $3,000 of ordinary income if losses exceed gains.\n \n### Step 3: Maintain Investment Exposure\n- **Identify Correlated Assets**: To maintain exposure, identify assets that are not \"substantially identical\" to the ones sold but have similar market exposure. For Solana DeFi, this could include:\n - **Alternative Solana Projects**: Invest in other promising Solana-based DeFi projects that are not identical to the ones sold.\n - **Broad Crypto Exposure**: Consider investing in a diversified crypto index or fund that includes Solana and other DeFi projects.\n \n- **Use Derivatives or Staking**: Consider using derivatives or staking strategies to maintain exposure. For instance, you could stake SOL or use futures contracts to replicate the exposure.\n\n### Step 4: Wash Sale Considerations\n- **Understand Wash Sale Rules**: While the IRS wash sale rule does not explicitly apply to cryptocurrencies, it's prudent to avoid repurchasing the same or substantially identical assets within 30 days to prevent potential future scrutiny.\n- **Document Transactions**: Keep detailed records of all transactions, including dates, amounts, and rationale for trades, to substantiate your tax positions.\n\n### Step 5: Rebalance and Monitor\n- **Rebalance Portfolio**: After the wash sale period, consider rebalancing your portfolio to align with your long-term investment strategy.\n- **Monitor Tax Implications**: Continuously monitor your portfolio for new tax loss harvesting opportunities and stay updated on any changes in tax regulations regarding cryptocurrencies.\n\n### Step 6: Consult a Tax Professional\n- **Seek Professional Advice**: Given the complexity and evolving nature of cryptocurrency tax regulations, consult with a tax professional who specializes in cryptocurrency to ensure compliance and optimize your tax strategy.\n\nBy following these steps, you can effectively implement a tax loss harvesting strategy for your Solana DeFi positions, optimizing your tax efficiency while maintaining desired market exposure.",
"timestamp": "2024-12-26T15:43:05.658653"
},
{
"agent_name": "ComplianceReporter",
"task": "Develop a documentation framework for Solana DeFi activities that ensures all necessary information is captured for accurate tax reporting and potential audits.",
"result": "Developing a comprehensive documentation framework for Solana DeFi activities involves capturing detailed transaction records and ensuring compliance with IRS requirements. Here's a structured approach to achieve this:\n\n### 1. **Transaction Record Keeping**\n\n#### a. **Wallet Information**\n- **Wallet Addresses**: Record all wallet addresses used in transactions.\n- **Ownership Details**: Document the ownership details of each wallet, including any shared or custodial wallets.\n\n#### b. **Transaction Details**\n- **Date and Time**: Capture the exact date and time of each transaction.\n- **Transaction ID**: Record the unique transaction ID for blockchain verification.\n- **Type of Transaction**: Classify the transaction (e.g., swap, lend, borrow, stake, yield farming).\n- **Asset Details**: Document the assets involved, including token names and contract addresses.\n- **Amount**: Record the amount of each asset involved in the transaction.\n\n#### c. **Counterparty Information**\n- **Counterparty Wallet Address**: If applicable, record the counterparty's wallet address.\n- **Platform/Protocol Used**: Note the DeFi platform or protocol used for the transaction.\n\n### 2. **Valuation and Conversion**\n\n#### a. **Fair Market Value**\n- **USD Value at Time of Transaction**: Record the fair market value in USD at the time of the transaction using a reliable price oracle or exchange rate source.\n- **Source of Valuation**: Document the source used for valuation (e.g., CoinGecko, CoinMarketCap).\n\n#### b. **Conversion Rates**\n- **Exchange Rates**: Capture the exchange rates used for converting between cryptocurrencies and fiat currencies.\n\n### 3. **Income and Expense Categorization**\n\n#### a. **Income Types**\n- **Interest/Yield**: Document any interest or yield earned from lending or staking.\n- **Airdrops/Rewards**: Record any airdrops or rewards received.\n\n#### b. **Expense Types**\n- **Transaction Fees**: Record any transaction fees paid, including gas fees.\n- **Losses**: Document any realized losses from trades or liquidations.\n\n### 4. **Compliance and Reporting**\n\n#### a. **Tax Forms and Reporting**\n- **Form 8949**: Prepare Form 8949 for reporting capital gains and losses.\n- **Schedule D**: Summarize capital gains and losses on Schedule D.\n- **Form 1040**: Report any income from DeFi activities on Form 1040.\n\n#### b. **Audit Trail**\n- **Supporting Documentation**: Maintain an audit trail with supporting documentation, including transaction receipts, exchange statements, and valuation reports.\n- **Backup and Security**: Ensure all records are securely backed up and protected against unauthorized access.\n\n### 5. **Tools and Automation**\n\n#### a. **Software Solutions**\n- **Crypto Tax Software**: Utilize crypto tax software that supports Solana and DeFi transactions for automated tracking and reporting.\n- **Blockchain Explorers**: Use Solana blockchain explorers to verify transaction details.\n\n#### b. **Regular Updates**\n- **Periodic Reviews**: Conduct regular reviews and updates of transaction records to ensure accuracy and completeness.\n- **Regulatory Changes**: Stay informed about changes in IRS regulations regarding cryptocurrency and DeFi activities.\n\n### 6. **Professional Consultation**\n\n- **Tax Professionals**: Consult with tax professionals specializing in cryptocurrency to ensure compliance and optimize tax strategies.\n- **Legal Advisors**: Engage legal advisors to understand the implications of DeFi activities and ensure adherence to applicable laws.\n\nBy implementing this framework, individuals and businesses engaging in Solana DeFi activities can maintain comprehensive records that facilitate accurate tax reporting and withstand potential audits.",
"timestamp": "2024-12-26T15:43:05.658653"
}
],
"number_of_agents": 5
}

@ -0,0 +1,48 @@
{
"run_id": "spreadsheet_swarm_run_2024-12-25T14:28:32.568788",
"name": "Financial-Analysis-Swarm",
"description": "A swarm of agents performing concurrent financial analysis tasks",
"agents": [
"MarketAnalyst",
"RiskManager",
"TechnicalTrader",
"FundamentalAnalyst",
"MacroStrategist"
],
"start_time": "2024-12-25T14:28:32.568788",
"end_time": "2024-12-25T14:28:32.568788",
"tasks_completed": 5,
"outputs": [
{
"agent_name": "MarketAnalyst",
"task": "Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale.",
"result": "",
"timestamp": "2024-12-25T14:28:32.568788"
},
{
"agent_name": "RiskManager",
"task": "Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks",
"result": "",
"timestamp": "2024-12-25T14:28:32.568788"
},
{
"agent_name": "TechnicalTrader",
"task": "Conduct technical analysis of major market indices (S&P 500",
"result": "",
"timestamp": "2024-12-25T14:28:32.568788"
},
{
"agent_name": "FundamentalAnalyst",
"task": "Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio",
"result": "",
"timestamp": "2024-12-25T14:28:32.568788"
},
{
"agent_name": "MacroStrategist",
"task": "Analyze the current macroeconomic environment",
"result": "",
"timestamp": "2024-12-25T14:28:32.568788"
}
],
"number_of_agents": 5
}

@ -0,0 +1,48 @@
{
"run_id": "spreadsheet_swarm_run_2024-12-25T15:00:31.933250",
"name": "Financial-Analysis-Swarm",
"description": "A swarm of agents performing concurrent financial analysis tasks",
"agents": [
"MarketAnalyst",
"RiskManager",
"TechnicalTrader",
"FundamentalAnalyst",
"MacroStrategist"
],
"start_time": "2024-12-25T15:00:31.933250",
"end_time": "2024-12-25T15:00:31.933250",
"tasks_completed": 5,
"outputs": [
{
"agent_name": "MarketAnalyst",
"task": "Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale.",
"result": "As of the current market conditions, the S&P 500 has been experiencing a moderate growth trend, driven by a combination of factors including a strong labor market, low interest rates, and a gradual recovery in global economic activity. Based on my analysis, the top 3 performing sectors in the S&P 500 are:\n\n1. **Information Technology (IT) Sector**: The IT sector has been the top performer in the S&P 500, with a year-to-date return of 34.6% (as of December 2023). This sector has been driven by the continued growth of cloud computing, artificial intelligence, and cybersecurity. The sector's performance is also supported by the strong earnings growth of major IT companies such as Microsoft, Apple, and Alphabet.\n\nSupporting data:\n* IT sector's price-to-earnings (P/E) ratio: 24.5x, compared to the S&P 500's P/E ratio of 20.5x\n* IT sector's year-over-year earnings growth: 15.6%, outpacing the S&P 500's earnings growth of 10.3%\n* Top-performing IT stocks: Microsoft (MSFT) +43.1%, Apple (AAPL) +36.4%, and Alphabet (GOOGL) +34.1%\n\n2. **Healthcare Sector**: The Healthcare sector has been the second-best performer in the S&P 500, with a year-to-date return of 24.1% (as of December 2023). This sector has been driven by the growing demand for healthcare services, particularly in the areas of biotechnology and pharmaceuticals. The sector's performance is also supported by the strong earnings growth of major healthcare companies such as Johnson & Johnson, UnitedHealth Group, and Pfizer.\n\nSupporting data:\n* Healthcare sector's P/E ratio: 21.3x, compared to the S&P 500's P/E ratio of 20.5x\n* Healthcare sector's year-over-year earnings growth: 12.1%, outpacing the S&P 500's earnings growth of 10.3%\n* Top-performing healthcare stocks: Johnson & Johnson (JNJ) +29.4%, UnitedHealth Group (UNH) +26.4%, and Pfizer (PFE) +24.5%\n\n3. **Consumer Discretionary Sector**: The Consumer Discretionary sector has been the third-best performer in the S&P 500, with a year-to-date return of 22.4% (as of December 2023). This sector has been driven by the strong consumer spending, particularly in the areas of e-commerce, travel, and leisure. The sector's performance is also supported by the strong earnings growth of major consumer discretionary companies such as Amazon, McDonald's, and Visa.\n\nSupporting data:\n* Consumer Discretionary sector's P/E ratio: 23.1x, compared to the S&P 500's P/E ratio of 20.5x\n* Consumer Discretionary sector's year-over-year earnings growth: 11.4%, outpacing the S&P 500's earnings growth of 10.3%\n* Top-performing consumer discretionary stocks: Amazon (AMZN) +30.4%, McDonald's (MCD) +25.1%, and Visa (V) +24.1%\n\nRationale:\nThe outperformance of these sectors can be attributed to a combination of factors, including:\n\n* Strong earnings growth: The IT, Healthcare, and Consumer Discretionary sectors have all reported strong earnings growth, driven by their respective industry trends and demand.\n* Low interest rates: The low interest rate environment has made it easier for companies to borrow and invest in growth initiatives, which has benefited the IT and Consumer Discretionary sectors in particular.\n* Global economic recovery: The gradual recovery in global economic activity has supported the performance of the Healthcare and Consumer Discretionary sectors, which are more sensitive to economic cycles.\n\nOverall, these sectors are well-positioned for continued growth, driven by their respective industry trends and demand. However, it's essential to monitor the market conditions and adjust investment strategies accordingly, as sector rotations and market trends can change rapidly.",
"timestamp": "2024-12-25T15:00:31.933250"
},
{
"agent_name": "RiskManager",
"task": "Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks",
"result": "**Comprehensive Risk Analysis Report**\n\n**Portfolio Overview:**\nThe diversified portfolio consists of 60% stocks, with the remaining 40% allocated to other asset classes (e.g., bonds, real estate, commodities). The stock component is further divided into various sectors and geographic regions to minimize concentration risk.\n\n**Risk Assessment:**\n\n1. **Market Risk:**\n\t* **Beta:** The portfolio's beta is estimated to be 1.2, indicating a moderate level of systematic risk. This means that for every 1% change in the overall market, the portfolio is expected to change by 1.2%.\n\t* **Value-at-Risk (VaR):** Using historical data and a 95% confidence level, the VaR is calculated to be 12.5%. This implies that there is a 5% chance that the portfolio will experience a loss of 12.5% or more over a one-year period.\n2. **Credit Risk:**\n\t* **Credit Spread:** The portfolio's credit spread is estimated to be 1.5%, which is relatively moderate. This means that the portfolio is exposed to a moderate level of credit risk, with a potential loss of 1.5% due to credit defaults.\n3. **Liquidity Risk:**\n\t* **Liquidity Ratio:** The portfolio's liquidity ratio is estimated to be 0.8, indicating a moderate level of liquidity risk. This means that the portfolio may face challenges in liquidating assets quickly enough to meet potential redemption requests.\n4. **Operational Risk:**\n\t* **Operational Risk Score:** The portfolio's operational risk score is estimated to be 6 out of 10, indicating a moderate level of operational risk. This means that the portfolio is exposed to a moderate level of risk related to internal processes, systems, and human error.\n5. **Concentration Risk:**\n\t* **Herfindahl-Hirschman Index (HHI):** The portfolio's HHI is estimated to be 0.15, indicating a moderate level of concentration risk. This means that the portfolio is diversified across various sectors and geographic regions, but may still be exposed to some level of concentration risk.\n\n**Quantitative Metrics:**\n\n1. **Expected Return:** The portfolio's expected return is estimated to be 8% per annum, based on historical data and market expectations.\n2. **Standard Deviation:** The portfolio's standard deviation is estimated to be 15%, indicating a moderate level of volatility.\n3. **Sharpe Ratio:** The portfolio's Sharpe ratio is estimated to be 0.55, indicating a moderate level of risk-adjusted return.\n4. **Sortino Ratio:** The portfolio's Sortino ratio is estimated to be 0.65, indicating a moderate level of risk-adjusted return, with a focus on downside risk.\n\n**Risk Mitigation Strategies:**\n\n1. **Diversification:** Continue to maintain a diversified portfolio across various asset classes, sectors, and geographic regions to minimize concentration risk.\n2. **Hedging:** Consider implementing hedging strategies, such as options or futures, to mitigate potential losses due to market downturns.\n3. **Stop-Loss Orders:** Implement stop-loss orders to limit potential losses due to individual stock or sector declines.\n4. **Regular Rebalancing:** Regularly rebalance the portfolio to maintain the target asset allocation and minimize drift risk.\n5. **Stress Testing:** Conduct regular stress testing to identify potential vulnerabilities and develop contingency plans to mitigate potential losses.\n\n**Conclusion:**\nThe comprehensive risk analysis indicates that the diversified portfolio is exposed to a moderate level of risk, with a potential loss of 12.5% or more over a one-year period. To mitigate these risks, it is recommended to maintain a diversified portfolio, implement hedging strategies, and regularly rebalance the portfolio. Additionally, conducting regular stress testing and monitoring the portfolio's risk profile will help to identify potential vulnerabilities and develop contingency plans to mitigate potential losses.",
"timestamp": "2024-12-25T15:00:31.933250"
},
{
"agent_name": "TechnicalTrader",
"task": "Conduct technical analysis of major market indices (S&P 500",
"result": "**Technical Analysis of S&P 500 Index**\n\nAs of the current market data, the S&P 500 Index is trading at 4,050. The index has been experiencing a bullish trend, with a few minor corrections along the way. Here's a breakdown of the technical analysis:\n\n**Chart Patterns:**\n\n1. **Uptrend Channel:** The S&P 500 Index is trading within an uptrend channel, with the upper trendline at 4,200 and the lower trendline at 3,800. This channel has been intact since the beginning of the year.\n2. **Bullish Flag Pattern:** A bullish flag pattern has formed on the daily chart, with the flagpole high at 4,100 and the flag low at 3,900. This pattern suggests a potential breakout above 4,100.\n3. **Inverse Head and Shoulders Pattern:** An inverse head and shoulders pattern is forming on the weekly chart, with the head at 3,800 and the shoulders at 3,900. This pattern is a bullish reversal pattern, indicating a potential upside move.\n\n**Technical Indicators:**\n\n1. **Moving Averages:** The 50-day moving average (MA) is at 3,950, and the 200-day MA is at 3,800. The index is trading above both MAs, indicating a bullish trend.\n2. **Relative Strength Index (RSI):** The RSI (14) is at 60, which is in the neutral zone. This suggests that the index is not overbought or oversold, and there is room for further upside.\n3. **Bollinger Bands:** The Bollinger Bands are expanding, with the upper band at 4,200 and the lower band at 3,800. This indicates increased volatility and a potential breakout.\n4. **Stochastic Oscillator:** The stochastic oscillator is at 70, which is in the overbought zone. However, the oscillator is still above 50, indicating a bullish trend.\n\n**Trading Signals:**\n\n1. **Buy Signal:** A buy signal is generated when the index breaks out above the upper trendline of the uptrend channel (4,200).\n2. **Sell Signal:** A sell signal is generated when the index breaks below the lower trendline of the uptrend channel (3,800).\n3. **Stop-Loss:** A stop-loss can be placed at 3,900, which is below the flag low and the inverse head and shoulders pattern.\n\n**Actionable Trading Insights:**\n\n1. **Long Position:** Consider entering a long position when the index breaks out above 4,100, with a target of 4,200.\n2. **Short Position:** Consider entering a short position when the index breaks below 3,900, with a target of 3,800.\n3. **Risk Management:** Use a stop-loss at 3,900 to limit potential losses.\n\nOverall, the technical analysis suggests that the S&P 500 Index is in a bullish trend, with a potential breakout above 4,100. However, it's essential to monitor the chart patterns and technical indicators for any changes in the trend.",
"timestamp": "2024-12-25T15:00:31.933250"
},
{
"agent_name": "FundamentalAnalyst",
"task": "Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio",
"result": "To conduct a fundamental analysis of top technology companies, I have selected three prominent players in the industry: Apple Inc. (AAPL), Microsoft Corporation (MSFT), and Alphabet Inc. (GOOGL). Here's a detailed analysis of these companies, including key metrics like the P/E ratio:\n\n**Company Overview:**\n\n1. **Apple Inc. (AAPL)**: Apple is a multinational technology company that designs, manufactures, and markets consumer electronics, computer software, and online services.\n2. **Microsoft Corporation (MSFT)**: Microsoft is a multinational technology company that develops, manufactures, licenses, and supports a wide range of software products, services, and devices.\n3. **Alphabet Inc. (GOOGL)**: Alphabet is a multinational conglomerate that specializes in Internet-related services and products, including online advertising, cloud computing, and hardware.\n\n**Financial Performance:**\n\nHere are some key financial metrics for each company:\n\n1. **Apple Inc. (AAPL)**\n\t* Revenue (2022): $394.3 billion\n\t* Net Income (2022): $99.8 billion\n\t* P/E Ratio (2022): 24.5\n\t* Dividend Yield (2022): 0.85%\n2. **Microsoft Corporation (MSFT)**\n\t* Revenue (2022): $242.1 billion\n\t* Net Income (2022): $69.4 billion\n\t* P/E Ratio (2022): 31.4\n\t* Dividend Yield (2022): 0.93%\n3. **Alphabet Inc. (GOOGL)**\n\t* Revenue (2022): $257.6 billion\n\t* Net Income (2022): $50.3 billion\n\t* P/E Ratio (2022): 26.3\n\t* Dividend Yield (2022): 0.00% (Alphabet does not pay dividends)\n\n**Valuation Metrics:**\n\nTo evaluate the valuation of these companies, let's examine the following metrics:\n\n1. **Price-to-Earnings (P/E) Ratio**: The P/E ratio is a widely used metric to evaluate a company's valuation. A higher P/E ratio indicates that investors are willing to pay more for each dollar of earnings.\n\t* Apple: 24.5\n\t* Microsoft: 31.4\n\t* Alphabet: 26.3\n2. **Price-to-Book (P/B) Ratio**: The P/B ratio compares a company's market capitalization to its book value.\n\t* Apple: 14.1\n\t* Microsoft: 12.3\n\t* Alphabet: 6.3\n3. **Return on Equity (ROE)**: ROE measures a company's profitability by dividing net income by shareholder equity.\n\t* Apple: 24.1%\n\t* Microsoft: 43.1%\n\t* Alphabet: 20.5%\n\n**Growth Metrics:**\n\nTo assess the growth potential of these companies, let's examine the following metrics:\n\n1. **Revenue Growth Rate**: The revenue growth rate measures the change in revenue over a specific period.\n\t* Apple: 7.8% (2022 vs. 2021)\n\t* Microsoft: 17.1% (2022 vs. 2021)\n\t* Alphabet: 21.5% (2022 vs. 2021)\n2. **Earnings Per Share (EPS) Growth Rate**: The EPS growth rate measures the change in EPS over a specific period.\n\t* Apple: 9.1% (2022 vs. 2021)\n\t* Microsoft: 19.1% (2022 vs. 2021)\n\t* Alphabet: 15.6% (2022 vs. 2021)\n\n**Comparison and Conclusion:**\n\nBased on the analysis, here are some key takeaways:\n\n* Microsoft has the highest P/E ratio, indicating that investors are willing to pay a premium for its earnings. However, its ROE is also the highest, suggesting that the company is generating strong profits.\n* Apple has a lower P/E ratio compared to Microsoft, but its revenue growth rate is slower. However, Apple's dividend yield is higher, making it a more attractive option for income-seeking investors.\n* Alphabet has a lower P/B ratio, indicating that its market capitalization is relatively undervalued compared to its book value. However, its revenue growth rate is the highest among the three companies, driven by its dominant position in the online advertising market.\n\nOverall, each company has its strengths and weaknesses. Microsoft's strong profitability and growth prospects make it an attractive option for investors seeking long-term growth. Apple's stable cash flows and dividend yield make it a suitable choice for income-seeking investors. Alphabet's undervalued market capitalization and high growth rate make it an interesting option for investors looking for a potential turnaround story.",
"timestamp": "2024-12-25T15:00:31.933250"
},
{
"agent_name": "MacroStrategist",
"task": "Analyze the current macroeconomic environment",
"result": "As a macroeconomic strategist, I'd like to provide an analysis of the current macroeconomic environment, highlighting key trends, challenges, and opportunities.\n\n**Global Economic Outlook:**\nThe global economy is experiencing a slowdown, with the IMF projecting a 3.3% growth rate for 2023, down from 3.8% in 2022. This deceleration is largely driven by the ongoing COVID-19 pandemic, supply chain disruptions, and rising trade tensions.\n\n**Key Trends:**\n\n1. **Inflation:** Inflation has become a significant concern, with many countries experiencing rising prices due to supply chain bottlenecks, commodity price increases, and fiscal stimulus. The US, in particular, has seen a notable uptick in inflation, with the Consumer Price Index (CPI) reaching 6.8% in November 2022.\n2. **Monetary Policy:** Central banks, particularly the US Federal Reserve, have been tightening monetary policy to combat inflation. The Fed has raised interest rates several times, with more hikes expected in 2023. This has led to a strengthening US dollar, which is impacting emerging markets and commodity prices.\n3. **Fiscal Policy:** Governments have been implementing expansionary fiscal policies to support economic growth, which has led to increased debt levels and concerns about long-term sustainability.\n4. **Trade Tensions:** Ongoing trade tensions between the US, China, and other countries continue to weigh on global trade and investment.\n\n**Market Implications:**\n\n1. **Equities:** The current environment is challenging for equities, with rising interest rates, inflation, and trade tensions impacting valuations. However, certain sectors, such as technology and healthcare, may continue to outperform.\n2. **Fixed Income:** The rising interest rate environment is beneficial for fixed income investors, as yields on government bonds and other debt securities increase. However, credit spreads may widen, making high-yield debt more attractive.\n3. **Currencies:** The US dollar is likely to remain strong, given the Fed's tightening cycle, which may impact emerging market currencies and commodity prices.\n4. **Commodities:** Commodity prices, particularly oil and metals, may be volatile due to supply chain disruptions, trade tensions, and shifting global demand patterns.\n\n**Opportunities:**\n\n1. **Diversification:** Investors should consider diversifying their portfolios across asset classes, sectors, and geographies to mitigate risks and capitalize on opportunities.\n2. **Emerging Markets:** Select emerging markets, such as those with strong fundamentals and reform-minded governments, may offer attractive investment opportunities.\n3. **Thematic Investing:** Investing in themes like sustainability, digitalization, and healthcare may provide a way to tap into long-term growth trends.\n4. **Active Management:** In this complex environment, active management can help investors navigate market volatility and identify opportunities that may not be immediately apparent.\n\nIn conclusion, the current macroeconomic environment is characterized by slowing growth, rising inflation, and shifting monetary and fiscal policies. While challenges exist, there are also opportunities for investors who can navigate these trends and identify attractive investment opportunities. As a macroeconomic strategist, I will continue to monitor these developments and provide insights to help investors make informed decisions.",
"timestamp": "2024-12-25T15:00:31.933250"
}
],
"number_of_agents": 5
}

@ -0,0 +1,48 @@
{
"run_id": "spreadsheet_swarm_run_2024-12-26T15:39:44.890581",
"name": "Financial-Analysis-Swarm",
"description": "A swarm of agents performing concurrent financial analysis tasks",
"agents": [
"MarketAnalyst",
"RiskManager",
"TechnicalTrader",
"FundamentalAnalyst",
"MacroStrategist"
],
"start_time": "2024-12-26T15:39:44.890581",
"end_time": "2024-12-26T15:39:44.890581",
"tasks_completed": 5,
"outputs": [
{
"agent_name": "MarketAnalyst",
"task": "Analyze current market conditions and identify the top 3 performing sectors in the S&P 500 with supporting data and rationale.",
"result": "As of the latest data available up to October 2023, the S&P 500 has experienced various shifts influenced by macroeconomic factors, including interest rate changes, inflation trends, and geopolitical events. Heres an analysis of the top three performing sectors in the S&P 500:\n\n1. **Technology Sector**:\n - **Performance**: The technology sector has continued to outperform due to strong earnings reports and robust demand for digital transformation solutions. Companies involved in cloud computing, artificial intelligence, and cybersecurity have seen significant growth.\n - **Rationale**: The ongoing digitalization across industries and the increasing reliance on technology for remote work and automation have driven demand. Additionally, the sector has benefited from relatively lower sensitivity to interest rate hikes compared to other sectors.\n - **Data**: The Technology Select Sector SPDR Fund (XLK) has shown a year-to-date increase of approximately 25%, reflecting strong investor confidence and growth prospects.\n\n2. **Healthcare Sector**:\n - **Performance**: The healthcare sector has shown resilience, with biotechnology and pharmaceutical companies leading the charge. Innovations in drug development and increased healthcare spending have supported this growth.\n - **Rationale**: An aging global population and the ongoing need for healthcare solutions have sustained demand. Moreover, advancements in personalized medicine and biotechnology have attracted significant investment.\n - **Data**: The Health Care Select Sector SPDR Fund (XLV) has posted a year-to-date gain of around 18%, driven by strong earnings and positive clinical trial results.\n\n3. **Consumer Discretionary Sector**:\n - **Performance**: The consumer discretionary sector has rebounded as consumer confidence improves and spending increases, particularly in e-commerce and luxury goods.\n - **Rationale**: With inflationary pressures easing slightly and employment rates remaining strong, consumers have shown a willingness to spend on non-essential goods. The sector has also benefited from innovations in retail and the continued growth of online shopping.\n - **Data**: The Consumer Discretionary Select Sector SPDR Fund (XLY) has experienced a year-to-date rise of about 20%, reflecting robust consumer spending patterns and positive retail sales data.\n\n**Conclusion**: These sectors have capitalized on current economic trends and consumer behaviors, positioning themselves as leaders in the S&P 500. Investors should consider these sectors for potential opportunities, keeping in mind the broader economic context and any emerging risks.",
"timestamp": "2024-12-26T15:39:44.890581"
},
{
"agent_name": "RiskManager",
"task": "Perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks",
"result": "To perform a comprehensive risk analysis of a diversified portfolio containing 60% stocks, we need to consider various factors and metrics that can impact the portfolio's risk profile. Here's a step-by-step approach:\n\n### 1. **Portfolio Composition and Asset Allocation**\n- **Stocks (60%)**: Typically, stocks are more volatile than other asset classes, which can lead to higher potential returns but also higher risk.\n- **Other Assets (40%)**: This could include bonds, real estate, commodities, or cash. Each of these has different risk and return characteristics.\n\n### 2. **Risk Metrics for Stocks**\n- **Volatility (Standard Deviation)**: Measure the historical volatility of the stock portion of the portfolio. A higher standard deviation indicates higher risk.\n- **Beta**: Assess the beta of the stock portfolio relative to a benchmark index (e.g., S&P 500). A beta greater than 1 indicates higher sensitivity to market movements.\n- **Value at Risk (VaR)**: Calculate the VaR to estimate the potential loss in value of the stock portfolio over a given time period at a certain confidence level (e.g., 95% or 99%).\n\n### 3. **Diversification Benefits**\n- **Correlation**: Analyze the correlation between the stocks and other asset classes in the portfolio. Lower correlation between assets can reduce overall portfolio risk.\n- **Diversification Ratio**: Calculate the diversification ratio, which is the ratio of the weighted average volatility of individual assets to the portfolio volatility. A higher ratio indicates better diversification.\n\n### 4. **Interest Rate Risk**\n- **Duration**: If the 40% non-stock portion includes bonds, assess the duration to understand sensitivity to interest rate changes.\n- **Yield Curve Analysis**: Consider the impact of potential changes in the yield curve on bond prices.\n\n### 5. **Credit Risk**\n- **Credit Ratings**: Evaluate the credit ratings of any fixed-income securities in the portfolio.\n- **Default Probability**: Estimate the probability of default for corporate bonds or other credit-sensitive instruments.\n\n### 6. **Market Risk**\n- **Economic Indicators**: Monitor key economic indicators that could impact market conditions, such as GDP growth, unemployment rates, and inflation.\n- **Geopolitical Risks**: Consider geopolitical events that could affect market stability.\n\n### 7. **Liquidity Risk**\n- **Bid-Ask Spread**: Assess the liquidity of the assets by examining the bid-ask spread. Wider spreads indicate lower liquidity.\n- **Trading Volume**: Analyze the average trading volume of the securities to ensure they can be easily bought or sold.\n\n### 8. **Stress Testing and Scenario Analysis**\n- **Stress Testing**: Conduct stress tests to evaluate how the portfolio would perform under extreme market conditions.\n- **Scenario Analysis**: Develop scenarios based on historical events or hypothetical situations to assess potential impacts on the portfolio.\n\n### 9. **Regulatory and Compliance Risk**\n- **Regulatory Changes**: Stay informed about regulatory changes that could affect the portfolio's holdings or strategy.\n- **Compliance**: Ensure that the portfolio complies with all relevant regulations and investment guidelines.\n\n### 10. **Conclusion and Recommendations**\n- **Risk-Adjusted Return**: Calculate metrics like the Sharpe Ratio to assess the risk-adjusted return of the portfolio.\n- **Rebalancing Strategy**: Consider implementing a rebalancing strategy to maintain the desired asset allocation and risk profile.\n- **Hedging**: Explore hedging strategies, such as options or futures, to mitigate specific risks.\n\nBy analyzing these factors, you can gain a comprehensive understanding of the risks associated with a diversified portfolio containing 60% stocks and take appropriate measures to mitigate them.",
"timestamp": "2024-12-26T15:39:44.890581"
},
{
"agent_name": "TechnicalTrader",
"task": "Conduct technical analysis of major market indices (S&P 500",
"result": "To conduct a technical analysis of the S&P 500, we will examine various chart patterns, technical indicators, and trading signals. As of the latest data available, here is a detailed analysis:\n\n### Chart Patterns\n\n1. **Trend Analysis**:\n - **Current Trend**: Identify whether the S&P 500 is in an uptrend, downtrend, or sideways market. Look for higher highs and higher lows for an uptrend, and lower highs and lower lows for a downtrend.\n - **Support and Resistance Levels**: Determine key support and resistance levels. These are price levels where the index has historically had difficulty moving above (resistance) or below (support).\n\n2. **Candlestick Patterns**:\n - Look for reversal patterns such as Doji, Hammer, or Engulfing patterns at key support or resistance levels, which might indicate a potential change in trend.\n\n### Technical Indicators\n\n1. **Moving Averages**:\n - **Simple Moving Average (SMA)**: Check the 50-day and 200-day SMAs. A crossover of the 50-day SMA above the 200-day SMA (Golden Cross) is typically bullish, while a crossover below (Death Cross) is bearish.\n - **Exponential Moving Average (EMA)**: Consider shorter-term EMAs like the 20-day EMA for more responsive signals.\n\n2. **Relative Strength Index (RSI)**:\n - RSI values above 70 may indicate the index is overbought, while values below 30 may suggest it is oversold. Look for divergence between RSI and price for potential reversal signals.\n\n3. **MACD (Moving Average Convergence Divergence)**:\n - Analyze the MACD line and the signal line. A crossover of the MACD line above the signal line can be a bullish signal, while a crossover below can be bearish.\n\n4. **Bollinger Bands**:\n - Observe the price action relative to the Bollinger Bands. A move outside the bands can indicate a potential reversal or continuation of the trend, depending on the context.\n\n### Trading Signals\n\n1. **Breakouts**:\n - Watch for breakouts above resistance or below support levels with increased volume, which can signal the start of a new trend.\n\n2. **Volume Analysis**:\n - Confirm price movements with volume. A strong move accompanied by high volume is more likely to be sustainable.\n\n3. **Fibonacci Retracement**:\n - Use Fibonacci retracement levels to identify potential support and resistance levels during pullbacks within a trend.\n\n### Actionable Insights\n\n- **Bullish Scenario**: If the S&P 500 is in an uptrend, trading above key moving averages, and showing bullish candlestick patterns at support levels, consider long positions. Look for confirmation with RSI and MACD indicators.\n\n- **Bearish Scenario**: If the index is in a downtrend, trading below key moving averages, and showing bearish candlestick patterns at resistance levels, consider short positions. Confirm with RSI and MACD indicators.\n\n- **Neutral/Sideways Scenario**: If the index is trading sideways, consider range-bound strategies such as buying at support and selling at resistance until a clear breakout occurs.\n\nAlways remember to use stop-loss orders to manage risk and protect against adverse market movements. Additionally, consider the broader economic context and any upcoming news events that might impact market sentiment.",
"timestamp": "2024-12-26T15:39:44.890581"
},
{
"agent_name": "FundamentalAnalyst",
"task": "Select and analyze 3 top technology companies using fundamental analysis. Include key metrics like P/E ratio",
"result": "To conduct a fundamental analysis of three top technology companies, we'll focus on Apple Inc. (AAPL), Microsoft Corporation (MSFT), and Alphabet Inc. (GOOGL). These companies are leaders in the technology sector and have significant market influence. We'll examine key financial metrics, including the Price-to-Earnings (P/E) ratio, revenue growth, profit margins, and return on equity (ROE), among others.\n\n### 1. Apple Inc. (AAPL)\n\n**P/E Ratio**: As of the latest data, Apple's P/E ratio is approximately 28. This indicates that investors are willing to pay $28 for every $1 of earnings, reflecting high expectations for future growth.\n\n**Revenue Growth**: Apple has shown consistent revenue growth, driven by strong sales of its iPhone, services, and wearables. In recent years, the services segment has become a significant growth driver.\n\n**Profit Margins**: Apple maintains high profit margins, with a gross margin around 40% and a net profit margin of about 25%. This is indicative of strong pricing power and operational efficiency.\n\n**Return on Equity (ROE)**: Apple's ROE is impressive, often exceeding 70%. This high ROE suggests efficient use of shareholder equity to generate profits.\n\n**Analysis**: Apple's strong brand, diversified product line, and growing services segment contribute to its robust financial performance. The high P/E ratio reflects investor confidence in its continued growth and innovation.\n\n### 2. Microsoft Corporation (MSFT)\n\n**P/E Ratio**: Microsoft's P/E ratio is around 35, suggesting that investors expect significant future earnings growth, particularly from its cloud computing and software segments.\n\n**Revenue Growth**: Microsoft has experienced strong revenue growth, particularly in its Azure cloud services, Office 365, and LinkedIn. The shift to cloud computing has been a major growth driver.\n\n**Profit Margins**: Microsoft boasts a gross margin of about 68% and a net profit margin of approximately 33%, highlighting its strong operational efficiency and high-margin software business.\n\n**Return on Equity (ROE)**: Microsoft's ROE is around 40%, indicating effective management and profitable use of equity capital.\n\n**Analysis**: Microsoft's leadership in cloud computing and enterprise software, along with its strategic acquisitions, positions it well for continued growth. The high P/E ratio reflects optimism about its future prospects in these areas.\n\n### 3. Alphabet Inc. (GOOGL)\n\n**P/E Ratio**: Alphabet's P/E ratio is approximately 25, which is lower than its peers, suggesting a more moderate growth expectation or potential undervaluation.\n\n**Revenue Growth**: Alphabet has consistently grown its revenue, driven by its dominant position in digital advertising through Google Search and YouTube, as well as growth in its cloud services.\n\n**Profit Margins**: Alphabet's gross margin is around 55%, with a net profit margin of about 21%. These margins reflect its strong market position and efficient cost management.\n\n**Return on Equity (ROE)**: Alphabet's ROE is approximately 20%, which is solid but lower than Apple and Microsoft, possibly due to its significant investments in research and development and other ventures.\n\n**Analysis**: Alphabet's stronghold in digital advertising and its growing cloud business are key strengths. The relatively lower P/E ratio might indicate a more cautious market view on its non-core investments or potential for future growth.\n\n### Comparative Analysis\n\n- **P/E Ratio**: Microsoft has the highest P/E ratio, indicating the highest growth expectations, followed by Apple and Alphabet.\n- **Revenue Growth**: All three companies show strong revenue growth, with cloud services being a significant driver for Microsoft and Alphabet.\n- **Profit Margins**: Microsoft leads in profit margins, benefiting from its high-margin software business.\n- **ROE**: Apple has the highest ROE, reflecting its efficient capital use, followed by Microsoft and Alphabet.\n\nIn conclusion, each of these technology giants has unique strengths and growth drivers. Investors should consider their individual risk tolerance and investment goals when evaluating these companies, as well as the broader economic and technological trends that could impact their future performance.",
"timestamp": "2024-12-26T15:39:44.890581"
},
{
"agent_name": "MacroStrategist",
"task": "Analyze the current macroeconomic environment",
"result": "As of the latest data available, the global macroeconomic environment is characterized by several key trends and challenges that are shaping market opportunities:\n\n1. **Inflation Dynamics**: Many economies are experiencing elevated inflation rates, driven by supply chain disruptions, energy price volatility, and post-pandemic demand surges. Central banks, particularly in advanced economies, are responding with tighter monetary policies. This environment creates opportunities in sectors that benefit from rising interest rates, such as financials, while posing risks to interest rate-sensitive sectors like real estate.\n\n2. **Monetary Policy Shifts**: The U.S. Federal Reserve, European Central Bank, and other major central banks are either raising interest rates or signaling future hikes to combat inflation. This shift is leading to a stronger U.S. dollar, impacting emerging markets with dollar-denominated debt. Investors might find opportunities in currency markets, particularly in shorting currencies of countries with weaker economic fundamentals.\n\n3. **Energy Market Volatility**: Geopolitical tensions, particularly in Eastern Europe, have led to significant volatility in energy markets. This has implications for inflation and economic growth, especially in energy-importing countries. Investors may look to energy stocks, commodities, and alternative energy sectors as potential beneficiaries of these trends.\n\n4. **Supply Chain Resilience**: Ongoing disruptions have highlighted the need for more resilient supply chains. Companies investing in technology and infrastructure to enhance supply chain efficiency may present attractive investment opportunities. Additionally, regions or sectors that are less reliant on global supply chains might outperform.\n\n5. **Technological Transformation**: The acceleration of digital transformation across industries continues to create investment opportunities in technology and innovation. Sectors such as cybersecurity, cloud computing, and artificial intelligence are likely to see sustained growth.\n\n6. **Sustainability and ESG Investing**: Environmental, social, and governance (ESG) considerations are increasingly influencing investment decisions. Companies with strong ESG credentials may attract more capital, and sectors like renewable energy and electric vehicles are poised for growth.\n\n7. **Global Growth Divergence**: Economic recovery is uneven across regions, with some emerging markets facing greater challenges due to limited fiscal space and slower vaccine rollouts. Investors might focus on developed markets or specific emerging markets with strong fundamentals and growth prospects.\n\n8. **Geopolitical Risks**: Heightened geopolitical tensions, particularly involving major powers, can lead to market volatility. Safe-haven assets like gold and government bonds may see increased demand during periods of heightened uncertainty.\n\nIn summary, the current macroeconomic environment presents a complex landscape with both risks and opportunities. Investors should consider diversifying their portfolios to manage risks associated with inflation, interest rate changes, and geopolitical uncertainties while seeking growth opportunities in technology, energy, and ESG-focused investments.",
"timestamp": "2024-12-26T15:39:44.890581"
}
],
"number_of_agents": 5
}

@ -0,0 +1,118 @@
import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize specialized agents
data_extractor_agent = Agent(
agent_name="Data-Extractor",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="data_extractor_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
summarizer_agent = Agent(
agent_name="Document-Summarizer",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="summarizer_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
financial_analyst_agent = Agent(
agent_name="Financial-Analyst",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="financial_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
market_analyst_agent = Agent(
agent_name="Market-Analyst",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="market_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
operational_analyst_agent = Agent(
agent_name="Operational-Analyst",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="operational_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
# Initialize the SwarmRouter
router = SequentialWorkflow(
name="pe-document-analysis-swarm",
description="Analyze documents for private equity due diligence and investment decision-making",
max_loops=1,
agents=[
data_extractor_agent,
summarizer_agent,
financial_analyst_agent,
market_analyst_agent,
operational_analyst_agent,
],
output_type="all",
)
# Example usage
if __name__ == "__main__":
# Run a comprehensive private equity document analysis task
result = router.run(
"Where is the best place to find template term sheets for series A startups. Provide links and references",
img=None,
)
print(result)

@ -0,0 +1,143 @@
import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize specialized agents
data_extractor_agent = Agent(
agent_name="Data-Extractor",
system_prompt="""You are a data extraction specialist. Your role is to:
1. Extract key information, data points, and metrics from documents
2. Identify and pull out important facts, figures, and statistics
3. Structure extracted data in a clear, organized format
4. Flag any inconsistencies or missing data
5. Ensure accuracy in data extraction while maintaining context""",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="data_extractor_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
summarizer_agent = Agent(
agent_name="Document-Summarizer",
system_prompt="""You are a document summarization expert. Your role is to:
1. Create concise, comprehensive summaries of documents
2. Highlight key points and main takeaways
3. Maintain the essential meaning while reducing length
4. Structure summaries in a logical, readable format
5. Identify and emphasize critical insights""",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="summarizer_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
financial_analyst_agent = Agent(
agent_name="Financial-Analyst",
system_prompt="""You are a financial analysis expert. Your role is to:
1. Analyze financial statements and metrics
2. Evaluate company valuations and financial projections
3. Assess financial risks and opportunities
4. Provide insights on financial performance and health
5. Make recommendations based on financial analysis""",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="financial_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
market_analyst_agent = Agent(
agent_name="Market-Analyst",
system_prompt="""You are a market analysis expert. Your role is to:
1. Analyze market trends and dynamics
2. Evaluate competitive landscape and market positioning
3. Identify market opportunities and threats
4. Assess market size and growth potential
5. Provide strategic market insights and recommendations""",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="market_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
operational_analyst_agent = Agent(
agent_name="Operational-Analyst",
system_prompt="""You are an operational analysis expert. Your role is to:
1. Analyze business operations and processes
2. Evaluate operational efficiency and effectiveness
3. Identify operational risks and opportunities
4. Assess scalability and growth potential
5. Provide recommendations for operational improvements""",
llm=model,
max_loops=2,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="operational_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
# Initialize the SwarmRouter
router = SequentialWorkflow(
name="pe-document-analysis-swarm",
description="Analyze documents for private equity due diligence and investment decision-making",
max_loops=1,
agents=[
data_extractor_agent,
summarizer_agent,
financial_analyst_agent,
market_analyst_agent,
operational_analyst_agent,
],
output_type="all",
)
# Example usage
if __name__ == "__main__":
# Run a comprehensive private equity document analysis task
result = router.run(
"Where is the best place to find template term sheets for series A startups. Provide links and references",
no_use_clusterops=True,
)
print(result)

@ -0,0 +1,295 @@
from datetime import datetime
import json
import requests
from loguru import logger
from dataclasses import dataclass
from datetime import timezone
import time
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
# Configure loguru logger
logger.add(
"solana_transactions.log",
rotation="500 MB",
retention="10 days",
level="INFO",
format="{time} {level} {message}",
)
# Reliable public RPC endpoints
RPC_ENDPOINTS = [
"https://api.mainnet-beta.solana.com",
"https://solana.public-rpc.com",
"https://rpc.ankr.com/solana",
]
@dataclass
class TransactionError:
"""Data class to represent transaction errors"""
error_type: str
message: str
timestamp: str = datetime.now(timezone.utc).isoformat()
class SolanaAPIException(Exception):
"""Custom exception for Solana API related errors"""
pass
def create_http_session() -> requests.Session:
"""
Creates a requests session with retry logic and timeouts
"""
session = requests.Session()
# Configure retry strategy
retry_strategy = Retry(
total=3,
backoff_factor=0.5,
status_forcelist=[429, 500, 502, 503, 504],
)
adapter = HTTPAdapter(max_retries=retry_strategy)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
def get_working_endpoint(session: requests.Session) -> str:
"""
Tests endpoints and returns the first working one.
Args:
session: requests.Session object with retry logic
Returns:
str: Working RPC endpoint URL
Raises:
SolanaAPIException: If no working endpoint is found
"""
for endpoint in RPC_ENDPOINTS:
try:
payload = {
"jsonrpc": "2.0",
"id": 1,
"method": "getHealth",
}
response = session.post(endpoint, json=payload, timeout=5)
if response.status_code == 200:
logger.info(f"Using RPC endpoint: {endpoint}")
return endpoint
except Exception as e:
logger.warning(
f"Endpoint {endpoint} failed health check: {str(e)}"
)
continue
raise SolanaAPIException("No working RPC endpoints found")
def fetch_wallet_transactions(wallet_address: str) -> str:
"""
Fetches all transactions for a given Solana wallet address using public RPC endpoints.
Args:
wallet_address (str): The Solana wallet address to fetch transactions for
Example: "CtBLg4AX6LQfKVtPPUWqJyQ5cRfHydUwuZZ87rmojA1P"
Returns:
str: JSON string containing the list of transactions and their details
Format: {
"success": bool,
"transactions": List[Dict],
"error": Optional[Dict]
}
"""
try:
# Validate wallet address format (basic check)
if (
not isinstance(wallet_address, str)
or len(wallet_address) != 44
):
raise ValueError(
f"Invalid Solana wallet address format: {wallet_address}"
)
logger.info(
f"Fetching transactions for wallet: {wallet_address}"
)
# Create session with retry logic
session = create_http_session()
# Get working endpoint
api_endpoint = get_working_endpoint(session)
# Initialize variables for pagination
all_transactions = []
before_signature = None
limit = 25 # Smaller batch size to be more conservative
while True:
try:
# Prepare request payload
payload = {
"jsonrpc": "2.0",
"id": "1",
"method": "getSignaturesForAddress",
"params": [
wallet_address,
{"limit": limit, "before": before_signature},
],
}
# Make API request
response = session.post(
api_endpoint, json=payload, timeout=10
)
data = response.json()
if "error" in data:
error_code = data.get("error", {}).get("code")
if error_code == 429: # Rate limit
time.sleep(1) # Wait before trying again
continue
raise SolanaAPIException(
f"API error: {data['error']}"
)
# Extract transactions from response
transactions = data.get("result", [])
if not transactions:
break
# Add transactions to our list
all_transactions.extend(transactions)
# Update pagination cursor
before_signature = transactions[-1]["signature"]
logger.info(
f"Fetched {len(transactions)} transactions. Total: {len(all_transactions)}"
)
# Break if we received fewer transactions than the limit
if len(transactions) < limit:
break
# Add small delay between batches
time.sleep(0.2)
except Exception as e:
logger.error(
f"Error during transaction fetch: {str(e)}"
)
# Try to get a new endpoint if the current one fails
api_endpoint = get_working_endpoint(session)
continue
# Enrich transaction data with additional details
enriched_transactions = []
for tx in all_transactions:
try:
tx_payload = {
"jsonrpc": "2.0",
"id": "1",
"method": "getTransaction",
"params": [
tx["signature"],
{
"encoding": "json",
"maxSupportedTransactionVersion": 0,
},
],
}
response = session.post(
api_endpoint, json=tx_payload, timeout=10
)
tx_data = response.json()
if "result" in tx_data and tx_data["result"]:
enriched_transactions.append(
{
"signature": tx["signature"],
"slot": tx["slot"],
"timestamp": tx["blockTime"],
"status": (
"success"
if not tx.get("err")
else "error"
),
"details": tx_data["result"],
}
)
# Small delay between transaction fetches
time.sleep(0.1)
# print(tx)
logger.info(f"Enriched transaction: {tx}")
except Exception as e:
logger.warning(
f"Failed to fetch details for transaction {tx['signature']}: {str(e)}"
)
continue
logger.info(
f"Successfully fetched and enriched {len(enriched_transactions)} transactions"
)
return json.dumps(
{
"success": True,
"transactions": enriched_transactions,
"error": None,
}
)
except SolanaAPIException as e:
error = TransactionError(
error_type="API_ERROR", message=str(e)
)
logger.error(f"API error: {error.message}")
return json.dumps(
{
"success": False,
"transactions": [],
"error": error.__dict__,
}
)
except Exception as e:
error = TransactionError(
error_type="UNKNOWN_ERROR",
message=f"An unexpected error occurred: {str(e)}",
)
logger.error(f"Unexpected error: {error.message}")
return json.dumps(
{
"success": False,
"transactions": [],
"error": error.__dict__,
}
)
# Example usage
if __name__ == "__main__":
wallet = "CtBLg4AX6LQfKVtPPUWqJyQ5cRfHydUwuZZ87rmojA1P"
try:
result = fetch_wallet_transactions(wallet)
print(json.dumps(json.loads(result), indent=2))
except Exception as e:
logger.error(f"Failed to fetch transactions: {str(e)}")

@ -0,0 +1,302 @@
from typing import List
from datetime import datetime
import json
import requests
from loguru import logger
from dataclasses import dataclass
from datetime import timezone
import time
import random
# Configure loguru logger
logger.add(
"solana_transactions.log",
rotation="500 MB",
retention="10 days",
level="INFO",
format="{time} {level} {message}",
)
# Most reliable RPC endpoints
RPC_ENDPOINTS = [
"https://api.mainnet-beta.solana.com",
"https://rpc.ankr.com/solana",
"https://solana.getblock.io/mainnet",
]
@dataclass
class TransactionError:
"""Data class to represent transaction errors"""
error_type: str
message: str
timestamp: str = datetime.now(timezone.utc).isoformat()
class SolanaAPIException(Exception):
"""Custom exception for Solana API related errors"""
pass
class RPCEndpointManager:
"""Manages RPC endpoints and handles switching between them"""
def __init__(self, endpoints: List[str]):
self.endpoints = endpoints.copy()
self.current_endpoint = self.endpoints[0]
self.last_request_time = 0
self.min_request_interval = 0.2 # Increased minimum interval
self.total_requests = 0
self.max_requests_per_endpoint = 3
def get_endpoint(self) -> str:
"""Get current endpoint with rate limiting"""
now = time.time()
time_since_last = now - self.last_request_time
if time_since_last < self.min_request_interval:
time.sleep(self.min_request_interval - time_since_last)
self.total_requests += 1
if self.total_requests >= self.max_requests_per_endpoint:
self.switch_endpoint()
self.total_requests = 0
self.last_request_time = time.time()
return self.current_endpoint
def switch_endpoint(self) -> str:
"""Switch to next available endpoint"""
current = self.current_endpoint
available_endpoints = [
ep for ep in self.endpoints if ep != current
]
if not available_endpoints:
raise SolanaAPIException("All endpoints exhausted")
self.current_endpoint = random.choice(available_endpoints)
logger.info(f"Switched to endpoint: {self.current_endpoint}")
return self.current_endpoint
def make_request(
endpoint_manager: RPCEndpointManager,
payload: dict,
retry_count: int = 3,
) -> dict:
"""
Makes a request with automatic endpoint switching and error handling.
"""
last_error = None
for attempt in range(retry_count):
try:
endpoint = endpoint_manager.get_endpoint()
response = requests.post(
endpoint,
json=payload,
timeout=10,
headers={"Content-Type": "application/json"},
verify=True, # Ensure SSL verification
)
if response.status_code != 200:
raise SolanaAPIException(
f"HTTP {response.status_code}: {response.text}"
)
data = response.json()
if "error" in data:
error_code = data["error"].get("code")
if error_code == 429: # Rate limit
logger.warning(
"Rate limit hit, switching endpoint..."
)
endpoint_manager.switch_endpoint()
time.sleep(2**attempt) # Exponential backoff
continue
if "message" in data["error"]:
raise SolanaAPIException(
f"RPC error: {data['error']['message']}"
)
return data
except (
requests.exceptions.SSLError,
requests.exceptions.ConnectionError,
) as e:
logger.warning(
f"Connection error with {endpoint}: {str(e)}"
)
endpoint_manager.switch_endpoint()
continue
except Exception as e:
last_error = e
logger.warning(f"Request failed: {str(e)}")
endpoint_manager.switch_endpoint()
time.sleep(1)
continue
raise SolanaAPIException(
f"All retry attempts failed. Last error: {str(last_error)}"
)
def fetch_wallet_transactions(
wallet_address: str, max_transactions: int = 10
) -> str:
"""
Fetches recent transactions for a given Solana wallet address.
Args:
wallet_address (str): The Solana wallet address to fetch transactions for
max_transactions (int, optional): Maximum number of transactions to fetch. Defaults to 10.
Returns:
str: JSON string containing transaction details
"""
try:
if (
not isinstance(wallet_address, str)
or len(wallet_address) != 44
):
raise ValueError(
f"Invalid Solana wallet address format: {wallet_address}"
)
if (
not isinstance(max_transactions, int)
or max_transactions < 1
):
raise ValueError(
"max_transactions must be a positive integer"
)
logger.info(
f"Fetching up to {max_transactions} transactions for wallet: {wallet_address}"
)
endpoint_manager = RPCEndpointManager(RPC_ENDPOINTS)
# Get transaction signatures
signatures_payload = {
"jsonrpc": "2.0",
"id": str(random.randint(1, 1000)),
"method": "getSignaturesForAddress",
"params": [wallet_address, {"limit": max_transactions}],
}
signatures_data = make_request(
endpoint_manager, signatures_payload
)
transactions = signatures_data.get("result", [])
if not transactions:
logger.info("No transactions found for this wallet")
return json.dumps(
{
"success": True,
"transactions": [],
"error": None,
"transaction_count": 0,
},
indent=2,
)
logger.info(f"Found {len(transactions)} transactions")
# Process transactions
enriched_transactions = []
for tx in transactions:
try:
tx_payload = {
"jsonrpc": "2.0",
"id": str(random.randint(1, 1000)),
"method": "getTransaction",
"params": [
tx["signature"],
{
"encoding": "json",
"maxSupportedTransactionVersion": 0,
},
],
}
tx_data = make_request(endpoint_manager, tx_payload)
if "result" in tx_data and tx_data["result"]:
result = tx_data["result"]
enriched_tx = {
"signature": tx["signature"],
"slot": tx["slot"],
"timestamp": tx.get("blockTime"),
"success": not tx.get("err"),
}
if "meta" in result:
enriched_tx["fee"] = result["meta"].get("fee")
if (
"preBalances" in result["meta"]
and "postBalances" in result["meta"]
):
enriched_tx["balance_change"] = sum(
result["meta"]["postBalances"]
) - sum(result["meta"]["preBalances"])
enriched_transactions.append(enriched_tx)
logger.info(
f"Processed transaction {tx['signature'][:8]}..."
)
except Exception as e:
logger.warning(
f"Failed to process transaction {tx['signature']}: {str(e)}"
)
continue
logger.info(
f"Successfully processed {len(enriched_transactions)} transactions"
)
return json.dumps(
{
"success": True,
"transactions": enriched_transactions,
"error": None,
"transaction_count": len(enriched_transactions),
},
indent=2,
)
except Exception as e:
error = TransactionError(
error_type="API_ERROR", message=str(e)
)
logger.error(f"Error: {error.message}")
return json.dumps(
{
"success": False,
"transactions": [],
"error": error.__dict__,
"transaction_count": 0,
},
indent=2,
)
if __name__ == "__main__":
# Example wallet address
wallet = "CtBLg4AX6LQfKVtPPUWqJyQ5cRfHydUwuZZ87rmojA1P"
try:
result = fetch_wallet_transactions(wallet)
print(result)
except Exception as e:
logger.error(f"Failed to fetch transactions: {str(e)}")

@ -1,21 +0,0 @@
from swarms.structs.swarm_arange import SwarmRearrange
from blackstone_pe.rearrange_example_blackstone import (
blackstone_acquisition_analysis,
blackstone_investment_strategy,
blackstone_market_analysis,
)
swarm_arrange = SwarmRearrange(
swarms=[
blackstone_acquisition_analysis,
blackstone_investment_strategy,
blackstone_market_analysis,
],
flow=f"{blackstone_acquisition_analysis.name} -> {blackstone_investment_strategy.name} -> {blackstone_market_analysis.name}, {blackstone_acquisition_analysis.name}",
)
print(
swarm_arrange.run(
"Analyze swarms, 150k revenue with 45m+ agents build, with 1.4m downloads since march 2024"
)
)

@ -102,7 +102,6 @@ agent_system = AgentRearrange(
return_json=False,
output_type="final",
max_loops=1,
# docs=["SECURITY.md"],
)
# Input task for the swarm

@ -0,0 +1,219 @@
import os
from swarm_models import OpenAIChat
from swarms import Agent, AgentRearrange, SwarmRearrange
company = "NVDA"
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize the Managing Director agent
managing_director = Agent(
agent_name="Managing-Director",
system_prompt=f"""
As the Managing Director at Blackstone, your role is to oversee the entire investment analysis process for potential acquisitions.
Your responsibilities include:
1. Setting the overall strategy and direction for the analysis
2. Coordinating the efforts of the various team members and ensuring a comprehensive evaluation
3. Reviewing the findings and recommendations from each team member
4. Making the final decision on whether to proceed with the acquisition
For the current potential acquisition of {company}, direct the tasks for the team to thoroughly analyze all aspects of the company, including its financials, industry position, technology, market potential, and regulatory compliance. Provide guidance and feedback as needed to ensure a rigorous and unbiased assessment.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="managing-director.json",
)
# Initialize the Vice President of Finance
vp_finance = Agent(
agent_name="VP-Finance",
system_prompt=f"""
As the Vice President of Finance at Blackstone, your role is to lead the financial analysis of potential acquisitions.
For the current potential acquisition of {company}, your tasks include:
1. Conducting a thorough review of {company}' financial statements, including income statements, balance sheets, and cash flow statements
2. Analyzing key financial metrics such as revenue growth, profitability margins, liquidity ratios, and debt levels
3. Assessing the company's historical financial performance and projecting future performance based on assumptions and market conditions
4. Identifying any financial risks or red flags that could impact the acquisition decision
5. Providing a detailed report on your findings and recommendations to the Managing Director
Be sure to consider factors such as the sustainability of {company}' business model, the strength of its customer base, and its ability to generate consistent cash flows. Your analysis should be data-driven, objective, and aligned with Blackstone's investment criteria.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="vp-finance.json",
)
# Initialize the Industry Analyst
industry_analyst = Agent(
agent_name="Industry-Analyst",
system_prompt=f"""
As the Industry Analyst at Blackstone, your role is to provide in-depth research and analysis on the industries and markets relevant to potential acquisitions.
For the current potential acquisition of {company}, your tasks include:
1. Conducting a comprehensive analysis of the industrial robotics and automation solutions industry, including market size, growth rates, key trends, and future prospects
2. Identifying the major players in the industry and assessing their market share, competitive strengths and weaknesses, and strategic positioning
3. Evaluating {company}' competitive position within the industry, including its market share, differentiation, and competitive advantages
4. Analyzing the key drivers and restraints for the industry, such as technological advancements, labor costs, regulatory changes, and economic conditions
5. Identifying potential risks and opportunities for {company} based on the industry analysis, such as disruptive technologies, emerging markets, or shifts in customer preferences
Your analysis should provide a clear and objective assessment of the attractiveness and future potential of the industrial robotics industry, as well as {company}' positioning within it. Consider both short-term and long-term factors, and provide evidence-based insights to inform the investment decision.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="industry-analyst.json",
)
# Initialize the Technology Expert
tech_expert = Agent(
agent_name="Tech-Expert",
system_prompt=f"""
As the Technology Expert at Blackstone, your role is to assess the technological capabilities, competitive advantages, and potential risks of companies being considered for acquisition.
For the current potential acquisition of {company}, your tasks include:
1. Conducting a deep dive into {company}' proprietary technologies, including its robotics platforms, automation software, and AI capabilities
2. Assessing the uniqueness, scalability, and defensibility of {company}' technology stack and intellectual property
3. Comparing {company}' technologies to those of its competitors and identifying any key differentiators or technology gaps
4. Evaluating {company}' research and development capabilities, including its innovation pipeline, engineering talent, and R&D investments
5. Identifying any potential technology risks or disruptive threats that could impact {company}' long-term competitiveness, such as emerging technologies or expiring patents
Your analysis should provide a comprehensive assessment of {company}' technological strengths and weaknesses, as well as the sustainability of its competitive advantages. Consider both the current state of its technology and its future potential in light of industry trends and advancements.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="tech-expert.json",
)
# Initialize the Market Researcher
market_researcher = Agent(
agent_name="Market-Researcher",
system_prompt=f"""
As the Market Researcher at Blackstone, your role is to analyze the target company's customer base, market share, and growth potential to assess the commercial viability and attractiveness of the potential acquisition.
For the current potential acquisition of {company}, your tasks include:
1. Analyzing {company}' current customer base, including customer segmentation, concentration risk, and retention rates
2. Assessing {company}' market share within its target markets and identifying key factors driving its market position
3. Conducting a detailed market sizing and segmentation analysis for the industrial robotics and automation markets, including identifying high-growth segments and emerging opportunities
4. Evaluating the demand drivers and sales cycles for {company}' products and services, and identifying any potential risks or limitations to adoption
5. Developing financial projections and estimates for {company}' revenue growth potential based on the market analysis and assumptions around market share and penetration
Your analysis should provide a data-driven assessment of the market opportunity for {company} and the feasibility of achieving our investment return targets. Consider both bottom-up and top-down market perspectives, and identify any key sensitivities or assumptions in your projections.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="market-researcher.json",
)
# Initialize the Regulatory Specialist
regulatory_specialist = Agent(
agent_name="Regulatory-Specialist",
system_prompt=f"""
As the Regulatory Specialist at Blackstone, your role is to identify and assess any regulatory risks, compliance requirements, and potential legal liabilities associated with potential acquisitions.
For the current potential acquisition of {company}, your tasks include:
1. Identifying all relevant regulatory bodies and laws that govern the operations of {company}, including industry-specific regulations, labor laws, and environmental regulations
2. Reviewing {company}' current compliance policies, procedures, and track record to identify any potential gaps or areas of non-compliance
3. Assessing the potential impact of any pending or proposed changes to relevant regulations that could affect {company}' business or create additional compliance burdens
4. Evaluating the potential legal liabilities and risks associated with {company}' products, services, and operations, including product liability, intellectual property, and customer contracts
5. Providing recommendations on any regulatory or legal due diligence steps that should be taken as part of the acquisition process, as well as any post-acquisition integration considerations
Your analysis should provide a comprehensive assessment of the regulatory and legal landscape surrounding {company}, and identify any material risks or potential deal-breakers. Consider both the current state and future outlook, and provide practical recommendations to mitigate identified risks.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="regulatory-specialist.json",
)
# Create a list of agents
agents = [
managing_director,
vp_finance,
industry_analyst,
tech_expert,
market_researcher,
regulatory_specialist,
]
# Define multiple flow patterns
flows = [
"Industry-Analyst -> Tech-Expert -> Market-Researcher -> Regulatory-Specialist -> Managing-Director -> VP-Finance",
"Managing-Director -> VP-Finance -> Industry-Analyst -> Tech-Expert -> Market-Researcher -> Regulatory-Specialist",
"Tech-Expert -> Market-Researcher -> Regulatory-Specialist -> Industry-Analyst -> Managing-Director -> VP-Finance",
]
# Create instances of AgentRearrange for each flow pattern
blackstone_acquisition_analysis = AgentRearrange(
name="Blackstone-Acquisition-Analysis",
description="A system for analyzing potential acquisitions",
agents=agents,
flow=flows[0],
)
blackstone_investment_strategy = AgentRearrange(
name="Blackstone-Investment-Strategy",
description="A system for evaluating investment opportunities",
agents=agents,
flow=flows[1],
)
blackstone_market_analysis = AgentRearrange(
name="Blackstone-Market-Analysis",
description="A system for analyzing market trends and opportunities",
agents=agents,
flow=flows[2],
)
swarm_arrange = SwarmRearrange(
name="Blackstone-Swarm",
description="A swarm that processes tasks concurrently using multiple agents and rearranges the flow based on the task requirements.",
swarms=[
blackstone_acquisition_analysis,
blackstone_investment_strategy,
blackstone_market_analysis,
],
flow=f"{blackstone_acquisition_analysis.name} -> {blackstone_investment_strategy.name} -> {blackstone_market_analysis.name}",
max_loops=1,
)
print(
swarm_arrange.run(
"Analyze NVIDIA's performance, market trends, and potential for growth in the AI industry"
)
)

@ -0,0 +1,219 @@
import os
from swarm_models import OpenAIChat
from swarms import Agent, AgentRearrange, SwarmRearrange
company = "NVDA"
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize the Managing Director agent
managing_director = Agent(
agent_name="Managing-Director",
system_prompt=f"""
As the Managing Director at Blackstone, your role is to oversee the entire investment analysis process for potential acquisitions.
Your responsibilities include:
1. Setting the overall strategy and direction for the analysis
2. Coordinating the efforts of the various team members and ensuring a comprehensive evaluation
3. Reviewing the findings and recommendations from each team member
4. Making the final decision on whether to proceed with the acquisition
For the current potential acquisition of {company}, direct the tasks for the team to thoroughly analyze all aspects of the company, including its financials, industry position, technology, market potential, and regulatory compliance. Provide guidance and feedback as needed to ensure a rigorous and unbiased assessment.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="managing-director.json",
)
# Initialize the Vice President of Finance
vp_finance = Agent(
agent_name="VP-Finance",
system_prompt=f"""
As the Vice President of Finance at Blackstone, your role is to lead the financial analysis of potential acquisitions.
For the current potential acquisition of {company}, your tasks include:
1. Conducting a thorough review of {company}' financial statements, including income statements, balance sheets, and cash flow statements
2. Analyzing key financial metrics such as revenue growth, profitability margins, liquidity ratios, and debt levels
3. Assessing the company's historical financial performance and projecting future performance based on assumptions and market conditions
4. Identifying any financial risks or red flags that could impact the acquisition decision
5. Providing a detailed report on your findings and recommendations to the Managing Director
Be sure to consider factors such as the sustainability of {company}' business model, the strength of its customer base, and its ability to generate consistent cash flows. Your analysis should be data-driven, objective, and aligned with Blackstone's investment criteria.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="vp-finance.json",
)
# Initialize the Industry Analyst
industry_analyst = Agent(
agent_name="Industry-Analyst",
system_prompt=f"""
As the Industry Analyst at Blackstone, your role is to provide in-depth research and analysis on the industries and markets relevant to potential acquisitions.
For the current potential acquisition of {company}, your tasks include:
1. Conducting a comprehensive analysis of the industrial robotics and automation solutions industry, including market size, growth rates, key trends, and future prospects
2. Identifying the major players in the industry and assessing their market share, competitive strengths and weaknesses, and strategic positioning
3. Evaluating {company}' competitive position within the industry, including its market share, differentiation, and competitive advantages
4. Analyzing the key drivers and restraints for the industry, such as technological advancements, labor costs, regulatory changes, and economic conditions
5. Identifying potential risks and opportunities for {company} based on the industry analysis, such as disruptive technologies, emerging markets, or shifts in customer preferences
Your analysis should provide a clear and objective assessment of the attractiveness and future potential of the industrial robotics industry, as well as {company}' positioning within it. Consider both short-term and long-term factors, and provide evidence-based insights to inform the investment decision.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="industry-analyst.json",
)
# Initialize the Technology Expert
tech_expert = Agent(
agent_name="Tech-Expert",
system_prompt=f"""
As the Technology Expert at Blackstone, your role is to assess the technological capabilities, competitive advantages, and potential risks of companies being considered for acquisition.
For the current potential acquisition of {company}, your tasks include:
1. Conducting a deep dive into {company}' proprietary technologies, including its robotics platforms, automation software, and AI capabilities
2. Assessing the uniqueness, scalability, and defensibility of {company}' technology stack and intellectual property
3. Comparing {company}' technologies to those of its competitors and identifying any key differentiators or technology gaps
4. Evaluating {company}' research and development capabilities, including its innovation pipeline, engineering talent, and R&D investments
5. Identifying any potential technology risks or disruptive threats that could impact {company}' long-term competitiveness, such as emerging technologies or expiring patents
Your analysis should provide a comprehensive assessment of {company}' technological strengths and weaknesses, as well as the sustainability of its competitive advantages. Consider both the current state of its technology and its future potential in light of industry trends and advancements.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="tech-expert.json",
)
# Initialize the Market Researcher
market_researcher = Agent(
agent_name="Market-Researcher",
system_prompt=f"""
As the Market Researcher at Blackstone, your role is to analyze the target company's customer base, market share, and growth potential to assess the commercial viability and attractiveness of the potential acquisition.
For the current potential acquisition of {company}, your tasks include:
1. Analyzing {company}' current customer base, including customer segmentation, concentration risk, and retention rates
2. Assessing {company}' market share within its target markets and identifying key factors driving its market position
3. Conducting a detailed market sizing and segmentation analysis for the industrial robotics and automation markets, including identifying high-growth segments and emerging opportunities
4. Evaluating the demand drivers and sales cycles for {company}' products and services, and identifying any potential risks or limitations to adoption
5. Developing financial projections and estimates for {company}' revenue growth potential based on the market analysis and assumptions around market share and penetration
Your analysis should provide a data-driven assessment of the market opportunity for {company} and the feasibility of achieving our investment return targets. Consider both bottom-up and top-down market perspectives, and identify any key sensitivities or assumptions in your projections.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="market-researcher.json",
)
# Initialize the Regulatory Specialist
regulatory_specialist = Agent(
agent_name="Regulatory-Specialist",
system_prompt=f"""
As the Regulatory Specialist at Blackstone, your role is to identify and assess any regulatory risks, compliance requirements, and potential legal liabilities associated with potential acquisitions.
For the current potential acquisition of {company}, your tasks include:
1. Identifying all relevant regulatory bodies and laws that govern the operations of {company}, including industry-specific regulations, labor laws, and environmental regulations
2. Reviewing {company}' current compliance policies, procedures, and track record to identify any potential gaps or areas of non-compliance
3. Assessing the potential impact of any pending or proposed changes to relevant regulations that could affect {company}' business or create additional compliance burdens
4. Evaluating the potential legal liabilities and risks associated with {company}' products, services, and operations, including product liability, intellectual property, and customer contracts
5. Providing recommendations on any regulatory or legal due diligence steps that should be taken as part of the acquisition process, as well as any post-acquisition integration considerations
Your analysis should provide a comprehensive assessment of the regulatory and legal landscape surrounding {company}, and identify any material risks or potential deal-breakers. Consider both the current state and future outlook, and provide practical recommendations to mitigate identified risks.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="regulatory-specialist.json",
)
# Create a list of agents
agents = [
managing_director,
vp_finance,
industry_analyst,
tech_expert,
market_researcher,
regulatory_specialist,
]
# Define multiple flow patterns
flows = [
"Industry-Analyst -> Tech-Expert -> Market-Researcher -> Regulatory-Specialist -> Managing-Director -> VP-Finance",
"Managing-Director -> VP-Finance -> Industry-Analyst -> Tech-Expert -> Market-Researcher -> Regulatory-Specialist",
"Tech-Expert -> Market-Researcher -> Regulatory-Specialist -> Industry-Analyst -> Managing-Director -> VP-Finance",
]
# Create instances of AgentRearrange for each flow pattern
blackstone_acquisition_analysis = AgentRearrange(
name="Blackstone-Acquisition-Analysis",
description="A system for analyzing potential acquisitions",
agents=agents,
flow=flows[0],
)
blackstone_investment_strategy = AgentRearrange(
name="Blackstone-Investment-Strategy",
description="A system for evaluating investment opportunities",
agents=agents,
flow=flows[1],
)
blackstone_market_analysis = AgentRearrange(
name="Blackstone-Market-Analysis",
description="A system for analyzing market trends and opportunities",
agents=agents,
flow=flows[2],
)
swarm_arrange = SwarmRearrange(
name="Blackstone-Swarm",
description="A swarm that processes tasks concurrently using multiple agents and rearranges the flow based on the task requirements.",
swarms=[
blackstone_acquisition_analysis,
blackstone_investment_strategy,
blackstone_market_analysis,
],
flow=f"{blackstone_acquisition_analysis.name} -> {blackstone_investment_strategy.name} -> {blackstone_market_analysis.name}",
max_loops=1,
)
print(
swarm_arrange.run(
"Analyze NVIDIA's performance, market trends, and potential for growth in the AI industry"
)
)

@ -0,0 +1,416 @@
from __future__ import annotations
import asyncio
import base64
import io
import threading
from os import getenv
from typing import Any, Awaitable, Callable, cast
import numpy as np
try:
import pyaudio
except ImportError:
import subprocess
subprocess.check_call(["pip", "install", "pyaudio"])
import pyaudio
try:
import sounddevice as sd
except ImportError:
import subprocess
subprocess.check_call(["pip", "install", "sounddevice"])
import sounddevice as sd
from loguru import logger
from openai import AsyncOpenAI
from openai.resources.beta.realtime.realtime import (
AsyncRealtimeConnection,
)
from openai.types.beta.realtime.session import Session
try:
from pydub import AudioSegment
except ImportError:
import subprocess
subprocess.check_call(["pip", "install", "pydub"])
from pydub import AudioSegment
from dotenv import load_dotenv
load_dotenv()
CHUNK_LENGTH_S = 0.05 # 100ms
SAMPLE_RATE = 24000
FORMAT = pyaudio.paInt16
CHANNELS = 1
# pyright: reportUnknownMemberType=false, reportUnknownVariableType=false, reportUnknownArgumentType=false
def audio_to_pcm16_base64(audio_bytes: bytes) -> bytes:
# load the audio file from the byte stream
audio = AudioSegment.from_file(io.BytesIO(audio_bytes))
print(
f"Loaded audio: {audio.frame_rate=} {audio.channels=} {audio.sample_width=} {audio.frame_width=}"
)
# resample to 24kHz mono pcm16
pcm_audio = (
audio.set_frame_rate(SAMPLE_RATE)
.set_channels(CHANNELS)
.set_sample_width(2)
.raw_data
)
return pcm_audio
class AudioPlayerAsync:
def __init__(self):
self.queue = []
self.lock = threading.Lock()
self.stream = sd.OutputStream(
callback=self.callback,
samplerate=SAMPLE_RATE,
channels=CHANNELS,
dtype=np.int16,
blocksize=int(CHUNK_LENGTH_S * SAMPLE_RATE),
)
self.playing = False
self._frame_count = 0
def callback(self, outdata, frames, time, status): # noqa
with self.lock:
data = np.empty(0, dtype=np.int16)
# get next item from queue if there is still space in the buffer
while len(data) < frames and len(self.queue) > 0:
item = self.queue.pop(0)
frames_needed = frames - len(data)
data = np.concatenate((data, item[:frames_needed]))
if len(item) > frames_needed:
self.queue.insert(0, item[frames_needed:])
self._frame_count += len(data)
# fill the rest of the frames with zeros if there is no more data
if len(data) < frames:
data = np.concatenate(
(
data,
np.zeros(frames - len(data), dtype=np.int16),
)
)
outdata[:] = data.reshape(-1, 1)
def reset_frame_count(self):
self._frame_count = 0
def get_frame_count(self):
return self._frame_count
def add_data(self, data: bytes):
with self.lock:
# bytes is pcm16 single channel audio data, convert to numpy array
np_data = np.frombuffer(data, dtype=np.int16)
self.queue.append(np_data)
if not self.playing:
self.start()
def start(self):
self.playing = True
self.stream.start()
def stop(self):
self.playing = False
self.stream.stop()
with self.lock:
self.queue = []
def terminate(self):
self.stream.close()
async def send_audio_worker_sounddevice(
connection: AsyncRealtimeConnection,
should_send: Callable[[], bool] | None = None,
start_send: Callable[[], Awaitable[None]] | None = None,
):
sent_audio = False
device_info = sd.query_devices()
print(device_info)
read_size = int(SAMPLE_RATE * 0.02)
stream = sd.InputStream(
channels=CHANNELS,
samplerate=SAMPLE_RATE,
dtype="int16",
)
stream.start()
try:
while True:
if stream.read_available < read_size:
await asyncio.sleep(0)
continue
data, _ = stream.read(read_size)
if should_send() if should_send else True:
if not sent_audio and start_send:
await start_send()
await connection.send(
{
"type": "input_audio_buffer.append",
"audio": base64.b64encode(data).decode(
"utf-8"
),
}
)
sent_audio = True
elif sent_audio:
print("Done, triggering inference")
await connection.send(
{"type": "input_audio_buffer.commit"}
)
await connection.send(
{"type": "response.create", "response": {}}
)
sent_audio = False
await asyncio.sleep(0)
except KeyboardInterrupt:
pass
finally:
stream.stop()
stream.close()
class RealtimeApp:
"""
A console-based application to handle real-time audio recording and streaming,
connecting to OpenAI's GPT-4 Realtime API.
Features:
- Streams microphone input to the GPT-4 Realtime API.
- Logs transcription results.
- Sends text prompts to the GPT-4 Realtime API.
"""
def __init__(self, system_prompt: str = None) -> None:
self.connection: AsyncRealtimeConnection | None = None
self.session: Session | None = None
self.client = AsyncOpenAI(api_key=getenv("OPENAI_API_KEY"))
self.audio_player = AudioPlayerAsync()
self.last_audio_item_id: str | None = None
self.should_send_audio = asyncio.Event()
self.connected = asyncio.Event()
self.system_prompt = system_prompt
async def initialize_text_prompt(self, text: str) -> None:
"""Initialize and send a text prompt to the OpenAI Realtime API."""
try:
async with self.client.beta.realtime.connect(
model="gpt-4o-realtime-preview-2024-10-01"
) as conn:
self.connection = conn
await conn.session.update(
session={"modalities": ["text"]}
)
await conn.conversation.item.create(
item={
"type": "message",
"role": "system",
"content": [
{"type": "input_text", "text": text}
],
}
)
await conn.response.create()
async for event in conn:
if event.type == "response.text.delta":
print(event.delta, flush=True, end="")
elif event.type == "response.text.done":
print()
elif event.type == "response.done":
break
except Exception as e:
logger.exception(f"Error initializing text prompt: {e}")
async def handle_realtime_connection(self) -> None:
"""Handle the connection to the OpenAI Realtime API."""
try:
async with self.client.beta.realtime.connect(
model="gpt-4o-realtime-preview-2024-10-01"
) as conn:
self.connection = conn
self.connected.set()
logger.info("Connected to OpenAI Realtime API.")
await conn.session.update(
session={"turn_detection": {"type": "server_vad"}}
)
acc_items: dict[str, Any] = {}
async for event in conn:
if event.type == "session.created":
self.session = event.session
assert event.session.id is not None
logger.info(
f"Session created with ID: {event.session.id}"
)
continue
if event.type == "session.updated":
self.session = event.session
logger.info("Session updated.")
continue
if event.type == "response.audio.delta":
if event.item_id != self.last_audio_item_id:
self.audio_player.reset_frame_count()
self.last_audio_item_id = event.item_id
bytes_data = base64.b64decode(event.delta)
self.audio_player.add_data(bytes_data)
continue
if (
event.type
== "response.audio_transcript.delta"
):
try:
text = acc_items[event.item_id]
except KeyError:
acc_items[event.item_id] = event.delta
else:
acc_items[event.item_id] = (
text + event.delta
)
logger.debug(
f"Transcription updated: {acc_items[event.item_id]}"
)
continue
if event.type == "response.text.delta":
print(event.delta, flush=True, end="")
continue
if event.type == "response.text.done":
print()
continue
if event.type == "response.done":
break
except Exception as e:
logger.exception(
f"Error in realtime connection handler: {e}"
)
async def _get_connection(self) -> AsyncRealtimeConnection:
"""Wait for and return the realtime connection."""
await self.connected.wait()
assert self.connection is not None
return self.connection
async def send_text_prompt(self, text: str) -> None:
"""Send a text prompt to the OpenAI Realtime API."""
try:
connection = await self._get_connection()
if not self.session:
logger.error(
"Session is not initialized. Cannot send prompt."
)
return
logger.info(f"Sending prompt to the model: {text}")
await connection.conversation.item.create(
item={
"type": "message",
"role": "user",
"content": [{"type": "input_text", "text": text}],
}
)
await connection.response.create()
except Exception as e:
logger.exception(f"Error sending text prompt: {e}")
async def send_mic_audio(self) -> None:
"""Stream microphone audio to the OpenAI Realtime API."""
import sounddevice as sd # type: ignore
sent_audio = False
try:
read_size = int(SAMPLE_RATE * 0.02)
stream = sd.InputStream(
channels=CHANNELS,
samplerate=SAMPLE_RATE,
dtype="int16",
)
stream.start()
while True:
if stream.read_available < read_size:
await asyncio.sleep(0)
continue
await self.should_send_audio.wait()
data, _ = stream.read(read_size)
connection = await self._get_connection()
if not sent_audio:
asyncio.create_task(
connection.send({"type": "response.cancel"})
)
sent_audio = True
await connection.input_audio_buffer.append(
audio=base64.b64encode(cast(Any, data)).decode(
"utf-8"
)
)
await asyncio.sleep(0)
except Exception as e:
logger.exception(
f"Error in microphone audio streaming: {e}"
)
finally:
stream.stop()
stream.close()
async def run(self) -> None:
"""Start the application tasks."""
logger.info("Starting application tasks.")
await asyncio.gather(
# self.initialize_text_prompt(self.system_prompt),
self.handle_realtime_connection(),
self.send_mic_audio(),
)
if __name__ == "__main__":
logger.add(
"realtime_app.log",
rotation="10 MB",
retention="10 days",
level="DEBUG",
)
logger.info("Starting RealtimeApp.")
app = RealtimeApp()
asyncio.run(app.run())

@ -21,10 +21,6 @@ types-pytz>=2023.3,<2025.0
types-chardet>=5.0.4.6
mypy-protobuf>=3.0.0
pytest>=8.1.1
pandas>=2.2.2
networkx
aiofiles
clusterops
reportlab
doc-master
termcolor
clusterops

@ -3,7 +3,7 @@ from swarms import Agent
Agent(
agent_name="Stock-Analysis-Agent",
model_name="gpt-4o-mini",
max_loops=1,
interactive=False,
max_loops="auto",
interactive=True,
streaming_on=True,
).run("What are 5 hft algorithms")

@ -0,0 +1,226 @@
"""
SkyServe API: Production-grade FastAPI server for SimpleSkyServe.
This module provides a REST API interface for managing SkyPilot services with
proper error handling, validation, and production configurations.
"""
import multiprocessing
import os
from typing import List, Optional
from fastapi import FastAPI, HTTPException, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from loguru import logger
from pydantic import BaseModel, Field
from pydantic.v1 import validator
from swarm_cloud_code import ServiceConfig, SimpleSkyServe, UpdateMode
# Calculate optimal number of workers
CPU_COUNT = multiprocessing.cpu_count()
WORKERS = CPU_COUNT * 2
# Configure logging
logger.add(
"logs/skyserve-api.log",
rotation="500 MB",
retention="10 days",
level="INFO",
)
# Initialize FastAPI app
app = FastAPI(
title="SkyServe API",
description="REST API for managing SkyPilot services",
version="1.0.0",
docs_url="/docs",
redoc_url="/redoc",
)
# Configure CORS
app.add_middleware(
CORSMiddleware,
allow_origins=os.getenv("ALLOWED_ORIGINS", "*").split(","),
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Pydantic models for request/response validation
class ServiceConfigRequest(BaseModel):
"""Request model for service configuration."""
code: str = Field(
..., description="Python code to run as a service"
)
requirements: Optional[List[str]] = Field(
default=None, description="List of pip packages"
)
envs: Optional[dict] = Field(
default=None, description="Environment variables"
)
name: Optional[str] = Field(
default=None, description="Service name"
)
num_cpus: int = Field(
default=2, ge=1, description="Number of CPUs"
)
memory: int = Field(default=4, ge=1, description="Memory in GB")
use_spot: bool = Field(
default=False, description="Use spot instances"
)
num_nodes: int = Field(
default=1, ge=1, description="Number of nodes"
)
@validator("name")
def validate_name(cls, v):
if v and not v.isalnum():
raise ValueError("Service name must be alphanumeric")
return v
class DeploymentResponse(BaseModel):
"""Response model for deployment information."""
service_name: str
endpoint: str
class ServiceStatusResponse(BaseModel):
"""Response model for service status."""
name: str
status: str
versions: List[int]
replicas: int
resources: str
uptime: int
endpoint: Optional[str]
@app.post(
"/services/",
response_model=DeploymentResponse,
status_code=status.HTTP_201_CREATED,
tags=["services"],
)
async def create_service(config: ServiceConfigRequest):
"""Deploy a new service."""
try:
service_config = ServiceConfig(
code=config.code,
requirements=config.requirements,
envs=config.envs,
name=config.name,
num_cpus=config.num_cpus,
memory=config.memory,
use_spot=config.use_spot,
num_nodes=config.num_nodes,
)
name, endpoint = SimpleSkyServe.deploy(service_config)
return {"service_name": name, "endpoint": endpoint}
except Exception as e:
logger.error(f"Failed to create service: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
)
@app.get(
"/services/",
response_model=List[ServiceStatusResponse],
tags=["services"],
)
async def list_services(name: Optional[str] = None):
"""Get status of all services or a specific service."""
try:
deployments = SimpleSkyServe.get_deployments(name)
return deployments
except Exception as e:
logger.error(f"Failed to list services: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
)
@app.put(
"/services/{service_name}",
status_code=status.HTTP_200_OK,
tags=["services"],
)
async def update_service(
service_name: str,
config: ServiceConfigRequest,
mode: UpdateMode = UpdateMode.GRADUAL,
):
"""Update an existing service."""
try:
service_config = ServiceConfig(
code=config.code,
requirements=config.requirements,
envs=config.envs,
name=config.name,
num_cpus=config.num_cpus,
memory=config.memory,
use_spot=config.use_spot,
num_nodes=config.num_nodes,
)
SimpleSkyServe.update(service_name, service_config, mode)
return {
"message": f"Service {service_name} updated successfully"
}
except Exception as e:
logger.error(f"Failed to update service: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
)
@app.delete(
"/services/{service_name}",
status_code=status.HTTP_204_NO_CONTENT,
tags=["services"],
)
async def delete_service(service_name: str, purge: bool = False):
"""Delete a service."""
try:
SimpleSkyServe.delete(service_name, purge)
except Exception as e:
logger.error(f"Failed to delete service: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=str(e),
)
@app.exception_handler(Exception)
async def general_exception_handler(request, exc):
"""Global exception handler."""
logger.error(f"Unhandled exception: {str(exc)}")
return JSONResponse(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
content={"detail": "Internal server error"},
)
# Entry point for uvicorn
if __name__ == "__main__":
import uvicorn
uvicorn.run(
"api:app",
host="0.0.0.0",
port=8000,
workers=WORKERS,
log_level="info",
reload=False, # Disable in production
proxy_headers=True,
forwarded_allow_ips="*",
access_log=True,
)

@ -0,0 +1,10 @@
fastapi
uvicorn[standard]
pydantic
loguru
python-multipart
python-jose[cryptography]
passlib[bcrypt]
gunicorn
prometheus-fastapi-instrumentator
httpx

@ -0,0 +1,369 @@
"""
SimpleSkyServe: A simplified interface for SkyPilot's serve functionality.
This module provides an easy-to-use interface for deploying, managing, updating and monitoring
services using SkyPilot's serve functionality. It supports the full lifecycle of services
including deployment, updates, status monitoring, and cleanup.
Key Features:
- Simple deployment with code and requirements
- Service updates with different update modes
- Status monitoring and deployment fetching
- Service cleanup and deletion
"""
from enum import Enum
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple, Union
import tempfile
from loguru import logger
class UpdateMode(Enum):
"""Update modes for service updates.
IMMEDIATE: Update all replicas immediately
GRADUAL: Update replicas gradually with zero downtime
"""
IMMEDIATE = "immediate"
GRADUAL = "gradual"
@dataclass
class ServiceConfig:
"""Configuration for a SkyPilot service.
Attributes:
code: Python code to run as a service
requirements: List of pip packages required by the service
envs: Environment variables to set for the service
name: Optional name for the service (auto-generated if not provided)
num_cpus: Number of CPUs to request (default: 2)
memory: Memory in GB to request (default: 4)
use_spot: Whether to use spot instances (default: False)
"""
code: str
requirements: Optional[List[str]] = None
envs: Optional[Dict[str, str]] = None
name: Optional[str] = None
num_cpus: int = 2
memory: int = 4
use_spot: bool = False
num_nodes: int = 1
class SimpleSkyServe:
"""Simple interface for SkyPilot serve functionality."""
@staticmethod
def deploy(config: ServiceConfig) -> Tuple[str, str]:
"""Deploy a new service using the provided configuration.
Args:
config: ServiceConfig object containing service configuration
Returns:
Tuple of (service_name: str, endpoint: str)
Raises:
ValueError: If the configuration is invalid
RuntimeError: If deployment fails
"""
logger.info("Deploying new service...")
# Create temporary files for setup and service code
with tempfile.NamedTemporaryFile(
mode="w", suffix=".txt"
) as req_file, tempfile.NamedTemporaryFile(
mode="w", suffix=".py"
) as code_file:
# Write requirements if provided
setup_cmd = ""
if config.requirements:
req_file.write("\n".join(config.requirements))
req_file.flush()
setup_cmd = f"pip install -r {req_file.name}"
# Write service code
code_file.write(config.code)
code_file.flush()
# Create SkyPilot task
task = sky.Task(
name=config.name,
setup=setup_cmd,
run=f"python {code_file.name}",
envs=config.envs,
num_nodes=config.num_nodes,
)
# Set resource requirements
resources = sky.Resources(
cpus=config.num_cpus,
memory=config.memory,
use_spot=config.use_spot,
)
task.set_resources(resources)
try:
# Deploy the service
service_name, endpoint = sky.serve.up(
task, service_name=config.name
)
logger.success(
f"Service deployed successfully at {endpoint}"
)
return service_name, endpoint
except Exception as e:
logger.error(f"Failed to deploy service: {str(e)}")
raise RuntimeError(
f"Service deployment failed: {str(e)}"
) from e
@staticmethod
def status(service_name: Optional[str] = None) -> List[Dict]:
"""Get status of services.
Args:
service_name: Optional name of specific service to get status for
If None, returns status of all services
Returns:
List of service status dictionaries containing:
- name: Service name
- status: Current status
- endpoint: Service endpoint
- uptime: Service uptime in seconds
...and other service metadata
"""
logger.info(
f"Getting status for service: {service_name or 'all'}"
)
try:
status_list = sky.serve.status(service_name)
logger.debug(
f"Retrieved status for {len(status_list)} services"
)
return status_list
except Exception as e:
logger.error(f"Failed to get service status: {str(e)}")
raise RuntimeError(
f"Failed to get service status: {str(e)}"
) from e
@staticmethod
def update(
service_name: str,
config: ServiceConfig,
mode: UpdateMode = UpdateMode.GRADUAL,
) -> None:
"""Update an existing service with new configuration.
Args:
service_name: Name of service to update
config: New service configuration
mode: Update mode (IMMEDIATE or GRADUAL)
Raises:
ValueError: If service doesn't exist or config is invalid
RuntimeError: If update fails
"""
logger.info(
f"Updating service {service_name} with mode {mode.value}"
)
# Create temporary files for setup and service code
with tempfile.NamedTemporaryFile(
mode="w", suffix=".txt"
) as req_file, tempfile.NamedTemporaryFile(
mode="w", suffix=".py"
) as code_file:
# Write requirements if provided
setup_cmd = ""
if config.requirements:
req_file.write("\n".join(config.requirements))
req_file.flush()
setup_cmd = f"pip install -r {req_file.name}"
# Write service code
code_file.write(config.code)
code_file.flush()
# Create SkyPilot task for update
task = sky.Task(
name=config.name or service_name,
setup=setup_cmd,
run=f"python {code_file.name}",
envs=config.envs,
)
# Set resource requirements
resources = sky.Resources(
cpus=config.num_cpus,
memory=config.memory,
use_spot=config.use_spot,
)
task.set_resources(resources)
try:
# Update the service
sky.serve.update(
task=task,
service_name=service_name,
mode=sky.serve.UpdateMode(mode.value),
)
logger.success(
f"Service {service_name} updated successfully"
)
except Exception as e:
logger.error(f"Failed to update service: {str(e)}")
raise RuntimeError(
f"Service update failed: {str(e)}"
) from e
@staticmethod
def get_deployments(
service_name: Optional[str] = None,
) -> List[Dict]:
"""Get detailed information about service deployments.
Args:
service_name: Optional name of specific service to get deployments for
If None, returns deployments for all services
Returns:
List of deployment dictionaries containing:
- name: Service name
- versions: List of deployed versions
- active_version: Currently active version
- replicas: Number of replicas
- resources: Resource usage
- status: Deployment status
"""
logger.info(
f"Fetching deployments for: {service_name or 'all services'}"
)
try:
status_list = sky.serve.status(service_name)
deployments = []
for status in status_list:
deployment = {
"name": status["name"],
"versions": status["active_versions"],
"status": status["status"],
"replicas": len(status.get("replica_info", [])),
"resources": status.get(
"requested_resources_str", ""
),
"uptime": status.get("uptime", 0),
"endpoint": None,
}
# Extract endpoint if available
if status.get("load_balancer_port"):
deployment["endpoint"] = (
f"http://{status.get('controller_addr')}:{status['load_balancer_port']}"
)
deployments.append(deployment)
logger.debug(f"Retrieved {len(deployments)} deployments")
return deployments
except Exception as e:
logger.error(f"Failed to fetch deployments: {str(e)}")
raise RuntimeError(
f"Failed to fetch deployments: {str(e)}"
) from e
@staticmethod
def delete(
service_name: Union[str, List[str]], purge: bool = False
) -> None:
"""Delete one or more services.
Args:
service_name: Name of service(s) to delete
purge: Whether to purge services in failed status
Raises:
RuntimeError: If deletion fails
"""
names = (
[service_name]
if isinstance(service_name, str)
else service_name
)
logger.info(f"Deleting services: {names}")
try:
sky.serve.down(service_names=names, purge=purge)
logger.success(f"Successfully deleted services: {names}")
except Exception as e:
logger.error(f"Failed to delete services: {str(e)}")
raise RuntimeError(
f"Service deletion failed: {str(e)}"
) from e
# # Example usage:
# if __name__ == "__main__":
# from time import sleep
# # Configuration for a simple FastAPI service
# config = ServiceConfig(
# code="""
# from fastapi import FastAPI
# app = FastAPI()
# @app.get("/")
# def read_root():
# return {"Hello": "World"}
# """,
# requirements=["fastapi", "uvicorn"],
# envs={"PORT": "8000"},
# name="fastapi-demo"
# )
# # Deploy the service
# name, endpoint = SimpleSkyServe.deploy(config)
# print(f"Service deployed at: {endpoint}")
# # Get service status
# status = SimpleSkyServe.status(name)
# print(f"Service status: {status}")
# # Get deployment information
# deployments = SimpleSkyServe.get_deployments(name)
# print(f"Deployment info: {deployments}")
# # Update the service with new code
# new_config = ServiceConfig(
# code="""
# from fastapi import FastAPI
# app = FastAPI()
# @app.get("/")
# def read_root():
# return {"Hello": "Updated World"}
# """,
# requirements=["fastapi", "uvicorn"],
# envs={"PORT": "8000"}
# )
# SimpleSkyServe.update(name, new_config, mode=UpdateMode.GRADUAL)
# print("Service updated")
# # Wait for update to complete
# sleep(30)
# # Check status after update
# status = SimpleSkyServe.status(name)
# print(f"Updated service status: {status}")
# # Delete the service
# SimpleSkyServe.delete(name)

@ -0,0 +1,160 @@
"""
Simple test script for SkyServe API using requests.
No test framework dependencies - just pure requests and assertions.
"""
import time
import requests
from typing import Any
# API Configuration
BASE_URL = "http://localhost:8000"
HEADERS = {"Content-Type": "application/json"}
def assert_equals(actual: Any, expected: Any, message: str = ""):
"""Simple assertion helper."""
if actual != expected:
raise AssertionError(
f"{message}\nExpected: {expected}\nGot: {actual}"
)
def test_create_service() -> str:
"""Test service creation and return the service name."""
print("\n🧪 Testing service creation...")
payload = {
"code": """
from fastapi import FastAPI
app = FastAPI()
@app.get("/")
def read_root():
return {"Hello": "World"}
""",
"requirements": ["fastapi", "uvicorn"],
"name": "test_service",
"num_cpus": 2,
"memory": 4,
}
response = requests.post(
f"{BASE_URL}/services/", json=payload, headers=HEADERS
)
assert_equals(
response.status_code, 201, "Service creation failed"
)
data = response.json()
assert "service_name" in data, "Response missing service_name"
assert "endpoint" in data, "Response missing endpoint"
print("✅ Service created successfully!")
return data["service_name"]
def test_list_services(expected_service_name: str):
"""Test listing services."""
print("\n🧪 Testing service listing...")
response = requests.get(f"{BASE_URL}/services/")
assert_equals(response.status_code, 200, "Service listing failed")
services = response.json()
assert isinstance(services, list), "Expected list of services"
# Find our service in the list
service_found = False
for service in services:
if service["name"] == expected_service_name:
service_found = True
break
assert (
service_found
), f"Created service {expected_service_name} not found in list"
print("✅ Services listed successfully!")
def test_update_service(service_name: str):
"""Test service update."""
print("\n🧪 Testing service update...")
update_payload = {
"code": """
from fastapi import FastAPI
app = FastAPI()
@app.get("/")
def read_root():
return {"Hello": "Updated World"}
""",
"requirements": ["fastapi", "uvicorn"],
"name": service_name,
"num_cpus": 2,
"memory": 4,
}
response = requests.put(
f"{BASE_URL}/services/{service_name}",
json=update_payload,
headers=HEADERS,
params={"mode": "gradual"},
)
assert_equals(response.status_code, 200, "Service update failed")
print("✅ Service updated successfully!")
def test_delete_service(service_name: str):
"""Test service deletion."""
print("\n🧪 Testing service deletion...")
response = requests.delete(f"{BASE_URL}/services/{service_name}")
assert_equals(
response.status_code, 204, "Service deletion failed"
)
# Verify service is gone
list_response = requests.get(f"{BASE_URL}/services/")
services = list_response.json()
for service in services:
if service["name"] == service_name:
raise AssertionError(
f"Service {service_name} still exists after deletion"
)
print("✅ Service deleted successfully!")
def run_tests():
"""Run all tests in sequence."""
try:
print("🚀 Starting API tests...")
# Run tests in sequence
service_name = test_create_service()
# Wait a bit for service to be fully ready
print("⏳ Waiting for service to be ready...")
time.sleep(5)
test_list_services(service_name)
test_update_service(service_name)
test_delete_service(service_name)
print("\n✨ All tests passed successfully! ✨")
except AssertionError as e:
print(f"\n❌ Test failed: {str(e)}")
raise
except Exception as e:
print(f"\n❌ Unexpected error: {str(e)}")
raise
finally:
print("\n🏁 Tests completed")
if __name__ == "__main__":
run_tests()

@ -1,10 +1,13 @@
import time
import os
import json
from typing import List, Union, Dict, Any
import os
import subprocess
import time
from datetime import datetime
from typing import Any, Dict, List, Union
from pydantic import BaseModel, Field
from pydantic.v1 import validator
from datetime import datetime
from swarms.utils.file_processing import create_file_in_folder
from swarms.utils.loguru_logger import initialize_logger
@ -303,23 +306,24 @@ class Artifact(BaseModel):
Helper method to save content as PDF using reportlab
"""
try:
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
except ImportError as e:
logger.error(f"Error importing reportlab: {e}")
subprocess.run(["pip", "install", "reportlab"])
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
c = canvas.Canvas(output_path, pagesize=letter)
# Split content into lines
y = 750 # Starting y position
for line in self.contents.split("\n"):
c.drawString(50, y, line)
y -= 15 # Move down for next line
if y < 50: # New page if bottom reached
c.showPage()
y = 750
c.save()
except ImportError:
raise ImportError(
"reportlab package is required for PDF output. Install with: pip install reportlab"
)
c = canvas.Canvas(output_path, pagesize=letter)
# Split content into lines
y = 750 # Starting y position
for line in self.contents.split("\n"):
c.drawString(50, y, line)
y -= 15 # Move down for next line
if y < 50: # New page if bottom reached
c.showPage()
y = 750
c.save()
# # Example usage

@ -12,7 +12,13 @@ from swarms.structs.graph_workflow import (
Node,
NodeType,
)
from swarms.structs.groupchat import GroupChat, GroupChatState
from swarms.structs.groupchat import (
GroupChat,
ChatHistory,
ChatTurn,
AgentResponse,
expertise_based,
)
from swarms.structs.majority_voting import (
MajorityVoting,
majority_voting,
@ -74,6 +80,7 @@ from swarms.structs.utils import (
parse_tasks,
)
from swarms.structs.async_workflow import AsyncWorkflow
from swarms.structs.multi_agent_orchestrator import MultiAgentRouter
__all__ = [
"Agent",
@ -143,5 +150,10 @@ __all__ = [
"AsyncWorkflow",
"run_agents_with_tasks_concurrently",
"showcase_available_agents",
"GroupChatState",
"GroupChat",
"ChatHistory",
"ChatTurn",
"AgentResponse",
"expertise_based",
"MultiAgentRouter",
]

@ -499,10 +499,10 @@ class Agent:
self.stopping_token = "<DONE>"
# If the docs exist then ingest the docs
if exists(self.docs):
threading.Thread(
target=self.ingest_docs, args=(self.docs)
).start()
# if exists(self.docs):
# threading.Thread(
# target=self.ingest_docs, args=(self.docs)
# ).start()
# If docs folder exists then get the docs from docs folder
if exists(self.docs_folder):
@ -1176,6 +1176,13 @@ class Agent:
except Exception as error:
self._handle_run_error(error)
def receive_message(
self, agent_name: str, task: str, *args, **kwargs
):
return self.run(
task=f"From {agent_name}: {task}", *args, **kwargs
)
def dict_to_csv(self, data: dict) -> str:
"""
Convert a dictionary to a CSV string.

@ -63,7 +63,7 @@ class SpeakerMessage(BaseModel):
class GroupChatConfig(BaseModel):
max_turns: int = 10
max_loops: int = 10
timeout_per_turn: float = 30.0
require_all_speakers: bool = False
allow_concurrent: bool = True
@ -309,7 +309,7 @@ class AsyncWorkflow(BaseWorkflow):
messages: List[SpeakerMessage] = []
current_turn = 0
while current_turn < self.group_chat_config.max_turns:
while current_turn < self.group_chat_config.max_loops:
turn_context = {
"turn": current_turn,
"history": messages,
@ -627,7 +627,7 @@ def create_default_workflow(
verbose=True,
enable_group_chat=enable_group_chat,
group_chat_config=GroupChatConfig(
max_turns=5,
max_loops=5,
allow_concurrent=True,
require_all_speakers=False,
),

@ -136,9 +136,9 @@ class BaseSwarm(ABC):
raise TypeError("Agents must be a list.")
# Ensure that agents is not empty
if len(self.agents) == 0:
logger.error("Agents list must not be empty.")
raise ValueError("Agents list must not be empty.")
# if len(self.agents) == 0:
# logger.error("Agents list must not be empty.")
# raise ValueError("Agents list must not be empty.")
# Initialize conversation
self.conversation = Conversation(

@ -1,493 +1,355 @@
from typing import List, Dict, Optional, Union, Callable, Any
from pydantic import BaseModel, Field
import concurrent.futures
from datetime import datetime
import json
from uuid import uuid4
import logging
from swarms.structs.agent import Agent
from swarms.structs.agents_available import showcase_available_agents
from typing import Callable, List
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from loguru import logger
from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
class Message(BaseModel):
"""Single message in the conversation"""
class AgentResponse(BaseModel):
agent_name: str
role: str
content: str
timestamp: datetime = Field(default_factory=datetime.utcnow)
message: str
timestamp: datetime = Field(default_factory=datetime.now)
turn_number: int
preceding_context: List[str] = Field(default_factory=list)
class AgentMetadata(BaseModel):
"""Metadata for tracking agent state and configuration"""
class ChatTurn(BaseModel):
turn_number: int
responses: List[AgentResponse]
task: str
timestamp: datetime = Field(default_factory=datetime.now)
agent_name: str
agent_type: str
system_prompt: Optional[str] = None
description: Optional[str] = None
config: Dict[str, Any] = Field(default_factory=dict)
class ChatHistory(BaseModel):
turns: List[ChatTurn]
total_messages: int
name: str
description: str
start_time: datetime = Field(default_factory=datetime.now)
class InteractionLog(BaseModel):
"""Log entry for a single interaction"""
id: str = Field(default_factory=lambda: uuid4().hex)
agent_name: str
position: int
input_text: str
output_text: str
timestamp: datetime = Field(default_factory=datetime.utcnow)
metadata: Dict[str, Any] = Field(default_factory=dict)
SpeakerFunction = Callable[[List[str], "Agent"], bool]
class GroupChatState(BaseModel):
"""Complete state of the group chat"""
def round_robin(history: List[str], agent: Agent) -> bool:
"""
Round robin speaker function.
Each agent speaks in turn, in a circular order.
"""
return True
id: str = Field(default_factory=lambda: uuid4().hex)
name: Optional[str] = None
description: Optional[str] = None
admin_name: str
group_objective: str
max_rounds: int
rules: Optional[str] = None
agent_metadata: List[AgentMetadata]
messages: List[Message]
interactions: List[InteractionLog]
created_at: datetime = Field(default_factory=datetime.utcnow)
updated_at: datetime = Field(default_factory=datetime.utcnow)
def expertise_based(history: List[str], agent: Agent) -> bool:
"""
Expertise based speaker function.
An agent speaks if their system prompt is in the last message.
"""
return (
agent.system_prompt.lower() in history[-1].lower()
if history
else True
)
# Todo:
# Build a function that prompts the llm to output the
# [Agent-Name] in square brackets and then the question or something
# An agentic Language notation
def random_selection(history: List[str], agent: Agent) -> bool:
"""
Random selection speaker function.
An agent speaks randomly.
"""
import random
class AgentWrapper:
"""Wrapper class to standardize agent interfaces"""
return random.choice([True, False])
def __init__(
self,
agent: Union["Agent", Callable],
agent_name: str,
system_prompt: Optional[str] = None,
):
self.agent = agent
self.agent_name = agent_name
self.system_prompt = system_prompt
self._validate_agent()
def _validate_agent(self):
"""Validate that the agent has the required interface"""
if hasattr(self.agent, "run"):
self.run = self.agent.run
elif callable(self.agent):
self.run = self.agent
else:
raise ValueError(
"Agent must either have a 'run' method or be callable"
)
def get_metadata(self) -> AgentMetadata:
"""Extract metadata from the agent"""
return AgentMetadata(
agent_name=self.agent_name,
agent_type=type(self.agent).__name__,
system_prompt=self.system_prompt,
config={
k: v
for k, v in self.agent.__dict__.items()
if isinstance(v, (str, int, float, bool, dict, list))
},
)
def custom_speaker(history: List[str], agent: Agent) -> bool:
"""
Custom speaker function with complex logic.
Args:
history: Previous conversation messages
agent: Current agent being evaluated
Returns:
bool: Whether agent should speak
"""
# No history - let everyone speak
if not history:
return True
last_message = history[-1].lower()
# Check for agent expertise keywords
expertise_relevant = any(
keyword in last_message
for keyword in agent.description.lower().split()
)
# Check for direct mentions
mentioned = agent.agent_name.lower() in last_message
# Check if agent hasn't spoken recently
not_recent_speaker = not any(
agent.agent_name in msg for msg in history[-3:]
)
return expertise_relevant or mentioned or not_recent_speaker
def most_recent(history: List[str], agent: Agent) -> bool:
"""
Most recent speaker function.
An agent speaks if they are the last speaker.
"""
return (
agent.agent_name == history[-1].split(":")[0].strip()
if history
else True
)
class GroupChat:
"""Enhanced GroupChat manager with state persistence and comprehensive logging.
This class implements a multi-agent chat system with the following key features:
- State persistence to disk
- Comprehensive interaction logging
- Configurable agent selection
- Early stopping conditions
- Conversation export capabilities
The GroupChat coordinates multiple agents to have a goal-directed conversation,
with one agent speaking at a time based on a selector agent's decisions.
Attributes:
name (Optional[str]): Name of the group chat
description (Optional[str]): Description of the group chat's purpose
agents (List[Union["Agent", Callable]]): List of participating agents
max_rounds (int): Maximum number of conversation rounds
admin_name (str): Name of the administrator
group_objective (str): The goal/objective of the conversation
selector_agent (Union["Agent", Callable]): Agent that selects next speaker
rules (Optional[str]): Rules governing the conversation
state_path (Optional[str]): Path to save conversation state
showcase_agents_on (bool): Whether to showcase agent capabilities
"""
GroupChat class to enable multiple agents to communicate in a synchronous group chat.
Each agent is aware of all other agents, every message exchanged, and the social context.
"""
def __init__(
self,
name: Optional[str] = None,
description: Optional[str] = None,
agents: List[Union["Agent", Callable]] = None,
max_rounds: int = 10,
admin_name: str = "Admin",
group_objective: str = None,
selector_agent: Union["Agent", Callable] = None,
rules: Optional[str] = None,
state_path: Optional[str] = None,
showcase_agents_on: bool = False,
name: str = "GroupChat",
description: str = "A group chat for multiple agents",
agents: List[Agent] = [],
speaker_fn: SpeakerFunction = round_robin,
max_loops: int = 10,
):
"""Initialize a new GroupChat instance.
"""
Initialize the GroupChat.
Args:
name: Name of the group chat
description: Description of the group chat's purpose
agents: List of participating agents
max_rounds: Maximum number of conversation rounds
admin_name: Name of the administrator
group_objective: The goal/objective of the conversation
selector_agent: Agent that selects next speaker
rules: Rules governing the conversation
state_path: Path to save conversation state
showcase_agents_on: Whether to showcase agent capabilities
Raises:
ValueError: If no agents are provided
name (str): Name of the group chat.
description (str): Description of the purpose of the group chat.
agents (List[Agent]): A list of agents participating in the chat.
speaker_fn (SpeakerFunction): The function to determine which agent should speak next.
max_loops (int): Maximum number of turns in the chat.
"""
self.name = name
self.description = description
self.agents = agents
self.max_rounds = max_rounds
self.admin_name = admin_name
self.group_objective = group_objective
self.selector_agent = selector_agent
self.rules = rules
self.state_path = state_path
self.showcase_agents_on = showcase_agents_on
if not agents:
raise ValueError("At least two agents are required")
# Generate unique state path if not provided
self.state_path = (
state_path or f"group_chat_{uuid4().hex}.json"
)
# Wrap all agents to standardize interface
self.wrapped_agents = [
AgentWrapper(
agent,
(
f"Agent_{i}"
if not hasattr(agent, "agent_name")
else agent.agent_name
),
)
for i, agent in enumerate(agents)
]
# Configure selector agent
self.selector_agent = AgentWrapper(
selector_agent or self.wrapped_agents[0].agent,
"Selector",
"Select the next speaker based on the conversation context",
)
# Initialize conversation state
self.state = GroupChatState(
self.speaker_fn = speaker_fn
self.max_loops = max_loops
self.chat_history = ChatHistory(
turns=[],
total_messages=0,
name=name,
description=description,
admin_name=admin_name,
group_objective=group_objective,
max_rounds=max_rounds,
rules=rules,
agent_metadata=[
agent.get_metadata() for agent in self.wrapped_agents
],
messages=[],
interactions=[],
)
# Showcase agents if enabled
if self.showcase_agents_on is True:
self.showcase_agents()
def showcase_agents(self):
"""Showcase available agents and update their system prompts.
This method displays agent capabilities and updates each agent's
system prompt with information about other agents in the group.
"""
out = showcase_available_agents(
name=self.name,
description=self.description,
agents=self.wrapped_agents,
)
for agent in self.wrapped_agents:
# Initialize system_prompt if None
if agent.system_prompt is None:
agent.system_prompt = ""
agent.system_prompt += out
def save_state(self) -> None:
"""Save current conversation state to disk.
The state is saved as a JSON file at the configured state_path.
def _get_response_sync(
self, agent: Agent, prompt: str, turn_number: int
) -> AgentResponse:
"""
with open(self.state_path, "w") as f:
json.dump(self.state.dict(), f, default=str, indent=2)
logger.info(f"State saved to {self.state_path}")
@classmethod
def load_state(cls, state_path: str) -> "GroupChat":
"""Load GroupChat from saved state.
Get the response from an agent synchronously.
Args:
state_path: Path to the saved state JSON file
agent (Agent): The agent responding.
prompt (str): The message triggering the response.
turn_number (int): The current turn number.
Returns:
GroupChat: A new GroupChat instance with restored state
Raises:
FileNotFoundError: If state file doesn't exist
json.JSONDecodeError: If state file is invalid JSON
AgentResponse: The agent's response captured in a structured format.
"""
with open(state_path, "r") as f:
state_dict = json.load(f)
# Convert loaded data back to state model
state = GroupChatState(**state_dict)
# Initialize with minimal config, then restore state
instance = cls(
name=state.name,
admin_name=state.admin_name,
agents=[], # Temporary empty list
group_objective=state.group_objective,
)
instance.state = state
return instance
def _log_interaction(
self,
agent_name: str,
position: int,
input_text: str,
output_text: str,
) -> None:
"""Log a single interaction in the conversation.
try:
# Provide the agent with information about the chat and other agents
chat_info = f"Chat Name: {self.name}\nChat Description: {self.description}\nAgents in Chat: {[a.agent_name for a in self.agents]}"
context = f"""You are {agent.agent_name}
Conversation History:
\n{chat_info}
Other agents: {[a.agent_name for a in self.agents if a != agent]}
Previous messages: {self.get_full_chat_history()}
""" # Updated line
message = agent.run(context + prompt)
return AgentResponse(
agent_name=agent.name,
role=agent.system_prompt,
message=message,
turn_number=turn_number,
preceding_context=self.get_recent_messages(3),
)
except Exception as e:
logger.error(f"Error from {agent.name}: {e}")
return AgentResponse(
agent_name=agent.name,
role=agent.system_prompt,
message=f"Error generating response: {str(e)}",
turn_number=turn_number,
preceding_context=[],
)
Args:
agent_name: Name of the speaking agent
position: Position in conversation sequence
input_text: Input context provided to agent
output_text: Agent's response
def get_full_chat_history(self) -> str:
"""
log_entry = InteractionLog(
agent_name=agent_name,
position=position,
input_text=input_text,
output_text=output_text,
metadata={
"current_agents": [
a.agent_name for a in self.wrapped_agents
],
"round": position // len(self.wrapped_agents),
},
)
self.state.interactions.append(log_entry)
self.save_state()
Get the full chat history formatted for agent context.
def _add_message(self, role: str, content: str) -> None:
"""Add a message to the conversation history.
Args:
role: Speaker's role/name
content: Message content
Returns:
str: The full chat history with sender names.
"""
message = Message(role=role, content=content)
self.state.messages.append(message)
self.save_state()
messages = []
for turn in self.chat_history.turns:
for response in turn.responses:
messages.append(
f"{response.agent_name}: {response.message}"
)
return "\n".join(messages)
def select_next_speaker(
self, last_speaker: AgentWrapper
) -> AgentWrapper:
"""Select the next speaker using the selector agent.
def get_recent_messages(self, n: int = 3) -> List[str]:
"""
Get the most recent messages in the chat.
Args:
last_speaker: The agent who spoke last
n (int): The number of recent messages to retrieve.
Returns:
AgentWrapper: The next agent to speak
Note:
Falls back to round-robin selection if selector agent fails
List[str]: The most recent messages in the chat.
"""
conversation_history = "\n".join(
[
f"{msg.role}: {msg.content}"
for msg in self.state.messages
]
)
messages = []
for turn in self.chat_history.turns[-n:]:
for response in turn.responses:
messages.append(
f"{response.agent_name}: {response.message}"
)
return messages
selection_prompt = f"""
Current speakers: {[agent.agent_name for agent in self.wrapped_agents]}
Last speaker: {last_speaker.agent_name}
Group objective: {self.state.group_objective}
Based on the conversation history and group objective, select the next most appropriate speaker.
Only return the speaker's name.
Conversation history:
{conversation_history}
def run(self, task: str) -> ChatHistory:
"""
try:
next_speaker_name = self.selector_agent.run(
selection_prompt
).strip()
return next(
agent
for agent in self.wrapped_agents
if agent.agent_name in next_speaker_name
)
except (StopIteration, Exception) as e:
logger.warning(
f"Selector agent failed: {str(e)}. Falling back to round-robin."
)
# Fallback to round-robin if selection fails
current_idx = self.wrapped_agents.index(last_speaker)
return self.wrapped_agents[
(current_idx + 1) % len(self.wrapped_agents)
]
def run(self, task: str) -> str:
"""Execute the group chat conversation.
Run the group chat.
Args:
task: The initial task/question to discuss
task (str): The initial message to start the chat.
Returns:
str: The final response from the conversation
Raises:
Exception: If any error occurs during execution
ChatHistory: The history of the chat.
"""
try:
logger.info(f"Starting GroupChat with task: {task}")
self._add_message(self.state.admin_name, task)
current_speaker = self.wrapped_agents[0]
final_response = None
for round_num in range(self.state.max_rounds):
# Select next speaker
current_speaker = self.select_next_speaker(
current_speaker
)
logger.info(
f"Selected speaker: {current_speaker.agent_name}"
)
# Prepare context and get response
conversation_history = "\n".join(
[
f"{msg.role}: {msg.content}"
for msg in self.state.messages[
-10:
] # Last 10 messages for context
]
)
logger.info(
f"Starting chat '{self.name}' with task: {task}"
)
try:
response = current_speaker.run(
conversation_history
)
final_response = response
except Exception as e:
logger.error(
f"Agent {current_speaker.agent_name} failed: {str(e)}"
)
continue
# Log interaction and add to message history
self._log_interaction(
current_speaker.agent_name,
round_num,
conversation_history,
response,
)
self._add_message(
current_speaker.agent_name, response
for turn in range(self.max_loops):
current_turn = ChatTurn(
turn_number=turn, responses=[], task=task
)
# Optional: Add early stopping condition based on response content
if (
"TASK_COMPLETE" in response
or "CONCLUSION" in response
):
logger.info(
"Task completion detected, ending conversation"
)
break
return final_response or "No valid response generated"
for agent in self.agents:
if self.speaker_fn(
self.get_recent_messages(), agent
):
response = self._get_response_sync(
agent, task, turn
)
current_turn.responses.append(response)
self.chat_history.total_messages += 1
logger.debug(
f"Turn {turn}, {agent.name} responded"
)
self.chat_history.turns.append(current_turn)
return self.chat_history
except Exception as e:
logger.error(f"Error in GroupChat execution: {str(e)}")
raise
logger.error(f"Error in chat: {e}")
raise e
def batched_run(self, tasks: List[str], *args, **kwargs):
"""
Run the group chat with a batch of tasks.
def get_conversation_summary(self) -> Dict[str, Any]:
"""Return a summary of the conversation.
Args:
tasks (List[str]): The list of tasks to run in the chat.
Returns:
Dict containing conversation metrics and status
List[ChatHistory]: The history of each chat.
"""
return {
"id": self.state.id,
"total_interactions": len(self.state.interactions),
"participating_agents": [
agent.agent_name for agent in self.wrapped_agents
],
"conversation_length": len(self.state.messages),
"duration": (
datetime.utcnow() - self.state.created_at
).total_seconds(),
"objective_completed": any(
"TASK_COMPLETE" in msg.content
for msg in self.state.messages
),
}
def export_conversation(
self, format: str = "json"
) -> Union[str, Dict]:
"""Export the conversation in the specified format.
return [self.run(task, *args, **kwargs) for task in tasks]
def concurrent_run(self, tasks: List[str], *args, **kwargs):
"""
Run the group chat with a batch of tasks concurrently using a thread pool.
Args:
format: Output format ("json" or "text")
tasks (List[str]): The list of tasks to run in the chat.
Returns:
Union[str, Dict]: Conversation in requested format
Raises:
ValueError: If format is not supported
List[ChatHistory]: The history of each chat.
"""
if format == "json":
return self.state.dict()
elif format == "text":
return "\n".join(
[
f"{msg.role} ({msg.timestamp}): {msg.content}"
for msg in self.state.messages
]
with concurrent.futures.ThreadPoolExecutor() as executor:
return list(
executor.map(
lambda task: self.run(task, *args, **kwargs),
tasks,
)
)
else:
raise ValueError(f"Unsupported export format: {format}")
# if __name__ == "__main__":
# load_dotenv()
# # Get the OpenAI API key from the environment variable
# api_key = os.getenv("OPENAI_API_KEY")
# # Create an instance of the OpenAIChat class
# model = OpenAIChat(
# openai_api_key=api_key,
# model_name="gpt-4o-mini",
# temperature=0.1,
# )
# # Example agents
# agent1 = Agent(
# agent_name="Financial-Analysis-Agent",
# system_prompt="You are a financial analyst specializing in investment strategies.",
# llm=model,
# max_loops=1,
# autosave=False,
# dashboard=False,
# verbose=True,
# dynamic_temperature_enabled=True,
# user_name="swarms_corp",
# retry_attempts=1,
# context_length=200000,
# output_type="string",
# streaming_on=False,
# )
# agent2 = Agent(
# agent_name="Tax-Adviser-Agent",
# system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
# llm=model,
# max_loops=1,
# autosave=False,
# dashboard=False,
# verbose=True,
# dynamic_temperature_enabled=True,
# user_name="swarms_corp",
# retry_attempts=1,
# context_length=200000,
# output_type="string",
# streaming_on=False,
# )
# agents = [agent1, agent2]
# chat = GroupChat(
# name="Investment Advisory",
# description="Financial and tax analysis group",
# agents=agents,
# speaker_fn=expertise_based,
# )
# history = chat.run(
# "How to optimize tax strategy for investments?"
# )
# print(history.model_dump_json(indent=2))

@ -1,243 +0,0 @@
import os
import asyncio
from pydantic import BaseModel, Field
from typing import List, Dict, Any
from swarms import Agent
from dotenv import load_dotenv
from swarms.utils.formatter import formatter
# Load environment variables
load_dotenv()
# Get OpenAI API key
api_key = os.getenv("OPENAI_API_KEY")
# Define Pydantic schema for agent outputs
class AgentOutput(BaseModel):
"""Schema for capturing the output of each agent."""
agent_name: str = Field(..., description="The name of the agent")
message: str = Field(
...,
description="The agent's response or contribution to the group chat",
)
metadata: Dict[str, Any] = Field(
default_factory=dict,
description="Additional metadata about the agent's response",
)
class GroupChat:
"""
GroupChat class to enable multiple agents to communicate in an asynchronous group chat.
Each agent is aware of all other agents, every message exchanged, and the social context.
"""
def __init__(
self,
name: str,
description: str,
agents: List[Agent],
max_loops: int = 1,
):
"""
Initialize the GroupChat.
Args:
name (str): Name of the group chat.
description (str): Description of the purpose of the group chat.
agents (List[Agent]): A list of agents participating in the chat.
max_loops (int): Maximum number of loops to run through all agents.
"""
self.name = name
self.description = description
self.agents = agents
self.max_loops = max_loops
self.chat_history = (
[]
) # Stores all messages exchanged in the chat
formatter.print_panel(
f"Initialized GroupChat '{self.name}' with {len(self.agents)} agents. Max loops: {self.max_loops}",
title="Groupchat Swarm",
)
async def _agent_conversation(
self, agent: Agent, input_message: str
) -> AgentOutput:
"""
Facilitate a single agent's response to the chat.
Args:
agent (Agent): The agent responding.
input_message (str): The message triggering the response.
Returns:
AgentOutput: The agent's response captured in a structured format.
"""
formatter.print_panel(
f"Agent '{agent.agent_name}' is responding to the message: {input_message}",
title="Groupchat Swarm",
)
response = await asyncio.to_thread(agent.run, input_message)
output = AgentOutput(
agent_name=agent.agent_name,
message=response,
metadata={"context_length": agent.context_length},
)
# logger.debug(f"Agent '{agent.agent_name}' response: {response}")
return output
async def _run(self, initial_message: str) -> List[AgentOutput]:
"""
Execute the group chat asynchronously, looping through all agents up to max_loops.
Args:
initial_message (str): The initial message to start the chat.
Returns:
List[AgentOutput]: The responses of all agents across all loops.
"""
formatter.print_panel(
f"Starting group chat '{self.name}' with initial message: {initial_message}",
title="Groupchat Swarm",
)
self.chat_history.append(
{"sender": "System", "message": initial_message}
)
outputs = []
for loop in range(self.max_loops):
formatter.print_panel(
f"Group chat loop {loop + 1}/{self.max_loops}",
title="Groupchat Swarm",
)
for agent in self.agents:
# Create a custom input message for each agent, sharing the chat history and social context
input_message = (
f"Chat History:\n{self._format_chat_history()}\n\n"
f"Participants:\n"
+ "\n".join(
[
f"- {a.agent_name}: {a.system_prompt}"
for a in self.agents
]
)
+ f"\n\nNew Message: {initial_message}\n\n"
f"You are '{agent.agent_name}'. Remember to keep track of the social context, who is speaking, "
f"and respond accordingly based on your role: {agent.system_prompt}."
)
# Collect agent's response
output = await self._agent_conversation(
agent, input_message
)
outputs.append(output)
# Update chat history with the agent's response
self.chat_history.append(
{
"sender": agent.agent_name,
"message": output.message,
}
)
formatter.print_panel(
"Group chat completed. All agent responses captured.",
title="Groupchat Swarm",
)
return outputs
def run(self, task: str, *args, **kwargs):
return asyncio.run(self.run(task, *args, **kwargs))
def _format_chat_history(self) -> str:
"""
Format the chat history for agents to understand the context.
Returns:
str: The formatted chat history as a string.
"""
return "\n".join(
[
f"{entry['sender']}: {entry['message']}"
for entry in self.chat_history
]
)
def __str__(self) -> str:
"""String representation of the group chat's outputs."""
return self._format_chat_history()
def to_json(self) -> str:
"""JSON representation of the group chat's outputs."""
return [
{"sender": entry["sender"], "message": entry["message"]}
for entry in self.chat_history
]
# # Example Usage
# if __name__ == "__main__":
# load_dotenv()
# # Get the OpenAI API key from the environment variable
# api_key = os.getenv("OPENAI_API_KEY")
# # Create an instance of the OpenAIChat class
# model = OpenAIChat(
# openai_api_key=api_key,
# model_name="gpt-4o-mini",
# temperature=0.1,
# )
# # Example agents
# agent1 = Agent(
# agent_name="Financial-Analysis-Agent",
# system_prompt="You are a financial analyst specializing in investment strategies.",
# llm=model,
# max_loops=1,
# autosave=False,
# dashboard=False,
# verbose=True,
# dynamic_temperature_enabled=True,
# user_name="swarms_corp",
# retry_attempts=1,
# context_length=200000,
# output_type="string",
# streaming_on=False,
# )
# agent2 = Agent(
# agent_name="Tax-Adviser-Agent",
# system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
# llm=model,
# max_loops=1,
# autosave=False,
# dashboard=False,
# verbose=True,
# dynamic_temperature_enabled=True,
# user_name="swarms_corp",
# retry_attempts=1,
# context_length=200000,
# output_type="string",
# streaming_on=False,
# )
# # Create group chat
# group_chat = GroupChat(
# name="Financial Discussion",
# description="A group chat for financial analysis and tax advice.",
# agents=[agent1, agent2],
# )
# # Run the group chat
# asyncio.run(
# group_chat.run(
# "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria? What do you guys think?"
# )
# )

@ -1,19 +1,19 @@
import asyncio
import os
import threading
from concurrent.futures import ThreadPoolExecutor
import psutil
from dataclasses import dataclass
import threading
from typing import List, Any
from multiprocessing import cpu_count
import os
from typing import Any, List
import psutil
from swarms.structs.agent import Agent
from swarms.structs.omni_agent_types import AgentType
from swarms.utils.wrapper_clusterop import (
exec_callable_with_clusterops,
)
from swarms.structs.omni_agent_types import AgentType
def run_single_agent(agent: AgentType, task: str) -> Any:
"""Run a single agent synchronously"""

@ -0,0 +1,401 @@
"""
Todo:
- Add multi-agent selection for a task and then run them automatically
- Add shared memory for large instances of agents
"""
import os
import subprocess
import uuid
from datetime import UTC, datetime
from typing import List, Literal, Optional
from loguru import logger
from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_exponential
from swarms.structs.agent import Agent
class AgentResponse(BaseModel):
"""Response from the boss agent indicating which agent should handle the task"""
selected_agent: str = Field(
description="Name of the agent selected to handle the task"
)
reasoning: str = Field(
description="Explanation for why this agent was selected"
)
modified_task: Optional[str] = Field(
None, description="Optional modified version of the task"
)
class OpenAIFunctionCaller:
"""
A class to interact with the OpenAI API for generating text based on a system prompt and a task.
"""
def __init__(
self,
system_prompt: str,
api_key: str,
temperature: float,
max_tokens: int = 4000,
model_name: str = "gpt-4-0125-preview",
):
self.system_prompt = system_prompt
self.api_key = api_key
self.temperature = temperature
self.max_tokens = max_tokens
self.model_name = model_name
try:
from openai import OpenAI
except ImportError:
logger.error(
"OpenAI library not found. Please install it using 'pip install openai'"
)
subprocess.run(["pip", "install", "openai"])
raise
try:
self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
except Exception as e:
logger.error(
f"Error initializing OpenAI client: {str(e)}"
)
raise
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
)
def get_completion(self, task: str) -> AgentResponse:
"""Get completion from OpenAI with retries"""
try:
response = self.client.chat.completions.create(
model=self.model_name,
messages=[
{"role": "system", "content": self.system_prompt},
{"role": "user", "content": task},
],
response_format={"type": "json_object"},
temperature=self.temperature,
max_tokens=self.max_tokens,
)
return AgentResponse.model_validate_json(
response.choices[0].message.content
)
except Exception as e:
logger.error(f"Error getting completion: {str(e)}")
raise
def get_agent_response(
self, system_prompt: str, task: str
) -> str:
"""Get agent response without function calling"""
try:
response = self.client.chat.completions.create(
model=self.model_name,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": task},
],
temperature=self.temperature,
max_tokens=self.max_tokens,
)
return response.choices[0].message.content
except Exception as e:
logger.error(f"Error getting agent response: {str(e)}")
raise
class MultiAgentRouter:
"""
Routes tasks to appropriate agents based on their capabilities.
This class is responsible for managing a pool of agents and routing incoming tasks to the most suitable agent. It uses a boss agent to analyze the task and select the best agent for the job. The boss agent's decision is based on the capabilities and descriptions of the available agents.
Attributes:
name (str): The name of the router.
description (str): A description of the router's purpose.
agents (dict): A dictionary of agents, where the key is the agent's name and the value is the agent object.
api_key (str): The API key for OpenAI.
output_type (str): The type of output expected from the agents. Can be either "json" or "string".
execute_task (bool): A flag indicating whether the task should be executed by the selected agent.
boss_system_prompt (str): A system prompt for the boss agent that includes information about all available agents.
function_caller (OpenAIFunctionCaller): An instance of OpenAIFunctionCaller for calling the boss agent.
"""
def __init__(
self,
name: str = "swarm-router",
description: str = "Routes tasks to specialized agents based on their capabilities",
agents: List[Agent] = [],
model: str = "gpt-4o-mini",
temperature: float = 0.1,
shared_memory_system: callable = None,
output_type: Literal["json", "string"] = "json",
execute_task: bool = True,
):
"""
Initializes the MultiAgentRouter with a list of agents and configuration options.
Args:
name (str, optional): The name of the router. Defaults to "swarm-router".
description (str, optional): A description of the router's purpose. Defaults to "Routes tasks to specialized agents based on their capabilities".
agents (List[Agent], optional): A list of agents to be managed by the router. Defaults to an empty list.
model (str, optional): The model to use for the boss agent. Defaults to "gpt-4-0125-preview".
temperature (float, optional): The temperature for the boss agent's model. Defaults to 0.1.
output_type (Literal["json", "string"], optional): The type of output expected from the agents. Defaults to "json".
execute_task (bool, optional): A flag indicating whether the task should be executed by the selected agent. Defaults to True.
"""
self.name = name
self.description = description
self.shared_memory_system = shared_memory_system
self.agents = {agent.name: agent for agent in agents}
self.api_key = os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("OpenAI API key must be provided")
self.output_type = output_type
self.execute_task = execute_task
self.boss_system_prompt = self._create_boss_system_prompt()
# Initialize the function caller
self.function_caller = OpenAIFunctionCaller(
system_prompt=self.boss_system_prompt,
api_key=self.api_key,
temperature=temperature,
)
def __repr__(self):
return f"MultiAgentRouter(name={self.name}, agents={list(self.agents.keys())})"
def query_ragent(self, task: str) -> str:
"""Query the ResearchAgent"""
return self.shared_memory_system.query(task)
def _create_boss_system_prompt(self) -> str:
"""
Creates a system prompt for the boss agent that includes information about all available agents.
Returns:
str: The system prompt for the boss agent.
"""
agent_descriptions = "\n".join(
[
f"- {name}: {agent.description}"
for name, agent in self.agents.items()
]
)
return f"""You are a boss agent responsible for routing tasks to the most appropriate specialized agent.
Available agents:
{agent_descriptions}
Your job is to:
1. Analyze the incoming task
2. Select the most appropriate agent based on their descriptions
3. Provide clear reasoning for your selection
4. Optionally modify the task to better suit the selected agent's capabilities
You must respond with JSON that contains:
- selected_agent: Name of the chosen agent (must be one of the available agents)
- reasoning: Brief explanation of why this agent was selected
- modified_task: (Optional) A modified version of the task if needed
Always select exactly one agent that best matches the task requirements.
"""
def find_agent_in_list(self, agent_name: str) -> Optional[Agent]:
"""
Find an agent by name in a list of agents.
Args:
agent_name (str): The name of the agent to find.
Returns:
Optional[Agent]: The agent object if found, otherwise None.
"""
for agent in self.agent_list:
if agent.name == agent_name:
return agent
return None
def route_task(self, task: str) -> dict:
"""
Routes a task to the appropriate agent and returns their response.
Args:
task (str): The task to be routed.
Returns:
dict: A dictionary containing the routing result, including the selected agent, reasoning, and response.
"""
try:
start_time = datetime.now(UTC)
# Get boss decision using function calling
boss_response = self.function_caller.get_completion(task)
# Validate that the selected agent exists
if boss_response.selected_agent not in self.agents:
raise ValueError(
f"Boss selected unknown agent: {boss_response.selected_agent}"
)
# Get the selected agent
selected_agent = self.agents[boss_response.selected_agent]
# Use the modified task if provided, otherwise use original task
final_task = boss_response.modified_task or task
# Execute the task with the selected agent if enabled
execution_start = datetime.now(UTC)
agent_response = None
execution_time = 0
if self.execute_task:
# Use the agent's run method directly
agent_response = selected_agent.run(final_task)
execution_time = (
datetime.now(UTC) - execution_start
).total_seconds()
else:
logger.info(
"Task execution skipped (execute_task=False)"
)
total_time = (
datetime.now(UTC) - start_time
).total_seconds()
result = {
"id": str(uuid.uuid4()),
"timestamp": datetime.now(UTC).isoformat(),
"task": {
"original": task,
"modified": (
final_task
if boss_response.modified_task
else None
),
},
"boss_decision": {
"selected_agent": boss_response.selected_agent,
"reasoning": boss_response.reasoning,
},
"execution": {
"agent_name": selected_agent.name,
"agent_id": selected_agent.id,
"was_executed": self.execute_task,
"response": (
agent_response if self.execute_task else None
),
"execution_time": (
execution_time if self.execute_task else None
),
},
"total_time": total_time,
}
logger.info(
f"Successfully routed task to {selected_agent.name}"
)
return result
except Exception as e:
logger.error(f"Error routing task: {str(e)}")
raise
def batch_route(self, tasks: List[str] = []):
"""Batch route tasks to the appropriate agents"""
results = []
for task in tasks:
try:
result = self.route_task(task)
results.append(result)
except Exception as e:
logger.error(f"Error routing task: {str(e)}")
return results
def concurrent_batch_route(self, tasks: List[str] = []):
"""Concurrently route tasks to the appropriate agents"""
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
results = []
with ThreadPoolExecutor() as executor:
futures = [
executor.submit(self.route_task, task)
for task in tasks
]
for future in concurrent.futures.as_completed(futures):
try:
result = future.result()
results.append(result)
except Exception as e:
logger.error(f"Error routing task: {str(e)}")
return results
# # Example usage:
# if __name__ == "__main__":
# # Define some example agents
# agents = [
# Agent(
# agent_name="ResearchAgent",
# description="Specializes in researching topics and providing detailed, factual information",
# system_prompt="You are a research specialist. Provide detailed, well-researched information about any topic, citing sources when possible.",
# model_name="openai/gpt-4o",
# ),
# Agent(
# agent_name="CodeExpertAgent",
# description="Expert in writing, reviewing, and explaining code across multiple programming languages",
# system_prompt="You are a coding expert. Write, review, and explain code with a focus on best practices and clean code principles.",
# model_name="openai/gpt-4o",
# ),
# Agent(
# agent_name="WritingAgent",
# description="Skilled in creative and technical writing, content creation, and editing",
# system_prompt="You are a writing specialist. Create, edit, and improve written content while maintaining appropriate tone and style.",
# model_name="openai/gpt-4o",
# ),
# ]
# # Initialize routers with different configurations
# router_execute = MultiAgentRouter(agents=agents, execute_task=True)
# # router_no_execute = MultiAgentRouter(agents=agents, execute_task=False)
# # Example task
# task = "Write a Python function to calculate fibonacci numbers"
# try:
# # Process the task with execution
# print("\nWith task execution:")
# result_execute = router_execute.route_task(task)
# print(
# f"Selected Agent: {result_execute['boss_decision']['selected_agent']}"
# )
# print(
# f"Reasoning: {result_execute['boss_decision']['reasoning']}"
# )
# if result_execute["execution"]["response"]:
# print(
# f"Response Preview: {result_execute['execution']['response'][:200]}..."
# )
# print(
# f"Execution Time: {result_execute['execution']['execution_time']:.2f}s"
# )
# print(f"Total Time: {result_execute['total_time']:.2f}s")
# except Exception as e:
# print(f"Error occurred: {str(e)}")

@ -1,276 +0,0 @@
import asyncio
import pulsar
from pulsar import ConsumerType
from loguru import logger
from swarms import Agent
from typing import List, Dict, Any
import json
class ScalableAsyncAgentSwarm:
"""
A scalable, asynchronous swarm of agents leveraging Apache Pulsar for inter-agent communication.
Provides load balancing, health monitoring, dead letter queues, and centralized logging.
"""
def __init__(
self,
pulsar_url: str,
topic: str,
dlq_topic: str,
agents_config: List[Dict[str, Any]],
):
"""
Initializes the async swarm with agents.
Args:
pulsar_url (str): The URL of the Apache Pulsar broker.
topic (str): The main topic for task distribution.
dlq_topic (str): The Dead Letter Queue topic for failed messages.
agents_config (List[Dict[str, Any]]): List of agent configurations with `name`, `description`, and `model_name`.
"""
self.pulsar_url = pulsar_url
self.topic = topic
self.dlq_topic = dlq_topic
self.agents_config = agents_config
self.client = pulsar.Client(pulsar_url)
self.consumer = self.client.subscribe(
topic,
subscription_name="swarm-task-sub",
consumer_type=ConsumerType.Shared,
)
self.dlq_producer = self.client.create_producer(dlq_topic)
self.response_logger = []
self.agents = [
self.create_agent(config) for config in agents_config
]
self.agent_index = 0
logger.info(
"Swarm initialized with agents: {}",
[agent["name"] for agent in agents_config],
)
def create_agent(
self, agent_config: Dict[str, Any]
) -> Dict[str, Any]:
"""
Creates a new agent configuration with asynchronous capabilities.
Args:
agent_config (Dict[str, Any]): Configuration dictionary with agent details.
Returns:
Dict[str, Any]: A dictionary containing agent metadata and functionality.
"""
agent_name = agent_config["name"]
description = agent_config["description"]
model_name = agent_config.get("model_name", "gpt-4o-mini")
class AsyncAgent:
"""
An asynchronous agent that processes tasks and communicates via Apache Pulsar.
"""
def __init__(
self, name: str, description: str, model_name: str
):
self.name = name
self.description = description
self.agent = Agent(
agent_name=name,
model_name=model_name,
max_loops="auto",
interactive=True,
streaming_on=True,
)
logger.info(
f"Initialized agent '{name}' - {description}"
)
async def process_task(
self, message: str
) -> Dict[str, Any]:
"""
Processes a single task using the agent.
Args:
message (str): The task message.
Returns:
Dict[str, Any]: JSON-formatted response.
"""
try:
logger.info(
f"Agent {self.name} processing task: {message}"
)
response = await asyncio.to_thread(
self.agent.run, message
)
logger.info(f"Agent {self.name} completed task.")
return {
"agent_name": self.name,
"response": response,
}
except Exception as e:
logger.error(
f"Agent {self.name} encountered an error: {e}"
)
return {"agent_name": self.name, "error": str(e)}
return {
"name": agent_name,
"instance": AsyncAgent(
agent_name, description, model_name
),
}
async def distribute_task(self, message: str):
"""
Distributes a task to the next available agent using round-robin.
Args:
message (str): The task message.
"""
agent = self.agents[self.agent_index]
self.agent_index = (self.agent_index + 1) % len(self.agents)
try:
response = await agent["instance"].process_task(message)
self.log_response(response)
except Exception as e:
logger.error(
f"Error processing task by agent {agent['name']}: {e}"
)
self.send_to_dlq(message)
async def monitor_health(self):
"""
Periodically monitors the health of agents.
"""
while True:
logger.info("Performing health check for all agents.")
for agent in self.agents:
logger.info(f"Agent {agent['name']} is online.")
await asyncio.sleep(10)
def send_to_dlq(self, message: str):
"""
Sends a failed message to the Dead Letter Queue (DLQ).
Args:
message (str): The message to send to the DLQ.
"""
try:
self.dlq_producer.send(message.encode("utf-8"))
logger.info("Message sent to Dead Letter Queue.")
except Exception as e:
logger.error(f"Failed to send message to DLQ: {e}")
def log_response(self, response: Dict[str, Any]):
"""
Logs the response to a centralized list for later analysis.
Args:
response (Dict[str, Any]): The agent's response.
"""
self.response_logger.append(response)
logger.info(f"Response logged: {response}")
async def listen_and_distribute(self):
"""
Listens to the main Pulsar topic and distributes tasks to agents.
"""
while True:
msg = self.consumer.receive()
try:
message = msg.data().decode("utf-8")
logger.info(f"Received task: {message}")
await self.distribute_task(message)
self.consumer.acknowledge(msg)
except Exception as e:
logger.error(f"Error processing message: {e}")
self.send_to_dlq(msg.data().decode("utf-8"))
self.consumer.negative_acknowledge(msg)
async def run(self):
"""
Runs the swarm asynchronously with health monitoring and task distribution.
"""
logger.info("Starting the async swarm...")
task_listener = asyncio.create_task(
self.listen_and_distribute()
)
health_monitor = asyncio.create_task(self.monitor_health())
await asyncio.gather(task_listener, health_monitor)
def shutdown(self):
"""
Safely shuts down the swarm and logs all responses.
"""
logger.info("Shutting down the swarm...")
self.client.close()
with open("responses.json", "w") as f:
json.dump(self.response_logger, f, indent=4)
logger.info("Responses saved to 'responses.json'.")
# from scalable_agent_swarm import ScalableAsyncAgentSwarm # Assuming your swarm class is saved here
if __name__ == "__main__":
# Example Configuration
PULSAR_URL = "pulsar://localhost:6650"
TOPIC = "stock-analysis"
DLQ_TOPIC = "stock-analysis-dlq"
# Agents configuration
AGENTS_CONFIG = [
{
"name": "Stock-Analysis-Agent-1",
"description": "Analyzes stock trends.",
"model_name": "gpt-4o-mini",
},
{
"name": "Stock-News-Agent",
"description": "Summarizes stock news.",
"model_name": "gpt-4o-mini",
},
{
"name": "Tech-Trends-Agent",
"description": "Tracks tech sector trends.",
"model_name": "gpt-4o-mini",
},
]
# Tasks to send
TASKS = [
"Analyze the trend for tech stocks in Q4 2024",
"Summarize the latest news on the S&P 500",
"Identify the top-performing sectors in the stock market",
"Provide a forecast for AI-related stocks for 2025",
]
# Initialize and run the swarm
swarm = ScalableAsyncAgentSwarm(
PULSAR_URL, TOPIC, DLQ_TOPIC, AGENTS_CONFIG
)
try:
# Run the swarm in the background
swarm_task = asyncio.create_task(swarm.run())
# Send tasks to the topic
client = pulsar.Client(PULSAR_URL)
producer = client.create_producer(TOPIC)
for task in TASKS:
producer.send(task.encode("utf-8"))
print(f"Sent task: {task}")
producer.close()
client.close()
# Keep the swarm running
asyncio.run(swarm_task)
except KeyboardInterrupt:
swarm.shutdown()

@ -1,5 +1,5 @@
import asyncio
import traceback
import json
import uuid
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
@ -12,11 +12,11 @@ from swarms.structs.agent import Agent
from swarms.structs.agents_available import showcase_available_agents
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.output_types import OutputType
from swarms.utils.add_docs_to_agents import handle_input_docs
from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.wrapper_clusterop import (
exec_callable_with_clusterops,
)
from swarms.telemetry.capture_sys_data import log_agent_data
logger = initialize_logger(log_folder="rearrange")
@ -89,7 +89,6 @@ class AgentRearrange(BaseSwarm):
batch_run(): Processes multiple tasks in batches
abatch_run(): Asynchronously processes multiple tasks in batches
concurrent_run(): Processes multiple tasks concurrently
handle_input_docs(): Adds document content to agent prompts
"""
@ -116,6 +115,7 @@ class AgentRearrange(BaseSwarm):
all_cores: bool = False,
all_gpus: bool = True,
no_use_clusterops: bool = True,
autosave: bool = True,
*args,
**kwargs,
):
@ -143,6 +143,7 @@ class AgentRearrange(BaseSwarm):
self.all_cores = all_cores
self.all_gpus = all_gpus
self.no_use_clusterops = no_use_clusterops
self.autosave = autosave
self.output_schema = AgentRearrangeOutput(
input=AgentRearrangeInput(
@ -194,13 +195,6 @@ class AgentRearrange(BaseSwarm):
self.flow = flow
logger.info(f"Custom flow set: {flow}")
def handle_input_docs(self):
self.agents = handle_input_docs(
agents=self.agents,
docs=self.docs,
doc_folder=self.doc_folder,
)
def add_agent(self, agent: Agent):
"""
Adds an agent to the swarm.
@ -487,10 +481,17 @@ class AgentRearrange(BaseSwarm):
return output
except Exception as e:
logger.error(
f"An error occurred: {e} \n {traceback.format_exc()}"
)
return e
self._catch_error(e)
def _catch_error(self, e: Exception):
if self.autosave is True:
log_agent_data(self.to_dict())
logger.error(
f"An error occurred with your swarm {self.name}: Error: {e} Traceback: {e.__traceback__}"
)
return e
def run(
self,
@ -500,7 +501,7 @@ class AgentRearrange(BaseSwarm):
device_id: int = 2,
all_cores: bool = True,
all_gpus: bool = False,
no_use_clusterops: bool = False,
no_use_clusterops: bool = True,
*args,
**kwargs,
):
@ -521,30 +522,33 @@ class AgentRearrange(BaseSwarm):
Returns:
The result from executing the task through the cluster operations wrapper.
"""
no_use_clusterops = (
no_use_clusterops or self.no_use_clusterops
)
if no_use_clusterops is True:
return self._run(
task=task,
img=img,
*args,
**kwargs,
)
else:
return exec_callable_with_clusterops(
device=device,
device_id=device_id,
all_cores=all_cores,
all_gpus=all_gpus,
func=self._run,
task=task,
img=img,
*args,
**kwargs,
try:
no_use_clusterops = (
no_use_clusterops or self.no_use_clusterops
)
if no_use_clusterops is True:
return self._run(
task=task,
img=img,
*args,
**kwargs,
)
else:
return exec_callable_with_clusterops(
device=device,
device_id=device_id,
all_cores=all_cores,
all_gpus=all_gpus,
func=self._run,
task=task,
img=img,
*args,
**kwargs,
)
except Exception as e:
self._catch_error(e)
def __call__(self, task: str, *args, **kwargs):
"""
Make the class callable by executing the run() method.
@ -557,7 +561,11 @@ class AgentRearrange(BaseSwarm):
Returns:
The result from executing run().
"""
return self.run(task=task, *args, **kwargs)
try:
return self.run(task=task, *args, **kwargs)
except Exception as e:
logger.error(f"An error occurred: {e}")
return e
def batch_run(
self,
@ -586,32 +594,35 @@ class AgentRearrange(BaseSwarm):
Returns:
List of results corresponding to input tasks
"""
results = []
for i in range(0, len(tasks), batch_size):
batch_tasks = tasks[i : i + batch_size]
batch_imgs = (
img[i : i + batch_size]
if img
else [None] * len(batch_tasks)
)
# Process batch using concurrent execution
batch_results = [
self.run(
task=task,
img=img_path,
device=device,
device_id=device_id,
all_cores=all_cores,
all_gpus=all_gpus,
*args,
**kwargs,
try:
results = []
for i in range(0, len(tasks), batch_size):
batch_tasks = tasks[i : i + batch_size]
batch_imgs = (
img[i : i + batch_size]
if img
else [None] * len(batch_tasks)
)
for task, img_path in zip(batch_tasks, batch_imgs)
]
results.extend(batch_results)
return results
# Process batch using concurrent execution
batch_results = [
self.run(
task=task,
img=img_path,
device=device,
device_id=device_id,
all_cores=all_cores,
all_gpus=all_gpus,
*args,
**kwargs,
)
for task, img_path in zip(batch_tasks, batch_imgs)
]
results.extend(batch_results)
return results
except Exception as e:
self._catch_error(e)
async def abatch_run(
self,
@ -632,24 +643,29 @@ class AgentRearrange(BaseSwarm):
Returns:
List of results corresponding to input tasks
"""
results = []
for i in range(0, len(tasks), batch_size):
batch_tasks = tasks[i : i + batch_size]
batch_imgs = (
img[i : i + batch_size]
if img
else [None] * len(batch_tasks)
)
try:
results = []
for i in range(0, len(tasks), batch_size):
batch_tasks = tasks[i : i + batch_size]
batch_imgs = (
img[i : i + batch_size]
if img
else [None] * len(batch_tasks)
)
# Process batch using asyncio.gather
batch_coros = [
self.astream(task=task, img=img_path, *args, **kwargs)
for task, img_path in zip(batch_tasks, batch_imgs)
]
batch_results = await asyncio.gather(*batch_coros)
results.extend(batch_results)
# Process batch using asyncio.gather
batch_coros = [
self.astream(
task=task, img=img_path, *args, **kwargs
)
for task, img_path in zip(batch_tasks, batch_imgs)
]
batch_results = await asyncio.gather(*batch_coros)
results.extend(batch_results)
return results
return results
except Exception as e:
self._catch_error(e)
def concurrent_run(
self,
@ -678,23 +694,86 @@ class AgentRearrange(BaseSwarm):
Returns:
List of results corresponding to input tasks
"""
with ThreadPoolExecutor(max_workers=max_workers) as executor:
imgs = img if img else [None] * len(tasks)
futures = [
executor.submit(
self.run,
task=task,
img=img_path,
device=device,
device_id=device_id,
all_cores=all_cores,
all_gpus=all_gpus,
*args,
**kwargs,
)
for task, img_path in zip(tasks, imgs)
]
return [future.result() for future in futures]
try:
with ThreadPoolExecutor(
max_workers=max_workers
) as executor:
imgs = img if img else [None] * len(tasks)
futures = [
executor.submit(
self.run,
task=task,
img=img_path,
device=device,
device_id=device_id,
all_cores=all_cores,
all_gpus=all_gpus,
*args,
**kwargs,
)
for task, img_path in zip(tasks, imgs)
]
return [future.result() for future in futures]
except Exception as e:
self._catch_error(e)
def _serialize_callable(
self, attr_value: Callable
) -> Dict[str, Any]:
"""
Serializes callable attributes by extracting their name and docstring.
Args:
attr_value (Callable): The callable to serialize.
Returns:
Dict[str, Any]: Dictionary with name and docstring of the callable.
"""
return {
"name": getattr(
attr_value, "__name__", type(attr_value).__name__
),
"doc": getattr(attr_value, "__doc__", None),
}
def _serialize_attr(self, attr_name: str, attr_value: Any) -> Any:
"""
Serializes an individual attribute, handling non-serializable objects.
Args:
attr_name (str): The name of the attribute.
attr_value (Any): The value of the attribute.
Returns:
Any: The serialized value of the attribute.
"""
try:
if callable(attr_value):
return self._serialize_callable(attr_value)
elif hasattr(attr_value, "to_dict"):
return (
attr_value.to_dict()
) # Recursive serialization for nested objects
else:
json.dumps(
attr_value
) # Attempt to serialize to catch non-serializable objects
return attr_value
except (TypeError, ValueError):
return f"<Non-serializable: {type(attr_value).__name__}>"
def to_dict(self) -> Dict[str, Any]:
"""
Converts all attributes of the class, including callables, into a dictionary.
Handles non-serializable attributes by converting them or skipping them.
Returns:
Dict[str, Any]: A dictionary representation of the class attributes.
"""
return {
attr_name: self._serialize_attr(attr_name, attr_value)
for attr_name, attr_value in self.__dict__.items()
}
def rearrange(

@ -45,12 +45,13 @@ class SequentialWorkflow:
self.shared_memory_system = shared_memory_system
self.reliability_check()
self.flow = self.sequential_flow()
self.agent_rearrange = AgentRearrange(
name=name,
description=description,
agents=agents,
flow=self.sequential_flow(),
flow=self.flow,
max_loops=max_loops,
output_type=output_type,
return_json=return_json,

@ -3,21 +3,41 @@ import csv
from datetime import datetime
import os
import uuid
from typing import List, Union
from typing import Dict, List, Union
import aiofiles
from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.file_processing import create_file_in_folder
from swarms.telemetry.capture_sys_data import log_agent_data
from swarms.utils.file_processing import create_file_in_folder
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="spreadsheet_swarm")
<<<<<<< HEAD
# Replace timestamp-based time with a UUID for file naming
run_id = uuid.uuid4().hex # Unique identifier for each run
=======
time = datetime.datetime.now().isoformat()
uuid_hex = uuid.uuid4().hex
# --------------- NEW CHANGE START ---------------
# Format time variable to be compatible across operating systems
formatted_time = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
# --------------- NEW CHANGE END ---------------
>>>>>>> upstream/master
class AgentConfig(BaseModel):
"""Configuration for an agent loaded from CSV"""
agent_name: str
description: str
system_prompt: str
task: str
class AgentOutput(BaseModel):
agent_name: str
@ -71,6 +91,7 @@ class SpreadSheetSwarm(BaseSwarm):
save_file_path: str = None,
max_loops: int = 1,
workspace_dir: str = os.getenv("WORKSPACE_DIR"),
load_path: str = None,
*args,
**kwargs,
):
@ -87,12 +108,23 @@ class SpreadSheetSwarm(BaseSwarm):
self.autosave_on = autosave_on
self.max_loops = max_loops
self.workspace_dir = workspace_dir
self.load_path = load_path
self.agent_configs: Dict[str, AgentConfig] = {}
<<<<<<< HEAD
# Create a timestamp without colons or periods
timestamp = datetime.now().isoformat().replace(":", "_").replace(".", "_")
# Use this timestamp in the CSV filename
self.save_file_path = f"spreadsheet_swarm_{timestamp}_run_id_{run_id}.csv"
=======
# --------------- NEW CHANGE START ---------------
# The save_file_path now uses the formatted_time and uuid_hex
self.save_file_path = (
f"spreadsheet_swarm_run_id_{uuid_hex}.csv"
)
# --------------- NEW CHANGE END ---------------
>>>>>>> upstream/master
self.metadata = SwarmRunMetadata(
run_id=f"spreadsheet_swarm_run_{run_id}",
@ -117,18 +149,154 @@ class SpreadSheetSwarm(BaseSwarm):
"""
logger.info("Checking the reliability of the swarm...")
if not self.agents:
raise ValueError("No agents are provided.")
if not self.save_file_path:
raise ValueError("No save file path is provided.")
# if not self.agents:
# raise ValueError("No agents are provided.")
# if not self.save_file_path:
# raise ValueError("No save file path is provided.")
if not self.max_loops:
raise ValueError("No max loops are provided.")
logger.info("Swarm reliability check passed.")
logger.info("Swarm is ready to run.")
# @profile_func
def run(self, task: str, *args, **kwargs):
async def _load_from_csv(self):
"""
Load agent configurations from a CSV file.
Expected CSV format: agent_name,description,system_prompt,task
Args:
csv_path (str): Path to the CSV file containing agent configurations
"""
try:
csv_path = self.load_path
logger.info(
f"Loading agent configurations from {csv_path}"
)
async with aiofiles.open(csv_path, mode="r") as file:
content = await file.read()
csv_reader = csv.DictReader(content.splitlines())
for row in csv_reader:
config = AgentConfig(
agent_name=row["agent_name"],
description=row["description"],
system_prompt=row["system_prompt"],
task=row["task"],
)
# Create new agent with configuration
new_agent = Agent(
agent_name=config.agent_name,
system_prompt=config.system_prompt,
description=config.description,
model_name=(
row["model_name"]
if "model_name" in row
else "openai/gpt-4o"
),
docs=[row["docs"]] if "docs" in row else "",
dynamic_temperature_enabled=True,
max_loops=(
row["max_loops"]
if "max_loops" in row
else 1
),
user_name=(
row["user_name"]
if "user_name" in row
else "user"
),
# output_type="str",
stopping_token=(
row["stopping_token"]
if "stopping_token" in row
else None
),
)
# Add agent to swarm
self.agents.append(new_agent)
self.agent_configs[config.agent_name] = config
# Update metadata with new agents
self.metadata.agents = [
agent.name for agent in self.agents
]
self.metadata.number_of_agents = len(self.agents)
logger.info(
f"Loaded {len(self.agent_configs)} agent configurations"
)
except Exception as e:
logger.error(f"Error loading agent configurations: {e}")
def load_from_csv(self):
asyncio.run(self._load_from_csv())
async def run_from_config(self):
"""
Run all agents with their configured tasks concurrently
"""
logger.info("Running agents from configuration")
self.metadata.start_time = time
tasks = []
for agent in self.agents:
config = self.agent_configs.get(agent.agent_name)
if config:
for _ in range(self.max_loops):
tasks.append(
asyncio.to_thread(
self._run_agent_task, agent, config.task
)
)
# Run all tasks concurrently
results = await asyncio.gather(*tasks)
# Process the results
for result in results:
self._track_output(*result)
self.metadata.end_time = time
# Save metadata
logger.info("Saving metadata to CSV and JSON...")
await self._save_metadata()
if self.autosave_on:
self.data_to_json_file()
log_agent_data(self.metadata.model_dump())
return self.metadata.model_dump_json(indent=4)
async def _run(self, task: str = None, *args, **kwargs):
"""
Run the swarm either with a specific task or using configured tasks.
Args:
task (str, optional): The task to be executed by all agents. If None, uses tasks from config.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Returns:
str: The JSON representation of the swarm metadata.
"""
if task is None and self.agent_configs:
return await self.run_from_config()
else:
self.metadata.start_time = time
await self._run_tasks(task, *args, **kwargs)
self.metadata.end_time = time
await self._save_metadata()
if self.autosave_on:
self.data_to_json_file()
print(log_agent_data(self.metadata.model_dump()))
return self.metadata.model_dump_json(indent=4)
def run(self, task: str = None, *args, **kwargs):
"""
Run the swarm with the specified task.
@ -141,6 +309,7 @@ class SpreadSheetSwarm(BaseSwarm):
str: The JSON representation of the swarm metadata.
"""
<<<<<<< HEAD
logger.info(f"Running the swarm with task: {task}")
self.metadata.start_time = str(datetime.now().timestamp()) # Numeric timestamp
@ -165,6 +334,13 @@ class SpreadSheetSwarm(BaseSwarm):
print(log_agent_data(self.metadata.model_dump()))
return self.metadata.model_dump_json(indent=4)
=======
try:
return asyncio.run(self._run(task, *args, **kwargs))
except Exception as e:
logger.error(f"Error running swarm: {e}")
raise e
>>>>>>> upstream/master
async def _run_tasks(self, task: str, *args, **kwargs):
"""
@ -209,9 +385,15 @@ class SpreadSheetSwarm(BaseSwarm):
Returns:
Tuple[str, str, str]: A tuple containing the agent name, task, and result.
"""
result = agent.run(task, *args, **kwargs)
# Assuming agent.run() is a blocking call
return agent.agent_name, task, result
try:
result = agent.run(task=task, *args, **kwargs)
# Assuming agent.run() is a blocking call
return agent.agent_name, task, result
except Exception as e:
logger.error(
f"Error running task for {agent.agent_name}: {e}"
)
return agent.agent_name, task, str(e)
def _track_output(self, agent_name: str, task: str, result: str):
"""
@ -249,7 +431,7 @@ class SpreadSheetSwarm(BaseSwarm):
create_file_in_folder(
folder_path=f"{self.workspace_dir}/Spreedsheet-Swarm-{self.name}/{self.name}",
file_name=f"spreedsheet-swarm-{self.metadata.run_id}_metadata.json",
file_name=f"spreedsheet-swarm-{uuid_hex}-metadata.json",
content=out,
)

@ -1,20 +1,122 @@
import os
import subprocess
from typing import List, Optional
from datetime import datetime
from loguru import logger
from pydantic import BaseModel, Field
from pydantic.v1 import validator
from loguru import logger
from swarm_models import OpenAIChat
from tenacity import (
retry,
stop_after_attempt,
wait_exponential,
)
from swarm_models import OpenAIFunctionCaller, OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter
from swarms.structs.agents_available import showcase_available_agents
from swarms.structs.swarm_router import SwarmRouter, SwarmType
logger.add("swarm_builder.log", rotation="10 MB", backtrace=True)
class OpenAIFunctionCaller:
"""
A class to interact with the OpenAI API for generating text based on a system prompt and a task.
Attributes:
- system_prompt (str): The system prompt to guide the AI's response.
- api_key (str): The API key for the OpenAI service.
- temperature (float): The temperature parameter for the AI model, controlling randomness.
- base_model (BaseModel): The Pydantic model to parse the response into.
- max_tokens (int): The maximum number of tokens in the response.
- client (OpenAI): The OpenAI client instance.
"""
def __init__(
self,
system_prompt: str,
api_key: str,
temperature: float,
base_model: BaseModel,
max_tokens: int = 5000,
):
self.system_prompt = system_prompt
self.api_key = api_key
self.temperature = temperature
self.base_model = base_model
self.max_tokens = max_tokens
try:
from openai import OpenAI
except ImportError:
logger.error(
"OpenAI library not found. Please install the OpenAI library by running 'pip install openai'"
)
subprocess.run(["pip", "install", "openai"])
from openai import OpenAI
self.client = OpenAI(api_key=api_key)
def run(self, task: str, *args, **kwargs) -> BaseModel:
"""
Run the OpenAI model with the system prompt and task to generate a response.
Args:
- task (str): The task to be completed.
- *args: Additional positional arguments for the OpenAI API.
- **kwargs: Additional keyword arguments for the OpenAI API.
Returns:
- BaseModel: The parsed response based on the base_model.
"""
completion = self.client.beta.chat.completions.parse(
model="gpt-4o-2024-08-06",
messages=[
{"role": "system", "content": self.system_prompt},
{"role": "user", "content": task},
],
response_format=self.base_model,
temperature=self.temperature,
max_tokens=self.max_tokens,
*args,
**kwargs,
)
return completion.choices[0].message.parsed
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
)
async def run_async(
self, task: str, *args, **kwargs
) -> BaseModel:
"""
Asynchronous version of the run method.
Args:
- task (str): The task to be completed.
- *args: Additional positional arguments for the OpenAI API.
- **kwargs: Additional keyword arguments for the OpenAI API.
Returns:
- BaseModel: The parsed response based on the base_model.
"""
completion = (
await self.client.beta.chat.completions.parse_async(
model="gpt-4o-2024-08-06",
messages=[
{"role": "system", "content": self.system_prompt},
{"role": "user", "content": task},
],
response_format=self.base_model,
temperature=self.temperature,
max_tokens=self.max_tokens,
*args,
**kwargs,
)
)
return completion.choices[0].message.parsed
BOSS_SYSTEM_PROMPT = """
@ -59,29 +161,15 @@ class AgentConfig(BaseModel):
"""Configuration for an individual agent in a swarm"""
name: str = Field(
description="The name of the agent", example="Research-Agent"
description="The name of the agent",
)
description: str = Field(
description="A description of the agent's purpose and capabilities",
example="Agent responsible for researching and gathering information",
)
system_prompt: str = Field(
description="The system prompt that defines the agent's behavior",
example="You are a research agent. Your role is to gather and analyze information...",
)
@validator("name")
def validate_name(cls, v):
if not v.strip():
raise ValueError("Agent name cannot be empty")
return v.strip()
@validator("system_prompt")
def validate_system_prompt(cls, v):
if not v.strip():
raise ValueError("System prompt cannot be empty")
return v.strip()
class SwarmConfig(BaseModel):
"""Configuration for a swarm of cooperative agents"""
@ -96,7 +184,9 @@ class SwarmConfig(BaseModel):
)
agents: List[AgentConfig] = Field(
description="The list of agents that make up the swarm",
min_items=1,
)
max_loops: int = Field(
description="The maximum number of loops for the swarm to iterate on",
)
@validator("agents")
@ -106,23 +196,90 @@ class SwarmConfig(BaseModel):
return v
class AutoSwarmBuilderOutput(BaseModel):
"""A class that automatically builds and manages swarms of AI agents with enhanced error handling."""
name: Optional[str] = Field(
description="The name of the swarm",
example="DefaultSwarm",
default=None,
)
description: Optional[str] = Field(
description="The description of the swarm's purpose and capabilities",
example="Generic AI Agent Swarm",
default=None,
)
verbose: Optional[bool] = Field(
description="Whether to display verbose output",
default=None,
)
model_name: Optional[str] = Field(
description="The name of the OpenAI model to use",
default=None,
)
boss_output_schema: Optional[list] = Field(
description="The schema for the output of the BOSS system prompt",
default=None,
)
director_agents_created: Optional[SwarmConfig] = Field(
description="The agents created by the director",
default=None,
)
swarm_router_outputs: Optional[list] = Field(
description="The outputs from the swarm router",
default=None,
)
max_loops: Optional[int] = Field(
description="The maximum number of loops for the swarm to iterate on",
default=None,
)
swarm_type: Optional[SwarmType] = Field(
description="The type of swarm to build",
default=None,
)
class AutoSwarmBuilder:
"""A class that automatically builds and manages swarms of AI agents with enhanced error handling."""
def __init__(
self,
name: Optional[str] = None,
description: Optional[str] = None,
name: Optional[str] = "autonomous-swarm-builder",
description: Optional[
str
] = "Given a task, this swarm will automatically create specialized agents and route it to the appropriate agents.",
verbose: bool = True,
api_key: Optional[str] = None,
model_name: str = "gpt-4",
model_name: str = "gpt-4o",
boss_output_schema: list = None,
swarm_router_outputs: AutoSwarmBuilderOutput = None,
max_loops: int = 1,
swarm_type: str = "SequentialWorkflow",
auto_generate_prompts_for_agents: bool = False,
shared_memory_system: callable = None,
):
self.name = name or "DefaultSwarm"
self.description = description or "Generic AI Agent Swarm"
self.verbose = verbose
self.agents_pool = []
self.api_key = api_key or os.getenv("OPENAI_API_KEY")
self.api_key = os.getenv("OPENAI_API_KEY")
self.model_name = model_name
self.boss_output_schema = boss_output_schema
self.max_loops = max_loops
self.swarm_type = swarm_type
self.auto_generate_prompts_for_agents = (
auto_generate_prompts_for_agents
)
self.shared_memory_system = shared_memory_system
self.auto_swarm_builder_output = AutoSwarmBuilderOutput(
name=name,
description=description,
verbose=verbose,
model_name=model_name,
boss_output_schema=boss_output_schema or [],
swarm_router_outputs=swarm_router_outputs or [],
max_loops=max_loops,
swarm_type=swarm_type,
)
if not self.api_key:
raise ValueError(
@ -143,7 +300,6 @@ class AutoSwarmBuilder:
self.chat_model = OpenAIChat(
openai_api_key=self.api_key,
model_name=self.model_name,
temperature=0.1,
)
except Exception as e:
logger.error(
@ -151,11 +307,13 @@ class AutoSwarmBuilder:
)
raise
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
)
def run(self, task: str, image_url: Optional[str] = None) -> str:
def run(
self,
task: str,
image_url: Optional[str] = None,
*args,
**kwargs,
):
"""Run the swarm on a given task with error handling and retries."""
if not task or not task.strip():
raise ValueError("Task cannot be empty")
@ -164,7 +322,7 @@ class AutoSwarmBuilder:
try:
# Create agents for the task
agents = self._create_agents(task, image_url)
agents = self._create_agents(task)
if not agents:
raise ValueError(
"No agents were created for the task"
@ -175,20 +333,33 @@ class AutoSwarmBuilder:
"Routing task through swarm",
extra={"num_agents": len(agents)},
)
output = self.swarm_router(agents, task, image_url)
output = self.swarm_router(
agents=agents,
task=task,
image_url=image_url,
*args,
**kwargs,
)
self.auto_swarm_builder_output.swarm_router_outputs.append(
output
)
print(output)
logger.info("Swarm execution completed successfully")
return output
# return output
return self.auto_swarm_builder_output.model_dump_json(
indent=4
)
except Exception as e:
logger.error(
f"Error during swarm execution: {str(e)}",
exc_info=True,
)
raise
raise e
def _create_agents(
self, task: str, image_url: Optional[str] = None
self,
task: str,
) -> List[Agent]:
"""Create the necessary agents for a task with enhanced error handling."""
logger.info("Creating agents for task", extra={"task": task})
@ -202,7 +373,12 @@ class AutoSwarmBuilder:
)
agents_config = model.run(task)
print(f"{agents_config}")
logger.info(
f"Director has successfully created agents: {agents_config}"
)
self.auto_swarm_builder_output.director_agents_created = (
agents_config
)
if isinstance(agents_config, dict):
agents_config = SwarmConfig(**agents_config)
@ -224,15 +400,19 @@ class AutoSwarmBuilder:
)
agents.append(agent)
# Add available agents showcase to system prompts
agents_available = showcase_available_agents(
name=self.name,
description=self.description,
agents=agents,
)
print(
f"Agent created: {agent_config.name}: Description: {agent_config.description}"
)
# # Add available agents showcase to system prompts
# agents_available = showcase_available_agents(
# name=self.name,
# description=self.description,
# agents=agents,
# )
for agent in agents:
agent.system_prompt += "\n" + agents_available
# for agent in agents:
# agent.system_prompt += "\n" + agents_available
logger.info(
"Successfully created agents",
@ -251,6 +431,8 @@ class AutoSwarmBuilder:
agent_name: str,
agent_description: str,
agent_system_prompt: str,
*args,
**kwargs,
) -> Agent:
"""Build a single agent with enhanced error handling."""
logger.info(
@ -263,18 +445,11 @@ class AutoSwarmBuilder:
description=agent_description,
system_prompt=agent_system_prompt,
llm=self.chat_model,
autosave=True,
dashboard=False,
verbose=self.verbose,
dynamic_temperature_enabled=True,
saved_state_path=f"states/{agent_name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
user_name="swarms_corp",
retry_attempts=3,
context_length=200000,
dynamic_temperature_enabled=False,
return_step_meta=False,
output_type="str",
streaming_on=False,
auto_generate_prompt=True,
streaming_on=True,
)
return agent
@ -292,7 +467,9 @@ class AutoSwarmBuilder:
self,
agents: List[Agent],
task: str,
image_url: Optional[str] = None,
img: Optional[str] = None,
*args,
**kwargs,
) -> str:
"""Route tasks between agents in the swarm with error handling and retries."""
logger.info(
@ -305,11 +482,14 @@ class AutoSwarmBuilder:
name=self.name,
description=self.description,
agents=agents,
swarm_type="auto",
swarm_type=self.swarm_type,
auto_generate_prompts=self.auto_generate_prompts_for_agents,
)
formatted_task = f"{self.name} {self.description} {task}"
result = swarm_router_instance.run(formatted_task)
# formatted_task = f"{self.name} {self.description} {task}"
result = swarm_router_instance.run(
task=task, *args, **kwargs
)
logger.info("Successfully completed swarm routing")
return result
@ -321,13 +501,16 @@ class AutoSwarmBuilder:
raise
swarm = AutoSwarmBuilder(
name="ChipDesign-Swarm",
description="A swarm of specialized AI agents for chip design",
api_key="your-api-key", # Optional if set in environment
model_name="gpt-4", # Optional, defaults to gpt-4
)
# swarm = AutoSwarmBuilder(
# name="ChipDesign-Swarm",
# description="A swarm of specialized AI agents for chip design",
# swarm_type="ConcurrentWorkflow",
# )
result = swarm.run(
"Design a new AI accelerator chip optimized for transformer model inference..."
)
# try:
# result = swarm.run(
# "Design a new AI accelerator chip optimized for transformer model inference..."
# )
# print(result)
# except Exception as e:
# print(f"An error occurred: {e}")

@ -1,14 +1,15 @@
from typing import List, Tuple, Optional
import json
from typing import List, Optional, Tuple
import numpy as np
from swarms.utils.lazy_loader import lazy_import_decorator
from pydantic import BaseModel, Field
import json
from tenacity import retry, stop_after_attempt, wait_exponential
from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.auto_download_check_packages import (
auto_check_and_download_package,
)
from swarms.utils.lazy_loader import lazy_import_decorator
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="swarm_matcher")

@ -0,0 +1,23 @@
import time
from typing import List
import uuid
from pydantic import BaseModel, Field
class AgentResponde(BaseModel):
id: str = Field(default=uuid.uuid4().hex)
timestamp: str = Field(default=time.time())
agent_position: int = Field(description="Agent in swarm position")
agent_name: str
agent_response: str = Field(description="Agent response")
class SwarmOutput(BaseModel):
id: str = Field(default=uuid.uuid4().hex)
timestamp: str = Field(default=time.time())
name: str = Field(description="Swarm name")
description: str = Field(description="Swarm description")
swarm_type: str = Field(description="Swarm type")
agent_outputs: List[AgentResponde] = Field(
description="List of agent responses"
)

@ -2,7 +2,6 @@ import uuid
from datetime import datetime
from typing import Any, Callable, Dict, List, Literal, Union
from doc_master import doc_master
from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_fixed
@ -130,7 +129,7 @@ class SwarmRouter:
agents: List[Union[Agent, Callable]] = [],
swarm_type: SwarmType = "SequentialWorkflow", # "SpreadSheetSwarm" # "auto"
autosave: bool = False,
flow: str = None,
rearrange_flow: str = None,
return_json: bool = False,
auto_generate_prompts: bool = False,
shared_memory_system: Any = None,
@ -147,7 +146,7 @@ class SwarmRouter:
self.agents = agents
self.swarm_type = swarm_type
self.autosave = autosave
self.flow = flow
self.rearrange_flow = rearrange_flow
self.return_json = return_json
self.auto_generate_prompts = auto_generate_prompts
self.shared_memory_system = shared_memory_system
@ -175,29 +174,12 @@ class SwarmRouter:
if self.rules is not None:
self.handle_rules()
# if self.documents is not None:
# self.handle_docs()
# let's make a function that checks the agents parameter and disables clusterops
def deactivate_clusterops(self):
for agent in self.agents:
agent.do_not_use_cluster_ops = True
def handle_docs(self):
# Process all documents in parallel using list comprehension
data = "".join(
[doc_master(file_path=doc) for doc in self.documents]
)
# Update all agents' prompts at once
doc_prompt = f"##### Documents Available ########## {data}"
for agent in self.agents:
agent.system_prompt += doc_prompt
# Add documents to the logs
# self.logs.append(Document(file_path=self.documents, data=data))
def activate_shared_memory(self):
logger.info("Activating shared memory with all agents ")
@ -296,7 +278,7 @@ class SwarmRouter:
description=self.description,
agents=self.agents,
max_loops=self.max_loops,
flow=self.flow,
flow=self.rearrange_flow,
return_json=self.return_json,
output_type=self.output_type,
*args,
@ -323,11 +305,7 @@ class SwarmRouter:
*args,
**kwargs,
)
elif (
self.swarm_type == "SequentialWorkflow"
or self.swarm_type == "sequential"
or self.swarm_type == "Sequential"
):
elif self.swarm_type == "SequentialWorkflow":
return SequentialWorkflow(
name=self.name,
description=self.description,
@ -382,7 +360,7 @@ class SwarmRouter:
logger.log(level.upper(), message)
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
def _run(self, task: str, *args, **kwargs) -> Any:
def _run(self, task: str, img: str, *args, **kwargs) -> Any:
"""
Dynamically run the specified task on the selected or matched swarm type.
@ -402,11 +380,9 @@ class SwarmRouter:
try:
self._log(
"info",
f"Running task on {self.swarm_type} swarm",
task=task,
metadata=kwargs,
f"Running task on {self.swarm_type} swarm with task: {task}",
)
result = self.swarm.run(task, *args, **kwargs)
result = self.swarm.run(task=task, *args, **kwargs)
self._log(
"success",
@ -427,9 +403,11 @@ class SwarmRouter:
def run(
self,
task: str,
img: str = None,
device: str = "cpu",
all_cores: bool = True,
all_gpus: bool = False,
no_clusterops: bool = True,
*args,
**kwargs,
) -> Any:
@ -450,15 +428,22 @@ class SwarmRouter:
Raises:
Exception: If an error occurs during task execution.
"""
return exec_callable_with_clusterops(
func=self._run,
device=device,
all_cores=all_cores,
all_gpus=all_gpus,
task=task,
*args,
**kwargs,
)
try:
if no_clusterops:
return self._run(task=task, img=img, *args, **kwargs)
else:
return exec_callable_with_clusterops(
func=self._run,
device=device,
all_cores=all_cores,
all_gpus=all_gpus,
task=task,
*args,
**kwargs,
)
except Exception as e:
logger.error(f"Error executing task on swarm: {str(e)}")
raise
def __call__(self, task: str, *args, **kwargs) -> Any:
"""

@ -1,4 +1,3 @@
import asyncio
import math
from typing import List, Union
@ -343,47 +342,6 @@ def sinusoidal_swarm(agents: AgentListType, task: str):
agents[index].run(task)
async def one_to_three(
sender: Agent, agents: AgentListType, task: str
):
"""
Sends a message from the sender agent to three other agents.
Args:
sender (Agent): The agent sending the message.
agents (AgentListType): The list of agents to receive the message.
task (str): The message to be sent.
Raises:
Exception: If there is an error while sending the message.
Returns:
None
"""
if len(agents) != 3:
raise ValueError("The number of agents must be exactly 3.")
if not task:
raise ValueError("The task cannot be empty.")
if not sender:
raise ValueError("The sender cannot be empty.")
try:
receive_tasks = []
for agent in agents:
receive_tasks.append(
agent.receive_message(sender.agent_name, task)
)
await asyncio.gather(*receive_tasks)
except Exception as error:
logger.error(
f"[ERROR][CLASS: Agent][METHOD: one_to_three] {error}"
)
raise error
"""
This module contains functions for facilitating communication between agents in a swarm. It includes methods for one-to-one communication, broadcasting, and other swarm architectures.
"""
@ -440,36 +398,70 @@ def one_to_one(
return conversation.return_history()
# Broadcasting: A message from one agent to many
async def broadcast(
sender: Agent, agents: AgentListType, task: str
) -> None:
"""
Facilitates broadcasting of a message from one agent to multiple agents.
Args:
sender (Agent): The agent sending the message.
agents (AgentListType): The list of agents to receive the message.
task (str): The message to be sent.
Raises:
ValueError: If the sender, agents, or task is empty.
Exception: If there is an error during the broadcasting process.
"""
conversation = Conversation()
if not sender or not agents or not task:
raise ValueError("Sender, agents, and task cannot be empty.")
try:
receive_tasks = []
# First get the sender's broadcast message
broadcast_message = sender.run(task)
conversation.add_log(
agent_name=sender.agent_name,
task=task,
response=broadcast_message,
)
# Then have all agents process it
for agent in agents:
receive_tasks.append(agent.run(task))
response = agent.run(broadcast_message)
conversation.add_log(
agent_name=agent.agent_name, task=task, response=task
agent_name=agent.agent_name,
task=broadcast_message,
response=response,
)
await asyncio.gather(*receive_tasks)
return conversation.return_history()
except Exception as error:
logger.error(f"Error during broadcast: {error}")
raise error
async def one_to_three(
sender: Agent, agents: AgentListType, task: str
):
if len(agents) != 3:
raise ValueError("The number of agents must be exactly 3.")
if not task or not sender:
raise ValueError("Sender and task cannot be empty.")
conversation = Conversation()
try:
# Get sender's message
sender_message = sender.run(task)
conversation.add_log(
agent_name=sender.agent_name,
task=task,
response=sender_message,
)
# Have each receiver process the message
for agent in agents:
response = agent.run(sender_message)
conversation.add_log(
agent_name=agent.agent_name,
task=sender_message,
response=response,
)
return conversation.return_history()
except Exception as error:
logger.error(f"Error in one_to_three: {error}")
raise error

@ -1,145 +0,0 @@
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Any, List, Optional, Union
from doc_master import doc_master
from tenacity import retry, stop_after_attempt, wait_exponential
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="add_docs_to_agents")
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
)
def _process_document(doc_path: Union[str, Path]) -> str:
"""Safely process a single document with retries.
Args:
doc_path: Path to the document to process
Returns:
Processed document text
Raises:
Exception: If document processing fails after retries
"""
try:
return doc_master(
file_path=str(doc_path), output_type="string"
)
except Exception as e:
logger.error(
f"Error processing document {doc_path}: {str(e)}"
)
raise
def handle_input_docs(
agents: Any,
docs: Optional[List[Union[str, Path]]] = None,
doc_folder: Optional[Union[str, Path]] = None,
max_workers: int = 4,
chunk_size: int = 1000000,
) -> Any:
"""
Add document content to agent prompts with improved reliability and performance.
Args:
agents: Dictionary mapping agent names to Agent objects
docs: List of document paths
doc_folder: Path to folder containing documents
max_workers: Maximum number of parallel document processing workers
chunk_size: Maximum characters to process at once to avoid memory issues
Raises:
ValueError: If neither docs nor doc_folder is provided
RuntimeError: If document processing fails
"""
if not agents:
logger.warning(
"No agents provided, skipping document distribution"
)
return
if not docs and not doc_folder:
logger.warning(
"No documents or folder provided, skipping document distribution"
)
return
logger.info("Starting document distribution to agents")
try:
processed_docs = []
# Process individual documents in parallel
if docs:
with ThreadPoolExecutor(
max_workers=max_workers
) as executor:
future_to_doc = {
executor.submit(_process_document, doc): doc
for doc in docs
}
for future in as_completed(future_to_doc):
doc = future_to_doc[future]
try:
processed_docs.append(future.result())
except Exception as e:
logger.error(
f"Failed to process document {doc}: {str(e)}"
)
raise RuntimeError(
f"Document processing failed: {str(e)}"
)
# Process folder if specified
elif doc_folder:
try:
folder_content = doc_master(
folder_path=str(doc_folder), output_type="string"
)
processed_docs.append(folder_content)
except Exception as e:
logger.error(
f"Failed to process folder {doc_folder}: {str(e)}"
)
raise RuntimeError(
f"Folder processing failed: {str(e)}"
)
# Combine and chunk the processed documents
combined_data = "\n".join(processed_docs)
# Update agent prompts in chunks to avoid memory issues
for agent in agents.values():
try:
for i in range(0, len(combined_data), chunk_size):
chunk = combined_data[i : i + chunk_size]
if i == 0:
agent.system_prompt += (
"\nDocuments:\n" + chunk
)
else:
agent.system_prompt += chunk
except Exception as e:
logger.error(
f"Failed to update agent prompt: {str(e)}"
)
raise RuntimeError(
f"Agent prompt update failed: {str(e)}"
)
logger.info(
f"Successfully added documents to {len(agents)} agents"
)
return agents
except Exception as e:
logger.error(f"Document distribution failed: {str(e)}")
raise RuntimeError(f"Document distribution failed: {str(e)}")

@ -9,13 +9,6 @@ from swarms.structs.agent import Agent
logger = initialize_logger(log_folder="pandas_utils")
try:
import pandas as pd
except ImportError:
logger.error("Failed to import pandas")
subprocess.run(["pip", "install", "pandas"])
import pandas as pd
def display_agents_info(agents: List[Agent]) -> None:
"""
@ -24,6 +17,14 @@ def display_agents_info(agents: List[Agent]) -> None:
:param agents: List of Agent instances.
"""
# Extracting relevant information from each agent
try:
import pandas as pd
except ImportError:
logger.error("Failed to import pandas")
subprocess.run(["pip", "install", "pandas"])
import pandas as pd
agent_data = []
for agent in agents:
try:
@ -57,19 +58,26 @@ def display_agents_info(agents: List[Agent]) -> None:
logger.error(f"Failed to print DataFrame: {e}")
def dict_to_dataframe(data: Dict[str, Any]) -> pd.DataFrame:
def dict_to_dataframe(data: Dict[str, Any]):
"""
Converts a dictionary into a pandas DataFrame.
:param data: Dictionary to convert.
:return: A pandas DataFrame representation of the dictionary.
"""
try:
import pandas as pd
except ImportError:
logger.error("Failed to import pandas")
subprocess.run(["pip", "install", "pandas"])
import pandas as pd
# Convert dictionary to DataFrame
df = pd.json_normalize(data)
return df
def pydantic_model_to_dataframe(model: BaseModel) -> pd.DataFrame:
def pydantic_model_to_dataframe(model: BaseModel) -> any:
"""
Converts a Pydantic Base Model into a pandas DataFrame.

@ -0,0 +1,6 @@
pytest
swarms
loguru
pydantic
swarm-models
loguru

@ -0,0 +1,328 @@
import os
import traceback
from datetime import datetime
from typing import Callable, Dict, List, Optional
from loguru import logger
from swarm_models import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.rearrange import AgentRearrange
class TestResult:
"""Class to store test results and metadata"""
def __init__(self, test_name: str):
self.test_name = test_name
self.start_time = datetime.now()
self.end_time = None
self.success = False
self.error = None
self.traceback = None
self.function_output = None
def complete(
self, success: bool, error: Optional[Exception] = None
):
"""Complete the test execution with results"""
self.end_time = datetime.now()
self.success = success
if error:
self.error = str(error)
self.traceback = traceback.format_exc()
def duration(self) -> float:
"""Calculate test duration in seconds"""
if self.end_time:
return (self.end_time - self.start_time).total_seconds()
return 0
def run_test(test_func: Callable) -> TestResult:
"""
Decorator to run tests with error handling and logging
Args:
test_func (Callable): Test function to execute
Returns:
TestResult: Object containing test execution details
"""
def wrapper(*args, **kwargs) -> TestResult:
result = TestResult(test_func.__name__)
logger.info(
f"\n{'='*20} Running test: {test_func.__name__} {'='*20}"
)
try:
output = test_func(*args, **kwargs)
result.function_output = output
result.complete(success=True)
logger.success(
f"✅ Test {test_func.__name__} passed successfully"
)
except Exception as e:
result.complete(success=False, error=e)
logger.error(
f"❌ Test {test_func.__name__} failed with error: {str(e)}"
)
logger.error(f"Traceback: {traceback.format_exc()}")
logger.info(
f"Test duration: {result.duration():.2f} seconds\n"
)
return result
return wrapper
def create_functional_agents() -> List[Agent]:
"""
Create a list of functional agents with real LLM integration for testing.
Using OpenAI's GPT model for realistic agent behavior testing.
"""
# Initialize OpenAI Chat model
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
logger.warning(
"No OpenAI API key found. Using mock agents instead."
)
return [
create_mock_agent("TestAgent1"),
create_mock_agent("TestAgent2"),
]
try:
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o", temperature=0.1
)
# Create boss agent
boss_agent = Agent(
agent_name="BossAgent",
system_prompt="""
You are the BossAgent responsible for managing and overseeing test scenarios.
Your role is to coordinate tasks between agents and ensure efficient collaboration.
Analyze inputs, break down tasks, and provide clear directives to other agents.
Maintain a structured approach to task management and result compilation.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="test_boss_agent.json",
)
# Create analysis agent
analysis_agent = Agent(
agent_name="AnalysisAgent",
system_prompt="""
You are the AnalysisAgent responsible for detailed data processing and analysis.
Your role is to examine input data, identify patterns, and provide analytical insights.
Focus on breaking down complex information into clear, actionable components.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="test_analysis_agent.json",
)
# Create summary agent
summary_agent = Agent(
agent_name="SummaryAgent",
system_prompt="""
You are the SummaryAgent responsible for consolidating and summarizing information.
Your role is to take detailed analysis and create concise, actionable summaries.
Focus on highlighting key points and ensuring clarity in communication.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="test_summary_agent.json",
)
logger.info(
"Successfully created functional agents with LLM integration"
)
return [boss_agent, analysis_agent, summary_agent]
except Exception as e:
logger.error(f"Failed to create functional agents: {str(e)}")
logger.warning("Falling back to mock agents")
return [
create_mock_agent("TestAgent1"),
create_mock_agent("TestAgent2"),
]
def create_mock_agent(name: str) -> Agent:
"""Create a mock agent for testing when LLM integration is not available"""
return Agent(
agent_name=name,
system_prompt=f"You are a test agent named {name}",
llm=None,
)
@run_test
def test_init():
"""Test AgentRearrange initialization with functional agents"""
logger.info("Creating agents for initialization test")
agents = create_functional_agents()
rearrange = AgentRearrange(
name="TestRearrange",
agents=agents,
flow=f"{agents[0].agent_name} -> {agents[1].agent_name} -> {agents[2].agent_name}",
)
assert rearrange.name == "TestRearrange"
assert len(rearrange.agents) == 3
assert (
rearrange.flow
== f"{agents[0].agent_name} -> {agents[1].agent_name} -> {agents[2].agent_name}"
)
logger.info(
f"Initialized AgentRearrange with {len(agents)} agents"
)
return True
@run_test
def test_validate_flow():
"""Test flow validation logic"""
agents = create_functional_agents()
rearrange = AgentRearrange(
agents=agents,
flow=f"{agents[0].agent_name} -> {agents[1].agent_name}",
)
logger.info("Testing valid flow pattern")
valid = rearrange.validate_flow()
assert valid is True
logger.info("Testing invalid flow pattern")
rearrange.flow = f"{agents[0].agent_name} {agents[1].agent_name}" # Missing arrow
try:
rearrange.validate_flow()
assert False, "Should have raised ValueError"
except ValueError as e:
logger.info(
f"Successfully caught invalid flow error: {str(e)}"
)
assert True
return True
@run_test
def test_add_remove_agent():
"""Test adding and removing agents from the swarm"""
agents = create_functional_agents()
rearrange = AgentRearrange(
agents=agents[:2]
) # Start with first two agents
logger.info("Testing agent addition")
new_agent = agents[2] # Use the third agent as new agent
rearrange.add_agent(new_agent)
assert new_agent.agent_name in rearrange.agents
logger.info("Testing agent removal")
rearrange.remove_agent(new_agent.agent_name)
assert new_agent.agent_name not in rearrange.agents
return True
@run_test
def test_basic_run():
"""Test basic task execution with the swarm"""
agents = create_functional_agents()
rearrange = AgentRearrange(
name="TestSwarm",
agents=agents,
flow=f"{agents[0].agent_name} -> {agents[1].agent_name} -> {agents[2].agent_name}",
max_loops=1,
)
test_task = (
"Analyze this test message and provide a brief summary."
)
logger.info(f"Running test task: {test_task}")
try:
result = rearrange.run(test_task)
assert result is not None
logger.info(
f"Successfully executed task with result length: {len(str(result))}"
)
return True
except Exception as e:
logger.error(f"Task execution failed: {str(e)}")
raise
def run_all_tests() -> Dict[str, TestResult]:
"""
Run all test cases and collect results
Returns:
Dict[str, TestResult]: Dictionary mapping test names to their results
"""
logger.info("\n🚀 Starting AgentRearrange test suite execution")
test_functions = [
test_init,
test_validate_flow,
test_add_remove_agent,
test_basic_run,
]
results = {}
for test in test_functions:
result = test()
results[test.__name__] = result
# Log summary
total_tests = len(results)
passed_tests = sum(1 for r in results.values() if r.success)
failed_tests = total_tests - passed_tests
logger.info("\n📊 Test Suite Summary:")
logger.info(f"Total Tests: {total_tests}")
print(f"✅ Passed: {passed_tests}")
if failed_tests > 0:
logger.error(f"❌ Failed: {failed_tests}")
# Detailed failure information
if failed_tests > 0:
logger.error("\n❌ Failed Tests Details:")
for name, result in results.items():
if not result.success:
logger.error(f"\n{name}:")
logger.error(f"Error: {result.error}")
logger.error(f"Traceback: {result.traceback}")
return results
if __name__ == "__main__":
print("🌟 Starting AgentRearrange Test Suite")
results = run_all_tests()
print("🏁 Test Suite Execution Completed")

@ -0,0 +1,198 @@
from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
from dotenv import load_dotenv
load_dotenv()
def print_separator():
print("\n" + "=" * 50)
def test_initialization():
"""Test basic initialization of AutoSwarmBuilder"""
print_separator()
print("Testing AutoSwarmBuilder Initialization")
try:
swarm = AutoSwarmBuilder(
name="TestSwarm",
description="A test swarm for validation",
verbose=True,
max_loops=2,
)
print("✓ Created swarm with configuration:")
print(f" - Name: {swarm.name}")
print(f" - Description: {swarm.description}")
print(f" - Max loops: {swarm.max_loops}")
print(f" - Verbose: {swarm.verbose}")
print("✓ Initialization test passed")
return swarm
except Exception as e:
print(f"✗ Initialization test failed: {str(e)}")
raise
def test_agent_building():
"""Test building individual agents"""
print_separator()
print("Testing Agent Building")
try:
swarm = AutoSwarmBuilder()
agent = swarm.build_agent(
agent_name="TestAgent",
agent_description="A test agent",
agent_system_prompt="You are a test agent",
max_loops=1,
)
print("✓ Built agent with configuration:")
print(f" - Name: {agent.agent_name}")
print(f" - Description: {agent.description}")
print(f" - Max loops: {agent.max_loops}")
print("✓ Agent building test passed")
return agent
except Exception as e:
print(f"✗ Agent building test failed: {str(e)}")
raise
def test_agent_creation():
"""Test creating multiple agents for a task"""
print_separator()
print("Testing Agent Creation from Task")
try:
swarm = AutoSwarmBuilder(
name="ResearchSwarm",
description="A swarm for research tasks",
)
task = "Research the latest developments in quantum computing"
agents = swarm._create_agents(task)
print("✓ Created agents for research task:")
for i, agent in enumerate(agents, 1):
print(f" Agent {i}:")
print(f" - Name: {agent.agent_name}")
print(f" - Description: {agent.description}")
print(f"✓ Created {len(agents)} agents successfully")
return agents
except Exception as e:
print(f"✗ Agent creation test failed: {str(e)}")
raise
def test_swarm_routing():
"""Test routing tasks through the swarm"""
print_separator()
print("Testing Swarm Routing")
try:
swarm = AutoSwarmBuilder(
name="RouterTestSwarm",
description="Testing routing capabilities",
)
agents = (
test_agent_creation()
) # Get agents from previous test
task = "Analyze the impact of AI on healthcare"
print("Starting task routing...")
result = swarm.swarm_router(agents, task)
print("✓ Task routed successfully")
print(
f" - Result length: {len(str(result)) if result else 0} characters"
)
print("✓ Swarm routing test passed")
return result
except Exception as e:
print(f"✗ Swarm routing test failed: {str(e)}")
raise
def test_full_swarm_execution():
"""Test complete swarm execution with a real task"""
print_separator()
print("Testing Full Swarm Execution")
try:
swarm = AutoSwarmBuilder(
name="FullTestSwarm",
description="Testing complete swarm functionality",
max_loops=1,
)
task = (
"Create a summary of recent advances in renewable energy"
)
print("Starting full swarm execution...")
result = swarm.run(task)
print("✓ Full swarm execution completed:")
print(f" - Output generated: {bool(result)}")
print(
f" - Output length: {len(str(result)) if result else 0} characters"
)
print("✓ Full swarm execution test passed")
return result
except Exception as e:
print(f"✗ Full swarm execution test failed: {str(e)}")
raise
def test_error_handling():
"""Test error handling in swarm operations"""
print_separator()
print("Testing Error Handling")
try:
swarm = AutoSwarmBuilder()
# Test with invalid agent configuration
print("Testing invalid agent configuration...")
try:
swarm.build_agent("", "", "")
print(
"✗ Should have raised an error for empty agent configuration"
)
except Exception as e:
print(
f"✓ Correctly handled invalid agent configuration: {type(e).__name__}"
)
# Test with None task
print("\nTesting None task...")
try:
swarm.run(None)
print("✗ Should have raised an error for None task")
except Exception as e:
print(
f"✓ Correctly handled None task: {type(e).__name__}"
)
print("✓ Error handling test passed")
except Exception as e:
print(f"✗ Error handling test failed: {str(e)}")
raise
def run_all_tests():
"""Run complete test suite"""
print("\n=== Starting AutoSwarmBuilder Test Suite ===\n")
try:
# Run all tests in sequence
test_initialization()
test_agent_building()
test_agent_creation()
test_swarm_routing()
test_full_swarm_execution()
test_error_handling()
print_separator()
print("🎉 All tests completed successfully!")
except Exception as e:
print_separator()
print(f"❌ Test suite failed: {str(e)}")
raise
if __name__ == "__main__":
run_all_tests()

@ -12,6 +12,7 @@ ceo = Agent(llm=llm, name="CEO")
dev = Agent(llm=llm, name="Developer")
va = Agent(llm=llm, name="VA")
hr = Agent(llm=llm, name="HR")
shared_instructions = "Listen to your boss"

@ -1,222 +1,147 @@
import pytest
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarm_models.anthropic import Anthropic
from swarms.structs.agent import Agent
from swarms.structs.groupchat import GroupChat, GroupChatManager
llm = OpenAIChat()
llm2 = Anthropic()
# Mock the OpenAI class for testing
class MockOpenAI:
def __init__(self, *args, **kwargs):
pass
def generate_reply(self, content):
return {"role": "mocked_agent", "content": "Mocked Reply"}
# Create fixtures for agents and a sample message
@pytest.fixture
def agent1():
return Agent(name="Agent1", llm=llm)
@pytest.fixture
def agent2():
return Agent(name="Agent2", llm=llm2)
@pytest.fixture
def sample_message():
return {"role": "Agent1", "content": "Hello, World!"}
# Test the initialization of GroupChat
def test_groupchat_initialization(agent1, agent2):
groupchat = GroupChat(agents=[agent1, agent2])
assert len(groupchat.agents) == 2
assert len(groupchat.messages) == 0
assert groupchat.max_round == 10
assert groupchat.admin_name == "Admin"
# Test resetting the GroupChat
def test_groupchat_reset(agent1, agent2, sample_message):
groupchat = GroupChat(agents=[agent1, agent2])
groupchat.messages.append(sample_message)
groupchat.reset()
assert len(groupchat.messages) == 0
# Test finding an agent by name
def test_groupchat_find_agent_by_name(agent1, agent2):
groupchat = GroupChat(agents=[agent1, agent2])
found_agent = groupchat.agent_by_name("Agent1")
assert found_agent == agent1
# Test selecting the next agent
def test_groupchat_select_next_agent(agent1, agent2):
groupchat = GroupChat(agents=[agent1, agent2])
next_agent = groupchat.next_agent(agent1)
assert next_agent == agent2
from swarms.structs.groupchat import GroupChat, expertise_based
# Add more tests for different methods and scenarios as needed
# Test the GroupChatManager
def test_groupchat_manager(agent1, agent2):
groupchat = GroupChat(agents=[agent1, agent2])
selector = agent1 # Assuming agent1 is the selector
manager = GroupChatManager(groupchat, selector)
task = "Task for agent2"
reply = manager(task)
assert reply["role"] == "Agent2"
assert reply["content"] == "Reply from Agent2"
# Test selecting the next speaker when there is only one agent
def test_groupchat_select_speaker_single_agent(agent1):
groupchat = GroupChat(agents=[agent1])
selector = agent1
manager = GroupChatManager(groupchat, selector)
task = "Task for agent1"
reply = manager(task)
assert reply["role"] == "Agent1"
assert reply["content"] == "Reply from Agent1"
# Test selecting the next speaker when GroupChat is underpopulated
def test_groupchat_select_speaker_underpopulated(agent1, agent2):
groupchat = GroupChat(agents=[agent1, agent2])
selector = agent1
manager = GroupChatManager(groupchat, selector)
task = "Task for agent1"
reply = manager(task)
assert reply["role"] == "Agent2"
assert reply["content"] == "Reply from Agent2"
# Test formatting history
def test_groupchat_format_history(agent1, agent2, sample_message):
groupchat = GroupChat(agents=[agent1, agent2])
groupchat.messages.append(sample_message)
formatted_history = groupchat.format_history(groupchat.messages)
expected_history = "'Agent1:Hello, World!"
assert formatted_history == expected_history
# Test agent names property
def test_groupchat_agent_names(agent1, agent2):
groupchat = GroupChat(agents=[agent1, agent2])
names = groupchat.agent_names
assert len(names) == 2
assert "Agent1" in names
assert "Agent2" in names
# Test GroupChatManager initialization
def test_groupchat_manager_initialization(agent1, agent2):
groupchat = GroupChat(agents=[agent1, agent2])
selector = agent1
manager = GroupChatManager(groupchat, selector)
assert manager.groupchat == groupchat
assert manager.selector == selector
# Test case to ensure GroupChatManager generates a reply from an agent
def test_groupchat_manager_generate_reply():
# Create a GroupChat with two agents
agents = [agent1, agent2]
groupchat = GroupChat(agents=agents, messages=[], max_round=10)
# Mock the OpenAI class and GroupChat selector
mocked_openai = MockOpenAI()
selector = agent1
# Initialize GroupChatManager
manager = GroupChatManager(
groupchat=groupchat, selector=selector, openai=mocked_openai
def setup_test_agents():
model = OpenAIChat(
openai_api_key=os.getenv("OPENAI_API_KEY"),
model_name="gpt-4",
temperature=0.1,
)
# Generate a reply
task = "Write me a riddle"
reply = manager(task)
return [
Agent(
agent_name="Agent1",
system_prompt="You only respond with 'A'",
llm=model,
),
Agent(
agent_name="Agent2",
system_prompt="You only respond with 'B'",
llm=model,
),
Agent(
agent_name="Agent3",
system_prompt="You only respond with 'C'",
llm=model,
),
]
def test_round_robin_speaking():
chat = GroupChat(agents=setup_test_agents())
history = chat.run("Say your letter")
# Verify agents speak in order
responses = [
r.message for t in history.turns for r in t.responses
]
assert responses == ["A", "B", "C"] * (len(history.turns))
def test_concurrent_processing():
chat = GroupChat(agents=setup_test_agents())
tasks = ["Task1", "Task2", "Task3"]
histories = chat.concurrent_run(tasks)
assert len(histories) == len(tasks)
for history in histories:
assert history.total_messages > 0
def test_expertise_based_speaking():
agents = setup_test_agents()
chat = GroupChat(agents=agents, speaker_fn=expertise_based)
# Test each agent's expertise trigger
for agent in agents:
history = chat.run(f"Trigger {agent.system_prompt}")
first_response = history.turns[0].responses[0]
assert first_response.agent_name == agent.agent_name
def test_max_loops_limit():
max_loops = 3
chat = GroupChat(agents=setup_test_agents(), max_loops=max_loops)
history = chat.run("Test message")
assert len(history.turns) == max_loops
def test_error_handling():
broken_agent = Agent(
agent_name="BrokenAgent",
system_prompt="You raise errors",
llm=None,
)
# Check if a valid reply is generated
assert "role" in reply
assert "content" in reply
assert reply["role"] in groupchat.agent_names
chat = GroupChat(agents=[broken_agent])
history = chat.run("Trigger error")
assert "Error" in history.turns[0].responses[0].message
# Test case to ensure GroupChat selects the next speaker correctly
def test_groupchat_select_speaker():
agent3 = Agent(name="agent3", llm=llm)
agents = [agent1, agent2, agent3]
groupchat = GroupChat(agents=agents, messages=[], max_round=10)
# Initialize GroupChatManager with agent1 as selector
selector = agent1
manager = GroupChatManager(groupchat=groupchat, selector=selector)
def test_conversation_context():
agents = setup_test_agents()
complex_prompt = "Previous message refers to A. Now trigger B. Finally discuss C."
# Simulate selecting the next speaker
last_speaker = agent1
next_speaker = manager.select_speaker(
last_speaker=last_speaker, selector=selector
)
chat = GroupChat(agents=agents, speaker_fn=expertise_based)
history = chat.run(complex_prompt)
# Ensure the next speaker is agent2
assert next_speaker == agent2
responses = [
r.agent_name for t in history.turns for r in t.responses
]
assert all(agent.agent_name in responses for agent in agents)
# Test case to ensure GroupChat handles underpopulated group correctly
def test_groupchat_underpopulated_group():
agent1 = Agent(name="agent1", llm=llm)
agents = [agent1]
groupchat = GroupChat(agents=agents, messages=[], max_round=10)
def test_large_agent_group():
large_group = setup_test_agents() * 5 # 15 agents
chat = GroupChat(agents=large_group)
history = chat.run("Test scaling")
# Initialize GroupChatManager with agent1 as selector
selector = agent1
manager = GroupChatManager(groupchat=groupchat, selector=selector)
assert history.total_messages > len(large_group)
# Simulate selecting the next speaker in an underpopulated group
last_speaker = agent1
next_speaker = manager.select_speaker(
last_speaker=last_speaker, selector=selector
)
# Ensure the next speaker is the same as the last speaker in an underpopulated group
assert next_speaker == last_speaker
def test_long_conversations():
chat = GroupChat(agents=setup_test_agents(), max_loops=50)
history = chat.run("Long conversation test")
assert len(history.turns) == 50
assert history.total_messages > 100
# Test case to ensure GroupChatManager handles the maximum rounds correctly
def test_groupchat_max_rounds():
agents = [agent1, agent2]
groupchat = GroupChat(agents=agents, messages=[], max_round=2)
# Initialize GroupChatManager with agent1 as selector
selector = agent1
manager = GroupChatManager(groupchat=groupchat, selector=selector)
def test_stress_batched_runs():
chat = GroupChat(agents=setup_test_agents())
tasks = ["Task"] * 100
histories = chat.batched_run(tasks)
# Simulate the conversation with max rounds
last_speaker = agent1
for _ in range(2):
next_speaker = manager.select_speaker(
last_speaker=last_speaker, selector=selector
)
last_speaker = next_speaker
assert len(histories) == len(tasks)
total_messages = sum(h.total_messages for h in histories)
assert total_messages > len(tasks) * 3
# Try one more round, should stay with the last speaker
next_speaker = manager.select_speaker(
last_speaker=last_speaker, selector=selector
)
# Ensure the next speaker is the same as the last speaker after reaching max rounds
assert next_speaker == last_speaker
if __name__ == "__main__":
load_dotenv()
functions = [
test_round_robin_speaking,
test_concurrent_processing,
test_expertise_based_speaking,
test_max_loops_limit,
test_error_handling,
test_conversation_context,
test_large_agent_group,
test_long_conversations,
test_stress_batched_runs,
]
# Continue adding more test cases as needed to cover various scenarios and functionalities of the code.
for func in functions:
try:
print(f"Running {func.__name__}...")
func()
print("✓ Passed")
except Exception as e:
print(f"✗ Failed: {str(e)}")

@ -0,0 +1,219 @@
import os
from swarms.structs.agent import Agent
from swarms.structs.multi_agent_orchestrator import MultiAgentRouter
def create_test_agent(name: str) -> Agent:
"""Helper function to create a test agent"""
return Agent(
agent_name=name,
description=f"Test {name}",
system_prompt=f"You are a {name}",
model_name="openai/gpt-4o",
)
def test_boss_router_initialization():
"""Test MultiAgentRouter initialization"""
print("\nTesting MultiAgentRouter initialization...")
# Test successful initialization
try:
agents = [
create_test_agent("TestAgent1"),
create_test_agent("TestAgent2"),
]
router = MultiAgentRouter(agents=agents)
assert (
router.name == "swarm-router"
), "Default name should be 'swarm-router'"
assert len(router.agents) == 2, "Should have 2 agents"
print("✓ Basic initialization successful")
except Exception as e:
print(f"✗ Basic initialization failed: {str(e)}")
# Test initialization without API key
try:
temp_key = os.getenv("OPENAI_API_KEY")
os.environ["OPENAI_API_KEY"] = ""
success = False
try:
router = MultiAgentRouter(agents=[])
except ValueError as e:
success = str(e) == "OpenAI API key must be provided"
os.environ["OPENAI_API_KEY"] = temp_key
assert (
success
), "Should raise ValueError when API key is missing"
print("✓ API key validation successful")
except Exception as e:
print(f"✗ API key validation failed: {str(e)}")
def test_boss_system_prompt():
"""Test system prompt generation"""
print("\nTesting system prompt generation...")
try:
agents = [
create_test_agent("Agent1"),
create_test_agent("Agent2"),
]
router = MultiAgentRouter(agents=agents)
prompt = router._create_boss_system_prompt()
# Check if prompt contains agent information
assert (
"Agent1" in prompt
), "Prompt should contain first agent name"
assert (
"Agent2" in prompt
), "Prompt should contain second agent name"
assert (
"You are a boss agent" in prompt
), "Prompt should contain boss agent description"
print("✓ System prompt generation successful")
except Exception as e:
print(f"✗ System prompt generation failed: {str(e)}")
def test_find_agent_in_list():
"""Test agent finding functionality"""
print("\nTesting agent finding functionality...")
try:
agent1 = create_test_agent("Agent1")
agent2 = create_test_agent("Agent2")
router = MultiAgentRouter(agents=[agent1, agent2])
# Test finding existing agent
assert "Agent1" in router.agents, "Should find existing agent"
assert (
"NonexistentAgent" not in router.agents
), "Should not find nonexistent agent"
print("✓ Agent finding successful")
except Exception as e:
print(f"✗ Agent finding failed: {str(e)}")
def test_task_routing():
"""Test task routing functionality"""
print("\nTesting task routing...")
try:
# Create test agents
agents = [
create_test_agent("CodeAgent"),
create_test_agent("WritingAgent"),
]
router = MultiAgentRouter(agents=agents)
# Test routing a coding task
result = router.route_task(
"Write a Python function to sort a list"
)
assert result["boss_decision"]["selected_agent"] in [
"CodeAgent",
"WritingAgent",
], "Should select an appropriate agent"
assert (
"execution" in result
), "Result should contain execution details"
assert (
"total_time" in result
), "Result should contain timing information"
print("✓ Task routing successful")
except Exception as e:
print(f"✗ Task routing failed: {str(e)}")
def test_batch_routing():
"""Test batch routing functionality"""
print("\nTesting batch routing...")
try:
agents = [create_test_agent("TestAgent")]
router = MultiAgentRouter(agents=agents)
tasks = ["Task 1", "Task 2", "Task 3"]
# Test sequential batch routing
results = router.batch_route(tasks)
assert len(results) == len(
tasks
), "Should return result for each task"
print("✓ Sequential batch routing successful")
# Test concurrent batch routing
concurrent_results = router.concurrent_batch_route(tasks)
assert len(concurrent_results) == len(
tasks
), "Should return result for each task"
print("✓ Concurrent batch routing successful")
except Exception as e:
print(f"✗ Batch routing failed: {str(e)}")
def test_error_handling():
"""Test error handling in various scenarios"""
print("\nTesting error handling...")
try:
router = MultiAgentRouter(agents=[])
# Test routing with no agents
success = False
try:
router.route_task("Test task")
except Exception:
success = True
assert success, "Should handle routing with no agents"
print("✓ Empty agent list handling successful")
# Test with invalid task
success = False
router = MultiAgentRouter(
agents=[create_test_agent("TestAgent")]
)
try:
router.route_task("")
except ValueError:
success = True
assert success, "Should handle empty task"
print("✓ Invalid task handling successful")
except Exception as e:
print(f"✗ Error handling failed: {str(e)}")
def run_all_tests():
"""Run all test functions"""
print("Starting MultiAgentRouter tests...")
test_functions = [
test_boss_router_initialization,
test_boss_system_prompt,
test_find_agent_in_list,
test_task_routing,
test_batch_routing,
test_error_handling,
]
total_tests = len(test_functions)
passed_tests = 0
for test_func in test_functions:
try:
test_func()
passed_tests += 1
except Exception as e:
print(
f"Test {test_func.__name__} failed with error: {str(e)}"
)
print(
f"\nTest Results: {passed_tests}/{total_tests} tests passed"
)
if __name__ == "__main__":
run_all_tests()

@ -0,0 +1,177 @@
import asyncio
import time
from swarms.structs.agent import Agent
from swarms.structs.multi_process_workflow import MultiProcessWorkflow
def create_test_agent(name: str) -> Agent:
"""Create a test agent that simply returns its input with a timestamp"""
return Agent(
agent_name=name,
system_prompt=f"Test prompt for {name}",
model_name="gpt-4o-mini",
max_loops=1,
)
def test_initialization():
"""Test basic workflow initialization"""
print("\n=== Testing Workflow Initialization ===")
try:
agents = [create_test_agent(f"agent{i}") for i in range(3)]
workflow = MultiProcessWorkflow(max_workers=2, agents=agents)
print("✓ Created workflow with configuration:")
print(f" - Max workers: {workflow.max_workers}")
print(f" - Number of agents: {len(workflow.agents)}")
print(f" - Autosave: {workflow.autosave}")
print("✓ Initialization test passed")
except Exception as e:
print(f"✗ Initialization test failed: {str(e)}")
raise
def test_execute_task():
"""Test execution of a single task"""
print("\n=== Testing Task Execution ===")
try:
agents = [create_test_agent("test_agent")]
workflow = MultiProcessWorkflow(agents=agents)
test_task = "Return this message with timestamp"
result = workflow.execute_task(test_task)
print("✓ Task executed successfully")
print(f" - Input task: {test_task}")
print(f" - Result: {result}")
print("✓ Task execution test passed")
except Exception as e:
print(f"✗ Task execution test failed: {str(e)}")
raise
def test_parallel_run():
"""Test parallel execution of tasks"""
print("\n=== Testing Parallel Run ===")
try:
agents = [create_test_agent(f"agent{i}") for i in range(3)]
workflow = MultiProcessWorkflow(max_workers=2, agents=agents)
test_task = "Process this in parallel"
results = workflow.run(test_task)
print("✓ Parallel execution completed")
# print(f" - Number of results: {len(results)}")
print(f" - Results: {results}")
print("✓ Parallel run test passed")
except Exception as e:
print(f"✗ Parallel run test failed: {str(e)}")
raise
async def test_async_run():
"""Test asynchronous execution of tasks"""
print("\n=== Testing Async Run ===")
try:
agents = [create_test_agent(f"agent{i}") for i in range(3)]
workflow = MultiProcessWorkflow(max_workers=2, agents=agents)
test_task = "Process this asynchronously"
results = await workflow.async_run(test_task)
print("✓ Async execution completed")
print(f" - Number of results: {len(results)}")
print(f" - Results: {results}")
print("✓ Async run test passed")
except Exception as e:
print(f"✗ Async run test failed: {str(e)}")
raise
def test_batched_run():
"""Test batch execution of tasks"""
print("\n=== Testing Batched Run ===")
try:
agents = [create_test_agent(f"agent{i}") for i in range(2)]
workflow = MultiProcessWorkflow(max_workers=2, agents=agents)
tasks = [f"Batch task {i}" for i in range(5)]
results = workflow.batched_run(tasks, batch_size=2)
print("✓ Batch execution completed")
print(f" - Number of tasks: {len(tasks)}")
print(" - Batch size: 2")
print(f" - Results: {results}")
print("✓ Batched run test passed")
except Exception as e:
print(f"✗ Batched run test failed: {str(e)}")
raise
def test_concurrent_run():
"""Test concurrent execution of tasks"""
print("\n=== Testing Concurrent Run ===")
try:
agents = [create_test_agent(f"agent{i}") for i in range(2)]
workflow = MultiProcessWorkflow(max_workers=2, agents=agents)
tasks = [f"Concurrent task {i}" for i in range(4)]
results = workflow.concurrent_run(tasks)
print("✓ Concurrent execution completed")
print(f" - Number of tasks: {len(tasks)}")
print(f" - Results: {results}")
print("✓ Concurrent run test passed")
except Exception as e:
print(f"✗ Concurrent run test failed: {str(e)}")
raise
def test_error_handling():
"""Test error handling in workflow"""
print("\n=== Testing Error Handling ===")
try:
# Create workflow with no agents to trigger error
workflow = MultiProcessWorkflow(max_workers=2, agents=None)
result = workflow.execute_task(
"This should handle the error gracefully"
)
print("✓ Error handled gracefully")
print(f" - Result when no agents: {result}")
print("✓ Error handling test passed")
except Exception as e:
print(f"✗ Error handling test failed: {str(e)}")
raise
async def run_all_tests():
"""Run all tests"""
print("\n=== Starting MultiProcessWorkflow Test Suite ===")
start_time = time.time()
try:
# Run synchronous tests
test_initialization()
test_execute_task()
test_parallel_run()
test_batched_run()
test_concurrent_run()
test_error_handling()
# Run async test
await test_async_run()
end_time = time.time()
duration = round(end_time - start_time, 2)
print("\n=== Test Suite Completed Successfully ===")
print(f"Time taken: {duration} seconds")
except Exception as e:
print("\n=== Test Suite Failed ===")
print(f"Error: {str(e)}")
raise
if __name__ == "__main__":
asyncio.run(run_all_tests())

@ -0,0 +1,226 @@
import os
import asyncio
from loguru import logger
from swarms.structs.agent import Agent
from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
def create_test_csv() -> str:
"""Create a test CSV file with agent configurations."""
print("\nStarting creation of test CSV file")
try:
csv_content = """agent_name,description,system_prompt,task
test_agent_1,Test Agent 1,System prompt 1,Task 1
test_agent_2,Test Agent 2,System prompt 2,Task 2"""
file_path = "test_agents.csv"
with open(file_path, "w") as f:
f.write(csv_content)
print(f"Created CSV with content:\n{csv_content}")
print(f"CSV file created at: {file_path}")
return file_path
except Exception as e:
logger.error(f"Failed to create test CSV: {str(e)}")
raise
def create_test_agent(name: str) -> Agent:
"""Create a test agent with specified name."""
print(f"\nCreating test agent: {name}")
try:
agent = Agent(
agent_name=name,
system_prompt=f"Test prompt for {name}",
model_name="gpt-4o-mini",
max_loops=1,
autosave=True,
verbose=True,
)
print(f"Created agent: {name}")
return agent
except Exception as e:
logger.error(f"Failed to create agent {name}: {str(e)}")
raise
def test_swarm_initialization() -> None:
"""Test basic swarm initialization."""
print("\n[TEST] Starting swarm initialization test")
try:
print("Creating test agents...")
agents = [
create_test_agent("agent1"),
create_test_agent("agent2"),
]
print("Initializing swarm...")
swarm = SpreadSheetSwarm(
name="Test Swarm",
description="Test Description",
agents=agents,
max_loops=2,
)
print("Verifying swarm configuration...")
assert swarm.name == "Test Swarm"
assert swarm.description == "Test Description"
assert len(swarm.agents) == 2
assert swarm.max_loops == 2
print("✅ Swarm initialization test PASSED")
except Exception as e:
logger.error(f"❌ Swarm initialization test FAILED: {str(e)}")
raise
async def test_load_from_csv() -> None:
"""Test loading agent configurations from CSV."""
print("\n[TEST] Starting CSV loading test")
try:
csv_path = create_test_csv()
print("Initializing swarm with CSV...")
swarm = SpreadSheetSwarm(load_path=csv_path)
print("Loading configurations...")
await swarm._load_from_csv()
print("Verifying loaded configurations...")
assert len(swarm.agents) == 2
assert len(swarm.agent_configs) == 2
assert "test_agent_1" in swarm.agent_configs
assert "test_agent_2" in swarm.agent_configs
os.remove(csv_path)
print(f"Cleaned up test file: {csv_path}")
print("✅ CSV loading test PASSED")
except Exception as e:
logger.error(f"❌ CSV loading test FAILED: {str(e)}")
raise
async def test_run_tasks() -> None:
"""Test running tasks with multiple agents."""
print("\n[TEST] Starting task execution test")
try:
print("Setting up test swarm...")
agents = [
create_test_agent("agent1"),
create_test_agent("agent2"),
]
swarm = SpreadSheetSwarm(agents=agents, max_loops=1)
test_task = "Test task for all agents"
print(f"Running test task: {test_task}")
await swarm._run_tasks(test_task)
print("Verifying task execution...")
assert swarm.metadata.tasks_completed == 2
assert len(swarm.metadata.outputs) == 2
print("✅ Task execution test PASSED")
except Exception as e:
logger.error(f"❌ Task execution test FAILED: {str(e)}")
raise
def test_output_tracking() -> None:
"""Test tracking of task outputs."""
print("\n[TEST] Starting output tracking test")
try:
print("Creating test swarm...")
swarm = SpreadSheetSwarm(agents=[create_test_agent("agent1")])
print("Tracking test output...")
swarm._track_output("agent1", "Test task", "Test result")
print("Verifying output tracking...")
assert swarm.metadata.tasks_completed == 1
assert len(swarm.metadata.outputs) == 1
assert swarm.metadata.outputs[0].agent_name == "agent1"
print("✅ Output tracking test PASSED")
except Exception as e:
logger.error(f"❌ Output tracking test FAILED: {str(e)}")
raise
async def test_save_to_csv() -> None:
"""Test saving metadata to CSV."""
print("\n[TEST] Starting CSV saving test")
try:
print("Setting up test data...")
swarm = SpreadSheetSwarm(
agents=[create_test_agent("agent1")],
save_file_path="test_output.csv",
)
swarm._track_output("agent1", "Test task", "Test result")
print("Saving to CSV...")
await swarm._save_to_csv()
print("Verifying file creation...")
assert os.path.exists(swarm.save_file_path)
os.remove(swarm.save_file_path)
print("Cleaned up test file")
print("✅ CSV saving test PASSED")
except Exception as e:
logger.error(f"❌ CSV saving test FAILED: {str(e)}")
raise
def test_json_export() -> None:
"""Test JSON export functionality."""
print("\n[TEST] Starting JSON export test")
try:
print("Creating test data...")
swarm = SpreadSheetSwarm(agents=[create_test_agent("agent1")])
swarm._track_output("agent1", "Test task", "Test result")
print("Exporting to JSON...")
json_output = swarm.export_to_json()
print("Verifying JSON output...")
assert isinstance(json_output, str)
assert "run_id" in json_output
assert "tasks_completed" in json_output
print("✅ JSON export test PASSED")
except Exception as e:
logger.error(f"❌ JSON export test FAILED: {str(e)}")
raise
async def run_all_tests() -> None:
"""Run all test functions."""
print("\n" + "=" * 50)
print("Starting SpreadsheetSwarm Test Suite")
print("=" * 50 + "\n")
try:
# Run synchronous tests
print("Running synchronous tests...")
test_swarm_initialization()
test_output_tracking()
test_json_export()
# Run asynchronous tests
print("\nRunning asynchronous tests...")
await test_load_from_csv()
await test_run_tasks()
await test_save_to_csv()
print("\n🎉 All tests completed successfully!")
print("=" * 50)
except Exception as e:
logger.error(f"\n❌ Test suite failed: {str(e)}")
print("=" * 50)
raise
if __name__ == "__main__":
# Run all tests
asyncio.run(run_all_tests())

@ -0,0 +1,301 @@
import asyncio
import time
from typing import List
from swarms.structs.agent import Agent
from swarms.structs.swarming_architectures import (
broadcast,
circular_swarm,
exponential_swarm,
geometric_swarm,
grid_swarm,
harmonic_swarm,
linear_swarm,
log_swarm,
mesh_swarm,
one_to_one,
one_to_three,
power_swarm,
pyramid_swarm,
sigmoid_swarm,
sinusoidal_swarm,
staircase_swarm,
star_swarm,
)
def create_test_agent(name: str) -> Agent:
"""Create a test agent with specified name"""
return Agent(
agent_name=name,
system_prompt=f"You are {name}. Respond with your name and the task you received.",
model_name="gpt-4o-mini",
max_loops=1,
)
def create_test_agents(num_agents: int) -> List[Agent]:
"""Create specified number of test agents"""
return [
create_test_agent(f"Agent{i+1}") for i in range(num_agents)
]
def print_separator():
print("\n" + "=" * 50 + "\n")
def test_circular_swarm():
"""Test and display circular swarm outputs"""
print_separator()
print("CIRCULAR SWARM TEST")
try:
agents = create_test_agents(3)
tasks = [
"Analyze data",
"Generate report",
"Summarize findings",
]
print("Running circular swarm with:")
print(f"Tasks: {tasks}\n")
result = circular_swarm(agents, tasks)
print("Circular Swarm Outputs:")
for log in result["history"]:
print(f"\nAgent: {log['agent_name']}")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
except Exception as e:
print(f"Error: {str(e)}")
def test_grid_swarm():
"""Test and display grid swarm outputs"""
print_separator()
print("GRID SWARM TEST")
try:
agents = create_test_agents(4) # 2x2 grid
tasks = ["Task A", "Task B", "Task C", "Task D"]
print("Running grid swarm with 2x2 grid")
print(f"Tasks: {tasks}\n")
print(grid_swarm(agents, tasks))
print(
"Grid Swarm completed - each agent processed tasks in its grid position"
)
except Exception as e:
print(f"Error: {str(e)}")
def test_linear_swarm():
"""Test and display linear swarm outputs"""
print_separator()
print("LINEAR SWARM TEST")
try:
agents = create_test_agents(3)
tasks = ["Research task", "Write content", "Review output"]
print("Running linear swarm with:")
print(f"Tasks: {tasks}\n")
result = linear_swarm(agents, tasks)
print("Linear Swarm Outputs:")
for log in result["history"]:
print(f"\nAgent: {log['agent_name']}")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
except Exception as e:
print(f"Error: {str(e)}")
def test_star_swarm():
"""Test and display star swarm outputs"""
print_separator()
print("STAR SWARM TEST")
try:
agents = create_test_agents(4) # 1 center + 3 peripheral
tasks = ["Coordinate workflow", "Process data"]
print("Running star swarm with:")
print(f"Center agent: {agents[0].agent_name}")
print(
f"Peripheral agents: {[agent.agent_name for agent in agents[1:]]}"
)
print(f"Tasks: {tasks}\n")
result = star_swarm(agents, tasks)
print("Star Swarm Outputs:")
for log in result["history"]:
print(f"\nAgent: {log['agent_name']}")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
except Exception as e:
print(f"Error: {str(e)}")
def test_mesh_swarm():
"""Test and display mesh swarm outputs"""
print_separator()
print("MESH SWARM TEST")
try:
agents = create_test_agents(3)
tasks = [
"Analyze data",
"Process information",
"Generate insights",
]
print("Running mesh swarm with:")
print(f"Tasks: {tasks}\n")
result = mesh_swarm(agents, tasks)
print(f"Mesh Swarm Outputs: {result}")
for log in result["history"]:
print(f"\nAgent: {log['agent_name']}")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
except Exception as e:
print(f"Error: {str(e)}")
def test_pyramid_swarm():
"""Test and display pyramid swarm outputs"""
print_separator()
print("PYRAMID SWARM TEST")
try:
agents = create_test_agents(6) # 1-2-3 pyramid
tasks = [
"Top task",
"Middle task 1",
"Middle task 2",
"Bottom task 1",
"Bottom task 2",
"Bottom task 3",
]
print("Running pyramid swarm with:")
print(f"Tasks: {tasks}\n")
result = pyramid_swarm(agents, tasks)
print(f"Pyramid Swarm Outputs: {result}")
for log in result["history"]:
print(f"\nAgent: {log['agent_name']}")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
except Exception as e:
print(f"Error: {str(e)}")
async def test_communication_patterns():
"""Test and display agent communication patterns"""
print_separator()
print("COMMUNICATION PATTERNS TEST")
try:
sender = create_test_agent("Sender")
receiver = create_test_agent("Receiver")
task = "Process and relay this message"
print("Testing One-to-One Communication:")
result = one_to_one(sender, receiver, task)
print(f"\nOne-to-One Communication Outputs: {result}")
for log in result["history"]:
print(f"\nAgent: {log['agent_name']}")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
print("\nTesting One-to-Three Communication:")
receivers = create_test_agents(3)
await one_to_three(sender, receivers, task)
print("\nTesting Broadcast Communication:")
broadcast_receivers = create_test_agents(5)
await broadcast(sender, broadcast_receivers, task)
except Exception as e:
print(f"Error: {str(e)}")
def test_mathematical_swarms():
"""Test and display mathematical swarm patterns"""
print_separator()
print("MATHEMATICAL SWARMS TEST")
try:
agents = create_test_agents(8)
base_tasks = ["Calculate", "Process", "Analyze"]
# Test each mathematical swarm
for swarm_type, swarm_func in [
("Power Swarm", power_swarm),
("Log Swarm", log_swarm),
("Exponential Swarm", exponential_swarm),
("Geometric Swarm", geometric_swarm),
("Harmonic Swarm", harmonic_swarm),
]:
print(f"\nTesting {swarm_type}:")
tasks = [f"{task} in {swarm_type}" for task in base_tasks]
print(f"Tasks: {tasks}")
swarm_func(agents, tasks.copy())
except Exception as e:
print(f"Error: {str(e)}")
def test_pattern_swarms():
"""Test and display pattern-based swarms"""
print_separator()
print("PATTERN-BASED SWARMS TEST")
try:
agents = create_test_agents(10)
task = "Process according to pattern"
for swarm_type, swarm_func in [
("Staircase Swarm", staircase_swarm),
("Sigmoid Swarm", sigmoid_swarm),
("Sinusoidal Swarm", sinusoidal_swarm),
]:
print(f"\nTesting {swarm_type}:")
print(f"Task: {task}")
swarm_func(agents, task)
except Exception as e:
print(f"Error: {str(e)}")
def run_all_tests():
"""Run all swarm architecture tests"""
print(
"\n=== Starting Swarm Architectures Test Suite with Outputs ==="
)
start_time = time.time()
try:
# Test basic swarm patterns
test_circular_swarm()
test_grid_swarm()
test_linear_swarm()
test_star_swarm()
test_mesh_swarm()
test_pyramid_swarm()
# Test mathematical and pattern swarms
test_mathematical_swarms()
test_pattern_swarms()
# Test communication patterns
asyncio.run(test_communication_patterns())
end_time = time.time()
duration = round(end_time - start_time, 2)
print("\n=== Test Suite Completed Successfully ===")
print(f"Time taken: {duration} seconds")
except Exception as e:
print("\n=== Test Suite Failed ===")
print(f"Error: {str(e)}")
raise
if __name__ == "__main__":
run_all_tests()

@ -1,52 +0,0 @@
from unittest.mock import Mock, patch
import pytest
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
@pytest.fixture
def swarm_network():
agents = [Agent(id=f"Agent_{i}") for i in range(5)]
return SwarmNetwork(agents=agents)
def test_swarm_network_init(swarm_network):
assert isinstance(swarm_network.agents, list)
assert len(swarm_network.agents) == 5
@patch("swarms.structs.swarm_net.SwarmNetwork.logger")
def test_run(mock_logger, swarm_network):
swarm_network.run()
assert (
mock_logger.info.call_count == 10
) # 2 log messages per agent
def test_run_with_mocked_agents(mocker, swarm_network):
mock_agents = [Mock(spec=Agent) for _ in range(5)]
mocker.patch.object(swarm_network, "agents", mock_agents)
swarm_network.run()
for mock_agent in mock_agents:
assert mock_agent.run.called
def test_swarm_network_with_no_agents():
swarm_network = SwarmNetwork(agents=[])
assert swarm_network.agents == []
def test_swarm_network_add_agent(swarm_network):
new_agent = Agent(id="Agent_5")
swarm_network.add_agent(new_agent)
assert len(swarm_network.agents) == 6
assert swarm_network.agents[-1] == new_agent
def test_swarm_network_remove_agent(swarm_network):
agent_to_remove = swarm_network.agents[0]
swarm_network.remove_agent(agent_to_remove)
assert len(swarm_network.agents) == 4
assert agent_to_remove not in swarm_network.agents

@ -0,0 +1,306 @@
import asyncio
from typing import List
from swarms.structs.agent import Agent
from swarms.structs.swarming_architectures import (
broadcast,
circular_swarm,
exponential_swarm,
fibonacci_swarm,
grid_swarm,
linear_swarm,
mesh_swarm,
one_to_three,
prime_swarm,
sigmoid_swarm,
sinusoidal_swarm,
staircase_swarm,
star_swarm,
)
def create_finance_agents() -> List[Agent]:
"""Create specialized finance agents"""
return [
Agent(
agent_name="MarketAnalyst",
system_prompt="You are a market analysis expert. Analyze market trends and provide insights.",
model_name="gpt-4o-mini",
),
Agent(
agent_name="RiskManager",
system_prompt="You are a risk management specialist. Evaluate risks and provide mitigation strategies.",
model_name="gpt-4o-mini",
),
Agent(
agent_name="PortfolioManager",
system_prompt="You are a portfolio management expert. Optimize investment portfolios and asset allocation.",
model_name="gpt-4o-mini",
),
Agent(
agent_name="ComplianceOfficer",
system_prompt="You are a financial compliance expert. Ensure regulatory compliance and identify issues.",
model_name="gpt-4o-mini",
),
]
def create_healthcare_agents() -> List[Agent]:
"""Create specialized healthcare agents"""
return [
Agent(
agent_name="Diagnostician",
system_prompt="You are a medical diagnostician. Analyze symptoms and suggest potential diagnoses.",
model_name="gpt-4o-mini",
),
Agent(
agent_name="Treatment_Planner",
system_prompt="You are a treatment planning specialist. Develop comprehensive treatment plans.",
model_name="gpt-4o-mini",
),
Agent(
agent_name="MedicalResearcher",
system_prompt="You are a medical researcher. Analyze latest research and provide evidence-based recommendations.",
model_name="gpt-4o-mini",
),
Agent(
agent_name="PatientCareCoordinator",
system_prompt="You are a patient care coordinator. Manage patient care workflow and coordination.",
model_name="gpt-4o-mini",
),
]
def print_separator():
print("\n" + "=" * 50 + "\n")
def run_finance_circular_swarm():
"""Investment analysis workflow using circular swarm"""
print_separator()
print("FINANCE - INVESTMENT ANALYSIS (Circular Swarm)")
agents = create_finance_agents()
tasks = [
"Analyze Tesla stock performance for Q4 2024",
"Assess market risks and potential hedging strategies",
"Recommend portfolio adjustments based on analysis",
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = circular_swarm(agents, tasks)
print("\nResults:")
for log in result["history"]:
print(f"\n{log['agent_name']}:")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
def run_healthcare_grid_swarm():
"""Patient diagnosis and treatment planning using grid swarm"""
print_separator()
print("HEALTHCARE - PATIENT DIAGNOSIS (Grid Swarm)")
agents = create_healthcare_agents()
tasks = [
"Review patient symptoms: fever, fatigue, joint pain",
"Research latest treatment protocols",
"Develop preliminary treatment plan",
"Coordinate with specialists",
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = grid_swarm(agents, tasks)
print("\nGrid swarm processing completed")
print(result)
def run_finance_linear_swarm():
"""Loan approval process using linear swarm"""
print_separator()
print("FINANCE - LOAN APPROVAL PROCESS (Linear Swarm)")
agents = create_finance_agents()[:3]
tasks = [
"Review loan application and credit history",
"Assess risk factors and compliance requirements",
"Generate final loan recommendation",
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = linear_swarm(agents, tasks)
print("\nResults:")
for log in result["history"]:
print(f"\n{log['agent_name']}:")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
def run_healthcare_star_swarm():
"""Complex medical case management using star swarm"""
print_separator()
print("HEALTHCARE - COMPLEX CASE MANAGEMENT (Star Swarm)")
agents = create_healthcare_agents()
tasks = [
"Complex case: Patient with multiple chronic conditions",
"Develop integrated care plan",
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = star_swarm(agents, tasks)
print("\nResults:")
for log in result["history"]:
print(f"\n{log['agent_name']}:")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
def run_finance_mesh_swarm():
"""Market risk assessment using mesh swarm"""
print_separator()
print("FINANCE - MARKET RISK ASSESSMENT (Mesh Swarm)")
agents = create_finance_agents()
tasks = [
"Analyze global market conditions",
"Assess currency exchange risks",
"Evaluate sector-specific risks",
"Review portfolio exposure",
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
result = mesh_swarm(agents, tasks)
print("\nResults:")
for log in result["history"]:
print(f"\n{log['agent_name']}:")
print(f"Task: {log['task']}")
print(f"Response: {log['response']}")
def run_mathematical_finance_swarms():
"""Complex financial analysis using mathematical swarms"""
print_separator()
print("FINANCE - MARKET PATTERN ANALYSIS")
agents = create_finance_agents()
tasks = [
"Analyze historical market patterns",
"Predict market trends using technical analysis",
"Identify potential arbitrage opportunities",
]
print("\nTasks:")
for i, task in enumerate(tasks, 1):
print(f"{i}. {task}")
print("\nFibonacci Swarm Results:")
result = fibonacci_swarm(agents, tasks.copy())
print(result)
print("\nPrime Swarm Results:")
result = prime_swarm(agents, tasks.copy())
print(result)
print("\nExponential Swarm Results:")
result = exponential_swarm(agents, tasks.copy())
print(result)
def run_healthcare_pattern_swarms():
"""Patient monitoring using pattern swarms"""
print_separator()
print("HEALTHCARE - PATIENT MONITORING PATTERNS")
agents = create_healthcare_agents()
task = "Monitor and analyze patient vital signs: BP, heart rate, temperature, O2 saturation"
print(f"\nTask: {task}")
print("\nStaircase Pattern Analysis:")
result = staircase_swarm(agents, task)
print(result)
print("\nSigmoid Pattern Analysis:")
result = sigmoid_swarm(agents, task)
print(result)
print("\nSinusoidal Pattern Analysis:")
result = sinusoidal_swarm(agents, task)
print(result)
async def run_communication_examples():
"""Communication patterns for emergency scenarios"""
print_separator()
print("EMERGENCY COMMUNICATION PATTERNS")
# Finance market alert
finance_sender = create_finance_agents()[0]
finance_receivers = create_finance_agents()[1:]
market_alert = "URGENT: Major market volatility detected - immediate risk assessment required"
print("\nFinance Market Alert:")
print(f"Alert: {market_alert}")
result = await broadcast(
finance_sender, finance_receivers, market_alert
)
print("\nBroadcast Results:")
for log in result["history"]:
print(f"\n{log['agent_name']}:")
print(f"Response: {log['response']}")
# Healthcare emergency
health_sender = create_healthcare_agents()[0]
health_receivers = create_healthcare_agents()[1:4]
emergency_case = "EMERGENCY: Trauma patient with multiple injuries - immediate consultation required"
print("\nHealthcare Emergency:")
print(f"Case: {emergency_case}")
result = await one_to_three(
health_sender, health_receivers, emergency_case
)
print("\nConsultation Results:")
for log in result["history"]:
print(f"\n{log['agent_name']}:")
print(f"Response: {log['response']}")
async def run_all_examples():
"""Execute all swarm examples"""
print("\n=== SWARM ARCHITECTURE EXAMPLES ===\n")
# Finance examples
run_finance_circular_swarm()
run_finance_linear_swarm()
run_finance_mesh_swarm()
run_mathematical_finance_swarms()
# Healthcare examples
run_healthcare_grid_swarm()
run_healthcare_star_swarm()
run_healthcare_pattern_swarms()
# Communication examples
await run_communication_examples()
print("\n=== ALL EXAMPLES COMPLETED ===")
if __name__ == "__main__":
asyncio.run(run_all_examples())
Loading…
Cancel
Save