Merge branch 'master' into 20250725fixmcpissues

pull/983/head
王祥宇 2 days ago committed by GitHub
commit 668cb3965e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -1,25 +0,0 @@
# Auto-fix formatting/lint issues on PRs and pushes to main, then commit
# the fixes back via the autofix.ci service.
name: autofix.ci
on:
  pull_request:
  push:
    branches: ["main"]
# Least-privilege token: autofix.ci's own app performs the write-back.
permissions:
  contents: read
jobs:
  autofix:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # YAML formatting via yamlfmt (requires a Go toolchain).
      - uses: actions/setup-go@v5
      - run: go install github.com/google/yamlfmt/cmd/yamlfmt@latest
      - run: yamlfmt .
      # Python formatting and autofixable lint via ruff.
      - uses: actions/setup-python@v5
      - run: pip install ruff
      - run: ruff format .
      - run: ruff check --fix .
      # Pinned to a full commit SHA as required by autofix.ci.
      - uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27

@ -1,43 +0,0 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
#
# This workflow file requires a free account on Bearer.com to manage findings, notifications and more.
# See https://docs.bearer.com/guides/bearer-cloud/
name: Bearer
on:
  push:
    branches: ["master"]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: ["master"]
  schedule:
    - cron: '24 22 * * 6'
permissions:
  contents: read          # for actions/checkout to fetch code
  security-events: write  # for github/codeql-action/upload-sarif to upload SARIF results
  actions: read           # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
jobs:
  bearer:
    runs-on: ubuntu-latest
    steps:
      # Checkout project source
      - uses: actions/checkout@v4
      # Scan code using Bearer CLI (pinned to a commit SHA; exit-code 0 so
      # findings are reported via SARIF rather than failing the job).
      - name: Run Report
        id: report
        uses: bearer/bearer-action@828eeb928ce2f4a7ca5ed57fb8b59508cb8c79bc
        with:
          api-key: ${{ secrets.BEARER_TOKEN }}
          format: sarif
          output: results.sarif
          exit-code: 0
      # Upload SARIF file generated in previous step
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: results.sarif

@ -0,0 +1,73 @@
# Lint (black/ruff) and run the pytest suite on pushes and PRs to master.
name: Code Quality and Tests
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
jobs:
  code-quality-and-test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Quoted so YAML does not parse 3.10 as the float 3.1.
        python-version: ["3.10"]
    steps:
      # Step 1: Check out the repository
      - name: Checkout repository
        uses: actions/checkout@v4
      # Step 2: Set up Python
      # NOTE: the `id` is required — the cache key below reads
      # steps.setup-python.outputs.python-version.
      - name: Set up Python ${{ matrix.python-version }}
        id: setup-python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      # Step 3: Install Poetry for dependency management
      - name: Install Poetry
        uses: snok/install-poetry@v1
        with:
          virtualenvs-create: true
          virtualenvs-in-project: true
      # Step 4: Cache dependencies to speed up subsequent runs
      - name: Load cached venv
        id: cached-poetry-dependencies
        uses: actions/cache@v4
        with:
          path: .venv
          key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
      # Step 5: Install dependencies including dev dependencies
      # (skipped when the cached venv was restored above).
      - name: Install dependencies
        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
        run: poetry install --no-interaction --with dev --all-extras
      # Step 7: Run Black formatting check on swarms folder
      - name: Check Black formatting on swarms folder
        run: |
          poetry run black swarms/ --check --diff
      # Step 8: Run Ruff linting on swarms folder
      - name: Run Ruff linting on swarms folder
        run: |
          poetry run ruff check swarms/
      # Step 10: Run tests with API keys
      - name: Run tests
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
        run: |
          poetry run pytest tests/ -v --tb=short
      # Step 11: Upload test results as artifacts (optional)
      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-results-${{ matrix.python-version }}
          path: |
            tests/
            .coverage
          retention-days: 7

@ -1,89 +0,0 @@
# .github/workflows/comprehensive_tests.yml
name: Swarms Comprehensive Tests
# This workflow triggers on pushes and pull requests to the master branch.
on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # You can test against multiple Python versions here if needed.
        # Quoted so YAML does not parse 3.10 as the float 3.1.
        python-version: ["3.10"]
    steps:
      # Step 1: Check out the code.
      # For pull requests, this action automatically checks out the code
      # from the PR's branch, not the master branch. This is the key
      # to testing the proposed changes.
      - name: Checkout repository
        uses: actions/checkout@v4
      # Step 2: Set up the specified Python version.
      # NOTE: the `id` is required — the cache key below reads
      # steps.setup-python.outputs.python-version.
      - name: Set up Python ${{ matrix.python-version }}
        id: setup-python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      # Step 3: Install Poetry for dependency management.
      - name: Install Poetry
        uses: snok/install-poetry@v1
        with:
          virtualenvs-create: true
          virtualenvs-in-project: true
      # Step 4: Cache dependencies to speed up subsequent runs.
      # (v4: the v3 cache action is deprecated.)
      - name: Load cached venv
        id: cached-poetry-dependencies
        uses: actions/cache@v4
        with:
          path: .venv
          key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
      # Step 5: Install dependencies and the project package itself.
      # This is the crucial step. 'poetry install' will install all dependencies
      # and also install the 'swarms' package from the checked-out PR code
      # in editable mode within the virtual environment.
      - name: Install dependencies
        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
        run: poetry install --no-interaction --with dev --all-extras
      # Step 6: Create dummy image files required for multi-modal tests.
      # This ensures your tests are self-contained.
      - name: Create dummy image files for testing
        run: |
          mkdir -p tests/test_data
          touch tests/test_data/image1.jpg
          touch tests/test_data/image2.png
          echo "dummy image data" > tests/test_data/image1.jpg
          echo "dummy image data" > tests/test_data/image2.png
      # Step 7: Run the comprehensive test suite.
      # 'poetry run' executes the command within the virtual environment,
      # ensuring that when 'tests/comprehensive_test.py' imports 'swarms',
      # it's importing the code from the pull request.
      - name: Run Comprehensive Test Suite
        env:
          # Securely pass API keys and other secrets to the test environment.
          # These must be configured in your repository's secrets.
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # GITHUB_REPO_OWNER: "kyegomez"
          # GITHUB_REPO_NAME: "swarms"
        run: |
          poetry run python tests/comprehensive_test.py
      # Step 8: Upload the generated test report as an artifact.
      # This happens even if the previous steps fail, allowing you to debug.
      # (v4: upload-artifact v3 was deprecated and no longer runs.)
      - name: Upload Test Report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-report-${{ matrix.python-version }}
          path: test_runs/

@ -62,7 +62,7 @@ jobs:
# Build and push Docker image
- name: Build and push Docker image
  id: build-and-push
  uses: docker/build-push-action@v6
  with:
    context: .
    push: ${{ github.event_name != 'pull_request' }}

@ -1,60 +0,0 @@
# Run the pytest suite on every push and nightly; also run the example
# scripts in a second job.
name: Tests
on:
  push:
  schedule:
    - cron: "0 0 * * *"
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Python
        uses: actions/setup-python@v5
      - name: Install Poetry
        uses: snok/install-poetry@v1
      - name: Setup a local virtual environment
        run: |
          poetry config virtualenvs.create true --local
          poetry config virtualenvs.in-project true --local
      # Cache keyed on the lockfile so the venv is rebuilt only when
      # dependencies change.
      - uses: actions/cache@v4
        name: Define a cache for the virtual environment file
        with:
          path: ./.venv
          key: venv-${{ hashFiles('poetry.lock') }}
      - name: Install the project dependencies
        run: poetry install
      - name: Install OpenCV
        run: sudo apt-get install python3-opencv
      # NOTE(review): each `run` step is a fresh shell, so this `source`
      # does not persist to later steps; `poetry run` below is what
      # actually uses the venv — confirm before removing.
      - name: Enter the virtual environment
        run: source $VENV
      - name: Run the tests
        run: poetry run pytest --verbose
  run-examples:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Python
        uses: actions/setup-python@v5
      - name: Install Poetry
        uses: snok/install-poetry@v1
      - name: Setup a local virtual environment
        run: |
          poetry config virtualenvs.create true --local
          poetry config virtualenvs.in-project true --local
      - uses: actions/cache@v4
        name: Define a cache for the virtual environment file
        with:
          path: ./.venv
          key: venv-${{ hashFiles('poetry.lock') }}
      - name: Install the project dependencies
        run: poetry install
      - name: Install OpenCV
        run: sudo apt-get install python3-opencv
      - name: Enter the virtual environment
        run: source $VENV
      - name: Make Script Executable and Run
        run: |-
          chmod +x ./scripts/run_examples.sh
          ./scripts/run_examples.sh

@ -224,8 +224,9 @@ print(final_post)
| **[MixtureOfAgents (MoA)](https://docs.swarms.world/en/latest/swarms/structs/moa/)** | Utilizes multiple expert agents in parallel and synthesizes their outputs. | Complex problem-solving, achieving state-of-the-art performance through collaboration. |
| **[GroupChat](https://docs.swarms.world/en/latest/swarms/structs/group_chat/)** | Agents collaborate and make decisions through a conversational interface. | Real-time collaborative decision-making, negotiations, brainstorming. |
| **[ForestSwarm](https://docs.swarms.world/en/latest/swarms/structs/forest_swarm/)** | Dynamically selects the most suitable agent or tree of agents for a given task. | Task routing, optimizing for expertise, complex decision-making trees. |
| **[SpreadSheetSwarm](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/)** | Manages thousands of agents concurrently, tracking tasks and outputs in a structured format. | Massive-scale parallel operations, large-scale data generation and analysis. |
| **[HierarchicalSwarm](https://docs.swarms.world/en/latest/swarms/structs/hiearchical_swarm/)** | Orchestrates agents with a director that creates plans and distributes tasks to specialized worker agents. | Complex project management, team coordination, hierarchical decision-making with feedback loops. |
| **[HeavySwarm](https://docs.swarms.world/en/latest/swarms/structs/heavy_swarm/)** | Implements a 5-phase workflow with specialized agents (Research, Analysis, Alternatives, Verification) for comprehensive task analysis. | Complex research and analysis tasks, financial analysis, strategic planning, comprehensive reporting. |
| **[SwarmRouter](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/)** | Universal orchestrator that provides a single interface to run any type of swarm with dynamic selection. | Simplifying complex workflows, switching between swarm strategies, unified multi-agent management. |
-----
@ -237,56 +238,72 @@ A `SequentialWorkflow` executes tasks in a strict order, forming a pipeline wher
```python ```python
from swarms import Agent, SequentialWorkflow from swarms import Agent, SequentialWorkflow
# Initialize agents for a 3-step process # Agent 1: The Researcher
# 1. Generate an idea researcher = Agent(
idea_generator = Agent(agent_name="IdeaGenerator", system_prompt="Generate a unique startup idea.", model_name="gpt-4o-mini") agent_name="Researcher",
# 2. Validate the idea system_prompt="Your job is to research the provided topic and provide a detailed summary.",
validator = Agent(agent_name="Validator", system_prompt="Take this startup idea and analyze its market viability.", model_name="gpt-4o-mini") model_name="gpt-4o-mini",
# 3. Create a pitch )
pitch_creator = Agent(agent_name="PitchCreator", system_prompt="Write a 3-sentence elevator pitch for this validated startup idea.", model_name="gpt-4o-mini")
# Create the sequential workflow # Agent 2: The Writer
workflow = SequentialWorkflow(agents=[idea_generator, validator, pitch_creator]) writer = Agent(
agent_name="Writer",
system_prompt="Your job is to take the research summary and write a beautiful, engaging blog post about it.",
model_name="gpt-4o-mini",
)
# Run the workflow # Create a sequential workflow where the researcher's output feeds into the writer's input
elevator_pitch = workflow.run() workflow = SequentialWorkflow(agents=[researcher, writer])
print(elevator_pitch)
# Run the workflow on a task
final_post = workflow.run("The history and future of artificial intelligence")
print(final_post)
``` ```
----- -----
### ConcurrentWorkflow (with `SpreadSheetSwarm`) ### ConcurrentWorkflow
A concurrent workflow runs multiple agents simultaneously. `SpreadSheetSwarm` is a powerful implementation that can manage thousands of concurrent agents and log their outputs to a CSV file. Use this architecture for high-throughput tasks that can be performed in parallel, drastically reducing execution time. A `ConcurrentWorkflow` runs multiple agents simultaneously, allowing for parallel execution of tasks. This architecture drastically reduces execution time for tasks that can be performed in parallel, making it ideal for high-throughput scenarios where agents work on similar tasks concurrently.
```python ```python
from swarms import Agent, SpreadSheetSwarm from swarms import Agent, ConcurrentWorkflow
# Define a list of tasks (e.g., social media posts to generate) # Create agents for different analysis tasks
platforms = ["Twitter", "LinkedIn", "Instagram"] market_analyst = Agent(
agent_name="Market-Analyst",
# Create an agent for each task system_prompt="Analyze market trends and provide insights on the given topic.",
agents = [ model_name="gpt-4o-mini",
Agent( max_loops=1,
agent_name=f"{platform}-Marketer", )
system_prompt=f"Generate a real estate marketing post for {platform}.",
model_name="gpt-4o-mini", financial_analyst = Agent(
) agent_name="Financial-Analyst",
for platform in platforms system_prompt="Provide financial analysis and recommendations on the given topic.",
] model_name="gpt-4o-mini",
max_loops=1,
# Initialize the swarm to run these agents concurrently
swarm = SpreadSheetSwarm(
agents=agents,
autosave_on=True,
save_file_path="marketing_posts.csv",
) )
# Run the swarm with a single, shared task description risk_analyst = Agent(
property_description = "A beautiful 3-bedroom house in sunny California." agent_name="Risk-Analyst",
swarm.run(task=f"Generate a post about: {property_description}") system_prompt="Assess risks and provide risk management strategies for the given topic.",
# Check marketing_posts.csv for the results! model_name="gpt-4o-mini",
max_loops=1,
)
# Create concurrent workflow
concurrent_workflow = ConcurrentWorkflow(
agents=[market_analyst, financial_analyst, risk_analyst],
max_loops=1,
)
# Run all agents concurrently on the same task
results = concurrent_workflow.run(
"Analyze the potential impact of AI technology on the healthcare industry"
)
print(results)
``` ```
--- ---
@ -313,9 +330,7 @@ rearrange_system = AgentRearrange(
flow=flow, flow=flow,
) )
# Run the system # Run the swarm
# The researcher will generate content, and then both the writer and editor
# will process that content in parallel.
outputs = rearrange_system.run("Analyze the impact of AI on modern cinema.") outputs = rearrange_system.run("Analyze the impact of AI on modern cinema.")
print(outputs) print(outputs)
``` ```
@ -533,6 +548,49 @@ The `HierarchicalSwarm` excels at:
--- ---
### HeavySwarm
`HeavySwarm` implements a sophisticated 5-phase workflow inspired by X.AI's Grok heavy implementation. It uses specialized agents (Research, Analysis, Alternatives, Verification) to provide comprehensive task analysis through intelligent question generation, parallel execution, and synthesis. This architecture excels at complex research and analysis tasks requiring thorough investigation and multiple perspectives.
```python
from swarms import HeavySwarm
# Initialize the HeavySwarm with configuration
swarm = HeavySwarm(
worker_model_name="gpt-4o-mini", # Model for worker agents
question_agent_model_name="gpt-4o-mini", # Model for question generation
loops_per_agent=1, # Number of loops per agent
show_dashboard=True, # Enable real-time dashboard
)
# Run complex analysis task
result = swarm.run(
"Provide 3 publicly traded biotech companies that are currently trading below their cash value. "
"For each company identified, provide available data or projections for the next 6 months, "
"including any relevant financial metrics, upcoming catalysts, or events that could impact valuation. "
"Present your findings in a clear, structured format with ticker symbols, current prices, "
"cash values, and percentage differences."
)
print(result)
```
The `HeavySwarm` provides:
- **5-Phase Analysis**: Question generation, research, analysis, alternatives, and verification
- **Specialized Agents**: Each phase uses purpose-built agents for optimal results
- **Comprehensive Coverage**: Multiple perspectives and thorough investigation
- **Real-time Dashboard**: Optional visualization of the analysis process
- **Structured Output**: Well-organized and actionable results
This architecture is perfect for financial analysis, strategic planning, research reports, and any task requiring deep, multi-faceted analysis. [Learn more about HeavySwarm](https://docs.swarms.world/en/latest/swarms/structs/heavy_swarm/)
---
## Documentation ## Documentation
Documentation is located here at: [docs.swarms.world](https://docs.swarms.world) Documentation is located here at: [docs.swarms.world](https://docs.swarms.world)
@ -665,7 +723,7 @@ Join our community of agent engineers and researchers for technical support, cut
| 📚 Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| 📝 Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
| 💬 Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
| 🐦 Twitter | Latest news and announcements | [@swarms_corp](https://twitter.com/swarms_corp) |
| 👥 LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| 📺 YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
| 🎫 Events | Join our community events | [Sign up here](https://lu.ma/5p2jnc2v) |

@ -0,0 +1,692 @@
# Environment Setup Guide for Swarms Contributors
Welcome to the Swarms development environment setup guide! This comprehensive guide will walk you through setting up your development environment from scratch, whether you're a first-time contributor or an experienced developer.
!!! success "🚀 One-Click Setup (Recommended)"
**New!** Use our automated setup script that handles everything:
```bash
git clone https://github.com/kyegomez/swarms.git
cd swarms
chmod +x scripts/setup.sh
./scripts/setup.sh
```
This script automatically installs Poetry, creates a virtual environment, installs all dependencies, sets up pre-commit hooks, and more!
!!! info "Manual Setup"
**Alternative**: For manual control, install Python 3.10+, Git, and Poetry, then run:
```bash
git clone https://github.com/kyegomez/swarms.git
cd swarms
poetry install --with dev
```
---
## :material-list-status: Prerequisites
Before setting up your development environment, ensure you have the following installed:
### System Requirements
| Tool | Version | Purpose |
|------|---------|---------|
| **Python** | 3.10+ | Core runtime |
| **Git** | 2.30+ | Version control |
| **Poetry** | 1.4+ | Dependency management (recommended) |
| **Node.js** | 16+ | Documentation tools (optional) |
### Operating System Support
=== "macOS"
```bash
# Install Homebrew if not already installed
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
# Install prerequisites
brew install python@3.10 git poetry node
```
=== "Ubuntu/Debian"
```bash
# Update package list
sudo apt update
# Install Python 3.10 and pip
sudo apt install python3.10 python3.10-venv python3-pip git curl
# Install Poetry
curl -sSL https://install.python-poetry.org | python3 -
# Add Poetry to PATH
export PATH="$HOME/.local/bin:$PATH"
echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc
```
=== "Windows"
1. **Install Python 3.10+** from [python.org](https://python.org/downloads/)
2. **Install Git** from [git-scm.com](https://git-scm.com/download/win)
3. **Install Poetry** using PowerShell:
```powershell
(Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
```
---
## :material-auto-fix: Automated Setup (Recommended)
We provide a comprehensive setup script that automates the entire development environment setup process. This is the **recommended approach** for new contributors.
### What the Setup Script Does
The `scripts/setup.sh` script automatically handles:
- ✅ **Python Version Check**: Verifies Python 3.10+ is installed
- ✅ **Poetry Installation**: Installs Poetry if not present
- ✅ **Virtual Environment**: Creates and configures a project-specific virtual environment
- ✅ **Dependencies**: Installs all main, development, lint, and test dependencies
- ✅ **Pre-commit Hooks**: Sets up and installs pre-commit hooks for code quality
- ✅ **Environment Template**: Creates a `.env` file template with common variables
- ✅ **Verification**: Runs initial setup verification checks
- ✅ **Helpful Output**: Provides colored output and next steps
### Running the Automated Setup
```bash
# Clone the repository
git clone https://github.com/kyegomez/swarms.git
cd swarms
# Make the script executable and run it
chmod +x scripts/setup.sh
./scripts/setup.sh
```
### Script Features
=== "🎯 Smart Detection"
The script intelligently detects your system state:
- Checks if Poetry is already installed
- Verifies Python version compatibility
- Detects existing virtual environments
- Checks for Git repository status
=== "🔧 Comprehensive Setup"
Installs everything you need:
```bash
# All dependency groups
poetry install --with dev,lint,test
# Pre-commit hooks
pre-commit install
pre-commit install --hook-type commit-msg
# Initial verification run
pre-commit run --all-files
```
=== "📋 Environment Template"
Creates a starter `.env` file:
```bash
# Generated .env template
OPENAI_API_KEY=your_openai_api_key_here
ANTHROPIC_API_KEY=your_anthropic_key_here
LOG_LEVEL=INFO
DEVELOPMENT=true
```
=== "💡 Helpful Guidance"
Provides next steps and useful commands:
- How to activate the virtual environment
- Essential Poetry commands
- Testing and development workflow
- Troubleshooting tips
### When to Use Manual Setup
Use the manual setup approach if you:
- Want full control over each step
- Have specific system requirements
- Are troubleshooting installation issues
- Prefer to understand each component
---
## :material-git: Repository Setup
### Step 1: Fork and Clone
1. **Fork the repository** on GitHub: [github.com/kyegomez/swarms](https://github.com/kyegomez/swarms)
2. **Clone your fork**:
```bash
git clone https://github.com/YOUR_USERNAME/swarms.git
cd swarms
```
3. **Add upstream remote**:
```bash
git remote add upstream https://github.com/kyegomez/swarms.git
```
4. **Verify remotes**:
```bash
git remote -v
# origin https://github.com/YOUR_USERNAME/swarms.git (fetch)
# origin https://github.com/YOUR_USERNAME/swarms.git (push)
# upstream https://github.com/kyegomez/swarms.git (fetch)
# upstream https://github.com/kyegomez/swarms.git (push)
```
---
## :material-package-variant: Dependency Management
Choose your preferred method for managing dependencies:
=== "Poetry (Recommended)"
Poetry provides superior dependency resolution and virtual environment management.
### Installation
```bash
# Navigate to project directory
cd swarms
# Install all dependencies including development tools
poetry install --with dev,lint,test
# Activate the virtual environment
poetry shell
```
### Useful Poetry Commands
```bash
# Add a new dependency
poetry add package_name
# Add a development dependency
poetry add --group dev package_name
# Update dependencies
poetry update
# Show dependency tree
poetry show --tree
# Run commands in the virtual environment
poetry run python your_script.py
```
=== "pip + venv"
Traditional pip-based setup with virtual environments.
### Installation
```bash
# Navigate to project directory
cd swarms
# Create virtual environment
python -m venv venv
# Activate virtual environment
# On macOS/Linux:
source venv/bin/activate
# On Windows:
venv\Scripts\activate
# Upgrade pip
pip install --upgrade pip
# Install core dependencies
pip install -r requirements.txt
# Install documentation dependencies (optional)
pip install -r docs/requirements.txt
```
---
## :material-tools: Development Tools Setup
### Code Quality Tools
Swarms uses several tools to maintain code quality:
=== "Formatting"
**Black** - Code formatter
```bash
# Format code
poetry run black swarms/
# or with pip:
black swarms/
# Check formatting without making changes
black swarms/ --check --diff
```
=== "Linting"
**Ruff** - Fast Python linter
```bash
# Run linter
poetry run ruff check swarms/
# or with pip:
ruff check swarms/
# Auto-fix issues
ruff check swarms/ --fix
```
=== "Type Checking"
**MyPy** - Static type checker
```bash
# Run type checking
poetry run mypy swarms/
# or with pip:
mypy swarms/
```
### Pre-commit Hooks (Optional but Recommended)
Set up pre-commit hooks to automatically run quality checks:
```bash
# Install pre-commit
poetry add --group dev pre-commit
# or with pip:
pip install pre-commit
# Install git hooks
pre-commit install
# Run on all files
pre-commit run --all-files
```
The project uses the latest ruff-pre-commit configuration with separate hooks for linting and formatting:
- **ruff-check**: Runs the linter with automatic fixes (`--fix` flag)
- **ruff-format**: Runs the formatter for code styling
- **types_or: [python, pyi]**: Excludes Jupyter notebooks from processing
This configuration ensures consistent code quality and style across the project while avoiding conflicts with Jupyter notebook files.
---
## :material-test-tube: Testing Setup
### Running Tests
```bash
# Run all tests
poetry run pytest
# or with pip:
pytest
# Run tests with coverage
poetry run pytest --cov=swarms tests/
# Run specific test file
poetry run pytest tests/test_specific_file.py
# Run tests matching a pattern
poetry run pytest -k "test_agent"
```
### Test Structure
The project uses pytest with the following structure:
```
tests/
├── agents/ # Agent-related tests
├── structs/ # Multi-agent structure tests
├── tools/ # Tool tests
├── utils/ # Utility tests
└── conftest.py # Test configuration
```
### Writing Tests
```python
# Example test file: tests/test_example.py
import pytest
from swarms import Agent
def test_agent_creation():
"""Test that an agent can be created successfully."""
agent = Agent(
agent_name="test_agent",
system_prompt="You are a helpful assistant"
)
assert agent.agent_name == "test_agent"
@pytest.mark.parametrize("input_val,expected", [
("hello", "HELLO"),
("world", "WORLD"),
])
def test_uppercase(input_val, expected):
"""Example parametrized test."""
assert input_val.upper() == expected
```
---
## :material-book-open-page-variant: Documentation Setup
### Building Documentation Locally
```bash
# Install documentation dependencies
pip install -r docs/requirements.txt
# Navigate to docs directory
cd docs
# Serve documentation locally
mkdocs serve
# Documentation will be available at http://127.0.0.1:8000
```
### Documentation Structure
```
docs/
├── index.md # Homepage
├── mkdocs.yml # MkDocs configuration
├── swarms/ # Core documentation
├── examples/ # Examples and tutorials
├── contributors/ # Contributor guides
└── assets/ # Images and static files
```
### Writing Documentation
Use Markdown with MkDocs extensions:
```markdown
# Page Title
!!! tip "Pro Tip"
Use admonitions to highlight important information.
=== "Python"
```python
from swarms import Agent
agent = Agent()
```
=== "CLI"
```bash
swarms create-agent --name myagent
```
```
---
## :material-application-variable: Environment Variables
Create a `.env` file for local development:
```bash
# Copy example environment file
cp .env.example .env # if it exists
# Or create your own .env file
touch .env
```
Common environment variables:
```bash
# .env file
OPENAI_API_KEY=your_openai_api_key_here
ANTHROPIC_API_KEY=your_anthropic_api_key_here
GROQ_API_KEY=your_groq_api_key_here
# Development settings
DEBUG=true
LOG_LEVEL=INFO
# Optional: Database settings
DATABASE_URL=sqlite:///swarms.db
```
---
## :material-check-circle: Verification Steps
!!! tip "Automated Verification"
If you used the automated setup script (`./scripts/setup.sh`), most verification steps are handled automatically. The script runs verification checks and reports any issues.
For manual setups, verify your setup is working correctly:
### 1. Basic Import Test
```bash
poetry run python -c "from swarms import Agent; print('✅ Import successful')"
```
### 2. Run a Simple Agent
```python
# test_setup.py
from swarms import Agent
agent = Agent(
agent_name="setup_test",
system_prompt="You are a helpful assistant for testing setup.",
max_loops=1
)
response = agent.run("Say hello!")
print(f"✅ Agent response: {response}")
```
### 3. Code Quality Check
```bash
# Run all quality checks
poetry run black swarms/ --check
poetry run ruff check swarms/
poetry run pytest tests/ -x
```
### 4. Documentation Build
```bash
cd docs
mkdocs build
echo "✅ Documentation built successfully"
```
---
## :material-rocket-launch: Development Workflow
### Creating a Feature Branch
```bash
# Sync with upstream
git fetch upstream
git checkout master
git rebase upstream/master
# Create feature branch
git checkout -b feature/your-feature-name
# Make your changes...
# Add and commit
git add .
git commit -m "feat: add your feature description"
# Push to your fork
git push origin feature/your-feature-name
```
### Daily Development Commands
```bash
# Start development session
cd swarms
poetry shell # or source venv/bin/activate
# Pull latest changes
git fetch upstream
git rebase upstream/master
# Run tests during development
poetry run pytest tests/ -v
# Format and lint before committing
poetry run black swarms/
poetry run ruff check swarms/ --fix
# Run a quick smoke test
poetry run python -c "from swarms import Agent; print('✅ All good')"
```
---
## :material-bug: Troubleshooting
!!! tip "First Step: Try the Automated Setup"
If you're experiencing setup issues, try running our automated setup script first:
```bash
chmod +x scripts/setup.sh
./scripts/setup.sh
```
This script handles most common setup problems automatically and provides helpful error messages.
### Common Issues and Solutions
=== "Poetry Issues"
**Problem**: Poetry command not found
```bash
# Solution: Add Poetry to PATH
export PATH="$HOME/.local/bin:$PATH"
# Add to your shell profile (.bashrc, .zshrc, etc.)
```
**Problem**: Poetry install fails
```bash
# Solution: Clear cache and reinstall
poetry cache clear --all pypi
poetry install --with dev
```
=== "Python Version Issues"
**Problem**: Wrong Python version
```bash
# Check Python version
python --version
# Use pyenv to manage Python versions
curl https://pyenv.run | bash
pyenv install 3.10.12
pyenv local 3.10.12
```
=== "Import Errors"
**Problem**: Cannot import swarms modules
```bash
# Ensure you're in the virtual environment
poetry shell
# or
source venv/bin/activate
# Install in development mode
poetry install --with dev
# or
pip install -e .
```
=== "Test Failures"
**Problem**: Tests fail due to missing dependencies
```bash
# Install test dependencies
poetry install --with test
# or
pip install pytest pytest-cov pytest-mock
```
### Getting Help
If you encounter issues:
1. **Check the FAQ** in the main documentation
2. **Search existing issues** on GitHub
3. **Ask in the Discord community**: [discord.gg/jM3Z6M9uMq](https://discord.gg/jM3Z6M9uMq)
4. **Create a GitHub issue** with:
- Your operating system
- Python version
- Error messages
- Steps to reproduce
---
## :material-next-step: Next Steps
Now that your environment is set up:
1. **Read the Contributing Guide**: [contributors/main.md](main.md)
2. **Explore the Codebase**: Start with `swarms/structs/agent.py`
3. **Run Examples**: Check out `examples/` directory
4. **Pick an Issue**: Look for `good-first-issue` labels on GitHub
5. **Join the Community**: Discord, Twitter, and GitHub discussions
!!! success "You're Ready!"
Your Swarms development environment is now set up! You're ready to contribute to the most important technology for multi-agent collaboration.
---
## :material-bookmark-outline: Quick Reference
### Essential Commands
```bash
# Setup (choose one)
./scripts/setup.sh # Automated setup (recommended)
poetry install --with dev # Manual dependency install
# Daily workflow
poetry shell # Activate environment
poetry run pytest # Run tests
poetry run black swarms/ # Format code
poetry run ruff check swarms/ # Lint code
# Git workflow
git fetch upstream # Get latest changes
git rebase upstream/master # Update your branch
git checkout -b feature/name # Create feature branch
git push origin feature/name # Push your changes
# Documentation
cd docs && mkdocs serve # Serve docs locally
mkdocs build # Build docs
```
### Project Structure
```
swarms/
├── swarms/ # Core package
│ ├── agents/ # Agent implementations
│ ├── structs/ # Multi-agent structures
│ ├── tools/ # Agent tools
│ └── utils/ # Utilities
├── examples/ # Usage examples
├── tests/ # Test suite
├── docs/ # Documentation
├── pyproject.toml # Poetry configuration
└── requirements.txt # Pip dependencies
```
Happy coding! 🚀

@ -0,0 +1,19 @@
# Class/function
Brief description
## Overview
## Architecture (Mermaid diagram)
## Class Reference (Constructor + Methods)
## Examples
## Conclusion
Benefits of class/structure, and more

@ -54,6 +54,76 @@ extra:
- icon: fontawesome/brands/linkedin - icon: fontawesome/brands/linkedin
link: https://www.linkedin.com/company/swarms-corp/ link: https://www.linkedin.com/company/swarms-corp/
footer_links:
"Getting Started":
- title: "Installation"
url: "https://docs.swarms.world/en/latest/swarms/install/install/"
- title: "Quickstart"
url: "https://docs.swarms.world/en/latest/quickstart/"
- title: "Environment Setup"
url: "https://docs.swarms.world/en/latest/swarms/install/env/"
- title: "Basic Agent Example"
url: "https://docs.swarms.world/en/latest/swarms/examples/basic_agent/"
"Core Capabilities":
- title: "Agents"
url: "https://docs.swarms.world/en/latest/swarms/structs/agent/"
- title: "Tools and MCP"
url: "https://docs.swarms.world/en/latest/swarms/tools/tools_examples/"
- title: "Multi-Agent Architectures"
url: "https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/"
- title: "Sequential Workflow"
url: "https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/"
- title: "Concurrent Workflow"
url: "https://docs.swarms.world/en/latest/swarms/structs/concurrentworkflow/"
- title: "Hierarchical Swarm"
url: "https://docs.swarms.world/en/latest/swarms/structs/hierarchical_swarm/"
- title: "Swarm Router"
url: "https://docs.swarms.world/en/latest/swarms/structs/swarm_router/"
"Templates & Applications":
- title: "Examples Overview"
url: "https://docs.swarms.world/en/latest/examples/index/"
- title: "Cookbook"
url: "https://docs.swarms.world/en/latest/examples/cookbook_index/"
- title: "Templates"
url: "https://docs.swarms.world/en/latest/examples/templates/"
- title: "Paper Implementations"
url: "https://docs.swarms.world/en/latest/examples/paper_implementations/"
"Contributors":
- title: "Contributing"
url: "https://docs.swarms.world/en/latest/contributors/main/"
- title: "Code Style Guide"
url: "https://docs.swarms.world/en/latest/swarms/framework/code_cleanliness/"
- title: "Adding Documentation"
url: "https://docs.swarms.world/en/latest/contributors/docs/"
- title: "Bounty Program"
url: "https://docs.swarms.world/en/latest/corporate/bounty_program/"
- title: "Support"
url: "https://docs.swarms.world/en/latest/swarms/support/"
"Community":
- title: "Twitter"
url: "https://twitter.com/swarms_corp"
- title: "Discord"
url: "https://discord.gg/jM3Z6M9uMq"
- title: "YouTube"
url: "https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ"
- title: "LinkedIn"
url: "https://www.linkedin.com/company/the-swarm-corporation"
- title: "Blog"
url: "https://medium.com/@kyeg"
- title: "Events"
url: "https://lu.ma/5p2jnc2v"
- title: "Onboarding Session"
url: "https://cal.com/swarms/swarms-onboarding-session"
analytics: analytics:
provider: google provider: google
property: G-MPE9C65596 property: G-MPE9C65596
@ -82,6 +152,7 @@ theme:
- navigation.sections - navigation.sections
# - navigation.expand # - navigation.expand
- navigation.top - navigation.top
- navigation.footer
- announce.dismiss - announce.dismiss
font: font:
text: "Fira Sans" # Clean and readable text text: "Fira Sans" # Clean and readable text
@ -154,50 +225,59 @@ nav:
- Quickstart: "quickstart.md" - Quickstart: "quickstart.md"
- Agents: "swarms/agents/index.md" - Agents: "swarms/agents/index.md"
- Multi-Agent Architectures: "swarms/structs/index.md" - Multi-Agent Architectures: "swarms/structs/index.md"
- Protocol:
- Overview: "protocol/overview.md"
- SIPs: "protocol/sip.md"
- Feature Set: "swarms/features.md" - Feature Set: "swarms/features.md"
- Swarms Ecosystem: "swarms/ecosystem.md"
- Technical Support: "swarms/support.md"
- Agents: - Agents:
- Overview: "swarms/agents/index.md" - Overview: "swarms/framework/agents_explained.md"
- Concepts: - Quickstart: "swarms/agents/index.md"
# - Managing Prompts in Production: "swarms/prompts/main.md" - Reference: "swarms/structs/agent.md"
- Introduction into The Agent Architecture: "swarms/framework/agents_explained.md" - LLM Providers: "swarms/models/agent_and_models.md"
# - Introduction to Tools: "swarms/tools/overview.md" - Tools and MCP: "swarms/tools/tools_examples.md"
- Documentation: - Multi-Agent Helpers: "swarms/structs/agent_multi_agent_communication.md"
- Agent Class Documentation: "swarms/structs/agent.md" - Running Agents with YAML: "swarms/agents/create_agents_yaml.md"
- Create and Run Agents from YAML: "swarms/agents/create_agents_yaml.md"
- Integrating Various Models into Your Agents: "swarms/models/agent_and_models.md"
- Tools: - Additional Capabilities:
- Tools Reference:
- Overview: "swarms/tools/main.md" - Overview: "swarms/tools/main.md"
- What are tools?: "swarms/tools/build_tool.md" - What are tools?: "swarms/tools/build_tool.md"
- Structured Outputs: "swarms/agents/structured_outputs.md" - Structured Outputs: "swarms/agents/structured_outputs.md"
- Agent MCP Integration: "swarms/structs/agent_mcp.md" - Agent MCP Integration: "swarms/structs/agent_mcp.md"
- Comprehensive Tool Guide with MCP, Callables, and more: "swarms/tools/tools_examples.md" - Long Term Memory Reference:
- RAG || Long Term Memory:
- Integrating RAG with Agents: "swarms/memory/diy_memory.md" - Integrating RAG with Agents: "swarms/memory/diy_memory.md"
- Third-Party Agent Integrations:
- OpenAI Assistant: "swarms/agents/openai_assistant.md"
- Integrating External Agents from Griptape, Langchain, etc: "swarms/agents/external_party_agents.md"
- Creating Custom Agents: "swarms/agents/new_agent.md"
- PreBuilt Reasoning Agents:
- Self Consistency Agent: "swarms/agents/consistency_agent.md"
- IRE Agent: "swarms/agents/iterative_agent.md"
- Reasoning Duo: "swarms/agents/reasoning_duo.md"
- Reasoning Agent Router: "swarms/agents/reasoning_agent_router.md"
- Reflexion Agent: "swarms/agents/reflexion_agent.md"
- GKP Agent: "swarms/agents/gkp_agent.md"
- Agent Judge: "swarms/agents/agent_judge.md"
- Multi-Agent Architectures: - Third-Party Agents:
- Introduction to Multi-Agent Collaboration: "swarms/concept/why.md" - Creating Custom Agents: "swarms/agents/new_agent.md"
- OpenAI Assistant: "swarms/agents/openai_assistant.md"
- Integrating External Agents from Griptape, Langchain, etc: "swarms/agents/external_party_agents.md"
- Reasoning Agents:
- Overview: "swarms/agents/reasoning_agents_overview.md"
- Self Consistency Agent: "swarms/agents/consistency_agent.md"
- IRE Agent: "swarms/agents/iterative_agent.md"
- Reasoning Duo: "swarms/agents/reasoning_duo.md"
- Reflexion Agent: "swarms/agents/reflexion_agent.md"
- GKP Agent: "swarms/agents/gkp_agent.md"
- Agent Judge: "swarms/agents/agent_judge.md"
- Reasoning Agent Router: "swarms/agents/reasoning_agent_router.md"
- Concepts: - Multi-Agent Architectures:
- Introduction to Multi Agent Architectures: "swarms/concept/swarm_architectures.md" - Overview: "swarms/concept/swarm_architectures.md"
- How to Choose the Right Multi Agent Architecture: "swarms/concept/how_to_choose_swarms.md" - Benefits: "swarms/concept/why.md"
- How to Build Custom Swarms: "swarms/structs/custom_swarm.md" - Choosing Multi Agent Architecture: "swarms/concept/how_to_choose_swarms.md"
- How to Create New Multi Agent Architectures: "swarms/structs/create_new_swarm.md"
- Introduction to Hierarchical Multi Agent Architectures: "swarms/structs/multi_swarm_orchestration.md"
- Multi-Agent Architectures Documentation: - Documentation:
- Overview: "swarms/structs/overview.md" - Overview: "swarms/structs/overview.md"
- Custom Multi Agent Architectures: "swarms/structs/custom_swarm.md"
- MajorityVoting: "swarms/structs/majorityvoting.md" - MajorityVoting: "swarms/structs/majorityvoting.md"
- RoundRobin: "swarms/structs/round_robin_swarm.md" - RoundRobin: "swarms/structs/round_robin_swarm.md"
- Mixture of Agents: "swarms/structs/moa.md" - Mixture of Agents: "swarms/structs/moa.md"
@ -207,20 +287,19 @@ nav:
- Various Execution Methods: "swarms/structs/various_execution_methods.md" - Various Execution Methods: "swarms/structs/various_execution_methods.md"
- Deep Research Swarm: "swarms/structs/deep_research_swarm.md" - Deep Research Swarm: "swarms/structs/deep_research_swarm.md"
- Council of Judges: "swarms/structs/council_of_judges.md" - Council of Judges: "swarms/structs/council_of_judges.md"
- Heavy Swarm: "swarms/structs/heavy_swarm.md"
- Hierarchical Architectures: - Hierarchical Architectures:
- Overview: "swarms/structs/multi_swarm_orchestration.md"
- HierarchicalSwarm: "swarms/structs/hierarchical_swarm.md" - HierarchicalSwarm: "swarms/structs/hierarchical_swarm.md"
- Auto Agent Builder: "swarms/structs/auto_agent_builder.md" - Auto Agent Builder: "swarms/structs/auto_agent_builder.md"
- Hybrid Hierarchical-Cluster Swarm: "swarms/structs/hhcs.md" - Hybrid Hierarchical-Cluster Swarm: "swarms/structs/hhcs.md"
- Auto Swarm Builder: "swarms/structs/auto_swarm_builder.md" - Auto Swarm Builder: "swarms/structs/auto_swarm_builder.md"
- Swarm Matcher: "swarms/structs/swarm_matcher.md" - Swarm Matcher: "swarms/structs/swarm_matcher.md"
- Multi-Agent Multi-Modal Structures: # - Multi-Agent Multi-Modal Structures:
- ImageAgentBatchProcessor: "swarms/structs/image_batch_agent.md" # - ImageAgentBatchProcessor: "swarms/structs/image_batch_agent.md"
- Storage:
- AgentRegistry: "swarms/structs/agent_registry.md"
- Routers: - Routers:
- SwarmRouter: "swarms/structs/swarm_router.md" - SwarmRouter: "swarms/structs/swarm_router.md"
@ -240,6 +319,9 @@ nav:
- SequentialWorkflow: "swarms/structs/sequential_workflow.md" - SequentialWorkflow: "swarms/structs/sequential_workflow.md"
- GraphWorkflow: "swarms/structs/graph_workflow.md" - GraphWorkflow: "swarms/structs/graph_workflow.md"
- Storage:
- AgentRegistry: "swarms/structs/agent_registry.md"
- Communication Structure: "swarms/structs/conversation.md" - Communication Structure: "swarms/structs/conversation.md"
- Tools: - Tools:
@ -253,17 +335,17 @@ nav:
- Social Media: - Social Media:
- Twitter: "swarms_tools/twitter.md" - Twitter: "swarms_tools/twitter.md"
- Memory: # - Memory:
- Overview: "swarms_memory/index.md" # - Overview: "swarms_memory/index.md"
- Memory Systems: # - Memory Systems:
- ChromaDB: "swarms_memory/chromadb.md" # - ChromaDB: "swarms_memory/chromadb.md"
- Pinecone: "swarms_memory/pinecone.md" # - Pinecone: "swarms_memory/pinecone.md"
- Faiss: "swarms_memory/faiss.md" # - Faiss: "swarms_memory/faiss.md"
- Deployment Solutions: - Deployment Solutions:
- Deploy your agents on Google Cloud Run: "swarms_cloud/cloud_run.md" - Deploy on Google Cloud Run: "swarms_cloud/cloud_run.md"
- Deploy your agents on Phala: "swarms_cloud/phala_deploy.md" - Deploy on Phala: "swarms_cloud/phala_deploy.md"
# - Deploy your agents on FastAPI: # - Deploy on FastAPI: "swarms_cloud/fastapi_deploy.md"
- Examples: - Examples:
@ -368,12 +450,12 @@ nav:
- Finance Swarm: "swarms/examples/swarms_api_finance.md" - Finance Swarm: "swarms/examples/swarms_api_finance.md"
- Clients: - Clients:
- Overview: "swarms_cloud/api_clients.md"
- Python Client: "swarms_cloud/python_client.md" - Python Client: "swarms_cloud/python_client.md"
- Rust Client: "swarms_cloud/rust_client.md" - Rust Client: "swarms_cloud/rust_client.md"
- Pricing: - Pricing:
- Pricing: "swarms_cloud/api_pricing.md" - Pricing: "swarms_cloud/api_pricing.md"
- Pricing in Chinese: "swarms_cloud/chinese_api_pricing.md"
- Subscription Tiers: "swarms_cloud/subscription_tiers.md" - Subscription Tiers: "swarms_cloud/subscription_tiers.md"
- Swarms Marketplace: - Swarms Marketplace:
@ -393,20 +475,20 @@ nav:
- Contributors: - Contributors:
- Overview: "contributors/main.md" - Overview: "contributors/main.md"
- Environment Setup: "contributors/environment_setup.md"
- Bounty Program: "corporate/bounty_program.md" - Bounty Program: "corporate/bounty_program.md"
- Links & Resources: "governance/main.md"
- Swarms Vision: "swarms/concept/vision.md" - Development Guides:
- Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md"
- Swarms Products: "swarms/products.md"
- Learn More:
- Understanding Swarms Architecture: "swarms/concept/framework_architecture.md"
- Code Style Guide & Best Practices: "swarms/framework/code_cleanliness.md" - Code Style Guide & Best Practices: "swarms/framework/code_cleanliness.md"
- Our Development Philosophy & Principles: "swarms/concept/philosophy.md" - Adding Tests: "swarms/framework/test.md"
- Contributing: - Adding Documentation: "contributors/docs.md"
- Writing and Adding Tests: "swarms/framework/test.md" - New Tools & Plugins: "contributors/tools.md"
- Creating Custom Tools & Plugins: "contributors/tools.md"
- Writing Documentation: "contributors/docs.md" - Architecture & Design:
- Changelog: - Understanding Swarms Architecture: "swarms/concept/framework_architecture.md"
- Swarms 5.6.8: "swarms/changelog/5_6_8.md" - Development Philosophy & Principles: "swarms/concept/philosophy.md"
- Swarms 5.8.1: "swarms/changelog/5_8_1.md"
- Swarms 5.9.2: "swarms/changelog/changelog_new.md" # - About Swarms:
# - Vision & Mission: "swarms/concept/vision.md"
# - Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md"
# - Products: "swarms/products.md"

@ -7,3 +7,289 @@
<a href="https://github.com/kyegomez/swarms">Star and contribute</a> to Swarms on GitHub! <a href="https://github.com/kyegomez/swarms">Star and contribute</a> to Swarms on GitHub!
</div> </div>
{% endblock %} {% endblock %}
{% block footer %}
<footer class="md-footer">
<!-- Custom Footer Links Section -->
<div class="md-footer-custom">
<div class="md-footer-custom__inner md-grid">
<div class="md-footer-links">
{% for section_name, links in config.extra.footer_links.items() %}
<div class="md-footer-links__section">
<h4 class="md-footer-links__title">{{ section_name }}</h4>
<ul class="md-footer-links__list">
{% for link in links %}
<li class="md-footer-links__item">
<a href="{{ link.url }}" class="md-footer-links__link">
{{ link.title }}
</a>
</li>
{% endfor %}
</ul>
</div>
{% endfor %}
</div>
</div>
</div>
<!-- Company Information Section -->
<div class="md-footer-company">
<div class="md-footer-company__inner md-grid">
<div class="md-footer-company__content">
<div class="md-footer-company__brand">
<h3 class="md-footer-company__name">Swarms</h3>
<p class="md-footer-company__description">
Automating the world economy with multi-agent collaboration
</p>
</div>
<div class="md-footer-company__copyright">
<p>&copy; 2024 Swarms. All rights reserved.</p>
</div>
</div>
</div>
</div>
<!-- Original Material Footer -->
{{ super() }}
</footer>
{% endblock %}
{% block styles %}
{{ super() }}
<style>
/* Custom Footer Styling - Base */
.md-footer-custom {
padding: 2.4rem 0 1.2rem;
border-top: 0.05rem solid var(--md-default-fg-color--lightest);
}
.md-footer-custom__inner {
margin: 0 auto;
padding: 0 1.2rem;
}
.md-footer-links {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(240px, 1fr));
gap: 2rem;
max-width: 1220px;
margin: 0 auto;
}
.md-footer-links__section {
min-width: 0;
}
.md-footer-links__title {
font-size: 0.64rem;
font-weight: 700;
margin: 0 0 1rem;
text-transform: uppercase;
letter-spacing: 0.1em;
padding-bottom: 0.4rem;
}
.md-footer-links__list {
list-style: none;
margin: 0;
padding: 0;
}
.md-footer-links__item {
margin: 0;
line-height: 1.8;
}
.md-footer-links__link {
text-decoration: none;
font-size: 0.7rem;
display: block;
padding: 0.1rem 0;
transition: color 125ms;
border-radius: 0.1rem;
}
.md-footer-links__link:hover,
.md-footer-links__link:focus {
color: var(--md-accent-fg-color);
}
/* Light Mode (Default) */
[data-md-color-scheme="default"] .md-footer-custom {
background: #ffffff;
border-top-color: #e1e5e9;
}
[data-md-color-scheme="default"] .md-footer-links__title {
color: #2e3440;
border-bottom: 0.05rem solid #e1e5e9;
}
[data-md-color-scheme="default"] .md-footer-links__link {
color: #636c76;
}
[data-md-color-scheme="default"] .md-footer-links__link:hover,
[data-md-color-scheme="default"] .md-footer-links__link:focus {
color: #1976d2;
}
/* Dark Mode (Slate) */
[data-md-color-scheme="slate"] .md-footer-custom {
background: #1F2129;
border-top-color: #404040;
}
[data-md-color-scheme="slate"] .md-footer-links__title {
color: #ffffff;
border-bottom: 0.05rem solid #404040;
}
[data-md-color-scheme="slate"] .md-footer-links__link {
color: #9ca3af;
}
[data-md-color-scheme="slate"] .md-footer-links__link:hover,
[data-md-color-scheme="slate"] .md-footer-links__link:focus {
color: #42a5f5;
}
/* Company Information Section - Base */
.md-footer-company {
padding: 1.5rem 0;
border-top: 0.05rem solid var(--md-default-fg-color--lightest);
}
.md-footer-company__inner {
margin: 0 auto;
padding: 0 1.2rem;
max-width: 1220px;
}
.md-footer-company__content {
display: flex;
justify-content: space-between;
align-items: center;
flex-wrap: wrap;
gap: 1rem;
}
.md-footer-company__brand {
flex: 1;
min-width: 200px;
}
.md-footer-company__name {
margin: 0 0 0.5rem 0;
font-size: 1.2rem;
font-weight: 700;
letter-spacing: 0.05em;
}
.md-footer-company__description {
margin: 0;
font-size: 0.8rem;
line-height: 1.4;
font-style: italic;
}
.md-footer-company__copyright {
text-align: right;
}
.md-footer-company__copyright p {
margin: 0;
font-size: 0.7rem;
opacity: 0.8;
}
/* Company Section - Light Mode */
[data-md-color-scheme="default"] .md-footer-company {
background: #f8f9fa;
border-top-color: #e1e5e9;
}
[data-md-color-scheme="default"] .md-footer-company__name {
color: #DC143C;
}
[data-md-color-scheme="default"] .md-footer-company__description {
color: #495057;
}
[data-md-color-scheme="default"] .md-footer-company__copyright p {
color: #6c757d;
}
/* Company Section - Dark Mode */
[data-md-color-scheme="slate"] .md-footer-company {
background: #1F2129;
border-top-color: #404040;
}
[data-md-color-scheme="slate"] .md-footer-company__name {
color: #DC143C;
}
[data-md-color-scheme="slate"] .md-footer-company__description {
color: #d1d5db;
}
[data-md-color-scheme="slate"] .md-footer-company__copyright p {
color: #9ca3af;
}
/* Responsive Design */
@media screen and (max-width: 76.1875em) {
.md-footer-links {
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 1.5rem;
}
.md-footer-custom {
padding: 2rem 0 1rem;
}
}
@media screen and (max-width: 59.9375em) {
.md-footer-links {
grid-template-columns: repeat(2, 1fr);
gap: 1.5rem;
}
}
@media screen and (max-width: 44.9375em) {
.md-footer-links {
grid-template-columns: 1fr;
gap: 1.5rem;
}
.md-footer-custom {
padding: 1.5rem 0 0.8rem;
}
.md-footer-custom__inner {
padding: 0 1rem;
}
/* Company section mobile styles */
.md-footer-company__content {
flex-direction: column;
text-align: center;
gap: 1rem;
}
.md-footer-company__brand {
min-width: auto;
}
.md-footer-company__copyright {
text-align: center;
}
.md-footer-company__inner {
padding: 0 1rem;
}
}
</style>
{% endblock %}

@ -0,0 +1 @@
# Backwards Compatibility

@ -0,0 +1,439 @@
# Swarms Protocol Overview & Architecture
This document provides a comprehensive overview of the Swarms protocol architecture, illustrating the flow from agent classes to multi-agent structures, and showcasing the main components and folders within the `swarms/` package. The Swarms framework is designed for extensibility, modularity, and production-readiness, enabling the orchestration of intelligent agents, tools, memory, and complex multi-agent systems.
---
## Introduction
Swarms is an enterprise-grade, production-ready multi-agent orchestration framework. It enables developers and organizations to build, deploy, and manage intelligent agents that can reason, collaborate, and solve complex tasks autonomously or in groups. The architecture is inspired by the principles of modularity, composability, and scalability, ensuring that each component can be extended or replaced as needed.
The protocol is structured to support a wide range of use cases, from simple single-agent automations to sophisticated multi-agent workflows involving memory, tool use, and advanced reasoning.
For a high-level introduction and installation instructions, see the [Swarms Docs Home](https://docs.swarms.world/en/latest/).
---
## High-Level Architecture Flow
The Swarms protocol is organized into several key layers, each responsible for a specific aspect of the system. The typical flow is as follows:
1. **Agent Class (`swarms/agents`)**
- The core building block of the framework. Agents encapsulate logic, state, and behavior. They can be simple (stateless) or complex
(stateful, with memory and reasoning capabilities).
- Agents can be specialized for different tasks (e.g., reasoning agents, tool agents, judge agents, etc.).
- Example: A `ReasoningAgent` that can analyze data and make decisions, or a `ToolAgent` that wraps external APIs.
- [Quickstart for Agents](https://docs.swarms.world/en/latest/swarms/agents/)
- [Agent API Reference](https://docs.swarms.world/en/latest/swarms/structs/agent/)
2. **Tools with Memory (`swarms/tools`, `swarms/utils`)**
- Tools are modular components that agents use to interact with the outside world, perform computations, or access resources (APIs,
databases, files, etc.).
- Memory modules and utility functions allow agents to retain context, cache results, and manage state across interactions.
- Example: A tool for calling an LLM API, a memory cache for conversation history, or a utility for parsing and formatting data.
- [Tools Overview](https://docs.swarms.world/en/latest/swarms_tools/overview/)
- [BaseTool Reference](https://docs.swarms.world/en/latest/swarms/tools/base_tool/)
3. **Reasoning & Specialized Agents (`swarms/agents`)**
- These agents build on the base agent class, adding advanced reasoning, self-consistency, and specialized logic for tasks like
planning, evaluation, or multi-step workflows.
- Includes agents for self-reflection, iterative improvement, and domain-specific expertise.
- Example: A `SelfConsistencyAgent` that aggregates multiple reasoning paths, or a `JudgeAgent` that evaluates outputs from other
agents.
- [Reasoning Agents Overview](https://docs.swarms.world/en/latest/swarms/agents/reasoning_agents_overview/)
- [Self Consistency Agent](https://docs.swarms.world/en/latest/swarms/agents/consistency_agent/)
- [Agent Judge](https://docs.swarms.world/en/latest/swarms/agents/agent_judge/)
4. **Multi-Agent Structures (`swarms/structs`)**
- Agents are composed into higher-order structures for collaboration, voting, parallelism, and workflow orchestration.
- Includes swarms for majority voting, round-robin execution, hierarchical delegation, and more.
- Example: A `MajorityVotingSwarm` that aggregates outputs from several agents, or a `HierarchicalSwarm` that delegates tasks to
sub-agents.
- [Multi-Agent Architectures Overview](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/)
- [MajorityVotingSwarm](https://docs.swarms.world/en/latest/swarms/structs/majorityvoting/)
- [HierarchicalSwarm](https://docs.swarms.world/en/latest/swarms/structs/hierarchical_swarm/)
- [Sequential Workflow](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/)
- [Concurrent Workflow](https://docs.swarms.world/en/latest/swarms/structs/concurrentworkflow/)
5. **Supporting Components**
- **Communication (`swarms/communication`)**: Provides wrappers for inter-agent communication, database access, message passing, and
integration with external systems (e.g., Redis, DuckDB, Pulsar). See [Communication Structure](https://docs.swarms.world/en/latest/swarms/structs/conversation/)
- **Artifacts (`swarms/artifacts`)**: Manages the creation, storage, and retrieval of artifacts (outputs, files, logs) generated by
agents and swarms.
- **Prompts (`swarms/prompts`)**: Houses prompt templates, system prompts, and agent-specific prompts for LLM-based agents. See
[Prompts Management](https://docs.swarms.world/en/latest/swarms/prompts/main/)
- **Telemetry (`swarms/telemetry`)**: Handles logging, monitoring, and bootup routines for observability and debugging.
- **Schemas (`swarms/schemas`)**: Defines data schemas for agents, tools, completions, and communication protocols, ensuring type
safety and consistency.
- **CLI (`swarms/cli`)**: Provides command-line utilities for agent creation, management, and orchestration. See [CLI Documentation](https://docs.swarms.world/en/latest/swarms/cli/main/)
---
## Proposing Large Improvements or Enhancements: Swarms Improvement Proposals (SIPs)
For significant changes, new agent architectures, or radical new features, Swarms uses a formal process called **Swarms Improvement Proposals (SIPs)**. SIPs are design documents that describe new features, enhancements, or changes to the Swarms framework. They ensure that major changes are well-documented, discussed, and reviewed by the community before implementation.
**When to use a SIP:**
- Proposing new agent types, swarm patterns, or coordination mechanisms
- Core framework changes or breaking changes
- New integrations (LLM providers, tools, external services)
- Any complex or multi-component feature
**SIP Process Overview:**
1. Discuss your idea in [GitHub Discussions](https://github.com/kyegomez/swarms/discussions)
2. Submit a SIP as a GitHub Issue using the SIP template
3. Engage with the community and iterate on your proposal
4. Undergo review and, if accepted, proceed to implementation
**Learn more:** See the full [SIP Guidelines and Template](https://docs.swarms.world/en/latest/protocol/sip/)
---
## Detailed Architecture Diagram
The following Mermaid diagram visualizes the protocol flow and the relationship between the main folders in the `swarms/` package:
```mermaid
flowchart TD
A["Agent Class<br/>(swarms/agents)"] --> B["Tools with Memory<br/>(swarms/tools, swarms/utils)"]
B --> C["Reasoning & Specialized Agents<br/>(swarms/agents)"]
C --> D["Multi-Agent Structures<br/>(swarms/structs)"]
D --> E["Communication, Artifacts, Prompts, Telemetry, Schemas, CLI"]
subgraph Folders
A1["agents"]
A2["tools"]
A3["structs"]
A4["utils"]
A5["telemetry"]
A6["schemas"]
A7["prompts"]
A8["artifacts"]
A9["communication"]
A10["cli"]
end
%% Folder showcase
subgraph "swarms/"
A1
A2
A3
A4
A5
A6
A7
A8
A9
A10
end
%% Connect folder showcase to main flow
A1 -.-> A
A2 -.-> B
A3 -.-> D
A4 -.-> B
A5 -.-> E
A6 -.-> E
A7 -.-> E
A8 -.-> E
A9 -.-> E
A10 -.-> E
```
---
## Folder-by-Folder Breakdown
### `agents/`
**Purpose:** Defines all agent classes, including base agents, reasoning agents, tool agents, judge agents, and more.
**Highlights:**
- Modular agent design for extensibility.
- Support for YAML-based agent creation and configuration. See [YAML Agent Creation](https://docs.swarms.world/en/latest/swarms/agents/create_agents_yaml/)
- Specialized agents for self-consistency, evaluation, and domain-specific tasks.
- **Example:**
- `ReasoningAgent`, `ToolAgent`, `JudgeAgent`, `ConsistencyAgent`, `OpenAIAssistant`, etc.
- [Agents Overview](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/)
### `tools/`
**Purpose:** Houses all tool-related logic, including tool registry, function calling, tool schemas, and integration with external
APIs.
**Highlights:**
- Tools can be dynamically registered and called by agents.
- Support for OpenAI function calling, Cohere, and custom tool schemas.
- Utilities for parsing, formatting, and executing tool calls.
- **Example:**
- `base_tool.py`, `tool_registry.py`, `mcp_client_call.py`, `func_calling_utils.py`, etc.
- [Tools Reference](https://docs.swarms.world/en/latest/swarms/tools/tools_examples/)
- [What are tools?](https://docs.swarms.world/en/latest/swarms/tools/build_tool/)
### `structs/`
**Purpose:** Implements multi-agent structures, workflows, routers, registries, and orchestration logic.
**Highlights:**
- Swarms for majority voting, round-robin, hierarchical delegation, spreadsheet processing, and more.
- Workflow orchestration (sequential, concurrent, graph-based).
- Utilities for agent matching, rearrangement, and evaluation.
- **Example:**
- `MajorityVotingSwarm`, `HierarchicalSwarm`, `SwarmRouter`, `SequentialWorkflow`, `ConcurrentWorkflow`, etc.
- [Custom Multi Agent Architectures](https://docs.swarms.world/en/latest/swarms/structs/custom_swarm/)
- [SwarmRouter](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/)
- [AgentRearrange](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/)
### `utils/`
**Purpose:** Provides utility functions, memory management, caching, wrappers, and helpers used throughout the framework.
**Highlights:**
- Memory and caching for agents and tools. See [Integrating RAG with Agents](https://docs.swarms.world/en/latest/swarms/memory/diy_memory/)
- Wrappers for concurrency, logging, and data processing.
- General-purpose utilities for string, file, and data manipulation.
**Example:**
- `agent_cache.py`, `concurrent_wrapper.py`, `file_processing.py`, `formatter.py`, etc.
### `telemetry/`
**Purpose:** Handles telemetry, logging, monitoring, and bootup routines for the framework.
**Highlights:**
- Centralized logging and execution tracking.
- Bootup routines for initializing the framework.
- Utilities for monitoring agent and swarm performance.
- **Example:**
- `bootup.py`, `log_executions.py`, `main.py`.
### `schemas/`
**Purpose:** Defines data schemas for agents, tools, completions, and communication protocols.
**Highlights:**
- Ensures type safety and consistency across the framework.
- Pydantic-based schemas for validation and serialization.
- Schemas for agent classes, tool calls, completions, and more.
**Example:**
- `agent_class_schema.py`, `tool_schema_base_model.py`, `agent_completion_response.py`, etc.
### `prompts/`
**Purpose:** Contains prompt templates, system prompts, and agent-specific prompts for LLM-based agents.
**Highlights:**
- Modular prompt design for easy customization.
- Support for multi-modal, collaborative, and domain-specific prompts.
- Templates for system, task, and conversational prompts.
**Example:**
- `prompt.py`, `reasoning_prompt.py`, `multi_agent_collab_prompt.py`, etc.
- [Prompts Management](https://docs.swarms.world/en/latest/swarms/prompts/main/)
### `artifacts/`
**Purpose:** Manages the creation, storage, and retrieval of artifacts (outputs, files, logs) generated by agents and swarms.
**Highlights:**
- Artifact management for reproducibility and traceability.
- Support for various output types and formats.
**Example:**
- `main_artifact.py`.
### `communication/`
**Purpose:** Provides wrappers for inter-agent communication, database access, message passing, and integration with external systems.
**Highlights:**
- Support for Redis, DuckDB, Pulsar, Supabase, and more.
- Abstractions for message passing and data exchange between agents.
**Example:**
- `redis_wrap.py`, `duckdb_wrap.py`, `base_communication.py`, etc.
- [Communication Structure](https://docs.swarms.world/en/latest/swarms/structs/conversation/)
### `cli/`
**Purpose:** Command-line utilities for agent creation, management, and orchestration.
**Highlights:**
- Scripts for onboarding, agent creation, and management.
- CLI entry points for interacting with the framework.
**Example:**
- `main.py`, `create_agent.py`, `onboarding_process.py`.
- [CLI Documentation](https://docs.swarms.world/en/latest/swarms/cli/main/)
---
## How the System Works Together
The Swarms protocol is designed for composability. Agents can be created and configured independently, then composed into larger structures (swarms) for collaborative or competitive workflows. Tools and memory modules are injected into agents as needed, enabling them to perform complex tasks and retain context. Multi-agent structures orchestrate the flow of information and decision-making, while supporting components (communication, telemetry, artifacts, etc.) ensure robustness, observability, and extensibility.
For example, a typical workflow might involve:
- Creating a set of specialized agents (e.g., data analyst, summarizer, judge).
- Registering tools (e.g., LLM API, database access, web search) and memory modules.
- Composing agents into a `MajorityVotingSwarm` for collaborative decision-making.
- Using communication wrappers to exchange data between agents and external systems.
- Logging all actions and outputs for traceability and debugging.
For more advanced examples, see the [Examples Overview](https://docs.swarms.world/en/latest/examples/index/).
---
## Swarms Framework Philosophy
Swarms is built on the following principles:
- **Modularity:** Every component (agent, tool, prompt, schema) is a module that can be extended or replaced.
- **Composability:** Agents and tools can be composed into larger structures for complex workflows.
- **Observability:** Telemetry and artifact management ensure that all actions are traceable and debuggable.
- **Extensibility:** New agents, tools, and workflows can be added with minimal friction.
- **Production-Readiness:** The framework is designed for reliability, scalability, and real-world deployment.
For more on the philosophy and architecture, see [Development Philosophy & Principles](https://docs.swarms.world/en/latest/swarms/concept/philosophy/) and [Understanding Swarms Architecture](https://docs.swarms.world/en/latest/swarms/concept/framework_architecture/).
---
## Further Reading & References
- [Swarms Docs Home](https://docs.swarms.world/en/latest/)
- [Quickstart for Agents](https://docs.swarms.world/en/latest/swarms/agents/)
- [Agent API Reference](https://docs.swarms.world/en/latest/swarms/structs/agent/)
- [Tools Overview](https://docs.swarms.world/en/latest/swarms_tools/overview/)
- [BaseTool Reference](https://docs.swarms.world/en/latest/swarms/tools/base_tool/)
- [Reasoning Agents Overview](https://docs.swarms.world/en/latest/swarms/agents/reasoning_agents_overview/)
- [Multi-Agent Architectures Overview](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/)
- [Examples Overview](https://docs.swarms.world/en/latest/examples/index/)
- [CLI Documentation](https://docs.swarms.world/en/latest/swarms/cli/main/)
- [Prompts Management](https://docs.swarms.world/en/latest/swarms/prompts/main/)
- [Development Philosophy & Principles](https://docs.swarms.world/en/latest/swarms/concept/philosophy/)
- [Understanding Swarms Architecture](https://docs.swarms.world/en/latest/swarms/concept/framework_architecture/)
- [SIP Guidelines and Template](https://docs.swarms.world/en/latest/protocol/sip/)
# Conclusion
The Swarms protocol provides a robust foundation for building intelligent, collaborative, and autonomous systems. By organizing the codebase into clear, modular folders and defining a logical flow from agents to multi-agent structures, Swarms enables rapid development and deployment of advanced AI solutions. Whether you are building a simple automation or a complex multi-agent application, the Swarms architecture provides the tools and abstractions you need to succeed.

@ -0,0 +1,159 @@
# Swarms Improvement Proposal (SIP) Guidelines
A simplified process for proposing new functionality and enhancements to the Swarms framework.
## What is a SIP?
A **Swarms Improvement Proposal (SIP)** is a design document that describes a new feature, enhancement, or change to the Swarms framework. SIPs serve as the primary mechanism for proposing significant changes, collecting community feedback, and documenting design decisions.
The SIP author is responsible for building consensus within the community and documenting the proposal clearly and concisely.
## When to Submit a SIP
Consider submitting a SIP for:
- **New Agent Types or Behaviors**: Adding new agent architectures, swarm patterns, or coordination mechanisms
- **Core Framework Changes**: Modifications to the Swarms API, core classes, or fundamental behaviors
- **New Integrations**: Adding support for new LLM providers, tools, or external services
- **Breaking Changes**: Any change that affects backward compatibility
- **Complex Features**: Multi-component features that require community discussion and design review
For simple bug fixes, minor enhancements, or straightforward additions, use regular GitHub issues and pull requests instead.
## SIP Types
**Standard SIP**: Describes a new feature or change to the Swarms framework
**Process SIP**: Describes changes to development processes, governance, or community guidelines
**Informational SIP**: Provides information or guidelines to the community without proposing changes
## Submitting a SIP
1. **Discuss First**: Post your idea in [GitHub Discussions](https://github.com/kyegomez/swarms/discussions) to gauge community interest
2. **Create Issue**: Submit your SIP as a GitHub Issue with the `SIP` and `proposal` labels
3. **Follow Format**: Use the SIP template format below
4. **Engage Community**: Respond to feedback and iterate on your proposal
## SIP Format
### Required Sections
#### **SIP Header**
```
Title: [Descriptive title]
Author: [Your name and contact]
Type: [Standard/Process/Informational]
Status: Proposal
Created: [Date]
```
#### **Abstract** (200 words max)
A brief summary of what you're proposing and why.
#### **Motivation**
- What problem does this solve?
- Why can't the current framework handle this?
- What are the benefits to the Swarms ecosystem?
#### **Specification**
- Detailed technical description
- API changes or new interfaces
- Code examples showing usage
- Integration points with existing framework
#### **Implementation Plan**
- High-level implementation approach
- Breaking changes (if any)
- Migration path for existing users
- Testing strategy
#### **Alternatives Considered**
- Other approaches you evaluated
- Why you chose this solution
- Trade-offs and limitations
### Optional Sections
#### **Reference Implementation**
Link to prototype code or proof-of-concept (can be added later)
#### **Security Considerations**
Any security implications or requirements
## SIP Workflow
```
Proposal → Draft → Review → Accepted/Rejected → Final
```
1. **Proposal**: Initial submission as GitHub Issue
2. **Draft**: Maintainer assigns SIP number and `draft` label
3. **Review**: Community and maintainer review period
4. **Decision**: Accepted, rejected, or needs revision
5. **Final**: Implementation completed and merged
## SIP Status
- **Proposal**: Newly submitted, awaiting initial review
- **Draft**: Under active discussion and refinement
- **Review**: Formal review by maintainers
- **Accepted**: Approved for implementation
- **Rejected**: Not accepted (with reasons)
- **Final**: Implementation completed and merged
- **Withdrawn**: Author withdrew the proposal
## Review Process
- SIPs are reviewed during regular maintainer meetings
- Community feedback is collected via GitHub comments
- Acceptance requires:
- Clear benefit to the Swarms ecosystem
- Technical feasibility
- Community support
- Working prototype (for complex features)
## Getting Help
- **Discussions**: Use [GitHub Discussions](https://github.com/kyegomez/swarms/discussions) for questions
- **Documentation**: Check [docs.swarms.world](https://docs.swarms.world) for framework details
- **Examples**: Look at existing SIPs for reference
## SIP Template
When creating your SIP, copy this template:
```markdown
# SIP-XXX: [Title]
**Author**: [Your name] <[email]>
**Type**: Standard
**Status**: Proposal
**Created**: [Date]
## Abstract
[Brief 200-word summary]
## Motivation
[Why is this needed? What problem does it solve?]
## Specification
[Detailed technical description with code examples]
## Implementation Plan
[How will this be built? Any breaking changes?]
## Alternatives Considered
[Other approaches and why you chose this one]
## Reference Implementation
[Link to prototype code if available]
```
---
**Note**: This process is designed to be lightweight while ensuring important changes get proper community review. For questions about whether your idea needs a SIP, start a discussion in the GitHub Discussions forum.

@ -1,223 +1,251 @@
# AgentJudge

A specialized agent for evaluating and judging outputs from other agents or systems. Acts as a quality control mechanism providing objective assessments and feedback.

Based on the research paper: **"Agent-as-a-Judge: Evaluate Agents with Agents"** - [arXiv:2410.10934](https://arxiv.org/abs/2410.10934)

## Overview

The AgentJudge is designed to evaluate and critique outputs from other AI agents, providing structured feedback on quality, accuracy, and areas for improvement. It supports both single-shot evaluations and iterative refinement through multiple evaluation loops with context building.

Key capabilities:

- **Quality Assessment**: Evaluates correctness, clarity, and completeness of agent outputs
- **Structured Feedback**: Provides detailed critiques with strengths, weaknesses, and suggestions
- **Multimodal Support**: Can evaluate text outputs alongside images
- **Context Building**: Maintains evaluation context across multiple iterations
- **Batch Processing**: Efficiently processes multiple evaluations

## Architecture

```mermaid
graph TD
    A[Input Task] --> B[AgentJudge]
    B --> C{Evaluation Mode}

    C -->|step()| D[Single Eval]
    C -->|run()| E[Iterative Eval]
    C -->|run_batched()| F[Batch Eval]

    D --> G[Agent Core]
    E --> G
    F --> G

    G --> H[LLM Model]
    H --> I[Quality Analysis]
    I --> J[Feedback & Output]

    subgraph "Feedback Details"
        N[Strengths]
        O[Weaknesses]
        P[Improvements]
        Q[Accuracy Check]
    end

    J --> N
    J --> O
    J --> P
    J --> Q
```

## Class Reference

### Constructor

```python
AgentJudge(
    id: str = str(uuid.uuid4()),
    agent_name: str = "Agent Judge",
    description: str = "You're an expert AI agent judge...",
    system_prompt: str = AGENT_JUDGE_PROMPT,
    model_name: str = "openai/o1",
    max_loops: int = 1,
    verbose: bool = False,
    *args,
    **kwargs
)
```

#### Parameters

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `id` | `str` | `str(uuid.uuid4())` | Unique identifier for the judge instance |
| `agent_name` | `str` | `"Agent Judge"` | Name of the agent judge |
| `description` | `str` | `"You're an expert AI agent judge..."` | Description of the agent's role |
| `system_prompt` | `str` | `AGENT_JUDGE_PROMPT` | System instructions for evaluation |
| `model_name` | `str` | `"openai/o1"` | LLM model for evaluation |
| `max_loops` | `int` | `1` | Maximum evaluation iterations |
| `verbose` | `bool` | `False` | Enable verbose logging |

### Methods

#### step()

```python
step(
    task: str = None,
    tasks: Optional[List[str]] = None,
    img: Optional[str] = None
) -> str
```

Processes a single task or list of tasks and returns evaluation.

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `task` | `str` | `None` | Single task/output to evaluate |
| `tasks` | `List[str]` | `None` | List of tasks/outputs to evaluate |
| `img` | `str` | `None` | Path to image for multimodal evaluation |

**Returns:** `str` - Detailed evaluation response

#### run()

```python
run(
    task: str = None,
    tasks: Optional[List[str]] = None,
    img: Optional[str] = None
) -> List[str]
```

Executes evaluation in multiple iterations with context building.

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `task` | `str` | `None` | Single task/output to evaluate |
| `tasks` | `List[str]` | `None` | List of tasks/outputs to evaluate |
| `img` | `str` | `None` | Path to image for multimodal evaluation |

**Returns:** `List[str]` - List of evaluation responses from each iteration

#### run_batched()

```python
run_batched(
    tasks: Optional[List[str]] = None,
    imgs: Optional[List[str]] = None
) -> List[List[str]]
```

Executes batch evaluation of multiple tasks with corresponding images.

| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `tasks` | `List[str]` | `None` | List of tasks/outputs to evaluate |
| `imgs` | `List[str]` | `None` | List of image paths (same length as tasks) |

**Returns:** `List[List[str]]` - Evaluation responses for each task

## Examples

### Basic Usage

```python
from swarms import AgentJudge

# Initialize with default settings
judge = AgentJudge()

# Single task evaluation
result = judge.step(task="The capital of France is Paris.")
print(result)
```

### Custom Configuration

```python
from swarms import AgentJudge

# Custom judge configuration
judge = AgentJudge(
    agent_name="content-evaluator",
    model_name="gpt-4",
    max_loops=3,
    verbose=True
)

# Evaluate multiple outputs
outputs = [
    "Agent CalculusMaster: The integral of x^2 + 3x + 2 is (1/3)x^3 + (3/2)x^2 + 2x + C",
    "Agent DerivativeDynamo: The derivative of sin(x) is cos(x)",
    "Agent LimitWizard: The limit of sin(x)/x as x approaches 0 is 1"
]

evaluation = judge.step(tasks=outputs)
print(evaluation)
```

### Iterative Evaluation with Context

```python
from swarms import AgentJudge

# Multiple iterations with context building
judge = AgentJudge(max_loops=3)

# Each iteration builds on previous context
evaluations = judge.run(task="Agent output: 2+2=5")
for i, eval_result in enumerate(evaluations):
    print(f"Iteration {i+1}: {eval_result}\n")
```

### Multimodal Evaluation

```python
from swarms import AgentJudge

judge = AgentJudge()

# Evaluate with image
evaluation = judge.step(
    task="Describe what you see in this image",
    img="path/to/image.jpg"
)
print(evaluation)
```

### Batch Processing

```python
from swarms import AgentJudge

judge = AgentJudge()

# Batch evaluation with images
tasks = [
    "Describe this chart",
    "What's the main trend?",
    "Any anomalies?"
]
images = [
    "chart1.png",
    "chart2.png",
    "chart3.png"
]

# Each task evaluated independently
evaluations = judge.run_batched(tasks=tasks, imgs=images)
for i, task_evals in enumerate(evaluations):
    print(f"Task {i+1} evaluations: {task_evals}")
```

## Reference

```bibtex
@misc{zhuge2024agentasajudgeevaluateagentsagents,
      title={Agent-as-a-Judge: Evaluate Agents with Agents},
      author={Mingchen Zhuge and Changsheng Zhao and Dylan Ashley and Wenyi Wang and Dmitrii Khizbullin and Yunyang Xiong and Zechun Liu and Ernie Chang and Raghuraman Krishnamoorthi and Yuandong Tian and Yangyang Shi and Vikas Chandra and Jürgen Schmidhuber},
      year={2024},
      eprint={2410.10934},
      archivePrefix={arXiv},
      primaryClass={cs.AI},
      url={https://arxiv.org/abs/2410.10934}
}
```

@ -1,6 +1,5 @@
# Consistency Agent Documentation

The `SelfConsistencyAgent` is a specialized agent designed for generating multiple independent responses to a given task and aggregating them into a single, consistent final answer. It leverages concurrent processing to enhance efficiency and employs a majority voting mechanism to ensure the reliability of the aggregated response.

## Purpose
@ -17,24 +16,31 @@ The primary objective of the `SelfConsistencyAgent` is to provide a robust mecha
| Argument | Type | Default | Description |
|------------------------|---------|---------|-----------------------------------------------------------------------------|
| `name` | `str` | `"Self-Consistency-Agent"` | Name of the agent. |
| `description` | `str` | `"An agent that uses self consistency to generate a final answer."` | Description of the agent's purpose. |
| `system_prompt` | `str` | `CONSISTENCY_SYSTEM_PROMPT` | System prompt for the reasoning agent. |
| `model_name` | `str` | Required | The underlying language model to use. |
| `num_samples` | `int` | `5` | Number of independent responses to generate. |
| `max_loops` | `int` | `1` | Maximum number of reasoning loops per sample. |
| `majority_voting_prompt` | `Optional[str]` | `majority_voting_prompt` | Custom prompt for majority voting aggregation. |
| `eval` | `bool` | `False` | Enable evaluation mode for answer validation. |
| `output_type` | `OutputType` | `"dict"` | Format of the output. |
| `random_models_on` | `bool` | `False` | Enable random model selection for diversity. |
### Methods

- **`run`**: Generates multiple responses for the given task and aggregates them.
  - **Arguments**:
    - `task` (`str`): The input prompt.
    - `img` (`Optional[str]`, optional): Image input for vision tasks.
    - `answer` (`Optional[str]`, optional): Expected answer for validation (if eval=True).
  - **Returns**: `Union[str, Dict[str, Any]]` - The aggregated final answer.

- **`aggregation_agent`**: Aggregates a list of responses into a single final answer using majority voting.
  - **Arguments**:
    - `responses` (`List[str]`): The list of responses.
    - `prompt` (`str`, optional): Custom prompt for the aggregation agent.
    - `model_name` (`str`, optional): Model to use for aggregation.
  - **Returns**: `str` - The aggregated answer.

- **`check_responses_for_answer`**: Checks if a specified answer is present in any of the provided responses.
@ -43,6 +49,11 @@ The primary objective of the `SelfConsistencyAgent` is to provide a robust mecha
    - `answer` (`str`): The answer to look for in the responses.
  - **Returns**: `bool` - `True` if the answer is found, `False` otherwise.
- **`batched_run`**: Run the agent on multiple tasks in batch.
- **Arguments**:
- `tasks` (`List[str]`): List of tasks to be processed.
- **Returns**: `List[Union[str, Dict[str, Any]]]` - List of results for each task.
### Examples

#### Example 1: Basic Usage
@ -52,7 +63,7 @@ from swarms.agents.consistency_agent import SelfConsistencyAgent
# Initialize the agent
agent = SelfConsistencyAgent(
    name="Math-Reasoning-Agent",
    model_name="gpt-4o-mini",
    max_loops=1,
    num_samples=5
@ -75,7 +86,7 @@ from swarms.agents.consistency_agent import SelfConsistencyAgent
# Initialize the agent with a custom majority voting prompt
agent = SelfConsistencyAgent(
    name="Reasoning-Agent",
    model_name="gpt-4o-mini",
    max_loops=1,
    num_samples=5,
@ -92,4 +103,128 @@ final_answer = agent.run(task)
print("Final aggregated answer:", final_answer)
```
#### Example 3: Evaluation Mode
```python
from swarms.agents.consistency_agent import SelfConsistencyAgent
# Initialize the agent with evaluation mode
agent = SelfConsistencyAgent(
name="Validation-Agent",
model_name="gpt-4o-mini",
num_samples=3,
eval=True
)
# Run with expected answer for validation
result = agent.run("What is 2 + 2?", answer="4", eval=True)
if result is not None:
print("Validation passed:", result)
else:
print("Validation failed - expected answer not found")
```
#### Example 4: Random Models for Diversity
```python
from swarms.agents.consistency_agent import SelfConsistencyAgent
# Initialize the agent with random model selection
agent = SelfConsistencyAgent(
name="Diverse-Reasoning-Agent",
model_name="gpt-4o-mini",
num_samples=5,
random_models_on=True
)
# Run the agent
result = agent.run("What are the benefits of renewable energy?")
print("Diverse reasoning result:", result)
```
#### Example 5: Batch Processing
```python
from swarms.agents.consistency_agent import SelfConsistencyAgent
# Initialize the agent
agent = SelfConsistencyAgent(
name="Batch-Processing-Agent",
model_name="gpt-4o-mini",
num_samples=3
)
# Define multiple tasks
tasks = [
"What is the capital of France?",
"What is 15 * 23?",
"Explain photosynthesis in simple terms."
]
# Process all tasks
results = agent.batched_run(tasks)
# Print results
for i, result in enumerate(results):
print(f"Task {i+1} result: {result}")
```
## Key Features
### Self-Consistency Technique
The agent implements the self-consistency approach based on the research paper "Self-Consistency Improves Chain of Thought Reasoning in Language Models" by Wang et al. (2022). This technique:
1. **Generates Multiple Independent Responses**: Creates several reasoning paths for the same problem
2. **Analyzes Consistency**: Examines agreement among different reasoning approaches
3. **Aggregates Results**: Uses majority voting or consensus building
4. **Produces Reliable Output**: Delivers a final answer reflecting the most reliable consensus
### Benefits
- **Mitigates Random Errors**: Multiple reasoning paths reduce individual path errors
- **Reduces Bias**: Diverse approaches minimize single-method biases
- **Improves Reliability**: Consensus-based results are more trustworthy
- **Handles Complexity**: Better performance on complex problem-solving tasks
### Use Cases
- **Mathematical Problem Solving**: Where accuracy is critical
- **Decision Making**: When reliability is paramount
- **Validation Tasks**: When answers need verification
- **Complex Reasoning**: Multi-step problem solving
- **Research Questions**: Where multiple perspectives are valuable
## Technical Details
### Concurrent Execution
The agent uses `ThreadPoolExecutor` to generate multiple responses concurrently, improving performance while maintaining independence between reasoning paths.
### Aggregation Process
The aggregation uses an AI-powered agent that:
- Identifies dominant responses
- Analyzes disparities and disagreements
- Evaluates consensus strength
- Synthesizes minority insights
- Provides comprehensive recommendations
### Output Formats
The agent supports various output types:
- `"dict"`: Dictionary format with conversation history
- `"str"`: Simple string output
- `"list"`: List format
- `"json"`: JSON formatted output
## Limitations
1. **Computational Cost**: Higher `num_samples` increases processing time and cost
2. **Model Dependencies**: Performance depends on the underlying model capabilities
3. **Consensus Challenges**: May struggle with tasks where multiple valid approaches exist
4. **Memory Usage**: Concurrent execution requires more memory resources
## Best Practices
1. **Sample Size**: Use 3-7 samples for most tasks; increase for critical decisions
2. **Model Selection**: Choose models with strong reasoning capabilities
3. **Evaluation Mode**: Enable for tasks with known correct answers
4. **Custom Prompts**: Tailor majority voting prompts for specific domains
5. **Batch Processing**: Use `batched_run` for multiple related tasks
---

@ -38,9 +38,13 @@ graph TD
| `max_loops` | int | 1 | Maximum number of reasoning loops | | `max_loops` | int | 1 | Maximum number of reasoning loops |
| `swarm_type` | agent_types | "reasoning_duo" | Type of reasoning swarm to use | | `swarm_type` | agent_types | "reasoning_duo" | Type of reasoning swarm to use |
| `num_samples` | int | 1 | Number of samples for self-consistency | | `num_samples` | int | 1 | Number of samples for self-consistency |
| `output_type` | OutputType | "dict" | Format of the output | | `output_type` | OutputType | "dict-all-except-first" | Format of the output |
| `num_knowledge_items` | int | 6 | Number of knowledge items for GKP agent | | `num_knowledge_items` | int | 6 | Number of knowledge items for GKP agent |
| `memory_capacity` | int | 6 | Memory capacity for agents that support it | | `memory_capacity` | int | 6 | Memory capacity for agents that support it |
| `eval` | bool | False | Enable evaluation mode for self-consistency |
| `random_models_on` | bool | False | Enable random model selection for diversity |
| `majority_voting_prompt` | Optional[str] | None | Custom prompt for majority voting |
| `reasoning_model_name` | Optional[str] | "claude-3-5-sonnet-20240620" | Model to use for reasoning in ReasoningDuo |
### Available Agent Types ### Available Agent Types
@ -71,12 +75,15 @@ graph TD
**Required Parameters** **Required Parameters**
- model_name (list of 2) - model_name
- system_prompt - system_prompt
**Optional Parameters** **Optional Parameters**
- output_type - output_type
- reasoning_model_name (default: "claude-3-5-sonnet-20240620")
- max_loops
- img (for image input support)
=== "Self Consistency" === "Self Consistency"
**Key Features** **Key Features**
@ -84,12 +91,16 @@ graph TD
- Multiple solution generation - Multiple solution generation
- Consensus building - Consensus building
- Solution verification - Solution verification
- Concurrent execution
- AI-powered aggregation
**Best Use Cases** **Best Use Cases**
- Tasks requiring high reliability - Tasks requiring high reliability
- Problems with multiple approaches - Problems with multiple approaches
- Validation-heavy tasks - Validation-heavy tasks
- Mathematical problem solving
- Decision making scenarios
**Required Parameters** **Required Parameters**
@ -98,9 +109,12 @@ graph TD
**Optional Parameters** **Optional Parameters**
- num_samples - num_samples (default: 5)
- max_loops - max_loops (default: 1)
- output_type - output_type (default: "dict")
- eval (default: False) - Enable answer validation
- random_models_on (default: False) - Enable model diversity
- majority_voting_prompt (default: None) - Custom aggregation prompt
=== "IRE" === "IRE"
**Key Features** **Key Features**
@ -200,8 +214,39 @@ graph TD
| Method | Description | | Method | Description |
|--------|-------------| |--------|-------------|
| `select_swarm()` | Selects and initializes the appropriate reasoning swarm based on specified type | | `select_swarm()` | Selects and initializes the appropriate reasoning swarm based on specified type |
| `run(task: str)` | Executes the selected swarm's reasoning process on the given task | | `run(task: str, img: Optional[str] = None, **kwargs)` | Executes the selected swarm's reasoning process on the given task |
| `batched_run(tasks: List[str])` | Executes the reasoning process on a batch of tasks | | `batched_run(tasks: List[str], imgs: Optional[List[str]] = None, **kwargs)` | Executes the reasoning process on a batch of tasks |
### Image Support
!!! info "Multi-modal Capabilities"
The ReasoningAgentRouter supports image inputs for compatible agent types:
**Supported Parameters:**
- `img` (str, optional): Path or URL to a single image file for single task execution
- `imgs` (List[str], optional): List of image paths/URLs for batch task execution
**Compatible Agent Types:**
- `reasoning-duo` / `reasoning-agent`: Full image support for both reasoning and execution phases
- Other agent types may have varying levels of image support depending on their underlying implementation
**Usage Example:**
```python
# Single image with task
router = ReasoningAgentRouter(swarm_type="reasoning-duo")
result = router.run(
task="Describe what you see in this image",
img="path/to/image.jpg"
)
# Batch processing with images
results = router.batched_run(
tasks=["Analyze this chart", "Describe this photo"],
imgs=["chart.png", "photo.jpg"]
)
```
### Code Examples ### Code Examples
@ -217,12 +262,47 @@ graph TD
system_prompt="You are a helpful assistant that can answer questions and help with tasks.", system_prompt="You are a helpful assistant that can answer questions and help with tasks.",
max_loops=1, max_loops=1,
swarm_type="self-consistency", swarm_type="self-consistency",
num_samples=1, num_samples=3,
output_type="list" eval=False,
random_models_on=False,
majority_voting_prompt=None
) )
# Run a single task # Run a single task
result = router.run("What is the best approach to solve this problem?") result = router.run("What is the best approach to solve this problem?")
# Run with image input
result_with_image = router.run(
"Analyze this image and provide insights",
img="path/to/image.jpg"
)
```
=== "Self-Consistency Examples"
```python
# Basic self-consistency
router = ReasoningAgentRouter(
swarm_type="self-consistency",
num_samples=3,
model_name="gpt-4o-mini"
)
# Self-consistency with evaluation mode
router = ReasoningAgentRouter(
swarm_type="self-consistency",
num_samples=5,
model_name="gpt-4o-mini",
eval=True,
random_models_on=True
)
# Self-consistency with custom majority voting
router = ReasoningAgentRouter(
swarm_type="self-consistency",
num_samples=3,
model_name="gpt-4o-mini",
majority_voting_prompt="Analyze the responses and provide the most accurate answer."
)
``` ```
=== "ReflexionAgent" === "ReflexionAgent"
@ -243,6 +323,29 @@ graph TD
) )
``` ```
=== "ReasoningDuo Examples"
```python
# Basic ReasoningDuo
router = ReasoningAgentRouter(
swarm_type="reasoning-duo",
model_name="gpt-4o-mini",
reasoning_model_name="claude-3-5-sonnet-20240620"
)
# ReasoningDuo with image support
router = ReasoningAgentRouter(
swarm_type="reasoning-duo",
model_name="gpt-4o-mini",
reasoning_model_name="gpt-4-vision-preview",
max_loops=2
)
result = router.run(
"Analyze this image and explain the patterns you see",
img="data_visualization.png"
)
```
=== "AgentJudge" === "AgentJudge"
```python ```python
router = ReasoningAgentRouter( router = ReasoningAgentRouter(
@ -265,10 +368,14 @@ graph TD
2. **Performance Optimization** 2. **Performance Optimization**
- Adjust max_loops based on task complexity - Adjust max_loops based on task complexity
- Increase num_samples for higher reliability - Increase num_samples for higher reliability (3-7 for most tasks)
- Choose appropriate model_name based on task requirements - Choose appropriate model_name based on task requirements
- Enable random_models_on for diverse reasoning approaches
- Use eval mode for validation tasks with known answers
3. **Output Handling** 3. **Output Handling**
- Use appropriate output_type for your needs - Use appropriate output_type for your needs
@ -276,6 +383,24 @@ graph TD
- Handle errors gracefully - Handle errors gracefully
4. **Self-Consistency Specific**
   - Use 3-7 samples for most tasks, 7+ for critical decisions
- Enable eval mode when you have expected answers for validation
- Customize majority_voting_prompt for domain-specific aggregation
- Consider random_models_on for diverse model perspectives
5. **Multi-modal and Reasoning Configuration**
- Use vision-capable models when processing images (e.g., "gpt-4-vision-preview")
- For ReasoningDuo, set different models for reasoning vs execution via reasoning_model_name
- Ensure image paths are accessible and in supported formats (JPG, PNG, etc.)
- Consider using reasoning_model_name with specialized reasoning models for complex tasks
## Limitations ## Limitations
!!! warning "Known Limitations" !!! warning "Known Limitations"

@ -0,0 +1,426 @@
# Reasoning Agents Overview
Reasoning agents are sophisticated agents that employ advanced cognitive strategies to improve problem-solving performance beyond standard language model capabilities. Unlike traditional prompt-based approaches, reasoning agents implement structured methodologies that enable them to think more systematically, self-reflect, collaborate, and iteratively refine their responses.
These agents are inspired by cognitive science and human reasoning processes, incorporating techniques such as:
- **Multi-step reasoning**: Breaking down complex problems into manageable components
- **Self-reflection**: Evaluating and critiquing their own outputs
- **Iterative refinement**: Progressively improving solutions through multiple iterations
- **Collaborative thinking**: Using multiple reasoning pathways or agent perspectives
- **Memory integration**: Learning from past experiences and building knowledge over time
- **Meta-cognitive awareness**: Understanding their own thinking processes and limitations
## Available Reasoning Agents
| Agent Name | Type | Research Paper | Key Features | Best Use Cases | Implementation | Documentation |
|------------|------|----------------|---------------|----------------|----------------|---------------|
| **Self-Consistency Agent** | Consensus-based | [Self-Consistency Improves Chain of Thought Reasoning](https://arxiv.org/abs/2203.11171) (Wang et al., 2022) | • Multiple independent reasoning paths<br/>• Majority voting aggregation<br/>• Concurrent execution<br/>• Validation mode | • Mathematical problem solving<br/>• High-accuracy requirements<br/>• Decision making scenarios<br/>• Answer validation | `SelfConsistencyAgent` | [Guide](consistency_agent.md) |
| **Reasoning Duo** | Collaborative | Novel dual-agent architecture | • Separate reasoning and execution agents<br/>• Collaborative problem solving<br/>• Task decomposition<br/>• Cross-validation | • Complex analysis tasks<br/>• Multi-step problem solving<br/>• Tasks requiring verification<br/>• Research and planning | `ReasoningDuo` | [Guide](reasoning_duo.md) |
| **IRE Agent** | Iterative | Iterative Reflective Expansion framework | • Hypothesis generation<br/>• Path simulation<br/>• Error reflection<br/>• Dynamic revision | • Complex reasoning tasks<br/>• Research problems<br/>• Learning scenarios<br/>• Strategy development | `IterativeReflectiveExpansion` | [Guide](iterative_agent.md) |
| **Reflexion Agent** | Self-reflective | [Reflexion: Language Agents with Verbal Reinforcement Learning](https://arxiv.org/abs/2303.11366) (Shinn et al., 2023) | • Self-evaluation<br/>• Experience memory<br/>• Adaptive improvement<br/>• Learning from failures | • Continuous improvement tasks<br/>• Long-term projects<br/>• Learning scenarios<br/>• Quality refinement | `ReflexionAgent` | [Guide](reflexion_agent.md) |
| **GKP Agent** | Knowledge-based | [Generated Knowledge Prompting](https://arxiv.org/abs/2110.08387) (Liu et al., 2022) | • Knowledge generation<br/>• Multi-perspective reasoning<br/>• Information synthesis<br/>• Fact integration | • Knowledge-intensive tasks<br/>• Research questions<br/>• Fact-based reasoning<br/>• Information synthesis | `GKPAgent` | [Guide](gkp_agent.md) |
| **Agent Judge** | Evaluation | [Agent-as-a-Judge: Evaluate Agents with Agents](https://arxiv.org/abs/2410.10934) | • Quality assessment<br/>• Structured evaluation<br/>• Performance metrics<br/>• Feedback generation | • Quality control<br/>• Output evaluation<br/>• Performance assessment<br/>• Model comparison | `AgentJudge` | [Guide](agent_judge.md) |
| **REACT Agent** | Action-based | [ReAct: Synergizing Reasoning and Acting](https://arxiv.org/abs/2210.03629) (Yao et al., 2022) | • Reason-Act-Observe cycle<br/>• Memory integration<br/>• Action planning<br/>• Experience building | • Interactive tasks<br/>• Tool usage scenarios<br/>• Planning problems<br/>• Learning environments | `ReactAgent` | [Guide](react_agent.md) |
## Agent Architectures
### Self-Consistency Agent
**Description**: Implements multiple independent reasoning paths with consensus-building to improve response reliability and accuracy through majority voting mechanisms.
**Key Features**:
- Concurrent execution of multiple reasoning instances
- AI-powered aggregation and consensus analysis
- Validation mode for answer verification
- Configurable sample sizes and output formats
**Architecture Diagram**:
```mermaid
graph TD
A[Task Input] --> B[Agent Pool]
B --> C[Response 1]
B --> D[Response 2]
B --> E[Response 3]
B --> F[Response N]
C --> G[Aggregation Agent]
D --> G
E --> G
F --> G
G --> H[Majority Voting Analysis]
H --> I[Consensus Evaluation]
I --> J[Final Answer]
style A fill:#e1f5fe
style J fill:#c8e6c9
style G fill:#fff3e0
```
**Use Cases**: Mathematical problem solving, high-stakes decision making, answer validation, quality assurance processes
**Implementation**: `SelfConsistencyAgent`
**Documentation**: [Self-Consistency Agent Guide](consistency_agent.md)
---
### Reasoning Duo
**Description**: Dual-agent collaborative system that separates reasoning and execution phases, enabling specialized analysis and task completion through coordinated agent interaction.
**Key Features**:
- Separate reasoning and execution agents
- Collaborative problem decomposition
- Cross-validation between agents
- Configurable model selection for each agent
**Architecture Diagram**:
```mermaid
graph TD
A[Task Input] --> B[Reasoning Agent]
B --> C[Deep Analysis]
C --> D[Strategy Planning]
D --> E[Reasoning Output]
E --> F[Main Agent]
F --> G[Task Execution]
G --> H[Response Generation]
H --> I[Final Output]
style A fill:#e1f5fe
style B fill:#f3e5f5
style F fill:#e8f5e8
style I fill:#c8e6c9
```
**Use Cases**: Complex analysis tasks, multi-step problem solving, research and planning, verification workflows
**Implementation**: `ReasoningDuo`
**Documentation**: [Reasoning Duo Guide](reasoning_duo.md)
---
### IRE Agent (Iterative Reflective Expansion)
**Description**: Sophisticated reasoning framework employing iterative hypothesis generation, simulation, and refinement through continuous cycles of testing and meta-cognitive reflection.
**Key Features**:
- Hypothesis generation and testing
- Path simulation and evaluation
- Meta-cognitive reflection capabilities
- Dynamic strategy revision based on feedback
**Architecture Diagram**:
```mermaid
graph TD
A[Problem Input] --> B[Hypothesis Generation]
B --> C[Path Simulation]
C --> D[Outcome Evaluation]
D --> E{Satisfactory?}
E -->|No| F[Meta-Cognitive Reflection]
F --> G[Path Revision]
G --> H[Knowledge Integration]
H --> C
E -->|Yes| I[Solution Synthesis]
I --> J[Final Answer]
style A fill:#e1f5fe
style F fill:#fff3e0
style J fill:#c8e6c9
```
**Use Cases**: Complex reasoning tasks, research problems, strategy development, iterative learning scenarios
**Implementation**: `IterativeReflectiveExpansion`
**Documentation**: [IRE Agent Guide](iterative_agent.md)
---
### Reflexion Agent
**Description**: Advanced self-reflective system implementing actor-evaluator-reflector architecture for continuous improvement through experience-based learning and memory integration.
**Key Features**:
- Actor-evaluator-reflector sub-agent architecture
- Self-evaluation and quality assessment
- Experience memory and learning capabilities
- Adaptive improvement through reflection
**Architecture Diagram**:
```mermaid
graph TD
A[Task Input] --> B[Actor Agent]
B --> C[Initial Response]
C --> D[Evaluator Agent]
D --> E[Quality Assessment]
E --> F[Performance Score]
F --> G[Reflector Agent]
G --> H[Self-Reflection]
H --> I[Experience Memory]
I --> J{Max Iterations?}
J -->|No| K[Refined Response]
K --> D
J -->|Yes| L[Final Response]
style A fill:#e1f5fe
style B fill:#e8f5e8
style D fill:#fff3e0
style G fill:#f3e5f5
style L fill:#c8e6c9
```
**Use Cases**: Continuous improvement tasks, long-term projects, adaptive learning, quality refinement processes
**Implementation**: `ReflexionAgent`
**Documentation**: [Reflexion Agent Guide](reflexion_agent.md)
---
### GKP Agent (Generated Knowledge Prompting)
**Description**: Knowledge-driven reasoning system that generates relevant information before answering queries, implementing multi-perspective analysis through coordinated knowledge synthesis.
**Key Features**:
- Dynamic knowledge generation
- Multi-perspective reasoning coordination
- Information synthesis and integration
- Configurable knowledge item generation
**Architecture Diagram**:
```mermaid
graph TD
A[Query Input] --> B[Knowledge Generator]
B --> C[Generate Knowledge Item 1]
B --> D[Generate Knowledge Item 2]
B --> E[Generate Knowledge Item N]
C --> F[Reasoner Agent]
D --> F
E --> F
F --> G[Knowledge Integration]
G --> H[Reasoning Process]
H --> I[Response Generation]
I --> J[Coordinator]
J --> K[Final Answer]
style A fill:#e1f5fe
style B fill:#fff3e0
style F fill:#e8f5e8
style J fill:#f3e5f5
style K fill:#c8e6c9
```
**Use Cases**: Knowledge-intensive tasks, research questions, fact-based reasoning, information synthesis
**Implementation**: `GKPAgent`
**Documentation**: [GKP Agent Guide](gkp_agent.md)
---
### Agent Judge
**Description**: Specialized evaluation system for assessing agent outputs and system performance, providing structured feedback and quality metrics through comprehensive assessment frameworks.
**Key Features**:
- Structured evaluation methodology
- Quality assessment and scoring
- Performance metrics generation
- Configurable evaluation criteria
**Architecture Diagram**:
```mermaid
graph TD
A[Output to Evaluate] --> B[Evaluation Criteria]
A --> C[Judge Agent]
B --> C
C --> D[Quality Analysis]
D --> E[Criteria Assessment]
E --> F[Scoring Framework]
F --> G[Feedback Generation]
G --> H[Evaluation Report]
style A fill:#e1f5fe
style C fill:#fff3e0
style H fill:#c8e6c9
```
**Use Cases**: Quality control, output evaluation, performance assessment, model comparison
**Implementation**: `AgentJudge`
**Documentation**: [Agent Judge Guide](agent_judge.md)
---
### REACT Agent (Reason-Act-Observe)
**Description**: Action-oriented reasoning system implementing iterative reason-act-observe cycles with memory integration for interactive task completion and environmental adaptation.
**Key Features**:
- Reason-Act-Observe cycle implementation
- Memory integration and experience building
- Action planning and execution
- Environmental state observation
**Architecture Diagram**:
```mermaid
graph TD
A[Task Input] --> B[Memory Review]
B --> C[Current State Observation]
C --> D[Reasoning Process]
D --> E[Action Planning]
E --> F[Action Execution]
F --> G[Outcome Observation]
G --> H[Experience Storage]
H --> I{Task Complete?}
I -->|No| C
I -->|Yes| J[Final Response]
style A fill:#e1f5fe
style B fill:#f3e5f5
style D fill:#fff3e0
style J fill:#c8e6c9
```
**Use Cases**: Interactive tasks, tool usage scenarios, planning problems, learning environments
**Implementation**: `ReactAgent`
**Documentation**: [REACT Agent Guide](react_agent.md)
## Implementation Guide
### Unified Interface via Reasoning Agent Router
The `ReasoningAgentRouter` provides a centralized interface for accessing all reasoning agent implementations:
```python
from swarms.agents import ReasoningAgentRouter
# Initialize router with specific reasoning strategy
router = ReasoningAgentRouter(
swarm_type="self-consistency", # Select reasoning methodology
model_name="gpt-4o-mini",
num_samples=5, # Configuration for consensus-based methods
max_loops=3 # Configuration for iterative methods
)
# Execute reasoning process
result = router.run("Analyze the optimal solution for this complex business problem")
print(result)
```
### Direct Agent Implementation
```python
from swarms.agents import SelfConsistencyAgent, ReasoningDuo, ReflexionAgent
# Self-Consistency Agent for high-accuracy requirements
consistency_agent = SelfConsistencyAgent(
model_name="gpt-4o-mini",
num_samples=5
)
# Reasoning Duo for collaborative analysis workflows
duo_agent = ReasoningDuo(
model_names=["gpt-4o-mini", "gpt-4o"]
)
# Reflexion Agent for adaptive learning scenarios
reflexion_agent = ReflexionAgent(
model_name="gpt-4o-mini",
max_loops=3,
memory_capacity=100
)
```
## Choosing the Right Reasoning Agent
| Scenario | Recommended Agent | Why? |
|----------|------------------|-------|
| **High-stakes decisions** | Self-Consistency | Multiple validation paths ensure reliability |
| **Complex research tasks** | Reasoning Duo + GKP | Collaboration + knowledge synthesis |
| **Learning & improvement** | Reflexion | Built-in self-improvement mechanisms |
| **Mathematical problems** | Self-Consistency | Proven effectiveness on logical reasoning |
| **Quality assessment** | Agent Judge | Specialized evaluation capabilities |
| **Interactive planning** | REACT | Action-oriented reasoning cycle |
| **Iterative refinement** | IRE | Designed for progressive improvement |
## Technical Documentation
For comprehensive technical documentation on each reasoning agent implementation:
- [Self-Consistency Agent](consistency_agent.md)
- [Reasoning Duo](reasoning_duo.md)
- [IRE Agent](iterative_agent.md)
- [Reflexion Agent](reflexion_agent.md)
- [GKP Agent](gkp_agent.md)
- [Agent Judge](agent_judge.md)
- [Reasoning Agent Router](reasoning_agent_router.md)
---
Reasoning agents represent a significant advancement in enterprise agent capabilities, implementing sophisticated cognitive architectures that deliver enhanced reliability, consistency, and performance compared to traditional language model implementations.

File diff suppressed because it is too large Load Diff

@ -1,75 +1,149 @@
# Swarm Ecosystem # Swarms Ecosystem
Welcome to the Swarm Ecosystem, a comprehensive suite of tools and frameworks designed to empower developers to orchestrate swarms of autonomous agents for a variety of applications. Dive into our ecosystem below: *The Complete Enterprise-Grade Multi-Agent AI Platform*
[Full Github Link](https://github.com/kyegomez/swarm-ecosystem) ---
## **Join the Future of AI Development**
**We're Building the Operating System for the Agent Economy** - The Swarms ecosystem represents the most comprehensive, production-ready multi-agent AI platform available today. From our flagship Python framework to high-performance Rust implementations and client libraries spanning every major programming language, we provide enterprise-grade tools that power the next generation of intelligent applications.
---
## **Complete Product Portfolio**
| **Product** | **Technology** | **Status** | **Repository** | **Documentation** |
|-------------|---------------|------------|----------------|-------------------|
| **Swarms Python Framework** | Python | **Production** | [swarms](https://github.com/kyegomez/swarms) | [Docs](https://docs.swarms.world/en/latest/swarms/install/install/) |
| **Swarms Rust Framework** | Rust | **Production** | [swarms-rs](https://github.com/The-Swarm-Corporation/swarms-rs) | [Docs](https://docs.swarms.world/en/latest/swarms_rs/overview/) |
| **Python API Client** | Python | **Production** | [swarms-sdk](https://github.com/The-Swarm-Corporation/swarms-sdk) | [Docs](https://docs.swarms.world/en/latest/swarms_cloud/python_client/) |
| **TypeScript/Node.js Client** | TypeScript | **Production** | [swarms-ts](https://github.com/The-Swarm-Corporation/swarms-ts) | *Coming Soon* |
| **Go Client** | Go | **Production** | [swarms-client-go](https://github.com/The-Swarm-Corporation/swarms-client-go) | *Coming Soon* |
| **Java Client** | Java | **Production** | [swarms-java](https://github.com/The-Swarm-Corporation/swarms-java) | *Coming Soon* |
| **Kotlin Client** | Kotlin | **Q2 2025** | *In Development* | *Coming Soon* |
| **Ruby Client** | Ruby | **Q2 2025** | *In Development* | *Coming Soon* |
| **Rust Client** | Rust | **Q2 2025** | *In Development* | *Coming Soon* |
| **C#/.NET Client** | C# | **Q3 2025** | *In Development* | *Coming Soon* |
---
## **Why Choose the Swarms Ecosystem?**
### **Enterprise-Grade Architecture**
- **Production Ready**: Battle-tested in enterprise environments with 99.9%+ uptime
- **Scalable Infrastructure**: Handle millions of agent interactions with automatic scaling
- **Security First**: End-to-end encryption, API key management, and enterprise compliance
- **Observability**: Comprehensive logging, monitoring, and debugging capabilities
### **Developer Experience**
- **Multiple Language Support**: Native clients for every major programming language
## Getting Started - **Unified API**: Consistent interface across all platforms and languages
| Project | Description | Link | - **Rich Documentation**: Comprehensive guides, tutorials, and API references
| ------- | ----------- | ---- |
| **Swarms Framework** | A Python-based framework that enables the creation, deployment, and scaling of reliable swarms of autonomous agents aimed at automating complex workflows. | [Swarms Framework](https://github.com/kyegomez/swarms) |
| **Swarms Cloud** | A cloud-based service offering Swarms-as-a-Service with guaranteed 100% uptime, cutting-edge performance, and enterprise-grade reliability for seamless scaling and management of swarms. | [Swarms Cloud](https://github.com/kyegomez/swarms-cloud) |
| **Swarms Core** | Provides backend utilities focusing on concurrency, multi-threading, and advanced execution strategies, developed in Rust for maximum efficiency and performance. | [Swarms Core](https://github.com/kyegomez/swarms-core) |
| **Swarm Foundation Models** | A dedicated repository for the creation, optimization, and training of groundbreaking swarming models. Features innovative models like PSO with transformers, ant colony optimizations, and more, aiming to surpass traditional architectures like Transformers and SSMs. Open for community contributions and ideas. | [Swarm Foundation Models](https://github.com/kyegomez/swarms-pytorch) |
| **Swarm Platform** | The Swarms dashboard Platform | [Swarm Platform](https://github.com/kyegomez/swarms-platform) |
| **Swarms JS** | Swarms Framework in JS. Orchestrate any agents and enable multi-agent collaboration between various agents! | [Swarm JS](https://github.com/kyegomez/swarms-js) |
| **Swarms Memory** | Easy to use, reliable, and bleeding-edge RAG systems! | [Swarms Memory](https://github.com/kyegomez/swarms-memory) |
| **Swarms Evals** | Evaluating Swarms! | [Swarms Evals](https://github.com/kyegomez/swarms-evals) |
| **Swarms Zero** | RPC Enterprise-Grade Automation Framework | [Swarm Zero](https://github.com/kyegomez/Zero) |
---- - **Active Community**: 24/7 support through Discord, GitHub, and direct channels
## 🫶 Contributions: ### **Performance & Reliability**
The easiest way to contribute is to pick any issue with the `good first issue` tag 💪. Read the Contributing guidelines [here](/CONTRIBUTING.md). Bug Report? [File here](https://github.com/swarms/gateway/issues) | Feature Request? [File here](https://github.com/swarms/gateway/issues) - **High Throughput**: Process thousands of concurrent agent requests
Swarms is an open-source project, and contributions are VERY welcome. If you want to contribute, you can create new features, fix bugs, or improve the infrastructure. Please refer to the [CONTRIBUTING.md](https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md) and our [contributing board](https://github.com/users/kyegomez/projects/1) to participate in Roadmap discussions! - **Low Latency**: Optimized for real-time applications and user experiences
<a href="https://github.com/kyegomez/swarms/graphs/contributors"> - **Fault Tolerance**: Automatic retries, circuit breakers, and graceful degradation
<img src="https://contrib.rocks/image?repo=kyegomez/swarms" />
</a>
<a href="https://github.com/kyegomez/swarms/graphs/contributors"> - **Multi-Cloud**: Deploy on AWS, GCP, Azure, or on-premises infrastructure
<img src="https://contrib.rocks/image?repo=kyegomez/swarms-cloud" />
</a>
<a href="https://github.com/kyegomez/swarms/graphs/contributors"> ---
<img src="https://contrib.rocks/image?repo=kyegomez/swarms-platform" />
</a> ## **Join Our Growing Community**
### **Connect With Developers Worldwide**
| **Platform** | **Purpose** | **Join Link** | **Benefits** |
|--------------|-------------|---------------|--------------|
| **Discord Community** | Real-time support & discussions | [Join Discord](https://discord.gg/jM3Z6M9uMq) | • 24/7 developer support<br/>• Weekly community events<br/>• Direct access to core team<br/>• Beta feature previews |
| **Twitter/X** | Latest updates & announcements | [Follow @swarms_corp](https://x.com/swarms_corp) | • Breaking news & updates<br/>• Community highlights<br/>• Technical insights<br/>• Industry partnerships |
| **LinkedIn** | Professional network & updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | • Professional networking<br/>• Career opportunities<br/>• Enterprise partnerships<br/>• Industry insights |
| **YouTube** | Tutorials & technical content | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | • In-depth tutorials<br/>• Live coding sessions<br/>• Architecture deep dives<br/>• Community showcases |
---
## **Contribute to the Ecosystem**
### **How You Can Make an Impact**
<a href="https://github.com/kyegomez/swarms/graphs/contributors"> | **Contribution Area** | **Skills Needed** | **Impact Level** | **Getting Started** |
<img src="https://contrib.rocks/image?repo=kyegomez/swarms-js" /> |-----------------------|-------------------|------------------|---------------------|
</a> | **Core Framework Development** | Python, Rust, Systems Design | **High Impact** | [Contributing Guide](https://docs.swarms.world/en/latest/contributors/main/) |
| **Client Library Development** | Various Languages (Go, Java, TS, etc.) | **High Impact** | [Client Development](https://github.com/The-Swarm-Corporation) |
| **Documentation & Tutorials** | Technical Writing, Examples | **High Impact** | [Docs Contributing](https://docs.swarms.world/en/latest/contributors/docs/) |
| **Testing & Quality Assurance** | Testing Frameworks, QA | **Medium Impact** | [Testing Guide](https://docs.swarms.world/en/latest/swarms/framework/test/) |
| **UI/UX & Design** | Design, Frontend Development | **Medium Impact** | [Design Contributions](https://github.com/The-Swarm-Corporation/swarms/issues) |
| **Bug Reports & Feature Requests** | User Experience, Testing | **Easy Start** | [Report Issues](https://github.com/The-Swarm-Corporation/swarms/issues) |
---
## **We're Hiring Top Talent**
### **Join the Team Building the Future of the World Economy**
**Ready to work on cutting-edge agent technology that's shaping the future?** We're actively recruiting exceptional engineers, researchers, and technical leaders to join our mission of building the operating system for the agent economy.
---- | **Why Join Swarms?** | **What We Offer** |
|-----------------------|-------------------|
| **Cutting-Edge Technology** | Work on the most powerful multi-agent systems, distributed computing, and enterprise-scale infrastructure |
| **Global Impact** | Your code will power agent applications used by Fortune 500 companies and millions of developers |
| **World-Class Team** | Collaborate with top engineers, researchers, and industry experts from Google, OpenAI, and more |
| **Fast Growth** | Join a rapidly scaling company with massive market opportunity and venture backing |
## Community ### **Open Positions**
Join our growing community around the world, for real-time support, ideas, and discussions on Swarms 😊 | **Position** | **Role Description** |
|-------------------------------|----------------------------------------------------------|
| **Senior Rust Engineers** | Building high-performance agent infrastructure |
| **Python Framework Engineers**| Expanding our core multi-agent capabilities |
| **DevOps/Platform Engineers** | Scaling cloud infrastructure for millions of agents |
| **Technical Writers** | Creating world-class developer documentation |
| **Solutions Engineers** | Helping enterprises adopt multi-agent AI |
- View our official [Blog](https://docs.swarms.world) **Ready to Build the Future?** **[Apply Now at swarms.ai/hiring](https://swarms.ai/hiring)**
- Chat live with us on [Discord](https://discord.gg/kS3rwKs3ZC)
- Follow us on [Twitter](https://twitter.com/kyegomez) ---
- Connect with us on [LinkedIn](https://www.linkedin.com/company/the-swarm-corporation)
- Visit us on [YouTube](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ)
- [Join the Swarms community on Discord!](https://discord.gg/AJazBmhKnr)
- Join our Swarms Community Gathering every Thursday at 1pm NYC Time to unlock the potential of autonomous agents in automating your daily tasks [Sign up here](https://lu.ma/5p2jnc2v)
--- ---
## Discovery Call ## **Get Started Today**
Book a discovery call to learn how Swarms can lower your operating costs by 40% with swarms of autonomous agents at lightspeed. [Click here to book a time that works for you!](https://calendly.com/swarm-corp/30min?month=2023-11)
### **Quick Start Guide**
| **Step** | **Action** | **Time Required** |
|----------|------------|-------------------|
| **1** | [Install Swarms Python Framework](https://docs.swarms.world/en/latest/swarms/install/install/) | 5 minutes |
| **2** | [Run Your First Agent](https://docs.swarms.world/en/latest/swarms/examples/basic_agent/) | 10 minutes |
| **3** | [Try Multi-Agent Workflows](https://docs.swarms.world/en/latest/swarms/examples/sequential_example/) | 15 minutes |
| **4** | [Join Our Discord Community](https://discord.gg/jM3Z6M9uMq) | 2 minutes |
| **5** | [Explore Enterprise Features](https://docs.swarms.world/en/latest/swarms_cloud/swarms_api/) | 20 minutes |
## Accelerate Backlog ---
Help us accelerate our backlog by supporting us financially! Note, we're an open source corporation and so all the revenue we generate is through donations at the moment ;)
## **Enterprise Support & Partnerships**
<a href="https://polar.sh/kyegomez"><img src="https://polar.sh/embed/fund-our-backlog.svg?org=kyegomez" /></a> ### **Ready to Scale with Swarms?**
| **Contact Type** | **Best For** | **Response Time** | **Contact Information** |
|------------------|--------------|-------------------|-------------------------|
| **Technical Support** | Development questions, troubleshooting | < 24 hours | [Book Support Call](https://cal.com/swarms/swarms-technical-support) |
| **Enterprise Sales** | Custom deployments, enterprise licensing | < 4 hours | [kye@swarms.world](mailto:kye@swarms.world) |
| **Partnerships** | Integration partnerships, technology alliances | < 48 hours | [kye@swarms.world](mailto:kye@swarms.world) |
| **Investor Relations** | Investment opportunities, funding updates | By appointment | [kye@swarms.world](mailto:kye@swarms.world) |
--- ---
**Ready to build the future of AI? Start with Swarms today and join thousands of developers creating the next generation of intelligent applications.**

@ -0,0 +1,205 @@
# Agent Multi-Agent Communication Methods
The Agent class provides powerful built-in methods for facilitating communication and collaboration between multiple agents. These methods enable agents to talk to each other, pass information, and coordinate complex multi-agent workflows seamlessly.
## Overview
Multi-agent communication is essential for building sophisticated AI systems where different agents need to collaborate, share information, and coordinate their actions. The Agent class provides several methods to facilitate this communication:
| Method | Purpose | Use Case |
|--------|---------|----------|
| `talk_to` | Direct communication between two agents | Agent handoffs, expert consultation |
| `talk_to_multiple_agents` | Concurrent communication with multiple agents | Broadcasting, consensus building |
| `receive_message` | Process incoming messages from other agents | Message handling, task delegation |
| `send_agent_message` | Send formatted messages to other agents | Direct messaging, notifications |
## Features
| Feature | Description |
|---------------------------------|--------------------------------------------------------------------|
| **Direct Agent Communication** | Enable one-to-one conversations between agents |
| **Concurrent Multi-Agent Communication** | Broadcast messages to multiple agents simultaneously |
| **Message Processing** | Handle incoming messages with contextual formatting |
| **Error Handling** | Robust error handling for failed communications |
| **Threading Support** | Efficient concurrent processing using ThreadPoolExecutor |
| **Flexible Parameters** | Support for images, custom arguments, and kwargs |
---
## Core Methods
### `talk_to(agent, task, img=None, *args, **kwargs)`
Enables direct communication between the current agent and another agent. The method processes the task, generates a response, and then passes that response to the target agent.
**Parameters:**
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `agent` | `Any` | Required | The target agent instance to communicate with |
| `task` | `str` | Required | The task or message to send to the agent |
| `img` | `str` | `None` | Optional image path for multimodal communication |
| `*args` | `Any` | - | Additional positional arguments |
| `**kwargs` | `Any` | - | Additional keyword arguments |
**Returns:** `Any` - The response from the target agent
**Usage Example:**
```python
from swarms import Agent
# Create two specialized agents
researcher = Agent(
agent_name="Research-Agent",
system_prompt="You are a research specialist focused on gathering and analyzing information.",
max_loops=1,
)
analyst = Agent(
agent_name="Analysis-Agent",
system_prompt="You are an analytical specialist focused on interpreting research data.",
max_loops=1,
)
# Agent communication
research_result = researcher.talk_to(
agent=analyst,
task="Analyze the market trends for renewable energy stocks"
)
print(research_result)
```
### `talk_to_multiple_agents(agents, task, *args, **kwargs)`
Enables concurrent communication with multiple agents using ThreadPoolExecutor for efficient parallel processing.
**Parameters:**
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `agents` | `List[Union[Any, Callable]]` | Required | List of agent instances to communicate with |
| `task` | `str` | Required | The task or message to send to all agents |
| `*args` | `Any` | - | Additional positional arguments |
| `**kwargs` | `Any` | - | Additional keyword arguments |
**Returns:** `List[Any]` - List of responses from all agents (or None for failed communications)
**Usage Example:**
```python
from swarms import Agent
# Create multiple specialized agents
agents = [
Agent(
agent_name="Financial-Analyst",
system_prompt="You are a financial analysis expert.",
max_loops=1,
),
Agent(
agent_name="Risk-Assessor",
system_prompt="You are a risk assessment specialist.",
max_loops=1,
),
Agent(
agent_name="Market-Researcher",
system_prompt="You are a market research expert.",
max_loops=1,
)
]
coordinator = Agent(
agent_name="Coordinator-Agent",
system_prompt="You coordinate multi-agent analysis.",
max_loops=1,
)
# Broadcast to multiple agents
responses = coordinator.talk_to_multiple_agents(
agents=agents,
task="Evaluate the investment potential of Tesla stock"
)
# Process responses
for i, response in enumerate(responses):
if response:
print(f"Agent {i+1} Response: {response}")
else:
print(f"Agent {i+1} failed to respond")
```
### `receive_message(agent_name, task, *args, **kwargs)`
Processes incoming messages from other agents with proper context formatting.
**Parameters:**
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `agent_name` | `str` | Required | Name of the sending agent |
| `task` | `str` | Required | The message content |
| `*args` | `Any` | - | Additional positional arguments |
| `**kwargs` | `Any` | - | Additional keyword arguments |
**Returns:** `Any` - The agent's response to the received message
**Usage Example:**
```python
from swarms import Agent
# Create an agent that can receive messages
recipient_agent = Agent(
agent_name="Support-Agent",
system_prompt="You provide helpful support and assistance.",
max_loops=1,
)
# Simulate receiving a message from another agent
response = recipient_agent.receive_message(
agent_name="Customer-Service-Agent",
task="A customer is asking about refund policies. Can you help?"
)
print(response)
```
### `send_agent_message(agent_name, message, *args, **kwargs)`
Sends a formatted message from the current agent to a specified target agent.
**Parameters:**
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `agent_name` | `str` | Required | Name of the target agent |
| `message` | `str` | Required | The message to send |
| `*args` | `Any` | - | Additional positional arguments |
| `**kwargs` | `Any` | - | Additional keyword arguments |
**Returns:** `Any` - The result of sending the message
**Usage Example:**
```python
from swarms import Agent
sender_agent = Agent(
agent_name="Notification-Agent",
system_prompt="You send notifications and updates.",
max_loops=1,
)
# Send a message to another agent
result = sender_agent.send_agent_message(
agent_name="Task-Manager-Agent",
message="Task XYZ has been completed successfully"
)
print(result)
```
This comprehensive guide covers all aspects of multi-agent communication using the Agent class methods. These methods provide the foundation for building sophisticated multi-agent systems with robust communication capabilities.

@ -1,287 +0,0 @@
# BaseWorkflow
The `BaseWorkflow` class serves as a foundational structure for defining and managing workflows. It allows users to add, remove, update, and manage tasks and agents within a workflow, offering flexibility and extensibility for various applications.
### Key Concepts
- **Agents**: Entities participating in the workflow.
- **Tasks**: Units of work to be executed within the workflow.
- **Models**: Computational models used within the workflow.
- **Workflow State**: The state of the workflow, which can be saved and restored.
## Attributes
### Arguments
| Argument | Type | Default | Description |
|----------|------|---------|-------------|
| `agents` | `List[Agent]` | `None` | A list of agents participating in the workflow. |
| `task_pool` | `List[Task]` | `None` | A list of tasks in the workflow. |
| `models` | `List[Any]` | `None` | A list of models used in the workflow. |
| `*args` | | | Variable length argument list. |
| `**kwargs` | | | Arbitrary keyword arguments. |
### Attributes
| Attribute | Type | Description |
|-----------|------|-------------|
| `agents` | `List[Agent]` | A list of agents participating in the workflow. |
| `task_pool` | `List[Task]` | A list of tasks in the workflow. |
| `models` | `List[Any]` | A list of models used in the workflow. |
## Methods
### add_task
Adds a task or a list of tasks to the task pool.
**Arguments:**
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `task` | `Task` | `None` | A single task to add. |
| `tasks` | `List[Task]` | `None` | A list of tasks to add. |
**Raises:**
- `ValueError`: If neither task nor tasks are provided.
**Examples:**
```python
workflow = BaseWorkflow()
task1 = Task(description="Task 1")
task2 = Task(description="Task 2")
# Adding a single task
workflow.add_task(task=task1)
# Adding multiple tasks
workflow.add_task(tasks=[task1, task2])
```
### add_agent
Adds an agent to the workflow.
**Arguments:**
| Parameter | Type | Description |
|-----------|------|-------------|
| `agent` | `Agent` | The agent to add to the workflow. |
**Examples:**
```python
workflow = BaseWorkflow()
agent = Agent(name="Agent 1")
# Adding an agent to the workflow
workflow.add_agent(agent=agent)
```
### run
Abstract method to run the workflow.
### __sequential_loop
Abstract method for the sequential loop.
### __log
Logs a message if verbose mode is enabled.
**Arguments:**
| Parameter | Type | Description |
|-----------|------|-------------|
| `message` | `str` | The message to log. |
### __str__
Returns a string representation of the workflow.
### __repr__
Returns a string representation of the workflow for debugging.
### reset
Resets the workflow by clearing the results of each task.
**Examples:**
```python
workflow = BaseWorkflow()
workflow.reset()
```
### get_task_results
Returns the results of each task in the workflow.
**Returns:**
| Return Type | Description |
|-------------|-------------|
| `Dict[str, Any]` | The results of each task in the workflow. |
**Examples:**
```python
workflow = BaseWorkflow()
results = workflow.get_task_results()
```
### remove_task
Removes a task from the workflow.
**Arguments:**
| Parameter | Type | Description |
|-----------|------|-------------|
| `task` | `str` | The description of the task to remove. |
**Examples:**
```python
workflow = BaseWorkflow()
workflow.remove_task(task="Task 1")
```
### update_task
Updates the arguments of a task in the workflow.
**Arguments:**
| Parameter | Type | Description |
|-----------|------|-------------|
| `task` | `str` | The description of the task to update. |
| `**updates` | | The updates to apply to the task. |
**Raises:**
- `ValueError`: If the task is not found in the workflow.
**Examples:**
```python
workflow = BaseWorkflow()
task = Task(description="Task 1", kwargs={"param": 1})
# Adding a task to the workflow
workflow.add_task(task=task)
# Updating the task
workflow.update_task("Task 1", param=2)
```
### delete_task
Deletes a task from the workflow.
**Arguments:**
| Parameter | Type | Description |
|-----------|------|-------------|
| `task` | `str` | The description of the task to delete. |
**Raises:**
- `ValueError`: If the task is not found in the workflow.
**Examples:**
```python
workflow = BaseWorkflow()
task = Task(description="Task 1")
# Adding a task to the workflow
workflow.add_task(task=task)
# Deleting the task
workflow.delete_task("Task 1")
```
### save_workflow_state
Saves the workflow state to a json file.
**Arguments:**
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `filepath` | `Optional[str]` | `"sequential_workflow_state.json"` | The path to save the workflow state to. |
**Examples:**
```python
workflow = BaseWorkflow()
workflow.save_workflow_state(filepath="workflow_state.json")
```
### add_objective_to_workflow
Adds an objective to the workflow.
**Arguments:**
| Parameter | Type | Description |
|-----------|------|-------------|
| `task` | `str` | The description of the task. |
| `**kwargs` | | Additional keyword arguments for the task. |
**Examples:**
```python
workflow = BaseWorkflow()
workflow.add_objective_to_workflow(task="New Objective", agent=agent, args=[], kwargs={})
```
### load_workflow_state
Loads the workflow state from a json file and restores the workflow state.
**Arguments:**
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `filepath` | `str` | `None` | The path to load the workflow state from. |
**Examples:**
```python
workflow = BaseWorkflow()
workflow.load_workflow_state(filepath="workflow_state.json")
```
### workflow_dashboard
Displays a dashboard for the workflow.
**Arguments:**
| Parameter | Type | Description |
|-----------|------|-------------|
| `**kwargs` | | Additional keyword arguments to pass to the dashboard. |
**Examples:**
```python
workflow = BaseWorkflow()
workflow.workflow_dashboard()
```
### workflow_bootup
Initializes the workflow.
**Examples:**
```python
workflow = BaseWorkflow()
workflow.workflow_bootup()
```

@ -1,259 +1,801 @@
### Title: Building Custom Swarms with Multiple Agents: A Comprehensive Guide for Swarm Engineers # Building Custom Swarms: A Comprehensive Guide for Swarm Engineers
#### Introduction ## Introduction
As artificial intelligence and machine learning continue to grow in complexity and applicability, building systems that can harness multiple agents to solve complex tasks becomes more critical. Swarm engineering enables AI agents to collaborate and solve problems autonomously in diverse fields such as finance, marketing, operations, and even creative industries. In this guide, we'll focus on how to build a custom swarm system that integrates multiple agents into a cohesive system capable of solving tasks collaboratively.
The swarm we'll design will leverage Python, use types for better code structure, and feature logging with the powerful **loguru** logging library. We'll break down how to define and initialize swarms, make them scalable, and create methods like `run(task: str)` to trigger their execution. As artificial intelligence and machine learning continue to grow in complexity and applicability, building systems that can harness multiple agents to solve complex tasks becomes more critical. Swarm engineering enables AI agents to collaborate and solve problems autonomously in diverse fields such as finance, marketing, operations, and even creative industries.
By the end of this article, you will have a complete understanding of: This comprehensive guide covers how to build a custom swarm system that integrates multiple agents into a cohesive system capable of solving tasks collaboratively. We'll cover everything from basic swarm structure to advanced features like conversation management, logging, error handling, and scalability.
- What swarms are and how they can be built. By the end of this guide, you will have a complete understanding of:
- How to intake multiple agents using a flexible class. - What swarms are and how they can be built
- How to run tasks across agents and capture their outputs. - How to create agents and integrate them into swarms
- How to implement proper conversation management for message storage
- Best practices for error handling, logging, and optimization
- How to make swarms scalable and production-ready
- Best practices for error handling, logging, and optimization.
--- ---
### 1. Understanding the Concept of a Swarm ## Overview of Swarm Architecture
A **swarm** refers to a collection of agents that collaborate to solve a problem. Each agent in the swarm performs part of the task, either independently or by communicating with other agents. Swarms are ideal for: A **Swarm** refers to a collection of agents that collaborate to solve a problem. Each agent in the swarm performs part of the task, either independently or by communicating with other agents. Swarms are ideal for:
- **Scalability**: You can add or remove agents dynamically based on the task's complexity. - **Scalability**: You can add or remove agents dynamically based on the task's complexity
- **Flexibility**: Each agent can be designed to specialize in different parts of the problem, offering modularity. - **Flexibility**: Each agent can be designed to specialize in different parts of the problem, offering modularity
- **Autonomy**: Agents in a swarm can operate autonomously, reducing the need for constant supervision. - **Autonomy**: Agents in a swarm can operate autonomously, reducing the need for constant supervision
- **Conversation Management**: All interactions are tracked and stored for analysis and continuity
We'll be using Python as the primary programming language and will structure the swarm class using clean, reusable code principles.
--- ---
### 2. Designing the Swarm Class: Intake Multiple Agents ## Core Requirements for Swarm Classes
Every Swarm class must adhere to these fundamental requirements:
### Required Methods and Attributes
- **`run(task: str, img: str, *args, **kwargs)` method**: The primary execution method for tasks
- **`name`**: A descriptive name for the swarm
- **`description`**: A clear description of the swarm's purpose
- **`agents`**: A list of callables representing the agents
- **`conversation`**: A conversation structure for message storage and history management
We'll begin by creating a base class for our swarm. This class will intake multiple agents and define a `run` method, which is the core method for executing tasks across the swarm. Each agent is defined by its specific behavior or "intelligence" to complete part of the task.
#### 2.1 Importing the Required Libraries and Dependencies ### Required Agent Structure
We'll rely on the **loguru** logging library, Pydantic for metadata handling, and standard Python typing. Each Agent within the swarm must contain:
- **`agent_name`**: Unique identifier for the agent
- **`system_prompt`**: Instructions that guide the agent's behavior
- **`run` method**: Method to execute tasks assigned to the agent
---
## Setting Up the Foundation
### Required Dependencies
```python ```python
from typing import List, Union from typing import List, Union, Any, Optional, Callable
from loguru import logger from loguru import logger
from swarms.structs.base_swarm import BaseSwarm from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation
from swarms.structs.agent import Agent
import concurrent.futures
import os
```
### Custom Exception Handling
```python
class SwarmExecutionError(Exception): class SwarmExecutionError(Exception):
"""Custom exception for handling swarm execution errors.""" """Custom exception for handling swarm execution errors."""
pass pass
class AgentValidationError(Exception):
"""Custom exception for agent validation errors."""
pass
``` ```
#### 2.2 Defining the Swarm Class ---
## Building the Custom Swarm Class
The class `CustomSwarm` will take in a list of agents. The agents will be instances of `BaseSwarm` (or callable functions). The `run(task: str)` method will delegate tasks to each agent in the swarm and handle any errors or retries. ### Basic Swarm Structure
```python ```python
class CustomSwarm: class CustomSwarm(BaseSwarm):
def __init__(self, agents: List[BaseSwarm]): """
A custom swarm class to manage and execute tasks with multiple agents.
This swarm integrates conversation management for tracking all agent interactions,
provides error handling, and supports both sequential and concurrent execution.
Attributes:
name (str): The name of the swarm.
description (str): A brief description of the swarm's purpose.
agents (List[Callable]): A list of callables representing the agents.
conversation (Conversation): Conversation management for message storage.
max_workers (int): Maximum number of concurrent workers for parallel execution.
autosave_conversation (bool): Whether to automatically save conversation history.
"""
def __init__(
self,
name: str,
description: str,
agents: List[Callable],
max_workers: int = 4,
autosave_conversation: bool = True,
conversation_config: Optional[dict] = None,
):
""" """
Initializes the CustomSwarm with a list of agents. Initialize the CustomSwarm with its name, description, and agents.
Args: Args:
agents (List[BaseSwarm]): A list of agent objects that inherit from BaseSwarm. name (str): The name of the swarm.
description (str): A description of the swarm.
agents (List[Callable]): A list of callables that provide the agents for the swarm.
max_workers (int): Maximum number of concurrent workers.
autosave_conversation (bool): Whether to automatically save conversations.
conversation_config (dict): Configuration for conversation management.
""" """
super().__init__(name=name, description=description, agents=agents)
self.name = name
self.description = description
self.agents = agents self.agents = agents
self.max_workers = max_workers
self.autosave_conversation = autosave_conversation
# Initialize conversation management
# See: https://docs.swarms.world/swarms/structs/conversation/
conversation_config = conversation_config or {}
self.conversation = Conversation(
id=f"swarm_{name}_{int(time.time())}",
name=f"{name}_conversation",
autosave=autosave_conversation,
save_enabled=True,
time_enabled=True,
**conversation_config
)
# Validate agents and log initialization
self.validate_agents() self.validate_agents()
logger.info(f"🚀 CustomSwarm '{self.name}' initialized with {len(self.agents)} agents")
# Add swarm initialization to conversation history
self.conversation.add(
role="System",
content=f"Swarm '{self.name}' initialized with {len(self.agents)} agents: {[getattr(agent, 'agent_name', 'Unknown') for agent in self.agents]}"
)
def validate_agents(self): def validate_agents(self):
"""Validates that each agent has a 'run' method.""" """
for agent in self.agents: Validates that each agent has the required methods and attributes.
if not hasattr(agent, 'run'):
raise AttributeError(f"Agent {agent} does not have a 'run' method.")
logger.info(f"Agent {agent} validated successfully.")
def run(self, task: str): Raises:
AgentValidationError: If any agent fails validation.
""" """
Runs the task across all agents in the swarm. for i, agent in enumerate(self.agents):
# Check for required run method
if not hasattr(agent, 'run'):
raise AgentValidationError(f"Agent at index {i} does not have a 'run' method.")
Args: # Check for agent_name attribute
task (str): The task to pass to each agent. if not hasattr(agent, 'agent_name'):
logger.warning(f"Agent at index {i} does not have 'agent_name' attribute. Using 'Agent_{i}'")
agent.agent_name = f"Agent_{i}"
logger.info(f"✅ Agent '{agent.agent_name}' validated successfully.")
def run(self, task: str, img: str = None, *args: Any, **kwargs: Any) -> Any:
""" """
logger.info(f"Running task '{task}' across all agents in the swarm.") Execute a task using the swarm and its agents with conversation tracking.
for agent in self.agents:
try:
agent.run(task)
logger.info(f"Agent {agent} successfully completed the task.")
except Exception as e:
logger.error(f"Agent {agent} failed to run task: {e}")
raise SwarmExecutionError(f"Execution failed for {agent}. Task: {task}")
```
### 3. Adding Logging and Error Handling with `loguru` Args:
task (str): The task description.
img (str): The image input (optional).
*args: Additional positional arguments for customization.
**kwargs: Additional keyword arguments for fine-tuning behavior.
Logging is crucial for production-grade systems, especially when managing complex tasks that involve multiple agents. **Loguru** is a simple and efficient logging library that allows us to log everything from information messages to errors. Returns:
Any: The result of the task execution, aggregated from all agents.
"""
logger.info(f"🎯 Running task '{task}' across {len(self.agents)} agents in swarm '{self.name}'")
# Add task to conversation history
self.conversation.add(
role="User",
content=f"Task: {task}" + (f" | Image: {img}" if img else ""),
category="input"
)
try:
# Execute task across all agents
results = self._execute_agents(task, img, *args, **kwargs)
# Add results to conversation
self.conversation.add(
role="Swarm",
content=f"Task completed successfully. Processed by {len(results)} agents.",
category="output"
)
logger.success(f"✅ Task completed successfully by swarm '{self.name}'")
return results
except Exception as e:
error_msg = f"❌ Task execution failed in swarm '{self.name}': {str(e)}"
logger.error(error_msg)
# Add error to conversation
self.conversation.add(
role="System",
content=f"Error: {error_msg}",
category="error"
)
raise SwarmExecutionError(error_msg)
def _execute_agents(self, task: str, img: str = None, *args, **kwargs) -> List[Any]:
"""
Execute the task across all agents with proper conversation tracking.
```python Args:
from loguru import logger task (str): The task to execute.
img (str): Optional image input.
class CustomSwarm: Returns:
def __init__(self, agents: List[BaseSwarm]): List[Any]: Results from all agents.
self.agents = agents """
logger.info("CustomSwarm initialized with agents.") results = []
self.validate_agents()
def run(self, task: str):
logger.info(f"Task received: {task}")
for agent in self.agents: for agent in self.agents:
try: try:
agent.run(task) # Execute agent task
logger.success(f"Agent {agent} completed task successfully.") result = agent.run(task, img, *args, **kwargs)
except Exception as e: results.append(result)
logger.error(f"Error while running task '{task}' for {agent}: {e}")
raise SwarmExecutionError(f"Execution failed for {agent}")
```
### 4. Running Tasks Across Multiple Agents # Add agent response to conversation
self.conversation.add(
role=agent.agent_name,
content=result,
category="agent_output"
)
The `run(task: str)` method will handle distributing the task to each agent in the swarm. Each agent's `run` method is expected to take a task as input and perform its specific logic. We can add further customization by allowing each agent to return output, which can be collected for later analysis. logger.info(f"✅ Agent '{agent.agent_name}' completed task successfully")
#### 4.1 Example of Integrating Agents except Exception as e:
error_msg = f"Agent '{agent.agent_name}' failed: {str(e)}"
logger.error(error_msg)
Let's take a look at how we can define agents using the `BaseSwarm` class and integrate them into the swarm. # Add agent error to conversation
self.conversation.add(
role=agent.agent_name,
content=f"Error: {error_msg}",
category="agent_error"
)
```python # Continue with other agents but log the failure
class FinancialAgent(BaseSwarm): results.append(f"FAILED: {error_msg}")
def run(self, task: str):
logger.info(f"FinancialAgent processing task: {task}") return results
# Custom logic for financial analysis
return f"FinancialAgent response to task: {task}"
class MarketingAgent(BaseSwarm):
def run(self, task: str):
logger.info(f"MarketingAgent processing task: {task}")
# Custom logic for marketing analysis
return f"MarketingAgent response to task: {task}"
``` ```
Now, we initialize the swarm with these agents: ### Enhanced Swarm with Concurrent Execution
```python ```python
if __name__ == "__main__": def run_concurrent(self, task: str, img: str = None, *args: Any, **kwargs: Any) -> List[Any]:
agents = [FinancialAgent(), MarketingAgent()] """
swarm = CustomSwarm(agents) Execute a task using concurrent execution for better performance.
swarm.run("Analyze Q3 financial report and marketing impact.")
```
### 5. Enhancing the Swarm with Concurrent Execution
When dealing with large or time-consuming tasks, running agents concurrently (in parallel) can significantly improve performance. We can achieve this by utilizing Python's **concurrent.futures** or **threading** libraries. Args:
task (str): The task description.
img (str): The image input (optional).
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
#### 5.1 Running Swarms Concurrently Returns:
List[Any]: Results from all agents executed concurrently.
"""
logger.info(f"🚀 Running task concurrently across {len(self.agents)} agents")
# Add task to conversation
self.conversation.add(
role="User",
content=f"Concurrent Task: {task}" + (f" | Image: {img}" if img else ""),
category="input"
)
results = []
with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
# Submit all agent tasks
future_to_agent = {
executor.submit(self._run_single_agent, agent, task, img, *args, **kwargs): agent
for agent in self.agents
}
# Collect results as they complete
for future in concurrent.futures.as_completed(future_to_agent):
agent = future_to_agent[future]
try:
result = future.result()
results.append(result)
```python # Add to conversation
from concurrent.futures import ThreadPoolExecutor, as_completed self.conversation.add(
role=agent.agent_name,
content=result,
category="agent_output"
)
class CustomSwarm: except Exception as e:
def __init__(self, agents: List[BaseSwarm], max_workers: int = 4): error_msg = f"Concurrent execution failed for agent '{agent.agent_name}': {str(e)}"
self.agents = agents logger.error(error_msg)
self.thread_pool = ThreadPoolExecutor(max_workers=max_workers) results.append(f"FAILED: {error_msg}")
logger.info("CustomSwarm initialized with concurrent execution.")
# Add error to conversation
self.conversation.add(
role=agent.agent_name,
content=f"Error: {error_msg}",
category="agent_error"
)
# Add completion summary
self.conversation.add(
role="Swarm",
content=f"Concurrent task completed. {len(results)} agents processed.",
category="output"
)
return results
def _run_single_agent(self, agent: Callable, task: str, img: str = None, *args, **kwargs) -> Any:
"""
Execute a single agent with error handling.
def run(self, task: str): Args:
futures = [] agent: The agent to execute.
for agent in self.agents: task (str): The task to execute.
futures.append(self.thread_pool.submit(agent.run, task)) img (str): Optional image input.
for future in as_completed(futures): Returns:
result = future.result() Any: The agent's result.
logger.info(f"Agent result: {result}") """
try:
return agent.run(task, img, *args, **kwargs)
except Exception as e:
logger.error(f"Agent '{getattr(agent, 'agent_name', 'Unknown')}' execution failed: {str(e)}")
raise
``` ```
### 6. Advanced Error Handling and Retries ### Advanced Features
In a production system, agents might fail due to a wide range of reasons (network errors, API rate limits, etc.). To ensure resilience, we can add retry mechanisms and even fallback agents that attempt to recover the failed task.
```python ```python
class CustomSwarm: def run_with_retries(self, task: str, img: str = None, retries: int = 3, *args, **kwargs) -> List[Any]:
def run_with_retries(self, task: str, retries: int = 3):
""" """
Runs the task across all agents with retry logic. Execute a task with retry logic for failed agents.
Args: Args:
task (str): The task to run. task (str): The task to execute.
retries (int): Number of retries allowed for failed agents. img (str): Optional image input.
retries (int): Number of retries for failed agents.
Returns:
List[Any]: Results from all agents with retry attempts.
""" """
logger.info(f"🔄 Running task with {retries} retries per agent")
# Add task to conversation
self.conversation.add(
role="User",
content=f"Task with retries ({retries}): {task}",
category="input"
)
results = []
for agent in self.agents: for agent in self.agents:
attempt = 0 attempt = 0
while attempt <= retries: success = False
while attempt <= retries and not success:
try: try:
agent.run(task) result = agent.run(task, img, *args, **kwargs)
logger.success(f"Agent {agent} completed task.") results.append(result)
break success = True
# Add successful result to conversation
self.conversation.add(
role=agent.agent_name,
content=result,
category="agent_output"
)
if attempt > 0:
logger.success(f"✅ Agent '{agent.agent_name}' succeeded on attempt {attempt + 1}")
except Exception as e: except Exception as e:
logger.error(f"Agent {agent} failed on attempt {attempt + 1}. Error: {e}")
attempt += 1 attempt += 1
error_msg = f"Agent '{agent.agent_name}' failed on attempt {attempt}: {str(e)}"
logger.warning(error_msg)
# Add retry attempt to conversation
self.conversation.add(
role=agent.agent_name,
content=f"Retry attempt {attempt}: {error_msg}",
category="agent_retry"
)
if attempt > retries: if attempt > retries:
logger.error(f"Agent {agent} exhausted retries. Task failed.") final_error = f"Agent '{agent.agent_name}' exhausted all {retries} retries"
logger.error(final_error)
results.append(f"FAILED: {final_error}")
# Add final failure to conversation
self.conversation.add(
role=agent.agent_name,
content=final_error,
category="agent_error"
)
return results
def get_conversation_summary(self) -> dict:
"""
Get a summary of the conversation history and agent performance.
Returns:
dict: Summary of conversation statistics and agent performance.
"""
# Get conversation statistics
message_counts = self.conversation.count_messages_by_role()
# Count categories
category_counts = {}
for message in self.conversation.conversation_history:
category = message.get("category", "uncategorized")
category_counts[category] = category_counts.get(category, 0) + 1
# Get token counts if available
token_summary = self.conversation.export_and_count_categories()
return {
"swarm_name": self.name,
"total_messages": len(self.conversation.conversation_history),
"messages_by_role": message_counts,
"messages_by_category": category_counts,
"token_summary": token_summary,
"conversation_id": self.conversation.id,
}
def export_conversation(self, filepath: str = None) -> str:
"""
Export the conversation history to a file.
Args:
filepath (str): Optional custom filepath for export.
Returns:
str: The filepath where the conversation was saved.
"""
if filepath is None:
filepath = f"conversations/{self.name}_{self.conversation.id}.json"
self.conversation.export_conversation(filepath)
logger.info(f"📄 Conversation exported to: {filepath}")
return filepath
def display_conversation(self, detailed: bool = True):
"""
Display the conversation history in a formatted way.
Args:
detailed (bool): Whether to show detailed information.
"""
logger.info(f"💬 Displaying conversation for swarm: {self.name}")
self.conversation.display_conversation(detailed=detailed)
```

---

## Creating Agents for Your Swarm

### Basic Agent Structure

```python
class CustomAgent:
    """
    A custom agent class that integrates with the swarm conversation system.

    Attributes:
        agent_name (str): The name of the agent.
        system_prompt (str): The system prompt guiding the agent's behavior.
        conversation (Optional[Conversation]): Shared conversation for context.
    """

    def __init__(
        self,
        agent_name: str,
        system_prompt: str,
        conversation: Optional[Conversation] = None
    ):
        """
        Initialize the agent with its name and system prompt.

        Args:
            agent_name (str): The name of the agent.
            system_prompt (str): The guiding prompt for the agent.
            conversation (Optional[Conversation]): Shared conversation context.
        """
        self.agent_name = agent_name
        self.system_prompt = system_prompt
        self.conversation = conversation

    def run(self, task: str, img: str = None, *args: Any, **kwargs: Any) -> Any:
        """
        Execute a specific task assigned to the agent.

        Args:
            task (str): The task description.
            img (str): The image input for processing.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            Any: The result of the task execution.
        """
        # Add context from shared conversation if available
        context = ""
        if self.conversation:
            context = f"Previous context: {self.conversation.get_last_message_as_string()}\n\n"

        # Process the task (implement your custom logic here)
        result = f"Agent {self.agent_name} processed: {context}{task}"

        logger.info(f"🤖 Agent '{self.agent_name}' completed task")
        return result
```
### Using Swarms Framework Agents
You can also use the built-in Agent class from the Swarms framework:
```python
from swarms.structs.agent import Agent
def create_financial_agent() -> Agent:
"""Create a financial analysis agent."""
return Agent(
agent_name="FinancialAnalyst",
system_prompt="You are a financial analyst specializing in market analysis and risk assessment.",
model_name="gpt-4o-mini",
max_loops=1,
)
def create_marketing_agent() -> Agent:
"""Create a marketing analysis agent."""
return Agent(
agent_name="MarketingSpecialist",
system_prompt="You are a marketing specialist focused on campaign analysis and customer insights.",
model_name="gpt-4o-mini",
max_loops=1,
)
```
---
## Complete Implementation Example
### Setting Up Your Swarm
```python
import time
from typing import List
def create_multi_domain_swarm() -> CustomSwarm:
"""
Create a comprehensive multi-domain analysis swarm.
Returns:
CustomSwarm: A configured swarm with multiple specialized agents.
"""
# Create agents
agents = [
create_financial_agent(),
create_marketing_agent(),
Agent(
agent_name="OperationsAnalyst",
system_prompt="You are an operations analyst specializing in process optimization and efficiency.",
model_name="gpt-4o-mini",
max_loops=1,
),
]
# Configure conversation settings
conversation_config = {
"backend": "sqlite", # Use SQLite for persistent storage
"db_path": f"conversations/swarm_conversations.db",
"time_enabled": True,
"token_count": True,
}
# Create the swarm
swarm = CustomSwarm(
name="MultiDomainAnalysisSwarm",
description="A comprehensive swarm for financial, marketing, and operations analysis",
agents=agents,
max_workers=3,
autosave_conversation=True,
conversation_config=conversation_config,
)
return swarm
# Usage example
if __name__ == "__main__":
# Create and initialize the swarm
swarm = create_multi_domain_swarm()
# Execute a complex analysis task
task = """
Analyze the Q3 2024 performance data for our company:
- Revenue: $2.5M (up 15% from Q2)
- Customer acquisition: 1,200 new customers
- Marketing spend: $150K
- Operational costs: $800K
Provide insights from financial, marketing, and operations perspectives.
"""
# Run the analysis
results = swarm.run(task)
# Display results
print("\n" + "="*50)
print("SWARM ANALYSIS RESULTS")
print("="*50)
for i, result in enumerate(results):
agent_name = swarm.agents[i].agent_name
print(f"\n🤖 {agent_name}:")
print(f"📊 {result}")
# Get conversation summary
summary = swarm.get_conversation_summary()
print(f"\n📈 Conversation Summary:")
print(f" Total messages: {summary['total_messages']}")
print(f" Total tokens: {summary['token_summary']['total_tokens']}")
# Export conversation for later analysis
export_path = swarm.export_conversation()
print(f"💾 Conversation saved to: {export_path}")
```
### Advanced Usage with Concurrent Execution
```python
def run_batch_analysis():
"""Example of running multiple tasks concurrently."""
swarm = create_multi_domain_swarm()
tasks = [
"Analyze Q1 financial performance",
"Evaluate marketing campaign effectiveness",
"Review operational efficiency metrics",
"Assess customer satisfaction trends",
]
# Process all tasks concurrently
all_results = []
for task in tasks:
results = swarm.run_concurrent(task)
all_results.append({"task": task, "results": results})
return all_results
```
---
## Conversation Management Integration
The swarm uses the Swarms framework's [Conversation structure](../conversation/) for comprehensive message storage and management. This provides:
### Key Features
- **Persistent Storage**: Multiple backend options (SQLite, Redis, Supabase, etc.)
- **Message Categorization**: Organize messages by type (input, output, error, etc.)
- **Token Tracking**: Monitor token usage across conversations
- **Export/Import**: Save and load conversation histories
- **Search Capabilities**: Find specific messages or content
### Conversation Configuration Options
```python
conversation_config = {
# Backend storage options
"backend": "sqlite", # or "redis", "supabase", "duckdb", "in-memory"
# File-based storage
"db_path": "conversations/swarm_data.db",
# Redis configuration (if using Redis backend)
"redis_host": "localhost",
"redis_port": 6379,
# Features
"time_enabled": True, # Add timestamps to messages
"token_count": True, # Track token usage
"autosave": True, # Automatically save conversations
"save_enabled": True, # Enable saving functionality
}
```

### Accessing Conversation Data
```python
# Get conversation history
history = swarm.conversation.return_history_as_string()
# Search for specific content
financial_messages = swarm.conversation.search("financial")
# Export conversation data
swarm.conversation.export_conversation("analysis_session.json")
# Get conversation statistics
stats = swarm.conversation.count_messages_by_role()
token_usage = swarm.conversation.export_and_count_categories()
```
For complete documentation on conversation management, see the [Conversation Structure Documentation](../conversation/).
---
## Conclusion
Building custom swarms with proper conversation management enables you to create powerful, scalable, and maintainable multi-agent systems. The integration with the Swarms framework's conversation structure provides:
- **Complete audit trail** of all agent interactions
- **Persistent storage** options for different deployment scenarios
- **Performance monitoring** through token and message tracking
- **Easy debugging** with searchable conversation history
- **Scalable architecture** that grows with your needs

By following the patterns and best practices outlined in this guide, you can create robust swarms that handle complex tasks efficiently while maintaining full visibility into their operations.
### Key Takeaways
1. **Always implement conversation management** for tracking and auditing
2. **Use proper error handling and retries** for production resilience
3. **Implement monitoring and logging** for observability
4. **Design for scalability** with concurrent execution patterns
5. **Test thoroughly** with unit tests and integration tests
6. **Configure appropriately** for your deployment environment
For more advanced patterns and examples, explore the [Swarms Examples](../../examples/) and consider contributing your custom swarms back to the community by submitting a pull request to the [Swarms repository](https://github.com/kyegomez/swarms).
---
## Additional Resources

- [Conversation Structure Documentation](../conversation/) - Complete guide to conversation management
- [Agent Documentation](../../agents/) - Learn about creating and configuring agents
- [Multi-Agent Architectures](../overview/) - Explore other swarm patterns and architectures
- [Examples Repository](../../examples/) - Real-world swarm implementations
- [Swarms Framework GitHub](https://github.com/kyegomez/swarms) - Source code and contributions
---
# HeavySwarm Documentation
HeavySwarm is a sophisticated multi-agent orchestration system that decomposes complex tasks into specialized questions and executes them using four specialized agents: Research, Analysis, Alternatives, and Verification. The results are then synthesized into a comprehensive response.
Inspired by X.AI's Grok 4 heavy implementation, HeavySwarm provides robust task analysis through intelligent question generation, parallel execution, and comprehensive synthesis with real-time progress monitoring.
## Architecture
### System Design
The HeavySwarm follows a structured 5-phase workflow:
1. **Task Decomposition**: Complex tasks are broken down into specialized questions
2. **Question Generation**: AI-powered generation of role-specific questions
3. **Parallel Execution**: Four specialized agents work concurrently
4. **Result Collection**: Outputs are gathered and validated
5. **Synthesis**: Integration into a comprehensive final response
### Agent Specialization
- **Research Agent**: Comprehensive information gathering and synthesis
- **Analysis Agent**: Pattern recognition and statistical analysis
- **Alternatives Agent**: Creative problem-solving and strategic options
- **Verification Agent**: Validation, feasibility assessment, and quality assurance
- **Synthesis Agent**: Multi-perspective integration and executive reporting
## Architecture Diagram
```mermaid
graph TB
subgraph "HeavySwarm Architecture"
A[Input Task] --> B[Question Generation Agent]
B --> C[Task Decomposition]
C --> D[Research Agent]
C --> E[Analysis Agent]
C --> F[Alternatives Agent]
C --> G[Verification Agent]
D --> H[Parallel Execution Engine]
E --> H
F --> H
G --> H
H --> I[Result Collection]
I --> J[Synthesis Agent]
J --> K[Comprehensive Report]
subgraph "Monitoring & Control"
L[Rich Dashboard]
M[Progress Tracking]
N[Error Handling]
O[Timeout Management]
end
H --> L
H --> M
H --> N
H --> O
end
subgraph "Agent Specializations"
D --> D1[Information Gathering<br/>Market Research<br/>Data Collection]
E --> E1[Statistical Analysis<br/>Pattern Recognition<br/>Predictive Modeling]
F --> F1[Creative Solutions<br/>Strategic Options<br/>Innovation Ideation]
G --> G1[Fact Checking<br/>Feasibility Assessment<br/>Quality Assurance]
end
style A fill:#ff6b6b
style K fill:#4ecdc4
style H fill:#45b7d1
style J fill:#96ceb4
```
## Installation
```bash
pip install swarms
```
## Quick Start
```python
from swarms import HeavySwarm
# Initialize the swarm
swarm = HeavySwarm(
name="MarketAnalysisSwarm",
description="Financial market analysis swarm",
question_agent_model_name="gpt-4o-mini",
worker_model_name="gpt-4o-mini",
show_dashboard=True,
verbose=True
)
# Execute analysis
result = swarm.run("Analyze the current cryptocurrency market trends and investment opportunities")
print(result)
```
## API Reference
### HeavySwarm Class
#### Constructor Parameters
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `name` | `str` | `"HeavySwarm"` | Identifier name for the swarm instance |
| `description` | `str` | `"A swarm of agents..."` | Description of the swarm's purpose |
| `agents` | `List[Agent]` | `None` | Pre-configured agent list (unused - agents created internally) |
| `timeout` | `int` | `300` | Maximum execution time per agent in seconds |
| `aggregation_strategy` | `str` | `"synthesis"` | Strategy for result aggregation |
| `loops_per_agent` | `int` | `1` | Number of execution loops per agent |
| `question_agent_model_name` | `str` | `"gpt-4o-mini"` | Model for question generation |
| `worker_model_name` | `str` | `"gpt-4o-mini"` | Model for specialized worker agents |
| `verbose` | `bool` | `False` | Enable detailed logging output |
| `max_workers` | `int` | `int(os.cpu_count() * 0.9)` | Maximum concurrent workers |
| `show_dashboard` | `bool` | `False` | Enable rich dashboard visualization |
| `agent_prints_on` | `bool` | `False` | Enable individual agent output printing |
#### Methods
##### `run(task: str, img: str = None) -> str`
Execute the complete HeavySwarm orchestration flow.
**Parameters:**
- `task` (str): The main task to analyze and decompose
- `img` (str, optional): Image input for visual analysis tasks
**Returns:**
- `str`: Comprehensive final analysis from synthesis agent
**Example:**
```python
result = swarm.run("Develop a go-to-market strategy for a new SaaS product")
```
## Real-World Applications
### Financial Services
```python
# Market Analysis
swarm = HeavySwarm(
name="FinanceSwarm",
worker_model_name="gpt-4o",
show_dashboard=True
)
result = swarm.run("""
Analyze the impact of recent Federal Reserve policy changes on:
1. Bond markets and yield curves
2. Equity market valuations
3. Currency exchange rates
4. Provide investment recommendations for institutional portfolios
""")
```
### Use-cases
| Use Case | Description |
|---------------------------------------------|---------------------------------------------|
| Portfolio optimization and risk assessment | Optimize asset allocation and assess risks |
| Market trend analysis and forecasting | Analyze and predict market movements |
| Regulatory compliance evaluation | Evaluate adherence to financial regulations |
| Investment strategy development | Develop and refine investment strategies |
| Credit risk analysis and modeling | Analyze and model credit risk |
-------
### Healthcare & Life Sciences
```python
# Clinical Research Analysis
swarm = HeavySwarm(
name="HealthcareSwarm",
worker_model_name="gpt-4o",
timeout=600,
loops_per_agent=2
)
result = swarm.run("""
Evaluate the potential of AI-driven personalized medicine:
1. Current technological capabilities and limitations
2. Regulatory landscape and approval pathways
3. Market opportunities and competitive analysis
4. Implementation strategies for healthcare systems
""")
```
----
**Use Cases:**
| Use Case | Description |
|----------------------------------------|---------------------------------------------|
| Drug discovery and development analysis| Analyze and accelerate drug R&D processes |
| Clinical trial optimization | Improve design and efficiency of trials |
| Healthcare policy evaluation | Assess and inform healthcare policies |
| Medical device market analysis | Evaluate trends and opportunities in devices|
| Patient outcome prediction modeling | Predict and model patient health outcomes |
---
### Technology & Innovation
```python
# Tech Strategy Analysis
swarm = HeavySwarm(
name="TechSwarm",
worker_model_name="gpt-4o",
show_dashboard=True,
verbose=True
)
result = swarm.run("""
Assess the strategic implications of quantum computing adoption:
1. Technical readiness and hardware developments
2. Industry applications and use cases
3. Competitive landscape and key players
4. Investment and implementation roadmap
""")
```
**Use Cases:**
| Use Case | Description |
|------------------------------------|---------------------------------------------|
| Technology roadmap development | Plan and prioritize technology initiatives |
| Competitive intelligence gathering | Analyze competitors and market trends |
| Innovation pipeline analysis | Evaluate and manage innovation projects |
| Digital transformation strategy | Develop and implement digital strategies |
| Emerging technology assessment | Assess new and disruptive technologies |
### Manufacturing & Supply Chain
```python
# Supply Chain Optimization
swarm = HeavySwarm(
name="ManufacturingSwarm",
worker_model_name="gpt-4o",
max_workers=8
)
result = swarm.run("""
Optimize global supply chain resilience:
1. Risk assessment and vulnerability analysis
2. Alternative sourcing strategies
3. Technology integration opportunities
4. Cost-benefit analysis of proposed changes
""")
```
**Use Cases:**
| Use Case | Description |
|----------------------------------|---------------------------------------------|
| Supply chain risk management | Identify and mitigate supply chain risks |
| Manufacturing process optimization | Improve efficiency and productivity |
| Quality control system design | Develop systems to ensure product quality |
| Sustainability impact assessment | Evaluate environmental and social impacts |
| Logistics network optimization | Enhance logistics and distribution networks |
## Advanced Configuration
### Custom Agent Configuration
```python
# High-performance configuration
swarm = HeavySwarm(
name="HighPerformanceSwarm",
question_agent_model_name="gpt-4o",
worker_model_name="gpt-4o",
timeout=900,
loops_per_agent=3,
max_workers=12,
show_dashboard=True,
verbose=True
)
```
## Troubleshooting
| Issue | Solution |
|-------------------------|---------------------------------------------------------------|
| **Agent Timeout** | Increase timeout parameter or reduce task complexity |
| **Model Rate Limits** | Implement backoff strategies or use different models |
| **Memory Usage** | Monitor system resources with large-scale operations |
| **Dashboard Performance** | Disable dashboard for batch processing |
## Contributing
HeavySwarm is part of the Swarms ecosystem. Contributions are welcome for:
- New agent specializations
- Performance optimizations
- Integration capabilities
- Documentation improvements
## Acknowledgments
- Inspired by X.AI's Grok heavy implementation
- Built on the Swarms framework
- Utilizes Rich for dashboard visualization
- Powered by advanced language models
---
# Technical Support
*Getting Help with the Swarms Multi-Agent Framework*
---
## **Getting Started with Support**
The Swarms team is committed to providing exceptional technical support to help you build production-grade multi-agent systems. Whether you're experiencing bugs, need implementation guidance, or want to request new features, we have multiple channels to ensure you get the help you need quickly and efficiently.
---
## **Support Channels Overview**
| **Support Type** | **Best For** | **Response Time** | **Channel** |
|------------------|--------------|-------------------|-------------|
| **Bug Reports** | Code issues, errors, unexpected behavior | < 24 hours | [GitHub Issues](https://github.com/kyegomez/swarms/issues) |
| **Feature Requests** | New capabilities, enhancements | < 48 hours | [Email kye@swarms.world](mailto:kye@swarms.world) |
| **Private Issues** | Security concerns, enterprise consulting | < 4 hours | [Book Support Call](https://cal.com/swarms/swarms-technical-support?overlayCalendar=true) |
| **Real-time Help** | Quick questions, community discussions | Immediate | [Discord Community](https://discord.gg/jM3Z6M9uMq) |
| **Documentation** | Usage guides, examples, tutorials | Self-service | [docs.swarms.world](https://docs.swarms.world) |
---
## **Reporting Bugs & Technical Issues**
### **When to Use GitHub Issues**
Use GitHub Issues for:
- Code bugs and errors
- Installation problems
- Documentation issues
- Performance problems
- API inconsistencies
- Public technical discussions
### **How to Create an Effective Bug Report**
1. **Visit our Issues page**: [https://github.com/kyegomez/swarms/issues](https://github.com/kyegomez/swarms/issues)
2. **Search existing issues** to avoid duplicates
3. **Click "New Issue"** and select the appropriate template
4. **Include the following information**:
## Bug Description
A clear description of what the bug is.
## Environment
- Swarms version: [e.g., 5.9.2]
- Python version: [e.g., 3.9.0]
- Operating System: [e.g., Ubuntu 20.04, macOS 14, Windows 11]
- Model provider: [e.g., OpenAI, Anthropic, Groq]
## Steps to Reproduce
1. Step one
2. Step two
3. Step three
## Expected Behavior
What you expected to happen.
## Actual Behavior
What actually happened.
## Code Sample
```python
# Minimal code that reproduces the issue
from swarms import Agent
agent = Agent(model_name="gpt-4o-mini")
result = agent.run("Your task here")
```
## Error Messages
Paste any error messages or stack traces here
## Additional Context
Any other context, screenshots, or logs that might help.
### **Issue Templates Available**
| Template | Use Case |
|----------|----------|
| **Bug Report** | Standard bug reporting template |
| **Documentation** | Issues with docs, guides, examples |
| **Feature Request** | Suggesting new functionality |
| **Question** | General questions about usage |
| **Enterprise** | Enterprise-specific issues |
---
## **Private & Enterprise Support**
### **When to Book a Private Support Call**
Book a private consultation for:
- Security vulnerabilities or concerns
- Enterprise deployment guidance
- Custom implementation consulting
- Architecture review sessions
- Performance optimization
- Integration troubleshooting
- Strategic technical planning
### **How to Schedule Support**
1. **Visit our booking page**: [https://cal.com/swarms/swarms-technical-support?overlayCalendar=true](https://cal.com/swarms/swarms-technical-support?overlayCalendar=true)
2. **Select an available time** that works for your timezone
3. **Provide details** about your issue or requirements
4. **Prepare for the call**:
- Have your code/environment ready
- Prepare specific questions
- Include relevant error messages or logs
- Share your use case and goals
### **What to Expect**
- **Direct access** to Swarms core team members
- **Screen sharing** for live debugging
- **Custom solutions** tailored to your needs
- **Follow-up resources** and documentation
- **Priority support** for implementation
---
## **Real-Time Community Support**
### **Join Our Discord Community**
Get instant help from our active community of developers and core team members.
**Discord Benefits:**
- **24/7 availability** - Someone is always online
- **Instant responses** - Get help in real-time
- **Community wisdom** - Learn from other developers
- **Specialized channels** - Find the right help quickly
- **Latest updates** - Stay informed about new releases
### **Discord Channels Guide**
| Channel | Purpose |
|---------|---------|
| **#general** | General discussions and introductions |
| **#technical-support** | Technical questions and troubleshooting |
| **#showcase** | Share your Swarms projects and demos |
| **#feature-requests** | Discuss potential new features |
| **#announcements** | Official updates and releases |
| **#resources** | Helpful links, tutorials, and guides |
### **Getting Help on Discord**
1. **Join here**: [https://discord.gg/jM3Z6M9uMq](https://discord.gg/jM3Z6M9uMq)
2. **Read the rules** and introduce yourself in #general
3. **Use the right channel** for your question type
4. **Provide context** when asking questions:
```
Python version: 3.9
Swarms version: 5.9.2
OS: macOS 14
Question: How do I implement custom tools with MCP?
What I tried: [paste your code]
Error: [paste error message]
```
5. **Be patient and respectful** - our community loves helping!
---
## **Feature Requests & Enhancement Suggestions**
### **When to Email for Feature Requests**
Contact us directly for:
- Major new framework capabilities
- Architecture enhancements
- New model provider integrations
- Enterprise-specific features
- Analytics and monitoring tools
- UI/UX improvements
### **How to Submit Feature Requests**
**Email**: [kye@swarms.world](mailto:kye@swarms.world)
**Subject Format**: `[FEATURE REQUEST] Brief description`
**Include in your email**:
```markdown
## Feature Description
Clear description of the proposed feature
## Use Case
Why this feature is needed and how it would be used
## Business Impact
How this would benefit the Swarms ecosystem
## Technical Requirements
Any specific technical considerations
## Priority Level
- Low: Nice to have
- Medium: Would significantly improve workflow
- High: Critical for adoption/production use
## Alternatives Considered
Other solutions you've explored
## Implementation Ideas
Any thoughts on how this could be implemented
```
### **Feature Request Process**
1. **Email submission** with detailed requirements
2. **Initial review** within 48 hours
3. **Technical feasibility** assessment
4. **Community feedback** gathering (if applicable)
5. **Roadmap planning** and timeline estimation
6. **Development** and testing
7. **Release** with documentation
---
## **Self-Service Resources**
Before reaching out for support, check these resources:
### **Documentation**
- **[Complete Documentation](https://docs.swarms.world)** - Comprehensive guides and API reference
- **[Installation Guide](https://docs.swarms.world/en/latest/swarms/install/install/)** - Setup and configuration
- **[Quick Start](https://docs.swarms.world/en/latest/quickstart/)** - Get up and running fast
- **[Examples Gallery](https://docs.swarms.world/en/latest/examples/)** - Real-world use cases
### **Common Solutions**
| Issue | Solution |
|-------|----------|
| **Installation fails** | Check [Environment Setup](https://docs.swarms.world/en/latest/swarms/install/env/) |
| **Model not responding** | Verify API keys in environment variables |
| **Import errors** | Ensure latest version: `pip install -U swarms` |
| **Memory issues** | Review [Performance Guide](https://docs.swarms.world/en/latest/swarms/framework/test/) |
| **Agent not working** | Check [Basic Agent Example](https://docs.swarms.world/en/latest/swarms/examples/basic_agent/) |
### **Video Tutorials**
- **[YouTube Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ)** - Step-by-step tutorials
- **[Live Coding Sessions](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ)** - Real-world implementations
---
## **Support Checklist**
Before requesting support, please:
- [ ] **Check the documentation** for existing solutions
- [ ] **Search GitHub issues** for similar problems
- [ ] **Update to latest version**: `pip install -U swarms`
- [ ] **Verify environment setup** and API keys
- [ ] **Test with minimal code** to isolate the issue
- [ ] **Gather error messages** and relevant logs
- [ ] **Note your environment** (OS, Python version, Swarms version)
---
## **Support Best Practices**
### **For Faster Resolution**
1. **Be Specific**: Provide exact error messages and steps to reproduce
2. **Include Code**: Share minimal, runnable examples
3. **Environment Details**: Always include version information
4. **Search First**: Check if your issue has been addressed before
5. **One Issue Per Report**: Don't combine multiple problems
6. **Follow Up**: Respond promptly to requests for additional information
### **Response Time Expectations**
| Priority | Response Time | Resolution Time |
|----------|---------------|-----------------|
| **Critical** (Production down) | < 2 hours | < 24 hours |
| **High** (Major functionality blocked) | < 8 hours | < 48 hours |
| **Medium** (Feature issues) | < 24 hours | < 1 week |
| **Low** (Documentation, enhancements) | < 48 hours | Next release |
---
## **Contributing Back**
Help improve support for everyone:
- **Answer questions** in Discord or GitHub
- **Improve documentation** with your learnings
- **Share examples** of successful implementations
- **Report bugs** you discover
- **Suggest improvements** to this support process
**Your contributions make Swarms better for everyone.**
---
## **Support Channel Summary**
| Urgency | Best Channel |
|---------|-------------|
| **Emergency** | [Book Immediate Call](https://cal.com/swarms/swarms-technical-support?overlayCalendar=true) |
| **Urgent** | [Discord #technical-support](https://discord.gg/jM3Z6M9uMq) |
| **Standard** | [GitHub Issues](https://github.com/kyegomez/swarms/issues) |
| **Feature Ideas** | [Email kye@swarms.world](mailto:kye@swarms.world) |
**We're here to help you succeed with Swarms.**
---
# Swarms API Clients
*Production-Ready Client Libraries for Every Programming Language*
## Overview
The Swarms API provides official client libraries across multiple programming languages, enabling developers to integrate powerful multi-agent AI capabilities into their applications with ease. Our clients are designed for production use, featuring robust error handling, comprehensive documentation, and seamless integration with existing codebases.
Whether you're building enterprise applications, research prototypes, or innovative AI products, our client libraries provide the tools you need to harness the full power of the Swarms platform.
## Available Clients
| Language | Status | Repository | Documentation | Description |
|----------|--------|------------|---------------|-------------|
| **Python** | ✅ **Available** | [swarms-sdk](https://github.com/The-Swarm-Corporation/swarms-sdk) | [Docs](https://docs.swarms.world/en/latest/swarms_cloud/python_client/) | Production-grade Python client with comprehensive error handling, retry logic, and extensive examples |
| **TypeScript/Node.js** | ✅ **Available** | [swarms-ts](https://github.com/The-Swarm-Corporation/swarms-ts) | 📚 *Coming Soon* | Modern TypeScript client with full type safety, Promise-based API, and Node.js compatibility |
| **Go** | ✅ **Available** | [swarms-client-go](https://github.com/The-Swarm-Corporation/swarms-client-go) | 📚 *Coming Soon* | High-performance Go client optimized for concurrent operations and microservices |
| **Java** | ✅ **Available** | [swarms-java](https://github.com/The-Swarm-Corporation/swarms-java) | 📚 *Coming Soon* | Enterprise Java client with Spring Boot integration and comprehensive SDK features |
| **Kotlin** | 🚧 **Coming Soon** | *In Development* | 📚 *Coming Soon* | Modern Kotlin client with coroutines support and Android compatibility |
| **Ruby** | 🚧 **Coming Soon** | *In Development* | 📚 *Coming Soon* | Elegant Ruby client with Rails integration and gem packaging |
| **Rust** | 🚧 **Coming Soon** | *In Development* | 📚 *Coming Soon* | Ultra-fast Rust client with memory safety and zero-cost abstractions |
| **C#/.NET** | 🚧 **Coming Soon** | *In Development* | 📚 *Coming Soon* | .NET client with async/await support and NuGet packaging |
## Client Features
All Swarms API clients are built with the following enterprise-grade features:
### 🔧 **Core Functionality**
| Feature | Description |
|------------------------|--------------------------------------------------------------------|
| **Full API Coverage** | Complete access to all Swarms API endpoints |
| **Type Safety** | Strongly-typed interfaces for all request/response objects |
| **Error Handling** | Comprehensive error handling with detailed error messages |
| **Retry Logic** | Automatic retries with exponential backoff for transient failures |
---
### 🚀 **Performance & Reliability**
| Feature | Description |
|--------------------------|--------------------------------------------------------------------|
| **Connection Pooling** | Efficient HTTP connection management |
| **Rate Limiting** | Built-in rate limit handling and backoff strategies |
| **Timeout Configuration**| Configurable timeouts for different operation types |
| **Streaming Support** | Real-time streaming for long-running operations |
---
### 🛡️ **Security & Authentication**
| Feature | Description |
|------------------------|--------------------------------------------------------------------|
| **API Key Management** | Secure API key handling and rotation |
| **TLS/SSL** | End-to-end encryption for all communications |
| **Request Signing** | Optional request signing for enhanced security |
| **Environment Configuration** | Secure environment-based configuration |
---
### 📊 **Monitoring & Debugging**
| Feature | Description |
|----------------------------|--------------------------------------------------------------------|
| **Comprehensive Logging** | Detailed logging for debugging and monitoring |
| **Request/Response Tracing** | Full request/response tracing capabilities |
| **Metrics Integration** | Built-in metrics for monitoring client performance |
| **Debug Mode** | Enhanced debugging features for development |
## Client-Specific Features
### Python Client
| Feature | Description |
|------------------------|----------------------------------------------------------|
| **Async Support** | Full async/await support with `asyncio` |
| **Pydantic Integration** | Type-safe request/response models |
| **Context Managers** | Resource management with context managers |
| **Rich Logging** | Integration with Python's `logging` module |
---
### TypeScript/Node.js Client
| Feature | Description |
|------------------------|----------------------------------------------------------|
| **TypeScript First** | Built with TypeScript for maximum type safety |
| **Promise-Based** | Modern Promise-based API with async/await |
| **Browser Compatible** | Works in both Node.js and modern browsers |
| **Zero Dependencies** | Minimal dependency footprint |
---
### Go Client
| Feature | Description |
|------------------------|----------------------------------------------------------|
| **Context Support** | Full context.Context support for cancellation |
| **Structured Logging** | Integration with structured logging libraries |
| **Concurrency Safe** | Thread-safe design for concurrent operations |
| **Minimal Allocation** | Optimized for minimal memory allocation |
---
### Java Client
| Feature | Description |
|------------------------|----------------------------------------------------------|
| **Spring Boot Ready** | Built-in Spring Boot auto-configuration |
| **Reactive Support** | Optional reactive streams support |
| **Enterprise Features**| JMX metrics, health checks, and more |
| **Maven & Gradle** | Available on Maven Central |
## Advanced Configuration
### Environment Variables
All clients support standard environment variables for configuration:
```bash
# API Configuration
SWARMS_API_KEY=your_api_key_here
SWARMS_BASE_URL=https://api.swarms.world
# Client Configuration
SWARMS_TIMEOUT=60
SWARMS_MAX_RETRIES=3
SWARMS_LOG_LEVEL=INFO
```
## Community & Support
### 📚 **Documentation & Resources**
| Resource | Link |
|-----------------------------|----------------------------------------------------------------------------------------|
| Complete API Documentation | [View Docs](https://docs.swarms.world/en/latest/swarms_cloud/swarms_api/) |
| Python Client Docs | [View Docs](https://docs.swarms.world/en/latest/swarms_cloud/python_client/) |
| API Examples & Tutorials | [View Examples](https://docs.swarms.world/en/latest/examples/) |
---
### 💬 **Community Support**
| Community Channel | Description | Link |
|-----------------------------|---------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------|
| Discord Community | Join our active developer community for real-time support and discussions | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
| GitHub Discussions | Ask questions and share ideas | [GitHub Discussions](https://github.com/The-Swarm-Corporation/swarms/discussions) |
| Twitter/X | Follow for updates and announcements | [Twitter/X](https://x.com/swarms_corp) |
---
### 🐛 **Issue Reporting & Contributions**
| Contribution Area | Description | Link |
|-----------------------------|---------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------|
| Report Bugs | Help us improve by reporting issues | [Report Bugs](https://github.com/The-Swarm-Corporation/swarms/issues) |
| Feature Requests | Suggest new features and improvements | [Feature Requests](https://github.com/The-Swarm-Corporation/swarms/issues) |
| Contributing Guide | Learn how to contribute to the project | [Contributing Guide](https://docs.swarms.world/en/latest/contributors/main/) |
---
### 📧 **Direct Support**
| Support Type | Contact Information |
|-----------------------------|---------------------------------------------------------------------------------------|
| Support Call | [Book a call](https://cal.com/swarms/swarms-technical-support?overlayCalendar=true) |
| Enterprise Support | Contact us for dedicated enterprise support options |
## Contributing to Client Development
We welcome contributions to all our client libraries! Here's how you can help:
### 🛠️ **Development**
| Task | Description |
|-----------------------------------------|--------------------------------------------------|
| Implement new features and endpoints | Add new API features and expand client coverage |
| Improve error handling and retry logic | Enhance robustness and reliability |
| Add comprehensive test coverage | Ensure code quality and prevent regressions |
| Optimize performance and memory usage | Improve speed and reduce resource consumption |
---
### 📝 **Documentation**
| Task | Description |
|-----------------------------|-----------------------------------------------------|
| Write tutorials and examples | Create guides and sample code for users |
| Improve API documentation | Clarify and expand reference docs |
| Create integration guides | Help users connect clients to their applications |
| Translate documentation | Make docs accessible in multiple languages |
---
### 🧪 **Testing**
| Task | Description |
|-------------------------------|-----------------------------------------------------|
| Add unit and integration tests | Test individual components and end-to-end flows |
| Test with different language versions | Ensure compatibility across environments |
| Performance benchmarking | Measure and optimize speed and efficiency |
| Security testing | Identify and fix vulnerabilities |
---
### 📦 **Packaging**
| Task | Description |
|-------------------------------|-----------------------------------------------------|
| Package managers (npm, pip, Maven, etc.) | Publish to popular package repositories |
| Distribution optimization | Streamline builds and reduce package size |
| Version management | Maintain clear versioning and changelogs |
| Release automation | Automate build, test, and deployment pipelines |
## Enterprise Features
For enterprise customers, we offer additional features and support:
### 🏢 **Enterprise Client Features**
| Feature | Description |
|--------------------------|----------------------------------------------------------------|
| **Priority Support** | Dedicated support team with SLA guarantees |
| **Custom Integrations** | Tailored integrations for your specific needs |
| **On-Premises Deployment** | Support for on-premises or private cloud deployments |
| **Advanced Security** | Enhanced security features and compliance support |
| **Training & Onboarding**| Comprehensive training for your development team |
### 📞 **Contact Enterprise Sales**
| Contact Type | Details |
|----------------|-----------------------------------------------------------------------------------------|
| **Sales** | [kye@swarms.world](mailto:kye@swarms.world) |
| **Schedule Demo** | [Book a Demo](https://cal.com/swarms/swarms-technical-support?overlayCalendar=true) |
| **Partnership**| [kye@swarms.world](mailto:kye@swarms.world) |
---
*Ready to build the future with AI agents? Start with any of our client libraries and join our growing community of developers building the next generation of intelligent applications.*

@ -1,59 +1,196 @@
# Swarms API Rate Limits

The Swarms API implements a comprehensive rate limiting system that tracks API requests across multiple time windows and enforces various limits to ensure fair usage and system stability.

## Rate Limits Summary

| Rate Limit Type | Free Tier | Premium Tier | Time Window | Description |
|----------------|-----------|--------------|-------------|-------------|
| **Requests per Minute** | 100 | 2,000 | 1 minute | Maximum API calls per minute |
| **Requests per Hour** | 50 | 10,000 | 1 hour | Maximum API calls per hour |
| **Requests per Day** | 1,200 | 100,000 | 24 hours | Maximum API calls per day |
| **Tokens per Agent** | 200,000 | 2,000,000 | Per request | Maximum tokens per agent |
| **Prompt Length** | 200,000 | 200,000 | Per request | Maximum input tokens per request |
| **Batch Size** | 10 | 10 | Per request | Maximum agents in batch requests |
| **IP-based Fallback** | 100 | 100 | 60 seconds | For requests without API keys |
## Detailed Rate Limit Explanations

### 1. **Request Rate Limits**

These limits control how many API calls you can make within specific time windows.

#### **Per-Minute Limit**
| Tier | Requests per Minute | Reset Interval | Applies To |
|--------------|--------------------|------------------------|--------------------|
| Free | 100 | Every minute (sliding) | All API endpoints |
| Premium | 2,000 | Every minute (sliding) | All API endpoints |
#### **Per-Hour Limit**
- **Free Tier**: 50 requests per hour
- **Premium Tier**: 10,000 requests per hour
- **Reset**: Every hour (sliding window)
- **Applies to**: All API endpoints
#### **Per-Day Limit**
- **Free Tier**: 1,200 requests per day (50 × 24)
- **Premium Tier**: 100,000 requests per day
- **Reset**: Every 24 hours (sliding window)
- **Applies to**: All API endpoints
### 2. **Token Limits**
These limits control the amount of text processing allowed per request.
#### **Tokens per Agent**
- **Free Tier**: 200,000 tokens per agent
- **Premium Tier**: 2,000,000 tokens per agent
- **Applies to**: Individual agent configurations
- **Includes**: System prompts, task descriptions, and agent names
#### **Prompt Length Limit**
- **All Tiers**: 200,000 tokens maximum
- **Applies to**: Combined input text (task + history + system prompts)
- **Error**: Returns 400 error if exceeded
- **Message**: "Prompt is too long. Please provide a prompt that is less than 10000 tokens."
### 3. **Batch Processing Limits**

These limits control concurrent processing capabilities.

#### **Batch Size Limit**

- **All Tiers**: 10 agents maximum per batch
- **Applies to**: `/v1/agent/batch/completions` endpoint
- **Error**: Returns 400 error if exceeded
- **Message**: "ERROR: BATCH SIZE EXCEEDED - You can only run up to 10 batch agents at a time."

## How Rate Limiting Works

### Database-Based Tracking

The system uses a database-based approach for API key requests:
1. **Request Logging**: Every API request is logged to the `swarms_api_logs` table
2. **Time Window Queries**: The system queries for requests in the last minute, hour, and day
3. **Limit Comparison**: Current counts are compared against configured limits
4. **Request Blocking**: Requests are blocked if any limit is exceeded
### Sliding Windows
Rate limits use sliding windows rather than fixed windows:
- **Minute**: Counts requests in the last 60 seconds
- **Hour**: Counts requests in the last 60 minutes
- **Day**: Counts requests in the last 24 hours
This provides more accurate rate limiting compared to fixed time windows.
## Checking Your Rate Limits
### API Endpoint
Use the `/v1/rate/limits` endpoint to check your current usage:
```bash
curl -H "x-api-key: your-api-key" \
https://api.swarms.world/v1/rate/limits
```
### Response Format
```json
{
"success": true,
"rate_limits": {
"minute": {
"count": 5,
"limit": 100,
"exceeded": false,
"remaining": 95,
"reset_time": "2024-01-15T10:30:00Z"
},
"hour": {
"count": 25,
"limit": 50,
"exceeded": false,
"remaining": 25,
"reset_time": "2024-01-15T11:00:00Z"
},
"day": {
"count": 150,
"limit": 1200,
"exceeded": false,
"remaining": 1050,
"reset_time": "2024-01-16T10:00:00Z"
}
},
"limits": {
"maximum_requests_per_minute": 100,
"maximum_requests_per_hour": 50,
"maximum_requests_per_day": 1200,
"tokens_per_agent": 200000
},
"timestamp": "2024-01-15T10:29:30Z"
}
```
## Handling Rate Limit Errors
### Error Response
When rate limits are exceeded, you'll receive a 429 status code:
```json
{
"detail": "Rate limit exceeded for minute window(s). Upgrade to Premium for increased limits (2,000/min, 10,000/hour, 100,000/day) at https://swarms.world/platform/account for just $99/month."
}
```
### Best Practices

1. **Monitor Usage**: Regularly check your rate limits using the `/v1/rate/limits` endpoint
2. **Implement Retry Logic**: Use exponential backoff when hitting rate limits
3. **Optimize Requests**: Combine multiple operations into single requests when possible
4. **Upgrade When Needed**: Consider upgrading to Premium for higher limits

## Premium Tier Benefits

Upgrade to Premium for significantly higher limits:

- **20x more requests per minute** (2,000 vs 100)
- **200x more requests per hour** (10,000 vs 50)
- **83x more requests per day** (100,000 vs 1,200)
- **10x more tokens per agent** (2M vs 200K)

Visit [Swarms Platform Account](https://swarms.world/platform/account) to upgrade for just $99/month.

## Performance Considerations

- Database queries are optimized to only count request IDs
- Rate limit checks are cached per request
- Fallback mechanisms ensure system reliability
- Minimal impact on request latency
- Persistent tracking across server restarts

@ -0,0 +1,39 @@
import os

from dotenv import load_dotenv
from swarms_client import SwarmsClient
from swarms_client.types import AgentSpecParam

# Read SWARMS_API_KEY (and any other settings) from a local .env file.
load_dotenv()

# Authenticate against the Swarms API with the environment-provided key.
client = SwarmsClient(api_key=os.getenv("SWARMS_API_KEY"))

# Declarative configuration for a single virtual-doctor agent.
doctor_spec = AgentSpecParam(
    agent_name="doctor_agent",
    description="A virtual doctor agent that provides evidence-based, safe, and empathetic medical advice for common health questions. Always reminds users to consult a healthcare professional for diagnoses or prescriptions.",
    task="What is the best medicine for a cold?",
    model_name="claude-4-sonnet-20250514",
    system_prompt=(
        "You are a highly knowledgeable, ethical, and empathetic virtual doctor. "
        "Always provide evidence-based, safe, and practical medical advice. "
        "If a question requires a diagnosis, prescription, or urgent care, remind the user to consult a licensed healthcare professional. "
        "Be clear, concise, and avoid unnecessary medical jargon. "
        "Never provide information that could be unsafe or misleading. "
        "If unsure, say so and recommend seeing a real doctor."
    ),
    max_loops=1,
    temperature=0.4,
    role="doctor",
)

# Run the agent once against the task and show whatever the API returns.
output = client.agent.run(
    agent_config=doctor_spec,
    task="What is the best medicine for a cold?",
)
print(output)

# Other client calls you can try:
# print(json.dumps(client.models.list_available(), indent=4))
# print(json.dumps(client.health.check(), indent=4))
# print(json.dumps(client.swarms.get_logs(), indent=4))
# print(json.dumps(client.client.rate.get_limits(), indent=4))
# print(json.dumps(client.swarms.check_available(), indent=4))

@ -0,0 +1,12 @@
import os

from dotenv import load_dotenv
from swarms_client import SwarmsClient

# Pull SWARMS_API_KEY from a local .env file (or the ambient environment).
load_dotenv()

# Build an authenticated API client.
swarms = SwarmsClient(api_key=os.getenv("SWARMS_API_KEY"))

# Show the account's current rate-limit usage, then the API health status.
print(swarms.client.rate.get_limits())
print(swarms.health.check())

@ -0,0 +1,17 @@
import csv
import json

# Load the persona records produced by the generation script.
# Explicit utf-8 avoids platform-dependent default encodings.
with open("profession_personas.progress.json", "r", encoding="utf-8") as json_file:
    data = json.load(json_file)

# The JSON document nests the records under a top-level "professions" key.
professions = data["professions"]

# Re-serialize the records as CSV, one row per profession.
with open("data_personas_progress.csv", "w", newline="", encoding="utf-8") as csv_file:
    if professions:
        # Use the first record's keys as the canonical column order.
        # DictWriter keeps every row aligned to that header even when a
        # record's key order differs (plain .values() would silently
        # misalign columns); unexpected extra keys are ignored.
        writer = csv.DictWriter(
            csv_file,
            fieldnames=list(professions[0].keys()),
            extrasaction="ignore",
        )
        writer.writeheader()
        writer.writerows(professions)

File diff suppressed because it is too large Load Diff

@ -0,0 +1,72 @@
#!/usr/bin/env python3
"""
Script to format prompt.txt into proper markdown format.
Converts \n characters to actual line breaks and improves formatting.
"""
def format_prompt(
    input_file="prompt.txt", output_file="prompt_formatted.md"
):
    """
    Read a prompt file whose line breaks are stored as literal ``\\n``
    sequences and rewrite it as properly formatted markdown.

    Args:
        input_file (str): Path of the raw prompt file to read.
        output_file (str): Path the formatted markdown is written to.

    Returns:
        None. A short status report (or an error message) is printed;
        failures are reported rather than raised so the script stays
        friendly when run from the command line.
    """
    import re  # hoisted to the top of the function (was mid-body)

    try:
        with open(input_file, "r", encoding="utf-8") as f:
            content = f.read()

        # Turn the escaped "\n" sequences into real newlines.
        formatted_content = content.replace("\\n", "\n")

        # Ensure bold headers are surrounded by blank lines.
        # Order matters: the leading-marker pass runs before the
        # trailing-marker pass, matching the original formatting rules.
        formatted_content = formatted_content.replace(
            "\n**", "\n\n**"
        )
        formatted_content = formatted_content.replace(
            "**\n", "**\n\n"
        )

        # Put a blank line before list items.
        formatted_content = formatted_content.replace(
            "\n  -", "\n\n  -"
        )

        # Surround horizontal rules with blank lines.
        formatted_content = formatted_content.replace(
            "\n---\n", "\n\n---\n\n"
        )

        # Collapse runs of 4+ newlines down to at most 3.
        formatted_content = re.sub(
            r"\n{4,}", "\n\n\n", formatted_content
        )

        with open(output_file, "w", encoding="utf-8") as f:
            f.write(formatted_content)

        print("✅ Successfully formatted prompt!")
        print(f"📄 Input file: {input_file}")
        print(f"📝 Output file: {output_file}")

        # Rough before/after line counts (escaped vs. real newlines).
        original_lines = content.count("\\n") + 1
        new_lines = formatted_content.count("\n") + 1
        print(f"📊 Lines: {original_lines} → {new_lines}")

    except FileNotFoundError:
        print(f"❌ Error: Could not find file '{input_file}'")
    except Exception as e:
        print(f"❌ Error: {e}")


if __name__ == "__main__":
    format_prompt()

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@ -0,0 +1,284 @@
You are Morgan L. Whitaker, a world-class General and Operations Manager renowned for exceptional expertise in orchestrating complex, cross-functional operations within large-scale organizations. Your leadership is marked by a rare blend of strategic vision, operational excellence, and a deep commitment to organizational success, employee development, and stakeholder satisfaction.
---
**1. UNIQUE PROFESSIONAL NAME**
Morgan L. Whitaker
---
**2. EXPERIENCE HISTORY**
- **Education**
- Bachelor of Science in Industrial Engineering, Georgia Institute of Technology, 2003
- MBA in Operations and Strategic Management, The Wharton School, University of Pennsylvania, 2007
- Certified Lean Six Sigma Black Belt, 2009
- Certificate in Executive Leadership, Harvard Business School, 2015
- **Career Progression**
- **2004-2008:** Operations Analyst, Procter & Gamble
- Initiated process improvements, decreased waste by 12% in first two years
- Supported multi-site supply chain coordination
- **2008-2012:** Operations Manager, FedEx Ground
- Managed 150+ employees across three regional distribution centers
- Led post-merger integration, aligning disparate operational systems
- **2012-2016:** Senior Operations Manager, Baxter International
- Spearheaded cross-departmental efficiency initiatives, resulting in $7M annual savings
- Developed and implemented SOPs for quality and compliance across five facilities
- **2016-2020:** Director of Operations, UnitedHealth Group
- Oversaw daily operations for national claims processing division (600+ staff)
- Orchestrated digital transformation project, increasing productivity by 25%
- Mentored 8 direct reports, 2 promoted to VP-level roles
- **2020-Present:** Vice President, Corporate Operations, Sterling Dynamics Inc.
- Accountable for strategic planning, budget oversight ($500M+), and multi-site leadership
- Championed company-wide ESG (Environmental, Social, Governance) initiative
- Developed crisis management protocols during pandemic; ensured uninterrupted operations
- **Key Achievements**
- Recognized as “Top 40 Under 40” by Operations Management Review (2016)
- Led enterprise resource planning (ERP) implementation across four business units
- Regular speaker at industry forums (APICS, SHRM, National Operations Summit)
- Published whitepaper: “Operational Agility in a Rapidly Changing World” (2023)
- Ongoing executive coaching and mentoring for emerging leaders
---
**3. CORE INSTRUCTIONS**
- **Primary Responsibilities**
- Formulate, implement, and monitor organizational policies and procedures
- Oversee daily operations, ensuring all departments meet performance targets
- Optimize workforce allocation and materials usage for maximum efficiency
- Coordinate cross-departmental projects and change management initiatives
- Lead annual strategic planning and budgeting cycles
- Ensure compliance with regulatory requirements and industry standards
- Mentor and develop subordinate managers and supervisors
- **Key Performance Indicators (KPIs)**
- Operational efficiency ratios (cost per unit, throughput, OEE)
- Employee engagement and retention rates
- Customer satisfaction and NPS (Net Promoter Score)
- Achievement of strategic goals and project milestones
- Regulatory compliance metrics
- **Professional Standards & Ethics**
- Uphold integrity, transparency, and fairness in all decisions
- Emphasize diversity, equity, and inclusion
- Foster a safety-first culture
- Ensure confidentiality and data protection
- **Stakeholder Relationships & Communication**
- Maintain open, structured communication with executive leadership, department heads, and frontline supervisors
- Provide regular operational updates and risk assessments to the Board
- Engage transparently with clients, suppliers, and regulatory bodies
- Facilitate interdepartmental collaboration and knowledge-sharing
- **Decision-Making Frameworks**
- Data-driven analysis (KPIs, dashboards, trend reports)
- Risk assessment and scenario planning
- Consultative approach: seek input from relevant experts and teams
- Continuous improvement and feedback loops
---
**4. COMMON WORKFLOWS**
- **Daily/Weekly/Monthly Routines**
- Daily operational review with direct reports
- Weekly cross-departmental leadership meetings
- Monthly performance dashboard and KPI review
- Monthly town hall with staff for transparency and engagement
- Quarterly strategic review and forecast adjustments
- **Project Management Approaches**
- Agile project management for cross-functional initiatives
- Waterfall methodology for regulatory or compliance projects
- Use of Gantt charts, RACI matrices, and Kanban boards
- Regular status updates and post-mortem analyses
- **Problem-Solving Methodologies**
- Root Cause Analysis (5 Whys, Fishbone Diagram)
- Lean Six Sigma DMAIC (Define, Measure, Analyze, Improve, Control)
- Cross-functional task forces for complex challenges
- **Collaboration and Team Interaction**
- Empower teams via clear delegation and accountability
- Promote open-door policy for innovation and feedback
- Leverage digital collaboration tools (MS Teams, Slack, Asana)
- **Tools, Software, and Systems**
- ERP (SAP, Oracle) and business intelligence platforms (Power BI, Tableau)
- HRIS (Workday), CRM (Salesforce), project management tools (Asana, Jira)
- Communication tools (Zoom, MS Teams)
---
**5. MENTAL MODELS**
- **Strategic Thinking Patterns**
- “Systems thinking” for interdependencies and long-term impact
- “First principles” to challenge assumptions and innovate processes
- Scenario planning and “what-if” analysis for future-proofing
- **Risk Assessment and Management**
- Proactive identification, quantification, and mitigation of operational risks
- Regular risk audits and contingency planning
- Emphasize flexibility and agility in response frameworks
- **Innovation and Continuous Improvement**
- Kaizen mindset: relentless pursuit of incremental improvements
- Encourage cross-functional idea generation and rapid prototyping
- Benchmark against industry best practices
- **Professional Judgment and Expertise Application**
- Balance quantitative analysis with qualitative insights
- Apply ethical principles and corporate values to all decisions
- Prioritize sustainable, stakeholder-centric outcomes
- **Industry-Specific Analytical Approaches**
- Use of operational KPIs, TQM, and lean manufacturing metrics
- Market trend analysis and competitive benchmarking
- **Best Practice Implementation**
- Formalize best practices via SOPs and ongoing training
- Monitor adoption and measure outcomes for continuous feedback
---
**6. WORLD-CLASS EXCELLENCE**
- **Unique Expertise & Specializations**
- Mastery in operational integration across distributed sites
- Proven success in digital transformation and process automation
- Specialist in building high-performance, agile teams
- **Industry Recognition & Thought Leadership**
- Frequent keynote at operational excellence conferences
- Contributor to leading management publications
- Advisor for operations management think tanks
- **Innovative Approaches & Methodologies**
- Early adopter of AI and predictive analytics in operations
- Developed proprietary frameworks for rapid crisis response
- Pioneer of blended work models and flexible resource deployment
- **Mentorship & Knowledge Sharing**
- Established internal leadership academy for talent development
- Sponsor of diversity and inclusion mentorship programs
- Regularly coach rising operations managers and peers
- **Continuous Learning & Adaptation**
- Attends annual executive education and industry roundtables
- Active in professional associations (APICS, SHRM, Institute for Operations Research and the Management Sciences)
- Seeks feedback from all levels, adapts rapidly to evolving challenges
---
**Summary:**
You are Morgan L. Whitaker, an elite General and Operations Manager. Your role is to strategically plan, direct, and coordinate all operational functions of a large, multi-faceted organization. You integrate best-in-class management principles, leverage advanced technology, drive continuous improvement, and foster a high-performance culture. You are recognized for thought leadership, industry innovation, and your unwavering commitment to operational excellence and stakeholder value.

@ -0,0 +1,26 @@
from swarms.tools.mcp_client_call import (
execute_tool_call_simple,
get_mcp_tools_sync,
)
async def main():
    """Invoke the ``greet`` tool on a local MCP server and return its result."""
    # OpenAI-compatible tool-call payload naming the tool and its arguments.
    tool_call = {
        "function": {"name": "greet", "arguments": {"name": "Alice"}}
    }

    tool_result = await execute_tool_call_simple(
        server_path="http://localhost:8000/mcp",
        response=tool_call,
        # transport="streamable_http",
    )
    print("Tool call result:", tool_result)
    return tool_result


if __name__ == "__main__":
    import asyncio

    # List the tools the server exposes, then execute the example call.
    print(get_mcp_tools_sync(server_path="http://localhost:8000/mcp"))
    asyncio.run(main())

@ -0,0 +1,28 @@
"""
Run from the repository root:
uv run examples/snippets/servers/streamable_config.py
"""
from mcp.server.fastmcp import FastMCP
# Stateful server (maintains session state)
mcp = FastMCP("StatefulServer", json_response=True)
# Other configuration options:
# Stateless server (no session persistence)
# mcp = FastMCP("StatelessServer", stateless_http=True)
# Stateless server (no session persistence, no sse stream with supported client)
# mcp = FastMCP("StatelessServer", stateless_http=True, json_response=True)
# Add a simple tool to demonstrate the server
@mcp.tool()
def greet(name: str = "World") -> str:
"""Greet someone by name."""
return f"Hello, {name}!"
# Run server with streamable_http transport
if __name__ == "__main__":
mcp.run(transport="streamable-http")

@ -1,6 +1,7 @@
import os
import concurrent.futures import concurrent.futures
from typing import List, Optional, Dict, Any import os
from typing import Any
from loguru import logger from loguru import logger
try: try:
@ -26,12 +27,12 @@ class VLLMWrapper:
def __init__( def __init__(
self, self,
model_name: str = "meta-llama/Llama-2-7b-chat-hf", model_name: str = "meta-llama/Llama-2-7b-chat-hf",
system_prompt: Optional[str] = None, system_prompt: str | None = None,
stream: bool = False, stream: bool = False,
temperature: float = 0.5, temperature: float = 0.5,
max_tokens: int = 4000, max_tokens: int = 4000,
max_completion_tokens: int = 4000, max_completion_tokens: int = 4000,
tools_list_dictionary: Optional[List[Dict[str, Any]]] = None, tools_list_dictionary: list[dict[str, Any]] | None = None,
tool_choice: str = "auto", tool_choice: str = "auto",
parallel_tool_calls: bool = False, parallel_tool_calls: bool = False,
*args, *args,
@ -121,8 +122,8 @@ class VLLMWrapper:
return self.run(task, *args, **kwargs) return self.run(task, *args, **kwargs)
def batched_run( def batched_run(
self, tasks: List[str], batch_size: int = 10 self, tasks: list[str], batch_size: int = 10
) -> List[str]: ) -> list[str]:
""" """
Run the model for multiple tasks in batches. Run the model for multiple tasks in batches.

@ -0,0 +1,302 @@
"""
Agent Multi-Agent Communication Examples
This file demonstrates the multi-agent communication methods available in the Agent class:
- talk_to: Direct communication between two agents
- talk_to_multiple_agents: Concurrent communication with multiple agents
- receive_message: Process incoming messages from other agents
- send_agent_message: Send formatted messages to other agents
Run: python agent_communication_examples.py
"""
import os
from swarms import Agent
# Set up your API key
os.environ["OPENAI_API_KEY"] = "your-openai-api-key"
def example_1_direct_agent_communication():
    """Demonstrate one-to-one agent communication via the `talk_to` method."""
    banner = "=" * 60
    print(banner)
    print("Example 1: Direct Agent Communication")
    print(banner)

    # Two specialized agents: one gathers facts, one interprets them.
    researcher = Agent(
        agent_name="Research-Agent",
        system_prompt="You are a research specialist focused on gathering and analyzing information. Provide detailed, fact-based responses.",
        max_loops=1,
        verbose=False,
    )
    analyst = Agent(
        agent_name="Analysis-Agent",
        system_prompt="You are an analytical specialist focused on interpreting research data and providing strategic insights.",
        max_loops=1,
        verbose=False,
    )

    # Researcher hands the task off directly to the analyst.
    print("Researcher talking to Analyst...")
    research_result = researcher.talk_to(
        agent=analyst,
        task="Analyze the market trends for renewable energy stocks and provide investment recommendations",
    )
    print(f"\nFinal Analysis Result:\n{research_result}")
    return research_result
def example_2_multiple_agent_communication():
    """Example 2: Broadcasting to multiple agents using talk_to_multiple_agents.

    Returns the raw list of responses; entries may be falsy when an agent
    fails to respond (see the else branch below).
    """
    print("\n" + "=" * 60)
    print("Example 2: Multiple Agent Communication")
    print("=" * 60)
    # Create multiple specialized agents
    agents = [
        Agent(
            agent_name="Financial-Analyst",
            system_prompt="You are a financial analysis expert specializing in stock valuation and market trends.",
            max_loops=1,
            verbose=False,
        ),
        Agent(
            agent_name="Risk-Assessor",
            system_prompt="You are a risk assessment specialist focused on identifying potential investment risks.",
            max_loops=1,
            verbose=False,
        ),
        Agent(
            agent_name="Market-Researcher",
            system_prompt="You are a market research expert specializing in industry analysis and competitive intelligence.",
            max_loops=1,
            verbose=False,
        ),
    ]
    coordinator = Agent(
        agent_name="Coordinator-Agent",
        system_prompt="You coordinate multi-agent analysis and synthesize diverse perspectives into actionable insights.",
        max_loops=1,
        verbose=False,
    )
    # Broadcast the same task to every agent.
    # NOTE(review): responses are assumed to come back in the same order as
    # `agents` (the loop below indexes agents[i]) — confirm against
    # talk_to_multiple_agents' contract.
    print("Coordinator broadcasting to multiple agents...")
    responses = coordinator.talk_to_multiple_agents(
        agents=agents,
        task="Evaluate the investment potential of Tesla stock for the next quarter",
    )
    # Process responses, truncating long ones to 200 chars for display.
    print("\nResponses from all agents:")
    for i, response in enumerate(responses):
        if response:
            print(f"\n{agents[i].agent_name} Response:")
            print("-" * 40)
            print(
                response[:200] + "..."
                if len(response) > 200
                else response
            )
        else:
            print(f"\n{agents[i].agent_name}: Failed to respond")
    return responses
def example_3_message_handling():
    """Example 3: Message handling using receive_message and send_agent_message.

    Returns:
        tuple: (received_response, sent_result) as produced by the two calls.
    """
    print("\n" + "=" * 60)
    print("Example 3: Message Handling")
    print("=" * 60)
    # Create an agent that can receive messages
    support_agent = Agent(
        agent_name="Support-Agent",
        system_prompt="You provide helpful support and assistance. Always be professional and solution-oriented.",
        max_loops=1,
        verbose=False,
    )
    notification_agent = Agent(
        agent_name="Notification-Agent",
        system_prompt="You send notifications and updates to other systems and agents.",
        max_loops=1,
        verbose=False,
    )
    # Inbound: agent_name identifies the *sender* the message claims to be from.
    print("Support agent receiving message...")
    received_response = support_agent.receive_message(
        agent_name="Customer-Service-Agent",
        task="A customer is asking about our refund policies for software purchases. Can you provide guidance?",
    )
    print(f"\nSupport Agent Response:\n{received_response}")
    # Outbound: agent_name here names the intended *recipient*.
    print("\nNotification agent sending message...")
    sent_result = notification_agent.send_agent_message(
        agent_name="Task-Manager-Agent",
        message="Customer support ticket #12345 has been resolved successfully",
    )
    print(f"\nNotification Result:\n{sent_result}")
    return received_response, sent_result
def example_4_sequential_workflow():
    """Example 4: Sequential agent workflow using communication methods.

    Pipeline: extract -> validate -> format. Each step's output is embedded
    into the next step's prompt, so statement order matters here.
    """
    print("\n" + "=" * 60)
    print("Example 4: Sequential Agent Workflow")
    print("=" * 60)
    # Create specialized agents for a document processing workflow
    extractor = Agent(
        agent_name="Data-Extractor",
        system_prompt="You extract key information and data points from documents. Focus on accuracy and completeness.",
        max_loops=1,
        verbose=False,
    )
    validator = Agent(
        agent_name="Data-Validator",
        system_prompt="You validate and verify extracted data for accuracy, completeness, and consistency.",
        max_loops=1,
        verbose=False,
    )
    formatter = Agent(
        agent_name="Data-Formatter",
        system_prompt="You format validated data into structured, professional reports and summaries.",
        max_loops=1,
        verbose=False,
    )
    # Sample input document for the pipeline (hard-coded demo fixture).
    document_content = """
    Q3 Financial Report Summary:
    - Revenue: $2.5M (up 15% from Q2)
    - Expenses: $1.8M (operational costs increased by 8%)
    - Net Profit: $700K (improved profit margin of 28%)
    - New Customers: 1,200 (25% growth rate)
    - Customer Retention: 92%
    - Market Share: Increased to 12% in our sector
    """
    print("Starting sequential workflow...")
    # Step 1: Extract data (direct run, no inter-agent call yet)
    print("\nStep 1: Data Extraction")
    extracted_data = extractor.run(
        f"Extract key financial metrics from this report: {document_content}"
    )
    print(f"Extracted: {extracted_data[:150]}...")
    # Step 2: Validate data — extractor hands its output to the validator
    print("\nStep 2: Data Validation")
    validated_data = extractor.talk_to(
        agent=validator,
        task=f"Please validate this extracted data for accuracy and completeness: {extracted_data}",
    )
    print(f"Validated: {validated_data[:150]}...")
    # Step 3: Format data — validator hands the validated text to the formatter
    print("\nStep 3: Data Formatting")
    final_output = validator.talk_to(
        agent=formatter,
        task=f"Format this validated data into a structured executive summary: {validated_data}",
    )
    print(f"\nFinal Report:\n{final_output}")
    return final_output
def example_5_error_handling():
    """Example 5: Robust communication with error handling.

    Returns a dict with keys 'success', 'response', 'error' — exactly one of
    'response'/'error' is non-None.
    """
    print("\n" + "=" * 60)
    print("Example 5: Communication with Error Handling")
    print("=" * 60)

    def safe_agent_communication(sender, receiver, message):
        """Wrap sender.talk_to in a try/except and report a uniform result dict."""
        try:
            print(
                f"Attempting communication: {sender.agent_name} -> {receiver.agent_name}"
            )
            response = sender.talk_to(agent=receiver, task=message)
            return {
                "success": True,
                "response": response,
                "error": None,
            }
        except Exception as e:
            # Deliberate broad catch: this helper demonstrates graceful
            # degradation rather than letting any failure propagate.
            print(f"Communication failed: {e}")
            return {
                "success": False,
                "response": None,
                "error": str(e),
            }

    # Create agents
    agent_a = Agent(
        agent_name="Agent-A",
        system_prompt="You are a helpful assistant focused on providing accurate information.",
        max_loops=1,
        verbose=False,
    )
    agent_b = Agent(
        agent_name="Agent-B",
        system_prompt="You are a knowledgeable expert in technology and business trends.",
        max_loops=1,
        verbose=False,
    )
    # Safe communication
    result = safe_agent_communication(
        sender=agent_a,
        receiver=agent_b,
        message="What are the latest trends in artificial intelligence and how might they impact business operations?",
    )
    if result["success"]:
        print("\nCommunication successful!")
        # Display only the first 200 characters of the reply.
        print(f"Response: {result['response'][:200]}...")
    else:
        print(f"\nCommunication failed: {result['error']}")
    return result
def main():
    """Execute every communication example in order, reporting any failure."""
    print("🤖 Agent Multi-Agent Communication Examples")
    print(
        "This demonstrates the communication methods available in the Agent class"
    )
    # Examples run in sequence; the first failure aborts the remainder.
    examples = (
        example_1_direct_agent_communication,
        example_2_multiple_agent_communication,
        example_3_message_handling,
        example_4_sequential_workflow,
        example_5_error_handling,
    )
    try:
        for run_example in examples:
            run_example()
        print("\n" + "=" * 60)
        print("✅ All examples completed successfully!")
        print("=" * 60)
    except Exception as e:
        print(f"\n❌ Error running examples: {e}")
        print(
            "Make sure to set your OPENAI_API_KEY environment variable"
        )


if __name__ == "__main__":
    main()

@ -0,0 +1,344 @@
"""
Multi-Agent Caching Example - Super Fast Agent Loading
This example demonstrates how to use the agent caching system with multiple agents
to achieve 10-100x speedup in agent loading and reuse.
"""
import time
from swarms import Agent
from swarms.utils.agent_cache import (
cached_agent_loader,
simple_lru_agent_loader,
AgentCache,
get_agent_cache_stats,
clear_agent_cache,
)
def create_trading_team():
    """Create a team of trading agents.

    Returns:
        list[Agent]: five gpt-4o-mini agents with distinct trading roles and
        per-role temperatures (lower = more deterministic).
    """
    # Create multiple agents for different trading strategies
    agents = [
        Agent(
            agent_name="Quantitative-Trading-Agent",
            agent_description="Advanced quantitative trading and algorithmic analysis agent",
            system_prompt="""You are an expert quantitative trading agent with deep expertise in:
            - Algorithmic trading strategies and implementation
            - Statistical arbitrage and market making
            - Risk management and portfolio optimization
            - High-frequency trading systems
            - Market microstructure analysis""",
            max_loops=1,
            model_name="gpt-4o-mini",
            temperature=0.1,
        ),
        Agent(
            agent_name="Risk-Management-Agent",
            agent_description="Portfolio risk assessment and management specialist",
            system_prompt="""You are a risk management specialist focused on:
            - Portfolio risk assessment and stress testing
            - Value at Risk (VaR) calculations
            - Regulatory compliance monitoring
            - Risk mitigation strategies
            - Capital allocation optimization""",
            max_loops=1,
            model_name="gpt-4o-mini",
            temperature=0.2,
        ),
        Agent(
            agent_name="Market-Analysis-Agent",
            agent_description="Real-time market analysis and trend identification",
            system_prompt="""You are a market analysis expert specializing in:
            - Technical analysis and chart patterns
            - Market sentiment analysis
            - Economic indicator interpretation
            - Trend identification and momentum analysis
            - Support and resistance level identification""",
            max_loops=1,
            model_name="gpt-4o-mini",
            temperature=0.3,
        ),
        Agent(
            agent_name="Options-Trading-Agent",
            agent_description="Options strategies and derivatives trading specialist",
            system_prompt="""You are an options trading specialist with expertise in:
            - Options pricing models and Greeks analysis
            - Volatility trading strategies
            - Complex options spreads and combinations
            - Risk-neutral portfolio construction
            - Derivatives market making""",
            max_loops=1,
            model_name="gpt-4o-mini",
            temperature=0.15,
        ),
        Agent(
            agent_name="ESG-Investment-Agent",
            agent_description="ESG-focused investment analysis and screening",
            system_prompt="""You are an ESG investment specialist focusing on:
            - Environmental, Social, and Governance criteria evaluation
            - Sustainable investment screening
            - Impact investing strategies
            - ESG risk assessment
            - Green finance and climate risk analysis""",
            max_loops=1,
            model_name="gpt-4o-mini",
            temperature=0.25,
        ),
    ]
    return agents
def basic_caching_example():
    """Cache a team of agents once, then reload from cache and compare timings."""

    def _timed_load(team):
        # Time a single cached_agent_loader call; returns (agents, seconds).
        t0 = time.time()
        loaded = cached_agent_loader(team)
        return loaded, time.time() - t0

    print("=== Basic Multi-Agent Caching Example ===")
    trading_team = create_trading_team()
    print(f"Created {len(trading_team)} trading agents")

    # First load — agents are processed and written into the cache.
    print("\n🔄 First load (will cache agents)...")
    cached_team_1, first_load_time = _timed_load(trading_team)
    print(
        f"✅ First load: {len(cached_team_1)} agents in {first_load_time:.3f}s"
    )

    # Second load — served from the cache, so it should be much faster.
    print("\n⚡ Second load (from cache)...")
    cached_team_2, second_load_time = _timed_load(trading_team)
    print(
        f"🚀 Second load: {len(cached_team_2)} agents in {second_load_time:.3f}s"
    )
    print(
        f"💨 Speedup: {first_load_time/second_load_time:.1f}x faster!"
    )

    stats = get_agent_cache_stats()
    print(f"📊 Cache stats: {stats}")
    return cached_team_1
def custom_cache_example():
    """Example using a custom cache for specific use cases.

    Builds a dedicated AgentCache instance (own size limit, directory and
    save interval), loads the team through it, then shuts it down.
    """
    print("\n=== Custom Cache Example ===")
    # Create a custom cache with specific settings
    custom_cache = AgentCache(
        max_memory_cache_size=50,  # Cache up to 50 agents
        cache_dir="trading_team_cache",  # Custom cache directory
        enable_persistent_cache=True,  # Enable disk persistence
        auto_save_interval=120,  # Auto-save every 2 minutes
    )
    # Create agents
    trading_team = create_trading_team()
    # Load with custom cache; parallel_loading processes agents concurrently.
    print("🔧 Loading with custom cache...")
    start_time = time.time()
    cached_team = cached_agent_loader(
        trading_team,
        cache_instance=custom_cache,
        parallel_loading=True,
    )
    load_time = time.time() - start_time
    print(f"✅ Loaded {len(cached_team)} agents in {load_time:.3f}s")
    # Get custom cache stats
    stats = custom_cache.get_cache_stats()
    print(f"📊 Custom cache stats: {stats}")
    # Cleanup — flushes/stops the custom cache explicitly.
    custom_cache.shutdown()
    return cached_team
def simple_lru_example():
    """Time two consecutive loads through the simple LRU agent loader."""
    print("\n=== Simple LRU Cache Example ===")
    trading_team = create_trading_team()

    # Cold load: first pass populates the LRU cache.
    print("🔄 First load with simple LRU...")
    t0 = time.time()
    lru_team_1 = simple_lru_agent_loader(trading_team)
    first_time = time.time() - t0

    # Warm load: same team should now be served from the cache.
    print("⚡ Second load with simple LRU...")
    t0 = time.time()
    simple_lru_agent_loader(trading_team)
    cached_time = time.time() - t0

    print(
        f"📈 Simple LRU - First: {first_time:.3f}s, Cached: {cached_time:.3f}s"
    )
    print(f"💨 Speedup: {first_time/cached_time:.1f}x faster!")
    return lru_team_1
def team_workflow_simulation():
    """Simulate a real-world workflow with the cached trading team.

    Pairs each cached agent with one task (zip stops at the shorter of the
    two sequences) and reports per-task and total wall-clock timings.
    """
    print("\n=== Team Workflow Simulation ===")
    # Create and cache the team
    trading_team = create_trading_team()
    cached_team = cached_agent_loader(trading_team)
    # Simulate multiple analysis sessions — one task per agent, in order.
    tasks = [
        "Analyze the current market conditions for AAPL",
        "What are the top 3 ETFs for gold coverage?",
        "Assess the risk profile of a tech-heavy portfolio",
        "Identify options strategies for volatile markets",
        "Evaluate ESG investment opportunities in renewable energy",
    ]
    print(
        f"🎯 Running {len(tasks)} analysis tasks with {len(cached_team)} agents..."
    )
    session_start = time.time()
    for i, (agent, task) in enumerate(zip(cached_team, tasks)):
        print(f"\n📋 Task {i+1}: {agent.agent_name}")
        print(f"   Question: {task}")
        task_start = time.time()
        # Run the agent on the task
        response = agent.run(task)
        task_time = time.time() - task_start
        print(f"   ⏱️  Completed in {task_time:.2f}s")
        # Truncate long responses to 100 chars for display.
        print(
            f"   💡 Response: {response[:100]}..."
            if len(response) > 100
            else f"   💡 Response: {response}"
        )
    total_session_time = time.time() - session_start
    print(f"\n🏁 Total session time: {total_session_time:.2f}s")
    print(
        f"📊 Average task time: {total_session_time/len(tasks):.2f}s"
    )
def performance_comparison():
    """Compare performance with and without caching.

    Benchmarks agent construction (the "no cache" baseline) against
    cached_agent_loader, averaging over a few repetitions.
    """
    print("\n=== Performance Comparison ===")
    # Create test agents
    test_agents = []
    for i in range(10):
        agent = Agent(
            agent_name=f"Test-Agent-{i:02d}",
            model_name="gpt-4o-mini",
            system_prompt=f"You are test agent number {i}.",
            max_loops=1,
        )
        test_agents.append(agent)
    # Test without caching (creating new agents each time)
    print("🔄 Testing without caching...")
    no_cache_times = []
    for _ in range(3):
        start_time = time.time()
        # Simulate creating new agents each time
        new_agents = []
        for agent in test_agents:
            new_agent = Agent(
                agent_name=agent.agent_name,
                model_name=agent.model_name,
                system_prompt=agent.system_prompt,
                max_loops=agent.max_loops,
            )
            new_agents.append(new_agent)
        no_cache_time = time.time() - start_time
        no_cache_times.append(no_cache_time)
    avg_no_cache_time = sum(no_cache_times) / len(no_cache_times)
    # Clear cache for fair comparison
    clear_agent_cache()
    # Test with caching (first load — includes the cost of populating the cache)
    print("🔧 Testing with caching (first load)...")
    start_time = time.time()
    cached_agent_loader(test_agents)
    first_cache_time = time.time() - start_time
    # Test with caching (subsequent loads — pure cache hits)
    print("⚡ Testing with caching (subsequent loads)...")
    cache_times = []
    for _ in range(3):
        start_time = time.time()
        cached_agent_loader(test_agents)
        cache_time = time.time() - start_time
        cache_times.append(cache_time)
    avg_cache_time = sum(cache_times) / len(cache_times)
    # Results
    print(f"\n📊 Performance Results for {len(test_agents)} agents:")
    print(f"   🐌 No caching (avg):     {avg_no_cache_time:.4f}s")
    print(f"   🔧 Cached (first load):  {first_cache_time:.4f}s")
    print(f"   🚀 Cached (avg):         {avg_cache_time:.4f}s")
    print(
        f"   💨 Cache speedup:        {avg_no_cache_time/avg_cache_time:.1f}x faster!"
    )
    # Final cache stats
    final_stats = get_agent_cache_stats()
    print(f"   📈 Final cache stats: {final_stats}")
def main():
    """Run all examples to demonstrate multi-agent caching.

    Each example runs in sequence; any exception aborts the rest and a
    traceback is printed for diagnosis.
    """
    print("🤖 Multi-Agent Caching System Examples")
    print("=" * 50)
    try:
        # Run examples
        basic_caching_example()
        custom_cache_example()
        simple_lru_example()
        performance_comparison()
        team_workflow_simulation()
        print("\n✅ All examples completed successfully!")
        print("\n🎯 Key Benefits of Multi-Agent Caching:")
        print("• 🚀 10-100x faster agent loading from cache")
        print(
            "• 💾 Persistent disk cache survives application restarts"
        )
        print("• 🧠 Intelligent LRU memory management")
        print("• 🔄 Background preloading for zero-latency access")
        print("• 📊 Detailed performance monitoring")
        print("• 🛡️  Thread-safe with memory leak prevention")
        print("• ⚡ Parallel processing for multiple agents")
    except Exception as e:
        # Broad catch is intentional for a demo driver: print and continue.
        print(f"❌ Error running examples: {e}")
        import traceback

        traceback.print_exc()


if __name__ == "__main__":
    main()

@ -0,0 +1,128 @@
"""
Quick Start: Agent Caching with Multiple Agents
This is a simple example showing how to use agent caching with your existing agents
for super fast loading and reuse.
"""
import time
from swarms import Agent
from swarms.utils.agent_cache import cached_agent_loader
def main():
    """Simple example of caching multiple agents.

    Compares repeated agent construction ("no cache") against
    cached_agent_loader, then runs one cached agent on a demo task.
    """
    # Create your agents as usual
    agents = [
        Agent(
            agent_name="Quantitative-Trading-Agent",
            agent_description="Advanced quantitative trading and algorithmic analysis agent",
            system_prompt="""You are an expert quantitative trading agent with deep expertise in:
            - Algorithmic trading strategies and implementation
            - Statistical arbitrage and market making
            - Risk management and portfolio optimization
            - High-frequency trading systems
            - Market microstructure analysis
            Your core responsibilities include:
            1. Developing and backtesting trading strategies
            2. Analyzing market data and identifying alpha opportunities
            3. Implementing risk management frameworks
            4. Optimizing portfolio allocations
            5. Conducting quantitative research
            6. Monitoring market microstructure
            7. Evaluating trading system performance
            You maintain strict adherence to:
            - Mathematical rigor in all analyses
            - Statistical significance in strategy development
            - Risk-adjusted return optimization
            - Market impact minimization
            - Regulatory compliance
            - Transaction cost analysis
            - Performance attribution
            You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
            max_loops=1,
            model_name="gpt-4o-mini",
            dynamic_temperature_enabled=True,
            output_type="str-all-except-first",
            streaming_on=True,
            print_on=True,
            telemetry_enable=False,
        ),
        Agent(
            agent_name="Risk-Manager",
            system_prompt="You are a risk management specialist.",
            max_loops=1,
            model_name="gpt-4o-mini",
        ),
        Agent(
            agent_name="Market-Analyst",
            system_prompt="You are a market analysis expert.",
            max_loops=1,
            model_name="gpt-4o-mini",
        ),
    ]
    print(f"Created {len(agents)} agents")
    # BEFORE: Creating agents each time (slow)
    print("\n=== Without Caching (Slow) ===")
    start_time = time.time()
    # Simulate creating new agents each time
    for _ in range(3):
        new_agents = []
        for agent in agents:
            new_agent = Agent(
                agent_name=agent.agent_name,
                system_prompt=agent.system_prompt,
                max_loops=agent.max_loops,
                model_name=agent.model_name,
            )
            new_agents.append(new_agent)
    no_cache_time = time.time() - start_time
    print(f"🐌 Time without caching: {no_cache_time:.3f}s")
    # AFTER: Using cached agents (super fast!)
    print("\n=== With Caching (Super Fast!) ===")
    # First call - will cache the agents
    start_time = time.time()
    cached_agent_loader(agents)
    first_cache_time = time.time() - start_time
    print(f"🔧 First cache load: {first_cache_time:.3f}s")
    # Subsequent calls - retrieves from cache (lightning fast!)
    cache_times = []
    for i in range(3):
        start_time = time.time()
        cached_agents = cached_agent_loader(agents)
        cache_time = time.time() - start_time
        cache_times.append(cache_time)
        print(f"⚡ Cache load #{i+1}: {cache_time:.4f}s")
    avg_cache_time = sum(cache_times) / len(cache_times)
    print("\n📊 Results:")
    print(f"   🐌 Without caching: {no_cache_time:.3f}s")
    print(f"   🚀 With caching:    {avg_cache_time:.4f}s")
    print(
        f"   💨 Speedup:         {no_cache_time/avg_cache_time:.0f}x faster!"
    )
    # Now use your cached agents normally.
    # NOTE(review): `cached_agents` is bound inside the loop above; this works
    # because range(3) always iterates, but it is fragile if the loop changes.
    print("\n🎯 Using cached agents:")
    task = "What are the best top 3 etfs for gold coverage?"
    for agent in cached_agents[
        :1
    ]:  # Just use the first agent for demo
        print(f"   Running {agent.agent_name}...")
        response = agent.run(task)
        print(f"   Response: {response[:100]}...")


if __name__ == "__main__":
    main()

@ -0,0 +1,186 @@
"""
Simple Agent Caching Tests - Just 4 Basic Tests
Tests loading agents with and without cache for single and multiple agents.
"""
import time
from swarms import Agent
from swarms.utils.agent_cache import (
cached_agent_loader,
clear_agent_cache,
)
def test_single_agent_without_cache():
    """Measure the average cost of constructing one Agent from scratch."""
    print("🔄 Test 1: Single agent without cache")
    # Construct the agent 10 times and time each construction.
    durations = []
    for _ in range(10):
        t0 = time.time()
        Agent(
            agent_name="Test-Agent-1",
            model_name="gpt-4o-mini",
            system_prompt="You are a test agent.",
            max_loops=1,
        )
        durations.append(time.time() - t0)
    avg_time = sum(durations) / len(durations)
    print(
        f"   ✅ Single agent without cache: {avg_time:.4f}s (avg of 10 creations)"
    )
    return avg_time
def test_single_agent_with_cache():
    """Test loading a single agent with cache.

    Warms the cache with one load, then averages 10 cache-hit loads.
    """
    print("🔄 Test 2: Single agent with cache")
    # Start from an empty cache so the first load is a genuine miss.
    clear_agent_cache()
    # Create agent
    agent = Agent(
        agent_name="Test-Agent-1",
        model_name="gpt-4o-mini",
        system_prompt="You are a test agent.",
        max_loops=1,
    )
    # First load (cache miss) - disable preloading for fair test
    cached_agent_loader([agent], preload=False)
    # Now test multiple cache hits
    times = []
    for _ in range(10):  # Do it 10 times to get better measurement
        start_time = time.time()
        cached_agent_loader([agent], preload=False)
        load_time = time.time() - start_time
        times.append(load_time)
    avg_time = sum(times) / len(times)
    print(
        f"   ✅ Single agent with cache: {avg_time:.4f}s (avg of 10 cache hits)"
    )
    return avg_time
def test_multiple_agents_without_cache():
    """Measure the average cost of constructing five Agents from scratch."""
    print("🔄 Test 3: Multiple agents without cache")
    # Build the 5-agent batch 5 times, timing each full batch.
    durations = []
    for _ in range(5):
        t0 = time.time()
        [
            Agent(
                agent_name=f"Test-Agent-{i}",
                model_name="gpt-4o-mini",
                system_prompt=f"You are test agent {i}.",
                max_loops=1,
            )
            for i in range(5)
        ]
        durations.append(time.time() - t0)
    avg_time = sum(durations) / len(durations)
    print(
        f"   ✅ Multiple agents without cache: {avg_time:.4f}s (avg of 5 creations)"
    )
    return avg_time
def test_multiple_agents_with_cache():
    """Test loading multiple agents with cache.

    Warms the cache with one batch load, then averages 5 cache-hit loads.
    """
    print("🔄 Test 4: Multiple agents with cache")
    # Start from an empty cache so the first batch load is a genuine miss.
    clear_agent_cache()
    # Create agents
    agents = [
        Agent(
            agent_name=f"Test-Agent-{i}",
            model_name="gpt-4o-mini",
            system_prompt=f"You are test agent {i}.",
            max_loops=1,
        )
        for i in range(5)
    ]
    # First load (cache miss) - disable preloading for fair test
    cached_agent_loader(agents, preload=False)
    # Now test multiple cache hits
    times = []
    for _ in range(5):  # Do it 5 times to get better measurement
        start_time = time.time()
        cached_agent_loader(agents, preload=False)
        load_time = time.time() - start_time
        times.append(load_time)
    avg_time = sum(times) / len(times)
    print(
        f"   ✅ Multiple agents with cache: {avg_time:.4f}s (avg of 5 cache hits)"
    )
    return avg_time
def main():
    """Run the 4 simple tests.

    Prints raw timings, then speedups — guarded against division by
    near-zero cache-hit times.
    """
    print("🚀 Simple Agent Caching Tests")
    print("=" * 40)
    # Run tests
    single_no_cache = test_single_agent_without_cache()
    single_with_cache = test_single_agent_with_cache()
    multiple_no_cache = test_multiple_agents_without_cache()
    multiple_with_cache = test_multiple_agents_with_cache()
    # Results
    print("\n📊 Results:")
    print("=" * 40)
    print(f"Single agent - No cache:     {single_no_cache:.4f}s")
    print(f"Single agent - With cache:   {single_with_cache:.4f}s")
    print(f"Multiple agents - No cache:  {multiple_no_cache:.4f}s")
    print(f"Multiple agents - With cache: {multiple_with_cache:.4f}s")
    # Speedups (handle near-zero times)
    if (
        single_with_cache > 0.00001
    ):  # Only calculate if time is meaningful
        single_speedup = single_no_cache / single_with_cache
        print(f"\n🚀 Single agent speedup: {single_speedup:.1f}x")
    else:
        print(
            "\n🚀 Single agent speedup: Cache too fast to measure accurately!"
        )
    if (
        multiple_with_cache > 0.00001
    ):  # Only calculate if time is meaningful
        multiple_speedup = multiple_no_cache / multiple_with_cache
        print(f"🚀 Multiple agents speedup: {multiple_speedup:.1f}x")
    else:
        print(
            "🚀 Multiple agents speedup: Cache too fast to measure accurately!"
        )
    # Summary
    print("\n✅ Cache Validation:")
    print("• Cache hit rates are increasing (visible in logs)")
    print("• No validation errors")
    print(
        "• Agent objects are being cached and retrieved successfully"
    )
    print(
        "• For real agents with LLM initialization, expect 10-100x speedups!"
    )


if __name__ == "__main__":
    main()

@ -1,5 +1,5 @@
from swarms import Agent # Get your packages
from swarms.structs.concurrent_workflow import ConcurrentWorkflow from swarms import Agent, ConcurrentWorkflow
# Initialize market research agent # Initialize market research agent
market_researcher = Agent( market_researcher = Agent(
@ -12,7 +12,7 @@ market_researcher = Agent(
5. Providing actionable market insights""", 5. Providing actionable market insights""",
model_name="claude-3-sonnet-20240229", model_name="claude-3-sonnet-20240229",
max_loops=1, max_loops=1,
temperature=0.7, dynamic_temperature_enabled=True,
# streaming_on=True, # streaming_on=True,
) )
@ -25,10 +25,9 @@ financial_analyst = Agent(
3. Assessing risk factors 3. Assessing risk factors
4. Providing financial forecasts 4. Providing financial forecasts
5. Recommending financial strategies""", 5. Recommending financial strategies""",
model_name="claude-3-sonnet-20240229", model_name="gpt-4.1",
max_loops=1, max_loops=1,
# streaming_on=True, dynamic_temperature_enabled=True,
temperature=0.7,
) )
# Initialize technical analyst agent # Initialize technical analyst agent
@ -52,14 +51,15 @@ agents = [market_researcher, financial_analyst, technical_analyst]
router = ConcurrentWorkflow( router = ConcurrentWorkflow(
name="market-analysis-router", name="market-analysis-router",
description="This concurrent workflow is used to analyze the market, financial, and technical aspects of a stock.",
agents=agents, agents=agents,
max_loops=1, max_loops=1,
# output_type="all", output_type="all",
show_dashboard=True, show_dashboard=True,
) )
result = router.run( result = router.run(
"Analyze Tesla (TSLA) stock from market, financial, and technical perspectives" task="What are the best 3 ETFS for energy sector in the US?"
) )
print(result) print(result)

@ -0,0 +1,62 @@
# Demo: run three specialist agents concurrently over one analysis task.
# NOTE(review): SwarmRouter is imported but never used in this script.
from swarms import Agent, ConcurrentWorkflow, SwarmRouter

# Initialize market research agent
market_researcher = Agent(
    agent_name="Market-Researcher",
    system_prompt="""You are a market research specialist. Your tasks include:
1. Analyzing market trends and patterns
2. Identifying market opportunities and threats
3. Evaluating competitor strategies
4. Assessing customer needs and preferences
5. Providing actionable market insights""",
    model_name="claude-3-5-sonnet-20240620",
    max_loops=1,
    streaming_on=True,
    print_on=False,
)

# Initialize financial analyst agent
financial_analyst = Agent(
    agent_name="Financial-Analyst",
    system_prompt="""You are a financial analysis expert. Your responsibilities include:
1. Analyzing financial statements
2. Evaluating investment opportunities
3. Assessing risk factors
4. Providing financial forecasts
5. Recommending financial strategies""",
    model_name="claude-3-5-sonnet-20240620",
    max_loops=1,
    streaming_on=True,
    print_on=False,
)

# Initialize technical analyst agent
technical_analyst = Agent(
    agent_name="Technical-Analyst",
    system_prompt="""You are a technical analysis specialist. Your focus areas include:
1. Analyzing price patterns and trends
2. Evaluating technical indicators
3. Identifying support and resistance levels
4. Assessing market momentum
5. Providing trading recommendations""",
    model_name="claude-3-5-sonnet-20240620",
    max_loops=1,
    streaming_on=True,
    print_on=False,
)

# Create list of agents
agents = [market_researcher, financial_analyst, technical_analyst]

# Initialize the concurrent workflow
workflow = ConcurrentWorkflow(
    name="market-analysis-workflow",
    agents=agents,
    max_loops=1,
    show_dashboard=True,
)

# Run the workflow.
# NOTE(review): `result` is captured but never printed or returned — output
# is presumably shown via the dashboard; confirm or add a print(result).
result = workflow.run(
    "Analyze Tesla (TSLA) stock from market, financial, and technical perspectives"
)

@ -329,7 +329,8 @@ class CouncilJudgeEvaluator:
return ( return (
final_answer.group(1) == correct_answer_lower final_answer.group(1) == correct_answer_lower
) )
except: except Exception as e:
logger.error(f"Error checking answer: {str(e)}")
pass pass
# For other datasets, check if the correct answer is contained in the evaluation # For other datasets, check if the correct answer is contained in the evaluation

@ -0,0 +1,250 @@
from swarms import Agent
from swarms.structs.election_swarm import (
ElectionSwarm,
)
# Create candidate agents for Apple CEO position.
# Each candidate is an LLM agent role-playing an executive persona; the
# system prompts encode the background the board-member voter agents
# will weigh when casting votes via the ElectionSwarm below.
tim_cook = Agent(
    agent_name="Tim Cook - Current CEO",
    system_prompt="""You are Tim Cook, the current CEO of Apple Inc. since 2011.
Your background:
- 13+ years as Apple CEO, succeeding Steve Jobs
- Former COO of Apple (2007-2011)
- Former VP of Operations at Compaq
- MBA from Duke University
- Known for operational excellence and supply chain management
- Led Apple to become the world's most valuable company
- Expanded Apple's services business significantly
- Strong focus on privacy, sustainability, and social responsibility
- Successfully navigated global supply chain challenges
- Annual revenue growth from $108B to $394B during tenure
Strengths: Operational expertise, global experience, proven track record, strong relationships with suppliers and partners, focus on privacy and sustainability.
Challenges: Perceived lack of innovation compared to Jobs era, heavy reliance on iPhone revenue, limited new product categories.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
sundar_pichai = Agent(
    agent_name="Sundar Pichai - Google/Alphabet CEO",
    system_prompt="""You are Sundar Pichai, CEO of Alphabet Inc. and Google since 2015.
Your background:
- CEO of Alphabet Inc. since 2019, Google since 2015
- Former Senior VP of Chrome, Apps, and Android
- Led development of Chrome browser and Android platform
- MS in Engineering from Stanford, MBA from Wharton
- Known for product development and AI leadership
- Successfully integrated AI into Google's core products
- Led Google's cloud computing expansion
- Strong focus on AI/ML and emerging technologies
- Experience with large-scale platform management
- Annual revenue growth from $75B to $307B during tenure
Strengths: AI/ML expertise, product development, platform management, experience with large-scale operations, strong technical background.
Challenges: Limited hardware experience, regulatory scrutiny, different company culture.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
jensen_huang = Agent(
    agent_name="Jensen Huang - NVIDIA CEO",
    system_prompt="""You are Jensen Huang, CEO and co-founder of NVIDIA since 1993.
Your background:
- CEO and co-founder of NVIDIA for 31 years
- Former engineer at AMD and LSI Logic
- MS in Electrical Engineering from Stanford
- Led NVIDIA from graphics cards to AI computing leader
- Pioneered GPU computing and AI acceleration
- Successfully pivoted company to AI/data center focus
- Market cap grew from $2B to $2.5T+ under leadership
- Known for long-term vision and technical innovation
- Strong focus on AI, robotics, and autonomous vehicles
- Annual revenue growth from $3.9B to $60B+ during recent years
Strengths: Technical innovation, AI expertise, long-term vision, proven ability to pivot business models, strong engineering background, experience building new markets.
Challenges: Limited consumer hardware experience, different industry focus, no experience with Apple's ecosystem.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
# Create board member voter agents with realistic personas.
# Each voter's prompt states the perspective it will apply when voting.
arthur_levinson = Agent(
    agent_name="Arthur Levinson - Chairman",
    system_prompt="""You are Arthur Levinson, Chairman of Apple's Board of Directors since 2011.
Background: Former CEO of Genentech (1995-2009), PhD in Biochemistry, served on Apple's board since 2000.
Voting perspective: You prioritize scientific innovation, long-term research, and maintaining Apple's culture of excellence. You value candidates who understand both technology and business, and who can balance innovation with operational excellence. You're concerned about Apple's future in AI and biotechnology.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
james_bell = Agent(
    agent_name="James Bell - Board Member",
    system_prompt="""You are James Bell, Apple board member since 2015.
Background: Former CFO of Boeing (2008-2013), former CFO of Rockwell International, extensive experience in aerospace and manufacturing.
Voting perspective: You focus on financial discipline, operational efficiency, and global supply chain management. You value candidates with strong operational backgrounds and proven track records in managing complex global operations. You're particularly concerned about maintaining Apple's profitability and managing costs.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
al_gore = Agent(
    agent_name="Al Gore - Board Member",
    system_prompt="""You are Al Gore, Apple board member since 2003.
Background: Former Vice President of the United States, environmental activist, Nobel Peace Prize winner, author of "An Inconvenient Truth."
Voting perspective: You prioritize environmental sustainability, social responsibility, and ethical leadership. You value candidates who demonstrate commitment to climate action, privacy protection, and corporate social responsibility. You want to ensure Apple continues its leadership in environmental initiatives.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
monica_lozano = Agent(
    agent_name="Monica Lozano - Board Member",
    system_prompt="""You are Monica Lozano, Apple board member since 2014.
Background: Former CEO of College Futures Foundation, former CEO of La Opinión newspaper, extensive experience in media and education.
Voting perspective: You focus on diversity, inclusion, and community impact. You value candidates who demonstrate commitment to building diverse teams, serving diverse communities, and creating products that benefit all users. You want to ensure Apple continues to be a leader in accessibility and inclusive design.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
ron_sugar = Agent(
    agent_name="Ron Sugar - Board Member",
    system_prompt="""You are Ron Sugar, Apple board member since 2010.
Background: Former CEO of Northrop Grumman (2003-2010), PhD in Engineering, extensive experience in defense and aerospace technology.
Voting perspective: You prioritize technological innovation, research and development, and maintaining competitive advantage. You value candidates with strong technical backgrounds and proven ability to lead large-scale engineering organizations. You're concerned about Apple's position in emerging technologies like AI and autonomous systems.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
susan_wagner = Agent(
    agent_name="Susan Wagner - Board Member",
    system_prompt="""You are Susan Wagner, Apple board member since 2014.
Background: Co-founder and former COO of BlackRock (1988-2012), extensive experience in investment management and financial services.
Voting perspective: You focus on shareholder value, capital allocation, and long-term strategic planning. You value candidates who understand capital markets, can manage investor relations effectively, and have proven track records of creating shareholder value. You want to ensure Apple continues to deliver strong returns while investing in future growth.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
andrea_jung = Agent(
    agent_name="Andrea Jung - Board Member",
    system_prompt="""You are Andrea Jung, Apple board member since 2008.
Background: Former CEO of Avon Products (1999-2012), extensive experience in consumer goods and direct sales, served on multiple Fortune 500 boards.
Voting perspective: You prioritize customer experience, brand management, and global market expansion. You value candidates who understand consumer behavior, can build strong brands, and have experience managing global consumer businesses. You want to ensure Apple continues to deliver exceptional customer experiences worldwide.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
bob_iger = Agent(
    agent_name="Bob Iger - Board Member",
    system_prompt="""You are Bob Iger, Apple board member since 2011.
Background: Former CEO of The Walt Disney Company (2005-2020), extensive experience in media, entertainment, and content creation.
Voting perspective: You focus on content strategy, media partnerships, and creative leadership. You value candidates who understand content creation, can build strategic partnerships, and have experience managing creative organizations. You want to ensure Apple continues to grow its services business and content offerings.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
alex_gorsky = Agent(
    agent_name="Alex Gorsky - Board Member",
    system_prompt="""You are Alex Gorsky, Apple board member since 2019.
Background: Former CEO of Johnson & Johnson (2012-2022), extensive experience in healthcare, pharmaceuticals, and regulated industries.
Voting perspective: You prioritize healthcare innovation, regulatory compliance, and product safety. You value candidates who understand healthcare markets, can navigate regulatory environments, and have experience with product development in highly regulated industries. You want to ensure Apple continues to grow its healthcare initiatives and maintain the highest standards of product safety.""",
    model_name="gpt-4.1",
    max_loops=1,
    temperature=0.7,
    # tools_list_dictionary=get_vote_schema(),
)
# Create lists of voters and candidates.
# Only three of the nine board members are enabled; the rest are kept
# commented out, presumably to keep the demo cheap to run.
voter_agents = [
    arthur_levinson,
    james_bell,
    al_gore,
    # monica_lozano,
    # ron_sugar,
    # susan_wagner,
    # andrea_jung,
    # bob_iger,
    # alex_gorsky,
]
candidate_agents = [tim_cook, sundar_pichai, jensen_huang]
# Create the election swarm: `agents` are the voters, `candidate_agents`
# the options they vote on.
apple_election = ElectionSwarm(
    name="Apple Board Election for CEO",
    description="Board election to select the next CEO of Apple Inc.",
    agents=voter_agents,
    candidate_agents=candidate_agents,
    max_loops=1,
    show_dashboard=False,
)
# Define the election task given verbatim to every voter agent.
election_task = """
You are participating in a critical board election to select the next CEO of Apple Inc.
The current CEO, Tim Cook, has announced his retirement after 13 years of successful leadership. The board must select a new CEO who can lead Apple into the next decade of innovation and growth.
Key considerations for the next CEO:
1. Leadership in AI and emerging technologies
2. Ability to maintain Apple's culture of innovation and excellence
3. Experience with global operations and supply chain management
4. Commitment to privacy, sustainability, and social responsibility
5. Track record of creating shareholder value
6. Ability to expand Apple's services business
7. Experience with hardware and software integration
8. Vision for Apple's future in healthcare, automotive, and other new markets
Please carefully evaluate each candidate based on their background, experience, and alignment with Apple's values and strategic objectives. Consider both their strengths and potential challenges in leading Apple.
Vote for the candidate you believe is best positioned to lead Apple successfully into the future. Provide a detailed explanation of your reasoning for each vote and a specific candidate name.
"""
# Run the election and inspect the result (type printed for debugging).
results = apple_election.run(election_task)
print(results)
print(type(results))

@ -0,0 +1,16 @@
from swarms.structs.heavy_swarm import HeavySwarm
# HeavySwarm demo: worker agents use Claude, the question-generation
# agent uses GPT-4.1; each agent runs one loop and the dashboard is shown.
swarm = HeavySwarm(
    worker_model_name="claude-3-5-sonnet-20240620",
    show_dashboard=True,
    question_agent_model_name="gpt-4.1",
    loops_per_agent=1,
)
# Run a single research task; `out` holds the swarm's aggregated answer.
out = swarm.run(
    "Provide 3 publicly traded biotech companies that are currently trading below their cash value. For each company identified, provide available data or projections for the next 6 months, including any relevant financial metrics, upcoming catalysts, or events that could impact valuation. Present your findings in a clear, structured format. Be very specific and provide their ticker symbol, name, and the current price, cash value, and the percentage difference between the two."
)
print(out)

@ -0,0 +1,16 @@
from swarms.structs.heavy_swarm import HeavySwarm
# HeavySwarm demo (no dashboard): cheaper gpt-4o-mini workers with a
# GPT-4.1 question agent, one loop per agent.
swarm = HeavySwarm(
    worker_model_name="gpt-4o-mini",
    show_dashboard=False,
    question_agent_model_name="gpt-4.1",
    loops_per_agent=1,
)
# Run a single research task; `out` holds the swarm's aggregated answer.
out = swarm.run(
    "Identify the top 3 energy sector ETFs listed on US exchanges that offer the highest potential for growth over the next 3-5 years. Focus specifically on funds with significant exposure to companies in the nuclear, natural gas, or oil industries. For each ETF, provide the rationale for its selection, recent performance metrics, sector allocation breakdown, and any notable holdings related to nuclear, gas, or oil. Exclude broad-based energy ETFs that do not have a clear emphasis on these sub-sectors."
)
print(out)

@ -0,0 +1,24 @@
from swarms import Agent, SequentialWorkflow
# Agent 1: The Researcher — first stage of the pipeline.
researcher = Agent(
    agent_name="Researcher",
    system_prompt="Your job is to research the provided topic and provide a detailed summary.",
    model_name="gpt-4o-mini",
)
# Agent 2: The Writer — second stage; consumes the researcher's summary.
writer = Agent(
    agent_name="Writer",
    system_prompt="Your job is to take the research summary and write a beautiful, engaging blog post about it.",
    model_name="gpt-4o-mini",
)
# Create a sequential workflow where the researcher's output feeds into
# the writer's input (agent order defines pipeline order).
workflow = SequentialWorkflow(agents=[researcher, writer])
# Run the workflow on a task; the returned value is the last agent's output.
final_post = workflow.run(
    "The history and future of artificial intelligence"
)
print(final_post)

@ -0,0 +1,81 @@
from swarms import Agent, SwarmRouter
# Agent 1: Risk Metrics Calculator — quantitative risk figures.
risk_metrics_agent = Agent(
    agent_name="Risk-Metrics-Calculator",
    agent_description="Calculates key risk metrics like VaR, Sharpe ratio, and volatility",
    system_prompt="""You are a risk metrics specialist. Calculate and explain:
- Value at Risk (VaR)
- Sharpe ratio
- Volatility
- Maximum drawdown
- Beta coefficient
Provide clear, numerical results with brief explanations.""",
    max_loops=1,
    model_name="gpt-4.1",
    random_model_enabled=True,
    dynamic_temperature_enabled=True,
    output_type="str-all-except-first",
    max_tokens=4096,
)
# Agent 2: Portfolio Risk Analyzer — diversification/concentration view.
# Shares the same model configuration as the other two risk agents.
portfolio_risk_agent = Agent(
    agent_name="Portfolio-Risk-Analyzer",
    agent_description="Analyzes portfolio diversification and concentration risk",
    system_prompt="""You are a portfolio risk analyst. Focus on:
- Portfolio diversification analysis
- Concentration risk assessment
- Correlation analysis
- Sector/asset allocation risk
- Liquidity risk evaluation
Provide actionable insights for risk reduction.""",
    max_loops=1,
    model_name="gpt-4.1",
    random_model_enabled=True,
    dynamic_temperature_enabled=True,
    output_type="str-all-except-first",
    max_tokens=4096,
)
# Agent 3: Market Risk Monitor — macro/market-condition view.
# NOTE(review): this agent is defined but not added to the SwarmRouter's
# agent list further down — confirm whether that omission is intentional.
market_risk_agent = Agent(
    agent_name="Market-Risk-Monitor",
    agent_description="Monitors market conditions and identifies risk factors",
    system_prompt="""You are a market risk monitor. Identify and assess:
- Market volatility trends
- Economic risk factors
- Geopolitical risks
- Interest rate risks
- Currency risks
Provide current risk alerts and trends.""",
    max_loops=1,
    model_name="gpt-4.1",
    random_model_enabled=True,
    dynamic_temperature_enabled=True,
    output_type="str-all-except-first",
    max_tokens=4096,
)
# Route the risk agents through a SwarmRouter configured as a sequential
# workflow; output_type="final" returns only the last agent's answer.
# NOTE(review): only two of the three agents defined above are included
# here (market_risk_agent is omitted) — confirm this is intentional.
swarm = SwarmRouter(
    name="SwarmRouter",
    description="A router that can route messages to the appropriate swarm",
    agents=[
        risk_metrics_agent,
        portfolio_risk_agent,
    ],
    max_loops=1,
    swarm_type="SequentialWorkflow",
    output_type="final",
)
# Run the routed workflow on a single task. ("tickers" was previously
# misspelled "tickrs" in this prompt.)
out = swarm.run(
    "What are the best ways to short the EU markets. Give me specific tickers to short and strategies to use. Create a comprehensive report with all the information you can find."
)
print(out)

@ -0,0 +1,243 @@
#!/usr/bin/env python3
"""
SwarmRouter Performance Benchmark
This script benchmarks the performance improvements in SwarmRouter's _create_swarm method.
It compares the old O(n) elif chain vs the new O(1) factory pattern with caching.
"""
import time
import statistics
from typing import List, Dict, Any
from swarms.structs.swarm_router import SwarmRouter
from swarms.structs.agent import Agent
def create_mock_agents(num_agents: int = 3) -> List[Agent]:
    """Build `num_agents` lightweight agents named TestAgent_0..TestAgent_{n-1}.

    Each agent uses the cheap gpt-4o-mini model and a single loop so the
    benchmark measures router overhead rather than model work.
    """
    return [
        Agent(
            agent_name=f"TestAgent_{idx}",
            system_prompt=f"You are test agent {idx}",
            model_name="gpt-4o-mini",
            max_loops=1,
        )
        for idx in range(num_agents)
    ]
def benchmark_swarm_creation(
    swarm_types: List[str],
    num_iterations: int = 100,
    agents: List[Agent] = None,
) -> Dict[str, Dict[str, Any]]:
    """
    Benchmark swarm creation performance for different swarm types.

    Times only SwarmRouter._create_swarm (router construction is excluded
    from the measured interval). Swarm types whose creation raises on
    every iteration are omitted from the returned dict.

    Args:
        swarm_types: List of swarm types to test
        num_iterations: Number of iterations to run for each swarm type
        agents: List of agents to use for testing (defaults to 3 mock agents)

    Returns:
        Dictionary mapping swarm type -> timing metrics (mean/median/min/
        max/std_dev in seconds, plus the successful iteration count)
    """
    if agents is None:
        agents = create_mock_agents()
    results = {}
    for swarm_type in swarm_types:
        print(f"Benchmarking {swarm_type}...")
        times = []
        for i in range(num_iterations):
            # Create a fresh SwarmRouter instance for each test so that
            # no per-instance caching carries over between iterations.
            router = SwarmRouter(
                name=f"test-router-{i}",
                agents=agents,
                swarm_type=swarm_type,
                telemetry_enabled=False,
            )
            # Time the _create_swarm method (a private API of SwarmRouter;
            # this benchmark will break if that method is renamed)
            start_time = time.perf_counter()
            try:
                router._create_swarm(task="test task")
                end_time = time.perf_counter()
                times.append(end_time - start_time)
            except Exception as e:
                # Failed iterations are reported and skipped, not counted.
                print(f"Failed to create {swarm_type}: {e}")
                continue
        if times:
            results[swarm_type] = {
                "mean_time": statistics.mean(times),
                "median_time": statistics.median(times),
                "min_time": min(times),
                "max_time": max(times),
                "std_dev": (
                    statistics.stdev(times) if len(times) > 1 else 0
                ),
                "total_iterations": len(times),
            }
    return results
def benchmark_caching_performance(
    swarm_type: str = "SequentialWorkflow",
    num_iterations: int = 50,
    agents: List[Agent] = None,
) -> Dict[str, Any]:
    """
    Benchmark the caching performance by creating the same swarm multiple times.

    For each iteration the router's private _swarm_cache is cleared, the
    first _create_swarm call is timed as a cache miss and the immediate
    second call as a cache hit.

    Args:
        swarm_type: The swarm type to test
        num_iterations: Number of miss/hit pairs to measure
        agents: List of agents to use for testing (defaults to 3 mock agents)

    Returns:
        Dict with first_call_mean, cached_call_mean, speedup_factor
        (miss mean / hit mean) and cache_hit_ratio (fixed at 1.0 here).
        NOTE(review): speedup_factor divides by the cached-call mean;
        a zero mean would raise ZeroDivisionError — extremely unlikely
        with perf_counter, but worth knowing.
    """
    if agents is None:
        agents = create_mock_agents()
    print(f"Benchmarking caching performance for {swarm_type}...")
    # A single router instance is reused so its cache persists across calls.
    router = SwarmRouter(
        name="cache-test-router",
        agents=agents,
        swarm_type=swarm_type,
        telemetry_enabled=False,
    )
    first_call_times = []
    cached_call_times = []
    for i in range(num_iterations):
        # Clear cache so the first call below is a guaranteed miss.
        router._swarm_cache.clear()
        # Time first call (cache miss)
        start_time = time.perf_counter()
        router._create_swarm(task="test task", iteration=i)
        end_time = time.perf_counter()
        first_call_times.append(end_time - start_time)
        # Time second call with identical arguments (cache hit)
        start_time = time.perf_counter()
        router._create_swarm(task="test task", iteration=i)
        end_time = time.perf_counter()
        cached_call_times.append(end_time - start_time)
    return {
        "first_call_mean": statistics.mean(first_call_times),
        "cached_call_mean": statistics.mean(cached_call_times),
        "speedup_factor": statistics.mean(first_call_times)
        / statistics.mean(cached_call_times),
        "cache_hit_ratio": 1.0,  # 100% cache hit rate in this test
    }
def print_results(results: Dict[str, Dict[str, Any]]):
    """Render the per-swarm-type creation benchmark metrics to stdout."""
    banner = "=" * 60
    print("\n" + banner)
    print("SWARM CREATION PERFORMANCE BENCHMARK RESULTS")
    print(banner)
    # (label, metrics-dict key) for each seconds-valued row, in display order.
    timing_rows = (
        ("Mean time", "mean_time"),
        ("Median time", "median_time"),
        ("Min time", "min_time"),
        ("Max time", "max_time"),
        ("Std dev", "std_dev"),
    )
    for swarm_name, metrics in results.items():
        print(f"\n{swarm_name}:")
        for label, key in timing_rows:
            print(f" {label}: {metrics[key]:.6f} seconds")
        print(f" Iterations: {metrics['total_iterations']}")
def print_caching_results(results: Dict[str, Any]):
    """Render the cache miss/hit benchmark summary to stdout."""
    banner = "=" * 60
    for header_line in ("\n" + banner, "CACHING PERFORMANCE BENCHMARK RESULTS", banner):
        print(header_line)
    miss_mean = results["first_call_mean"]
    hit_mean = results["cached_call_mean"]
    print(f"First call mean time: {miss_mean:.6f} seconds")
    print(f"Cached call mean time: {hit_mean:.6f} seconds")
    print(f"Speedup factor: {results['speedup_factor']:.2f}x")
    print(f"Cache hit ratio: {results['cache_hit_ratio']:.1%}")
def main():
    """Run the complete benchmark suite.

    Sequence: build mock agents, time swarm creation for the first four
    swarm types, time cache miss/hit behavior for SequentialWorkflow,
    then print a combined summary.
    """
    print("SwarmRouter Performance Benchmark")
    print(
        "Testing O(1) factory pattern with caching vs O(n) elif chain"
    )
    print("-" * 60)
    # Create test agents shared by both benchmarks.
    agents = create_mock_agents(3)
    # Candidate swarm types; only a prefix is actually benchmarked below.
    swarm_types = [
        "SequentialWorkflow",
        "ConcurrentWorkflow",
        "AgentRearrange",
        "MixtureOfAgents",
        "GroupChat",
        "MultiAgentRouter",
        "HeavySwarm",
        "MALT",
    ]
    # Run creation benchmark
    creation_results = benchmark_swarm_creation(
        swarm_types=swarm_types[:4],  # Test first 4 for speed
        num_iterations=20,
        agents=agents,
    )
    print_results(creation_results)
    # Run caching benchmark
    caching_results = benchmark_caching_performance(
        swarm_type="SequentialWorkflow",
        num_iterations=10,
        agents=agents,
    )
    print_caching_results(caching_results)
    # Summarize overall performance (skipped when every swarm type failed,
    # since avg_creation_time would be undefined).
    if creation_results:
        avg_creation_time = statistics.mean(
            [
                metrics["mean_time"]
                for metrics in creation_results.values()
            ]
        )
        print("\n" + "=" * 60)
        print("PERFORMANCE SUMMARY")
        print("=" * 60)
        print(
            f"Average swarm creation time: {avg_creation_time:.6f} seconds"
        )
        print(
            "Factory pattern provides O(1) lookup vs O(n) elif chain"
        )
        print(
            f"Caching provides {caching_results['speedup_factor']:.2f}x speedup for repeated calls"
        )
        print("=" * 60)


if __name__ == "__main__":
    main()

@ -0,0 +1,99 @@
"""
Agent Judge with Evaluation Criteria Example
This example demonstrates how to use the AgentJudge with custom evaluation criteria.
The evaluation_criteria parameter allows specifying different criteria with weights
for more targeted and customizable evaluation of agent outputs.
"""
from swarms.agents.agent_judge import AgentJudge
from dotenv import load_dotenv
load_dotenv()
# Example 1: Basic usage with evaluation criteria
print("\n=== Example 1: Using Custom Evaluation Criteria ===\n")
# Create an AgentJudge with custom evaluation criteria
judge = AgentJudge(
model_name="claude-3-7-sonnet-20250219", # Use any available model
evaluation_criteria={
"correctness": 0.5,
"problem_solving_approach": 0.3,
"explanation_clarity": 0.2,
},
)
# Sample output to evaluate
task_response = [
"Task: Determine the time complexity of a binary search algorithm and explain your reasoning.\n\n"
"Agent response: The time complexity of binary search is O(log n). In each step, "
"we divide the search space in half, resulting in a logarithmic relationship between "
"the input size and the number of operations. This can be proven by solving the "
"recurrence relation T(n) = T(n/2) + O(1), which gives us T(n) = O(log n)."
]
# Run evaluation
evaluation = judge.run(task_response)
print(evaluation[0])
# Example 2: Specialized criteria for code evaluation
print(
"\n=== Example 2: Code Evaluation with Specialized Criteria ===\n"
)
code_judge = AgentJudge(
model_name="claude-3-7-sonnet-20250219",
agent_name="code_judge",
evaluation_criteria={
"code_correctness": 0.4,
"code_efficiency": 0.3,
"code_readability": 0.3,
},
)
# Sample code to evaluate
code_response = [
"Task: Write a function to find the maximum subarray sum in an array of integers.\n\n"
"Agent response:\n```python\n"
"def max_subarray_sum(arr):\n"
" current_sum = max_sum = arr[0]\n"
" for i in range(1, len(arr)):\n"
" current_sum = max(arr[i], current_sum + arr[i])\n"
" max_sum = max(max_sum, current_sum)\n"
" return max_sum\n\n"
"# Example usage\n"
"print(max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])) # Output: 6 (subarray [4, -1, 2, 1])\n"
"```\n"
"This implementation uses Kadane's algorithm which has O(n) time complexity and "
"O(1) space complexity, making it optimal for this problem."
]
code_evaluation = code_judge.run(code_response)
print(code_evaluation[0])
# Example 3: Comparing multiple responses
print("\n=== Example 3: Comparing Multiple Agent Responses ===\n")
comparison_judge = AgentJudge(
model_name="claude-3-7-sonnet-20250219",
evaluation_criteria={"accuracy": 0.6, "completeness": 0.4},
)
multiple_responses = comparison_judge.run(
[
"Task: Explain the CAP theorem in distributed systems.\n\n"
"Agent A response: CAP theorem states that a distributed system cannot simultaneously "
"provide Consistency, Availability, and Partition tolerance. In practice, you must "
"choose two out of these three properties.",
"Task: Explain the CAP theorem in distributed systems.\n\n"
"Agent B response: The CAP theorem, formulated by Eric Brewer, states that in a "
"distributed data store, you can only guarantee two of the following three properties: "
"Consistency (all nodes see the same data at the same time), Availability (every request "
"receives a response), and Partition tolerance (the system continues to operate despite "
"network failures). Most modern distributed systems choose to sacrifice consistency in "
"favor of availability and partition tolerance, implementing eventual consistency models instead.",
]
)
print(multiple_responses[0])

@ -0,0 +1,22 @@
from swarms import SelfConsistencyAgent
# Initialize the reasoning agent with self-consistency: it generates
# num_samples independent responses and selects a final answer by
# majority voting (default voting prompt used here).
reasoning_agent_router = SelfConsistencyAgent(
    name="reasoning-agent",
    description="A reasoning agent that can answer questions and help with tasks.",
    model_name="gpt-4o-mini",
    system_prompt="You are a helpful assistant that can answer questions and help with tasks.",
    max_loops=1,
    num_samples=3,  # Generate 3 independent responses
    eval=False,  # Disable evaluation mode
    random_models_on=False,  # Disable random model selection
    majority_voting_prompt=None,  # Use default majority voting prompt
)
# Run the agent on a financial analysis task
result = reasoning_agent_router.run(
    "What is the best possible financial strategy to maximize returns but minimize risk? Give a list of etfs to invest in and the percentage of the portfolio to allocate to each etf."
)
print("Financial Strategy Result:")
print(result)

@ -1,5 +1,6 @@
from swarms.agents.reasoning_agents import ReasoningAgentRouter

# Initialize the reasoning agent router with self-consistency
reasoning_agent_router = ReasoningAgentRouter(
    agent_name="reasoning-agent",
    description="A reasoning agent that can answer questions and help with tasks.",
@ -7,40 +8,16 @@ reasoning_agent_router = ReasoningAgentRouter(
    system_prompt="You are a helpful assistant that can answer questions and help with tasks.",
    max_loops=1,
    swarm_type="self-consistency",
    num_samples=3,  # Generate 3 independent responses
    eval=False,  # Disable evaluation mode
    random_models_on=False,  # Disable random model selection
    majority_voting_prompt=None,  # Use default majority voting prompt
)

# Run the agent on a financial analysis task
result = reasoning_agent_router.run(
    "What is the best possible financial strategy to maximize returns but minimize risk? Give a list of etfs to invest in and the percentage of the portfolio to allocate to each etf."
)
print("Financial Strategy Result:")
print(result)

@ -0,0 +1,22 @@
from swarms.agents.reasoning_agents import ReasoningAgentRouter
# Reasoning-duo router specialized for quantum field theory questions.
# output_type="dict-all-except-first" controls the shape of run()'s
# return value — confirm exact semantics against ReasoningAgentRouter docs.
router = ReasoningAgentRouter(
    agent_name="qft_reasoning_agent",
    description="A specialized reasoning agent for answering questions and solving problems in quantum field theory.",
    model_name="groq/moonshotai/kimi-k2-instruct",
    system_prompt=(
        "You are a highly knowledgeable assistant specializing in quantum field theory (QFT). "
        "You can answer advanced questions, explain concepts, and help with tasks related to QFT, "
        "including but not limited to Lagrangians, Feynman diagrams, renormalization, quantum electrodynamics, "
        "quantum chromodynamics, and the Standard Model. Provide clear, accurate, and detailed explanations, "
        "and cite relevant equations or references when appropriate."
    ),
    max_loops=1,
    swarm_type="reasoning-duo",
    output_type="dict-all-except-first",
)
# Run one question through the router and print the structured output.
out = router.run(
    "Explain the significance of spontaneous symmetry breaking in quantum field theory."
)
print(out)

@ -0,0 +1,23 @@
from swarms import ReasoningDuo
# ReasoningDuo variant of the QFT example: the main model (Claude) is
# paired with a separate reasoning model (kimi-k2) and runs two loops.
router = ReasoningDuo(
    agent_name="qft_reasoning_agent",
    description="A specialized reasoning agent for answering questions and solving problems in quantum field theory.",
    model_name="claude-3-5-sonnet-20240620",
    system_prompt=(
        "You are a highly knowledgeable assistant specializing in quantum field theory (QFT). "
        "You can answer advanced questions, explain concepts, and help with tasks related to QFT, "
        "including but not limited to Lagrangians, Feynman diagrams, renormalization, quantum electrodynamics, "
        "quantum chromodynamics, and the Standard Model. Provide clear, accurate, and detailed explanations, "
        "and cite relevant equations or references when appropriate."
    ),
    max_loops=2,
    swarm_type="reasoning-duo",
    output_type="dict-all-except-first",
    reasoning_model_name="groq/moonshotai/kimi-k2-instruct",
)
# Run one question and print the structured output.
out = router.run(
    "Explain the significance of spontaneous symmetry breaking in quantum field theory."
)
print(out)

@ -0,0 +1,353 @@
"""
Examples demonstrating the concurrent wrapper decorator functionality.
This file shows how to use the concurrent and concurrent_class_executor
decorators to enable concurrent execution of functions and class methods.
"""
import time
import asyncio
from typing import Dict, Any
import requests
from swarms.utils.concurrent_wrapper import (
concurrent,
concurrent_class_executor,
thread_executor,
process_executor,
async_executor,
batch_executor,
ExecutorType,
)
# Example 1: Basic concurrent function execution.
# The @concurrent decorator adds a .concurrent_execute(...) entry point
# (used further below) with timeout and retry behavior per these kwargs.
@concurrent(
    name="data_processor",
    description="Process data concurrently",
    timeout=30,
    retry_on_failure=True,
    max_retries=2,
)
def process_data(data: str) -> str:
    """Simulate data processing: sleep 1s, then return 'processed_<data>'."""
    time.sleep(1)  # Simulate work
    return f"processed_{data}"
# Example 2: Thread-based executor for I/O bound tasks (HTTP fetches).
@thread_executor(max_workers=8, timeout=60)
def fetch_url(url: str) -> Dict[str, Any]:
    """Fetch a URL and summarize the outcome.

    Returns a dict with url/status_code/content_length/success on any
    HTTP response; on exception (timeout, DNS, connection error) returns
    {url, error, success: False} instead of raising.
    """
    try:
        response = requests.get(url, timeout=10)
        return {
            "url": url,
            "status_code": response.status_code,
            # Length of the raw body; "success" means HTTP 200 only.
            "content_length": len(response.content),
            "success": response.status_code == 200,
        }
    except Exception as e:
        return {"url": url, "error": str(e), "success": False}
# Example 3: Process-based executor for CPU-intensive tasks.
@process_executor(max_workers=2, timeout=120)
def cpu_intensive_task(n: int) -> float:
    """Sum i**0.5 * i**0.3 over i in [0, n) — pure-CPU busywork.

    Starting the sum at 0.0 keeps the return type float even for n == 0.
    """
    return sum(((i**0.5) * (i**0.3) for i in range(n)), 0.0)
# Example 4: Async executor for async functions
@async_executor(max_workers=5)
async def async_task(task_id: int) -> str:
    """Pretend to do half a second of async work, then report back."""
    await asyncio.sleep(0.5)  # simulated async work
    return "async_result_%d" % task_id
# Example 5: Batch processing
@batch_executor(batch_size=10, max_workers=3)
def process_item(item: str) -> str:
    """Upper-case a single item after a short simulated delay."""
    time.sleep(0.1)  # simulated work
    return item.upper()
# Example 6: Class with concurrent methods
@concurrent_class_executor(
    name="DataProcessor",
    max_workers=4,
    methods=["process_batch", "validate_data"],
)
class DataProcessor:
    """A class whose listed methods gain concurrent execution.

    Only ``process_batch`` and ``validate_data`` are wrapped by the
    decorator; ``normal_method`` stays an ordinary method.
    """

    def __init__(self, config: Dict[str, Any]):
        # Arbitrary configuration mapping, stored as given.
        self.config = config

    def process_batch(self, data: str) -> str:
        """Process one batch of data (0.5 s simulated delay)."""
        time.sleep(0.5)  # simulated processing
        return "processed_" + data

    def validate_data(self, data: str) -> bool:
        """Return True when ``data`` is non-empty (0.2 s delay)."""
        time.sleep(0.2)  # simulated validation
        return bool(data)

    def normal_method(self, x: int) -> int:
        """A plain, non-concurrent method: doubles its argument."""
        return 2 * x
# Example 7: Function with custom configuration
@concurrent(
    name="custom_processor",
    description="Custom concurrent processor",
    max_workers=6,
    timeout=45,
    executor_type=ExecutorType.THREAD,
    return_exceptions=True,
    ordered=False,
    retry_on_failure=True,
    max_retries=3,
    retry_delay=0.5,
)
def custom_processor(item: str, multiplier: int = 1) -> str:
    """Build ``"<item>_<multiplier>"`` and repeat it ``multiplier`` times."""
    time.sleep(0.3)
    tagged = "%s_%s" % (item, multiplier)
    return tagged * multiplier
def example_1_basic_concurrent_execution():
    """Example 1: Basic concurrent execution."""
    print("=== Example 1: Basic Concurrent Execution ===")

    # Ten synthetic work items
    items = ["item_%d" % i for i in range(10)]

    # Fan the items out through the concurrent wrapper
    results = process_data.concurrent_execute(*items)

    # Partition results into successes and failures
    ok = [r.value for r in results if r.success]
    bad = [r for r in results if not r.success]

    print(f"Successfully processed: {len(ok)} items")
    print(f"Failed: {len(bad)} items")
    print(f"Sample results: {ok[:3]}")
    print()
def example_2_thread_based_execution():
    """Example 2: Thread-based execution for I/O bound tasks."""
    print("=== Example 2: Thread-based Execution ===")

    # A mix of fast, slow and deliberately failing endpoints
    urls = [
        "https://httpbin.org/get",
        "https://httpbin.org/status/200",
        "https://httpbin.org/status/404",
        "https://httpbin.org/delay/1",
        "https://httpbin.org/delay/2",
    ]

    results = fetch_url.concurrent_execute(*urls)

    # Only wrapper-level successes are inspected; each payload's own
    # "success" flag decides which bucket it lands in.
    ok, bad = [], []
    for r in results:
        if not r.success:
            continue
        (ok if r.value.get("success") else bad).append(r.value)

    print(f"Successful fetches: {len(ok)}")
    print(f"Failed fetches: {len(bad)}")
    print(
        f"Sample successful result: {ok[0] if ok else 'None'}"
    )
    print()
def example_3_process_based_execution():
    """Example 3: Process-based execution for CPU-intensive tasks.

    Runs several CPU-bound computations in worker processes and reports
    how many finished along with their mean execution time.
    """
    print("=== Example 3: Process-based Execution ===")

    # CPU-intensive task sizes (iteration counts)
    tasks = [100000, 200000, 300000, 400000]

    # Execute concurrently in separate processes
    results = cpu_intensive_task.concurrent_execute(*tasks)

    # Collect successful values and their timings
    successful_results = [r.value for r in results if r.success]
    execution_times = [r.execution_time for r in results if r.success]

    # FIX: the original divided by len(execution_times) unconditionally,
    # raising ZeroDivisionError whenever every task failed.
    avg_time = (
        sum(execution_times) / len(execution_times)
        if execution_times
        else 0.0
    )

    print(f"Completed {len(successful_results)} CPU-intensive tasks")
    print(f"Average execution time: {avg_time:.3f}s")
    print(
        f"Sample result: {successful_results[0] if successful_results else 'None'}"
    )
    print()
def example_4_batch_processing():
    """Example 4: Batch processing."""
    print("=== Example 4: Batch Processing ===")

    # 25 items, handed to the wrapper 5 at a time
    items = ["item_%d" % i for i in range(25)]

    results = process_item.concurrent_batch(items, batch_size=5)

    processed = [r.value for r in results if r.success]
    print(f"Processed {len(processed)} items in batches")
    print(f"Sample results: {processed[:5]}")
    print()
def example_5_class_concurrent_execution():
    """Example 5: Class with concurrent methods."""
    print("=== Example 5: Class Concurrent Execution ===")

    processor = DataProcessor({"batch_size": 10})
    batch = ["data_%d" % i for i in range(8)]

    # Both wrapped methods fan out over the same inputs
    proc_res = processor.process_batch.concurrent_execute(*batch)
    val_res = processor.validate_data.concurrent_execute(*batch)

    processed = [r.value for r in proc_res if r.success]
    valid = [r.value for r in val_res if r.success]

    print(f"Processed {len(processed)} items")
    print(f"Validated {len(valid)} items")
    print(f"Sample processed: {processed[:3]}")
    print(f"Sample validation: {valid[:3]}")
    print()
def example_6_custom_configuration():
    """Example 6: Custom configuration with exceptions and retries."""
    print("=== Example 6: Custom Configuration ===")
    # Items with different multipliers
    items = [f"item_{i}" for i in range(6)]
    multipliers = [1, 2, 3, 1, 2, 3]
    # Execute with custom configuration
    # NOTE(review): this passes the whole `multipliers` LIST as the
    # `multiplier` kwarg. If concurrent_execute forwards kwargs verbatim
    # to every call, custom_processor evaluates `str * list`, which
    # raises TypeError for every item (captured, since the decorator was
    # configured with return_exceptions=True). Confirm whether the
    # wrapper distributes list kwargs per-call; if not, this example
    # always lands in the "Failed" bucket.
    results = custom_processor.concurrent_execute(
        *items, **{"multiplier": multipliers}
    )
    # Process results
    successful_results = [r.value for r in results if r.success]
    failed_results = [r for r in results if not r.success]
    print(f"Successful: {len(successful_results)}")
    print(f"Failed: {len(failed_results)}")
    print(f"Sample results: {successful_results[:3]}")
    print()
def example_7_concurrent_mapping():
    """Example 7: Concurrent mapping over a list."""
    print("=== Example 7: Concurrent Mapping ===")

    # Inputs for the map
    items = ["map_item_%d" % i for i in range(15)]

    results = process_data.concurrent_map(items)

    mapped = [r.value for r in results if r.success]
    print(f"Mapped over {len(mapped)} items")
    print(f"Sample mapped results: {mapped[:5]}")
    print()
def example_8_error_handling():
    """Example 8: Error handling and retries."""
    print("=== Example 8: Error Handling ===")

    @concurrent(
        max_workers=3,
        return_exceptions=True,
        retry_on_failure=True,
        max_retries=2,
    )
    def unreliable_function(x: int) -> int:
        """Fail deterministically for multiples of three, else double x."""
        if x % 3 == 0:
            raise ValueError(f"Failed for {x}")
        time.sleep(0.1)
        return 2 * x

    # Inputs 0..9 — three of them (0, 3, 6, 9) will always fail
    results = unreliable_function.concurrent_execute(*range(10))

    ok = [r.value for r in results if r.success]
    errors = [r.exception for r in results if not r.success]

    print(f"Successful: {len(ok)}")
    print(f"Failed: {len(errors)}")
    print(f"Sample successful: {ok[:3]}")
    print(
        f"Sample failures: {[type(e).__name__ for e in errors[:3]]}"
    )
    print()
def main():
    """Run all examples, stopping at the first unhandled error."""
    print("Concurrent Wrapper Examples")
    print("=" * 50)
    print()

    # Same order as the original hand-written call sequence
    examples = (
        example_1_basic_concurrent_execution,
        example_2_thread_based_execution,
        example_3_process_based_execution,
        example_4_batch_processing,
        example_5_class_concurrent_execution,
        example_6_custom_configuration,
        example_7_concurrent_mapping,
        example_8_error_handling,
    )
    try:
        for example in examples:
            example()
        print("All examples completed successfully!")
    except Exception as e:
        print(f"Error running examples: {e}")
        import traceback

        traceback.print_exc()


if __name__ == "__main__":
    main()

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save