Merge branch 'kyegomez:master' into crca

pull/1233/head
CI-DEV 4 weeks ago committed by GitHub
commit ca02283b9c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -17,7 +17,7 @@ jobs:
&& ${{ contains(github.event.pull_request.labels.*.name, 'release') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python 3.9

@ -21,7 +21,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v5
uses: actions/checkout@v6
# Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis
- name: Run Codacy Analysis CLI
uses: codacy/codacy-analysis-cli-action@562ee3e92b8e92df8b67e0a5ff8aa8e261919c08

@ -16,7 +16,7 @@ jobs:
steps:
# Step 1: Check out the repository
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
# Step 2: Set up Python
- name: Set up Python ${{ matrix.python-version }}

@ -28,7 +28,7 @@ jobs:
language: ["python"]
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Initialize CodeQL
uses: github/codeql-action/init@v4
with:

@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout repository'
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4
# Commonly enabled options, see https://github.com/actions/dependency-review-action#configuration-options for all available options.

@ -9,7 +9,7 @@ jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- uses: actions/setup-python@v6
with:
python-version: 3.11

@ -6,7 +6,7 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: Set up Python
uses: actions/setup-python@v6

@ -33,7 +33,7 @@ jobs:
security-events: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
with:
submodules: true

@ -35,7 +35,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
with:
submodules: true

@ -21,7 +21,7 @@ jobs:
python-version: ["3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
with:

@ -24,7 +24,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Set up Python 3.10
uses: actions/setup-python@v6
@ -121,7 +121,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Set up Python 3.10
uses: actions/setup-python@v6

@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v6
- name: Set up Python 3.10
uses: actions/setup-python@v6

@ -27,7 +27,7 @@ jobs:
runs-on: "ubuntu-20.04"
steps:
- name: Checkout code
uses: actions/checkout@v5
uses: actions/checkout@v6
- name: Build an image from Dockerfile
run: |

@ -823,7 +823,7 @@ We've made it easy to start contributing. Here's how you can help:
4. **Join the Discussion:** To participate in roadmap discussions and connect with other developers, join our community on [**Discord**](https://discord.gg/EamjgSaEQf).
### ✨ Our Valued Contributors
### ✨ Thank You to Our Contributors
Thank you for contributing to swarms. Your work is deeply appreciated and recognized.
@ -831,9 +831,17 @@ Thank you for contributing to swarms. Your work is extremely appreciated and rec
<img src="https://contrib.rocks/image?repo=kyegomez/swarms" />
</a>
### 🙏 Thank You to Our Community
We're incredibly grateful to everyone who supports Swarms! Your stars, forks, and contributions help make this project better every day.
[![Forkers repo roster for @kyegomez/swarms](https://reporoster.com/forks/kyegomez/swarms)](https://github.com/kyegomez/swarms/network/members)
[![Stargazers repo roster for @kyegomez/swarms](https://reporoster.com/stars/kyegomez/swarms)](https://github.com/kyegomez/swarms/stargazers)
-----
## Connect With Us
## Join the Swarms community 👾👾👾
Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights!

@ -0,0 +1,106 @@
# LLM Council Examples
This page provides examples demonstrating the LLM Council pattern, inspired by Andrej Karpathy's llm-council implementation. The LLM Council uses multiple specialized AI agents that:
1. Each respond independently to queries
2. Review and rank each other's anonymized responses
3. Have a Chairman synthesize all responses into a final comprehensive answer
## Example Files
All LLM Council examples are located in the [`examples/multi_agent/llm_council_examples/`](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent/llm_council_examples) directory.
### Marketing & Business
- **[marketing_strategy_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/marketing_strategy_council.py)** - Marketing strategy analysis and recommendations
- **[business_strategy_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/business_strategy_council.py)** - Comprehensive business strategy development
### Finance & Investment
- **[finance_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/finance_analysis_council.py)** - Financial analysis and investment recommendations
- **[etf_stock_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py)** - ETF and stock analysis with portfolio recommendations
### Medical & Healthcare
- **[medical_treatment_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/medical_treatment_council.py)** - Medical treatment recommendations and care plans
- **[medical_diagnosis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py)** - Diagnostic analysis based on symptoms
### Technology & Research
- **[technology_assessment_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/technology_assessment_council.py)** - Technology evaluation and implementation strategy
- **[research_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/research_analysis_council.py)** - Comprehensive research analysis on complex topics
### Legal
- **[legal_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/legal_analysis_council.py)** - Legal implications and compliance analysis
## Basic Usage Pattern
All examples follow the same pattern:
```python
from swarms.structs.llm_council import LLMCouncil
# Create the council
council = LLMCouncil(verbose=True)
# Run a query
result = council.run("Your query here")
# Access results
print(result["final_response"]) # Chairman's synthesized answer
print(result["original_responses"]) # Individual member responses
print(result["evaluations"]) # How members ranked each other
```
## Running Examples
Run any example directly:
```bash
python examples/multi_agent/llm_council_examples/marketing_strategy_council.py
python examples/multi_agent/llm_council_examples/finance_analysis_council.py
python examples/multi_agent/llm_council_examples/medical_diagnosis_council.py
```
## Key Features
- **Multiple Perspectives**: Each council member (GPT-5.1, Gemini, Claude, Grok) provides unique insights
- **Peer Review**: Members evaluate and rank each other's responses anonymously
- **Synthesis**: Chairman combines the best elements from all responses
- **Transparency**: See both individual responses and evaluation rankings
## Council Members
The default council consists of:
- **GPT-5.1-Councilor**: Analytical and comprehensive
- **Gemini-3-Pro-Councilor**: Concise and well-structured
- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced
- **Grok-4-Councilor**: Creative and innovative
## Customization
You can create custom council members:
```python
from swarms import Agent
from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt
custom_agent = Agent(
agent_name="Custom-Councilor",
system_prompt=get_gpt_councilor_prompt(),
model_name="gpt-4.1",
max_loops=1,
)
council = LLMCouncil(
council_members=[custom_agent, ...],
chairman_model="gpt-5.1",
verbose=True
)
```
## Documentation
For complete API reference and detailed documentation, see the [LLM Council Reference Documentation](../swarms/structs/llm_council.md).

@ -274,6 +274,7 @@ nav:
- Overview: "swarms/structs/overview.md"
- Custom Multi Agent Architectures: "swarms/structs/custom_swarm.md"
- Debate Multi-Agent Architectures: "swarms/structs/orchestration_methods.md"
- DebateWithJudge: "swarms/structs/debate_with_judge.md"
- MajorityVoting: "swarms/structs/majorityvoting.md"
- RoundRobin: "swarms/structs/round_robin_swarm.md"
- Mixture of Agents: "swarms/structs/moa.md"
@ -283,6 +284,7 @@ nav:
- MALT: "swarms/structs/malt.md"
- Multi-Agent Execution Utilities: "swarms/structs/various_execution_methods.md"
- Council of Judges: "swarms/structs/council_of_judges.md"
- LLM Council: "swarms/structs/llm_council.md"
- Heavy Swarm: "swarms/structs/heavy_swarm.md"
- Social Algorithms: "swarms/structs/social_algorithms.md"
@ -401,7 +403,9 @@ nav:
- SwarmRouter Example: "swarms/examples/swarm_router.md"
- MultiAgentRouter Minimal Example: "swarms/examples/multi_agent_router_minimal.md"
- ConcurrentWorkflow Example: "swarms/examples/concurrent_workflow.md"
- Multi-Agentic Patterns with GraphWorkflow: "swarms/examples/graphworkflow_rustworkx_patterns.md"
- Mixture of Agents Example: "swarms/examples/moa_example.md"
- LLM Council Examples: "examples/llm_council_examples.md"
- Unique Swarms: "swarms/examples/unique_swarms.md"
- Agents as Tools: "swarms/examples/agents_as_tools.md"
- Aggregate Multi-Agent Responses: "swarms/examples/aggregate.md"

@ -290,14 +290,14 @@ task = "Write a short story about a robot who discovers music."
# --- Example 1: SequentialWorkflow ---
# Agents run one after another in a chain: Writer -> Editor -> Reviewer.
print("Running a Sequential Workflow...")
sequential_router = SwarmRouter(swarm_type=SwarmType.SequentialWorkflow, agents=agents)
sequential_router = SwarmRouter(swarm_type="SequentialWorkflow", agents=agents)
sequential_output = sequential_router.run(task)
print(f"Final Sequential Output:\n{sequential_output}\n")
# --- Example 2: ConcurrentWorkflow ---
# All agents receive the same initial task and run at the same time.
print("Running a Concurrent Workflow...")
concurrent_router = SwarmRouter(swarm_type=SwarmType.ConcurrentWorkflow, agents=agents)
concurrent_router = SwarmRouter(swarm_type="ConcurrentWorkflow", agents=agents)
concurrent_outputs = concurrent_router.run(task)
# This returns a dictionary of each agent's output
for agent_name, output in concurrent_outputs.items():
@ -312,9 +312,9 @@ aggregator = Agent(
model_name="gpt-4o-mini"
)
moa_router = SwarmRouter(
swarm_type=SwarmType.MixtureOfAgents,
swarm_type="MixtureOfAgents",
agents=agents,
aggregator_agent=aggregator, # MoA requires an aggregator
aggregator_agent=aggregator,
)
aggregated_output = moa_router.run(task)
print(f"Final Aggregated Output:\n{aggregated_output}\n")

@ -24,10 +24,10 @@ mkdocs-autolinks-plugin
# Requirements for core
jinja2~=3.1
markdown~=3.8
markdown~=3.10
mkdocs-material-extensions~=1.3
pygments~=2.19
pymdown-extensions~=10.16
pymdown-extensions~=10.17
# Requirements for plugins
colorama~=0.4

@ -5,20 +5,28 @@ The Swarms CLI is a comprehensive command-line interface for managing and execut
## Table of Contents
- [Installation](#installation)
- [Basic Usage](#basic-usage)
- [Commands Reference](#commands-reference)
- [Global Arguments](#global-arguments)
- [Command-Specific Arguments](#command-specific-arguments)
- [run-agents Command](#run-agents-command)
- [load-markdown Command](#load-markdown-command)
- [agent Command](#agent-command)
- [autoswarm Command](#autoswarm-command)
- [setup-check Command](#setup-check-command)
- [llm-council Command](#llm-council-command)
- [heavy-swarm Command](#heavy-swarm-command)
- [features Command](#features-command)
- [Error Handling](#error-handling)
- [Examples](#examples)
- [Configuration](#configuration)
- [Advanced Features](#advanced-features)
- [Troubleshooting](#troubleshooting)
- [Integration](#integration)
- [Performance Considerations](#performance-considerations)
- [Security](#security)
- [Command Quick Reference](#command-quick-reference)
- [Support](#support)
## Installation
@ -43,6 +51,7 @@ swarms <command> [options]
|---------|-------------|-------------------|
| `onboarding` | Start interactive onboarding process | None |
| `help` | Display help message | None |
| `features` | Display all available features and actions in a comprehensive table | None |
| `get-api-key` | Open API key portal in browser | None |
| `check-login` | Verify login status and initialize cache | None |
| `run-agents` | Execute agents from YAML configuration | `--yaml-file` |
@ -52,6 +61,8 @@ swarms <command> [options]
| `book-call` | Schedule strategy session | None |
| `autoswarm` | Generate and execute autonomous swarm | `--task`, `--model` |
| `setup-check` | Run comprehensive environment setup check | None |
| `llm-council` | Run LLM Council with multiple agents collaborating on a task | `--task` |
| `heavy-swarm` | Run HeavySwarm with specialized agents for complex task analysis | `--task` |
## Global Arguments
@ -221,6 +232,148 @@ swarms setup-check --verbose
└─────────────────────────────────────────────────────────────────────────────┘
```
### `llm-council` Command
Run the LLM Council with multiple specialized agents that collaborate, evaluate, and synthesize responses.
The LLM Council follows a structured workflow:
1. **Independent Responses**: Each council member (GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, Grok-4) independently responds to the query
2. **Peer Review**: All members review and rank each other's anonymized responses
3. **Synthesis**: A Chairman agent synthesizes all responses and rankings into a final comprehensive answer
```bash
swarms llm-council [options]
```
#### Required Arguments
| Argument | Type | Description |
|----------|------|-------------|
| `--task` | `str` | The query or question for the LLM Council to process |
#### Optional Arguments
| Argument | Type | Default | Description |
|----------|------|---------|-------------|
| `--verbose` | `bool` | `True` | Enable verbose output showing progress and intermediate results |
**Example:**
```bash
# Basic usage
swarms llm-council --task "What are the best energy ETFs right now?"
# With verbose output
swarms llm-council --task "What is the best approach to solve this problem?" --verbose
```
**How It Works:**
The LLM Council creates a collaborative environment where:
- **Default Council Members**: GPT-5.1 (analytical), Gemini 3 Pro (concise), Claude Sonnet 4.5 (balanced), Grok-4 (creative)
- **Anonymized Evaluation**: Responses are anonymized before evaluation to ensure honest ranking
- **Cross-Model Evaluation**: Each model evaluates all responses, often selecting other models' responses as superior
- **Final Synthesis**: The Chairman (GPT-5.1 by default) synthesizes the best elements from all responses
**Use Cases:**
- Complex problem-solving requiring multiple perspectives
- Research questions needing comprehensive analysis
- Decision-making scenarios requiring thorough evaluation
- Content generation with quality assurance
### `heavy-swarm` Command
Run HeavySwarm with specialized agents for complex task analysis and decomposition.
HeavySwarm follows a structured workflow:
1. **Task Decomposition**: Breaks down tasks into specialized questions
2. **Parallel Execution**: Executes specialized agents in parallel
3. **Result Synthesis**: Integrates and synthesizes results
4. **Comprehensive Reporting**: Generates detailed final reports
5. **Iterative Refinement**: Optional multi-loop execution for iterative improvement
```bash
swarms heavy-swarm [options]
```
#### Required Arguments
| Argument | Type | Description |
|----------|------|-------------|
| `--task` | `str` | The task for HeavySwarm to analyze and process |
#### Optional Arguments
| Argument | Type | Default | Description |
|----------|------|---------|-------------|
| `--loops-per-agent` | `int` | `1` | Number of execution loops each agent should perform |
| `--question-agent-model-name` | `str` | `"gpt-4o-mini"` | Model name for the question generation agent |
| `--worker-model-name` | `str` | `"gpt-4o-mini"` | Model name for specialized worker agents |
| `--random-loops-per-agent` | `bool` | `False` | Enable random number of loops per agent (1-10 range) |
| `--verbose` | `bool` | `False` | Enable verbose output showing detailed progress |
**Example:**
```bash
# Basic usage
swarms heavy-swarm --task "Analyze the current market trends for renewable energy"
# With custom configuration
swarms heavy-swarm \
--task "Research the best investment strategies for 2024" \
--loops-per-agent 3 \
--question-agent-model-name "gpt-4" \
--worker-model-name "gpt-4" \
--random-loops-per-agent \
--verbose
```
**Specialized Agent Roles:**
HeavySwarm includes specialized agents for different aspects of analysis:
- **Research Agent**: Fast, trustworthy, and reproducible research
- **Analysis Agent**: Statistical analysis and validated insights
- **Writing Agent**: Clear, structured documentation
- **Question Agent**: Task decomposition and question generation
**Use Cases:**
- Complex research tasks requiring multiple perspectives
- Market analysis and financial research
- Technical analysis and evaluation
- Comprehensive report generation
- Multi-faceted problem solving
### `features` Command
Display all available CLI features and actions in a comprehensive, formatted table.
This command provides a quick reference to all available features, their categories, descriptions, command syntax, and key parameters.
```bash
swarms features
```
**No arguments required.**
**Example:**
```bash
swarms features
```
**Output Includes:**
- **Main Features Table**: Complete list of all features with:
- Feature name
- Category (Setup, Auth, Execution, Creation, etc.)
- Description
- Command syntax
- Key parameters
- **Category Summary**: Overview of features grouped by category with counts
- **Usage Tips**: Quick tips for using the CLI effectively
**Use Cases:**
- Quick reference when exploring CLI capabilities
- Discovering available features
- Understanding command syntax and parameters
- Learning about feature categories
## Error Handling
The CLI provides comprehensive error handling with formatted error messages:
@ -289,6 +442,34 @@ swarms autoswarm \
--model "gpt-4"
```
### LLM Council Collaboration
```bash
# Run LLM Council for collaborative problem solving
swarms llm-council \
--task "What are the best strategies for reducing carbon emissions in manufacturing?" \
--verbose
```
### HeavySwarm Complex Analysis
```bash
# Run HeavySwarm for comprehensive task analysis
swarms heavy-swarm \
--task "Analyze the impact of AI on the job market in 2024" \
--loops-per-agent 2 \
--question-agent-model-name "gpt-4" \
--worker-model-name "gpt-4" \
--verbose
```
### Viewing All Features
```bash
# Display all available features
swarms features
```
## Configuration
### YAML Configuration Format
@ -386,6 +567,54 @@ Guided setup process including:
- Usage examples
### Multi-Agent Collaboration
The CLI supports advanced multi-agent architectures:
#### LLM Council
Collaborative problem-solving with multiple specialized models:
```bash
swarms llm-council --task "Your question here"
```
**Features:**
- Multiple model perspectives (GPT-5.1, Gemini, Claude, Grok)
- Anonymous peer review and ranking
- Synthesized final responses
- Cross-model evaluation
#### HeavySwarm
Complex task analysis with specialized agent roles:
```bash
swarms heavy-swarm --task "Your complex task here"
```
**Features:**
- Task decomposition into specialized questions
- Parallel agent execution
- Result synthesis and integration
- Iterative refinement with multiple loops
- Specialized agent roles (Research, Analysis, Writing, Question)
### Feature Discovery
Quickly discover all available features:
```bash
swarms features
```
Displays comprehensive tables showing:
- All available commands
- Feature categories
- Command syntax
- Key parameters
- Usage examples
## Troubleshooting
@ -451,6 +680,8 @@ swarms run-agents --yaml-file agents2.yaml
| Model Selection | Choose appropriate models for task complexity |
| Context Length | Monitor and optimize input sizes |
| Rate Limiting | Respect API provider limits |
| Multi-Agent Execution | LLM Council and HeavySwarm execute agents in parallel for efficiency |
| Loop Configuration | Adjust `--loops-per-agent` based on task complexity and time constraints |
## Security
@ -461,6 +692,48 @@ swarms run-agents --yaml-file agents2.yaml
| Input Validation | CLI validates all inputs before execution |
| Error Sanitization | Sensitive information is not exposed in errors |
## Command Quick Reference
### Quick Start Commands
```bash
# Environment setup
swarms setup-check --verbose
swarms onboarding
# View all features
swarms features
# Get help
swarms help
```
### Agent Commands
```bash
# Create custom agent
swarms agent --name "Agent" --task "Task" --system-prompt "Prompt"
# Run agents from YAML
swarms run-agents --yaml-file agents.yaml
# Load from markdown
swarms load-markdown --markdown-path ./agents/
```
### Multi-Agent Commands
```bash
# LLM Council
swarms llm-council --task "Your question"
# HeavySwarm
swarms heavy-swarm --task "Your complex task" --loops-per-agent 2 --verbose
# Auto-generate swarm
swarms autoswarm --task "Task description" --model "gpt-4"
```
## Support
For additional support:
@ -470,3 +743,4 @@ For additional support:
| **Community** | [Discord](https://discord.gg/EamjgSaEQf) |
| **Issues** | [GitHub Issues](https://github.com/kyegomez/swarms/issues) |
| **Strategy Sessions**| [Book a Call](https://cal.com/swarms/swarms-strategy-session) |
| **Documentation** | [Full Documentation](https://docs.swarms.world) |

File diff suppressed because it is too large Load Diff

@ -29,7 +29,7 @@ GROQ_API_KEY=""
```python
from swarms import Agent
from swarms.structs.swarm_router import SwarmRouter, SwarmType
from swarms.structs.swarm_router import SwarmRouter
# Initialize specialized agents
data_extractor_agent = Agent(
@ -61,7 +61,7 @@ sequential_router = SwarmRouter(
name="SequentialRouter",
description="Process tasks in sequence",
agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent],
swarm_type=SwarmType.SequentialWorkflow,
swarm_type="SequentialWorkflow",
max_loops=1
)
@ -76,7 +76,7 @@ concurrent_router = SwarmRouter(
name="ConcurrentRouter",
description="Process tasks concurrently",
agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent],
swarm_type=SwarmType.ConcurrentWorkflow,
swarm_type="ConcurrentWorkflow",
max_loops=1
)
@ -91,8 +91,8 @@ rearrange_router = SwarmRouter(
name="RearrangeRouter",
description="Dynamically rearrange agents for optimal task processing",
agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent],
swarm_type=SwarmType.AgentRearrange,
flow=f"{data_extractor_agent.agent_name} -> {summarizer_agent.agent_name} -> {financial_analyst_agent.agent_name}",
swarm_type="AgentRearrange",
rearrange_flow=f"{data_extractor_agent.agent_name} -> {summarizer_agent.agent_name} -> {financial_analyst_agent.agent_name}",
max_loops=1
)
@ -107,7 +107,7 @@ mixture_router = SwarmRouter(
name="MixtureRouter",
description="Combine multiple expert agents",
agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent],
swarm_type=SwarmType.MixtureOfAgents,
swarm_type="MixtureOfAgents",
max_loops=1
)
@ -137,7 +137,7 @@ router = SwarmRouter(
name="CustomRouter",
description="Custom router configuration",
agents=[data_extractor_agent, summarizer_agent, financial_analyst_agent],
swarm_type=SwarmType.SequentialWorkflow,
swarm_type="SequentialWorkflow",
max_loops=3,
autosave=True,
verbose=True,
@ -145,6 +145,27 @@ router = SwarmRouter(
)
```
# SwarmType Reference
## Valid SwarmType Values
| Value | Description |
|-------|-------------|
| `"SequentialWorkflow"` | Execute agents in sequence |
| `"ConcurrentWorkflow"` | Execute agents concurrently |
| `"AgentRearrange"` | Dynamically rearrange agent execution order |
| `"MixtureOfAgents"` | Combine outputs from multiple agents |
| `"GroupChat"` | Enable group chat between agents |
| `"MultiAgentRouter"` | Route tasks to appropriate agents |
| `"AutoSwarmBuilder"` | Automatically build swarm configuration |
| `"HiearchicalSwarm"` | Hierarchical agent organization |
| `"MajorityVoting"` | Use majority voting for decisions |
| `"MALT"` | Multi-Agent Learning and Training |
| `"CouncilAsAJudge"` | Council-based evaluation system |
| `"InteractiveGroupChat"` | Interactive group chat with agents |
| `"HeavySwarm"` | Heavy swarm for complex tasks |
| `"auto"` | Automatically select swarm type |
# Best Practices
## Choose the appropriate swarm type based on your task requirements:
@ -187,7 +208,7 @@ Here's a complete example showing how to use SwarmRouter in a real-world scenari
```python
import os
from swarms import Agent
from swarms.structs.swarm_router import SwarmRouter, SwarmType
from swarms.structs.swarm_router import SwarmRouter
# Initialize specialized agents
research_agent = Agent(
@ -216,7 +237,7 @@ router = SwarmRouter(
name="ResearchAnalysisRouter",
description="Process research and analysis tasks",
agents=[research_agent, analysis_agent, summary_agent],
swarm_type=SwarmType.SequentialWorkflow,
swarm_type="SequentialWorkflow",
max_loops=1,
verbose=True
)

@ -83,6 +83,7 @@ The `Agent` class establishes a conversational loop with a language model, allow
| `traceback` | `Optional[Any]` | Object used for traceback handling. |
| `traceback_handlers` | `Optional[Any]` | List of traceback handlers. |
| `streaming_on` | `Optional[bool]` | Boolean indicating whether to stream responses. |
| `stream` | `Optional[bool]` | Boolean indicating whether to enable detailed token-by-token streaming with metadata. |
| `docs` | `List[str]` | List of document paths or contents to be ingested. |
| `docs_folder` | `Optional[str]` | Path to a folder containing documents to be ingested. |
| `verbose` | `Optional[bool]` | Boolean indicating whether to print verbose output. |
@ -759,6 +760,22 @@ print(agent.system_prompt)
```
### Token-by-Token Streaming
```python
from swarms import Agent
# Initialize agent with detailed streaming
agent = Agent(
model_name="gpt-4.1",
max_loops=1,
stream=True, # Enable detailed token-by-token streaming
)
# Run with detailed streaming - each token shows metadata
agent.run("Tell me a short story about a robot learning to paint.")
```
## Agent Structured Outputs
- Create a structured output schema for the agent (e.g., `List[Dict]`)
@ -1112,4 +1129,4 @@ The `run` method now supports several new parameters for advanced functionality:
| `tool_retry_attempts` | Configure tool_retry_attempts for robust tool execution in production environments. |
| `handoffs` | Use handoffs to create specialized agent teams that can intelligently route tasks based on complexity and expertise requirements. |
By following these guidelines and leveraging the Swarm Agent's extensive features, you can create powerful, flexible, and efficient autonomous agents for a wide range of applications.
By following these guidelines and leveraging the Swarm Agent's extensive features, you can create powerful, flexible, and efficient autonomous agents for a wide range of applications.

@ -40,7 +40,6 @@ The `execution_type` parameter controls how the AutoSwarmBuilder operates:
| Execution Type | Description |
|----------------------------------|-----------------------------------------------------------|
| **"return-agents"** | Creates and returns agent specifications as a dictionary (default) |
| **"execute-swarm-router"** | Executes the swarm router with the created agents |
| **"return-swarm-router-config"** | Returns the swarm router configuration as a dictionary |
| **"return-agents-objects"** | Returns agent objects created from specifications |
@ -602,7 +601,6 @@ for agent in agents:
- Use `verbose=True` during development for debugging
- Choose the right `execution_type` for your use case:
- Use `"return-agents"` for getting agent specifications as dictionary (default)
- Use `"execute-swarm-router"` for executing the swarm router with created agents
- Use `"return-swarm-router-config"` for analyzing swarm architecture
- Use `"return-agents-objects"` for getting agent objects created from specifications
- Set `max_tokens` appropriately based on expected response length

@ -0,0 +1,677 @@
# DebateWithJudge Module Documentation
The `DebateWithJudge` module provides a sophisticated debate architecture with self-refinement through a judge agent. This system enables two agents (Pro and Con) to debate a topic, with a Judge agent evaluating their arguments and providing refined synthesis. The process repeats for N rounds to progressively refine the answer.
## Architecture
```mermaid
graph TD
A[DebateWithJudge System] --> B[Initialize Pro, Con, and Judge Agents]
B --> C[Start with Initial Topic]
C --> D[Round Loop: max_rounds]
D --> E[Pro Agent Presents Argument]
E --> F[Con Agent Presents Counter-Argument]
F --> G[Judge Agent Evaluates Both]
G --> H[Judge Provides Synthesis]
H --> I{More Rounds?}
I -->|Yes| D
I -->|No| J[Format Final Output]
J --> K[Return Result]
```
### Key Concepts
| Concept | Description |
|--------------------------|----------------------------------------------------------------------------------------------|
| Debate Architecture | A structured process where two agents present opposing arguments on a topic |
| Pro Agent | The agent arguing in favor of a position |
| Con Agent | The agent arguing against a position |
| Judge Agent | An impartial evaluator that analyzes both arguments and provides synthesis |
| Iterative Refinement | The process repeats for multiple rounds, each round building upon the judge's previous synthesis |
| Progressive Improvement | Each round refines the answer by incorporating feedback and addressing weaknesses |
## Class Definition: `DebateWithJudge`
```python
class DebateWithJudge:
def __init__(
self,
pro_agent: Agent,
con_agent: Agent,
judge_agent: Agent,
max_rounds: int = 3,
output_type: str = "str-all-except-first",
verbose: bool = True,
):
```
### Constructor Parameters
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `pro_agent` | `Agent` | Required | The agent arguing in favor (Pro position) |
| `con_agent` | `Agent` | Required | The agent arguing against (Con position) |
| `judge_agent` | `Agent` | Required | The judge agent that evaluates arguments and provides synthesis |
| `max_rounds` | `int` | `3` | Maximum number of debate rounds to execute |
| `output_type` | `str` | `"str-all-except-first"` | Format for the output conversation history |
| `verbose` | `bool` | `True` | Whether to enable verbose logging |
## API Reference
### Core Methods
#### `run(task: str) -> Union[str, List, dict]`
Executes the debate with judge refinement process for a single task and returns the refined result.
**Signature:**
```python
def run(self, task: str) -> Union[str, List, dict]
```
**Parameters:**
- `task` (`str`): The initial topic or question to debate
**Returns:**
- `Union[str, List, dict]`: The formatted conversation history or final refined answer, depending on `output_type`
**Process Flow:**
1. **Task Validation**: Validates that the task is a non-empty string
2. **Agent Initialization**: Initializes all three agents with their respective roles and the initial task context
3. **Multi-Round Execution**: For each round (up to `max_rounds`):
- **Pro Argument**: Pro agent presents an argument in favor of the current topic
- **Con Counter-Argument**: Con agent presents a counter-argument, addressing the Pro's points
- **Judge Evaluation**: Judge agent evaluates both arguments, identifies strengths and weaknesses
- **Synthesis Generation**: Judge provides a refined synthesis that incorporates the best elements from both sides
- **Topic Refinement**: Judge's synthesis becomes the topic for the next round
4. **Result Formatting**: Returns the final result formatted according to `output_type`
**Example:**
```python
from swarms import Agent, DebateWithJudge
# Create the Pro agent (arguing in favor)
pro_agent = Agent(
agent_name="Pro-Agent",
system_prompt=(
"You are a skilled debater who argues in favor of positions. "
"You present well-reasoned arguments with evidence, examples, "
"and logical reasoning. You are persuasive and articulate."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the Con agent (arguing against)
con_agent = Agent(
agent_name="Con-Agent",
system_prompt=(
"You are a skilled debater who argues against positions. "
"You present strong counter-arguments with evidence, examples, "
"and logical reasoning. You identify weaknesses in opposing "
"arguments and provide compelling alternatives."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the Judge agent (evaluates and synthesizes)
judge_agent = Agent(
agent_name="Judge-Agent",
system_prompt=(
"You are an impartial judge who evaluates debates. "
"You carefully analyze arguments from both sides, identify "
"strengths and weaknesses, and provide balanced synthesis. "
"You may declare a winner or provide a refined answer that "
"incorporates the best elements from both arguments."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the DebateWithJudge system
debate_system = DebateWithJudge(
pro_agent=pro_agent,
con_agent=con_agent,
judge_agent=judge_agent,
max_rounds=3,
output_type="str-all-except-first",
verbose=True,
)
# Define the debate topic
topic = (
"Should artificial intelligence be regulated by governments? "
"Discuss the balance between innovation and safety."
)
# Run the debate
result = debate_system.run(task=topic)
print(result)
```
**Raises:**
- `ValueError`: If `task` is `None` or empty, if any of the required agents is `None`, or if `max_rounds` is less than 1
---
#### `batched_run(tasks: List[str]) -> List[str]`
Executes the debate-with-judge refinement process for multiple tasks sequentially, running each task through the complete multi-round debate process.
**Signature:**
```python
def batched_run(self, tasks: List[str]) -> List[str]
```
**Parameters:**
- `tasks` (`List[str]`): List of topics or questions to debate
**Returns:**
- `List[str]`: List of final refined answers, one for each input task
**Process Flow:**
1. **Sequential Processing**: Processes each task in the input list one by one
2. **Independent Execution**: Each task runs through the complete multi-round debate process independently
3. **Result Collection**: Collects and returns all results in the same order as input tasks
**Example:**
```python
# Define multiple debate topics
debate_topics = [
"Should remote work become the standard for knowledge workers?",
"Is cryptocurrency a viable alternative to traditional banking?",
"Should social media platforms be held accountable for content moderation?",
"Are electric vehicles the future of transportation?"
]
# Execute batch processing
results = debate_system.batched_run(debate_topics)
# Process results
for topic, result in zip(debate_topics, results):
print(result)
```
**Performance Considerations:**
| Consideration | Description |
|---------------------------------------------|---------------------------------------------------------------------------|
| Sequential Processing | Tasks are processed one after another, not in parallel |
| Independent Conversation History | Each task maintains its own conversation history |
| Memory Usage | Scales with the number of tasks and the length of each conversation |
| Total Execution Time | Equals the sum of all individual task execution times |
---
### Helper Methods
#### `get_conversation_history() -> List[dict]`
Get the full conversation history from the debate.
**Signature:**
```python
def get_conversation_history(self) -> List[dict]
```
**Returns:**
- `List[dict]`: List of message dictionaries containing the conversation history
**Example:**
```python
# Run a debate
result = debate_system.run("Should AI be regulated?")
# Get the full conversation history
history = debate_system.get_conversation_history()
print(history)
```
---
#### `get_final_answer() -> str`
Get the final refined answer from the judge.
**Signature:**
```python
def get_final_answer(self) -> str
```
**Returns:**
- `str`: The content of the final judge synthesis
**Example:**
```python
# Run a debate
result = debate_system.run("Should AI be regulated?")
# Get just the final answer
final_answer = debate_system.get_final_answer()
print(final_answer)
```
---
### Properties
| Property | Type | Description |
|----------|------|-------------|
| `pro_agent` | `Agent` | The agent arguing in favor (Pro position) |
| `con_agent` | `Agent` | The agent arguing against (Con position) |
| `judge_agent` | `Agent` | The judge agent that evaluates arguments |
| `max_rounds` | `int` | Maximum number of debate rounds |
| `output_type` | `str` | Format for returned results |
| `verbose` | `bool` | Whether verbose logging is enabled |
| `conversation` | `Conversation` | Conversation history management object |
## Output Types
The `output_type` parameter controls how the conversation history is formatted:
| `output_type` Value | Description |
|----------------------------|--------------------------------------------------------------|
| `"str-all-except-first"` | Returns a formatted string with all messages except the first (default) |
| `"str"` | Returns all messages as a formatted string |
| `"dict"` | Returns messages as a dictionary |
| `"list"` | Returns messages as a list |
## Usage Patterns
### Single Topic Debate
For focused debate and refinement on a single complex topic:
```python
# Simple single topic execution
result = debate_system.run("Should universal basic income be implemented?")
# With custom output format
debate_system.output_type = "dict"
result = debate_system.run("Should universal basic income be implemented?")
```
### Batch Processing
For processing multiple related topics sequentially:
```python
# Process multiple policy questions
policy_topics = [
"Should healthcare be universal?",
"Should education be free?",
"Should carbon emissions be taxed?"
]
results = debate_system.batched_run(policy_topics)
```
### Custom Agent Configuration
For specialized debate scenarios with custom agent prompts:
```python
# Create specialized agents for technical debates
technical_pro = Agent(
agent_name="Technical-Pro",
system_prompt="You are a software engineering expert arguing for technical solutions...",
model_name="gpt-4",
max_loops=1,
)
technical_con = Agent(
agent_name="Technical-Con",
system_prompt="You are a software engineering expert arguing against technical solutions...",
model_name="gpt-4",
max_loops=1,
)
technical_judge = Agent(
agent_name="Technical-Judge",
system_prompt="You are a senior software architect evaluating technical arguments...",
model_name="gpt-4",
max_loops=1,
)
technical_debate = DebateWithJudge(
pro_agent=technical_pro,
con_agent=technical_con,
judge_agent=technical_judge,
max_rounds=5, # More rounds for complex technical topics
verbose=True,
)
```
## Usage Examples
### Example 1: Policy Debate on AI Regulation
This example demonstrates using `DebateWithJudge` for a comprehensive policy debate on AI regulation, with multiple rounds of refinement.
```python
from swarms import Agent, DebateWithJudge
# Create the Pro agent (arguing in favor of AI regulation)
pro_agent = Agent(
agent_name="Pro-Regulation-Agent",
system_prompt=(
"You are a policy expert specializing in technology regulation. "
"You argue in favor of government regulation of artificial intelligence. "
"You present well-reasoned arguments focusing on safety, ethics, "
"and public interest. You use evidence, examples, and logical reasoning. "
"You are persuasive and articulate, emphasizing the need for oversight "
"to prevent harm and ensure responsible AI development."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the Con agent (arguing against AI regulation)
con_agent = Agent(
agent_name="Anti-Regulation-Agent",
system_prompt=(
"You are a technology policy expert specializing in innovation. "
"You argue against heavy government regulation of artificial intelligence. "
"You present strong counter-arguments focusing on innovation, economic growth, "
"and the risks of over-regulation. You identify weaknesses in regulatory "
"proposals and provide compelling alternatives such as industry self-regulation "
"and ethical guidelines. You emphasize the importance of maintaining "
"technological competitiveness."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the Judge agent (evaluates and synthesizes)
judge_agent = Agent(
agent_name="Policy-Judge-Agent",
system_prompt=(
"You are an impartial policy analyst and judge who evaluates debates on "
"technology policy. You carefully analyze arguments from both sides, "
"identify strengths and weaknesses, and provide balanced synthesis. "
"You consider multiple perspectives including safety, innovation, economic impact, "
"and ethical considerations. You may declare a winner or provide a refined "
"answer that incorporates the best elements from both arguments, such as "
"balanced regulatory frameworks that protect public interest while fostering innovation."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the DebateWithJudge system
debate_system = DebateWithJudge(
pro_agent=pro_agent,
con_agent=con_agent,
judge_agent=judge_agent,
max_rounds=3,
output_type="str-all-except-first",
verbose=True,
)
# Define the debate topic
topic = (
"Should artificial intelligence be regulated by governments? "
"Discuss the balance between innovation and safety, considering "
"both the potential benefits of regulation (safety, ethics, public trust) "
"and the potential drawbacks (stifling innovation, economic impact, "
"regulatory capture). Provide a nuanced analysis."
)
# Run the debate
result = debate_system.run(task=topic)
print(result)
# Get the final refined answer
final_answer = debate_system.get_final_answer()
print(final_answer)
```
### Example 2: Technical Architecture Debate with Batch Processing
This example demonstrates using `batched_run` to process multiple technical architecture questions, comparing different approaches to system design.
```python
from swarms import Agent, DebateWithJudge
# Create specialized technical agents
pro_agent = Agent(
agent_name="Microservices-Pro",
system_prompt=(
"You are a software architecture expert advocating for microservices architecture. "
"You present arguments focusing on scalability, independent deployment, "
"technology diversity, and team autonomy. You use real-world examples and "
"case studies to support your position."
),
model_name="gpt-4o-mini",
max_loops=1,
)
con_agent = Agent(
agent_name="Monolith-Pro",
system_prompt=(
"You are a software architecture expert advocating for monolithic architecture. "
"You present counter-arguments focusing on simplicity, reduced complexity, "
"easier debugging, and lower operational overhead. You identify weaknesses "
"in microservices approaches and provide compelling alternatives."
),
model_name="gpt-4o-mini",
max_loops=1,
)
judge_agent = Agent(
agent_name="Architecture-Judge",
system_prompt=(
"You are a senior software architect evaluating architecture debates. "
"You analyze both arguments considering factors like team size, project scale, "
"complexity, operational capabilities, and long-term maintainability. "
"You provide balanced synthesis that considers context-specific trade-offs."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the debate system
architecture_debate = DebateWithJudge(
pro_agent=pro_agent,
con_agent=con_agent,
judge_agent=judge_agent,
max_rounds=2, # Fewer rounds for more focused technical debates
output_type="str-all-except-first",
verbose=True,
)
# Define multiple architecture questions
architecture_questions = [
"Should a startup with 5 developers use microservices or monolithic architecture?",
"Is serverless architecture better than containerized deployments for event-driven systems?",
"Should a financial application use SQL or NoSQL databases for transaction processing?",
"Is event-driven architecture superior to request-response for real-time systems?",
]
# Execute batch processing
results = architecture_debate.batched_run(architecture_questions)
# Display results
for result in results:
print(result)
```
### Example 3: Business Strategy Debate with Custom Configuration
This example demonstrates a business strategy debate with custom agent configurations, multiple rounds, and accessing conversation history.
```python
from swarms import Agent, DebateWithJudge
# Create business strategy agents with detailed expertise
pro_agent = Agent(
agent_name="Growth-Strategy-Pro",
system_prompt=(
"You are a business strategy consultant specializing in aggressive growth strategies. "
"You argue in favor of rapid expansion, market penetration, and scaling. "
"You present arguments focusing on first-mover advantages, market share capture, "
"network effects, and competitive positioning. You use case studies from "
"successful companies like Amazon, Uber, and Airbnb to support your position."
),
model_name="gpt-4o-mini",
max_loops=1,
)
con_agent = Agent(
agent_name="Sustainable-Growth-Pro",
system_prompt=(
"You are a business strategy consultant specializing in sustainable, profitable growth. "
"You argue against aggressive expansion in favor of measured, sustainable growth. "
"You present counter-arguments focusing on profitability, unit economics, "
"sustainable competitive advantages, and avoiding overextension. You identify "
"weaknesses in 'growth at all costs' approaches and provide compelling alternatives "
"based on companies like Apple, Microsoft, and Berkshire Hathaway."
),
model_name="gpt-4o-mini",
max_loops=1,
)
judge_agent = Agent(
agent_name="Strategy-Judge",
system_prompt=(
"You are a seasoned business strategist and former CEO evaluating growth strategy debates. "
"You carefully analyze arguments from both sides, considering factors like: "
"- Market conditions and competitive landscape\n"
"- Company resources and capabilities\n"
"- Risk tolerance and financial position\n"
"- Long-term sustainability vs. short-term growth\n"
"- Industry-specific dynamics\n\n"
"You provide balanced synthesis that incorporates the best elements from both arguments, "
"considering context-specific factors. You may recommend a hybrid approach when appropriate."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the debate system with extended rounds for complex strategy discussions
strategy_debate = DebateWithJudge(
pro_agent=pro_agent,
con_agent=con_agent,
judge_agent=judge_agent,
max_rounds=4, # More rounds for complex strategic discussions
output_type="dict", # Use dict format for structured analysis
verbose=True,
)
# Define a complex business strategy question
strategy_question = (
"A SaaS startup with $2M ARR, 40% gross margins, and $500K in the bank "
"is considering two paths:\n"
"1. Aggressive growth: Raise $10M, hire 50 people, expand to 5 new markets\n"
"2. Sustainable growth: Focus on profitability, improve unit economics, "
"expand gradually with existing resources\n\n"
"Which strategy should they pursue? Consider market conditions, competitive "
"landscape, and long-term viability."
)
# Run the debate
result = strategy_debate.run(task=strategy_question)
print(result)
# Get the full conversation history for detailed analysis
history = strategy_debate.get_conversation_history()
print(history)
# Get the final refined answer
final_answer = strategy_debate.get_final_answer()
print(final_answer)
```
## Best Practices
### Agent Configuration
!!! tip "Agent Configuration Best Practices"
- **Pro Agent**: Should be configured with expertise in the topic area and strong argumentation skills
- **Con Agent**: Should be configured to identify weaknesses and provide compelling alternatives
- **Judge Agent**: Should be configured with broad expertise and impartial evaluation capabilities
- Use appropriate models for the complexity of the debate topic
- Consider using more powerful models for the Judge agent
### Round Configuration
!!! note "Round Configuration Tips"
- Use 2-3 rounds for most topics
- Use 4-5 rounds for complex, multi-faceted topics
- More rounds allow for deeper refinement but increase execution time
- Consider the trade-off between refinement quality and cost
### Output Format Selection
!!! info "Output Format Guidelines"
- Use `"str-all-except-first"` for readable summaries (default)
- Use `"dict"` for structured analysis and programmatic processing
- Use `"list"` for detailed conversation inspection
- Use `"str"` for complete conversation history as text
### Performance Optimization
!!! warning "Performance Considerations"
- Batch processing is sequential - consider parallel execution for large batches
- Each round requires 3 agent calls (Pro, Con, Judge)
- Memory usage scales with conversation history length
- Consider using lighter models for faster execution when appropriate
## Troubleshooting
### Common Issues
!!! danger "Common Problems"
**Issue**: Agents not following their roles
**Solution**: Ensure system prompts clearly define each agent's role and expertise
---
**Issue**: Judge synthesis not improving over rounds
**Solution**: Increase `max_rounds` or improve Judge agent's system prompt to emphasize refinement
---
**Issue**: Debate results are too generic
**Solution**: Use more specific system prompts and provide detailed context in the task
---
**Issue**: Execution time is too long
**Solution**: Reduce `max_rounds`, use faster models, or process fewer topics in batch
## Contributing
!!! success "Contributing"
Contributions are welcome! Please feel free to submit a Pull Request.
## License
!!! info "License"
This project is licensed under the MIT License - see the LICENSE file for details.

@ -12,6 +12,7 @@ Key features:
|------------------------|-----------------------------------------------------------------------------------------------|
| **Agent-based nodes** | Each node represents an agent that can process tasks |
| **Directed graph structure** | Edges define the flow of data between agents |
| **Dual backend support** | Choose between NetworkX (compatibility) or Rustworkx (performance) backends |
| **Parallel execution** | Multiple agents can run simultaneously within layers |
| **Automatic compilation** | Optimizes workflow structure for efficient execution |
| **Rich visualization** | Generate visual representations using Graphviz |
@ -25,37 +26,40 @@ graph TB
subgraph "GraphWorkflow Architecture"
A[GraphWorkflow] --> B[Node Collection]
A --> C[Edge Collection]
A --> D[NetworkX Graph]
A --> D[Graph Backend]
A --> E[Execution Engine]
B --> F[Agent Nodes]
C --> G[Directed Edges]
D --> H[Topological Sort]
E --> I[Parallel Execution]
E --> J[Layer Processing]
D --> H[NetworkX Backend]
D --> I[Rustworkx Backend]
D --> J[Topological Sort]
E --> K[Parallel Execution]
E --> L[Layer Processing]
subgraph "Node Types"
F --> K[Agent Node]
K --> L[Agent Instance]
K --> M[Node Metadata]
F --> M[Agent Node]
M --> N[Agent Instance]
M --> O[Node Metadata]
end
subgraph "Edge Types"
G --> N[Simple Edge]
G --> O[Fan-out Edge]
G --> P[Fan-in Edge]
G --> Q[Parallel Chain]
G --> P[Simple Edge]
G --> Q[Fan-out Edge]
G --> R[Fan-in Edge]
G --> S[Parallel Chain]
end
subgraph "Execution Patterns"
I --> R[Thread Pool]
I --> S[Concurrent Futures]
J --> T[Layer-by-layer]
J --> U[Dependency Resolution]
K --> T[Thread Pool]
K --> U[Concurrent Futures]
L --> V[Layer-by-layer]
L --> W[Dependency Resolution]
end
end
```
## Class Reference
| Parameter | Type | Description | Default |
@ -71,6 +75,70 @@ graph TB
| `task` | `Optional[str]` | The task to be executed by the workflow | `None` |
| `auto_compile` | `bool` | Whether to automatically compile the workflow | `True` |
| `verbose` | `bool` | Whether to enable detailed logging | `False` |
| `backend` | `str` | Graph backend to use ("networkx" or "rustworkx") | `"networkx"` |
## Graph Backends
GraphWorkflow supports two graph backend implementations, each with different performance characteristics:
### NetworkX Backend (Default)
The **NetworkX** backend is the default and most widely compatible option. It provides:
| Feature | Description |
|---------------------|---------------------------------------------------------|
| ✅ Full compatibility | Works out of the box with no additional dependencies |
| ✅ Mature ecosystem | Well-tested and stable |
| ✅ Rich features | Comprehensive graph algorithms and operations |
| ✅ Python-native | Pure Python implementation |
**Use NetworkX when:**
- You need maximum compatibility
- Working with small to medium-sized graphs (< 1000 nodes)
- You want zero additional dependencies
### Rustworkx Backend (High Performance)
The **Rustworkx** backend provides significant performance improvements for large graphs:
| Feature | Description |
|--------------------|-----------------------------------------------------------------|
| ⚡ High performance | Rust-based implementation for faster operations |
| ⚡ Memory efficient | Optimized for large-scale graphs |
| ⚡ Scalable | Better performance with graphs containing 1000+ nodes |
| ⚡ Same API | Drop-in replacement with identical interface |
**Use Rustworkx when:**
- Working with large graphs (1000+ nodes)
- Performance is critical
- You can install additional dependencies
**Installation:**
```bash
pip install rustworkx
```
**Note:** If rustworkx is not installed and you specify `backend="rustworkx"`, GraphWorkflow will automatically fall back to NetworkX with a warning.
### Backend Selection
Both backends implement the same `GraphBackend` interface, ensuring complete API compatibility. You can switch between backends without changing your code:
```python
# Use NetworkX (default)
workflow = GraphWorkflow(backend="networkx")
# Use Rustworkx for better performance
workflow = GraphWorkflow(backend="rustworkx")
```
The backend choice is transparent to the rest of the API - all methods work identically regardless of which backend is used.
### Core Methods
@ -455,7 +523,7 @@ Constructs a workflow from a list of agents and connections.
| `entry_points` | `List[str]` | List of entry point node IDs | `None` |
| `end_points` | `List[str]` | List of end point node IDs | `None` |
| `task` | `str` | Task to be executed by the workflow | `None` |
| `**kwargs` | `Any` | Additional keyword arguments | `{}` |
| `**kwargs` | `Any` | Additional keyword arguments (e.g., `backend`, `verbose`, `auto_compile`) | `{}` |
**Returns:**
@ -464,6 +532,7 @@ Constructs a workflow from a list of agents and connections.
**Example:**
```python
# Using NetworkX backend (default)
workflow = GraphWorkflow.from_spec(
agents=[agent1, agent2, agent3],
edges=[
@ -473,10 +542,56 @@ workflow = GraphWorkflow.from_spec(
],
task="Analyze market data"
)
# Using Rustworkx backend for better performance
workflow = GraphWorkflow.from_spec(
agents=[agent1, agent2, agent3],
edges=[
("agent1", "agent2"),
("agent2", "agent3"),
],
task="Analyze market data",
backend="rustworkx" # Specify backend via kwargs
)
```
## Examples
### Using Rustworkx Backend for Performance
```python
from swarms import Agent, GraphWorkflow
# Create agents
research_agent = Agent(
agent_name="ResearchAgent",
model_name="gpt-4",
max_loops=1
)
analysis_agent = Agent(
agent_name="AnalysisAgent",
model_name="gpt-4",
max_loops=1
)
# Build workflow with rustworkx backend for better performance
workflow = GraphWorkflow(
name="High-Performance-Workflow",
backend="rustworkx" # Use rustworkx backend
)
workflow.add_node(research_agent)
workflow.add_node(analysis_agent)
workflow.add_edge("ResearchAgent", "AnalysisAgent")
# Execute - backend is transparent to the API
results = workflow.run("What are the latest trends in AI?")
print(results)
```
**Note:** Make sure to install rustworkx first: `pip install rustworkx`
### Basic Sequential Workflow
```python
@ -667,6 +782,46 @@ loaded_workflow = GraphWorkflow.load_from_file(
new_results = loaded_workflow.run("Continue with quantum cryptography analysis")
```
### Large-Scale Workflow with Rustworkx
```python
from swarms import Agent, GraphWorkflow
# Create a large workflow with many agents
# Rustworkx backend provides better performance for large graphs
workflow = GraphWorkflow(
name="Large-Scale-Workflow",
backend="rustworkx", # Use rustworkx for better performance
verbose=True
)
# Create many agents (e.g., for parallel data processing)
agents = []
for i in range(50):
agent = Agent(
agent_name=f"Processor{i}",
model_name="gpt-4",
max_loops=1
)
agents.append(agent)
workflow.add_node(agent)
# Create complex interconnections
# Rustworkx handles this efficiently
for i in range(0, 50, 10):
source_agents = [f"Processor{j}" for j in range(i, min(i+10, 50))]
target_agents = [f"Processor{j}" for j in range(i+10, min(i+20, 50))]
if target_agents:
workflow.add_parallel_chain(source_agents, target_agents)
# Compile and execute
workflow.compile()
status = workflow.get_compilation_status()
print(f"Compiled workflow with {status['cached_layers_count']} layers")
results = workflow.run("Process large dataset in parallel")
```
### Advanced Pattern Detection
```python
@ -770,7 +925,8 @@ The `GraphWorkflow` class provides a powerful and flexible framework for orchest
|-----------------|--------------------------------------------------------------------------------------------------|
| **Scalability** | Supports workflows with hundreds of agents through efficient parallel execution |
| **Flexibility** | Multiple connection patterns (sequential, fan-out, fan-in, parallel chains) |
| **Performance** | Automatic compilation and optimization for faster execution |
| **Performance** | Automatic compilation and optimization for faster execution; rustworkx backend for large-scale graphs |
| **Backend Choice** | Choose between NetworkX (compatibility) or Rustworkx (performance) based on your needs |
| **Visualization** | Rich visual representations for workflow understanding and debugging |
| **Persistence** | Complete serialization and deserialization capabilities |
| **Error Handling** | Comprehensive error handling and recovery mechanisms |
@ -793,10 +949,28 @@ The `GraphWorkflow` class provides a powerful and flexible framework for orchest
|---------------------------------------|------------------------------------------------------------------|
| **Use meaningful agent names** | Helps with debugging and visualization |
| **Leverage parallel patterns** | Use fan-out and fan-in for better performance |
| **Choose the right backend** | Use rustworkx for large graphs (1000+ nodes), networkx for smaller graphs |
| **Compile workflows** | Always compile before execution for optimal performance |
| **Monitor execution** | Use verbose mode and status reporting for debugging |
| **Save important workflows** | Use serialization for workflow persistence |
| **Handle errors gracefully** | Implement proper error handling and recovery |
| **Visualize complex workflows** | Use visualization to understand and debug workflows |
### Backend Performance Considerations
When choosing between NetworkX and Rustworkx backends:
| Graph Size | Recommended Backend | Reason |
|------------|-------------------|--------|
| < 100 nodes | NetworkX | Minimal overhead, no extra dependencies |
| 100-1000 nodes | NetworkX or Rustworkx | Both perform well, choose based on dependency preferences |
| 1000+ nodes | Rustworkx | Significant performance benefits for large graphs |
| Very large graphs (10k+ nodes) | Rustworkx | Essential for acceptable performance |
**Performance Tips:**
- Rustworkx provides 2-10x speedup for topological operations on large graphs
- Both backends support the same features and API
- You can switch backends without code changes
- Rustworkx uses less memory for large graphs
The GraphWorkflow system represents a significant advancement in multi-agent orchestration, providing the tools needed to build complex, scalable, and maintainable AI workflows.

@ -186,14 +186,14 @@ task = "Write a short story about a robot who discovers music."
# --- Example 1: SequentialWorkflow ---
# Agents run one after another in a chain: Writer -> Editor -> Reviewer.
print("Running a Sequential Workflow...")
sequential_router = SwarmRouter(swarm_type=SwarmType.SequentialWorkflow, agents=agents)
sequential_router = SwarmRouter(swarm_type="SequentialWorkflow", agents=agents)
sequential_output = sequential_router.run(task)
print(f"Final Sequential Output:\n{sequential_output}\n")
# --- Example 2: ConcurrentWorkflow ---
# All agents receive the same initial task and run at the same time.
print("Running a Concurrent Workflow...")
concurrent_router = SwarmRouter(swarm_type=SwarmType.ConcurrentWorkflow, agents=agents)
concurrent_router = SwarmRouter(swarm_type="ConcurrentWorkflow", agents=agents)
concurrent_outputs = concurrent_router.run(task)
# This returns a dictionary of each agent's output
for agent_name, output in concurrent_outputs.items():
@ -208,9 +208,9 @@ aggregator = Agent(
model_name="gpt-4o-mini"
)
moa_router = SwarmRouter(
swarm_type=SwarmType.MixtureOfAgents,
swarm_type="MixtureOfAgents",
agents=agents,
aggregator_agent=aggregator, # MoA requires an aggregator
aggregator_agent=aggregator,
)
aggregated_output = moa_router.run(task)
print(f"Final Aggregated Output:\n{aggregated_output}\n")

@ -0,0 +1,534 @@
# LLM Council Class Documentation
```mermaid
flowchart TD
A[User Query] --> B[Council Members]
subgraph "Council Members"
C1[GPT-5.1-Councilor]
C2[Gemini-3-Pro-Councilor]
C3[Claude-Sonnet-4.5-Councilor]
C4[Grok-4-Councilor]
end
B --> C1
B --> C2
B --> C3
B --> C4
C1 --> D[Responses]
C2 --> D
C3 --> D
C4 --> D
D --> E[Anonymize & Evaluate]
E --> F[Chairman Synthesis]
F --> G[Final Response]
```
The `LLMCouncil` class orchestrates multiple specialized LLM agents to collaboratively answer queries through a structured peer review and synthesis process. Inspired by Andrej Karpathy's llm-council implementation, this architecture demonstrates how different models evaluate and rank each other's work, often selecting responses from other models as superior to their own.
The class automatically tracks all agent messages in a `Conversation` object and formats output using `history_output_formatter`, providing flexible output formats including dictionaries, lists, strings, JSON, YAML, and more.
## Workflow Overview
The LLM Council follows a four-step process:
1. **Parallel Response Generation**: All council members independently respond to the user query
2. **Anonymization**: Responses are anonymized with random IDs (A, B, C, D, etc.) to ensure objective evaluation
3. **Peer Review**: Each member evaluates and ranks all responses (including potentially their own)
4. **Synthesis**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer
## Class Definition
### LLMCouncil
```python
class LLMCouncil:
```
### Attributes
| Attribute | Type | Description | Default |
|-----------|------|-------------|---------|
| `council_members` | `List[Agent]` | List of Agent instances representing council members | `None` (creates default council) |
| `chairman` | `Agent` | The Chairman agent responsible for synthesizing responses | Created during initialization |
| `conversation` | `Conversation` | Conversation object tracking all messages throughout the workflow | Created during initialization |
| `output_type` | `HistoryOutputType` | Format for the output (e.g., "dict", "list", "string", "json", "yaml") | `"dict"` |
| `verbose` | `bool` | Whether to print progress and intermediate results | `True` |
## Methods
### `__init__`
Initializes the LLM Council with council members and a Chairman agent.
#### Parameters
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `id` | `str` | `swarm_id()` | Unique identifier for the council instance. |
| `name` | `str` | `"LLM Council"` | Name of the council instance. |
| `description` | `str` | `"A collaborative council..."` | Description of the council's purpose. |
| `council_members` | `Optional[List[Agent]]` | `None` | List of Agent instances representing council members. If `None`, creates default council with GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4. |
| `chairman_model` | `str` | `"gpt-5.1"` | Model name for the Chairman agent that synthesizes responses. |
| `verbose` | `bool` | `True` | Whether to print progress and intermediate results. |
| `output_type` | `HistoryOutputType` | `"dict"` | Format for the output. Options: "list", "dict", "string", "final", "json", "yaml", "xml", "dict-all-except-first", "str-all-except-first", "dict-final", "list-final". |
#### Returns
| Type | Description |
|------|-------------|
| `LLMCouncil` | Initialized LLM Council instance. |
#### Description
Creates an LLM Council instance with specialized council members. If no members are provided, it creates a default council consisting of:
| Council Member | Description |
|---------------------------------|------------------------------------------|
| **GPT-5.1-Councilor** | Analytical and comprehensive responses |
| **Gemini-3-Pro-Councilor** | Concise and well-processed responses |
| **Claude-Sonnet-4.5-Councilor** | Thoughtful and balanced responses |
| **Grok-4-Councilor** | Creative and innovative responses |
The Chairman agent is automatically created with a specialized prompt for synthesizing responses. A `Conversation` object is also initialized to track all messages throughout the workflow, including user queries, council member responses, evaluations, and the final synthesis.
#### Example Usage
```python
from swarms.structs.llm_council import LLMCouncil
# Create council with default members
council = LLMCouncil(verbose=True)
# Create council with custom members and output format
from swarms import Agent
custom_members = [
Agent(agent_name="Expert-1", model_name="gpt-4", max_loops=1),
Agent(agent_name="Expert-2", model_name="claude-3-opus", max_loops=1),
]
council = LLMCouncil(
council_members=custom_members,
chairman_model="gpt-4",
verbose=True,
output_type="json" # Output as JSON string
)
```
---
### `run`
Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis. All messages are tracked in the conversation object and formatted according to the `output_type` setting.
#### Parameters
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `query` | `str` | Required | The user's query to process through the council. |
#### Returns
| Type | Description |
|------|-------------|
| `Union[List, Dict, str]` | Formatted output based on `output_type`. The output contains the conversation history with all messages tracked throughout the workflow. |
#### Output Format
The return value depends on the `output_type` parameter set during initialization:
| `output_type` value | Description |
|---------------------------------|---------------------------------------------------------------------|
| **`"dict"`** (default) | Returns conversation as a list of message dictionaries |
| **`"list"`** | Returns conversation as a list of formatted strings (`"role: content"`) |
| **`"string"`** or **`"str"`** | Returns conversation as a formatted string |
| **`"final"`** or **`"last"`** | Returns only the content of the final message (Chairman's response) |
| **`"json"`** | Returns conversation as a JSON string |
| **`"yaml"`** | Returns conversation as a YAML string |
| **`"xml"`** | Returns conversation as an XML string |
| **`"dict-all-except-first"`** | Returns all messages except the first as a dictionary |
| **`"str-all-except-first"`** | Returns all messages except the first as a string |
| **`"dict-final"`** | Returns the final message as a dictionary |
| **`"list-final"`** | Returns the final message as a list |
#### Conversation Tracking
All messages are automatically tracked in the conversation object with the following roles:
- **`"User"`**: The original user query
- **`"{member_name}"`**: Each council member's response (e.g., "GPT-5.1-Councilor")
- **`"{member_name}-Evaluation"`**: Each council member's evaluation (e.g., "GPT-5.1-Councilor-Evaluation")
- **`"Chairman"`**: The final synthesized response
#### Description
Executes the complete LLM Council workflow:
1. **User Query Tracking**: Adds the user query to the conversation as "User" role
2. **Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently`
3. **Collection Phase**: Collects all responses, maps them to member names, and adds each to the conversation with the member's name as the role
4. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity
5. **Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution`, then adds evaluations to the conversation with "{member_name}-Evaluation" as the role
6. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer, which is added to the conversation as "Chairman" role
7. **Output Formatting**: Returns the conversation formatted according to the `output_type` setting using `history_output_formatter`
The method provides verbose output by default, showing progress at each stage. All messages are tracked in the `conversation` attribute for later access or export.
#### Example Usage
```python
from swarms.structs.llm_council import LLMCouncil
# Create council with default output format (dict)
council = LLMCouncil(verbose=True)
query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?"
# Run the council - returns formatted conversation based on output_type
result = council.run(query)
# With default "dict" output_type, result is a list of message dictionaries
# Access conversation messages
for message in result:
print(f"{message['role']}: {message['content'][:200]}...")
# Access the conversation object directly for more control
conversation = council.conversation
print("\nFinal message:", conversation.get_final_message_content())
# Get conversation as string
print("\nFull conversation:")
print(conversation.get_str())
# Example with different output types
council_json = LLMCouncil(output_type="json", verbose=False)
result_json = council_json.run(query) # Returns JSON string
council_final = LLMCouncil(output_type="final", verbose=False)
result_final = council_final.run(query) # Returns only final response string
```
---
### `_create_default_council`
Creates default council members with specialized prompts and models.
#### Parameters
None (internal method).
#### Returns
| Type | Description |
|------|-------------|
| `List[Agent]` | List of Agent instances configured as council members. |
#### Description
Internal method that creates the default council configuration with four specialized agents:
- **GPT-5.1-Councilor** (`model_name="gpt-5.1"`): Analytical and comprehensive, temperature=0.7
- **Gemini-3-Pro-Councilor** (`model_name="gemini-2.5-flash"`): Concise and structured, temperature=0.7
- **Claude-Sonnet-4.5-Councilor** (`model_name="anthropic/claude-sonnet-4-5"`): Thoughtful and balanced, temperature=0.0
- **Grok-4-Councilor** (`model_name="x-ai/grok-4"`): Creative and innovative, temperature=0.8
Each agent is configured with:
- Specialized system prompts matching their role
- `max_loops=1` for single-response generation
- `verbose=False` to reduce noise during parallel execution
- Appropriate temperature settings for their style
---
## Helper Functions
### `get_gpt_councilor_prompt()`
Returns the system prompt for GPT-5.1 councilor agent.
#### Returns
| Type | Description |
|------|-------------|
| `str` | System prompt string emphasizing analytical thinking and comprehensive coverage. |
---
### `get_gemini_councilor_prompt()`
Returns the system prompt for Gemini 3 Pro councilor agent.
#### Returns
| Type | Description |
|------|-------------|
| `str` | System prompt string emphasizing concise, well-processed, and structured responses. |
---
### `get_claude_councilor_prompt()`
Returns the system prompt for Claude Sonnet 4.5 councilor agent.
#### Returns
| Type | Description |
|------|-------------|
| `str` | System prompt string emphasizing thoughtful, balanced, and nuanced responses. |
---
### `get_grok_councilor_prompt()`
Returns the system prompt for Grok-4 councilor agent.
#### Returns
| Type | Description |
|------|-------------|
| `str` | System prompt string emphasizing creative, innovative, and unique perspectives. |
---
### `get_chairman_prompt()`
Returns the system prompt for the Chairman agent.
#### Returns
| Type | Description |
|------|-------------|
| `str` | System prompt string for synthesizing responses and evaluations into a final answer. |
---
### `get_evaluation_prompt(query, responses, evaluator_name)`
Creates evaluation prompt for council members to review and rank responses.
#### Parameters
| Parameter | Type | Description |
|-----------|------|-------------|
| `query` | `str` | The original user query. |
| `responses` | `Dict[str, str]` | Dictionary mapping anonymous IDs to response texts. |
| `evaluator_name` | `str` | Name of the agent doing the evaluation. |
#### Returns
| Type | Description |
|------|-------------|
| `str` | Formatted evaluation prompt string with instructions for ranking responses. |
---
### `get_synthesis_prompt(query, original_responses, evaluations, id_to_member)`
Creates synthesis prompt for the Chairman.
#### Parameters
| Parameter | Type | Description |
|-----------|------|-------------|
| `query` | `str` | Original user query. |
| `original_responses` | `Dict[str, str]` | Dictionary mapping member names to their responses. |
| `evaluations` | `Dict[str, str]` | Dictionary mapping evaluator names to their evaluation texts. |
| `id_to_member` | `Dict[str, str]` | Mapping from anonymous IDs to member names. |
#### Returns
| Type | Description |
|------|-------------|
| `str` | Formatted synthesis prompt for the Chairman agent. |
---
## Use Cases
The LLM Council is ideal for scenarios requiring:
- **Multi-perspective Analysis**: When you need diverse viewpoints on complex topics
- **Quality Assurance**: When peer review and ranking can improve response quality
- **Transparent Decision Making**: When you want to see how different models evaluate each other
- **Synthesis of Expertise**: When combining multiple specialized perspectives is valuable
### Common Applications
| Use Case | Description |
|-----------------------|--------------------------------------------------------------------------------------------------|
| **Medical Diagnosis** | Multiple medical AI agents provide diagnoses, evaluate each other, and synthesize recommendations |
| **Financial Analysis**| Different financial experts analyze investments and rank each other's assessments |
| **Legal Analysis** | Multiple legal perspectives evaluate compliance and risk |
| **Business Strategy** | Diverse strategic viewpoints are synthesized into comprehensive plans |
| **Research Analysis** | Multiple research perspectives are combined for thorough analysis |
## Examples
For comprehensive examples demonstrating various use cases, see the [LLM Council Examples](../../../examples/multi_agent/llm_council_examples/) directory:
- **Medical**: `medical_diagnosis_council.py`, `medical_treatment_council.py`
- **Finance**: `finance_analysis_council.py`, `etf_stock_analysis_council.py`
- **Business**: `business_strategy_council.py`, `marketing_strategy_council.py`
- **Technology**: `technology_assessment_council.py`, `research_analysis_council.py`
- **Legal**: `legal_analysis_council.py`
### Quick Start Example
```python
from swarms.structs.llm_council import LLMCouncil
# Create the council with default output format
council = LLMCouncil(verbose=True)
# Example query
query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?"
# Run the council - returns formatted conversation
result = council.run(query)
# With default "dict" output_type, result is a list of message dictionaries
# Print all messages
for message in result:
role = message['role']
content = message['content']
print(f"\n{role}:")
print(content[:500] + "..." if len(content) > 500 else content)
# Access conversation object directly for more options
conversation = council.conversation
# Get only the final response
print("\n" + "="*80)
print("FINAL RESPONSE")
print("="*80)
print(conversation.get_final_message_content())
# Get conversation as formatted string
print("\n" + "="*80)
print("FULL CONVERSATION")
print("="*80)
print(conversation.get_str())
# Export conversation to JSON
conversation.export()
```
## Customization
### Creating Custom Council Members
You can create custom council members with specialized roles:
```python
from swarms import Agent
from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt
# Create custom councilor
custom_agent = Agent(
agent_name="Domain-Expert-Councilor",
agent_description="Specialized domain expert for specific analysis",
system_prompt=get_gpt_councilor_prompt(), # Or create custom prompt
model_name="gpt-4",
max_loops=1,
verbose=False,
temperature=0.7,
)
# Create council with custom members
council = LLMCouncil(
council_members=[custom_agent, ...], # Add your custom agents
chairman_model="gpt-4",
verbose=True
)
```
### Custom Chairman Model
You can specify a different model for the Chairman:
```python
council = LLMCouncil(
chairman_model="claude-3-opus", # Use Claude as Chairman
verbose=True
)
```
### Custom Output Format
You can control the output format using the `output_type` parameter:
```python
# Get output as JSON string
council = LLMCouncil(output_type="json")
result = council.run(query) # Returns JSON string
# Get only the final response
council = LLMCouncil(output_type="final")
result = council.run(query) # Returns only final response string
# Get as YAML
council = LLMCouncil(output_type="yaml")
result = council.run(query) # Returns YAML string
# Get as formatted string
council = LLMCouncil(output_type="string")
result = council.run(query) # Returns formatted conversation string
```
### Accessing Conversation History
The conversation object is accessible for advanced usage:
```python
council = LLMCouncil()
council.run(query)
# Access conversation directly
conversation = council.conversation
# Get conversation history
history = conversation.conversation_history
# Export to file
conversation.export() # Saves to default location
# Get specific format
json_output = conversation.to_json()
dict_output = conversation.return_messages_as_dictionary()
```
## Architecture Benefits
1. **Diversity**: Multiple models provide varied perspectives and approaches
2. **Quality Control**: Peer review ensures responses are evaluated objectively
3. **Synthesis**: Chairman combines the best elements from all responses
4. **Transparency**: Full visibility into individual responses and evaluation rankings
5. **Scalability**: Easy to add or remove council members
6. **Flexibility**: Supports custom agents and models
7. **Conversation Tracking**: All messages are automatically tracked in a Conversation object for history and export
8. **Flexible Output**: Multiple output formats supported via `history_output_formatter` (dict, list, string, JSON, YAML, XML, etc.)
## Performance Considerations
| Feature | Description |
|---------------------------|----------------------------------------------------------------------------------------------------------------|
| **Parallel Execution** | Both response generation and evaluation phases run in parallel for efficiency |
| **Anonymization** | Responses are anonymized to prevent bias in evaluation |
| **Model Selection** | Different models can be used for different roles based on their strengths |
| **Verbose Mode** | Can be disabled for production use to reduce output |
| **Conversation Management** | Conversation object efficiently tracks all messages in memory and supports export to JSON/YAML files |
| **Output Formatting** | Choose lightweight output formats (e.g., "final") for production to reduce memory usage |
## Related Documentation
- [Multi-Agent Architectures Overview](overview.md)
- [Council of Judges](council_of_judges.md) - Similar peer review pattern
- [Agent Class Reference](agent.md) - Understanding individual agents
- [Conversation Class Reference](conversation.md) - Understanding conversation tracking and management
- [Multi-Agent Execution Utilities](various_execution_methods.md) - Underlying execution methods
- [History Output Formatter](../../../swarms/utils/history_output_formatter.py) - Output formatting utilities

@ -4,13 +4,13 @@ The `SwarmRouter` class is a flexible routing system designed to manage differen
Full Path: `from swarms.structs.swarm_router`
## Initialization Parameters
Main class for routing tasks to different swarm types.
| Attribute | Type | Description |
| --- | --- | --- |
| `id` | str | Unique identifier for the SwarmRouter instance (auto-generated if not provided) |
| `name` | str | Name of the SwarmRouter instance |
| `description` | str | Description of the SwarmRouter's purpose |
| `max_loops` | int | Maximum number of loops to perform |
@ -24,35 +24,85 @@ Main class for routing tasks to different swarm types.
| `rules` | str | Rules to inject into every agent |
| `documents` | List[str] | List of document file paths |
| `output_type` | OutputType | Output format type (e.g., "string", "dict", "list", "json", "yaml", "xml") |
| `no_cluster_ops` | bool | Flag to disable cluster operations |
| `speaker_fn` | callable | Speaker function for GroupChat swarm type |
| `load_agents_from_csv` | bool | Flag to enable/disable loading agents from CSV |
| `csv_file_path` | str | Path to the CSV file for loading agents |
| `return_entire_history` | bool | Flag to enable/disable returning the entire conversation history |
| `multi_agent_collab_prompt` | bool | Whether to enable multi-agent collaboration prompts |
| `list_all_agents` | bool | Flag to enable/disable listing all agents to each other |
| `conversation` | Any | Conversation object for managing agent interactions |
| `agents_config` | Optional[Dict[Any, Any]] | Configuration dictionary for agents |
| `speaker_function` | str | Speaker function name for InteractiveGroupChat swarm type |
| `heavy_swarm_loops_per_agent` | int | Number of loops per agent for HeavySwarm (default: 1) |
| `heavy_swarm_question_agent_model_name` | str | Model name for the question agent in HeavySwarm (default: "gpt-4.1") |
| `heavy_swarm_worker_model_name` | str | Model name for worker agents in HeavySwarm (default: "gpt-4.1") |
| `heavy_swarm_swarm_show_output` | bool | Flag to show output for HeavySwarm (default: True) |
| `telemetry_enabled` | bool | Flag to enable/disable telemetry logging (default: False) |
| `council_judge_model_name` | str | Model name for the judge in CouncilAsAJudge (default: "gpt-4o-mini") |
| `verbose` | bool | Flag to enable/disable verbose logging (default: False) |
| `worker_tools` | List[Callable] | List of tools available to worker agents |
| `aggregation_strategy` | str | Aggregation strategy for HeavySwarm (default: "synthesis") |
| `chairman_model` | str | Model name for the Chairman in LLMCouncil (default: "gpt-5.1") |
#### Methods:
### Methods
| Method | Parameters | Description |
| --- | --- | --- |
| `__init__` | `name: str = "swarm-router", description: str = "Routes your task to the desired swarm", max_loops: int = 1, agents: List[Union[Agent, Callable]] = [], swarm_type: SwarmType = "SequentialWorkflow", autosave: bool = False, rearrange_flow: str = None, return_json: bool = False, auto_generate_prompts: bool = False, shared_memory_system: Any = None, rules: str = None, documents: List[str] = [], output_type: OutputType = "dict", no_cluster_ops: bool = False, speaker_fn: callable = None, load_agents_from_csv: bool = False, csv_file_path: str = None, return_entire_history: bool = True, multi_agent_collab_prompt: bool = True` | Initialize the SwarmRouter |
| `setup` | None | Set up the SwarmRouter by activating APE and handling shared memory and rules |
| `activate_shared_memory` | None | Activate shared memory with all agents |
| `handle_rules` | None | Inject rules to every agent |
| `activate_ape` | None | Activate automatic prompt engineering for agents that support it |
| `reliability_check` | None | Perform reliability checks on the SwarmRouter configuration |
| `_create_swarm` | `task: str = None, *args, **kwargs` | Create and return the specified swarm type |
| `update_system_prompt_for_agent_in_swarm` | None | Update system prompts for all agents with collaboration prompts |
| `_log` | `level: str, message: str, task: str = "", metadata: Dict[str, Any] = None` | Create a log entry |
| `_run` | `task: str, img: Optional[str] = None, model_response: Optional[str] = None, *args, **kwargs` | Run the specified task on the selected swarm type |
| `run` | `task: str, img: Optional[str] = None, model_response: Optional[str] = None, *args, **kwargs` | Execute a task on the selected swarm type |
| `__call__` | `task: str, *args, **kwargs` | Make the SwarmRouter instance callable |
| `batch_run` | `tasks: List[str], *args, **kwargs` | Execute multiple tasks in sequence |
| `async_run` | `task: str, *args, **kwargs` | Execute a task asynchronously |
| `get_logs` | None | Retrieve all logged entries |
| `concurrent_run` | `task: str, *args, **kwargs` | Execute a task using concurrent execution |
| `concurrent_batch_run` | `tasks: List[str], *args, **kwargs` | Execute multiple tasks concurrently |
#### `run()`
Execute a task on the selected swarm type.
**Input Parameters:**
| Parameter | Type | Required | Default | Description |
| --- | --- | --- | --- | --- |
| `task` | `Optional[str]` | No | `None` | The task to be executed by the swarm |
| `img` | `Optional[str]` | No | `None` | Path to an image file for vision tasks |
| `tasks` | `Optional[List[str]]` | No | `None` | List of tasks (used for BatchedGridWorkflow) |
| `*args` | `Any` | No | - | Variable length argument list |
| `**kwargs` | `Any` | No | - | Arbitrary keyword arguments |
**Output:**
| Type | Description |
| --- | --- |
| `Any` | The result of the swarm's execution. The exact type depends on the `output_type` configuration (e.g., `str`, `dict`, `list`, `json`, `yaml`, `xml`) |
**Example:**
```python
result = router.run(
task="Analyze the market trends and provide recommendations",
img="chart.png" # Optional
)
```
---
### `batch_run()`
Execute multiple tasks in sequence on the selected swarm type.
**Input Parameters:**
| Parameter | Type | Required | Default | Description |
| --- | --- | --- | --- | --- |
| `tasks` | `List[str]` | Yes | - | List of tasks to be executed sequentially |
| `img` | `Optional[str]` | No | `None` | Path to an image file for vision tasks |
| `imgs` | `Optional[List[str]]` | No | `None` | List of image file paths for vision tasks |
| `*args` | `Any` | No | - | Variable length argument list |
| `**kwargs` | `Any` | No | - | Arbitrary keyword arguments |
**Output:**
| Type | Description |
| --- | --- |
| `List[Any]` | A list of results from the swarm's execution, one result per task. Each result type depends on the `output_type` configuration |
**Example:**
```python
tasks = ["Analyze Q1 report", "Summarize competitor landscape", "Evaluate market trends"]
results = router.batch_run(tasks, img="report.png") # Optional img parameter
```
## Available Swarm Types
@ -62,7 +112,6 @@ The `SwarmRouter` supports many various multi-agent architectures for various ap
|------------|-------------|
| `AgentRearrange` | Optimizes agent arrangement for task execution |
| `MixtureOfAgents` | Combines multiple agent types for diverse tasks |
| `SpreadSheetSwarm` | Uses spreadsheet-like operations for task management |
| `SequentialWorkflow` | Executes tasks sequentially |
| `ConcurrentWorkflow` | Executes tasks in parallel |
| `GroupChat` | Facilitates communication among agents in a group chat format |
@ -73,10 +122,11 @@ The `SwarmRouter` supports many various multi-agent architectures for various ap
| `MALT` | Multi-Agent Language Tasks |
| `CouncilAsAJudge` | Council-based judgment system |
| `InteractiveGroupChat` | Interactive group chat with user participation |
| `HeavySwarm` | Heavy swarm architecture with question and worker agents |
| `BatchedGridWorkflow` | Batched grid workflow for parallel task processing |
| `LLMCouncil` | Council of specialized LLM agents with peer review and synthesis |
| `auto` | Automatically selects best swarm type via embedding search |
## Basic Usage
```python
@ -129,9 +179,13 @@ router = SwarmRouter(
if __name__ == "__main__":
# Run a comprehensive private equity document analysis task
result = router.run(
"Where is the best place to find template term sheets for series A startups? Provide links and references"
task="Where is the best place to find template term sheets for series A startups? Provide links and references",
img=None # Optional: provide image path for vision tasks
)
print(result)
# For BatchedGridWorkflow, you can pass multiple tasks:
# result = router.run(tasks=["Task 1", "Task 2", "Task 3"])
```
## Advanced Usage
@ -225,22 +279,6 @@ mixture_router = SwarmRouter(
result = mixture_router.run("Evaluate the potential acquisition of TechStartup Inc.")
```
### SpreadSheetSwarm
Use Case: Collaborative data processing and analysis.
```python
spreadsheet_router = SwarmRouter(
name="DataProcessor",
description="Collaborative data processing and analysis",
max_loops=1,
agents=[data_cleaner, statistical_analyzer, visualizer],
swarm_type="SpreadSheetSwarm"
)
result = spreadsheet_router.run("Process and visualize customer churn data")
```
### SequentialWorkflow
Use Case: Step-by-step document analysis and report generation.
@ -379,6 +417,71 @@ result = interactive_chat_router.run("Discuss the market trends and provide inte
The InteractiveGroupChat allows for dynamic interaction between agents and users, enabling real-time participation in group discussions and decision-making processes. This is particularly useful for scenarios requiring human input or validation during the conversation flow.
### HeavySwarm
Use Case: Complex task decomposition with question and worker agents.
```python
heavy_swarm_router = SwarmRouter(
name="HeavySwarm",
description="Complex task decomposition and execution",
swarm_type="HeavySwarm",
heavy_swarm_loops_per_agent=2,
heavy_swarm_question_agent_model_name="gpt-4.1",
heavy_swarm_worker_model_name="gpt-4.1",
heavy_swarm_swarm_show_output=True,
worker_tools=[tool1, tool2],
aggregation_strategy="synthesis",
output_type="string"
)
result = heavy_swarm_router.run("Analyze market trends and provide comprehensive recommendations")
```
HeavySwarm uses a question agent to decompose complex tasks and worker agents to execute subtasks, making it ideal for complex problem-solving scenarios.
### BatchedGridWorkflow
Use Case: Parallel processing of multiple tasks in a batched grid format.
```python
batched_grid_router = SwarmRouter(
name="BatchedGridWorkflow",
description="Process multiple tasks in parallel batches",
max_loops=1,
agents=[agent1, agent2, agent3],
swarm_type="BatchedGridWorkflow"
)
result = batched_grid_router.run(tasks=["Task 1", "Task 2", "Task 3"])
```
BatchedGridWorkflow is designed for efficiently processing multiple tasks in parallel batches, optimizing resource utilization.
### LLMCouncil
Use Case: Collaborative analysis with multiple specialized LLM agents that evaluate each other's responses and synthesize a final answer.
```python
llm_council_router = SwarmRouter(
name="LLMCouncil",
description="Collaborative council of LLM agents with peer review",
swarm_type="LLMCouncil",
chairman_model="gpt-5.1", # Model for the Chairman agent
output_type="dict", # Output format: "dict", "list", "string", "json", "yaml", "final", etc.
verbose=True # Show progress and intermediate results
)
result = llm_council_router.run("What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?")
```
LLMCouncil creates a council of specialized agents (GPT-5.1, Gemini, Claude, Grok by default) that:
1. Each independently responds to the query
2. Evaluates and ranks each other's anonymized responses
3. A Chairman synthesizes all responses and evaluations into a final comprehensive answer
The council automatically tracks all messages in a conversation object and supports flexible output formats. Note: LLMCouncil uses default council members and doesn't require the `agents` parameter.
## Advanced Features
### Processing Documents
@ -402,15 +505,7 @@ To process multiple tasks in a batch:
```python
tasks = ["Analyze Q1 report", "Summarize competitor landscape", "Evaluate market trends"]
results = router.batch_run(tasks)
```
### Asynchronous Execution
For asynchronous task execution:
```python
result = await router.async_run("Generate financial projections")
results = router.batch_run(tasks, img="image.png") # Optional: img parameter for image tasks
```
### Concurrent Execution
@ -418,16 +513,7 @@ result = await router.async_run("Generate financial projections")
To run a single task concurrently:
```python
result = router.concurrent_run("Analyze multiple data streams")
```
### Concurrent Batch Processing
To process multiple tasks concurrently:
```python
tasks = ["Task 1", "Task 2", "Task 3"]
results = router.concurrent_batch_run(tasks)
result = router.concurrent_run("Analyze multiple data streams", img="image.png") # Optional: img parameter
```
### Using the SwarmRouter as a Callable

@ -1,5 +1,3 @@
import json
from swarms import Agent
# Initialize the agent
@ -12,7 +10,6 @@ agent = Agent(
dynamic_context_window=True,
streaming_on=False,
top_p=None,
output_type="dict",
)
out = agent.run(
@ -20,4 +17,4 @@ out = agent.run(
n=1,
)
print(json.dumps(out, indent=4))
print(out)

@ -6,60 +6,90 @@ This directory contains comprehensive examples demonstrating various capabilitie
### Multi-Agent Systems
- **[multi_agent/](multi_agent/)** - Advanced multi-agent patterns including agent rearrangement, auto swarm builder (ASB), batched workflows, board of directors, caching, concurrent processing, councils, debates, elections, forest swarms, graph workflows, group chats, heavy swarms, hierarchical swarms, majority voting, orchestration examples, social algorithms, simulations, spreadsheet examples, and swarm routing.
- **[multi_agent/](multi_agent/)** - Advanced multi-agent patterns including agent rearrangement, auto swarm builder (ASB), batched workflows, board of directors, caching, concurrent processing, councils, debates, elections, forest swarms, graph workflows, group chats, heavy swarms, hierarchical swarms, LLM council, majority voting, orchestration examples, paper implementations, sequential workflows, social algorithms, simulations, spreadsheet examples, swarm routing, and utilities.
- [README.md](multi_agent/README.md) - Complete multi-agent examples documentation
- [duo_agent.py](multi_agent/duo_agent.py) - Two-agent collaboration example
- [llm_council_examples/](multi_agent/llm_council_examples/) - LLM Council collaboration patterns
- [caching_examples/](multi_agent/caching_examples/) - Agent caching examples
### Single Agent Systems
- **[single_agent/](single_agent/)** - Single agent implementations including demos, external agent integrations, LLM integrations (Azure, Claude, DeepSeek, Mistral, OpenAI, Qwen), onboarding, RAG, reasoning agents, tools integration, utils, and vision capabilities.
- **[single_agent/](single_agent/)** - Single agent implementations including demos, external agent integrations, LLM integrations (Azure, Claude, DeepSeek, Mistral, OpenAI, Qwen), onboarding, RAG, reasoning agents, tools integration, utils, vision capabilities, and MCP integration.
- [README.md](single_agent/README.md) - Complete single agent examples documentation
- [simple_agent.py](single_agent/simple_agent.py) - Basic single agent example
- [agent_mcp.py](single_agent/agent_mcp.py) - MCP integration example
- [rag/](single_agent/rag/) - Retrieval Augmented Generation (RAG) implementations with vector database integrations
### Tools & Integrations
- **[tools/](tools/)** - Tool integration examples including agent-as-tools, base tool implementations, browser automation, Claude integration, Exa search, Firecrawl, multi-tool usage, and Stagehand integration.
- [README.md](tools/README.md) - Complete tools examples documentation
- [agent_as_tools.py](tools/agent_as_tools.py) - Using agents as tools
- [browser_use_as_tool.py](tools/browser_use_as_tool.py) - Browser automation tool
- [exa_search_agent.py](tools/exa_search_agent.py) - Exa search integration
- [firecrawl_agents_example.py](tools/firecrawl_agents_example.py) - Firecrawl integration
- [base_tool_examples/](tools/base_tool_examples/) - Base tool implementation examples
- [multii_tool_use/](tools/multii_tool_use/) - Multi-tool usage examples
- [stagehand/](tools/stagehand/) - Stagehand UI automation
### Model Integrations
- **[models/](models/)** - Various model integrations including Cerebras, GPT-5, GPT-OSS, Llama 4, Lumo, and Ollama implementations with concurrent processing examples and provider-specific configurations.
- **[models/](models/)** - Various model integrations including Cerebras, GPT-5, GPT-OSS, Llama 4, Lumo, O3, Ollama, and vLLM implementations with concurrent processing examples and provider-specific configurations.
- [README.md](models/README.md) - Model integration documentation
- [simple_example_ollama.py](models/simple_example_ollama.py) - Ollama integration example
- [cerebas_example.py](models/cerebas_example.py) - Cerebras model example
- [lumo_example.py](models/lumo_example.py) - Lumo model example
- [example_o3.py](models/example_o3.py) - O3 model example
- [gpt_5/](models/gpt_5/) - GPT-5 model examples
- [gpt_oss_examples/](models/gpt_oss_examples/) - GPT-OSS examples
- [llama4_examples/](models/llama4_examples/) - Llama 4 examples
- [main_providers/](models/main_providers/) - Main provider configurations
- [vllm/](models/vllm/) - vLLM integration examples
### API & Protocols
- **[swarms_api_examples/](swarms_api_examples/)** - Swarms API usage examples including agent overview, batch processing, client integration, team examples, analysis, and rate limiting.
- [README.md](swarms_api_examples/README.md) - API examples documentation
- [client_example.py](swarms_api_examples/client_example.py) - API client example
- [batch_example.py](swarms_api_examples/batch_example.py) - Batch processing example
- **[swarms_api/](swarms_api/)** - Swarms API usage examples including agent overview, batch processing, client integration, team examples, analysis, and rate limiting.
- [README.md](swarms_api/README.md) - API examples documentation
- [client_example.py](swarms_api/client_example.py) - API client example
- [batch_example.py](swarms_api/batch_example.py) - Batch processing example
- [hospital_team.py](swarms_api/hospital_team.py) - Hospital management team simulation
- [legal_team.py](swarms_api/legal_team.py) - Legal team collaboration example
- [icd_ten_analysis.py](swarms_api/icd_ten_analysis.py) - ICD-10 medical code analysis
- [rate_limits.py](swarms_api/rate_limits.py) - Rate limiting and throttling examples
- **[mcp/](mcp/)** - Model Context Protocol (MCP) integration examples including agent implementations, multi-connection setups, server configurations, and utility functions.
- **[mcp/](mcp/)** - Model Context Protocol (MCP) integration examples including agent implementations, multi-connection setups, server configurations, utility functions, and multi-MCP guides.
- [README.md](mcp/README.md) - MCP examples documentation
- [multi_mcp_example.py](mcp/multi_mcp_example.py) - Multi-MCP connection example
- [agent_examples/](mcp/agent_examples/) - Agent-based MCP examples
- [servers/](mcp/servers/) - MCP server implementations
- [mcp_utils/](mcp/mcp_utils/) - MCP utility functions
- [multi_mcp_guide/](mcp/multi_mcp_guide/) - Multi-MCP setup guides
- **[aop_examples/](aop_examples/)** - Agents over Protocol (AOP) examples demonstrating MCP server setup, agent discovery, client interactions, queue-based task submission, and medical AOP implementations.
- **[aop_examples/](aop_examples/)** - Agents over Protocol (AOP) examples demonstrating MCP server setup, agent discovery, client interactions, queue-based task submission, medical AOP implementations, and utility functions.
- [README.md](aop_examples/README.md) - AOP examples documentation
- [server.py](aop_examples/server.py) - AOP server implementation
- [client/](aop_examples/client/) - AOP client examples and agent discovery
- [discovery/](aop_examples/discovery/) - Agent discovery examples
- [medical_aop/](aop_examples/medical_aop/) - Medical AOP implementations
- [utils/](aop_examples/utils/) - AOP utility functions
### Advanced Capabilities
- **[reasoning_agents/](reasoning_agents/)** - Advanced reasoning capabilities including agent judge evaluation systems, O3 model integration, and mixture of agents (MOA) sequential examples.
- **[reasoning_agents/](reasoning_agents/)** - Advanced reasoning capabilities including agent judge evaluation systems, O3 model integration, mixture of agents (MOA) sequential examples, and reasoning agent router examples.
- [README.md](reasoning_agents/README.md) - Reasoning agents documentation
- [example_o3.py](reasoning_agents/example_o3.py) - O3 model example
- [moa_seq_example.py](reasoning_agents/moa_seq_example.py) - MOA sequential example
- **[rag/](rag/)** - Retrieval Augmented Generation (RAG) implementations with vector database integrations including Qdrant examples.
- [README.md](rag/README.md) - RAG documentation
- [qdrant_rag_example.py](rag/qdrant_rag_example.py) - Qdrant RAG example
- [agent_judge_examples/](reasoning_agents/agent_judge_examples/) - Agent judge evaluation systems
- [reasoning_agent_router_examples/](reasoning_agents/reasoning_agent_router_examples/) - Reasoning agent router examples
### Guides & Tutorials
- **[guides/](guides/)** - Comprehensive guides and tutorials including generation length blog, geo guesser agent, graph workflow guide, hierarchical marketing team, nano banana Jarvis agent, smart database, web scraper agents, and workshop examples (840_update, 850_workshop).
- **[guides/](guides/)** - Comprehensive guides and tutorials including demos, generation length blog, geo guesser agent, graph workflow guide, hackathon examples, hierarchical marketing team, nano banana Jarvis agent, smart database, web scraper agents, workshops, x402 examples, and workshop examples (840_update, 850_workshop).
- [README.md](guides/README.md) - Guides documentation
- [hiearchical_marketing_team.py](guides/hiearchical_marketing_team.py) - Hierarchical marketing team example
- [demos/](guides/demos/) - Various demonstration examples
- [hackathons/](guides/hackathons/) - Hackathon project examples
- [workshops/](guides/workshops/) - Workshop examples
- [x402_examples/](guides/x402_examples/) - X402 protocol examples
### Deployment
@ -72,6 +102,11 @@ This directory contains comprehensive examples demonstrating various capabilitie
- **[utils/](utils/)** - Utility functions and helper implementations including agent loader, communication examples, concurrent wrappers, miscellaneous utilities, and telemetry.
- [README.md](utils/README.md) - Utils documentation
- [agent_loader/](utils/agent_loader/) - Agent loading utilities
- [communication_examples/](utils/communication_examples/) - Agent communication patterns
- [concurrent_wrapper_examples.py](utils/concurrent_wrapper_examples.py) - Concurrent processing wrappers
- [misc/](utils/misc/) - Miscellaneous utility functions
- [telemetry/](utils/telemetry/) - Telemetry and monitoring utilities
### User Interface
@ -79,16 +114,26 @@ This directory contains comprehensive examples demonstrating various capabilitie
- [README.md](ui/README.md) - UI examples documentation
- [chat.py](ui/chat.py) - Chat interface example
### Command Line Interface
- **[cli/](cli/)** - CLI command examples demonstrating all available Swarms CLI features including setup, agent management, multi-agent architectures, and utilities.
- [README.md](cli/README.md) - CLI examples documentation
- [01_setup_check.sh](cli/01_setup_check.sh) - Environment setup verification
- [05_create_agent.sh](cli/05_create_agent.sh) - Create custom agents
- [08_llm_council.sh](cli/08_llm_council.sh) - LLM Council collaboration
- [09_heavy_swarm.sh](cli/09_heavy_swarm.sh) - HeavySwarm complex analysis
## Quick Start
1. **New to Swarms?** Start with [single_agent/simple_agent.py](single_agent/simple_agent.py) for basic concepts
2. **Want multi-agent workflows?** Check out [multi_agent/duo_agent.py](multi_agent/duo_agent.py)
3. **Need tool integration?** Explore [tools/agent_as_tools.py](tools/agent_as_tools.py)
4. **Interested in AOP?** Try [aop_examples/client/example_new_agent_tools.py](aop_examples/client/example_new_agent_tools.py) for agent discovery
5. **Want to see social algorithms?** Check out [multi_agent/social_algorithms_examples/](multi_agent/social_algorithms_examples/)
6. **Looking for guides?** Visit [guides/](guides/) for comprehensive tutorials
7. **Need RAG?** Try [rag/qdrant_rag_example.py](rag/qdrant_rag_example.py)
8. **Want reasoning agents?** Check out [reasoning_agents/example_o3.py](reasoning_agents/example_o3.py)
2. **Want to use the CLI?** Check out [cli/](cli/) for all CLI command examples
3. **Want multi-agent workflows?** Check out [multi_agent/duo_agent.py](multi_agent/duo_agent.py)
4. **Need tool integration?** Explore [tools/agent_as_tools.py](tools/agent_as_tools.py)
5. **Interested in AOP?** Try [aop_examples/client/example_new_agent_tools.py](aop_examples/client/example_new_agent_tools.py) for agent discovery
6. **Want to see social algorithms?** Check out [multi_agent/social_algorithms_examples/](multi_agent/social_algorithms_examples/)
7. **Looking for guides?** Visit [guides/](guides/) for comprehensive tutorials
8. **Need RAG?** Try [single_agent/rag/](single_agent/rag/) for RAG examples
9. **Want reasoning agents?** Check out [reasoning_agents/](reasoning_agents/) for reasoning agent examples
## Key Examples by Category
@ -105,7 +150,7 @@ This directory contains comprehensive examples demonstrating various capabilitie
- [Simple Agent](single_agent/simple_agent.py) - Basic agent setup
- [Reasoning Agents](single_agent/reasoning_agent_examples/) - Advanced reasoning patterns
- [Vision Agents](single_agent/vision/multimodal_example.py) - Vision and multimodal capabilities
- [RAG Agents](single_agent/rag/qdrant_rag_example.py) - Retrieval augmented generation
- [RAG Agents](single_agent/rag/) - Retrieval augmented generation
### Tool Integrations
@ -122,6 +167,14 @@ This directory contains comprehensive examples demonstrating various capabilitie
- [Azure](single_agent/llms/azure_agent.py) - Azure OpenAI
- [Ollama](models/simple_example_ollama.py) - Local Ollama models
### CLI Examples
- [Setup Check](cli/01_setup_check.sh) - Verify environment setup
- [Create Agent](cli/05_create_agent.sh) - Create custom agents via CLI
- [LLM Council](cli/08_llm_council.sh) - Run LLM Council collaboration
- [HeavySwarm](cli/09_heavy_swarm.sh) - Run HeavySwarm for complex tasks
- [All CLI Examples](cli/) - Complete CLI examples directory
## Documentation
Each subdirectory contains its own README.md file with detailed descriptions and links to all available examples. Click on any folder above to explore its specific examples and use cases.

@ -92,7 +92,13 @@ financial_agent = Agent(
)
# Basic usage - individual agent addition
deployer = AOP(server_name="MyAgentServer", verbose=True, port=5932)
deployer = AOP(
server_name="MyAgentServer",
verbose=True,
port=5932,
json_response=True,
queue_enabled=False,
)
agents = [
research_agent,

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Setup Check Example
# Verify your Swarms environment setup
#
# Runs the `swarms setup-check` subcommand. Requires the Swarms CLI to be
# installed and on PATH. Usage: bash 01_setup_check.sh
swarms setup-check

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Onboarding Example
# Start the interactive onboarding process
#
# Interactive command: prompts for input on the terminal, so run it in an
# attached TTY rather than a non-interactive CI job.
swarms onboarding

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Get API Key Example
# Open API key portal in browser
#
# Presumably requires a desktop environment with a default browser configured
# — confirm behavior on headless machines before scripting around it.
swarms get-api-key

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Check Login Example
# Verify authentication status
#
# Read-only diagnostic; safe to run at any time to confirm credentials are
# set up before running the agent/swarm examples.
swarms check-login

@ -0,0 +1,12 @@
#!/bin/bash
# Swarms CLI - Create Agent Example
# Create and run a custom agent
#
# Invokes `swarms agent` with a name, description, system prompt, task, and
# model. NOTE(review): presumably needs a provider API key exported in the
# environment (e.g. OPENAI_API_KEY for gpt-4o-mini) — see the README.
swarms agent \
--name "Research Agent" \
--description "AI research specialist" \
--system-prompt "You are an expert research agent." \
--task "Analyze current trends in renewable energy" \
--model-name "gpt-4o-mini"

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Run Agents from YAML Example
# Execute agents from YAML configuration file
#
# Expects an `agents.yaml` file in the current working directory; adjust the
# --yaml-file path if you run this from elsewhere.
swarms run-agents --yaml-file agents.yaml

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Load Markdown Agents Example
# Load agents from markdown files
#
# Expects an `./agents/` directory (relative to the current working
# directory) containing markdown agent definitions.
swarms load-markdown --markdown-path ./agents/

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - LLM Council Example
# Run LLM Council for collaborative problem-solving
#
# Passes a sample question via --task; replace the string with your own.
# NOTE(review): presumably requires provider API keys in the environment.
swarms llm-council --task "What are the best energy ETFs to invest in right now?"

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - HeavySwarm Example
# Run HeavySwarm for complex task analysis
#
# Passes a sample analysis task via --task; replace the string with your own.
# NOTE(review): presumably requires provider API keys in the environment.
swarms heavy-swarm --task "Analyze current market trends for renewable energy investments"

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Autoswarm Example
# Auto-generate swarm configuration
#
# Generates a swarm configuration for the given --task using the --model
# named here ("gpt-4"); swap the model flag to use a different provider.
swarms autoswarm --task "Analyze quarterly sales data" --model "gpt-4"

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Features Example
# Display all available CLI features
#
# Read-only informational command; no API keys or arguments needed.
swarms features

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Help Example
# Display comprehensive help documentation
#
# Read-only informational command; no API keys or arguments needed.
swarms help

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Auto Upgrade Example
# Update Swarms to the latest version
#
# Modifies the installed Swarms package in the active environment; run inside
# the virtualenv you want upgraded.
swarms auto-upgrade

@ -0,0 +1,7 @@
#!/bin/bash
# Swarms CLI - Book Call Example
# Schedule a strategy session
#
# NOTE(review): presumably opens a scheduling page in the default browser —
# confirm behavior on headless machines.
swarms book-call

@ -0,0 +1,197 @@
# Swarms CLI Examples
This directory contains shell script examples demonstrating all available Swarms CLI commands and features. Each script is simple, focused, and demonstrates a single CLI command.
## Quick Start
All scripts are executable. Run them directly:
```bash
chmod +x *.sh
./01_setup_check.sh
```
Or execute with bash:
```bash
bash 01_setup_check.sh
```
## Available Examples
### Setup & Configuration
- **[01_setup_check.sh](01_setup_check.sh)** - Environment setup verification
```bash
swarms setup-check
```
- **[02_onboarding.sh](02_onboarding.sh)** - Interactive onboarding process
```bash
swarms onboarding
```
- **[03_get_api_key.sh](03_get_api_key.sh)** - Retrieve API keys
```bash
swarms get-api-key
```
- **[04_check_login.sh](04_check_login.sh)** - Verify authentication
```bash
swarms check-login
```
### Agent Management
- **[05_create_agent.sh](05_create_agent.sh)** - Create and run custom agents
```bash
swarms agent --name "Agent" --description "Description" --system-prompt "Prompt" --task "Task"
```
- **[06_run_agents_yaml.sh](06_run_agents_yaml.sh)** - Execute agents from YAML
```bash
swarms run-agents --yaml-file agents.yaml
```
- **[07_load_markdown.sh](07_load_markdown.sh)** - Load agents from markdown files
```bash
swarms load-markdown --markdown-path ./agents/
```
### Multi-Agent Architectures
- **[08_llm_council.sh](08_llm_council.sh)** - Run LLM Council collaboration
```bash
swarms llm-council --task "Your question here"
```
- **[09_heavy_swarm.sh](09_heavy_swarm.sh)** - Run HeavySwarm for complex tasks
```bash
swarms heavy-swarm --task "Your complex task here"
```
- **[10_autoswarm.sh](10_autoswarm.sh)** - Auto-generate swarm configurations
```bash
swarms autoswarm --task "Task description" --model "gpt-4"
```
### Utilities
- **[11_features.sh](11_features.sh)** - Display all available features
```bash
swarms features
```
- **[12_help.sh](12_help.sh)** - Display help documentation
```bash
swarms help
```
- **[13_auto_upgrade.sh](13_auto_upgrade.sh)** - Update Swarms package
```bash
swarms auto-upgrade
```
- **[14_book_call.sh](14_book_call.sh)** - Schedule strategy session
```bash
swarms book-call
```
### Run All Examples
- **[run_all_examples.sh](run_all_examples.sh)** - Run multiple examples in sequence
```bash
bash run_all_examples.sh
```
## Script Structure
Each script follows a simple pattern:
1. **Shebang** - `#!/bin/bash`
2. **Comment** - Brief description of what the script does
3. **Single Command** - One CLI command execution
Example:
```bash
#!/bin/bash
# Swarms CLI - Setup Check Example
# Verify your Swarms environment setup
swarms setup-check
```
## Usage Patterns
### Basic Command Execution
```bash
swarms <command> [options]
```
### With Verbose Output
```bash
swarms <command> --verbose
```
### Environment Variables
Set API keys before running scripts that require them:
```bash
export OPENAI_API_KEY="your-key-here"
export ANTHROPIC_API_KEY="your-key-here"
export GOOGLE_API_KEY="your-key-here"
```
## Examples by Category
### Setup & Diagnostics
- Environment setup verification
- Onboarding workflow
- API key management
- Authentication verification
### Single Agent Operations
- Custom agent creation
- Agent configuration from YAML
- Agent loading from markdown
### Multi-Agent Operations
- LLM Council for collaborative problem-solving
- HeavySwarm for complex analysis
- Auto-generated swarm configurations
### Information & Help
- Feature discovery
- Help documentation
- Package management
## File Paths
All scripts are located in `examples/cli/`:
- `examples/cli/01_setup_check.sh`
- `examples/cli/02_onboarding.sh`
- `examples/cli/03_get_api_key.sh`
- `examples/cli/04_check_login.sh`
- `examples/cli/05_create_agent.sh`
- `examples/cli/06_run_agents_yaml.sh`
- `examples/cli/07_load_markdown.sh`
- `examples/cli/08_llm_council.sh`
- `examples/cli/09_heavy_swarm.sh`
- `examples/cli/10_autoswarm.sh`
- `examples/cli/11_features.sh`
- `examples/cli/12_help.sh`
- `examples/cli/13_auto_upgrade.sh`
- `examples/cli/14_book_call.sh`
- `examples/cli/run_all_examples.sh`
## Related Documentation
- [CLI Reference](../../docs/swarms/cli/cli_reference.md) - Complete CLI documentation
- [Main Examples README](../README.md) - Other Swarms examples
- [Swarms Documentation](../../docs/) - Full Swarms documentation

@ -0,0 +1,11 @@
#!/bin/bash
# Swarms CLI - Run All Examples
#
# Runs a safe subset of the CLI examples in sequence: the read-only commands
# that need no API keys or arguments (setup-check, features, help).
#
# Usage: bash run_all_examples.sh

set -euo pipefail        # abort on the first failing command or unset variable

# Operate relative to this script's directory so the chmod below hits the
# sibling example scripts, not whatever the caller's cwd happens to contain.
cd "$(dirname "$0")"

# Make the sibling example scripts directly executable (./NN_example.sh).
chmod +x ./*.sh

swarms setup-check
swarms features
swarms help

@ -1,4 +1,3 @@
from x402.client import X402Client
from eth_account import Account
from x402.clients.httpx import x402HttpxClient
@ -10,8 +9,7 @@ load_dotenv()
async def buy_x402_service(
base_url: str = None,
endpoint: str = None
base_url: str = None, endpoint: str = None
):
"""
Purchase a service from the X402 bazaar using the provided affordable_service details.
@ -31,20 +29,22 @@ async def buy_x402_service(
```python
affordable_service = {"id": "service123", "price": 90000}
response = await buy_x402_service(
affordable_service,
base_url="https://api.cdp.coinbase.com",
affordable_service,
base_url="https://api.cdp.coinbase.com",
endpoint="/x402/v1/bazaar/services/service123"
)
print(await response.aread())
```
"""
key = os.getenv('X402_PRIVATE_KEY')
key = os.getenv("X402_PRIVATE_KEY")
# Set up your payment account from private key
account = Account.from_key(key)
async with x402HttpxClient(account=account, base_url=base_url) as client:
async with x402HttpxClient(
account=account, base_url=base_url
) as client:
response = await client.get(endpoint)
print(await response.aread())
return response
return response

@ -4,7 +4,6 @@ from swarms import Agent
import httpx
async def query_x402_services(
limit: Optional[int] = None,
max_price: Optional[int] = None,
@ -207,7 +206,6 @@ def get_x402_services_sync(
return str(services)
agent = Agent(
agent_name="X402-Discovery-Agent",
agent_description="A agent that queries the x402 discovery services from the Coinbase CDP API.",
@ -228,4 +226,4 @@ if __name__ == "__main__":
out = agent.run(
task="Summarize the first 10 services under 100000 atomic units (e.g., $0.10 USDC)"
)
print(out)
print(out)

@ -6,7 +6,49 @@ This directory contains examples demonstrating debate patterns for multi-agent s
Debate patterns enable agents to engage in structured discussions, present arguments, and reach conclusions through discourse. This pattern is useful for exploring multiple perspectives on complex topics and arriving at well-reasoned decisions.
## Note
## Examples
This directory is currently being populated with debate examples. Check back soon for implementations!
### DebateWithJudge
The `DebateWithJudge` architecture implements a debate system with self-refinement:
- **Agent A (Pro)** and **Agent B (Con)** present opposing arguments
- Both arguments are evaluated by a **Judge/Critic Agent**
- The Judge provides a winner or synthesis → refined answer
- The process repeats for N rounds to progressively improve the answer
**Architecture Flow:**
```
Agent A (Pro) ↔ Agent B (Con)
│ │
▼ ▼
Judge / Critic Agent
Winner or synthesis → refined answer
```
**Example Usage:**
```python
from swarms import Agent
from swarms.structs.debate_with_judge import DebateWithJudge
# Create Pro, Con, and Judge agents
pro_agent = Agent(agent_name="Pro-Agent", ...)
con_agent = Agent(agent_name="Con-Agent", ...)
judge_agent = Agent(agent_name="Judge-Agent", ...)
# Create debate system
debate = DebateWithJudge(
pro_agent=pro_agent,
con_agent=con_agent,
judge_agent=judge_agent,
max_rounds=3
)
# Run debate
result = debate.run("Should AI be regulated?")
```
See [debate_with_judge_example.py](./debate_with_judge_example.py) for a complete example.

@ -0,0 +1,86 @@
"""
Example 3: Business Strategy Debate with Custom Configuration
This example demonstrates a business strategy debate with custom agent configurations,
multiple rounds, and accessing conversation history.
"""
from swarms import Agent, DebateWithJudge
# Create business strategy agents with detailed expertise
pro_agent = Agent(
agent_name="Growth-Strategy-Pro",
system_prompt=(
"You are a business strategy consultant specializing in aggressive growth strategies. "
"You argue in favor of rapid expansion, market penetration, and scaling. "
"You present arguments focusing on first-mover advantages, market share capture, "
"network effects, and competitive positioning. You use case studies from "
"successful companies like Amazon, Uber, and Airbnb to support your position."
),
model_name="gpt-4o-mini",
max_loops=1,
)
con_agent = Agent(
agent_name="Sustainable-Growth-Pro",
system_prompt=(
"You are a business strategy consultant specializing in sustainable, profitable growth. "
"You argue against aggressive expansion in favor of measured, sustainable growth. "
"You present counter-arguments focusing on profitability, unit economics, "
"sustainable competitive advantages, and avoiding overextension. You identify "
"weaknesses in 'growth at all costs' approaches and provide compelling alternatives "
"based on companies like Apple, Microsoft, and Berkshire Hathaway."
),
model_name="gpt-4o-mini",
max_loops=1,
)
judge_agent = Agent(
agent_name="Strategy-Judge",
system_prompt=(
"You are a seasoned business strategist and former CEO evaluating growth strategy debates. "
"You carefully analyze arguments from both sides, considering factors like: "
"- Market conditions and competitive landscape\n"
"- Company resources and capabilities\n"
"- Risk tolerance and financial position\n"
"- Long-term sustainability vs. short-term growth\n"
"- Industry-specific dynamics\n\n"
"You provide balanced synthesis that incorporates the best elements from both arguments, "
"considering context-specific factors. You may recommend a hybrid approach when appropriate."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the debate system with extended rounds for complex strategy discussions
strategy_debate = DebateWithJudge(
pro_agent=pro_agent,
con_agent=con_agent,
judge_agent=judge_agent,
max_rounds=4, # More rounds for complex strategic discussions
output_type="dict", # Use dict format for structured analysis
verbose=True,
)
# Define a complex business strategy question
strategy_question = (
"A SaaS startup with $2M ARR, 40% gross margins, and $500K in the bank "
"is considering two paths:\n"
"1. Aggressive growth: Raise $10M, hire 50 people, expand to 5 new markets\n"
"2. Sustainable growth: Focus on profitability, improve unit economics, "
"expand gradually with existing resources\n\n"
"Which strategy should they pursue? Consider market conditions, competitive "
"landscape, and long-term viability."
)
# Run the debate
result = strategy_debate.run(task=strategy_question)
print(result)
# Get the full conversation history for detailed analysis
history = strategy_debate.get_conversation_history()
print(history)
# Get the final refined answer
final_answer = strategy_debate.get_final_answer()
print(final_answer)

@ -0,0 +1,61 @@
from swarms import Agent, DebateWithJudge

# Every debater in this example shares the same model settings, so a small
# factory keeps the three Agent definitions uniform.
_MODEL_NAME = "gpt-4o-mini"


def _debater(name, prompt):
    """Build a single-loop agent with the shared model configuration."""
    return Agent(
        agent_name=name,
        system_prompt=prompt,
        model_name=_MODEL_NAME,
        max_loops=1,
    )


# Pro side: argues in favor of the motion.
pro_agent = _debater(
    "Pro-Agent",
    (
        "You are a skilled debater who argues in favor of positions. "
        "You present well-reasoned arguments with evidence, examples, "
        "and logical reasoning. You are persuasive and articulate."
    ),
)

# Con side: argues against the motion.
con_agent = _debater(
    "Con-Agent",
    (
        "You are a skilled debater who argues against positions. "
        "You present strong counter-arguments with evidence, examples, "
        "and logical reasoning. You identify weaknesses in opposing "
        "arguments and provide compelling alternatives."
    ),
)

# Judge: evaluates both sides and produces a synthesis or winner.
judge_agent = _debater(
    "Judge-Agent",
    (
        "You are an impartial judge who evaluates debates. "
        "You carefully analyze arguments from both sides, identify "
        "strengths and weaknesses, and provide balanced synthesis. "
        "You may declare a winner or provide a refined answer that "
        "incorporates the best elements from both arguments."
    ),
)

# Wire the three agents into the debate-with-judge architecture.
debate_system = DebateWithJudge(
    pro_agent=pro_agent,
    con_agent=con_agent,
    judge_agent=judge_agent,
    max_rounds=3,  # three rounds of debate and refinement
    output_type="str-all-except-first",  # return as formatted string
    verbose=True,  # enable verbose logging
)

# The motion under debate.
topic = (
    "Should artificial intelligence be regulated by governments? "
    "Discuss the balance between innovation and safety."
)

result = debate_system.run(task=topic)
print(result)

@ -0,0 +1,82 @@
"""
Example 1: Policy Debate on AI Regulation
This example demonstrates using DebateWithJudge for a comprehensive policy debate
on AI regulation, with multiple rounds of refinement.
"""
from swarms import Agent, DebateWithJudge
# Create the Pro agent (arguing in favor of AI regulation)
pro_agent = Agent(
agent_name="Pro-Regulation-Agent",
system_prompt=(
"You are a policy expert specializing in technology regulation. "
"You argue in favor of government regulation of artificial intelligence. "
"You present well-reasoned arguments focusing on safety, ethics, "
"and public interest. You use evidence, examples, and logical reasoning. "
"You are persuasive and articulate, emphasizing the need for oversight "
"to prevent harm and ensure responsible AI development."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the Con agent (arguing against AI regulation)
con_agent = Agent(
agent_name="Anti-Regulation-Agent",
system_prompt=(
"You are a technology policy expert specializing in innovation. "
"You argue against heavy government regulation of artificial intelligence. "
"You present strong counter-arguments focusing on innovation, economic growth, "
"and the risks of over-regulation. You identify weaknesses in regulatory "
"proposals and provide compelling alternatives such as industry self-regulation "
"and ethical guidelines. You emphasize the importance of maintaining "
"technological competitiveness."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the Judge agent (evaluates and synthesizes)
judge_agent = Agent(
agent_name="Policy-Judge-Agent",
system_prompt=(
"You are an impartial policy analyst and judge who evaluates debates on "
"technology policy. You carefully analyze arguments from both sides, "
"identify strengths and weaknesses, and provide balanced synthesis. "
"You consider multiple perspectives including safety, innovation, economic impact, "
"and ethical considerations. You may declare a winner or provide a refined "
"answer that incorporates the best elements from both arguments, such as "
"balanced regulatory frameworks that protect public interest while fostering innovation."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the DebateWithJudge system
debate_system = DebateWithJudge(
pro_agent=pro_agent,
con_agent=con_agent,
judge_agent=judge_agent,
max_rounds=3,
output_type="str-all-except-first",
verbose=True,
)
# Define the debate topic
topic = (
"Should artificial intelligence be regulated by governments? "
"Discuss the balance between innovation and safety, considering "
"both the potential benefits of regulation (safety, ethics, public trust) "
"and the potential drawbacks (stifling innovation, economic impact, "
"regulatory capture). Provide a nuanced analysis."
)
# Run the debate
result = debate_system.run(task=topic)
print(result)
# Get the final refined answer
final_answer = debate_system.get_final_answer()
print(final_answer)

@ -0,0 +1,70 @@
"""
Example 2: Technical Architecture Debate with Batch Processing
This example demonstrates using batched_run to process multiple technical
architecture questions, comparing different approaches to system design.
"""
from swarms import Agent, DebateWithJudge
# Create specialized technical agents
pro_agent = Agent(
agent_name="Microservices-Pro",
system_prompt=(
"You are a software architecture expert advocating for microservices architecture. "
"You present arguments focusing on scalability, independent deployment, "
"technology diversity, and team autonomy. You use real-world examples and "
"case studies to support your position."
),
model_name="gpt-4o-mini",
max_loops=1,
)
con_agent = Agent(
agent_name="Monolith-Pro",
system_prompt=(
"You are a software architecture expert advocating for monolithic architecture. "
"You present counter-arguments focusing on simplicity, reduced complexity, "
"easier debugging, and lower operational overhead. You identify weaknesses "
"in microservices approaches and provide compelling alternatives."
),
model_name="gpt-4o-mini",
max_loops=1,
)
judge_agent = Agent(
agent_name="Architecture-Judge",
system_prompt=(
"You are a senior software architect evaluating architecture debates. "
"You analyze both arguments considering factors like team size, project scale, "
"complexity, operational capabilities, and long-term maintainability. "
"You provide balanced synthesis that considers context-specific trade-offs."
),
model_name="gpt-4o-mini",
max_loops=1,
)
# Create the debate system
architecture_debate = DebateWithJudge(
pro_agent=pro_agent,
con_agent=con_agent,
judge_agent=judge_agent,
max_rounds=2, # Fewer rounds for more focused technical debates
output_type="str-all-except-first",
verbose=True,
)
# Define multiple architecture questions
architecture_questions = [
"Should a startup with 5 developers use microservices or monolithic architecture?",
"Is serverless architecture better than containerized deployments for event-driven systems?",
"Should a financial application use SQL or NoSQL databases for transaction processing?",
"Is event-driven architecture superior to request-response for real-time systems?",
]
# Execute batch processing
results = architecture_debate.batched_run(architecture_questions)
# Display results
for result in results:
print(result)

@ -0,0 +1,46 @@
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent


def _make_agent(name, description):
    """Build a quiet, single-iteration worker agent."""
    return Agent(
        agent_name=name,
        agent_description=description,
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )


research_agent = _make_agent(
    "Research-Analyst",
    "Specialized in comprehensive research and data gathering",
)
analysis_agent = _make_agent(
    "Data-Analyst", "Expert in data analysis and pattern recognition"
)
strategy_agent = _make_agent(
    "Strategy-Consultant",
    "Specialized in strategic planning and recommendations",
)

workflow = GraphWorkflow(
    name="Rustworkx-Basic-Workflow",
    description="Basic workflow using rustworkx backend for faster graph operations",
    backend="rustworkx",
    verbose=False,
)

# Linear pipeline: research -> analysis -> strategy.
stages = [research_agent, analysis_agent, strategy_agent]
for stage in stages:
    workflow.add_node(stage)
for upstream, downstream in zip(stages, stages[1:]):
    workflow.add_edge(upstream, downstream)

results = workflow.run(
    task="Conduct a research analysis on water stocks and ETFs"
)
for agent_name, output in results.items():
    print(f"{agent_name}: {output}")

@ -0,0 +1,56 @@
import time
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent

# Five identical single-pass agents shared by both workflows.
agents = [
    Agent(
        agent_name=f"Agent-{i}",
        agent_description=f"Agent number {i}",
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )
    for i in range(5)
]


def _build_chain_workflow(name, backend):
    """Build a linear chain workflow over ``agents`` on the given backend."""
    wf = GraphWorkflow(name=name, backend=backend, verbose=False)
    for agent in agents:
        wf.add_node(agent)
    for i in range(len(agents) - 1):
        wf.add_edge(agents[i], agents[i + 1])
    return wf


def _timed_compile(wf):
    """Compile ``wf`` and return elapsed seconds.

    Uses time.perf_counter(): monotonic and high-resolution, unlike
    time.time(), which can jump under clock adjustments and is too coarse
    for sub-millisecond compile timings.
    """
    start = time.perf_counter()
    wf.compile()
    return time.perf_counter() - start


nx_workflow = _build_chain_workflow("NetworkX-Workflow", "networkx")
nx_compile_time = _timed_compile(nx_workflow)

rx_workflow = _build_chain_workflow("Rustworkx-Workflow", "rustworkx")
rx_compile_time = _timed_compile(rx_workflow)

# Guard against a zero denominator on very fast compiles.
speedup = (
    nx_compile_time / rx_compile_time if rx_compile_time > 0 else 0
)
print(f"NetworkX compile time: {nx_compile_time:.4f}s")
print(f"Rustworkx compile time: {rx_compile_time:.4f}s")
print(f"Speedup: {speedup:.2f}x")
print(
    f"Identical layers: {nx_workflow._sorted_layers == rx_workflow._sorted_layers}"
)

@ -0,0 +1,73 @@
from swarms import Agent, GraphWorkflow


def _worker(name, description):
    """Construct a single-iteration, quiet agent."""
    return Agent(
        agent_name=name,
        agent_description=description,
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )


coordinator = _worker("Coordinator", "Coordinates and distributes tasks")
tech_analyst = _worker("Tech-Analyst", "Technical analysis specialist")
fundamental_analyst = _worker(
    "Fundamental-Analyst", "Fundamental analysis specialist"
)
sentiment_analyst = _worker(
    "Sentiment-Analyst", "Sentiment analysis specialist"
)
synthesis_agent = _worker(
    "Synthesis-Agent", "Synthesizes multiple analyses into final report"
)

workflow = GraphWorkflow(
    name="Fan-Out-Fan-In-Workflow",
    description="Demonstrates parallel processing patterns with rustworkx",
    backend="rustworkx",
    verbose=False,
)

analysts = [tech_analyst, fundamental_analyst, sentiment_analyst]
for node in [coordinator, *analysts, synthesis_agent]:
    workflow.add_node(node)

# Fan out from the coordinator, then fan back in to the synthesizer.
workflow.add_edges_from_source(coordinator, analysts)
workflow.add_edges_to_target(analysts, synthesis_agent)

results = workflow.run(
    task="Analyze Tesla stock from technical, fundamental, and sentiment perspectives"
)
for agent_name, output in results.items():
    print(f"{agent_name}: {output}")
workflow.visualize(view=True)

@ -0,0 +1,101 @@
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent


def _build_agent(name, description):
    """Create a quiet, single-pass agent for this workflow."""
    return Agent(
        agent_name=name,
        agent_description=description,
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )


data_collector_1 = _build_agent("Data-Collector-1", "Collects market data")
data_collector_2 = _build_agent("Data-Collector-2", "Collects financial data")
technical_analyst = _build_agent(
    "Technical-Analyst", "Performs technical analysis"
)
fundamental_analyst = _build_agent(
    "Fundamental-Analyst", "Performs fundamental analysis"
)
risk_analyst = _build_agent("Risk-Analyst", "Performs risk analysis")
strategy_consultant = _build_agent(
    "Strategy-Consultant", "Develops strategic recommendations"
)
report_writer = _build_agent(
    "Report-Writer", "Writes comprehensive reports"
)

workflow = GraphWorkflow(
    name="Complex-Multi-Layer-Workflow",
    description="Complex workflow with multiple layers and parallel processing",
    backend="rustworkx",
    verbose=False,
)

collectors = [data_collector_1, data_collector_2]
analysts = [technical_analyst, fundamental_analyst, risk_analyst]
for node in collectors + analysts + [strategy_consultant, report_writer]:
    workflow.add_node(node)

# Layer 1 -> 2: every collector feeds every analyst (full mesh).
workflow.add_parallel_chain(collectors, analysts)
# Layer 2 -> 3: analysts feed both the strategist and the report writer.
workflow.add_edges_to_target(analysts, strategy_consultant)
workflow.add_edges_to_target(analysts, report_writer)
# The strategist's output also flows into the final report.
workflow.add_edge(strategy_consultant, report_writer)

results = workflow.run(
    task="Conduct a comprehensive analysis of the renewable energy sector including market trends, financial health, and risk assessment"
)
for agent_name, output in results.items():
    print(f"{agent_name}: {output}")

@ -0,0 +1,104 @@
import time
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent


def _make_agents(count):
    """Create ``count`` quiet, single-pass test agents."""
    return [
        Agent(
            agent_name=f"Agent-{i}",
            agent_description=f"Agent number {i}",
            model_name="gpt-4o-mini",
            max_loops=1,
            verbose=False,
        )
        for i in range(count)
    ]


def _chain_compile_time(name, backend, agents):
    """Build a linear chain workflow and return its compile time in seconds.

    Timing uses time.perf_counter(): monotonic and high-resolution, unlike
    time.time(), which is unsuitable for benchmarking (coarse resolution,
    can move backwards on clock adjustment). auto_compile is disabled so
    the compile step itself is what gets measured.
    """
    wf = GraphWorkflow(
        name=name,
        backend=backend,
        verbose=False,
        auto_compile=False,
    )
    for agent in agents:
        wf.add_node(agent)
    for i in range(len(agents) - 1):
        wf.add_edge(agents[i], agents[i + 1])
    start = time.perf_counter()
    wf.compile()
    return time.perf_counter() - start


agents_small = _make_agents(5)
agents_medium = _make_agents(20)

# Same measurement order as before: NX then RX, small then medium.
nx_small_time = _chain_compile_time("NetworkX-Small", "networkx", agents_small)
rx_small_time = _chain_compile_time("Rustworkx-Small", "rustworkx", agents_small)
nx_medium_time = _chain_compile_time("NetworkX-Medium", "networkx", agents_medium)
rx_medium_time = _chain_compile_time("Rustworkx-Medium", "rustworkx", agents_medium)

print(
    f"Small (5 agents) - NetworkX: {nx_small_time:.4f}s, Rustworkx: {rx_small_time:.4f}s, Speedup: {nx_small_time/rx_small_time if rx_small_time > 0 else 0:.2f}x"
)
print(
    f"Medium (20 agents) - NetworkX: {nx_medium_time:.4f}s, Rustworkx: {rx_medium_time:.4f}s, Speedup: {nx_medium_time/rx_medium_time if rx_medium_time > 0 else 0:.2f}x"
)

@ -0,0 +1,55 @@
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent

# One agent reused across every workflow variant below.
test_agent = Agent(
    agent_name="Test-Agent",
    agent_description="Test agent for error handling",
    model_name="gpt-4o-mini",
    max_loops=1,
    verbose=False,
)

# Explicit rustworkx backend.
workflow_rx = GraphWorkflow(
    name="Rustworkx-Workflow",
    backend="rustworkx",
    verbose=False,
)
workflow_rx.add_node(test_agent)

# Explicit networkx backend.
workflow_nx = GraphWorkflow(
    name="NetworkX-Workflow",
    backend="networkx",
    verbose=False,
)
workflow_nx.add_node(test_agent)

# No backend argument: exercises the default selection.
workflow_default = GraphWorkflow(
    name="Default-Workflow",
    verbose=False,
)
workflow_default.add_node(test_agent)

# Unknown backend name: exercises the graceful fallback path.
workflow_invalid = GraphWorkflow(
    name="Invalid-Workflow",
    backend="invalid_backend",
    verbose=False,
)
workflow_invalid.add_node(test_agent)

print(
    f"Rustworkx backend: {type(workflow_rx.graph_backend).__name__}"
)
print(f"NetworkX backend: {type(workflow_nx.graph_backend).__name__}")
print(
    f"Default backend: {type(workflow_default.graph_backend).__name__}"
)
print(
    f"Invalid backend fallback: {type(workflow_invalid.graph_backend).__name__}"
)

# Availability probe: only the import's success matters, so no alias is
# bound (the previous `as rx` name was never used).
try:
    import rustworkx  # noqa: F401

    print("Rustworkx available: True")
except ImportError:
    print("Rustworkx available: False")

@ -0,0 +1,61 @@
import time
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent

# Total number of agents. The edge-index constants below assume 30:
# front chain 0-9, fan-out hub at index 5, middle band 10-19, back chain 20-29.
NUM_AGENTS = 30

agents = [
    Agent(
        agent_name=f"Agent-{i:02d}",
        agent_description=f"Agent number {i} in large-scale workflow",
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )
    for i in range(NUM_AGENTS)
]

workflow = GraphWorkflow(
    name="Large-Scale-Workflow",
    description=f"Large-scale workflow with {NUM_AGENTS} agents using rustworkx",
    backend="rustworkx",
    verbose=False,
)

# time.perf_counter() is monotonic and high-resolution; time.time() is not
# suitable for timing (coarse, can jump under clock adjustment).
start_time = time.perf_counter()
for agent in agents:
    workflow.add_node(agent)
add_nodes_time = time.perf_counter() - start_time

start_time = time.perf_counter()
# Front chain: Agent-00 -> ... -> Agent-09.
for i in range(9):
    workflow.add_edge(agents[i], agents[i + 1])
# Fan out from the middle of the front chain into the 10-19 band.
workflow.add_edges_from_source(
    agents[5],
    agents[10:20],
)
# Fan the band back into a single convergence point.
workflow.add_edges_to_target(
    agents[10:20],
    agents[20],
)
# Back chain: Agent-20 -> ... -> Agent-29.
for i in range(20, 29):
    workflow.add_edge(agents[i], agents[i + 1])
add_edges_time = time.perf_counter() - start_time

start_time = time.perf_counter()
workflow.compile()
compile_time = time.perf_counter() - start_time

print(
    f"Agents: {len(workflow.nodes)}, Edges: {len(workflow.edges)}, Layers: {len(workflow._sorted_layers)}"
)
print(
    f"Node addition: {add_nodes_time:.4f}s, Edge addition: {add_edges_time:.4f}s, Compilation: {compile_time:.4f}s"
)
print(
    f"Total setup: {add_nodes_time + add_edges_time + compile_time:.4f}s"
)

@ -0,0 +1,73 @@
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent


def _spawn(name, description):
    """Create a quiet, single-iteration agent."""
    return Agent(
        agent_name=name,
        agent_description=description,
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )


data_collector_1 = _spawn("Data-Collector-1", "Collects market data")
data_collector_2 = _spawn("Data-Collector-2", "Collects financial data")
data_collector_3 = _spawn("Data-Collector-3", "Collects news data")
technical_analyst = _spawn(
    "Technical-Analyst", "Performs technical analysis"
)
fundamental_analyst = _spawn(
    "Fundamental-Analyst", "Performs fundamental analysis"
)
sentiment_analyst = _spawn(
    "Sentiment-Analyst", "Performs sentiment analysis"
)

workflow = GraphWorkflow(
    name="Parallel-Chain-Workflow",
    description="Demonstrates parallel chain pattern with rustworkx",
    backend="rustworkx",
    verbose=False,
)

sources = [data_collector_1, data_collector_2, data_collector_3]
targets = [technical_analyst, fundamental_analyst, sentiment_analyst]
for agent in sources + targets:
    workflow.add_node(agent)

# Full mesh: every collector feeds every analyst.
workflow.add_parallel_chain(sources, targets)
workflow.compile()

results = workflow.run(
    task="Analyze the technology sector using multiple data sources and analysis methods"
)
for agent_name, output in results.items():
    print(f"{agent_name}: {output}")

@ -0,0 +1,79 @@
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent


def _agent(name, description):
    """Create a quiet, single-loop agent for validation tests."""
    return Agent(
        agent_name=name,
        agent_description=description,
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )


agent_a = _agent("Agent-A", "Agent A")
agent_b = _agent("Agent-B", "Agent B")
agent_c = _agent("Agent-C", "Agent C")
agent_isolated = _agent(
    "Agent-Isolated", "Isolated agent with no connections"
)

workflow = GraphWorkflow(
    name="Validation-Workflow",
    description="Workflow for validation testing",
    backend="rustworkx",
    verbose=False,
)
for node in (agent_a, agent_b, agent_c, agent_isolated):
    workflow.add_node(node)
# A -> B -> C chain; the isolated agent stays disconnected on purpose
# so validation has something to warn about.
workflow.add_edge(agent_a, agent_b)
workflow.add_edge(agent_b, agent_c)

validation_result = workflow.validate(auto_fix=False)
print(f"Valid: {validation_result['is_valid']}")
print(f"Warnings: {len(validation_result['warnings'])}")
print(f"Errors: {len(validation_result['errors'])}")

validation_result_fixed = workflow.validate(auto_fix=True)
print(
    f"After auto-fix - Valid: {validation_result_fixed['is_valid']}"
)
print(f"Fixed: {len(validation_result_fixed['fixed'])}")
print(f"Entry points: {workflow.entry_points}")
print(f"End points: {workflow.end_points}")

workflow_cycle = GraphWorkflow(
    name="Cycle-Test-Workflow",
    backend="rustworkx",
    verbose=False,
)
for node in (agent_a, agent_b, agent_c):
    workflow_cycle.add_node(node)
# Deliberate A -> B -> C -> A cycle to exercise cycle detection.
workflow_cycle.add_edge(agent_a, agent_b)
workflow_cycle.add_edge(agent_b, agent_c)
workflow_cycle.add_edge(agent_c, agent_a)

cycle_validation = workflow_cycle.validate(auto_fix=False)
print(f"Cycles detected: {len(cycle_validation.get('cycles', []))}")

@ -0,0 +1,122 @@
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent


def _new_agent(name, description):
    """Create a quiet, single-pass agent for the research pipeline."""
    return Agent(
        agent_name=name,
        agent_description=description,
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )


market_researcher = _new_agent(
    "Market-Researcher",
    "Conducts comprehensive market research and data collection",
)
competitor_analyst = _new_agent(
    "Competitor-Analyst",
    "Analyzes competitor landscape and positioning",
)
market_analyst = _new_agent(
    "Market-Analyst", "Analyzes market trends and opportunities"
)
financial_analyst = _new_agent(
    "Financial-Analyst", "Analyzes financial metrics and projections"
)
risk_analyst = _new_agent(
    "Risk-Analyst", "Assesses market risks and challenges"
)
strategy_consultant = _new_agent(
    "Strategy-Consultant",
    "Develops strategic recommendations based on all analyses",
)
report_writer = _new_agent(
    "Report-Writer", "Compiles comprehensive market research report"
)
executive_summary_writer = _new_agent(
    "Executive-Summary-Writer",
    "Creates executive summary for leadership",
)

workflow = GraphWorkflow(
    name="Market-Research-Workflow",
    description="Real-world market research workflow using rustworkx backend",
    backend="rustworkx",
    verbose=False,
)

researchers = [market_researcher, competitor_analyst]
analysts = [market_analyst, financial_analyst, risk_analyst]
writers = [report_writer, executive_summary_writer]
for node in researchers + analysts + [strategy_consultant] + writers:
    workflow.add_node(node)

# Phase 1 -> 2: both researchers feed every analyst (full mesh).
workflow.add_parallel_chain(researchers, analysts)
# Phase 2 -> 3: analysts converge on the strategy consultant.
workflow.add_edges_to_target(analysts, strategy_consultant)
# Phase 3 -> 4: strategy fans out to both writers.
workflow.add_edges_from_source(strategy_consultant, writers)
# Analysts also feed the detailed report directly.
workflow.add_edges_to_target(analysts, report_writer)

task = """
Conduct a comprehensive market research analysis on the electric vehicle (EV) industry:
1. Research current market size, growth trends, and key players
2. Analyze competitor landscape and market positioning
3. Assess financial opportunities and investment potential
4. Evaluate risks and challenges in the EV market
5. Develop strategic recommendations
6. Create detailed report and executive summary
"""

results = workflow.run(task=task)
for agent_name, output in results.items():
    print(f"{agent_name}: {output}")

@ -0,0 +1,156 @@
# Rustworkx Backend Examples
This directory contains comprehensive examples demonstrating the use of the **rustworkx backend** in GraphWorkflow. Rustworkx provides faster graph operations compared to NetworkX, especially for large graphs and complex operations.
## Installation
Before running these examples, ensure rustworkx is installed:
```bash
pip install rustworkx
```
If rustworkx is not installed, GraphWorkflow will automatically fall back to the NetworkX backend.
## Examples Overview
### 01_basic_usage.py
Basic example showing how to use rustworkx backend with GraphWorkflow. Demonstrates simple linear workflow creation and execution.
**Key Concepts:**
- Initializing GraphWorkflow with rustworkx backend
- Adding agents and creating edges
- Running a workflow
### 02_backend_comparison.py
Compares NetworkX and Rustworkx backends side-by-side, showing performance differences and functional equivalence.
**Key Concepts:**
- Backend comparison
- Performance metrics
- Functional equivalence verification
### 03_fan_out_fan_in_patterns.py
Demonstrates parallel processing patterns: fan-out (one-to-many) and fan-in (many-to-one) connections.
**Key Concepts:**
- Fan-out pattern: `add_edges_from_source()`
- Fan-in pattern: `add_edges_to_target()`
- Parallel execution optimization
### 04_complex_workflow.py
Shows a complex multi-layer workflow with multiple parallel branches and convergence points.
**Key Concepts:**
- Multi-layer workflows
- Parallel chains: `add_parallel_chain()`
- Complex graph structures
### 05_performance_benchmark.py
Benchmarks performance differences between NetworkX and Rustworkx for various graph sizes and structures.
**Key Concepts:**
- Performance benchmarking
- Scalability testing
- Different graph topologies (chain, tree)
### 06_error_handling.py
Demonstrates error handling and graceful fallback behavior when rustworkx is unavailable.
**Key Concepts:**
- Error handling
- Automatic fallback to NetworkX
- Backend availability checking
### 07_large_scale_workflow.py
Demonstrates rustworkx's efficiency with large-scale workflows containing many agents.
**Key Concepts:**
- Large-scale workflows
- Performance with many nodes/edges
- Complex interconnections
### 08_parallel_chain_example.py
Detailed example of the parallel chain pattern creating a full mesh connection.
**Key Concepts:**
- Parallel chain pattern
- Full mesh connections
- Maximum parallelization
### 09_workflow_validation.py
Shows workflow validation features including cycle detection, isolated nodes, and auto-fixing.
**Key Concepts:**
- Workflow validation
- Cycle detection
- Auto-fixing capabilities
### 10_real_world_scenario.py
A realistic market research workflow demonstrating real-world agent coordination scenarios.
**Key Concepts:**
- Real-world use case
- Complex multi-phase workflow
- Practical application
## Quick Start
Run any example:
```bash
python 01_basic_usage.py
```
## Backend Selection
To use rustworkx backend:
```python
workflow = GraphWorkflow(
backend="rustworkx", # Use rustworkx
# ... other parameters
)
```
To use NetworkX backend (default):
```python
workflow = GraphWorkflow(
backend="networkx", # Or omit for default
# ... other parameters
)
```
## Performance Benefits
Rustworkx provides performance benefits especially for:
- **Large graphs** (100+ nodes)
- **Complex operations** (topological sorting, cycle detection)
- **Frequent graph modifications** (adding/removing nodes/edges)
## Key Differences
While both backends are functionally equivalent, rustworkx:
- Uses integer indices internally (abstracted away)
- Provides faster graph operations
- Offers better memory efficiency for large graphs
- Maintains full compatibility with GraphWorkflow API
## Notes
- Both backends produce identical results
- Rustworkx automatically falls back to NetworkX if not installed
- All GraphWorkflow features work with both backends
- Performance gains become more significant with larger graphs
## Requirements
- `swarms` package
- `rustworkx` (optional, for rustworkx backend)
- `networkx` (always available, default backend)
## Contributing
Feel free to add more examples demonstrating rustworkx capabilities or specific use cases!

@ -0,0 +1,632 @@
import pytest
from swarms.structs.graph_workflow import (
GraphWorkflow,
)
from swarms.structs.agent import Agent
try:
import rustworkx as rx
RUSTWORKX_AVAILABLE = True
except ImportError:
RUSTWORKX_AVAILABLE = False
def create_test_agent(name: str, description: "str | None" = None) -> Agent:
    """Create a lightweight agent for backend tests.

    Args:
        name: Unique agent name; used as the node id inside workflows.
        description: Optional human-readable description. Defaults to a
            generated string mentioning ``name``.

    Returns:
        Agent: A quiet, single-loop agent suitable for graph tests.
    """
    if description is None:
        description = f"Test agent for {name} operations"
    return Agent(
        agent_name=name,
        agent_description=description,
        model_name="gpt-4o-mini",
        verbose=False,
        print_on=False,
        max_loops=1,
    )
@pytest.mark.skipif(
not RUSTWORKX_AVAILABLE, reason="rustworkx not available"
)
class TestRustworkxBackend:
"""Test suite for rustworkx backend"""
def test_rustworkx_backend_initialization(self):
"""Test that rustworkx backend is properly initialized"""
workflow = GraphWorkflow(name="Test", backend="rustworkx")
assert (
workflow.graph_backend.__class__.__name__
== "RustworkxBackend"
)
assert hasattr(workflow.graph_backend, "_node_id_to_index")
assert hasattr(workflow.graph_backend, "_index_to_node_id")
assert hasattr(workflow.graph_backend, "graph")
def test_rustworkx_node_addition(self):
"""Test adding nodes to rustworkx backend"""
workflow = GraphWorkflow(name="Test", backend="rustworkx")
agent = create_test_agent("TestAgent", "Test agent")
workflow.add_node(agent)
assert "TestAgent" in workflow.nodes
assert "TestAgent" in workflow.graph_backend._node_id_to_index
assert (
workflow.graph_backend._node_id_to_index["TestAgent"]
in workflow.graph_backend._index_to_node_id
)
def test_rustworkx_edge_addition(self):
"""Test adding edges to rustworkx backend"""
workflow = GraphWorkflow(name="Test", backend="rustworkx")
agent1 = create_test_agent("Agent1", "First agent")
agent2 = create_test_agent("Agent2", "Second agent")
workflow.add_node(agent1)
workflow.add_node(agent2)
workflow.add_edge(agent1, agent2)
assert len(workflow.edges) == 1
assert workflow.edges[0].source == "Agent1"
assert workflow.edges[0].target == "Agent2"
def test_rustworkx_topological_generations_linear(self):
"""Test topological generations with linear chain"""
workflow = GraphWorkflow(
name="Linear-Test", backend="rustworkx"
)
agents = [
create_test_agent(f"Agent{i}", f"Agent {i}")
for i in range(5)
]
for agent in agents:
workflow.add_node(agent)
for i in range(len(agents) - 1):
workflow.add_edge(agents[i], agents[i + 1])
workflow.compile()
assert len(workflow._sorted_layers) == 5
assert workflow._sorted_layers[0] == ["Agent0"]
assert workflow._sorted_layers[1] == ["Agent1"]
assert workflow._sorted_layers[2] == ["Agent2"]
assert workflow._sorted_layers[3] == ["Agent3"]
assert workflow._sorted_layers[4] == ["Agent4"]
def test_rustworkx_topological_generations_fan_out(self):
"""Test topological generations with fan-out pattern"""
workflow = GraphWorkflow(
name="FanOut-Test", backend="rustworkx"
)
coordinator = create_test_agent("Coordinator", "Coordinates")
analyst1 = create_test_agent("Analyst1", "First analyst")
analyst2 = create_test_agent("Analyst2", "Second analyst")
analyst3 = create_test_agent("Analyst3", "Third analyst")
workflow.add_node(coordinator)
workflow.add_node(analyst1)
workflow.add_node(analyst2)
workflow.add_node(analyst3)
workflow.add_edges_from_source(
coordinator, [analyst1, analyst2, analyst3]
)
workflow.compile()
assert len(workflow._sorted_layers) == 2
assert len(workflow._sorted_layers[0]) == 1
assert "Coordinator" in workflow._sorted_layers[0]
assert len(workflow._sorted_layers[1]) == 3
assert "Analyst1" in workflow._sorted_layers[1]
assert "Analyst2" in workflow._sorted_layers[1]
assert "Analyst3" in workflow._sorted_layers[1]
def test_rustworkx_topological_generations_fan_in(self):
"""Test topological generations with fan-in pattern"""
workflow = GraphWorkflow(
name="FanIn-Test", backend="rustworkx"
)
analyst1 = create_test_agent("Analyst1", "First analyst")
analyst2 = create_test_agent("Analyst2", "Second analyst")
analyst3 = create_test_agent("Analyst3", "Third analyst")
synthesizer = create_test_agent("Synthesizer", "Synthesizes")
workflow.add_node(analyst1)
workflow.add_node(analyst2)
workflow.add_node(analyst3)
workflow.add_node(synthesizer)
workflow.add_edges_to_target(
[analyst1, analyst2, analyst3], synthesizer
)
workflow.compile()
assert len(workflow._sorted_layers) == 2
assert len(workflow._sorted_layers[0]) == 3
assert "Analyst1" in workflow._sorted_layers[0]
assert "Analyst2" in workflow._sorted_layers[0]
assert "Analyst3" in workflow._sorted_layers[0]
assert len(workflow._sorted_layers[1]) == 1
assert "Synthesizer" in workflow._sorted_layers[1]
def test_rustworkx_topological_generations_complex(self):
"""Test topological generations with complex topology"""
workflow = GraphWorkflow(
name="Complex-Test", backend="rustworkx"
)
agents = [
create_test_agent(f"Agent{i}", f"Agent {i}")
for i in range(6)
]
for agent in agents:
workflow.add_node(agent)
# Create: Agent0 -> Agent1, Agent2
# Agent1, Agent2 -> Agent3
# Agent3 -> Agent4, Agent5
workflow.add_edge(agents[0], agents[1])
workflow.add_edge(agents[0], agents[2])
workflow.add_edge(agents[1], agents[3])
workflow.add_edge(agents[2], agents[3])
workflow.add_edge(agents[3], agents[4])
workflow.add_edge(agents[3], agents[5])
workflow.compile()
assert len(workflow._sorted_layers) == 4
assert "Agent0" in workflow._sorted_layers[0]
assert (
"Agent1" in workflow._sorted_layers[1]
or "Agent2" in workflow._sorted_layers[1]
)
assert "Agent3" in workflow._sorted_layers[2]
assert (
"Agent4" in workflow._sorted_layers[3]
or "Agent5" in workflow._sorted_layers[3]
)
def test_rustworkx_predecessors(self):
"""Test predecessor retrieval"""
workflow = GraphWorkflow(
name="Predecessors-Test", backend="rustworkx"
)
agent1 = create_test_agent("Agent1", "First agent")
agent2 = create_test_agent("Agent2", "Second agent")
agent3 = create_test_agent("Agent3", "Third agent")
workflow.add_node(agent1)
workflow.add_node(agent2)
workflow.add_node(agent3)
workflow.add_edge(agent1, agent2)
workflow.add_edge(agent2, agent3)
predecessors = list(
workflow.graph_backend.predecessors("Agent2")
)
assert "Agent1" in predecessors
assert len(predecessors) == 1
predecessors = list(
workflow.graph_backend.predecessors("Agent3")
)
assert "Agent2" in predecessors
assert len(predecessors) == 1
predecessors = list(
workflow.graph_backend.predecessors("Agent1")
)
assert len(predecessors) == 0
def test_rustworkx_descendants(self):
"""Test descendant retrieval"""
workflow = GraphWorkflow(
name="Descendants-Test", backend="rustworkx"
)
agent1 = create_test_agent("Agent1", "First agent")
agent2 = create_test_agent("Agent2", "Second agent")
agent3 = create_test_agent("Agent3", "Third agent")
workflow.add_node(agent1)
workflow.add_node(agent2)
workflow.add_node(agent3)
workflow.add_edge(agent1, agent2)
workflow.add_edge(agent2, agent3)
descendants = workflow.graph_backend.descendants("Agent1")
assert "Agent2" in descendants
assert "Agent3" in descendants
assert len(descendants) == 2
descendants = workflow.graph_backend.descendants("Agent2")
assert "Agent3" in descendants
assert len(descendants) == 1
descendants = workflow.graph_backend.descendants("Agent3")
assert len(descendants) == 0
def test_rustworkx_in_degree(self):
"""Test in-degree calculation"""
workflow = GraphWorkflow(
name="InDegree-Test", backend="rustworkx"
)
agent1 = create_test_agent("Agent1", "First agent")
agent2 = create_test_agent("Agent2", "Second agent")
agent3 = create_test_agent("Agent3", "Third agent")
workflow.add_node(agent1)
workflow.add_node(agent2)
workflow.add_node(agent3)
workflow.add_edge(agent1, agent2)
workflow.add_edge(agent3, agent2)
assert workflow.graph_backend.in_degree("Agent1") == 0
assert workflow.graph_backend.in_degree("Agent2") == 2
assert workflow.graph_backend.in_degree("Agent3") == 0
def test_rustworkx_out_degree(self):
    """Out-degree counts when one node fans out to two others."""
    workflow = GraphWorkflow(
        name="OutDegree-Test", backend="rustworkx"
    )
    a1 = create_test_agent("Agent1", "First agent")
    a2 = create_test_agent("Agent2", "Second agent")
    a3 = create_test_agent("Agent3", "Third agent")
    for node in (a1, a2, a3):
        workflow.add_node(node)
    # Agent1 fans out to both Agent2 and Agent3.
    workflow.add_edge(a1, a2)
    workflow.add_edge(a1, a3)
    backend = workflow.graph_backend
    expected = {"Agent1": 2, "Agent2": 0, "Agent3": 0}
    for node_id, degree in expected.items():
        assert backend.out_degree(node_id) == degree
def test_rustworkx_agent_objects_in_edges(self):
    """Edge helpers accept Agent instances, not just string node ids."""
    workflow = GraphWorkflow(
        name="AgentObjects-Test", backend="rustworkx"
    )
    a1 = create_test_agent("Agent1", "First agent")
    a2 = create_test_agent("Agent2", "Second agent")
    a3 = create_test_agent("Agent3", "Third agent")
    for node in (a1, a2, a3):
        workflow.add_node(node)
    # Pass the Agent objects themselves to the bulk-edge helpers.
    workflow.add_edges_from_source(a1, [a2, a3])
    workflow.add_edges_to_target([a2, a3], a1)
    workflow.compile()
    # 2 fan-out edges + 2 fan-in edges were registered.
    assert len(workflow.edges) == 4
    assert len(workflow._sorted_layers) >= 1
def test_rustworkx_parallel_chain(self):
    """add_parallel_chain fully connects a source layer to a target layer."""
    workflow = GraphWorkflow(
        name="ParallelChain-Test", backend="rustworkx"
    )
    sources, targets = [], []
    for i in range(3):
        sources.append(
            create_test_agent(f"Source{i}", f"Source {i}")
        )
        targets.append(
            create_test_agent(f"Target{i}", f"Target {i}")
        )
    # Register all sources first, then all targets.
    for node in sources:
        workflow.add_node(node)
    for node in targets:
        workflow.add_node(node)
    workflow.add_parallel_chain(sources, targets)
    workflow.compile()
    # Every source connects to every target: 3 * 3 edges, two layers.
    assert len(workflow.edges) == 9
    assert len(workflow._sorted_layers) == 2
def test_rustworkx_large_scale(self):
    """A 20-node linear chain compiles into 20 singleton layers."""
    workflow = GraphWorkflow(
        name="LargeScale-Test", backend="rustworkx"
    )
    chain = [
        create_test_agent(f"Agent{i}", f"Agent {i}")
        for i in range(20)
    ]
    for node in chain:
        workflow.add_node(node)
    # Wire consecutive pairs into a single linear path.
    for upstream, downstream in zip(chain, chain[1:]):
        workflow.add_edge(upstream, downstream)
    workflow.compile()
    assert len(workflow.nodes) == 20
    assert len(workflow.edges) == 19
    assert len(workflow._sorted_layers) == 20
def test_rustworkx_reverse(self):
    """Test graph reversal"""
    workflow = GraphWorkflow(
        name="Reverse-Test", backend="rustworkx"
    )
    agent1 = create_test_agent("Agent1", "First agent")
    agent2 = create_test_agent("Agent2", "Second agent")
    workflow.add_node(agent1)
    workflow.add_node(agent2)
    workflow.add_edge(agent1, agent2)
    reversed_backend = workflow.graph_backend.reverse()
    # The original edge Agent1 -> Agent2 becomes Agent2 -> Agent1,
    # so in the reversed graph Agent1 gains Agent2 as a predecessor.
    preds = list(reversed_backend.predecessors("Agent1"))
    assert "Agent2" in preds
    # Agent2 (the original source) has no predecessors once reversed.
    preds = list(reversed_backend.predecessors("Agent2"))
    assert len(preds) == 0
def test_rustworkx_entry_end_points(self):
    """Auto-detection of entry and end points on a linear chain."""
    workflow = GraphWorkflow(
        name="EntryEnd-Test", backend="rustworkx"
    )
    head = create_test_agent("Agent1", "Entry agent")
    middle = create_test_agent("Agent2", "Middle agent")
    tail = create_test_agent("Agent3", "End agent")
    for node in (head, middle, tail):
        workflow.add_node(node)
    workflow.add_edge(head, middle)
    workflow.add_edge(middle, tail)
    workflow.auto_set_entry_points()
    workflow.auto_set_end_points()
    # The chain head is an entry point and the chain tail an end point.
    assert "Agent1" in workflow.entry_points
    assert "Agent3" in workflow.end_points
    backend = workflow.graph_backend
    assert backend.in_degree("Agent1") == 0
    assert backend.out_degree("Agent3") == 0
def test_rustworkx_isolated_nodes(self):
    """A node with no edges still survives compilation."""
    workflow = GraphWorkflow(
        name="Isolated-Test", backend="rustworkx"
    )
    connected = create_test_agent("Agent1", "Connected agent")
    loner = create_test_agent("Agent2", "Isolated agent")
    workflow.add_node(connected)
    workflow.add_node(loner)
    # Give Agent1 a self-loop; Agent2 stays completely disconnected.
    workflow.add_edge(connected, connected)
    workflow.compile()
    assert len(workflow.nodes) == 2
    assert "Agent2" in workflow.nodes
def test_rustworkx_workflow_execution(self):
    """End-to-end run() over a two-agent rustworkx workflow."""
    workflow = GraphWorkflow(
        name="Execution-Test", backend="rustworkx"
    )
    first = create_test_agent("Agent1", "First agent")
    second = create_test_agent("Agent2", "Second agent")
    workflow.add_node(first)
    workflow.add_node(second)
    workflow.add_edge(first, second)
    outcome = workflow.run("Test task")
    # Both agents must be represented in the aggregated result.
    assert outcome is not None
    assert "Agent1" in outcome
    assert "Agent2" in outcome
def test_rustworkx_compilation_caching(self):
    """Recompiling an unchanged workflow reuses the cached layers.

    Compiles twice and checks the compiled flag stays set and the
    sorted layers are identical across both calls.
    """
    workflow = GraphWorkflow(
        name="Cache-Test", backend="rustworkx"
    )
    agent1 = create_test_agent("Agent1", "First agent")
    agent2 = create_test_agent("Agent2", "Second agent")
    workflow.add_node(agent1)
    workflow.add_node(agent2)
    workflow.add_edge(agent1, agent2)
    # First compilation populates the cache.
    workflow.compile()
    layers1 = workflow._sorted_layers.copy()
    compiled1 = workflow._compiled
    # Second compilation should be a cache hit.
    workflow.compile()
    layers2 = workflow._sorted_layers.copy()
    compiled2 = workflow._compiled
    # Truthiness asserts instead of `== True` (E712 anti-pattern).
    assert compiled1
    assert compiled2
    assert layers1 == layers2
def test_rustworkx_node_metadata(self):
    """Metadata passed to add_node lands in the stored node payload."""
    workflow = GraphWorkflow(
        name="Metadata-Test", backend="rustworkx"
    )
    member = create_test_agent("Agent", "Test agent")
    workflow.add_node(
        member, metadata={"priority": "high", "timeout": 60}
    )
    backend = workflow.graph_backend
    # Resolve the internal rustworkx index, then read the payload dict.
    payload = backend.graph[backend._node_id_to_index["Agent"]]
    assert isinstance(payload, dict)
    expected = {"node_id": "Agent", "priority": "high", "timeout": 60}
    for key, value in expected.items():
        assert payload.get(key) == value
def test_rustworkx_edge_metadata(self):
    """Keyword arguments to add_edge are preserved as edge metadata."""
    workflow = GraphWorkflow(
        name="EdgeMetadata-Test", backend="rustworkx"
    )
    src = create_test_agent("Agent1", "First agent")
    dst = create_test_agent("Agent2", "Second agent")
    workflow.add_node(src)
    workflow.add_node(dst)
    workflow.add_edge(src, dst, weight=5, label="test")
    assert len(workflow.edges) == 1
    metadata = workflow.edges[0].metadata
    assert metadata.get("weight") == 5
    assert metadata.get("label") == "test"
@pytest.mark.skipif(
    not RUSTWORKX_AVAILABLE, reason="rustworkx not available"
)
class TestRustworkxPerformance:
    """Performance-oriented checks for the rustworkx backend."""

    def test_rustworkx_large_graph_compilation(self):
        """A 50-node linear chain should compile well under a second."""
        import time

        workflow = GraphWorkflow(
            name="LargeGraph-Test", backend="rustworkx"
        )
        chain = [
            create_test_agent(f"Agent{i}", f"Agent {i}")
            for i in range(50)
        ]
        for node in chain:
            workflow.add_node(node)
        # Wire consecutive pairs into a single linear path.
        for upstream, downstream in zip(chain, chain[1:]):
            workflow.add_edge(upstream, downstream)
        started = time.time()
        workflow.compile()
        elapsed = time.time() - started
        # Coarse upper bound; compilation should be near-instant.
        assert elapsed < 1.0
        assert len(workflow._sorted_layers) == 50

    def test_rustworkx_many_predecessors(self):
        """A node with 100 incoming edges reports every predecessor."""
        workflow = GraphWorkflow(
            name="ManyPreds-Test", backend="rustworkx"
        )
        sink = create_test_agent("Target", "Target agent")
        feeders = [
            create_test_agent(f"Source{i}", f"Source {i}")
            for i in range(100)
        ]
        workflow.add_node(sink)
        for feeder in feeders:
            workflow.add_node(feeder)
        workflow.add_edges_to_target(feeders, sink)
        workflow.compile()
        incoming = list(
            workflow.graph_backend.predecessors("Target")
        )
        assert len(incoming) == 100
@pytest.mark.skipif(
    not RUSTWORKX_AVAILABLE, reason="rustworkx not available"
)
class TestRustworkxEdgeCases:
    """Degenerate-graph checks for the rustworkx backend."""

    def test_rustworkx_empty_graph(self):
        """Compiling with no nodes yields no layers."""
        workflow = GraphWorkflow(
            name="Empty-Test", backend="rustworkx"
        )
        workflow.compile()
        assert len(workflow._sorted_layers) == 0
        assert len(workflow.nodes) == 0

    def test_rustworkx_single_node(self):
        """One node and no edges compiles to a single singleton layer."""
        workflow = GraphWorkflow(
            name="Single-Test", backend="rustworkx"
        )
        workflow.add_node(create_test_agent("Agent", "Single agent"))
        workflow.compile()
        assert len(workflow._sorted_layers) == 1
        assert workflow._sorted_layers[0] == ["Agent"]

    def test_rustworkx_self_loop(self):
        """A self-loop contributes one in-degree and one out-degree."""
        workflow = GraphWorkflow(
            name="SelfLoop-Test", backend="rustworkx"
        )
        looper = create_test_agent("Agent", "Self-looping agent")
        workflow.add_node(looper)
        workflow.add_edge(looper, looper)
        workflow.compile()
        backend = workflow.graph_backend
        assert len(workflow.edges) == 1
        assert backend.in_degree("Agent") == 1
        assert backend.out_degree("Agent") == 1

    def test_rustworkx_duplicate_edge(self):
        """Adding the same edge twice keeps both copies and still compiles."""
        workflow = GraphWorkflow(
            name="Duplicate-Test", backend="rustworkx"
        )
        src = create_test_agent("Agent1", "First agent")
        dst = create_test_agent("Agent2", "Second agent")
        workflow.add_node(src)
        workflow.add_node(dst)
        # The workflow records every add_edge call, duplicates included.
        workflow.add_edge(src, dst)
        workflow.add_edge(src, dst)
        assert len(workflow.edges) == 2
        workflow.compile()  # must not raise
if __name__ == "__main__":
    # Allow running this module directly as a verbose pytest session.
    pytest.main(["-v", __file__])

@ -0,0 +1,95 @@
# LLM Council Examples
This directory contains examples demonstrating the LLM Council pattern, inspired by Andrej Karpathy's llm-council implementation. The LLM Council uses multiple specialized AI agents that:
1. Each respond independently to queries
2. Review and rank each other's anonymized responses
3. Have a Chairman synthesize all responses into a final comprehensive answer
## Examples
### Marketing & Business
- **marketing_strategy_council.py** - Marketing strategy analysis and recommendations
- **business_strategy_council.py** - Comprehensive business strategy development
### Finance & Investment
- **finance_analysis_council.py** - Financial analysis and investment recommendations
- **etf_stock_analysis_council.py** - ETF and stock analysis with portfolio recommendations
### Medical & Healthcare
- **medical_treatment_council.py** - Medical treatment recommendations and care plans
- **medical_diagnosis_council.py** - Diagnostic analysis based on symptoms
### Technology & Research
- **technology_assessment_council.py** - Technology evaluation and implementation strategy
- **research_analysis_council.py** - Comprehensive research analysis on complex topics
### Legal
- **legal_analysis_council.py** - Legal implications and compliance analysis
## Usage
Each example follows the same pattern:
```python
from swarms.structs.llm_council import LLMCouncil
# Create the council
council = LLMCouncil(verbose=True)
# Run a query
result = council.run("Your query here")
# Access results
print(result["final_response"]) # Chairman's synthesized answer
print(result["original_responses"]) # Individual member responses
print(result["evaluations"]) # How members ranked each other
```
## Running Examples
Run any example directly:
```bash
python examples/multi_agent/llm_council_examples/marketing_strategy_council.py
python examples/multi_agent/llm_council_examples/finance_analysis_council.py
python examples/multi_agent/llm_council_examples/medical_diagnosis_council.py
```
## Key Features
- **Multiple Perspectives**: Each council member (GPT-5.1, Gemini, Claude, Grok) provides unique insights
- **Peer Review**: Members evaluate and rank each other's responses anonymously
- **Synthesis**: Chairman combines the best elements from all responses
- **Transparency**: See both individual responses and evaluation rankings
## Council Members
The default council consists of:
- **GPT-5.1-Councilor**: Analytical and comprehensive
- **Gemini-3-Pro-Councilor**: Concise and well-structured
- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced
- **Grok-4-Councilor**: Creative and innovative
## Customization
You can create custom council members:
```python
from swarms import Agent
from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt
custom_agent = Agent(
agent_name="Custom-Councilor",
system_prompt=get_gpt_councilor_prompt(),
model_name="gpt-4.1",
max_loops=1,
)
council = LLMCouncil(
council_members=[custom_agent, ...],
chairman_model="gpt-5.1",
verbose=True
)
```

@ -0,0 +1,31 @@
"""
LLM Council Example: Business Strategy Development

Asks the council to draft a full go-to-market strategy for a consumer
fintech startup, then prints the Chairman's synthesized answer.
"""

from swarms.structs.llm_council import LLMCouncil

# Assemble the default council with verbose progress output.
strategy_council = LLMCouncil(verbose=True)

# A deliberately broad brief so each councilor contributes a different angle.
strategy_brief = """
A tech startup wants to launch an AI-powered personal finance app targeting
millennials and Gen Z. Develop a comprehensive business strategy including:
1. Market opportunity and competitive landscape analysis
2. Product positioning and unique value proposition
3. Go-to-market strategy and customer acquisition plan
4. Revenue model and pricing strategy
5. Key partnerships and distribution channels
6. Resource requirements and funding needs
7. Risk assessment and mitigation strategies
8. Success metrics and KPIs for first 12 months
"""

council_reply = strategy_council.run(strategy_brief)

# Only the Chairman's synthesis is printed here.
print(council_reply["final_response"])

@ -0,0 +1,29 @@
"""
LLM Council Example: ETF Stock Analysis

Asks the council to analyze energy-sector ETFs and individual stocks,
then prints the Chairman's synthesized recommendation.
"""

from swarms.structs.llm_council import LLMCouncil

# Assemble the default council with verbose progress output.
etf_council = LLMCouncil(verbose=True)

# Investment brief covering ETFs, single stocks, and allocation.
etf_brief = """
Analyze the top energy ETFs (including nuclear, solar, gas, and renewable energy)
and provide:
1. Top 5 best-performing energy stocks across all energy sectors
2. ETF recommendations for diversified energy exposure
3. Risk-return profiles for each recommendation
4. Current market conditions affecting energy investments
5. Allocation strategy for a $100,000 portfolio
6. Key metrics to track for each investment
"""

council_reply = etf_council.run(etf_brief)

# Only the Chairman's synthesis is printed here.
print(council_reply["final_response"])

@ -0,0 +1,29 @@
"""
LLM Council Example: Financial Analysis

Asks the council to assess emerging-market technology ETFs and prints
the Chairman's synthesized analysis.
"""

from swarms.structs.llm_council import LLMCouncil

# Assemble the default council with verbose progress output.
finance_council = LLMCouncil(verbose=True)

# Analysis brief for emerging-market tech exposure.
finance_brief = """
Provide a comprehensive financial analysis for investing in emerging markets
technology ETFs. Include:
1. Risk assessment and volatility analysis
2. Historical performance trends
3. Sector composition and diversification benefits
4. Comparison with developed market tech ETFs
5. Recommended allocation percentage for a moderate risk portfolio
6. Key factors to monitor going forward
"""

council_reply = finance_council.run(finance_brief)

# Only the Chairman's synthesis is printed here.
print(council_reply["final_response"])

@ -0,0 +1,31 @@
"""
LLM Council Example: Legal Analysis

Asks the council to assess the legal exposure of using AI-generated
marketing content, then prints the Chairman's synthesized opinion.
"""

from swarms.structs.llm_council import LLMCouncil

# Assemble the default council with verbose progress output.
legal_council = LLMCouncil(verbose=True)

# Legal brief covering IP, compliance, and risk mitigation.
legal_brief = """
A startup is considering using AI-generated content for their marketing materials.
Analyze the legal implications including:
1. Intellectual property rights and ownership of AI-generated content
2. Copyright and trademark considerations
3. Liability for AI-generated content that may be inaccurate or misleading
4. Compliance with advertising regulations (FTC, FDA, etc.)
5. Data privacy implications if using customer data to train models
6. Contractual considerations with AI service providers
7. Risk mitigation strategies
8. Best practices for legal compliance
"""

council_reply = legal_council.run(legal_brief)

# Only the Chairman's synthesis is printed here.
print(council_reply["final_response"])

@ -0,0 +1,12 @@
from swarms.structs.llm_council import LLMCouncil

# output_type="final" makes run() return only the Chairman's answer.
energy_council = LLMCouncil(verbose=True, output_type="final")

# One broad investment question; print whatever the council returns.
prompt = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?"
answer = energy_council.run(prompt)
print(answer)

@ -0,0 +1,28 @@
"""
LLM Council Example: Marketing Strategy Analysis

Asks the council to develop a launch marketing strategy for a solar
subscription startup, then prints the Chairman's synthesized plan.
"""

from swarms.structs.llm_council import LLMCouncil

# Assemble the default council with verbose progress output.
marketing_council = LLMCouncil(verbose=True)

# Marketing brief covering audience, messaging, channels, and timeline.
marketing_brief = """
Analyze the marketing strategy for a new sustainable energy startup launching
a solar panel subscription service. Provide recommendations on:
1. Target audience segmentation
2. Key messaging and value propositions
3. Marketing channels and budget allocation
4. Competitive positioning
5. Launch timeline and milestones
"""

council_reply = marketing_council.run(marketing_brief)

# Only the Chairman's synthesis is printed here.
print(council_reply["final_response"])

@ -0,0 +1,36 @@
"""
LLM Council Example: Medical Diagnosis Analysis

Presents a symptom picture to the council and prints the Chairman's
synthesized diagnostic assessment.
"""

from swarms.structs.llm_council import LLMCouncil

# Assemble the default council with verbose progress output.
diagnosis_council = LLMCouncil(verbose=True)

# Case vignette followed by the specific questions to answer.
case_brief = """
A 35-year-old patient presents with:
- Persistent fatigue for 3 months
- Unexplained weight loss (15 lbs)
- Night sweats
- Intermittent low-grade fever
- Swollen lymph nodes in neck and armpits
- Recent blood work shows elevated ESR and CRP
Provide:
1. Differential diagnosis with most likely conditions ranked
2. Additional diagnostic tests needed to confirm
3. Red flag symptoms requiring immediate attention
4. Possible causes and risk factors
5. Recommended next steps for the patient
6. When to seek emergency care
"""

council_reply = diagnosis_council.run(case_brief)

# Only the Chairman's synthesis is printed here.
print(council_reply["final_response"])

@ -0,0 +1,30 @@
"""
LLM Council Example: Medical Treatment Analysis

Asks the council for a treatment plan spanning several comorbid
conditions, then prints the Chairman's synthesized recommendation.
"""

from swarms.structs.llm_council import LLMCouncil

# Assemble the default council with verbose progress output.
treatment_council = LLMCouncil(verbose=True)

# Multi-condition case with explicit deliverables.
treatment_brief = """
A 45-year-old patient with Type 2 diabetes, hypertension, and early-stage
kidney disease needs treatment recommendations. Provide:
1. Comprehensive treatment plan addressing all conditions
2. Medication options with pros/cons for each condition
3. Lifestyle modifications and their expected impact
4. Monitoring schedule and key metrics to track
5. Potential drug interactions and contraindications
6. Expected outcomes and timeline for improvement
7. When to consider specialist referrals
"""

council_reply = treatment_council.run(treatment_brief)

# Only the Chairman's synthesis is printed here.
print(council_reply["final_response"])

@ -0,0 +1,31 @@
"""
LLM Council Example: Research Analysis

Asks the council for a long-horizon analysis of climate change and food
security, then prints the Chairman's synthesized report.
"""

from swarms.structs.llm_council import LLMCouncil

# Assemble the default council with verbose progress output.
research_council = LLMCouncil(verbose=True)

# Research brief spanning climate, economics, policy, and technology.
research_brief = """
Conduct a comprehensive analysis of the potential impact of climate change
on global food security over the next 20 years. Include:
1. Key climate factors affecting agriculture (temperature, precipitation, extreme weather)
2. Regional vulnerabilities and impacts on major food-producing regions
3. Crop yield projections and food availability scenarios
4. Economic implications and food price volatility
5. Adaptation strategies and technological solutions
6. Policy recommendations for governments and international organizations
7. Role of innovation in agriculture (precision farming, GMOs, vertical farming)
8. Social and geopolitical implications of food insecurity
"""

council_reply = research_council.run(research_brief)

# Only the Chairman's synthesis is printed here.
print(council_reply["final_response"])

@ -0,0 +1,31 @@
"""
LLM Council Example: Technology Assessment

Asks the council to evaluate quantum computing for financial services,
then prints the Chairman's synthesized assessment.
"""

from swarms.structs.llm_council import LLMCouncil

# Assemble the default council with verbose progress output.
tech_council = LLMCouncil(verbose=True)

# Assessment brief covering technology maturity, ROI, and rollout.
tech_brief = """
Evaluate the business potential and implementation strategy for integrating
quantum computing capabilities into a financial services company. Consider:
1. Current state of quantum computing technology
2. Specific use cases in financial services (risk modeling, portfolio optimization, fraud detection)
3. Competitive advantages and potential ROI
4. Implementation timeline and resource requirements
5. Technical challenges and limitations
6. Risk factors and mitigation strategies
7. Partnership opportunities with quantum computing providers
8. Expected timeline for practical business value
"""

council_reply = tech_council.run(tech_brief)

# Only the Chairman's synthesis is printed here.
print(council_reply["final_response"])

@ -26,7 +26,6 @@ router = SwarmRouter(
agents=agents,
swarm_type="SequentialWorkflow",
output_type="dict",
return_entire_history=False,
)
output = router.run("How are you doing?")

@ -0,0 +1,40 @@
from swarms import SwarmRouter, Agent

# Three domain specialists built from one shared lightweight configuration.
specialist_specs = [
    (
        "Research-Analyst",
        "Specialized in comprehensive research and data gathering",
    ),
    (
        "Data-Analyst",
        "Expert in data analysis and pattern recognition",
    ),
    (
        "Strategy-Consultant",
        "Specialized in strategic planning and recommendations",
    ),
]
specialists = [
    Agent(
        agent_name=name,
        agent_description=description,
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )
    for name, description in specialist_specs
]

# MajorityVoting runs every agent and keeps the consensus answer.
router = SwarmRouter(
    name="SwarmRouter",
    description="Routes tasks to specialized agents based on their capabilities",
    agents=specialists,
    swarm_type="MajorityVoting",
    max_loops=1,
    verbose=False,
)

result = router.run(
    "Conduct a research analysis on water stocks and etfs"
)
print(result)

@ -1,6 +0,0 @@
# RAG (Retrieval Augmented Generation) Examples
This directory contains examples demonstrating RAG implementations and vector database integrations in Swarms.
## Qdrant RAG
- [qdrant_rag_example.py](qdrant_rag_example.py) - Complete Qdrant RAG implementation

@ -1,98 +0,0 @@
"""
Agent with Qdrant RAG (Retrieval-Augmented Generation)
This example demonstrates using Qdrant as a vector database for RAG operations,
allowing agents to store and retrieve documents for enhanced context.
"""
from qdrant_client import QdrantClient, models
from swarms import Agent
from swarms_memory import QdrantDB
# Initialize Qdrant client
# Option 1: In-memory (for testing/development - data is not persisted)
# client = QdrantClient(":memory:")
# Option 2: Local Qdrant server
# client = QdrantClient(host="localhost", port=6333)
# Option 3: Qdrant Cloud (recommended for production)
import os
# NOTE(review): falls back to placeholder credentials when the env vars
# are unset — replace QDRANT_URL / QDRANT_API_KEY before running for real.
client = QdrantClient(
    url=os.getenv("QDRANT_URL", "https://your-cluster.qdrant.io"),
    api_key=os.getenv("QDRANT_API_KEY", "your-api-key"),
)
# Create QdrantDB wrapper for RAG operations
# (cosine distance, top-3 results per query)
rag_db = QdrantDB(
    client=client,
    embedding_model="text-embedding-3-small",
    collection_name="knowledge_base",
    distance=models.Distance.COSINE,
    n_results=3,
)
# Add documents to the knowledge base
documents = [
    "Qdrant is a vector database optimized for similarity search and AI applications.",
    "RAG combines retrieval and generation for more accurate AI responses.",
    "Vector embeddings enable semantic search across documents.",
    "The swarms framework supports multiple memory backends including Qdrant.",
]
# Method 1: Add documents individually (one embedding call per document)
for doc in documents:
    rag_db.add(doc)
# Method 2: Batch add documents (more efficient for large datasets)
# Example with metadata
# documents_with_metadata = [
#     "Machine learning is a subset of artificial intelligence.",
#     "Deep learning uses neural networks with multiple layers.",
#     "Natural language processing enables computers to understand human language.",
#     "Computer vision allows machines to interpret visual information.",
#     "Reinforcement learning learns through interaction with an environment."
# ]
#
# metadata = [
#     {"category": "AI", "difficulty": "beginner", "topic": "overview"},
#     {"category": "ML", "difficulty": "intermediate", "topic": "neural_networks"},
#     {"category": "NLP", "difficulty": "intermediate", "topic": "language"},
#     {"category": "CV", "difficulty": "advanced", "topic": "vision"},
#     {"category": "RL", "difficulty": "advanced", "topic": "learning"}
# ]
#
# # Batch add with metadata
# doc_ids = rag_db.batch_add(documents_with_metadata, metadata=metadata, batch_size=3)
# print(f"Added {len(doc_ids)} documents in batch")
#
# # Query with metadata return
# results_with_metadata = rag_db.query(
#     "What is artificial intelligence?",
#     n_results=3,
#     return_metadata=True
# )
#
# for i, result in enumerate(results_with_metadata):
#     print(f"\nResult {i+1}:")
#     print(f"  Document: {result['document']}")
#     print(f"  Category: {result['category']}")
#     print(f"  Difficulty: {result['difficulty']}")
#     print(f"  Topic: {result['topic']}")
#     print(f"  Score: {result['score']:.4f}")
# Create agent with RAG capabilities: the QdrantDB instance is attached
# as the agent's long-term memory so retrieval happens inside run().
agent = Agent(
    agent_name="RAG-Agent",
    agent_description="Agent with Qdrant-powered RAG for enhanced knowledge retrieval",
    model_name="gpt-4.1",
    max_loops=1,
    dynamic_temperature_enabled=True,
    long_term_memory=rag_db,
)
# Query with RAG
response = agent.run("What is Qdrant and how does it relate to RAG?")
print(response)

@ -2,11 +2,25 @@
This directory contains examples demonstrating advanced reasoning capabilities and agent evaluation systems in Swarms.
## Reasoning Agent Router Examples
The `reasoning_agent_router_examples/` folder contains simple examples for each agent type supported by the `ReasoningAgentRouter`:
- [reasoning_duo_example.py](reasoning_agent_router_examples/reasoning_duo_example.py) - Reasoning Duo agent for collaborative reasoning
- [self_consistency_example.py](reasoning_agent_router_examples/self_consistency_example.py) - Self-Consistency agent with multiple samples
- [ire_example.py](reasoning_agent_router_examples/ire_example.py) - Iterative Reflective Expansion (IRE) agent
- [agent_judge_example.py](reasoning_agent_router_examples/agent_judge_example.py) - Agent Judge for evaluation and judgment
- [reflexion_agent_example.py](reasoning_agent_router_examples/reflexion_agent_example.py) - Reflexion agent with memory capabilities
- [gkp_agent_example.py](reasoning_agent_router_examples/gkp_agent_example.py) - Generated Knowledge Prompting (GKP) agent
## Agent Judge Examples
The `agent_judge_examples/` folder contains detailed examples of the AgentJudge system:
- [example1_basic_evaluation.py](agent_judge_examples/example1_basic_evaluation.py) - Basic agent evaluation
- [example2_technical_evaluation.py](agent_judge_examples/example2_technical_evaluation.py) - Technical evaluation criteria
- [example3_creative_evaluation.py](agent_judge_examples/example3_creative_evaluation.py) - Creative evaluation patterns
## O3 Integration
- [example_o3.py](example_o3.py) - O3 model integration example
- [o3_agent.py](o3_agent.py) - O3 agent implementation
## Self-MoA Sequential Examples
- [moa_seq_example.py](moa_seq_example.py) - Self-MoA Sequential reasoning example for complex problem-solving

@ -1,18 +0,0 @@
from swarms.utils.litellm_wrapper import LiteLLM
# Initialize the LiteLLM wrapper with reasoning support
llm = LiteLLM(
    model_name="claude-sonnet-4-20250514",  # Anthropic Claude Sonnet 4 (reasoning-capable)
    reasoning_effort="low",  # enable reasoning at low effort
    temperature=1,
    max_tokens=2000,
    stream=False,
    thinking_tokens=1024,  # token budget for the model's thinking trace
)
# Example task that would benefit from reasoning
task = "Solve this step-by-step: A farmer has 17 sheep and all but 9 die. How many sheep does he have left?"
print("=== Running reasoning model ===")
response = llm.run(task)
print(response)

@ -0,0 +1,9 @@
from swarms.agents.reasoning_agents import ReasoningAgentRouter

# Route the task to an AgentJudge evaluator backed by a small model.
router = ReasoningAgentRouter(
    swarm_type="AgentJudge",
    model_name="gpt-4o-mini",
    max_loops=1,
)

result = router.run("Is Python a good programming language?")
# Print the judgment so the example produces visible output,
# consistent with the other reasoning-agent examples.
print(result)

@ -0,0 +1,9 @@
from swarms.agents.reasoning_agents import ReasoningAgentRouter

# Generated Knowledge Prompting: generate 3 knowledge items before answering.
router = ReasoningAgentRouter(
    swarm_type="GKPAgent",
    model_name="gpt-4o-mini",
    num_knowledge_items=3,
)

result = router.run("What is artificial intelligence?")
# Print the answer so the example produces visible output,
# consistent with the other reasoning-agent examples.
print(result)

@ -0,0 +1,10 @@
from swarms.agents.reasoning_agents import ReasoningAgentRouter

# Iterative Reflective Expansion (IRE) with a single sample.
ire_router = ReasoningAgentRouter(
    swarm_type="ire",
    model_name="gpt-4o-mini",
    num_samples=1,
)

answer = ire_router.run("Explain photosynthesis in one sentence.")
print(answer)

@ -0,0 +1,9 @@
from swarms.agents.reasoning_agents import ReasoningAgentRouter

# Reasoning Duo: a reasoning agent paired with an answering agent.
router = ReasoningAgentRouter(
    swarm_type="reasoning-duo",
    model_name="gpt-4o-mini",
    max_loops=1,
)

result = router.run("What is 2+2?")
# Print the answer so the example produces visible output,
# consistent with the other reasoning-agent examples.
print(result)

@ -0,0 +1,10 @@
from swarms.agents.reasoning_agents import ReasoningAgentRouter

# Reflexion agent with a bounded memory of 3 entries.
router = ReasoningAgentRouter(
    swarm_type="ReflexionAgent",
    model_name="gpt-4o-mini",
    max_loops=1,
    memory_capacity=3,
)

result = router.run("What is machine learning?")
# Print the answer so the example produces visible output,
# consistent with the other reasoning-agent examples.
print(result)

@ -0,0 +1,10 @@
from swarms.agents.reasoning_agents import ReasoningAgentRouter

# Self-consistency: sample 3 answers and keep the consensus.
router = ReasoningAgentRouter(
    swarm_type="self-consistency",
    model_name="gpt-4o-mini",
    max_loops=1,
    num_samples=3,
)

result = router.run("What is the capital of France?")
# Print the answer so the example produces visible output,
# consistent with the other reasoning-agent examples.
print(result)

@ -1,882 +1,91 @@
"""
Qdrant RAG Example with Document Ingestion
This example demonstrates how to use the agent structure from example.py with Qdrant RAG
to ingest a vast array of PDF documents and text files for advanced quantitative trading analysis.
Features:
- Document ingestion from multiple file types (PDF, TXT, MD)
- Qdrant vector database integration
- Sentence transformer embeddings
- Comprehensive document processing pipeline
- Agent with RAG capabilities for financial analysis
"""
import os
import uuid
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Union
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from qdrant_client import QdrantClient
from qdrant_client.http import models
from qdrant_client.http.models import Distance, VectorParams
from sentence_transformers import SentenceTransformer
from qdrant_client import QdrantClient, models
from swarms import Agent
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.utils.data_to_text import data_to_text
class DocumentProcessor:
"""
Handles document processing and text extraction from various file formats.
This class provides functionality to process PDF, TXT, and Markdown files,
extracting text content for vectorization and storage in the RAG system.
"""
def __init__(
    self, supported_extensions: Optional[List[str]] = None
):
    """Set up the processor with the file extensions it will accept.

    Args:
        supported_extensions: Extensions (with leading dot) this
            processor handles. Defaults to ['.pdf', '.txt', '.md'].
    """
    default_extensions = [".pdf", ".txt", ".md"]
    self.supported_extensions = (
        default_extensions
        if supported_extensions is None
        else supported_extensions
    )
def process_document(
self, file_path: Union[str, Path]
) -> Optional[Dict[str, str]]:
"""
Process a single document and extract its text content.
Args:
file_path: Path to the document file
Returns:
Dictionary containing document metadata and extracted text, or None if processing fails
"""
file_path = Path(file_path)
if not file_path.exists():
print(f"File not found: {file_path}")
return None
if file_path.suffix.lower() not in self.supported_extensions:
print(f"Unsupported file type: {file_path.suffix}")
return None
try:
# Extract text based on file type
if file_path.suffix.lower() == ".pdf":
try:
text_content = pdf_to_text(str(file_path))
except Exception as pdf_error:
print(f"Error extracting PDF text: {pdf_error}")
# Fallback: try to read as text file
with open(
file_path,
"r",
encoding="utf-8",
errors="ignore",
) as f:
text_content = f.read()
else:
try:
text_content = data_to_text(str(file_path))
except Exception as data_error:
print(f"Error extracting text: {data_error}")
# Fallback: try to read as text file
with open(
file_path,
"r",
encoding="utf-8",
errors="ignore",
) as f:
text_content = f.read()
# Ensure text_content is a string
if callable(text_content):
print(
f"Warning: {file_path} returned a callable, trying to call it..."
)
try:
text_content = text_content()
except Exception as call_error:
print(f"Error calling callable: {call_error}")
return None
if not text_content or not isinstance(text_content, str):
print(
f"No valid text content extracted from: {file_path}"
)
return None
# Clean the text content
text_content = str(text_content).strip()
return {
"file_path": str(file_path),
"file_name": file_path.name,
"file_type": file_path.suffix.lower(),
"text_content": text_content,
"file_size": file_path.stat().st_size,
"processed_at": datetime.utcnow().isoformat(),
}
except Exception as e:
print(f"Error processing {file_path}: {str(e)}")
return None
def process_directory(
self, directory_path: Union[str, Path], max_workers: int = 4
) -> List[Dict[str, str]]:
"""
Process all supported documents in a directory concurrently.
Args:
directory_path: Path to the directory containing documents
max_workers: Maximum number of concurrent workers for processing
Returns:
List of processed document dictionaries
"""
directory_path = Path(directory_path)
if not directory_path.is_dir():
print(f"Directory not found: {directory_path}")
return []
# Find all supported files
supported_files = []
for ext in self.supported_extensions:
supported_files.extend(directory_path.rglob(f"*{ext}"))
supported_files.extend(
directory_path.rglob(f"*{ext.upper()}")
)
if not supported_files:
print(f"No supported files found in: {directory_path}")
return []
print(f"Found {len(supported_files)} files to process")
# Process files concurrently
processed_documents = []
with ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_file = {
executor.submit(
self.process_document, file_path
): file_path
for file_path in supported_files
}
for future in concurrent.futures.as_completed(
future_to_file
):
file_path = future_to_file[future]
try:
result = future.result()
if result:
processed_documents.append(result)
print(f"Processed: {result['file_name']}")
except Exception as e:
print(f"Error processing {file_path}: {str(e)}")
print(
f"Successfully processed {len(processed_documents)} documents"
)
return processed_documents
class QdrantRAGMemory:
    """
    Qdrant-backed memory system for RAG with document storage.

    Documents are split into overlapping chunks, embedded with a
    SentenceTransformer model, and stored as points in a Qdrant
    collection for semantic search.
    """

    def __init__(
        self,
        collection_name: str = "document_memories",
        vector_size: int = 384,  # Default size for all-MiniLM-L6-v2
        url: Optional[str] = None,
        api_key: Optional[str] = None,
        chunk_size: int = 1000,
        chunk_overlap: int = 200,
    ):
        """
        Initialize the Qdrant RAG memory system.

        Args:
            collection_name: Name of the Qdrant collection to use.
            vector_size: Dimension of the embedding vectors; corrected
                at runtime to the model's actual output dimension.
            url: Optional Qdrant server URL (defaults to local in-memory).
            api_key: Optional Qdrant API key for cloud deployment.
            chunk_size: Size of text chunks for processing.
            chunk_overlap: Overlap between consecutive chunks.
        """
        self.collection_name = collection_name
        self.vector_size = vector_size
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

        # Use a remote server only when both URL and API key are given;
        # otherwise fall back to a non-persistent in-memory instance.
        if url and api_key:
            self.client = QdrantClient(url=url, api_key=api_key)
        else:
            self.client = QdrantClient(":memory:")

        self.embedding_model = SentenceTransformer("all-MiniLM-L6-v2")

        # Probe the model once and correct vector_size so the collection
        # schema matches the actual embedding dimension.
        sample_text = "Sample text for dimension check"
        sample_embedding = self.embedding_model.encode(sample_text)
        actual_dimension = len(sample_embedding)
        if actual_dimension != self.vector_size:
            print(
                f"Updating vector size from {self.vector_size} to {actual_dimension} to match model"
            )
            self.vector_size = actual_dimension

        self._create_collection()

    def _create_collection(self):
        """Create the Qdrant collection if it doesn't exist."""
        collections = self.client.get_collections().collections
        exists = any(
            col.name == self.collection_name for col in collections
        )
        if not exists:
            self.client.create_collection(
                collection_name=self.collection_name,
                vectors_config=VectorParams(
                    size=self.vector_size, distance=Distance.COSINE
                ),
            )
            print(
                f"Created Qdrant collection: {self.collection_name}"
            )

    def _chunk_text(self, text: str) -> List[str]:
        """
        Split text into overlapping chunks for better retrieval.

        Args:
            text: Text content to chunk (coerced to str if needed).

        Returns:
            List of chunks of at most ``chunk_size`` characters, with
            consecutive chunks overlapping by ``chunk_overlap``.
        """
        if not isinstance(text, str):
            text = str(text)

        if len(text) <= self.chunk_size:
            return [text]

        chunks = []
        start = 0
        while start < len(text):
            end = start + self.chunk_size

            # Prefer to end a chunk at a sentence boundary, looking back
            # at most 100 characters from the nominal cut point.
            if end < len(text):
                for i in range(end, max(start, end - 100), -1):
                    if text[i] in ".!?":
                        end = i + 1
                        break

            chunk = text[start:end].strip()
            if chunk:
                chunks.append(chunk)

            start = end - self.chunk_overlap
            if start >= len(text):
                break

        return chunks

    def add_document(
        self, document_data: Dict[str, str]
    ) -> List[str]:
        """
        Add a document to the memory system with chunking.

        Args:
            document_data: Dict as produced by DocumentProcessor
                (keys: file_name, file_path, file_type, text_content,
                file_size, ...).

        Returns:
            List of memory IDs for the stored chunks.
        """
        text_content = document_data["text_content"]

        if not isinstance(text_content, str):
            print(
                f"Warning: text_content is not a string: {type(text_content)}"
            )
            text_content = str(text_content)

        chunks = self._chunk_text(text_content)
        memory_ids = []

        for i, chunk in enumerate(chunks):
            embedding = self.embedding_model.encode(chunk).tolist()

            metadata = {
                "document_name": document_data["file_name"],
                "document_path": document_data["file_path"],
                "document_type": document_data["file_type"],
                "chunk_index": i,
                "total_chunks": len(chunks),
                "chunk_text": chunk,
                # Timezone-aware UTC timestamp; datetime.utcnow() is
                # deprecated since Python 3.12.
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "file_size": document_data["file_size"],
            }

            memory_id = str(uuid.uuid4())
            self.client.upsert(
                collection_name=self.collection_name,
                points=[
                    models.PointStruct(
                        id=memory_id,
                        payload=metadata,
                        vector=embedding,
                    )
                ],
            )
            memory_ids.append(memory_id)

        print(
            f"Added document '{document_data['file_name']}' with {len(chunks)} chunks"
        )
        return memory_ids

    def add_documents_batch(
        self, documents: List[Dict[str, str]]
    ) -> List[str]:
        """
        Add multiple documents to the memory system.

        Args:
            documents: List of document dictionaries.

        Returns:
            List of all memory IDs created across the batch.
        """
        all_memory_ids = []
        for document in documents:
            all_memory_ids.extend(self.add_document(document))
        return all_memory_ids

    def add(self, text: str, metadata: Optional[Dict] = None) -> str:
        """
        Add a text entry to the memory system (required by Swarms interface).

        Args:
            text: The text content to add.
            metadata: Optional metadata for the entry; merged over the
                defaults (so caller keys win).

        Returns:
            str: ID of the stored memory.
        """
        if metadata is None:
            metadata = {}

        embedding = self.embedding_model.encode(text).tolist()

        memory_metadata = {
            "text": text,
            # Timezone-aware UTC timestamp; datetime.utcnow() is
            # deprecated since Python 3.12.
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "source": "agent_memory",
        }
        memory_metadata.update(metadata)

        memory_id = str(uuid.uuid4())
        self.client.upsert(
            collection_name=self.collection_name,
            points=[
                models.PointStruct(
                    id=memory_id,
                    payload=memory_metadata,
                    vector=embedding,
                )
            ],
        )
        return memory_id

    def query(
        self,
        query_text: str,
        limit: int = 5,
        score_threshold: float = 0.7,
        include_metadata: bool = True,
    ) -> List[Dict]:
        """
        Query memories based on text similarity.

        Args:
            query_text: The text query to search for.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score threshold.
            include_metadata: Whether to include full metadata in results.

        Returns:
            List of matching memories with their metadata; empty list on
            error or when the collection holds no points.
        """
        try:
            # Short-circuit when the collection is empty.
            collection_info = self.client.get_collection(
                self.collection_name
            )
            if collection_info.points_count == 0:
                print(
                    "Warning: Collection is empty, no documents to query"
                )
                return []

            query_embedding = self.embedding_model.encode(
                query_text
            ).tolist()

            # NOTE(review): `search` is deprecated in newer qdrant-client
            # releases in favor of `query_points` — consider migrating
            # once the pinned client version supports it.
            results = self.client.search(
                collection_name=self.collection_name,
                query_vector=query_embedding,
                limit=limit,
                score_threshold=score_threshold,
            )

            memories = []
            for res in results:
                memory = res.payload.copy()
                memory["similarity_score"] = res.score
                if not include_metadata:
                    # Keep only the essential fields.
                    memory = {
                        "chunk_text": memory.get("chunk_text", ""),
                        "document_name": memory.get(
                            "document_name", ""
                        ),
                        "similarity_score": memory[
                            "similarity_score"
                        ],
                    }
                memories.append(memory)

            return memories

        except Exception as e:
            print(f"Error querying collection: {e}")
            return []

    def get_collection_stats(self) -> Dict:
        """
        Get statistics about the collection.

        Returns:
            Dictionary containing collection statistics; empty on error.
        """
        try:
            collection_info = self.client.get_collection(
                self.collection_name
            )
            return {
                "collection_name": self.collection_name,
                "vector_size": collection_info.config.params.vectors.size,
                "distance": collection_info.config.params.vectors.distance,
                "points_count": collection_info.points_count,
            }
        except Exception as e:
            print(f"Error getting collection stats: {e}")
            return {}

    def clear_collection(self):
        """Drop and recreate the collection, removing all stored memories."""
        self.client.delete_collection(self.collection_name)
        self._create_collection()
        print(f"Cleared collection: {self.collection_name}")
class QuantitativeTradingRAGAgent:
    """
    Advanced quantitative trading agent with RAG capabilities for document analysis.

    Wires together a DocumentProcessor (text extraction), a
    QdrantRAGMemory (chunked vector storage and search), and a swarms
    Agent configured with a quantitative-trading system prompt that uses
    the memory as its long-term store.
    """

    def __init__(
        self,
        agent_name: str = "Quantitative-Trading-RAG-Agent",
        collection_name: str = "financial_documents",
        qdrant_url: Optional[str] = None,
        qdrant_api_key: Optional[str] = None,
        model_name: str = "claude-sonnet-4-20250514",
        max_loops: int = 1,
        chunk_size: int = 1000,
        chunk_overlap: int = 200,
    ):
        """
        Initialize the Quantitative Trading RAG Agent.

        Args:
            agent_name: Name of the agent.
            collection_name: Name of the Qdrant collection.
            qdrant_url: Optional Qdrant server URL (in-memory if unset).
            qdrant_api_key: Optional Qdrant API key.
            model_name: LLM model to use.
            max_loops: Maximum number of agent loops.
            chunk_size: Size of text chunks for processing.
            chunk_overlap: Overlap between consecutive chunks.
        """
        self.agent_name = agent_name
        self.collection_name = collection_name

        # Initialize document processor
        self.document_processor = DocumentProcessor()

        # Initialize Qdrant RAG memory
        self.rag_memory = QdrantRAGMemory(
            collection_name=collection_name,
            url=qdrant_url,
            api_key=qdrant_api_key,
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
        )

        # Initialize the agent with RAG capabilities; the memory above is
        # passed as long_term_memory so the Agent can retrieve from it.
        self.agent = Agent(
            agent_name=agent_name,
            agent_description="Advanced quantitative trading and algorithmic analysis agent with RAG capabilities",
            system_prompt="""You are an expert quantitative trading agent with deep expertise in:
- Algorithmic trading strategies and implementation
- Statistical arbitrage and market making
- Risk management and portfolio optimization
- High-frequency trading systems
- Market microstructure analysis
- Quantitative research methodologies
- Financial mathematics and stochastic processes
- Machine learning applications in trading
Your core responsibilities include:
1. Developing and backtesting trading strategies
2. Analyzing market data and identifying alpha opportunities
3. Implementing risk management frameworks
4. Optimizing portfolio allocations
5. Conducting quantitative research
6. Monitoring market microstructure
7. Evaluating trading system performance
You have access to a comprehensive document database through RAG (Retrieval-Augmented Generation).
When answering questions, you can search through this database to find relevant information
and provide evidence-based responses.
You maintain strict adherence to:
- Mathematical rigor in all analyses
- Statistical significance in strategy development
- Risk-adjusted return optimization
- Market impact minimization
- Regulatory compliance
- Transaction cost analysis
- Performance attribution
You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
            model_name=model_name,
            dynamic_temperature_enabled=True,
            output_type="str-all-except-first",
            max_loops=max_loops,
            dynamic_context_window=True,
            long_term_memory=self.rag_memory,
        )

    def ingest_documents(
        self, documents_path: Union[str, Path]
    ) -> int:
        """
        Ingest documents from a directory into the RAG system.

        Args:
            documents_path: Path to directory containing documents.

        Returns:
            Number of documents successfully ingested (0 on any error —
            failures are printed, not raised).
        """
        print(f"Starting document ingestion from: {documents_path}")
        try:
            # Extract text from every supported file in the directory.
            processed_documents = (
                self.document_processor.process_directory(
                    documents_path
                )
            )
            if not processed_documents:
                print("No documents to ingest")
                return 0
            # Chunk, embed, and store everything in Qdrant.
            memory_ids = self.rag_memory.add_documents_batch(
                processed_documents
            )
            print(
                f"Successfully ingested {len(processed_documents)} documents"
            )
            print(f"Created {len(memory_ids)} memory chunks")
            return len(processed_documents)
        except Exception as e:
            print(f"Error during document ingestion: {e}")
            import traceback

            traceback.print_exc()
            return 0

    def query_documents(
        self, query: str, limit: int = 5
    ) -> List[Dict]:
        """
        Query the document database for relevant information.

        Args:
            query: The query text.
            limit: Maximum number of results to return.

        Returns:
            List of relevant document chunks with similarity scores.
        """
        return self.rag_memory.query(query, limit=limit)

    def run_analysis(self, task: str) -> str:
        """
        Run a financial analysis task using the agent with RAG capabilities.

        Args:
            task: The analysis task to perform.

        Returns:
            Agent's response to the task.
        """
        print(f"Running analysis task: {task}")
        # Retrieve the top matching chunks and inline them into the
        # prompt so the LLM sees the supporting evidence explicitly.
        relevant_docs = self.query_documents(task, limit=3)
        if relevant_docs:
            context = "\n\nRelevant Document Information:\n"
            for i, doc in enumerate(relevant_docs, 1):
                context += f"\nDocument {i}: {doc.get('document_name', 'Unknown')}\n"
                context += f"Relevance Score: {doc.get('similarity_score', 0):.3f}\n"
                # Only the first 500 characters of each chunk are
                # included to bound the prompt size.
                context += (
                    f"Content: {doc.get('chunk_text', '')[:500]}...\n"
                )
            enhanced_task = f"{task}\n\n{context}"
        else:
            enhanced_task = task
        # Run the agent
        response = self.agent.run(enhanced_task)
        return response

    def get_database_stats(self) -> Dict:
        """
        Get statistics about the document database.

        Returns:
            Dictionary containing database statistics.
        """
        return self.rag_memory.get_collection_stats()
def main():
    """
    Demonstrate the Qdrant RAG agent end to end.

    Walks through initialization, document ingestion (falling back to a
    built-in sample document when no ./documents directory exists),
    retrieval queries, an LLM analysis pass, and collection statistics.
    Every step tolerates failure so the demo proceeds as far as the
    environment (API keys, Qdrant availability) allows.
    """
    # Example usage
    print("🚀 Initializing Quantitative Trading RAG Agent...")

    # Initialize the agent (set QDRANT_URL / QDRANT_API_KEY environment
    # variables to target a Qdrant Cloud cluster instead of in-memory).
    agent = QuantitativeTradingRAGAgent(
        agent_name="Quantitative-Trading-RAG-Agent",
        collection_name="financial_documents",
        qdrant_url=os.getenv(
            "QDRANT_URL"
        ),  # Optional: For cloud deployment
        qdrant_api_key=os.getenv(
            "QDRANT_API_KEY"
        ),  # Optional: For cloud deployment
        model_name="claude-sonnet-4-20250514",
        max_loops=1,
        chunk_size=1000,
        chunk_overlap=200,
    )

    # Ingest documents from ./documents if present; otherwise seed the
    # memory with a sample document so the queries below have data.
    documents_path = "documents"  # Path to your documents
    if os.path.exists(documents_path):
        print(f"Found documents directory: {documents_path}")
        try:
            agent.ingest_documents(documents_path)
        except Exception as e:
            print(f"Error ingesting documents: {e}")
            print("Continuing without document ingestion...")
    else:
        print(f"Documents directory not found: {documents_path}")
        print("Creating a sample document for demonstration...")
        try:
            sample_doc = {
                "file_path": "sample_financial_analysis.txt",
                "file_name": "sample_financial_analysis.txt",
                "file_type": ".txt",
                "text_content": """
Gold ETFs: A Comprehensive Investment Guide
Gold ETFs (Exchange-Traded Funds) provide investors with exposure to gold prices
without the need to physically store the precious metal. These funds track the
price of gold and offer several advantages including liquidity, diversification,
and ease of trading.
Top Gold ETFs include:
1. SPDR Gold Shares (GLD) - Largest gold ETF with high liquidity
2. iShares Gold Trust (IAU) - Lower expense ratio alternative
3. Aberdeen Standard Physical Gold ETF (SGOL) - Swiss storage option
Investment strategies for gold ETFs:
- Portfolio diversification (5-10% allocation)
- Inflation hedge
- Safe haven during market volatility
- Tactical trading opportunities
Market analysis shows that gold has historically served as a store of value
and hedge against inflation. Recent market conditions have increased interest
in gold investments due to economic uncertainty and geopolitical tensions.
""",
                "file_size": 1024,
                # Timezone-aware UTC timestamp; datetime.utcnow() is
                # deprecated since Python 3.12. The redundant local
                # `from datetime import datetime` was removed — the
                # module-level import already provides it.
                "processed_at": datetime.now(timezone.utc).isoformat(),
            }
            # Add the sample document to the RAG memory
            memory_ids = agent.rag_memory.add_document(sample_doc)
            print(
                f"Added sample document with {len(memory_ids)} chunks"
            )
        except Exception as e:
            print(f"Error creating sample document: {e}")
            print("Continuing without sample document...")

    # Example: Query the database
    print("\n📊 Querying document database...")
    try:
        query_results = agent.query_documents(
            "gold ETFs investment strategies", limit=3
        )
        print(f"Found {len(query_results)} relevant document chunks")
        if query_results:
            print("Sample results:")
            for i, result in enumerate(query_results[:2], 1):
                print(
                    f" {i}. {result.get('document_name', 'Unknown')} (Score: {result.get('similarity_score', 0):.3f})"
                )
        else:
            print(
                "No documents found in database. This is expected if no documents were ingested."
            )
    except Exception as e:
        print(f"❌ Query failed: {e}")

    # Example: Run financial analysis (requires working LLM credentials).
    print("\n💹 Running financial analysis...")
    analysis_task = "What are the best top 3 ETFs for gold coverage and what are their key characteristics?"
    try:
        response = agent.run_analysis(analysis_task)
        print("\n📈 Analysis Results:")
        print(response)
    except Exception as e:
        print(f"❌ Analysis failed: {e}")
        print("This might be due to API key or model access issues.")
        print("Continuing with database statistics...")

        # Fall back to a retrieval-only query that needs no LLM access.
        print("\n🔍 Trying simple document query instead...")
        try:
            simple_results = agent.query_documents(
                "what do you see in the document?", limit=2
            )
            if simple_results:
                print("Simple query results:")
                for i, result in enumerate(simple_results, 1):
                    print(
                        f" {i}. {result.get('document_name', 'Unknown')}"
                    )
                    print(
                        f" Content preview: {result.get('chunk_text', '')[:100]}..."
                    )
            else:
                print("No results from simple query")
        except Exception as simple_error:
            print(f"Simple query also failed: {simple_error}")

    # Get database statistics
    print("\n📊 Database Statistics:")
    try:
        stats = agent.get_database_stats()
        for key, value in stats.items():
            print(f" {key}: {value}")
    except Exception as e:
        print(f"❌ Failed to get database statistics: {e}")

    print("\n✅ Example completed successfully!")
    print("💡 To test with your own documents:")
    print(" 1. Create a 'documents' directory")
    print(" 2. Add PDF, TXT, or MD files")
    print(" 3. Run the script again")
# NOTE(review): the following option comments and the `import os` below
# look like leftovers from a merged example file (`os` is already
# imported at the top of the module) — confirm against the upstream diff.
# Option 2: Local Qdrant server
# client = QdrantClient(host="localhost", port=6333)
# Option 3: Qdrant Cloud (recommended for production)
import os

if __name__ == "__main__":
    main()
# NOTE(review): this section appears to be a second example merged into
# this view. `QdrantDB` is not imported at module scope here — confirm
# `from swarms_memory import QdrantDB` is present where this runs.

# Connect to a Qdrant Cloud cluster; URL and API key come from the
# environment, with placeholder fallbacks.
client = QdrantClient(
    url=os.getenv("QDRANT_URL", "https://your-cluster.qdrant.io"),
    api_key=os.getenv("QDRANT_API_KEY", "your-api-key"),
)

# Create QdrantDB wrapper for RAG operations
rag_db = QdrantDB(
    client=client,
    embedding_model="text-embedding-3-small",
    collection_name="knowledge_base",
    distance=models.Distance.COSINE,
    n_results=3,
)

# Add documents to the knowledge base
documents = [
    "Qdrant is a vector database optimized for similarity search and AI applications.",
    "RAG combines retrieval and generation for more accurate AI responses.",
    "Vector embeddings enable semantic search across documents.",
    "The swarms framework supports multiple memory backends including Qdrant.",
]

# Method 1: Add documents individually
for doc in documents:
    rag_db.add(doc)

# Method 2: Batch add documents (more efficient for large datasets)
# Example with metadata
# documents_with_metadata = [
#     "Machine learning is a subset of artificial intelligence.",
#     "Deep learning uses neural networks with multiple layers.",
#     "Natural language processing enables computers to understand human language.",
#     "Computer vision allows machines to interpret visual information.",
#     "Reinforcement learning learns through interaction with an environment."
# ]
#
# metadata = [
#     {"category": "AI", "difficulty": "beginner", "topic": "overview"},
#     {"category": "ML", "difficulty": "intermediate", "topic": "neural_networks"},
#     {"category": "NLP", "difficulty": "intermediate", "topic": "language"},
#     {"category": "CV", "difficulty": "advanced", "topic": "vision"},
#     {"category": "RL", "difficulty": "advanced", "topic": "learning"}
# ]
#
# # Batch add with metadata
# doc_ids = rag_db.batch_add(documents_with_metadata, metadata=metadata, batch_size=3)
# print(f"Added {len(doc_ids)} documents in batch")
#
# # Query with metadata return
# results_with_metadata = rag_db.query(
#     "What is artificial intelligence?",
#     n_results=3,
#     return_metadata=True
# )
#
# for i, result in enumerate(results_with_metadata):
#     print(f"\nResult {i+1}:")
#     print(f"  Document: {result['document']}")
#     print(f"  Category: {result['category']}")
#     print(f"  Difficulty: {result['difficulty']}")
#     print(f"  Topic: {result['topic']}")
#     print(f"  Score: {result['score']:.4f}")

# Create agent with RAG capabilities; the QdrantDB wrapper serves as the
# agent's long-term memory backend.
agent = Agent(
    agent_name="RAG-Agent",
    agent_description="Agent with Qdrant-powered RAG for enhanced knowledge retrieval",
    model_name="gpt-4.1",
    max_loops=1,
    dynamic_temperature_enabled=True,
    long_term_memory=rag_db,
)

# Query with RAG
response = agent.run("What is Qdrant and how does it relate to RAG?")
print(response)

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save