diff --git a/.github/workflows/RELEASE.yml b/.github/workflows/RELEASE.yml index 2f20cb89..4ce68db3 100644 --- a/.github/workflows/RELEASE.yml +++ b/.github/workflows/RELEASE.yml @@ -17,7 +17,7 @@ jobs: && ${{ contains(github.event.pull_request.labels.*.name, 'release') }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Install poetry run: pipx install poetry==$POETRY_VERSION - name: Set up Python 3.9 diff --git a/.github/workflows/codacy.yml b/.github/workflows/codacy.yml index 632d9e03..9473405b 100644 --- a/.github/workflows/codacy.yml +++ b/.github/workflows/codacy.yml @@ -21,7 +21,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 # Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis - name: Run Codacy Analysis CLI uses: codacy/codacy-analysis-cli-action@562ee3e92b8e92df8b67e0a5ff8aa8e261919c08 diff --git a/.github/workflows/code-quality-and-tests.yml b/.github/workflows/code-quality-and-tests.yml index 935b0448..025ee93b 100644 --- a/.github/workflows/code-quality-and-tests.yml +++ b/.github/workflows/code-quality-and-tests.yml @@ -16,7 +16,7 @@ jobs: steps: # Step 1: Check out the repository - name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 # Step 2: Set up Python - name: Set up Python ${{ matrix.python-version }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 17ff6bb3..7bf20661 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -28,7 +28,7 @@ jobs: language: ["python"] steps: - name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Initialize CodeQL uses: github/codeql-action/init@v4 with: diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 507a2882..c71cc45e 100644 --- 
a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-latest steps: - name: 'Checkout repository' - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: 'Dependency Review' uses: actions/dependency-review-action@v4 # Commonly enabled options, see https://github.com/actions/dependency-review-action#configuration-options for all available options. diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index b933fd6e..7a4a8e5b 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -9,7 +9,7 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: actions/setup-python@v6 with: python-version: 3.11 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 2128be8a..9a6a5409 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -6,7 +6,7 @@ jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python uses: actions/setup-python@v6 diff --git a/.github/workflows/pyre.yml b/.github/workflows/pyre.yml index 336569b5..bf5d085b 100644 --- a/.github/workflows/pyre.yml +++ b/.github/workflows/pyre.yml @@ -33,7 +33,7 @@ jobs: security-events: write runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: submodules: true diff --git a/.github/workflows/pysa.yml b/.github/workflows/pysa.yml index 5f913465..590a6432 100644 --- a/.github/workflows/pysa.yml +++ b/.github/workflows/pysa.yml @@ -35,7 +35,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: submodules: true diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 1fb79863..4adb50b3 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -21,7 +21,7 @@ jobs: python-version: 
["3.10", "3.11", "3.12"] steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v6 with: diff --git a/.github/workflows/test-main-features.yml b/.github/workflows/test-main-features.yml index 1ff92794..23b920ec 100644 --- a/.github/workflows/test-main-features.yml +++ b/.github/workflows/test-main-features.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Set up Python 3.10 uses: actions/setup-python@v6 @@ -121,7 +121,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Set up Python 3.10 uses: actions/setup-python@v6 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 88a9430e..0223eb67 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python 3.10 uses: actions/setup-python@v6 diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index b71ef7d4..3f72ed12 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -27,7 +27,7 @@ jobs: runs-on: "ubuntu-20.04" steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Build an image from Dockerfile run: | diff --git a/auto_agent.py b/auto_agent.py new file mode 100644 index 00000000..92f5ffa9 --- /dev/null +++ b/auto_agent.py @@ -0,0 +1,15 @@ +from swarms import Agent + +# Initialize the agent +agent = Agent( + agent_name="Quantitative-Trading-Agent", + agent_description="Advanced quantitative trading and algorithmic analysis agent", + model_name="gpt-4.1", + max_loops="auto", +) + +out = agent.run( + task="What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?", +) + +print(out) diff --git 
a/docs/examples/aop_examples_overview.md b/docs/examples/aop_examples_overview.md new file mode 100644 index 00000000..5e726aa0 --- /dev/null +++ b/docs/examples/aop_examples_overview.md @@ -0,0 +1,40 @@ +# AOP Examples Overview + +Deploy agents as network services using the Agent Orchestration Protocol (AOP). Turn your agents into distributed, scalable, and accessible services. + +## What You'll Learn + +| Topic | Description | +|-------|-------------| +| **AOP Fundamentals** | Understanding agent-as-a-service deployment | +| **Server Setup** | Running agents as MCP servers | +| **Client Integration** | Connecting to remote agents | +| **Production Deployment** | Scaling and monitoring agents | + +--- + +## AOP Examples + +| Example | Description | Link | +|---------|-------------|------| +| **Medical AOP Example** | Healthcare agent deployment with AOP | [View Example](./aop_medical.md) | + +--- + +## Use Cases + +| Use Case | Description | +|----------|-------------| +| **Microservices** | Agent per service | +| **API Gateway** | Central agent access point | +| **Multi-tenant** | Shared agent infrastructure | +| **Edge Deployment** | Agents at the edge | + +--- + +## Related Resources + +- [AOP Reference Documentation](../swarms/structs/aop.md) - Complete AOP API +- [AOP Server Setup](../swarms/examples/aop_server_example.md) - Server configuration +- [AOP Cluster Example](../swarms/examples/aop_cluster_example.md) - Multi-node setup +- [Deployment Solutions](../deployment_solutions/overview.md) - Production deployment diff --git a/docs/examples/applications_overview.md b/docs/examples/applications_overview.md new file mode 100644 index 00000000..6a400db5 --- /dev/null +++ b/docs/examples/applications_overview.md @@ -0,0 +1,69 @@ +# Applications Overview + +Real-world multi-agent applications built with Swarms. These examples demonstrate complete solutions for business, research, finance, and automation use cases. 
+ +## What You'll Learn + +| Topic | Description | +|-------|-------------| +| **Business Applications** | Marketing, hiring, M&A advisory swarms | +| **Research Systems** | Advanced research and analysis workflows | +| **Financial Analysis** | ETF research and investment analysis | +| **Automation** | Browser agents and web automation | +| **Industry Solutions** | Real estate, job finding, and more | + +--- + +## Application Examples + +| Application | Description | Industry | Link | +|-------------|-------------|----------|------| +| **Swarms of Browser Agents** | Automated web browsing with multiple agents | Automation | [View Example](../swarms/examples/swarms_of_browser_agents.md) | +| **Hierarchical Marketing Team** | Multi-agent marketing strategy and execution | Marketing | [View Example](./marketing_team.md) | +| **Gold ETF Research with HeavySwarm** | Comprehensive ETF analysis using Heavy Swarm | Finance | [View Example](./gold_etf_research.md) | +| **Hiring Swarm** | Automated candidate screening and evaluation | HR/Recruiting | [View Example](./hiring_swarm.md) | +| **Advanced Research** | Multi-agent research and analysis system | Research | [View Example](./av.md) | +| **Real Estate Swarm** | Property analysis and market research | Real Estate | [View Example](./realestate_swarm.md) | +| **Job Finding Swarm** | Automated job search and matching | Career | [View Example](./job_finding.md) | +| **M&A Advisory Swarm** | Mergers & acquisitions analysis | Finance | [View Example](./ma_swarm.md) | + +--- + +## Applications by Category + +### Business & Marketing + +| Application | Description | Link | +|-------------|-------------|------| +| **Hierarchical Marketing Team** | Complete marketing strategy system | [View Example](./marketing_team.md) | +| **Hiring Swarm** | End-to-end recruiting automation | [View Example](./hiring_swarm.md) | +| **M&A Advisory Swarm** | Due diligence and analysis | [View Example](./ma_swarm.md) | + +### Financial Analysis + 
+| Application | Description | Link | +|-------------|-------------|------| +| **Gold ETF Research** | Comprehensive ETF analysis | [View Example](./gold_etf_research.md) | + +### Research & Automation + +| Application | Description | Link | +|-------------|-------------|------| +| **Advanced Research** | Multi-source research compilation | [View Example](./av.md) | +| **Browser Agents** | Automated web interaction | [View Example](../swarms/examples/swarms_of_browser_agents.md) | +| **Job Finding Swarm** | Career opportunity discovery | [View Example](./job_finding.md) | + +### Real Estate + +| Application | Description | Link | +|-------------|-------------|------| +| **Real Estate Swarm** | Property market analysis | [View Example](./realestate_swarm.md) | + +--- + +## Related Resources + +- [HierarchicalSwarm Documentation](../swarms/structs/hierarchical_swarm.md) +- [HeavySwarm Documentation](../swarms/structs/heavy_swarm.md) +- [Building Custom Swarms](../swarms/structs/custom_swarm.md) +- [Deployment Solutions](../deployment_solutions/overview.md) diff --git a/docs/examples/apps_examples_overview.md b/docs/examples/apps_examples_overview.md new file mode 100644 index 00000000..cf90ca11 --- /dev/null +++ b/docs/examples/apps_examples_overview.md @@ -0,0 +1,29 @@ +# Apps Examples Overview + +Complete application examples built with Swarms. These examples show how to build practical tools and utilities with AI agents. 
+ +## What You'll Learn + +| Topic | Description | +|-------|-------------| +| **Web Scraping** | Building intelligent web scrapers | +| **Database Integration** | Smart database query agents | +| **Practical Tools** | End-to-end application development | + +--- + +## App Examples + +| App | Description | Link | +|-----|-------------|------| +| **Web Scraper Agents** | Intelligent web data extraction | [View Example](../developer_guides/web_scraper.md) | +| **Smart Database** | AI-powered database interactions | [View Example](./smart_database.md) | + +--- + +## Related Resources + +- [Tools & Integrations](./tools_integrations_overview.md) - External service connections +- [Multi-Agent Architectures](./multi_agent_architectures_overview.md) - Complex agent systems +- [Deployment Solutions](../deployment_solutions/overview.md) - Production deployment + diff --git a/docs/examples/basic_examples_overview.md b/docs/examples/basic_examples_overview.md new file mode 100644 index 00000000..e8f20c56 --- /dev/null +++ b/docs/examples/basic_examples_overview.md @@ -0,0 +1,80 @@ +# Basic Examples Overview + +Start your Swarms journey with single-agent examples. Learn how to create agents, use tools, process images, integrate with different LLM providers, and publish to the marketplace. 
+ +## What You'll Learn + +| Topic | Description | +|-------|-------------| +| **Agent Basics** | Create and configure individual agents | +| **Tool Integration** | Equip agents with callable tools and functions | +| **Vision Capabilities** | Process images and multi-modal inputs | +| **LLM Providers** | Connect to OpenAI, Anthropic, Groq, and more | +| **Utilities** | Streaming, output types, and marketplace publishing | + +--- + +## Individual Agent Examples + +### Core Agent Usage + +| Example | Description | Link | +|---------|-------------|------| +| **Basic Agent** | Fundamental agent creation and execution | [View Example](../swarms/examples/basic_agent.md) | + +### Tool Usage + +| Example | Description | Link | +|---------|-------------|------| +| **Agents with Vision and Tool Usage** | Combine vision and tools in one agent | [View Example](../swarms/examples/vision_tools.md) | +| **Agents with Callable Tools** | Equip agents with Python functions as tools | [View Example](../swarms/examples/agent_with_tools.md) | +| **Agent with Structured Outputs** | Get consistent JSON/structured responses | [View Example](../swarms/examples/agent_structured_outputs.md) | +| **Message Transforms** | Manage context with message transformations | [View Example](../swarms/structs/transforms.md) | + +### Vision & Multi-Modal + +| Example | Description | Link | +|---------|-------------|------| +| **Agents with Vision** | Process and analyze images | [View Example](../swarms/examples/vision_processing.md) | +| **Agent with Multiple Images** | Handle multiple images in one request | [View Example](../swarms/examples/multiple_images.md) | + +### Utilities + +| Example | Description | Link | +|---------|-------------|------| +| **Agent with Streaming** | Stream responses in real-time | [View Example](./agent_stream.md) | +| **Agent Output Types** | Different output formats (str, json, dict, yaml) | [View Example](../swarms/examples/agent_output_types.md) | +| **Gradio Chat 
Interface** | Build chat UIs for your agents | [View Example](../swarms/ui/main.md) | +| **Agent with Gemini Nano Banana** | Jarvis-style agent example | [View Example](../swarms/examples/jarvis_agent.md) | +| **Agent Marketplace Publishing** | Publish agents to the Swarms marketplace | [View Example](./marketplace_publishing_quickstart.md) | + +--- + +## LLM Provider Examples + +Connect your agents to various language model providers: + +| Provider | Description | Link | +|----------|-------------|------| +| **Overview** | Guide to all supported providers | [View Guide](../swarms/examples/model_providers.md) | +| **OpenAI** | GPT-4, GPT-4o, GPT-4o-mini integration | [View Example](../swarms/examples/openai_example.md) | +| **Anthropic** | Claude models integration | [View Example](../swarms/examples/claude.md) | +| **Groq** | Ultra-fast inference with Groq | [View Example](../swarms/examples/groq.md) | +| **Cohere** | Cohere Command models | [View Example](../swarms/examples/cohere.md) | +| **DeepSeek** | DeepSeek models integration | [View Example](../swarms/examples/deepseek.md) | +| **Ollama** | Local models with Ollama | [View Example](../swarms/examples/ollama.md) | +| **OpenRouter** | Access multiple providers via OpenRouter | [View Example](../swarms/examples/openrouter.md) | +| **XAI** | Grok models from xAI | [View Example](../swarms/examples/xai.md) | +| **Azure OpenAI** | Enterprise Azure deployment | [View Example](../swarms/examples/azure.md) | +| **Llama4** | Meta's Llama 4 models | [View Example](../swarms/examples/llama4.md) | +| **Custom Base URL** | Connect to any OpenAI-compatible API | [View Example](../swarms/examples/custom_base_url_example.md) | + +--- + +## Next Steps + +After mastering basic agents, explore: + +- [Multi-Agent Architectures](./multi_agent_architectures_overview.md) - Coordinate multiple agents +- [Tools Documentation](../swarms/tools/main.md) - Deep dive into tool creation +- [CLI Guides](./cli_guides_overview.md) - Run 
agents from command line diff --git a/docs/examples/cli_guides_overview.md b/docs/examples/cli_guides_overview.md new file mode 100644 index 00000000..832e3eeb --- /dev/null +++ b/docs/examples/cli_guides_overview.md @@ -0,0 +1,47 @@ +# CLI Guides Overview + +Master the Swarms command-line interface with these step-by-step guides. Execute agents, run multi-agent workflows, and integrate Swarms into your DevOps pipelines—all from your terminal. + +## What You'll Learn + +| Topic | Description | +|-------|-------------| +| **CLI Basics** | Install, configure, and run your first commands | +| **Agent Creation** | Create and run agents directly from command line | +| **YAML Configuration** | Define agents in config files for reproducible deployments | +| **Multi-Agent Commands** | Run LLM Council and Heavy Swarm from terminal | +| **DevOps Integration** | Integrate into CI/CD pipelines and scripts | + +--- + +## CLI Guides + +| Guide | Description | Link | +|-------|-------------|------| +| **CLI Quickstart** | Get started with Swarms CLI in 3 steps—install, configure, and run | [View Guide](../swarms/cli/cli_quickstart.md) | +| **Creating Agents from CLI** | Create, configure, and run AI agents directly from your terminal | [View Guide](../swarms/cli/cli_agent_guide.md) | +| **YAML Configuration** | Run multiple agents from YAML configuration files | [View Guide](../swarms/cli/cli_yaml_guide.md) | +| **LLM Council CLI** | Run collaborative multi-agent decision-making from command line | [View Guide](../swarms/cli/cli_llm_council_guide.md) | +| **Heavy Swarm CLI** | Execute comprehensive task analysis swarms from terminal | [View Guide](../swarms/cli/cli_heavy_swarm_guide.md) | +| **CLI Multi-Agent Commands** | Complete guide to multi-agent CLI commands | [View Guide](./cli_multi_agent_quickstart.md) | +| **CLI Examples** | Additional CLI usage examples and patterns | [View Guide](../swarms/cli/cli_examples.md) | + +--- + +## Use Cases + +| Use Case | Recommended Guide 
| +|----------|-------------------| +| First time using CLI | [CLI Quickstart](../swarms/cli/cli_quickstart.md) | +| Creating custom agents | [Creating Agents from CLI](../swarms/cli/cli_agent_guide.md) | +| Team/production deployments | [YAML Configuration](../swarms/cli/cli_yaml_guide.md) | +| Collaborative decision-making | [LLM Council CLI](../swarms/cli/cli_llm_council_guide.md) | +| Complex research tasks | [Heavy Swarm CLI](../swarms/cli/cli_heavy_swarm_guide.md) | + +--- + +## Related Resources + +- [CLI Reference Documentation](../swarms/cli/cli_reference.md) - Complete command reference +- [Agent Documentation](../swarms/structs/agent.md) - Agent class reference +- [Environment Configuration](../swarms/install/env.md) - Environment setup guide diff --git a/docs/examples/cli_multi_agent_quickstart.md b/docs/examples/cli_multi_agent_quickstart.md new file mode 100644 index 00000000..3b0e1c02 --- /dev/null +++ b/docs/examples/cli_multi_agent_quickstart.md @@ -0,0 +1,215 @@ +# CLI Multi-Agent Features: 3-Step Quickstart Guide + +Run LLM Council and Heavy Swarm directly from the command line for seamless DevOps integration. Execute sophisticated multi-agent workflows without writing Python code. + +## Overview + +| Feature | Description | +|---------|-------------| +| **LLM Council CLI** | Run collaborative decision-making from terminal | +| **Heavy Swarm CLI** | Execute comprehensive research swarms | +| **DevOps Ready** | Integrate into CI/CD pipelines and scripts | +| **Configurable** | Full parameter control from command line | + +--- + +## Step 1: Install and Verify + +Ensure Swarms is installed and verify CLI access: + +```bash +# Install swarms +pip install swarms + +# Verify CLI is available +swarms --help +``` + +You should see the Swarms CLI banner and available commands. 
+ +--- + +## Step 2: Set Environment Variables + +Configure your API keys: + +```bash +# Set your OpenAI API key (or other provider) +export OPENAI_API_KEY="your-openai-api-key" + +# Optional: Set workspace directory +export WORKSPACE_DIR="./agent_workspace" +``` + +Or add to your `.env` file: + +``` +OPENAI_API_KEY=your-openai-api-key +WORKSPACE_DIR=./agent_workspace +``` + +--- + +## Step 3: Run Multi-Agent Commands + +### LLM Council + +Run a collaborative council of AI agents: + +```bash +# Basic usage +swarms llm-council --task "What is the best approach to implement microservices architecture?" + +# With verbose output +swarms llm-council --task "Evaluate investment opportunities in AI startups" --verbose +``` + +### Heavy Swarm + +Run comprehensive research and analysis: + +```bash +# Basic usage +swarms heavy-swarm --task "Analyze the current state of quantum computing" + +# With configuration options +swarms heavy-swarm \ + --task "Research renewable energy market trends" \ + --loops-per-agent 2 \ + --question-agent-model-name gpt-4o-mini \ + --worker-model-name gpt-4o-mini \ + --verbose +``` + +--- + +## Complete CLI Reference + +### LLM Council Command + +```bash +swarms llm-council --task "" [options] +``` + +| Option | Description | +|--------|-------------| +| `--task` | **Required.** The query or question for the council | +| `--verbose` | Enable detailed output logging | + +**Examples:** + +```bash +# Strategic decision +swarms llm-council --task "Should our startup pivot from B2B to B2C?" + +# Technical evaluation +swarms llm-council --task "Compare React vs Vue for enterprise applications" + +# Business analysis +swarms llm-council --task "What are the risks of expanding to European markets?" 
+``` + +--- + +### Heavy Swarm Command + +```bash +swarms heavy-swarm --task "" [options] +``` + +| Option | Default | Description | +|--------|---------|-------------| +| `--task` | - | **Required.** The research task | +| `--loops-per-agent` | 1 | Number of loops per agent | +| `--question-agent-model-name` | gpt-4o-mini | Model for question agent | +| `--worker-model-name` | gpt-4o-mini | Model for worker agents | +| `--random-loops-per-agent` | False | Randomize loops per agent | +| `--verbose` | False | Enable detailed output | + +**Examples:** + +```bash +# Comprehensive research +swarms heavy-swarm --task "Research the impact of AI on healthcare diagnostics" --verbose + +# With custom models +swarms heavy-swarm \ + --task "Analyze cryptocurrency regulation trends globally" \ + --question-agent-model-name gpt-4 \ + --worker-model-name gpt-4 \ + --loops-per-agent 3 + +# Quick analysis +swarms heavy-swarm --task "Summarize recent advances in battery technology" +``` + +--- + +## Other Useful CLI Commands + +### Setup Check + +Verify your environment is properly configured: + +```bash +swarms setup-check --verbose +``` + +### Run Single Agent + +Execute a single agent task: + +```bash +swarms agent \ + --name "Research-Agent" \ + --task "Summarize recent AI developments" \ + --model "gpt-4o-mini" \ + --max-loops 1 +``` + +### Auto Swarm + +Automatically generate and run a swarm configuration: + +```bash +swarms autoswarm --task "Build a content analysis pipeline" --model gpt-4 +``` + +### Show All Commands + +Display all available CLI features: + +```bash +swarms show-all +``` + +--- + +## Troubleshooting + +### Common Issues + +| Issue | Solution | +|-------|----------| +| "Command not found" | Ensure `pip install swarms` completed successfully | +| "API key not set" | Export `OPENAI_API_KEY` environment variable | +| "Task cannot be empty" | Always provide `--task` argument | +| Timeout errors | Check network connectivity and API rate limits | + +### Debug 
Mode + +Run with verbose output for debugging: + +```bash +swarms llm-council --task "Your query" --verbose 2>&1 | tee debug.log +``` + +--- + +## Next Steps + +- Explore [CLI Reference Documentation](../swarms/cli/cli_reference.md) for all commands +- See [CLI Examples](../swarms/cli/cli_examples.md) for more use cases +- Learn about [LLM Council](./llm_council_quickstart.md) Python API +- Try [Heavy Swarm Documentation](../swarms/structs/heavy_swarm.md) for advanced configuration + diff --git a/docs/examples/debate_quickstart.md b/docs/examples/debate_quickstart.md new file mode 100644 index 00000000..e5257a28 --- /dev/null +++ b/docs/examples/debate_quickstart.md @@ -0,0 +1,233 @@ +# DebateWithJudge: 3-Step Quickstart Guide + +The DebateWithJudge architecture enables structured debates between two agents (Pro and Con) with a Judge providing refined synthesis over multiple rounds. This creates progressively improved answers through iterative argumentation and evaluation. + +## Overview + +| Feature | Description | +|---------|-------------| +| **Pro Agent** | Argues in favor of a position with evidence and reasoning | +| **Con Agent** | Presents counter-arguments and identifies weaknesses | +| **Judge Agent** | Evaluates both sides and synthesizes the best elements | +| **Iterative Refinement** | Multiple rounds progressively improve the final answer | + +``` +Agent A (Pro) ↔ Agent B (Con) + │ │ + ▼ ▼ + Judge / Critic Agent + │ + ▼ +Winner or synthesis → refined answer +``` + +--- + +## Step 1: Install and Import + +Ensure you have Swarms installed and import the DebateWithJudge class: + +```bash +pip install swarms +``` + +```python +from swarms import DebateWithJudge +``` + +--- + +## Step 2: Create the Debate System + +Create a DebateWithJudge system using preset agents (the simplest approach): + +```python +# Create debate system with preset optimized agents +debate = DebateWithJudge( + preset_agents=True, # Use built-in optimized agents + max_loops=3, # 3 
rounds of debate + model_name="gpt-4o-mini", + verbose=True +) +``` + +--- + +## Step 3: Run the Debate + +Execute the debate on a topic: + +```python +# Define the debate topic +topic = "Should artificial intelligence be regulated by governments?" + +# Run the debate +result = debate.run(task=topic) + +# Print the refined answer +print(result) + +# Or get just the final synthesis +final_answer = debate.get_final_answer() +print(final_answer) +``` + +--- + +## Complete Example + +Here's a complete working example: + +```python +from swarms import DebateWithJudge + +# Step 1: Create the debate system with preset agents +debate_system = DebateWithJudge( + preset_agents=True, + max_loops=3, + model_name="gpt-4o-mini", + output_type="str-all-except-first", + verbose=True, +) + +# Step 2: Define a complex topic +topic = ( + "Should artificial intelligence be regulated by governments? " + "Discuss the balance between innovation and safety." +) + +# Step 3: Run the debate and get refined answer +result = debate_system.run(task=topic) + +print("=" * 60) +print("DEBATE RESULT:") +print("=" * 60) +print(result) + +# Access conversation history for detailed analysis +history = debate_system.get_conversation_history() +print(f"\nTotal exchanges: {len(history)}") +``` + +--- + +## Custom Agents Example + +Create specialized agents for domain-specific debates: + +```python +from swarms import Agent, DebateWithJudge + +# Create specialized Pro agent +pro_agent = Agent( + agent_name="Innovation-Advocate", + system_prompt=( + "You are a technology policy expert arguing for innovation and minimal regulation. " + "You present arguments focusing on economic growth, technological competitiveness, " + "and the risks of over-regulation stifling progress." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create specialized Con agent +con_agent = Agent( + agent_name="Safety-Advocate", + system_prompt=( + "You are a technology policy expert arguing for strong AI safety regulations. 
" + "You present arguments focusing on public safety, ethical considerations, " + "and the need for government oversight of powerful technologies." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create specialized Judge agent +judge_agent = Agent( + agent_name="Policy-Analyst", + system_prompt=( + "You are an impartial policy analyst evaluating technology regulation debates. " + "You synthesize the strongest arguments from both sides and provide " + "balanced, actionable policy recommendations." + ), + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create debate system with custom agents +debate = DebateWithJudge( + agents=[pro_agent, con_agent, judge_agent], # Pass as list + max_loops=3, + verbose=True, +) + +result = debate.run("Should AI-generated content require mandatory disclosure labels?") +``` + +--- + +## Batch Processing + +Process multiple debate topics: + +```python +from swarms import DebateWithJudge + +debate = DebateWithJudge(preset_agents=True, max_loops=2) + +# Multiple topics to debate +topics = [ + "Should remote work become the standard for knowledge workers?", + "Is cryptocurrency a viable alternative to traditional banking?", + "Should social media platforms be held accountable for content moderation?", +] + +# Process all topics +results = debate.batched_run(topics) + +for topic, result in zip(topics, results): + print(f"\nTopic: {topic}") + print(f"Result: {result[:200]}...") +``` + +--- + +## Configuration Options + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `preset_agents` | `False` | Use built-in optimized agents | +| `max_loops` | `3` | Number of debate rounds | +| `model_name` | `"gpt-4o-mini"` | Model for preset agents | +| `output_type` | `"str-all-except-first"` | Output format | +| `verbose` | `True` | Enable detailed logging | + +### Output Types + +| Value | Description | +|-------|-------------| +| `"str-all-except-first"` | Formatted string, excluding initialization (default) | +| 
`"str"` | All messages as formatted string | +| `"dict"` | Messages as dictionary | +| `"list"` | Messages as list | + +--- + +## Use Cases + +| Domain | Example Topic | +|--------|---------------| +| **Policy** | "Should universal basic income be implemented?" | +| **Technology** | "Microservices vs. monolithic architecture for startups?" | +| **Business** | "Should companies prioritize growth or profitability?" | +| **Ethics** | "Is it ethical to use AI in hiring decisions?" | +| **Science** | "Should gene editing be allowed for non-medical purposes?" | + +--- + +## Next Steps + +- Explore [DebateWithJudge Reference](../swarms/structs/debate_with_judge.md) for complete API details +- See [Debate Examples](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent/debate_examples) for more use cases +- Learn about [Orchestration Methods](../swarms/structs/orchestration_methods.md) for other debate architectures + diff --git a/docs/examples/graphworkflow_quickstart.md b/docs/examples/graphworkflow_quickstart.md new file mode 100644 index 00000000..65b56ead --- /dev/null +++ b/docs/examples/graphworkflow_quickstart.md @@ -0,0 +1,327 @@ +# GraphWorkflow with Rustworkx: 3-Step Quickstart Guide + +GraphWorkflow provides a powerful workflow orchestration system that creates directed graphs of agents for complex multi-agent collaboration. The new **Rustworkx integration** delivers 5-10x faster performance for large-scale workflows. 
+ +## Overview + +| Feature | Description | +|---------|-------------| +| **Directed Graph Structure** | Nodes are agents, edges define data flow | +| **Dual Backend Support** | NetworkX (compatibility) or Rustworkx (performance) | +| **Parallel Execution** | Multiple agents run simultaneously within layers | +| **Automatic Compilation** | Optimizes workflow structure for efficient execution | +| **5-10x Performance** | Rustworkx backend for high-throughput workflows | + +--- + +## Step 1: Install and Import + +Install Swarms and Rustworkx for high-performance workflows: + +```bash +pip install swarms rustworkx +``` + +```python +from swarms import Agent, GraphWorkflow +``` + +--- + +## Step 2: Create the Workflow with Rustworkx Backend + +Create agents and build a workflow using the high-performance Rustworkx backend: + +```python +# Create specialized agents +research_agent = Agent( + agent_name="ResearchAgent", + model_name="gpt-4o-mini", + system_prompt="You are a research specialist. Gather and analyze information.", + max_loops=1 +) + +analysis_agent = Agent( + agent_name="AnalysisAgent", + model_name="gpt-4o-mini", + system_prompt="You are an analyst. 
Process research findings and extract insights.", + max_loops=1 +) + +# Create workflow with rustworkx backend for better performance +workflow = GraphWorkflow( + name="Research-Analysis-Pipeline", + backend="rustworkx", # Use rustworkx for 5-10x faster performance + verbose=True +) + +# Add agents as nodes +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) + +# Connect agents with edges +workflow.add_edge("ResearchAgent", "AnalysisAgent") +``` + +--- + +## Step 3: Execute the Workflow + +Run the workflow and get results: + +```python +# Execute the workflow +results = workflow.run("What are the latest trends in renewable energy technology?") + +# Print results +print(results) +``` + +--- + +## Complete Example + +Here's a complete parallel processing workflow: + +```python +from swarms import Agent, GraphWorkflow + +# Step 1: Create specialized agents +data_collector = Agent( + agent_name="DataCollector", + model_name="gpt-4o-mini", + system_prompt="You collect and organize data from various sources.", + max_loops=1 +) + +technical_analyst = Agent( + agent_name="TechnicalAnalyst", + model_name="gpt-4o-mini", + system_prompt="You perform technical analysis on data.", + max_loops=1 +) + +market_analyst = Agent( + agent_name="MarketAnalyst", + model_name="gpt-4o-mini", + system_prompt="You analyze market trends and conditions.", + max_loops=1 +) + +synthesis_agent = Agent( + agent_name="SynthesisAgent", + model_name="gpt-4o-mini", + system_prompt="You synthesize insights from multiple analysts into a cohesive report.", + max_loops=1 +) + +# Step 2: Build workflow with rustworkx backend +workflow = GraphWorkflow( + name="Market-Analysis-Pipeline", + backend="rustworkx", # High-performance backend + verbose=True +) + +# Add all agents +for agent in [data_collector, technical_analyst, market_analyst, synthesis_agent]: + workflow.add_node(agent) + +# Create fan-out pattern: data collector feeds both analysts +workflow.add_edges_from_source( + 
"DataCollector", + ["TechnicalAnalyst", "MarketAnalyst"] +) + +# Create fan-in pattern: both analysts feed synthesis agent +workflow.add_edges_to_target( + ["TechnicalAnalyst", "MarketAnalyst"], + "SynthesisAgent" +) + +# Step 3: Execute and get results +results = workflow.run("Analyze Bitcoin market trends for Q4 2024") + +print("=" * 60) +print("WORKFLOW RESULTS:") +print("=" * 60) +print(results) + +# Get compilation status +status = workflow.get_compilation_status() +print(f"\nLayers: {status['cached_layers_count']}") +print(f"Max workers: {status['max_workers']}") +``` + +--- + +## NetworkX vs Rustworkx Backend + +| Graph Size | Recommended Backend | Performance | +|------------|-------------------|-------------| +| < 100 nodes | NetworkX | Minimal overhead | +| 100-1000 nodes | Either | Both perform well | +| 1000+ nodes | **Rustworkx** | 5-10x faster | +| 10k+ nodes | **Rustworkx** | Essential | + +```python +# NetworkX backend (default, maximum compatibility) +workflow = GraphWorkflow(backend="networkx") + +# Rustworkx backend (high performance) +workflow = GraphWorkflow(backend="rustworkx") +``` + +--- + +## Edge Patterns + +### Fan-Out (One-to-Many) + +```python +# One agent feeds multiple agents +workflow.add_edges_from_source( + "DataCollector", + ["Analyst1", "Analyst2", "Analyst3"] +) +``` + +### Fan-In (Many-to-One) + +```python +# Multiple agents feed one agent +workflow.add_edges_to_target( + ["Analyst1", "Analyst2", "Analyst3"], + "SynthesisAgent" +) +``` + +### Parallel Chain (Many-to-Many) + +```python +# Full mesh connection +workflow.add_parallel_chain( + ["Source1", "Source2"], + ["Target1", "Target2", "Target3"] +) +``` + +--- + +## Using from_spec for Quick Setup + +Create workflows quickly with the `from_spec` class method: + +```python +from swarms import Agent, GraphWorkflow + +# Create agents +agent1 = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1) +agent2 = Agent(agent_name="Analyzer", model_name="gpt-4o-mini", 
max_loops=1) +agent3 = Agent(agent_name="Reporter", model_name="gpt-4o-mini", max_loops=1) + +# Create workflow from specification +workflow = GraphWorkflow.from_spec( + agents=[agent1, agent2, agent3], + edges=[ + ("Researcher", "Analyzer"), + ("Analyzer", "Reporter"), + ], + task="Analyze climate change data", + backend="rustworkx" # Use high-performance backend +) + +results = workflow.run() +``` + +--- + +## Visualization + +Generate visual representations of your workflow: + +```python +# Create visualization (requires graphviz) +output_file = workflow.visualize( + format="png", + view=True, + show_summary=True +) +print(f"Visualization saved to: {output_file}") + +# Simple text visualization +text_viz = workflow.visualize_simple() +print(text_viz) +``` + +--- + +## Serialization + +Save and load workflows: + +```python +# Save workflow with conversation history +workflow.save_to_file( + "my_workflow.json", + include_conversation=True, + include_runtime_state=True +) + +# Load workflow later +loaded_workflow = GraphWorkflow.load_from_file( + "my_workflow.json", + restore_runtime_state=True +) + +# Continue execution +results = loaded_workflow.run("Follow-up analysis") +``` + +--- + +## Large-Scale Example with Rustworkx + +```python +from swarms import Agent, GraphWorkflow + +# Create workflow for large-scale processing +workflow = GraphWorkflow( + name="Large-Scale-Pipeline", + backend="rustworkx", # Essential for large graphs + verbose=True +) + +# Create many processing agents +processors = [] +for i in range(50): + agent = Agent( + agent_name=f"Processor{i}", + model_name="gpt-4o-mini", + max_loops=1 + ) + processors.append(agent) + workflow.add_node(agent) + +# Create layered connections +for i in range(0, 40, 10): + sources = [f"Processor{j}" for j in range(i, i+10)] + targets = [f"Processor{j}" for j in range(i+10, min(i+20, 50))] + if targets: + workflow.add_parallel_chain(sources, targets) + +# Compile and execute +workflow.compile() +status = 
workflow.get_compilation_status() +print(f"Compiled: {status['cached_layers_count']} layers") + +results = workflow.run("Process dataset in parallel") +``` + +--- + +## Next Steps + +- Explore [GraphWorkflow Reference](../swarms/structs/graph_workflow.md) for complete API details +- See [Multi-Agentic Patterns with GraphWorkflow](./graphworkflow_rustworkx_patterns.md) for advanced patterns +- Learn about [Visualization Options](../swarms/structs/graph_workflow.md#visualization-methods) for debugging workflows + diff --git a/docs/examples/llm_council_examples.md b/docs/examples/llm_council_examples.md new file mode 100644 index 00000000..eadebe9c --- /dev/null +++ b/docs/examples/llm_council_examples.md @@ -0,0 +1,112 @@ +# LLM Council Examples + +This page provides examples demonstrating the LLM Council pattern, inspired by Andrej Karpathy's llm-council implementation. The LLM Council uses multiple specialized AI agents that: + +1. Each respond independently to queries +2. Review and rank each other's anonymized responses +3. Have a Chairman synthesize all responses into a final comprehensive answer + +## Example Files + +All LLM Council examples are located in the [`examples/multi_agent/llm_council_examples/`](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent/llm_council_examples) directory. 
+ +### Marketing & Business + +- **[marketing_strategy_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/marketing_strategy_council.py)** - Marketing strategy analysis and recommendations +- **[business_strategy_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/business_strategy_council.py)** - Comprehensive business strategy development + +### Finance & Investment + +- **[finance_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/finance_analysis_council.py)** - Financial analysis and investment recommendations +- **[etf_stock_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py)** - ETF and stock analysis with portfolio recommendations + +### Medical & Healthcare + +- **[medical_treatment_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/medical_treatment_council.py)** - Medical treatment recommendations and care plans +- **[medical_diagnosis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py)** - Diagnostic analysis based on symptoms + +### Technology & Research + +- **[technology_assessment_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/technology_assessment_council.py)** - Technology evaluation and implementation strategy +- **[research_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/research_analysis_council.py)** - Comprehensive research analysis on complex topics + +### Legal + +- **[legal_analysis_council.py](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/llm_council_examples/legal_analysis_council.py)** - Legal implications and compliance analysis + 
+## Basic Usage Pattern + +All examples follow the same pattern: + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Run a query +result = council.run("Your query here") + +# Access results +print(result["final_response"]) # Chairman's synthesized answer +print(result["original_responses"]) # Individual member responses +print(result["evaluations"]) # How members ranked each other +``` + +## Running Examples + +Run any example directly: + +```bash +python examples/multi_agent/llm_council_examples/marketing_strategy_council.py +python examples/multi_agent/llm_council_examples/finance_analysis_council.py +python examples/multi_agent/llm_council_examples/medical_diagnosis_council.py +``` + +## Key Features + +| Feature | Description | +|----------------------|---------------------------------------------------------------------------------------------------------| +| **Multiple Perspectives** | Each council member (GPT-5.1, Gemini, Claude, Grok) provides unique insights | +| **Peer Review** | Members evaluate and rank each other's responses anonymously | +| **Synthesis** | Chairman combines the best elements from all responses | +| **Transparency** | See both individual responses and evaluation rankings | + + +## Council Members + +The default council consists of: + +| Council Member | Description | +|-------------------------------|-------------------------------| +| **GPT-5.1-Councilor** | Analytical and comprehensive | +| **Gemini-3-Pro-Councilor** | Concise and well-processed | +| **Claude-Sonnet-4.5-Councilor** | Thoughtful and balanced | +| **Grok-4-Councilor** | Creative and innovative | + +## Customization + +You can create custom council members: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +custom_agent = Agent( + agent_name="Custom-Councilor", + system_prompt=get_gpt_councilor_prompt(), + model_name="gpt-4.1", 
+ max_loops=1, +) + +council = LLMCouncil( + council_members=[custom_agent, ...], + chairman_model="gpt-5.1", + verbose=True +) +``` + +## Documentation + +For complete API reference and detailed documentation, see the [LLM Council Reference Documentation](../swarms/structs/llm_council.md). + diff --git a/docs/examples/llm_council_quickstart.md b/docs/examples/llm_council_quickstart.md new file mode 100644 index 00000000..7ffef806 --- /dev/null +++ b/docs/examples/llm_council_quickstart.md @@ -0,0 +1,170 @@ +# LLM Council: 3-Step Quickstart Guide + +The LLM Council enables collaborative decision-making with multiple AI agents through peer review and synthesis. Inspired by Andrej Karpathy's llm-council, it creates a council of specialized agents that respond independently, review each other's anonymized responses, and have a Chairman synthesize the best elements into a final answer. + +## Overview + +| Feature | Description | +|---------|-------------| +| **Multiple Perspectives** | Each council member provides unique insights from different viewpoints | +| **Peer Review** | Members evaluate and rank each other's responses anonymously | +| **Synthesis** | Chairman combines the best elements from all responses | +| **Transparency** | See both individual responses and evaluation rankings | + +--- + +## Step 1: Install and Import + +First, ensure you have Swarms installed and import the LLMCouncil class: + +```bash +pip install swarms +``` + +```python +from swarms.structs.llm_council import LLMCouncil +``` + +--- + +## Step 2: Create the Council + +Create an LLM Council with default council members (GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4): + +```python +# Create the council with default members +council = LLMCouncil( + name="Decision Council", + verbose=True, + output_type="dict-all-except-first" +) +``` + +--- + +## Step 3: Run a Query + +Execute a query and get the synthesized response: + +```python +# Run a query +result = council.run("What are the 
key factors to consider when choosing a cloud provider for enterprise applications?") + +# Access the final synthesized answer +print(result["final_response"]) + +# View individual member responses +print(result["original_responses"]) + +# See how members ranked each other +print(result["evaluations"]) +``` + +--- + +## Complete Example + +Here's a complete working example: + +```python +from swarms.structs.llm_council import LLMCouncil + +# Step 1: Create the council +council = LLMCouncil( + name="Strategy Council", + description="A council for strategic decision-making", + verbose=True, + output_type="dict-all-except-first" +) + +# Step 2: Run a strategic query +result = council.run( + "Should a B2B SaaS startup prioritize product-led growth or sales-led growth? " + "Consider factors like market size, customer acquisition costs, and scalability." +) + +# Step 3: Process results +print("=" * 50) +print("FINAL SYNTHESIZED ANSWER:") +print("=" * 50) +print(result["final_response"]) +``` + +--- + +## Custom Council Members + +For specialized domains, create custom council members: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +# Create specialized agents +finance_expert = Agent( + agent_name="Finance-Councilor", + system_prompt="You are a financial analyst specializing in market analysis and investment strategies...", + model_name="gpt-4.1", + max_loops=1, +) + +tech_expert = Agent( + agent_name="Technology-Councilor", + system_prompt="You are a technology strategist specializing in digital transformation...", + model_name="gpt-4.1", + max_loops=1, +) + +risk_expert = Agent( + agent_name="Risk-Councilor", + system_prompt="You are a risk management expert specializing in enterprise risk assessment...", + model_name="gpt-4.1", + max_loops=1, +) + +# Create council with custom members +council = LLMCouncil( + council_members=[finance_expert, tech_expert, risk_expert], + chairman_model="gpt-4.1", + 
verbose=True +) + +result = council.run("Evaluate the risk-reward profile of investing in AI infrastructure") +``` + +--- + +## CLI Usage + +Run LLM Council directly from the command line: + +```bash +swarms llm-council --task "What is the best approach to implement microservices architecture?" +``` + +With verbose output: + +```bash +swarms llm-council --task "Analyze the pros and cons of remote work" --verbose +``` + +--- + +## Use Cases + +| Domain | Example Query | +|--------|---------------| +| **Business Strategy** | "Should we expand internationally or focus on domestic growth?" | +| **Technology** | "Which database architecture best suits our high-throughput requirements?" | +| **Finance** | "Evaluate investment opportunities in the renewable energy sector" | +| **Healthcare** | "What treatment approaches should be considered for this patient profile?" | +| **Legal** | "What are the compliance implications of this data processing policy?" | + +--- + +## Next Steps + +- Explore [LLM Council Examples](./llm_council_examples.md) for domain-specific implementations +- Learn about [LLM Council Reference Documentation](../swarms/structs/llm_council.md) for complete API details +- Try the [CLI Reference](../swarms/cli/cli_reference.md) for DevOps integration + diff --git a/docs/examples/marketplace_publishing_quickstart.md b/docs/examples/marketplace_publishing_quickstart.md new file mode 100644 index 00000000..97ec120d --- /dev/null +++ b/docs/examples/marketplace_publishing_quickstart.md @@ -0,0 +1,252 @@ +# Agent Marketplace Publishing: 3-Step Quickstart Guide + +Publish your agents directly to the Swarms Marketplace with minimal configuration. Share your specialized agents with the community and monetize your creations. 
+ +## Overview + +| Feature | Description | +|---------|-------------| +| **Direct Publishing** | Publish agents with a single flag | +| **Minimal Configuration** | Just add use cases, tags, and capabilities | +| **Automatic Integration** | Seamlessly integrates with marketplace API | +| **Monetization Ready** | Set pricing for your agents | + +--- + +## Step 1: Get Your API Key + +Before publishing, you need a Swarms API key: + +1. Visit [swarms.world/platform/api-keys](https://swarms.world/platform/api-keys) +2. Create an account or sign in +3. Generate an API key +4. Set the environment variable: + +```bash +export SWARMS_API_KEY="your-api-key-here" +``` + +Or add to your `.env` file: + +``` +SWARMS_API_KEY=your-api-key-here +``` + +--- + +## Step 2: Configure Your Agent + +Create an agent with publishing configuration: + +```python +from swarms import Agent + +# Create your specialized agent +my_agent = Agent( + agent_name="Market-Analysis-Agent", + agent_description="Expert market analyst specializing in cryptocurrency and stock analysis", + model_name="gpt-4o-mini", + system_prompt="""You are an expert market analyst specializing in: + - Cryptocurrency market analysis + - Stock market trends + - Risk assessment + - Portfolio recommendations + + Provide data-driven insights with confidence levels.""", + max_loops=1, + + # Publishing configuration + publish_to_marketplace=True, + + # Required: Define use cases + use_cases=[ + { + "title": "Cryptocurrency Analysis", + "description": "Analyze crypto market trends and provide investment insights" + }, + { + "title": "Stock Screening", + "description": "Screen stocks based on technical and fundamental criteria" + }, + { + "title": "Portfolio Review", + "description": "Review and optimize investment portfolios" + } + ], + +) +``` + +--- + +## Step 3: Run to Publish + +Simply run the agent to trigger publishing: + +```python +# Running the agent automatically publishes it +result = my_agent.run("Analyze Bitcoin's 
current market position") + +print(result) +print("\n✅ Agent published to marketplace!") +``` + +--- + +## Complete Example + +Here's a complete working example: + +```python +import os +from swarms import Agent + +# Ensure API key is set +if not os.getenv("SWARMS_API_KEY"): + raise ValueError("Please set SWARMS_API_KEY environment variable") + +# Step 1: Create a specialized medical analysis agent +medical_agent = Agent( + agent_name="Blood-Data-Analysis-Agent", + agent_description="Explains and contextualizes common blood test panels with structured insights", + model_name="gpt-4o-mini", + max_loops=1, + + system_prompt="""You are a clinical laboratory data analyst assistant focused on hematology and basic metabolic panels. + +Your goals: +1) Interpret common blood test panels (CBC, CMP/BMP, lipid panel, HbA1c, thyroid panels) +2) Provide structured findings: out-of-range markers, degree of deviation, clinical significance +3) Identify potential confounders (e.g., hemolysis, fasting status, medications) +4) Suggest safe, non-diagnostic next steps + +Reliability and safety: +- This is not medical advice. Do not diagnose or treat. +- Use cautious language with confidence levels (low/medium/high) +- Highlight red-flag combinations that warrant urgent clinical evaluation""", + + # Step 2: Publishing configuration + publish_to_marketplace=True, + + tags=["lab", "hematology", "metabolic", "education"], + capabilities=[ + "panel-interpretation", + "risk-flagging", + "guideline-citation" + ], + + use_cases=[ + { + "title": "Blood Analysis", + "description": "Analyze blood samples and summarize notable findings." + }, + { + "title": "Patient Lab Monitoring", + "description": "Track lab results over time and flag key trends." + }, + { + "title": "Pre-surgery Lab Check", + "description": "Review preoperative labs to highlight risks." 
+ } + ], +) + +# Step 3: Run the agent (this publishes it to the marketplace) +result = medical_agent.run( + task="Analyze this blood sample: Hematology and Basic Metabolic Panel" +) + +print(result) +``` + +--- + +## Required Fields for Publishing + +| Field | Type | Description | +|-------|------|-------------| +| `publish_to_marketplace` | `bool` | Set to `True` to enable publishing | +| `use_cases` | `List[Dict]` | List of use case dictionaries with `title` and `description` | + +### Use Case Format + +```python +use_cases = [ + { + "title": "Use Case Title", + "description": "Detailed description of what the agent does for this use case" + }, + # Add more use cases... +] +``` + +--- + +## Optional: Programmatic Publishing + +You can also publish prompts/agents directly using the utility function: + +```python +from swarms.utils.swarms_marketplace_utils import add_prompt_to_marketplace + +response = add_prompt_to_marketplace( + name="My Custom Agent", + prompt="Your detailed system prompt here...", + description="What this agent does", + use_cases=[ + {"title": "Use Case 1", "description": "Description 1"}, + {"title": "Use Case 2", "description": "Description 2"} + ], + tags="tag1, tag2, tag3", + category="research", + is_free=True, # Set to False for paid agents + price_usd=0.0 # Set price if not free +) + +print(response) +``` + +--- + +## Marketplace Categories + +| Category | Description | +|----------|-------------| +| `research` | Research and analysis agents | +| `content` | Content generation agents | +| `coding` | Programming and development agents | +| `finance` | Financial analysis agents | +| `healthcare` | Medical and health-related agents | +| `education` | Educational and tutoring agents | +| `legal` | Legal research and analysis agents | + +--- + +## Best Practices + +!!! 
tip "Publishing Best Practices" + - **Clear Descriptions**: Write detailed, accurate agent descriptions + - **Multiple Use Cases**: Provide 3-5 distinct use cases + - **Relevant Tags**: Use specific, searchable keywords + - **Test First**: Thoroughly test your agent before publishing + - **System Prompt Quality**: Ensure your system prompt is well-crafted + +!!! warning "Important Notes" + - `use_cases` is **required** when `publish_to_marketplace=True` + - Both `tags` and `capabilities` should be provided for discoverability + - The agent must have a valid `SWARMS_API_KEY` set in the environment + +--- + + +--- + +## Next Steps + +| Next Step | Description | +|-----------|-------------| +| [Swarms Marketplace](https://swarms.world) | Browse published agents | +| [Marketplace Documentation](../swarms_platform/share_and_discover.md) | Learn how to publish and discover agents | +| [Monetization Options](../swarms_platform/monetize.md) | Explore ways to monetize your agent | +| [API Key Management](../swarms_platform/apikeys.md) | Manage your API keys for publishing and access | + diff --git a/docs/examples/multi_agent_architectures_overview.md b/docs/examples/multi_agent_architectures_overview.md new file mode 100644 index 00000000..18ed09f8 --- /dev/null +++ b/docs/examples/multi_agent_architectures_overview.md @@ -0,0 +1,69 @@ +# Multi-Agent Architectures Overview + +Build sophisticated multi-agent systems with Swarms' advanced orchestration patterns. From hierarchical teams to collaborative councils, these examples demonstrate how to coordinate multiple AI agents for complex tasks. 
+ +## What You'll Learn + +| Topic | Description | +|-------|-------------| +| **Hierarchical Swarms** | Director agents coordinating worker agents | +| **Collaborative Systems** | Agents working together through debate and consensus | +| **Workflow Patterns** | Sequential, concurrent, and graph-based execution | +| **Routing Systems** | Intelligent task routing to specialized agents | +| **Group Interactions** | Multi-agent conversations and discussions | + +--- + +## Architecture Examples + +### Hierarchical & Orchestration + +| Example | Description | Link | +|---------|-------------|------| +| **HierarchicalSwarm** | Multi-level agent organization with director and workers | [View Example](../swarms/examples/hierarchical_swarm_example.md) | +| **Hybrid Hierarchical-Cluster Swarm** | Combined hierarchical and cluster patterns | [View Example](../swarms/examples/hhcs_examples.md) | +| **SwarmRouter** | Intelligent routing of tasks to appropriate swarms | [View Example](../swarms/examples/swarm_router.md) | +| **MultiAgentRouter** | Route tasks to specialized individual agents | [View Example](../swarms/examples/multi_agent_router_minimal.md) | + +### Collaborative & Consensus + +| Example | Description | Link | +|---------|-------------|------| +| **LLM Council Quickstart** | Collaborative decision-making with peer review and synthesis | [View Example](./llm_council_quickstart.md) | +| **LLM Council Examples** | Domain-specific council implementations | [View Examples](./llm_council_examples.md) | +| **DebateWithJudge Quickstart** | Two agents debate with judge providing synthesis | [View Example](./debate_quickstart.md) | +| **Mixture of Agents** | Heterogeneous agents for diverse task handling | [View Example](../swarms/examples/moa_example.md) | + +### Workflow Patterns + +| Example | Description | Link | +|---------|-------------|------| +| **GraphWorkflow with Rustworkx** | High-performance graph-based workflows (5-10x faster) | [View 
Example](./graphworkflow_quickstart.md) | +| **Multi-Agentic Patterns with GraphWorkflow** | Advanced graph workflow patterns | [View Example](../swarms/examples/graphworkflow_rustworkx_patterns.md) | +| **SequentialWorkflow** | Linear agent pipelines | [View Example](../swarms/examples/sequential_example.md) | +| **ConcurrentWorkflow** | Parallel agent execution | [View Example](../swarms/examples/concurrent_workflow.md) | + +### Group Communication + +| Example | Description | Link | +|---------|-------------|------| +| **Group Chat** | Multi-agent group conversations | [View Example](../swarms/examples/groupchat_example.md) | +| **Interactive GroupChat** | Real-time interactive agent discussions | [View Example](../swarms/examples/igc_example.md) | + +### Specialized Patterns + +| Example | Description | Link | +|---------|-------------|------| +| **Agents as Tools** | Use agents as callable tools for other agents | [View Example](../swarms/examples/agents_as_tools.md) | +| **Aggregate Responses** | Combine outputs from multiple agents | [View Example](../swarms/examples/aggregate.md) | +| **Unique Swarms** | Experimental and specialized swarm patterns | [View Example](../swarms/examples/unique_swarms.md) | +| **BatchedGridWorkflow (Simple)** | Grid-based batch processing | [View Example](../swarms/examples/batched_grid_simple_example.md) | +| **BatchedGridWorkflow (Advanced)** | Advanced grid-based batch processing | [View Example](../swarms/examples/batched_grid_advanced_example.md) | + +--- + +## Related Resources + +- [Swarm Architectures Concept Guide](../swarms/concept/swarm_architectures.md) +- [Choosing Multi-Agent Architecture](../swarms/concept/how_to_choose_swarms.md) +- [Custom Swarm Development](../swarms/structs/custom_swarm.md) diff --git a/docs/examples/rag_examples_overview.md b/docs/examples/rag_examples_overview.md new file mode 100644 index 00000000..686f0eb7 --- /dev/null +++ b/docs/examples/rag_examples_overview.md @@ -0,0 +1,39 @@ +# RAG 
Examples Overview + +Enhance your agents with Retrieval-Augmented Generation (RAG). Connect to vector databases and knowledge bases to give agents access to your custom data. + +## What You'll Learn + +| Topic | Description | +|-------|-------------| +| **RAG Fundamentals** | Understanding retrieval-augmented generation | +| **Vector Databases** | Connecting to Qdrant, Pinecone, and more | +| **Document Processing** | Ingesting and indexing documents | +| **Semantic Search** | Finding relevant context for queries | + +--- + +## RAG Examples + +| Example | Description | Vector DB | Link | +|---------|-------------|-----------|------| +| **RAG with Qdrant** | Complete RAG implementation with Qdrant | Qdrant | [View Example](../swarms/RAG/qdrant_rag.md) | + +--- + +## Use Cases + +| Use Case | Description | +|----------|-------------| +| **Document Q&A** | Answer questions about your documents | +| **Knowledge Base** | Query internal company knowledge | +| **Research Assistant** | Search through research papers | +| **Code Documentation** | Query codebase documentation | +| **Customer Support** | Access product knowledge | + +--- + +## Related Resources + +- [Memory Documentation](../swarms/memory/diy_memory.md) - Building custom memory +- [Agent Long-term Memory](../swarms/structs/agent.md#long-term-memory) - Agent memory configuration diff --git a/docs/examples/tools_integrations_overview.md b/docs/examples/tools_integrations_overview.md new file mode 100644 index 00000000..0e63661e --- /dev/null +++ b/docs/examples/tools_integrations_overview.md @@ -0,0 +1,55 @@ +# Tools & Integrations Overview + +Extend your agents with powerful integrations. Connect to web search, browser automation, financial data, and Model Context Protocol (MCP) servers. 
+ +## What You'll Learn + +| Topic | Description | +|-------|-------------| +| **Web Search** | Integrate real-time web search capabilities | +| **Browser Automation** | Control web browsers programmatically | +| **Financial Data** | Access stock and market information | +| **Web Scraping** | Extract data from websites | +| **MCP Integration** | Connect to Model Context Protocol servers | + +--- + +## Integration Examples + +### Web Search + +| Integration | Description | Link | +|-------------|-------------|------| +| **Exa Search** | AI-powered web search for agents | [View Example](./exa_search.md) | + +### Browser Automation + +| Integration | Description | Link | +|-------------|-------------|------| +| **Browser Use** | Automated browser control with agents | [View Example](./browser_use.md) | + +### Financial Data + +| Integration | Description | Link | +|-------------|-------------|------| +| **Yahoo Finance** | Stock data, quotes, and market info | [View Example](../swarms/examples/yahoo_finance.md) | + +### Web Scraping + +| Integration | Description | Link | +|-------------|-------------|------| +| **Firecrawl** | AI-powered web scraping | [View Example](../developer_guides/firecrawl.md) | + +### MCP (Model Context Protocol) + +| Integration | Description | Link | +|-------------|-------------|------| +| **Multi-MCP Agent** | Connect agents to multiple MCP servers | [View Example](../swarms/examples/multi_mcp_agent.md) | + +--- + +## Related Resources + +- [Tools Documentation](../swarms/tools/main.md) - Building custom tools +- [MCP Integration Guide](../swarms/structs/agent_mcp.md) - Detailed MCP setup +- [swarms-tools Package](../swarms_tools/overview.md) - Pre-built tool collection diff --git a/docs/llm.txt b/docs/llm.txt index 51f90399..d6674264 100644 --- a/docs/llm.txt +++ b/docs/llm.txt @@ -24130,32 +24130,6 @@ flowchart LR - Maintains strict ordering of task processing -### Linear Swarm -```python -def linear_swarm(agents: AgentListType, tasks: 
List[str], return_full_history: bool = True) -``` - -**Information Flow:** -```mermaid -flowchart LR - Input[Task Input] --> A1 - subgraph Sequential Processing - A1((Agent 1)) --> A2((Agent 2)) - A2 --> A3((Agent 3)) - A3 --> A4((Agent 4)) - A4 --> A5((Agent 5)) - end - A5 --> Output[Final Result] -``` - -**Best Used When:** - -- Tasks need sequential, pipeline-style processing - -- Each agent performs a specific transformation step - -- Order of processing is critical - ### Star Swarm ```python def star_swarm(agents: AgentListType, tasks: List[str], return_full_history: bool = True) @@ -24389,7 +24363,6 @@ flowchart TD ## Common Use Cases 1. **Data Processing Pipelines** - - Linear Swarm - Circular Swarm 2. **Distributed Computing** @@ -24420,7 +24393,6 @@ from swarms.structs.swarming_architectures import ( exponential_swarm, fibonacci_swarm, grid_swarm, - linear_swarm, mesh_swarm, one_to_three, prime_swarm, @@ -24528,29 +24500,6 @@ def run_healthcare_grid_swarm(): print("\nGrid swarm processing completed") print(result) -def run_finance_linear_swarm(): - """Loan approval process using linear swarm""" - print_separator() - print("FINANCE - LOAN APPROVAL PROCESS (Linear Swarm)") - - agents = create_finance_agents()[:3] - tasks = [ - "Review loan application and credit history", - "Assess risk factors and compliance requirements", - "Generate final loan recommendation" - ] - - print("\nTasks:") - for i, task in enumerate(tasks, 1): - print(f"{i}. 
{task}") - - result = linear_swarm(agents, tasks) - print("\nResults:") - for log in result['history']: - print(f"\n{log['agent_name']}:") - print(f"Task: {log['task']}") - print(f"Response: {log['response']}") - def run_healthcare_star_swarm(): """Complex medical case management using star swarm""" print_separator() @@ -24684,7 +24633,6 @@ async def run_all_examples(): # Finance examples run_finance_circular_swarm() - run_finance_linear_swarm() run_finance_mesh_swarm() run_mathematical_finance_swarms() diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 1619374f..b5ecb9a4 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -281,6 +281,7 @@ nav: - MALT: "swarms/structs/malt.md" - Multi-Agent Execution Utilities: "swarms/structs/various_execution_methods.md" - Council of Judges: "swarms/structs/council_of_judges.md" + - LLM Council: "swarms/structs/llm_council.md" - Heavy Swarm: "swarms/structs/heavy_swarm.md" - Social Algorithms: "swarms/structs/social_algorithms.md" @@ -355,9 +356,19 @@ nav: - Paper Implementations: "examples/paper_implementations.md" - Templates & Applications: "examples/templates.md" - Community Resources: "examples/community_resources.md" - - CLI Examples: "swarms/cli/cli_examples.md" + + - CLI Guides: + - Overview: "examples/cli_guides_overview.md" + - CLI Quickstart: "swarms/cli/cli_quickstart.md" + - Creating Agents from CLI: "swarms/cli/cli_agent_guide.md" + - YAML Configuration: "swarms/cli/cli_yaml_guide.md" + - LLM Council CLI: "swarms/cli/cli_llm_council_guide.md" + - Heavy Swarm CLI: "swarms/cli/cli_heavy_swarm_guide.md" + - CLI Multi-Agent Commands: "examples/cli_multi_agent_quickstart.md" + - CLI Examples: "swarms/cli/cli_examples.md" - Basic Examples: + - Overview: "examples/basic_examples_overview.md" - Individual Agents: - Basic Agent: "swarms/examples/basic_agent.md" - Tool Usage: @@ -373,6 +384,7 @@ nav: - Agent Output Types: "swarms/examples/agent_output_types.md" - Gradio Chat Interface: "swarms/ui/main.md" - Agent with 
Gemini Nano Banana: "swarms/examples/jarvis_agent.md" + - Agent Marketplace Publishing: "examples/marketplace_publishing_quickstart.md" - LLM Providers: - Language Models: - Overview: "swarms/examples/model_providers.md" @@ -390,7 +402,9 @@ nav: + - Advanced Examples: + - Overview: "examples/multi_agent_architectures_overview.md" - Multi-Agent Architectures: - HierarchicalSwarm Examples: "swarms/examples/hierarchical_swarm_example.md" - Hybrid Hierarchical-Cluster Swarm Example: "swarms/examples/hhcs_examples.md" @@ -399,25 +413,33 @@ nav: - SwarmRouter Example: "swarms/examples/swarm_router.md" - MultiAgentRouter Minimal Example: "swarms/examples/multi_agent_router_minimal.md" - ConcurrentWorkflow Example: "swarms/examples/concurrent_workflow.md" + - Multi-Agentic Patterns with GraphWorkflow: "swarms/examples/graphworkflow_rustworkx_patterns.md" - Mixture of Agents Example: "swarms/examples/moa_example.md" + - LLM Council Examples: "examples/llm_council_examples.md" - Unique Swarms: "swarms/examples/unique_swarms.md" - Agents as Tools: "swarms/examples/agents_as_tools.md" - Aggregate Multi-Agent Responses: "swarms/examples/aggregate.md" - Interactive GroupChat Example: "swarms/examples/igc_example.md" + - LLM Council Quickstart: "examples/llm_council_quickstart.md" + - DebateWithJudge Quickstart: "examples/debate_quickstart.md" + - GraphWorkflow with Rustworkx: "examples/graphworkflow_quickstart.md" - BatchedGridWorkflow Examples: - Simple BatchedGridWorkflow: "swarms/examples/batched_grid_simple_example.md" - Advanced BatchedGridWorkflow: "swarms/examples/batched_grid_advanced_example.md" - - Applications: - - Swarms of Browser Agents: "swarms/examples/swarms_of_browser_agents.md" - - Hiearchical Marketing Team: "examples/marketing_team.md" - - Gold ETF Research with HeavySwarm: "examples/gold_etf_research.md" - - Hiring Swarm: "examples/hiring_swarm.md" - - Advanced Research: "examples/av.md" - - Real Estate Swarm: "examples/realestate_swarm.md" - - Job Finding 
Swarm: "examples/job_finding.md" - - Mergers & Aquisition (M&A) Advisory Swarm: "examples/ma_swarm.md" + + - Applications: + - Overview: "examples/applications_overview.md" + - Swarms of Browser Agents: "swarms/examples/swarms_of_browser_agents.md" + - Hiearchical Marketing Team: "examples/marketing_team.md" + - Gold ETF Research with HeavySwarm: "examples/gold_etf_research.md" + - Hiring Swarm: "examples/hiring_swarm.md" + - Advanced Research: "examples/av.md" + - Real Estate Swarm: "examples/realestate_swarm.md" + - Job Finding Swarm: "examples/job_finding.md" + - Mergers & Aquisition (M&A) Advisory Swarm: "examples/ma_swarm.md" - Tools & Integrations: + - Overview: "examples/tools_integrations_overview.md" - Web Search with Exa: "examples/exa_search.md" - Browser Use: "examples/browser_use.md" - Yahoo Finance: "swarms/examples/yahoo_finance.md" @@ -427,13 +449,16 @@ nav: - Multi-MCP Agent Integration: "swarms/examples/multi_mcp_agent.md" - RAG: + - Overview: "examples/rag_examples_overview.md" - RAG with Qdrant: "swarms/RAG/qdrant_rag.md" - Apps: + - Overview: "examples/apps_examples_overview.md" - Web Scraper Agents: "developer_guides/web_scraper.md" - Smart Database: "examples/smart_database.md" - AOP: + - Overview: "examples/aop_examples_overview.md" - Medical AOP Example: "examples/aop_medical.md" - X402: diff --git a/docs/requirements.txt b/docs/requirements.txt index 4e9c01f7..84bfdcff 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -27,7 +27,7 @@ jinja2~=3.1 markdown~=3.10 mkdocs-material-extensions~=1.3 pygments~=2.19 -pymdown-extensions~=10.16 +pymdown-extensions~=10.18 # Requirements for plugins colorama~=0.4 diff --git a/docs/swarms/cli/cli_agent_guide.md b/docs/swarms/cli/cli_agent_guide.md new file mode 100644 index 00000000..878659d6 --- /dev/null +++ b/docs/swarms/cli/cli_agent_guide.md @@ -0,0 +1,242 @@ +# CLI Agent Guide: Create Agents from Command Line + +Create, configure, and run AI agents directly from your terminal without 
writing Python code. + +## Basic Agent Creation + +### Step 1: Define Your Agent + +Create an agent with required parameters: + +```bash +swarms agent \ + --name "Research-Agent" \ + --description "An AI agent that researches topics and provides summaries" \ + --system-prompt "You are an expert researcher. Provide comprehensive, well-structured summaries with key insights." \ + --task "Research the current state of quantum computing and its applications" +``` + +### Step 2: Customize Model Settings + +Add model configuration options: + +```bash +swarms agent \ + --name "Code-Reviewer" \ + --description "Expert code review assistant" \ + --system-prompt "You are a senior software engineer. Review code for best practices, bugs, and improvements." \ + --task "Review this Python function for efficiency: def fib(n): return fib(n-1) + fib(n-2) if n > 1 else n" \ + --model-name "gpt-4o-mini" \ + --temperature 0.1 \ + --max-loops 3 +``` + +### Step 3: Enable Advanced Features + +Add streaming, dashboard, and autosave: + +```bash +swarms agent \ + --name "Analysis-Agent" \ + --description "Data analysis specialist" \ + --system-prompt "You are a data analyst. Provide detailed statistical analysis and insights." 
\ + --task "Analyze market trends for electric vehicles in 2024" \ + --model-name "gpt-4" \ + --streaming-on \ + --verbose \ + --autosave \ + --saved-state-path "./agent_states/analysis_agent.json" +``` + +--- + +## Complete Parameter Reference + +### Required Parameters + +| Parameter | Description | Example | +|-----------|-------------|---------| +| `--name` | Agent name | `"Research-Agent"` | +| `--description` | Agent description | `"AI research assistant"` | +| `--system-prompt` | Agent's system instructions | `"You are an expert..."` | +| `--task` | Task for the agent | `"Analyze this data"` | + +### Model Parameters + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `--model-name` | `"gpt-4"` | LLM model to use | +| `--temperature` | `None` | Creativity (0.0-2.0) | +| `--max-loops` | `None` | Maximum execution loops | +| `--context-length` | `None` | Context window size | + +### Behavior Parameters + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `--auto-generate-prompt` | `False` | Auto-generate prompts | +| `--dynamic-temperature-enabled` | `False` | Dynamic temperature adjustment | +| `--dynamic-context-window` | `False` | Dynamic context window | +| `--streaming-on` | `False` | Enable streaming output | +| `--verbose` | `False` | Verbose mode | + +### State Management + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `--autosave` | `False` | Enable autosave | +| `--saved-state-path` | `None` | Path to save state | +| `--dashboard` | `False` | Enable dashboard | +| `--return-step-meta` | `False` | Return step metadata | + +### Integration + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `--mcp-url` | `None` | MCP server URL | +| `--user-name` | `None` | Username for agent | +| `--output-type` | `None` | Output format (str, json) | +| `--retry-attempts` | `None` | Retry attempts on failure | + +--- + +## Use Case Examples + 
+### Financial Analyst Agent + +```bash +swarms agent \ + --name "Financial-Analyst" \ + --description "Expert financial analysis and market insights" \ + --system-prompt "You are a CFA-certified financial analyst. Provide detailed market analysis with data-driven insights. Include risk assessments and recommendations." \ + --task "Analyze Apple (AAPL) stock performance and provide investment outlook for Q4 2024" \ + --model-name "gpt-4" \ + --temperature 0.2 \ + --max-loops 5 \ + --verbose +``` + +### Code Generation Agent + +```bash +swarms agent \ + --name "Code-Generator" \ + --description "Expert Python developer and code generator" \ + --system-prompt "You are an expert Python developer. Write clean, efficient, well-documented code following PEP 8 guidelines. Include type hints and docstrings." \ + --task "Create a Python class for managing a task queue with priority scheduling" \ + --model-name "gpt-4" \ + --temperature 0.1 \ + --streaming-on +``` + +### Creative Writing Agent + +```bash +swarms agent \ + --name "Creative-Writer" \ + --description "Professional content writer and storyteller" \ + --system-prompt "You are a professional writer with expertise in engaging content. Write compelling, creative content with strong narrative flow." \ + --task "Write a short story about a scientist who discovers time travel" \ + --model-name "gpt-4" \ + --temperature 0.8 \ + --max-loops 2 +``` + +### Research Summarizer Agent + +```bash +swarms agent \ + --name "Research-Summarizer" \ + --description "Academic research summarization specialist" \ + --system-prompt "You are an academic researcher. Summarize research topics with key findings, methodologies, and implications. Cite sources when available." 
\ + --task "Summarize recent advances in CRISPR gene editing technology" \ + --model-name "gpt-4o-mini" \ + --temperature 0.3 \ + --verbose \ + --autosave +``` + +--- + +## Scripting Examples + +### Bash Script with Multiple Agents + +```bash +#!/bin/bash +# run_agents.sh + +# Research phase +swarms agent \ + --name "Researcher" \ + --description "Research specialist" \ + --system-prompt "You are a researcher. Gather comprehensive information on topics." \ + --task "Research the impact of AI on healthcare" \ + --model-name "gpt-4o-mini" \ + --output-type "json" > research_output.json + +# Analysis phase +swarms agent \ + --name "Analyst" \ + --description "Data analyst" \ + --system-prompt "You are an analyst. Analyze data and provide insights." \ + --task "Analyze the research findings from: $(cat research_output.json)" \ + --model-name "gpt-4o-mini" \ + --output-type "json" > analysis_output.json + +echo "Pipeline complete!" +``` + +### Loop Through Tasks + +```bash +#!/bin/bash +# batch_analysis.sh + +TOPICS=("renewable energy" "electric vehicles" "smart cities" "AI ethics") + +for topic in "${TOPICS[@]}"; do + echo "Analyzing: $topic" + swarms agent \ + --name "Topic-Analyst" \ + --description "Topic analysis specialist" \ + --system-prompt "You are an expert analyst. Provide concise analysis of topics." \ + --task "Analyze current trends in: $topic" \ + --model-name "gpt-4o-mini" \ + >> "analysis_results.txt" + echo "---" >> "analysis_results.txt" +done +``` + +--- + +## Tips and Best Practices + +!!! tip "System Prompt Tips" + - Be specific about the agent's role and expertise + - Include output format preferences + - Specify any constraints or guidelines + +!!! tip "Temperature Settings" + - Use **0.1-0.3** for factual/analytical tasks + - Use **0.5-0.7** for balanced responses + - Use **0.8-1.0** for creative tasks + +!!! 
tip "Performance Optimization" + - Use `gpt-4o-mini` for simpler tasks (faster, cheaper) + - Use `gpt-4` for complex reasoning tasks + - Set appropriate `--max-loops` to control execution time + +!!! warning "Common Issues" + - Ensure API key is set: `export OPENAI_API_KEY="..."` + - Wrap multi-word arguments in quotes + - Use `--verbose` to debug issues + +--- + +## Next Steps + +- [CLI YAML Configuration](./cli_yaml_guide.md) - Run agents from YAML files +- [CLI Multi-Agent Guide](../examples/cli_multi_agent_quickstart.md) - LLM Council and Heavy Swarm +- [CLI Reference](./cli_reference.md) - Complete command documentation + diff --git a/docs/swarms/cli/cli_heavy_swarm_guide.md b/docs/swarms/cli/cli_heavy_swarm_guide.md new file mode 100644 index 00000000..9987b858 --- /dev/null +++ b/docs/swarms/cli/cli_heavy_swarm_guide.md @@ -0,0 +1,262 @@ +# CLI Heavy Swarm Guide: Comprehensive Task Analysis + +Run Heavy Swarm from command line for complex task decomposition and comprehensive analysis with specialized agents. + +## Overview + +Heavy Swarm follows a structured workflow: + +1. **Task Decomposition**: Breaks down tasks into specialized questions +2. **Parallel Execution**: Executes specialized agents in parallel +3. **Result Synthesis**: Integrates and synthesizes results +4. 
**Comprehensive Reporting**: Generates detailed final reports + +--- + +## Basic Usage + +### Step 1: Run a Simple Analysis + +```bash +swarms heavy-swarm --task "Analyze the current state of quantum computing" +``` + +### Step 2: Customize with Options + +```bash +swarms heavy-swarm \ + --task "Research renewable energy market trends" \ + --loops-per-agent 2 \ + --verbose +``` + +### Step 3: Use Custom Models + +```bash +swarms heavy-swarm \ + --task "Analyze cryptocurrency regulation globally" \ + --question-agent-model-name gpt-4 \ + --worker-model-name gpt-4 \ + --loops-per-agent 3 \ + --verbose +``` + +--- + +## Command Options + +| Option | Default | Description | +|--------|---------|-------------| +| `--task` | **Required** | The task to analyze | +| `--loops-per-agent` | 1 | Execution loops per agent | +| `--question-agent-model-name` | gpt-4o-mini | Model for question generation | +| `--worker-model-name` | gpt-4o-mini | Model for worker agents | +| `--random-loops-per-agent` | False | Randomize loops (1-10) | +| `--verbose` | False | Enable detailed output | + +--- + +## Specialized Agents + +Heavy Swarm includes specialized agents for different aspects: + +| Agent | Role | Focus | +|-------|------|-------| +| **Question Agent** | Decomposes tasks | Generates targeted questions | +| **Research Agent** | Gathers information | Fast, trustworthy research | +| **Analysis Agent** | Processes data | Statistical analysis, insights | +| **Writing Agent** | Creates reports | Clear, structured documentation | + +--- + +## Use Case Examples + +### Market Research + +```bash +swarms heavy-swarm \ + --task "Comprehensive market analysis of the electric vehicle industry in North America" \ + --loops-per-agent 3 \ + --question-agent-model-name gpt-4 \ + --worker-model-name gpt-4 \ + --verbose +``` + +### Technology Assessment + +```bash +swarms heavy-swarm \ + --task "Evaluate the technical feasibility and ROI of implementing AI-powered customer service automation" \ + 
--loops-per-agent 2 \ + --verbose +``` + +### Competitive Analysis + +```bash +swarms heavy-swarm \ + --task "Analyze competitive landscape for cloud computing services: AWS vs Azure vs Google Cloud" \ + --loops-per-agent 2 \ + --question-agent-model-name gpt-4 \ + --verbose +``` + +### Investment Research + +```bash +swarms heavy-swarm \ + --task "Research investment opportunities in AI infrastructure companies for 2024-2025" \ + --loops-per-agent 3 \ + --worker-model-name gpt-4 \ + --verbose +``` + +### Policy Analysis + +```bash +swarms heavy-swarm \ + --task "Analyze the impact of proposed AI regulations on tech startups in the United States" \ + --loops-per-agent 2 \ + --verbose +``` + +### Due Diligence + +```bash +swarms heavy-swarm \ + --task "Conduct technology due diligence for acquiring a fintech startup focusing on payment processing" \ + --loops-per-agent 3 \ + --question-agent-model-name gpt-4 \ + --worker-model-name gpt-4 \ + --verbose +``` + +--- + +## Workflow Visualization + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ User Task │ +│ "Analyze the impact of AI on healthcare" │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ Question Agent │ +│ Decomposes task into specialized questions: │ +│ - What are current AI applications in healthcare? │ +│ - What are the regulatory challenges? │ +│ - What is the market size and growth? │ +│ - What are the key players and competitors? 
│ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────┬─────────────┬─────────────┬─────────────┐ +│ Research │ Analysis │ Research │ Writing │ +│ Agent 1 │ Agent │ Agent 2 │ Agent │ +└─────────────┴─────────────┴─────────────┴─────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ Synthesis & Integration │ +│ Combines all agent outputs │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ Comprehensive Report │ +│ - Executive Summary │ +│ - Detailed Findings │ +│ - Analysis & Insights │ +│ - Recommendations │ +└─────────────────────────────────────────────────────────────────┘ +``` + +--- + +## Configuration Recommendations + +### Quick Analysis (Cost-Effective) + +```bash +swarms heavy-swarm \ + --task "Quick overview of [topic]" \ + --loops-per-agent 1 \ + --question-agent-model-name gpt-4o-mini \ + --worker-model-name gpt-4o-mini +``` + +### Standard Research + +```bash +swarms heavy-swarm \ + --task "Detailed analysis of [topic]" \ + --loops-per-agent 2 \ + --verbose +``` + +### Deep Dive (Comprehensive) + +```bash +swarms heavy-swarm \ + --task "Comprehensive research on [topic]" \ + --loops-per-agent 3 \ + --question-agent-model-name gpt-4 \ + --worker-model-name gpt-4 \ + --verbose +``` + +### Exploratory (Variable Depth) + +```bash +swarms heavy-swarm \ + --task "Explore [topic] with varying depth" \ + --random-loops-per-agent \ + --verbose +``` + +--- + +## Best Practices + +!!! tip "Task Formulation" + - Be specific about what you want analyzed + - Include scope and constraints + - Specify desired output format + +!!! tip "Loop Configuration" + - Use `--loops-per-agent 1` for quick overviews + - Use `--loops-per-agent 2-3` for detailed analysis + - Higher loops = more comprehensive but slower + +!!! 
tip "Model Selection" + - Use `gpt-4o-mini` for cost-effective analysis + - Use `gpt-4` for complex, nuanced topics + - Match model to task complexity + +!!! warning "Performance Notes" + - Deep analysis (3+ loops) may take several minutes + - Higher loops increase API costs + - Use `--verbose` to monitor progress + +--- + +## Comparison: LLM Council vs Heavy Swarm + +| Feature | LLM Council | Heavy Swarm | +|---------|-------------|-------------| +| **Focus** | Collaborative decision-making | Comprehensive task analysis | +| **Workflow** | Parallel responses + peer review | Task decomposition + parallel research | +| **Best For** | Questions with multiple viewpoints | Complex research and analysis tasks | +| **Output** | Synthesized consensus | Detailed research report | +| **Speed** | Faster | More thorough but slower | + +--- + +## Next Steps + +- [CLI LLM Council Guide](./cli_llm_council_guide.md) - Collaborative decisions +- [CLI Reference](./cli_reference.md) - Complete command documentation +- [Heavy Swarm Python API](../structs/heavy_swarm.md) - Programmatic usage + diff --git a/docs/swarms/cli/cli_llm_council_guide.md b/docs/swarms/cli/cli_llm_council_guide.md new file mode 100644 index 00000000..baa78fa1 --- /dev/null +++ b/docs/swarms/cli/cli_llm_council_guide.md @@ -0,0 +1,162 @@ +# CLI LLM Council Guide: Collaborative Multi-Agent Decisions + +Run the LLM Council directly from command line for collaborative decision-making with multiple AI agents through peer review and synthesis. + +## Overview + +The LLM Council creates a collaborative environment where: + +1. **Multiple Perspectives**: Each council member (GPT-5.1, Gemini, Claude, Grok) independently responds +2. **Peer Review**: Members evaluate and rank each other's anonymized responses +3. 
**Synthesis**: A Chairman synthesizes the best elements into a final answer + +--- + +## Basic Usage + +### Step 1: Run a Simple Query + +```bash +swarms llm-council --task "What are the best practices for code review?" +``` + +### Step 2: Enable Verbose Output + +```bash +swarms llm-council --task "How should we approach microservices architecture?" --verbose +``` + +### Step 3: Process the Results + +The council returns: +- Individual member responses +- Peer review rankings +- Synthesized final answer + +--- + +## Use Case Examples + +### Strategic Business Decisions + +```bash +swarms llm-council --task "Should our SaaS startup prioritize product-led growth or sales-led growth? Consider market size, CAC, and scalability." +``` + +### Technology Evaluation + +```bash +swarms llm-council --task "Compare Kubernetes vs Docker Swarm for a startup with 10 microservices. Consider cost, complexity, and scalability." +``` + +### Investment Analysis + +```bash +swarms llm-council --task "Evaluate investment opportunities in AI infrastructure companies. Consider market size, competition, and growth potential." +``` + +### Policy Analysis + +```bash +swarms llm-council --task "What are the implications of implementing AI regulation similar to the EU AI Act in the United States?" +``` + +### Research Questions + +```bash +swarms llm-council --task "What are the most promising approaches to achieving AGI? Evaluate different research paradigms." 
+``` + +--- + +## Council Members + +The default council includes: + +| Member | Model | Strengths | +|--------|-------|-----------| +| **GPT-5.1 Councilor** | gpt-5.1 | Analytical, comprehensive | +| **Gemini 3 Pro Councilor** | gemini-3-pro | Concise, well-processed | +| **Claude Sonnet 4.5 Councilor** | claude-sonnet-4.5 | Thoughtful, balanced | +| **Grok-4 Councilor** | grok-4 | Creative, innovative | +| **Chairman** | gpt-5.1 | Synthesizes final answer | + +--- + +## Workflow Visualization + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ User Query │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────┬─────────────┬─────────────┬─────────────┐ +│ GPT-5.1 │ Gemini 3 │ Claude 4.5 │ Grok-4 │ +│ Councilor │ Councilor │ Councilor │ Councilor │ +└─────────────┴─────────────┴─────────────┴─────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ Anonymized Peer Review │ +│ Each member ranks all responses (anonymized) │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ Chairman │ +│ Synthesizes best elements from all responses │ +└─────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ Final Synthesized Answer │ +└─────────────────────────────────────────────────────────────────┘ +``` + +--- + +## Best Practices + +!!! tip "Query Formulation" + - Be specific and detailed in your queries + - Include context and constraints + - Ask for specific types of analysis + +!!! tip "When to Use LLM Council" + - Complex decisions requiring multiple perspectives + - Research questions needing comprehensive analysis + - Strategic planning and evaluation + - Questions with trade-offs to consider + +!!! 
tip "Performance Tips" + - Use `--verbose` for detailed progress tracking + - Expect responses to take 30-60 seconds + - Complex queries may take longer + +!!! warning "Limitations" + - Requires multiple API calls (higher cost) + - Not suitable for simple factual queries + - Response time is longer than single-agent queries + +--- + +## Command Reference + +```bash +swarms llm-council --task "" [--verbose] +``` + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `--task` | string | **Required** | Query for the council | +| `--verbose` | flag | False | Enable detailed output | + +--- + +## Next Steps + +- [CLI Heavy Swarm Guide](./cli_heavy_swarm_guide.md) - Complex task analysis +- [CLI Reference](./cli_reference.md) - Complete command documentation +- [LLM Council Python API](../examples/llm_council_quickstart.md) - Programmatic usage + diff --git a/docs/swarms/cli/cli_quickstart.md b/docs/swarms/cli/cli_quickstart.md new file mode 100644 index 00000000..8676804a --- /dev/null +++ b/docs/swarms/cli/cli_quickstart.md @@ -0,0 +1,115 @@ +# CLI Quickstart: Getting Started in 3 Steps + +Get up and running with the Swarms CLI in minutes. This guide covers installation, setup verification, and running your first commands. + +## Step 1: Install Swarms + +Install the Swarms package which includes the CLI: + +```bash +pip install swarms +``` + +Verify installation: + +```bash +swarms --help +``` + +You should see the Swarms CLI banner with available commands. 
+ +--- + +## Step 2: Configure Environment + +Set up your API keys and workspace: + +```bash +# Set your OpenAI API key (or other provider) +export OPENAI_API_KEY="your-openai-api-key" + +# Optional: Set workspace directory +export WORKSPACE_DIR="./agent_workspace" +``` + +Or create a `.env` file in your project directory: + +``` +OPENAI_API_KEY=your-openai-api-key +WORKSPACE_DIR=./agent_workspace +``` + +Verify your setup: + +```bash +swarms setup-check --verbose +``` + +Expected output: + +``` +🔍 Running Swarms Environment Setup Check + +┌─────────────────────────────────────────────────────────────────────────────┐ +│ Environment Check Results │ +├─────────┬─────────────────────────┬─────────────────────────────────────────┤ +│ Status │ Check │ Details │ +├─────────┼─────────────────────────┼─────────────────────────────────────────┤ +│ ✓ │ Python Version │ Python 3.11.5 │ +│ ✓ │ Swarms Version │ Current version: 8.7.0 │ +│ ✓ │ API Keys │ API keys found: OPENAI_API_KEY │ +│ ✓ │ Dependencies │ All required dependencies available │ +└─────────┴─────────────────────────┴─────────────────────────────────────────┘ +``` + +--- + +## Step 3: Run Your First Command + +Try these commands to verify everything works: + +### View All Features + +```bash +swarms features +``` + +### Create a Simple Agent + +```bash +swarms agent \ + --name "Assistant" \ + --description "A helpful AI assistant" \ + --system-prompt "You are a helpful assistant that provides clear, concise answers." \ + --task "What are the benefits of renewable energy?" \ + --model-name "gpt-4o-mini" +``` + +### Run LLM Council + +```bash +swarms llm-council --task "What are the best practices for code review?" 
+``` + +--- + +## Quick Reference + +| Command | Description | +|---------|-------------| +| `swarms --help` | Show all available commands | +| `swarms features` | Display all CLI features | +| `swarms setup-check` | Verify environment setup | +| `swarms onboarding` | Interactive setup wizard | +| `swarms agent` | Create and run a custom agent | +| `swarms llm-council` | Run collaborative LLM council | +| `swarms heavy-swarm` | Run comprehensive analysis swarm | + +--- + +## Next Steps + +- [CLI Agent Guide](./cli_agent_guide.md) - Create custom agents from CLI +- [CLI Multi-Agent Guide](../examples/cli_multi_agent_quickstart.md) - Run LLM Council and Heavy Swarm +- [CLI Reference](./cli_reference.md) - Complete command documentation + diff --git a/docs/swarms/cli/cli_reference.md b/docs/swarms/cli/cli_reference.md index 7c1ad2e5..b96e8adc 100644 --- a/docs/swarms/cli/cli_reference.md +++ b/docs/swarms/cli/cli_reference.md @@ -5,20 +5,28 @@ The Swarms CLI is a comprehensive command-line interface for managing and execut ## Table of Contents - [Installation](#installation) - - [Basic Usage](#basic-usage) - - [Commands Reference](#commands-reference) - - [Global Arguments](#global-arguments) - - [Command-Specific Arguments](#command-specific-arguments) - + - [run-agents Command](#run-agents-command) + - [load-markdown Command](#load-markdown-command) + - [agent Command](#agent-command) + - [autoswarm Command](#autoswarm-command) + - [setup-check Command](#setup-check-command) + - [llm-council Command](#llm-council-command) + - [heavy-swarm Command](#heavy-swarm-command) + - [features Command](#features-command) - [Error Handling](#error-handling) - - [Examples](#examples) - - [Configuration](#configuration) +- [Advanced Features](#advanced-features) +- [Troubleshooting](#troubleshooting) +- [Integration](#integration) +- [Performance Considerations](#performance-considerations) +- [Security](#security) +- [Command Quick Reference](#command-quick-reference) +- 
[Support](#support) ## Installation @@ -43,6 +51,7 @@ swarms [options] |---------|-------------|-------------------| | `onboarding` | Start interactive onboarding process | None | | `help` | Display help message | None | +| `features` | Display all available features and actions in a comprehensive table | None | | `get-api-key` | Open API key portal in browser | None | | `check-login` | Verify login status and initialize cache | None | | `run-agents` | Execute agents from YAML configuration | `--yaml-file` | @@ -52,6 +61,8 @@ swarms [options] | `book-call` | Schedule strategy session | None | | `autoswarm` | Generate and execute autonomous swarm | `--task`, `--model` | | `setup-check` | Run comprehensive environment setup check | None | +| `llm-council` | Run LLM Council with multiple agents collaborating on a task | `--task` | +| `heavy-swarm` | Run HeavySwarm with specialized agents for complex task analysis | `--task` | ## Global Arguments @@ -221,6 +232,148 @@ swarms setup-check --verbose └─────────────────────────────────────────────────────────────────────────────┘ ``` +### `llm-council` Command + +Run the LLM Council with multiple specialized agents that collaborate, evaluate, and synthesize responses. + +The LLM Council follows a structured workflow: +1. **Independent Responses**: Each council member (GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, Grok-4) independently responds to the query +2. **Peer Review**: All members review and rank each other's anonymized responses +3. 
**Synthesis**: A Chairman agent synthesizes all responses and rankings into a final comprehensive answer + +```bash +swarms llm-council [options] +``` + +#### Required Arguments + +| Argument | Type | Description | +|----------|------|-------------| +| `--task` | `str` | The query or question for the LLM Council to process | + +#### Optional Arguments + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--verbose` | `bool` | `True` | Enable verbose output showing progress and intermediate results | + +**Example:** +```bash +# Basic usage +swarms llm-council --task "What are the best energy ETFs right now?" + +# With verbose output +swarms llm-council --task "What is the best approach to solve this problem?" --verbose +``` + +**How It Works:** + +The LLM Council creates a collaborative environment where: +- **Default Council Members**: GPT-5.1 (analytical), Gemini 3 Pro (concise), Claude Sonnet 4.5 (balanced), Grok-4 (creative) +- **Anonymized Evaluation**: Responses are anonymized before evaluation to ensure honest ranking +- **Cross-Model Evaluation**: Each model evaluates all responses, often selecting other models' responses as superior +- **Final Synthesis**: The Chairman (GPT-5.1 by default) synthesizes the best elements from all responses + +**Use Cases:** +- Complex problem-solving requiring multiple perspectives +- Research questions needing comprehensive analysis +- Decision-making scenarios requiring thorough evaluation +- Content generation with quality assurance + +### `heavy-swarm` Command + +Run HeavySwarm with specialized agents for complex task analysis and decomposition. + +HeavySwarm follows a structured workflow: +1. **Task Decomposition**: Breaks down tasks into specialized questions +2. **Parallel Execution**: Executes specialized agents in parallel +3. **Result Synthesis**: Integrates and synthesizes results +4. **Comprehensive Reporting**: Generates detailed final reports +5. 
**Iterative Refinement**: Optional multi-loop execution for iterative improvement + +```bash +swarms heavy-swarm [options] +``` + +#### Required Arguments + +| Argument | Type | Description | +|----------|------|-------------| +| `--task` | `str` | The task for HeavySwarm to analyze and process | + +#### Optional Arguments + +| Argument | Type | Default | Description | +|----------|------|---------|-------------| +| `--loops-per-agent` | `int` | `1` | Number of execution loops each agent should perform | +| `--question-agent-model-name` | `str` | `"gpt-4o-mini"` | Model name for the question generation agent | +| `--worker-model-name` | `str` | `"gpt-4o-mini"` | Model name for specialized worker agents | +| `--random-loops-per-agent` | `bool` | `False` | Enable random number of loops per agent (1-10 range) | +| `--verbose` | `bool` | `False` | Enable verbose output showing detailed progress | + +**Example:** +```bash +# Basic usage +swarms heavy-swarm --task "Analyze the current market trends for renewable energy" + +# With custom configuration +swarms heavy-swarm \ + --task "Research the best investment strategies for 2024" \ + --loops-per-agent 3 \ + --question-agent-model-name "gpt-4" \ + --worker-model-name "gpt-4" \ + --random-loops-per-agent \ + --verbose +``` + +**Specialized Agent Roles:** + +HeavySwarm includes specialized agents for different aspects of analysis: +- **Research Agent**: Fast, trustworthy, and reproducible research +- **Analysis Agent**: Statistical analysis and validated insights +- **Writing Agent**: Clear, structured documentation +- **Question Agent**: Task decomposition and question generation + +**Use Cases:** +- Complex research tasks requiring multiple perspectives +- Market analysis and financial research +- Technical analysis and evaluation +- Comprehensive report generation +- Multi-faceted problem solving + +### `features` Command + +Display all available CLI features and actions in a comprehensive, formatted table. 
+ +This command provides a quick reference to all available features, their categories, descriptions, command syntax, and key parameters. + +```bash +swarms features +``` + +**No arguments required.** + +**Example:** +```bash +swarms features +``` + +**Output Includes:** +- **Main Features Table**: Complete list of all features with: + - Feature name + - Category (Setup, Auth, Execution, Creation, etc.) + - Description + - Command syntax + - Key parameters +- **Category Summary**: Overview of features grouped by category with counts +- **Usage Tips**: Quick tips for using the CLI effectively + +**Use Cases:** +- Quick reference when exploring CLI capabilities +- Discovering available features +- Understanding command syntax and parameters +- Learning about feature categories + ## Error Handling The CLI provides comprehensive error handling with formatted error messages: @@ -289,6 +442,34 @@ swarms autoswarm \ --model "gpt-4" ``` +### LLM Council Collaboration + +```bash +# Run LLM Council for collaborative problem solving +swarms llm-council \ + --task "What are the best strategies for reducing carbon emissions in manufacturing?" 
\ + --verbose +``` + +### HeavySwarm Complex Analysis + +```bash +# Run HeavySwarm for comprehensive task analysis +swarms heavy-swarm \ + --task "Analyze the impact of AI on the job market in 2024" \ + --loops-per-agent 2 \ + --question-agent-model-name "gpt-4" \ + --worker-model-name "gpt-4" \ + --verbose +``` + +### Viewing All Features + +```bash +# Display all available features +swarms features +``` + ## Configuration ### YAML Configuration Format @@ -386,6 +567,54 @@ Guided setup process including: - Usage examples +### Multi-Agent Collaboration + +The CLI supports advanced multi-agent architectures: + +#### LLM Council + +Collaborative problem-solving with multiple specialized models: + +```bash +swarms llm-council --task "Your question here" +``` + +**Features:** +- Multiple model perspectives (GPT-5.1, Gemini, Claude, Grok) +- Anonymous peer review and ranking +- Synthesized final responses +- Cross-model evaluation + +#### HeavySwarm + +Complex task analysis with specialized agent roles: + +```bash +swarms heavy-swarm --task "Your complex task here" +``` + +**Features:** +- Task decomposition into specialized questions +- Parallel agent execution +- Result synthesis and integration +- Iterative refinement with multiple loops +- Specialized agent roles (Research, Analysis, Writing, Question) + +### Feature Discovery + +Quickly discover all available features: + +```bash +swarms features +``` + +Displays comprehensive tables showing: +- All available commands +- Feature categories +- Command syntax +- Key parameters +- Usage examples + ## Troubleshooting @@ -451,6 +680,8 @@ swarms run-agents --yaml-file agents2.yaml | Model Selection | Choose appropriate models for task complexity | | Context Length | Monitor and optimize input sizes | | Rate Limiting | Respect API provider limits | +| Multi-Agent Execution | LLM Council and HeavySwarm execute agents in parallel for efficiency | +| Loop Configuration | Adjust `--loops-per-agent` based on task complexity 
and time constraints | ## Security @@ -461,6 +692,48 @@ swarms run-agents --yaml-file agents2.yaml | Input Validation | CLI validates all inputs before execution | | Error Sanitization | Sensitive information is not exposed in errors | +## Command Quick Reference + +### Quick Start Commands + +```bash +# Environment setup +swarms setup-check --verbose +swarms onboarding + +# View all features +swarms features + +# Get help +swarms help +``` + +### Agent Commands + +```bash +# Create custom agent +swarms agent --name "Agent" --task "Task" --system-prompt "Prompt" + +# Run agents from YAML +swarms run-agents --yaml-file agents.yaml + +# Load from markdown +swarms load-markdown --markdown-path ./agents/ +``` + +### Multi-Agent Commands + +```bash +# LLM Council +swarms llm-council --task "Your question" + +# HeavySwarm +swarms heavy-swarm --task "Your complex task" --loops-per-agent 2 --verbose + +# Auto-generate swarm +swarms autoswarm --task "Task description" --model "gpt-4" +``` + ## Support For additional support: @@ -470,3 +743,4 @@ For additional support: | **Community** | [Discord](https://discord.gg/EamjgSaEQf) | | **Issues** | [GitHub Issues](https://github.com/kyegomez/swarms/issues) | | **Strategy Sessions**| [Book a Call](https://cal.com/swarms/swarms-strategy-session) | +| **Documentation** | [Full Documentation](https://docs.swarms.world) | diff --git a/docs/swarms/cli/cli_yaml_guide.md b/docs/swarms/cli/cli_yaml_guide.md new file mode 100644 index 00000000..7e76dbee --- /dev/null +++ b/docs/swarms/cli/cli_yaml_guide.md @@ -0,0 +1,320 @@ +# CLI YAML Configuration Guide: Run Agents from Config Files + +Run multiple agents from YAML configuration files for reproducible, version-controlled agent deployments. 
+ +## Basic YAML Configuration + +### Step 1: Create YAML Config File + +Create a file named `agents.yaml`: + +```yaml +agents: + - name: "Research-Agent" + description: "AI research specialist" + model_name: "gpt-4o-mini" + system_prompt: | + You are an expert researcher. + Provide comprehensive, well-structured research summaries. + Include key insights and data points. + temperature: 0.3 + max_loops: 2 + task: "Research current trends in renewable energy" + + - name: "Analysis-Agent" + description: "Data analysis specialist" + model_name: "gpt-4o-mini" + system_prompt: | + You are a data analyst. + Provide detailed statistical analysis and insights. + Use data-driven reasoning. + temperature: 0.2 + max_loops: 3 + task: "Analyze market opportunities in the EV sector" +``` + +### Step 2: Run Agents from YAML + +```bash +swarms run-agents --yaml-file agents.yaml +``` + +### Step 3: View Results + +Results are displayed in the terminal with formatted output for each agent. + +--- + +## Complete YAML Schema + +### Agent Configuration Options + +```yaml +agents: + - name: "Agent-Name" # Required: Agent identifier + description: "Agent description" # Required: What the agent does + model_name: "gpt-4o-mini" # Model to use + system_prompt: "Your instructions" # Agent's system prompt + temperature: 0.5 # Creativity (0.0-2.0) + max_loops: 3 # Maximum execution loops + task: "Task to execute" # Task for this agent + + # Optional settings + context_length: 8192 # Context window size + streaming_on: true # Enable streaming + verbose: true # Verbose output + autosave: true # Auto-save state + saved_state_path: "./states/agent.json" # State file path + output_type: "json" # Output format + retry_attempts: 3 # Retries on failure +``` + +--- + +## Use Case Examples + +### Multi-Agent Research Pipeline + +```yaml +# research_pipeline.yaml +agents: + - name: "Data-Collector" + description: "Collects and organizes research data" + model_name: "gpt-4o-mini" + system_prompt: | + You 
are a research data collector. + Gather comprehensive information on the given topic. + Organize findings into structured categories. + temperature: 0.3 + max_loops: 2 + task: "Collect data on AI applications in healthcare" + + - name: "Trend-Analyst" + description: "Analyzes trends and patterns" + model_name: "gpt-4o-mini" + system_prompt: | + You are a trend analyst. + Identify emerging patterns and trends from data. + Provide statistical insights and projections. + temperature: 0.2 + max_loops: 2 + task: "Analyze AI healthcare adoption trends from 2020-2024" + + - name: "Report-Writer" + description: "Creates comprehensive reports" + model_name: "gpt-4" + system_prompt: | + You are a professional report writer. + Create comprehensive, well-structured reports. + Include executive summaries and key recommendations. + temperature: 0.4 + max_loops: 1 + task: "Write an executive summary on AI in healthcare" +``` + +Run: + +```bash +swarms run-agents --yaml-file research_pipeline.yaml +``` + +### Financial Analysis Team + +```yaml +# financial_team.yaml +agents: + - name: "Market-Analyst" + description: "Analyzes market conditions" + model_name: "gpt-4" + system_prompt: | + You are a CFA-certified market analyst. + Provide detailed market analysis with technical indicators. + Include risk assessments and market outlook. + temperature: 0.2 + max_loops: 3 + task: "Analyze current S&P 500 market conditions" + + - name: "Risk-Assessor" + description: "Evaluates investment risks" + model_name: "gpt-4" + system_prompt: | + You are a risk management specialist. + Evaluate investment risks and provide mitigation strategies. + Use quantitative risk metrics. + temperature: 0.1 + max_loops: 2 + task: "Assess risks in current tech sector investments" + + - name: "Portfolio-Advisor" + description: "Provides portfolio recommendations" + model_name: "gpt-4" + system_prompt: | + You are a portfolio advisor. + Provide asset allocation recommendations. 
+ Consider risk tolerance and market conditions. + temperature: 0.3 + max_loops: 2 + task: "Recommend portfolio adjustments for Q4 2024" +``` + +### Content Creation Pipeline + +```yaml +# content_pipeline.yaml +agents: + - name: "Topic-Researcher" + description: "Researches content topics" + model_name: "gpt-4o-mini" + system_prompt: | + You are a content researcher. + Research topics thoroughly and identify key angles. + Find unique perspectives and data points. + temperature: 0.4 + max_loops: 2 + task: "Research content angles for 'Future of Remote Work'" + + - name: "Content-Writer" + description: "Writes engaging content" + model_name: "gpt-4" + system_prompt: | + You are a professional content writer. + Write engaging, SEO-friendly content. + Use clear structure with headers and bullet points. + temperature: 0.7 + max_loops: 2 + task: "Write a blog post about remote work trends" + + - name: "Editor" + description: "Edits and polishes content" + model_name: "gpt-4o-mini" + system_prompt: | + You are a professional editor. + Review content for clarity, grammar, and style. + Suggest improvements and optimize for readability. + temperature: 0.2 + max_loops: 1 + task: "Edit and polish the blog post for publication" +``` + +--- + +## Advanced Configuration + +### Environment Variables in YAML + +You can reference environment variables: + +```yaml +agents: + - name: "API-Agent" + description: "Agent with API access" + model_name: "${MODEL_NAME:-gpt-4o-mini}" # Default if not set + system_prompt: "You are an API integration specialist." + task: "Test API integration" +``` + +### Multiple Config Files + +Organize agents by purpose: + +```bash +# Run different configurations +swarms run-agents --yaml-file research_agents.yaml +swarms run-agents --yaml-file analysis_agents.yaml +swarms run-agents --yaml-file reporting_agents.yaml +``` + +### Pipeline Script + +```bash +#!/bin/bash +# run_pipeline.sh + +echo "Starting research pipeline..." 
+swarms run-agents --yaml-file configs/research.yaml + +echo "Starting analysis pipeline..." +swarms run-agents --yaml-file configs/analysis.yaml + +echo "Starting reporting pipeline..." +swarms run-agents --yaml-file configs/reporting.yaml + +echo "Pipeline complete!" +``` + +--- + +## Markdown Configuration + +### Alternative: Load from Markdown + +Create agents using markdown with YAML frontmatter: + +```markdown +--- +name: Research Agent +description: AI research specialist +model_name: gpt-4o-mini +temperature: 0.3 +max_loops: 2 +--- + +You are an expert researcher specializing in technology trends. +Provide comprehensive research summaries with: +- Key findings and insights +- Data points and statistics +- Recommendations and implications + +Always cite sources when available and maintain objectivity. +``` + +Load from markdown: + +```bash +# Load single file +swarms load-markdown --markdown-path ./agents/research_agent.md + +# Load directory (concurrent processing) +swarms load-markdown --markdown-path ./agents/ --concurrent +``` + +--- + +## Best Practices + +!!! tip "Configuration Management" + - Version control your YAML files + - Use descriptive agent names + - Document purpose in descriptions + +!!! tip "Template Organization" + ``` + configs/ + ├── research/ + │ ├── tech_research.yaml + │ └── market_research.yaml + ├── analysis/ + │ ├── financial_analysis.yaml + │ └── data_analysis.yaml + └── production/ + └── prod_agents.yaml + ``` + +!!! tip "Testing Configurations" + - Test with `--verbose` flag first + - Use lower `max_loops` for testing + - Start with `gpt-4o-mini` for cost efficiency + +!!! 
warning "Common Pitfalls" + - Ensure proper YAML indentation (2 spaces) + - Quote strings with special characters + - Use `|` for multi-line prompts + +--- + +## Next Steps + +- [CLI Agent Guide](./cli_agent_guide.md) - Create agents from command line +- [CLI Multi-Agent Guide](../examples/cli_multi_agent_quickstart.md) - LLM Council and Heavy Swarm +- [CLI Reference](./cli_reference.md) - Complete command documentation + diff --git a/docs/swarms/concept/swarm_architectures.md b/docs/swarms/concept/swarm_architectures.md index 63bc3da8..226a6b45 100644 --- a/docs/swarms/concept/swarm_architectures.md +++ b/docs/swarms/concept/swarm_architectures.md @@ -32,21 +32,20 @@ Multi-agent architectures leverage these communication patterns to ensure that a | Graph Workflow | Agents collaborate in a directed acyclic graph (DAG) format to manage dependencies and parallel tasks. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/graph_workflow/) | AI-driven software development pipelines, complex project management | | Group Chat | Agents engage in a chat-like interaction to reach decisions collaboratively. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/group_chat/) | Real-time collaborative decision-making, contract negotiations | | Interactive Group Chat | Enhanced group chat with dynamic speaker selection and interaction patterns. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/interactive_groupchat/) | Advanced collaborative decision-making, dynamic team coordination | -| Agent Registry | A centralized registry where agents are stored, retrieved, and invoked dynamically. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/agent_registry/) | Dynamic agent management, evolving recommendation engines | | SpreadSheet | Manages tasks at scale, tracking agent outputs in a structured format like CSV files. 
| [Learn More](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/) | Large-scale marketing analytics, financial audits | | Router | Routes and chooses the architecture based on the task requirements and available agents. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/) | Dynamic task routing, adaptive architecture selection, optimized agent allocation | | Heavy | High-performance architecture for handling intensive computational tasks with multiple agents. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/heavy_swarm/) | Large-scale data processing, intensive computational workflows | -| Deep Research | Specialized architecture for conducting in-depth research tasks across multiple domains. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/deep_research_swarm/) | Academic research, market analysis, comprehensive data investigation | -| De-Hallucination | Architecture designed to reduce and eliminate hallucinations in AI outputs through consensus. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/de_hallucination_swarm/) | Fact-checking, content verification, reliable information generation | | Council as Judge | Multiple agents act as a council to evaluate and judge outputs or decisions. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/council_of_judges/) | Quality assessment, decision validation, peer review processes | | MALT | Specialized architecture for complex language processing tasks across multiple agents. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/malt/) | Natural language processing, translation, content generation | | Majority Voting | Agents vote on decisions with the majority determining the final outcome. 
| [Learn More](https://docs.swarms.world/en/latest/swarms/structs/majorityvoting/) | Democratic decision-making, consensus building, error reduction | | Round Robin | Tasks are distributed cyclically among agents in a rotating order. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/round_robin_swarm/) | Load balancing, fair task distribution, resource optimization | | Auto-Builder | Automatically constructs and configures multi-agent systems based on requirements. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/auto_swarm_builder/) | Dynamic system creation, adaptive architectures, rapid prototyping | | Hybrid Hierarchical Cluster | Combines hierarchical and peer-to-peer communication patterns for complex workflows. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/hhcs/) | Complex enterprise workflows, multi-department coordination | -| Election | Agents participate in democratic voting processes to select leaders or make collective decisions. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/election_swarm/) | Democratic governance, consensus building, leadership selection | -| Dynamic Conversational | Adaptive conversation management with dynamic agent selection and interaction patterns. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/dynamic_conversational_swarm/) | Adaptive chatbots, dynamic customer service, contextual conversations | -| Tree | Hierarchical tree structure for organizing agents in parent-child relationships. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/tree_swarm/) | Organizational hierarchies, decision trees, taxonomic classification | +| Batched Grid Workflow | Executes tasks in a batched grid format, where each agent processes a different task simultaneously in parallel. 
| [Learn More](https://docs.swarms.world/en/latest/swarms/structs/batched_grid_workflow/) | Parallel task processing, batch operations, grid-based task distribution | +| LLM Council | Orchestrates multiple specialized LLM agents to collaboratively answer queries through structured peer review and synthesis. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/llm_council/) | Multi-model evaluation, peer review systems, collaborative AI decision-making | +| Debate with Judge | A debate architecture with Pro and Con agents debating topics, evaluated by a Judge. Supports preset agents, agent lists, or individual configuration for flexible setup. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/debate_with_judge/) | Argument analysis, decision refinement, structured debates, iterative improvement | +| Self MoA Seq | Sequential self-mixture of agents that generates multiple candidate responses and synthesizes them sequentially using a sliding window approach. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/self_moa_seq/) | High-quality response generation, ensemble methods, sequential synthesis | +| Swarm Rearrange | Orchestrates multiple swarms in sequential or parallel flow patterns, providing thread-safe operations for managing swarm execution. | [Learn More](https://docs.swarms.world/en/latest/swarms/structs/swarm_rearrange/) | Multi-swarm coordination, complex workflow orchestration, swarm composition | --- @@ -84,6 +83,7 @@ graph TD A dynamic architecture where agents rearrange themselves based on task requirements and environmental conditions. Agents can adapt their roles, positions, and relationships to optimize performance for different scenarios. **Use Cases:** + - Adaptive manufacturing lines that reconfigure based on product requirements - Dynamic sales territory realignment based on market conditions @@ -123,6 +123,7 @@ graph TD Multiple agents operate independently and simultaneously on different tasks. 
Each agent works on its own task without dependencies on the others. **Use Cases:** + - Tasks that can be processed independently, such as parallel data analysis - Large-scale simulations where multiple scenarios are run simultaneously @@ -204,6 +205,7 @@ graph TD Makes it easy to manage thousands of agents in one place: a CSV file. Initialize any number of agents and run loops of agents on tasks. **Use Cases:** + - Multi-threaded execution: Execute agents on multiple threads - Save agent outputs into CSV file @@ -242,12 +244,52 @@ graph TD --- +### Batched Grid Workflow + +**Overview:** +Multi-agent orchestration pattern that executes tasks in a batched grid format, where each agent processes different tasks simultaneously. Provides structured parallel processing with conversation state management. + +**Use Cases:** + +- Parallel task processing + +- Grid-based agent execution + +- Batch operations + +- Multi-task multi-agent coordination + + +**[Learn More](https://docs.swarms.world/en/latest/swarms/structs/batched_grid_workflow/)** + +```mermaid +graph TD + A[Task Batch] --> B[BatchedGridWorkflow] + B --> C[Initialize Agents] + C --> D[Create Grid] + + D --> E[Agent 1: Task 1] + D --> F[Agent 2: Task 2] + D --> G[Agent N: Task N] + + E --> H[Collect Results] + F --> H + G --> H + + H --> I[Update Conversation] + I --> J[Next Iteration] + J --> D +``` + +--- + ### Mixture of Agents **Overview:** Combines multiple agents with different capabilities and expertise to solve complex problems that require diverse skill sets. **Use Cases:** + - Financial forecasting requiring different analytical approaches - Complex problem-solving needing diverse expertise @@ -282,6 +324,7 @@ graph TD Organizes agents in a directed acyclic graph (DAG) format, enabling complex dependencies and parallel execution paths. 
**Use Cases:** + - AI-driven software development pipelines - Complex project management with dependencies @@ -311,6 +354,7 @@ graph TD Enables agents to engage in chat-like interactions to reach decisions collaboratively through discussion and consensus building. **Use Cases:** + - Real-time collaborative decision-making - Contract negotiations @@ -345,6 +389,7 @@ graph TD Enhanced version of Group Chat with dynamic speaker selection, priority-based communication, and advanced interaction patterns. **Use Cases:** + - Advanced collaborative decision-making - Dynamic team coordination @@ -378,49 +423,13 @@ graph TD --- -### Agent Registry - -**Overview:** -A centralized registry system where agents are stored, retrieved, and invoked dynamically. The registry maintains metadata about agent capabilities, availability, and performance metrics, enabling intelligent agent selection and management. - -**Use Cases:** -- Dynamic agent management in large-scale systems - -- Evolving recommendation engines that adapt agent selection - -- Service discovery in distributed agent systems - - -**[Learn More](https://docs.swarms.world/en/latest/swarms/structs/agent_registry/)** - -```mermaid -graph TD - A[Agent Registration] --> B[Registry Database] - B --> C[Agent Metadata] - C --> D[Capabilities] - C --> E[Performance Metrics] - C --> F[Availability Status] - - G[Task Request] --> H[Registry Query Engine] - H --> I[Agent Discovery] - I --> J[Capability Matching] - J --> K[Agent Selection] - - K --> L[Agent Invocation] - L --> M[Task Execution] - M --> N[Performance Tracking] - N --> O[Registry Update] - O --> B -``` - ---- - ### Router Architecture **Overview:** Intelligently routes tasks to the most appropriate agents or architectures based on task requirements and agent capabilities. 
**Use Cases:** + - Dynamic task routing - Adaptive architecture selection @@ -458,6 +467,7 @@ graph TD High-performance architecture designed for handling intensive computational tasks with multiple agents working on resource-heavy operations. **Use Cases:** + - Large-scale data processing - Intensive computational workflows @@ -493,6 +503,7 @@ graph TD Specialized architecture for conducting comprehensive research tasks across multiple domains with iterative refinement and cross-validation. **Use Cases:** + - Academic research projects - Market analysis and intelligence @@ -528,6 +539,7 @@ graph TD Architecture specifically designed to reduce and eliminate hallucinations in AI outputs through consensus mechanisms and fact-checking protocols. **Use Cases:** + - Fact-checking and verification - Content validation @@ -558,12 +570,52 @@ graph TD --- +### Self MoA Seq + +**Overview:** +Ensemble method that generates multiple candidate responses from a single high-performing model and synthesizes them sequentially using a sliding window approach. Keeps context within bounds while leveraging diversity across samples. + +**Use Cases:** + +- Response synthesis + +- Ensemble methods + +- Sequential aggregation + +- Quality improvement through diversity + + +**[Learn More](https://docs.swarms.world/en/latest/swarms/structs/self_moa_seq/)** + +```mermaid +graph TD + A[Task] --> B[Proposer Agent] + B --> C[Generate Samples] + C --> D[Sample 1] + C --> E[Sample 2] + C --> F[Sample N] + + D --> G[Sliding Window] + E --> G + F --> G + + G --> H[Aggregator Agent] + H --> I[Biased Synthesis] + I --> J{More Iterations?} + J -->|Yes| G + J -->|No| K[Final Output] +``` + +--- + ### Council as Judge **Overview:** Multiple agents act as a council to evaluate, judge, and validate outputs or decisions through collaborative assessment. 
**Use Cases:** + - Quality assessment and validation - Decision validation processes @@ -594,12 +646,97 @@ graph TD --- +### LLM Council + +**Overview:** +Orchestrates multiple specialized LLM agents to collaboratively answer queries through structured peer review and synthesis. Different models evaluate and rank each other's work, often selecting responses from other models as superior. + +**Use Cases:** + +- Multi-model collaboration + +- Peer review processes + +- Model evaluation and synthesis + +- Cross-model consensus building + + +**[Learn More](https://docs.swarms.world/en/latest/swarms/structs/llm_council/)** + +```mermaid +graph TD + A[User Query] --> B[Council Members] + + B --> C[GPT Councilor] + B --> D[Gemini Councilor] + B --> E[Claude Councilor] + B --> F[Grok Councilor] + + C --> G[Responses] + D --> G + E --> G + F --> G + + G --> H[Anonymize & Evaluate] + H --> I[Chairman Synthesis] + I --> J[Final Response] +``` + +--- + +### Debate with Judge + +**Overview:** +Debate architecture with self-refinement through a judge agent, enabling Pro and Con agents to debate a topic with iterative refinement. The judge evaluates arguments and provides synthesis for progressive improvement. Supports preset agents for quick setup, agent lists, or individual agent configuration. 
+ +**Use Cases:** + +- Structured debates + +- Argument evaluation + +- Iterative refinement of positions + +- Multi-perspective analysis + + +**Initialization Options:** + +- `preset_agents=True`: Use built-in optimized agents (simplest) +- `agents=[pro, con, judge]`: Provide a list of 3 agents +- Individual parameters: `pro_agent`, `con_agent`, `judge_agent` + +**[Learn More](https://docs.swarms.world/en/latest/swarms/structs/debate_with_judge/)** + +```mermaid +graph TD + A[Topic] --> B[DebateWithJudge] + B --> C[Pro Agent] + B --> D[Con Agent] + B --> E[Judge Agent] + + C --> F[Pro Argument] + D --> G[Con Argument] + + F --> H[Judge Evaluation] + G --> H + + H --> I[Judge Synthesis] + I --> J{More Loops?} + J -->|Yes| C + J -->|No| K[Final Output] +``` + +--- + ### MALT Architecture **Overview:** Specialized architecture for complex language processing tasks that require coordination between multiple language-focused agents. **Use Cases:** + - Natural language processing pipelines - Translation and localization @@ -637,6 +774,7 @@ graph TD Agents vote on decisions with the majority determining the final outcome, providing democratic decision-making and error reduction through consensus. **Use Cases:** + - Democratic decision-making processes - Consensus building @@ -675,6 +813,7 @@ graph TD Automatically constructs and configures multi-agent systems based on requirements, enabling dynamic system creation and adaptation. **Use Cases:** + - Dynamic system creation - Adaptive architectures @@ -706,12 +845,55 @@ graph TD --- +### Swarm Rearrange + +**Overview:** +Orchestrates multiple swarms in sequential or parallel flow patterns with thread-safe operations and flow validation. Provides comprehensive swarm management and coordination capabilities. 
+ +**Use Cases:** + +- Multi-swarm orchestration + +- Flow pattern management + +- Swarm coordination + +- Sequential and parallel swarm execution + + +**[Learn More](https://docs.swarms.world/en/latest/swarms/structs/swarm_rearrange/)** + +```mermaid +graph TD + A[Swarm Pool] --> B[SwarmRearrange] + B --> C[Flow Pattern] + + C --> D[Sequential Flow] + C --> E[Parallel Flow] + + D --> F[Swarm 1] + F --> G[Swarm 2] + G --> H[Swarm N] + + E --> I[Swarm 1] + E --> J[Swarm 2] + E --> K[Swarm N] + + H --> L[Result Aggregation] + I --> L + J --> L + K --> L +``` + +--- + ### Hybrid Hierarchical Cluster **Overview:** Combines hierarchical and peer-to-peer communication patterns for complex workflows that require both centralized coordination and distributed collaboration. **Use Cases:** + - Complex enterprise workflows - Multi-department coordination @@ -753,6 +935,7 @@ graph TD Agents participate in democratic voting processes to select leaders or make collective decisions. **Use Cases:** + - Democratic governance - Consensus building @@ -794,6 +977,7 @@ graph TD Adaptive conversation management with dynamic agent selection and interaction patterns. **Use Cases:** + - Adaptive chatbots - Dynamic customer service @@ -833,6 +1017,7 @@ graph TD Hierarchical tree structure for organizing agents in parent-child relationships. **Use Cases:** + - Organizational hierarchies - Decision trees diff --git a/docs/swarms/examples/graphworkflow_rustworkx_patterns.md b/docs/swarms/examples/graphworkflow_rustworkx_patterns.md new file mode 100644 index 00000000..5d392c49 --- /dev/null +++ b/docs/swarms/examples/graphworkflow_rustworkx_patterns.md @@ -0,0 +1,1479 @@ +# GraphWorkflow with Rustworkx: Complete Patterns Guide + +A comprehensive guide to implementing various agentic patterns using GraphWorkflow with the rustworkx backend for optimal performance. + +## Table of Contents + +1. [Introduction](#introduction) +2. [Basic Patterns](#basic-patterns) +3. 
[Hierarchical Patterns](#hierarchical-patterns) +4. [Concurrent/Parallel Patterns](#concurrentparallel-patterns) +5. [Majority Voting Patterns](#majority-voting-patterns) +6. [Fan-Out/Fan-In Patterns](#fan-outfan-in-patterns) +7. [Sequential Patterns](#sequential-patterns) +8. [Advanced Patterns](#advanced-patterns) +9. [Performance Optimization](#performance-optimization) + +## Introduction + +GraphWorkflow with rustworkx backend provides a high-performance framework for orchestrating complex multi-agent workflows. This guide demonstrates how to implement various agentic patterns that are commonly used in production systems. + +### Why Rustworkx? + +- **Performance**: 2-10x faster for large graphs (1000+ nodes) +- **Memory Efficiency**: Optimized for large-scale workflows +- **Scalability**: Better performance with complex graph operations +- **API Compatibility**: Drop-in replacement for NetworkX backend + +### Installation + +```bash +pip install rustworkx +``` + +## Basic Patterns + +### Simple Sequential Workflow + +The most basic pattern - agents execute one after another in sequence. 
+ +**Architecture Diagram:** + +```mermaid +graph LR + A[ResearchAgent] --> B[AnalysisAgent] + B --> C[SynthesisAgent] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Create agents +research_agent = Agent( + agent_name="ResearchAgent", + model_name="gpt-4o-mini", + max_loops=1, +) + +analysis_agent = Agent( + agent_name="AnalysisAgent", + model_name="gpt-4o-mini", + max_loops=1, +) + +synthesis_agent = Agent( + agent_name="SynthesisAgent", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Build sequential workflow +workflow = GraphWorkflow( + name="Sequential-Workflow", + backend="rustworkx", + verbose=True, +) + +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) +workflow.add_node(synthesis_agent) + +# Create sequential chain +workflow.add_edge(research_agent, analysis_agent) +workflow.add_edge(analysis_agent, synthesis_agent) + +# Execute +results = workflow.run("Analyze the impact of AI on healthcare") +``` + +**Use Case**: When each agent needs the previous agent's output before proceeding. + +## Hierarchical Patterns + +### Multi-Level Hierarchy + +Hierarchical patterns organize agents into levels, where higher-level agents coordinate lower-level agents. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Executive] --> B[Research-Head] + A --> C[Analysis-Head] + B --> D[Researcher-1] + B --> E[Researcher-2] + C --> F[Analyst-1] + C --> G[Analyst-2] + D --> H[Synthesis-Agent] + E --> H + F --> H + G --> H +``` + +```python +from swarms import Agent, GraphWorkflow + +# Level 1: Executive/Coordinator +executive = Agent( + agent_name="Executive", + agent_description="Coordinates overall strategy", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Level 2: Department Heads +research_head = Agent( + agent_name="Research-Head", + agent_description="Leads research department", + model_name="gpt-4o-mini", + max_loops=1, +) + +analysis_head = Agent( + agent_name="Analysis-Head", + agent_description="Leads analysis department", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Level 3: Specialists +researcher_1 = Agent( + agent_name="Researcher-1", + agent_description="Market research specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +researcher_2 = Agent( + agent_name="Researcher-2", + agent_description="Technical research specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_1 = Agent( + agent_name="Analyst-1", + agent_description="Data analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_2 = Agent( + agent_name="Analyst-2", + agent_description="Financial analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Level 4: Synthesis +synthesis_agent = Agent( + agent_name="Synthesis-Agent", + agent_description="Synthesizes all outputs", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Build hierarchical workflow +workflow = GraphWorkflow( + name="Hierarchical-Workflow", + backend="rustworkx", + verbose=True, +) + +# Add all agents +all_agents = [ + executive, + research_head, + analysis_head, + researcher_1, + researcher_2, + analyst_1, + analyst_2, + synthesis_agent, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Level 1 -> Level 2 
+workflow.add_edge(executive, research_head) +workflow.add_edge(executive, analysis_head) + +# Level 2 -> Level 3 +workflow.add_edges_from_source( + research_head, + [researcher_1, researcher_2], +) + +workflow.add_edges_from_source( + analysis_head, + [analyst_1, analyst_2], +) + +# Level 3 -> Level 4 (convergence) +workflow.add_edges_to_target( + [researcher_1, researcher_2, analyst_1, analyst_2], + synthesis_agent, +) + +# Execute +results = workflow.run("Conduct a comprehensive market analysis") +``` + +**Use Case**: Organizational structures, multi-level decision making, hierarchical data processing. + +### Tree Structure Hierarchy + +A tree-like hierarchy where one root agent branches into multiple specialized branches. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Root-Coordinator] --> B[Tech-Branch-Head] + A --> C[Business-Branch-Head] + B --> D[Tech-Specialist-1] + B --> E[Tech-Specialist-2] + C --> F[Business-Specialist-1] + C --> G[Business-Specialist-2] + D --> H[Final-Synthesis] + E --> H + F --> H + G --> H +``` + +```python +from swarms import Agent, GraphWorkflow + +# Root agent +root_coordinator = Agent( + agent_name="Root-Coordinator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 1: Technical Analysis +tech_branch_head = Agent( + agent_name="Tech-Branch-Head", + model_name="gpt-4o-mini", + max_loops=1, +) + +tech_specialist_1 = Agent( + agent_name="Tech-Specialist-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +tech_specialist_2 = Agent( + agent_name="Tech-Specialist-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 2: Business Analysis +business_branch_head = Agent( + agent_name="Business-Branch-Head", + model_name="gpt-4o-mini", + max_loops=1, +) + +business_specialist_1 = Agent( + agent_name="Business-Specialist-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +business_specialist_2 = Agent( + agent_name="Business-Specialist-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Convergence point 
+final_synthesis = Agent( + agent_name="Final-Synthesis", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Tree-Hierarchy-Workflow", + backend="rustworkx", +) + +all_agents = [ + root_coordinator, + tech_branch_head, + tech_specialist_1, + tech_specialist_2, + business_branch_head, + business_specialist_1, + business_specialist_2, + final_synthesis, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Root -> Branch heads +workflow.add_edge(root_coordinator, tech_branch_head) +workflow.add_edge(root_coordinator, business_branch_head) + +# Branch heads -> Specialists +workflow.add_edges_from_source( + tech_branch_head, + [tech_specialist_1, tech_specialist_2], +) + +workflow.add_edges_from_source( + business_branch_head, + [business_specialist_1, business_specialist_2], +) + +# All specialists -> Final synthesis +workflow.add_edges_to_target( + [ + tech_specialist_1, + tech_specialist_2, + business_specialist_1, + business_specialist_2, + ], + final_synthesis, +) + +results = workflow.run("Analyze a technology startup from multiple perspectives") +``` + +## Concurrent/Parallel Patterns + +### Full Parallel Execution + +All agents execute simultaneously without dependencies. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Parallel-Agent-1] --> D[Collector] + B[Parallel-Agent-2] --> D + C[Parallel-Agent-3] --> D +``` + +```python +from swarms import Agent, GraphWorkflow + +# Create independent parallel agents +parallel_agent_1 = Agent( + agent_name="Parallel-Agent-1", + agent_description="Independent analysis 1", + model_name="gpt-4o-mini", + max_loops=1, +) + +parallel_agent_2 = Agent( + agent_name="Parallel-Agent-2", + agent_description="Independent analysis 2", + model_name="gpt-4o-mini", + max_loops=1, +) + +parallel_agent_3 = Agent( + agent_name="Parallel-Agent-3", + agent_description="Independent analysis 3", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Convergence agent +collector = Agent( + agent_name="Collector", + agent_description="Collects all parallel results", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Full-Parallel-Workflow", + backend="rustworkx", +) + +for agent in [parallel_agent_1, parallel_agent_2, parallel_agent_3, collector]: + workflow.add_node(agent) + +# All parallel agents feed into collector +workflow.add_edges_to_target( + [parallel_agent_1, parallel_agent_2, parallel_agent_3], + collector, +) + +results = workflow.run("Analyze three different aspects of renewable energy") +``` + +**Use Case**: Independent analyses, parallel data collection, multi-perspective evaluation. + +### Layer-Based Parallel Execution + +Agents execute in layers, with all agents in a layer running in parallel. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + subgraph Layer1["Layer 1: Data Collection"] + A1[Data-Collector-1] + A2[Data-Collector-2] + A3[Data-Collector-3] + end + subgraph Layer2["Layer 2: Analysis"] + B1[Analyst-1] + B2[Analyst-2] + B3[Analyst-3] + end + subgraph Layer3["Layer 3: Synthesis"] + C[Synthesis] + end + A1 --> B1 + A1 --> B2 + A1 --> B3 + A2 --> B1 + A2 --> B2 + A2 --> B3 + A3 --> B1 + A3 --> B2 + A3 --> B3 + B1 --> C + B2 --> C + B3 --> C +``` + +```python +from swarms import Agent, GraphWorkflow + +# Layer 1: Data Collection (parallel) +data_collector_1 = Agent( + agent_name="Data-Collector-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +data_collector_2 = Agent( + agent_name="Data-Collector-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +data_collector_3 = Agent( + agent_name="Data-Collector-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Layer 2: Analysis (parallel, depends on Layer 1) +analyst_1 = Agent( + agent_name="Analyst-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_2 = Agent( + agent_name="Analyst-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_3 = Agent( + agent_name="Analyst-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Layer 3: Synthesis (depends on Layer 2) +synthesis = Agent( + agent_name="Synthesis", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Layer-Based-Parallel-Workflow", + backend="rustworkx", +) + +all_agents = [ + data_collector_1, + data_collector_2, + data_collector_3, + analyst_1, + analyst_2, + analyst_3, + synthesis, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Layer 1 -> Layer 2: Full mesh connection +workflow.add_parallel_chain( + [data_collector_1, data_collector_2, data_collector_3], + [analyst_1, analyst_2, analyst_3], +) + +# Layer 2 -> Layer 3: Convergence +workflow.add_edges_to_target( + [analyst_1, analyst_2, analyst_3], + synthesis, +) + +results = workflow.run("Process and analyze data in 
parallel layers") +``` + +**Use Case**: Pipeline processing, multi-stage analysis, batch processing workflows. + +## Majority Voting Patterns + +### Simple Majority Vote + +Multiple agents vote on a decision, with a majority vote aggregator. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Voter-1] --> F[Vote-Aggregator] + B[Voter-2] --> F + C[Voter-3] --> F + D[Voter-4] --> F + E[Voter-5] --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Voting agents +voter_1 = Agent( + agent_name="Voter-1", + agent_description="Provides vote/opinion 1", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_2 = Agent( + agent_name="Voter-2", + agent_description="Provides vote/opinion 2", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_3 = Agent( + agent_name="Voter-3", + agent_description="Provides vote/opinion 3", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_4 = Agent( + agent_name="Voter-4", + agent_description="Provides vote/opinion 4", + model_name="gpt-4o-mini", + max_loops=1, +) + +voter_5 = Agent( + agent_name="Voter-5", + agent_description="Provides vote/opinion 5", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Vote aggregator (implements majority voting logic) +vote_aggregator = Agent( + agent_name="Vote-Aggregator", + agent_description="Aggregates votes and determines majority decision", + system_prompt="""You are a vote aggregator. Analyze all the votes/opinions provided + and determine the majority consensus. 
Provide a clear summary of the majority decision.""", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Majority-Voting-Workflow", + backend="rustworkx", +) + +all_agents = [voter_1, voter_2, voter_3, voter_4, voter_5, vote_aggregator] + +for agent in all_agents: + workflow.add_node(agent) + +# All voters -> Aggregator +workflow.add_edges_to_target( + [voter_1, voter_2, voter_3, voter_4, voter_5], + vote_aggregator, +) + +results = workflow.run( + "Should we invest in renewable energy stocks? Provide your vote and reasoning." +) +``` + +**Use Case**: Decision making, consensus building, quality assurance, validation. + +### Weighted Majority Vote + +Similar to simple majority vote but with weighted voters. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Expert-Voter-1
Weight: 2x] --> F[Weighted-Aggregator] + B[Expert-Voter-2<br/>Weight: 2x] --> F + C[Regular-Voter-1<br/>Weight: 1x] --> F + D[Regular-Voter-2<br/>Weight: 1x] --> F + E[Regular-Voter-3
Weight: 1x] --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Expert voters (higher weight) +expert_voter_1 = Agent( + agent_name="Expert-Voter-1", + agent_description="Senior expert with high weight", + model_name="gpt-4o-mini", + max_loops=1, +) + +expert_voter_2 = Agent( + agent_name="Expert-Voter-2", + agent_description="Senior expert with high weight", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Regular voters (standard weight) +regular_voter_1 = Agent( + agent_name="Regular-Voter-1", + agent_description="Regular voter", + model_name="gpt-4o-mini", + max_loops=1, +) + +regular_voter_2 = Agent( + agent_name="Regular-Voter-2", + agent_description="Regular voter", + model_name="gpt-4o-mini", + max_loops=1, +) + +regular_voter_3 = Agent( + agent_name="Regular-Voter-3", + agent_description="Regular voter", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Weighted aggregator +weighted_aggregator = Agent( + agent_name="Weighted-Aggregator", + agent_description="Aggregates votes with expert weighting", + system_prompt="""You are a weighted vote aggregator. Expert voters (Expert-Voter-1, Expert-Voter-2) + have 2x weight compared to regular voters. Analyze all votes and determine the weighted majority decision.""", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Weighted-Majority-Voting-Workflow", + backend="rustworkx", +) + +all_agents = [ + expert_voter_1, + expert_voter_2, + regular_voter_1, + regular_voter_2, + regular_voter_3, + weighted_aggregator, +] + +for agent in all_agents: + workflow.add_node(agent) + +# All voters -> Weighted aggregator +workflow.add_edges_to_target( + [ + expert_voter_1, + expert_voter_2, + regular_voter_1, + regular_voter_2, + regular_voter_3, + ], + weighted_aggregator, +) + +results = workflow.run( + "Evaluate a business proposal. Experts should provide detailed analysis, regular voters provide standard evaluation." 
+) +``` + +## Fan-Out/Fan-In Patterns + +### Simple Fan-Out + +One source agent distributes work to multiple target agents. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Coordinator] --> B[Specialist-1] + A --> C[Specialist-2] + A --> D[Specialist-3] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Source agent +coordinator = Agent( + agent_name="Coordinator", + agent_description="Distributes tasks to specialists", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Target agents (specialists) +specialist_1 = Agent( + agent_name="Specialist-1", + agent_description="Technical specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +specialist_2 = Agent( + agent_name="Specialist-2", + agent_description="Business specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +specialist_3 = Agent( + agent_name="Specialist-3", + agent_description="Financial specialist", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Fan-Out-Workflow", + backend="rustworkx", +) + +for agent in [coordinator, specialist_1, specialist_2, specialist_3]: + workflow.add_node(agent) + +# Fan-out: One source to multiple targets +workflow.add_edges_from_source( + coordinator, + [specialist_1, specialist_2, specialist_3], +) + +results = workflow.run("Analyze a startup from technical, business, and financial perspectives") +``` + +**Use Case**: Task distribution, parallel specialization, workload splitting. + +### Simple Fan-In + +Multiple source agents converge to a single target agent. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Analyst-1] --> D[Synthesis] + B[Analyst-2] --> D + C[Analyst-3] --> D +``` + +```python +from swarms import Agent, GraphWorkflow + +# Source agents +analyst_1 = Agent( + agent_name="Analyst-1", + agent_description="Technical analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_2 = Agent( + agent_name="Analyst-2", + agent_description="Market analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +analyst_3 = Agent( + agent_name="Analyst-3", + agent_description="Financial analyst", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Target agent (synthesis) +synthesis = Agent( + agent_name="Synthesis", + agent_description="Synthesizes all analyses", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Fan-In-Workflow", + backend="rustworkx", +) + +for agent in [analyst_1, analyst_2, analyst_3, synthesis]: + workflow.add_node(agent) + +# Fan-in: Multiple sources to one target +workflow.add_edges_to_target( + [analyst_1, analyst_2, analyst_3], + synthesis, +) + +results = workflow.run("Provide comprehensive analysis from multiple perspectives") +``` + +**Use Case**: Result aggregation, synthesis, convergence of parallel work. + +### Fan-Out Followed by Fan-In + +A common pattern: distribute work, then aggregate results. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Coordinator] --> B[Worker-1] + A --> C[Worker-2] + A --> D[Worker-3] + A --> E[Worker-4] + B --> F[Aggregator] + C --> F + D --> F + E --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Initial coordinator +coordinator = Agent( + agent_name="Coordinator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Parallel workers +worker_1 = Agent( + agent_name="Worker-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +worker_2 = Agent( + agent_name="Worker-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +worker_3 = Agent( + agent_name="Worker-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +worker_4 = Agent( + agent_name="Worker-4", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Final aggregator +aggregator = Agent( + agent_name="Aggregator", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Fan-Out-Fan-In-Workflow", + backend="rustworkx", +) + +all_agents = [ + coordinator, + worker_1, + worker_2, + worker_3, + worker_4, + aggregator, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Fan-out: Coordinator -> Workers +workflow.add_edges_from_source( + coordinator, + [worker_1, worker_2, worker_3, worker_4], +) + +# Fan-in: Workers -> Aggregator +workflow.add_edges_to_target( + [worker_1, worker_2, worker_3, worker_4], + aggregator, +) + +results = workflow.run("Distribute research tasks and synthesize results") +``` + +**Use Case**: Map-reduce patterns, parallel processing with aggregation, distributed analysis. + +## Sequential Patterns + +### Linear Chain + +Simple sequential execution where each agent depends on the previous one. 
+ +**Architecture Diagram:** + +```mermaid +graph LR + A[Agent-1] --> B[Agent-2] + B --> C[Agent-3] + C --> D[Agent-4] + D --> E[Agent-5] +``` + +```python +from swarms import Agent, GraphWorkflow + +agents = [ + Agent( + agent_name=f"Agent-{i+1}", + model_name="gpt-4o-mini", + max_loops=1, + ) + for i in range(5) +] + +workflow = GraphWorkflow( + name="Linear-Chain-Workflow", + backend="rustworkx", +) + +for agent in agents: + workflow.add_node(agent) + +# Create linear chain +for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + +results = workflow.run("Process data through a linear pipeline") +``` + +### Sequential with Branching + +Sequential flow with conditional branching. + +**Architecture Diagram:** + +```mermaid +graph TB + A[Initial] --> B[Branch-1-Agent] + A --> C[Branch-2-Agent] + B --> D[Branch-1-Continuation] + C --> E[Branch-2-Continuation] + D --> F[Final] + E --> F +``` + +```python +from swarms import Agent, GraphWorkflow + +# Initial agent +initial = Agent( + agent_name="Initial", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 1 +branch_1_agent = Agent( + agent_name="Branch-1-Agent", + model_name="gpt-4o-mini", + max_loops=1, +) + +branch_1_continuation = Agent( + agent_name="Branch-1-Continuation", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Branch 2 +branch_2_agent = Agent( + agent_name="Branch-2-Agent", + model_name="gpt-4o-mini", + max_loops=1, +) + +branch_2_continuation = Agent( + agent_name="Branch-2-Continuation", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Convergence +final = Agent( + agent_name="Final", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Sequential-Branching-Workflow", + backend="rustworkx", +) + +all_agents = [ + initial, + branch_1_agent, + branch_1_continuation, + branch_2_agent, + branch_2_continuation, + final, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Initial -> Branches +workflow.add_edge(initial, 
branch_1_agent) +workflow.add_edge(initial, branch_2_agent) + +# Branch continuations +workflow.add_edge(branch_1_agent, branch_1_continuation) +workflow.add_edge(branch_2_agent, branch_2_continuation) + +# Convergence +workflow.add_edge(branch_1_continuation, final) +workflow.add_edge(branch_2_continuation, final) + +results = workflow.run("Process through branching paths") +``` + +## Advanced Patterns + +### Pipeline with Validation + +Sequential pipeline with validation checkpoints. + +**Architecture Diagram:** + +```mermaid +graph LR + A[Data-Collector] --> B[Validator-1] + B --> C[Processor] + C --> D[Validator-2] + D --> E[Finalizer] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Pipeline stages +data_collector = Agent( + agent_name="Data-Collector", + model_name="gpt-4o-mini", + max_loops=1, +) + +validator_1 = Agent( + agent_name="Validator-1", + agent_description="Validates data quality", + model_name="gpt-4o-mini", + max_loops=1, +) + +processor = Agent( + agent_name="Processor", + model_name="gpt-4o-mini", + max_loops=1, +) + +validator_2 = Agent( + agent_name="Validator-2", + agent_description="Validates processing results", + model_name="gpt-4o-mini", + max_loops=1, +) + +finalizer = Agent( + agent_name="Finalizer", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Pipeline-With-Validation", + backend="rustworkx", +) + +for agent in [data_collector, validator_1, processor, validator_2, finalizer]: + workflow.add_node(agent) + +# Sequential pipeline with validation checkpoints +workflow.add_edge(data_collector, validator_1) +workflow.add_edge(validator_1, processor) +workflow.add_edge(processor, validator_2) +workflow.add_edge(validator_2, finalizer) + +results = workflow.run("Process data with quality checkpoints") +``` + +### Multi-Stage Review Process + +Multiple review stages before final approval. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Submitter] --> B[Reviewer-1A] + A --> C[Reviewer-1B] + B --> D[Stage-1-Aggregator] + C --> D + D --> E[Reviewer-2A] + D --> F[Reviewer-2B] + E --> G[Stage-2-Aggregator] + F --> G + G --> H[Approver] +``` + +```python +from swarms import Agent, GraphWorkflow + +# Initial submission +submitter = Agent( + agent_name="Submitter", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Review stage 1 (parallel reviewers) +reviewer_1a = Agent( + agent_name="Reviewer-1A", + model_name="gpt-4o-mini", + max_loops=1, +) + +reviewer_1b = Agent( + agent_name="Reviewer-1B", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Stage 1 aggregator +stage_1_aggregator = Agent( + agent_name="Stage-1-Aggregator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Review stage 2 +reviewer_2a = Agent( + agent_name="Reviewer-2A", + model_name="gpt-4o-mini", + max_loops=1, +) + +reviewer_2b = Agent( + agent_name="Reviewer-2B", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Stage 2 aggregator +stage_2_aggregator = Agent( + agent_name="Stage-2-Aggregator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Final approver +approver = Agent( + agent_name="Approver", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Multi-Stage-Review", + backend="rustworkx", +) + +all_agents = [ + submitter, + reviewer_1a, + reviewer_1b, + stage_1_aggregator, + reviewer_2a, + reviewer_2b, + stage_2_aggregator, + approver, +] + +for agent in all_agents: + workflow.add_node(agent) + +# Stage 1: Parallel review +workflow.add_edge(submitter, reviewer_1a) +workflow.add_edge(submitter, reviewer_1b) +workflow.add_edges_to_target([reviewer_1a, reviewer_1b], stage_1_aggregator) + +# Stage 2: Parallel review +workflow.add_edge(stage_1_aggregator, reviewer_2a) +workflow.add_edge(stage_1_aggregator, reviewer_2b) +workflow.add_edges_to_target([reviewer_2a, reviewer_2b], stage_2_aggregator) + +# Final approval 
+workflow.add_edge(stage_2_aggregator, approver) + +results = workflow.run("Review and approve a proposal through multiple stages") +``` + +### Circular/Iterative Pattern + +Agents form a cycle for iterative refinement. + +**Architecture Diagram:** + +```mermaid +graph LR + A[Agent-1] --> B[Agent-2] + B --> C[Agent-3] + C --> D[Exit-Checker] + D -.->|Iterate| A +``` + +```python +from swarms import Agent, GraphWorkflow + +# Create iterative refinement agents +agent_1 = Agent( + agent_name="Agent-1", + agent_description="First refinement stage", + model_name="gpt-4o-mini", + max_loops=1, +) + +agent_2 = Agent( + agent_name="Agent-2", + agent_description="Second refinement stage", + model_name="gpt-4o-mini", + max_loops=1, +) + +agent_3 = Agent( + agent_name="Agent-3", + agent_description="Third refinement stage", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Exit condition checker +exit_checker = Agent( + agent_name="Exit-Checker", + agent_description="Checks if refinement is complete", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Iterative-Refinement", + backend="rustworkx", + max_loops=3, # Limit iterations +) + +for agent in [agent_1, agent_2, agent_3, exit_checker]: + workflow.add_node(agent) + +# Circular refinement +workflow.add_edge(agent_1, agent_2) +workflow.add_edge(agent_2, agent_3) +workflow.add_edge(agent_3, exit_checker) +# Note: For true iteration, you'd need to add edge back to agent_1 +# This is a simplified example + +results = workflow.run("Iteratively refine a document") +``` + +### Star Pattern + +Central hub agent coordinates with multiple spoke agents. 
+ +**Architecture Diagram:** + +```mermaid +graph TB + A[Hub] --> B[Spoke-1] + A --> C[Spoke-2] + A --> D[Spoke-3] + A --> E[Spoke-4] + B --> A + C --> A + D --> A + E --> A +``` + +```python +from swarms import Agent, GraphWorkflow + +# Central hub +hub = Agent( + agent_name="Hub", + agent_description="Central coordinator", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Spoke agents +spoke_1 = Agent( + agent_name="Spoke-1", + model_name="gpt-4o-mini", + max_loops=1, +) + +spoke_2 = Agent( + agent_name="Spoke-2", + model_name="gpt-4o-mini", + max_loops=1, +) + +spoke_3 = Agent( + agent_name="Spoke-3", + model_name="gpt-4o-mini", + max_loops=1, +) + +spoke_4 = Agent( + agent_name="Spoke-4", + model_name="gpt-4o-mini", + max_loops=1, +) + +workflow = GraphWorkflow( + name="Star-Pattern-Workflow", + backend="rustworkx", +) + +for agent in [hub, spoke_1, spoke_2, spoke_3, spoke_4]: + workflow.add_node(agent) + +# Hub -> Spokes (fan-out) +workflow.add_edges_from_source( + hub, + [spoke_1, spoke_2, spoke_3, spoke_4], +) + +# Spokes -> Hub (fan-in) +workflow.add_edges_to_target( + [spoke_1, spoke_2, spoke_3, spoke_4], + hub, +) + +results = workflow.run("Coordinate work through a central hub") +``` + +## Performance Optimization + +### Compilation Best Practices + +Always compile workflows before execution for optimal performance: + +```python +workflow = GraphWorkflow( + name="Optimized-Workflow", + backend="rustworkx", + auto_compile=True, # Automatic compilation +) + +# Or manually compile +workflow.compile() + +# Check compilation status +status = workflow.get_compilation_status() +print(f"Compiled: {status['is_compiled']}") +print(f"Layers: {status['cached_layers_count']}") +``` + +### Large-Scale Workflow Tips + +For workflows with 100+ agents: + +1. **Use rustworkx backend** for better performance +2. **Compile before execution** to cache topological layers +3. **Use parallel patterns** to maximize throughput +4. 
**Monitor compilation status** to ensure optimization + +```python +# Large-scale workflow example +workflow = GraphWorkflow( + name="Large-Scale-Workflow", + backend="rustworkx", # Essential for large graphs + auto_compile=True, + verbose=True, # Monitor performance +) + +# Add many agents... +# Use parallel patterns for efficiency + +# Check performance +status = workflow.get_compilation_status() +print(f"Max workers: {status['max_workers']}") +print(f"Layers: {status['cached_layers_count']}") +``` + +### Visualization for Debugging + +Visualize workflows to understand structure and optimize: + +```python +# Generate visualization +output_file = workflow.visualize( + format="png", + show_summary=True, # Shows parallel patterns + view=True, +) + +# Or simple text visualization +workflow.visualize_simple() +``` + +## Conclusion + +GraphWorkflow with rustworkx backend provides a powerful framework for implementing complex multi-agent patterns. Key takeaways: + +1. **Choose the right pattern** for your use case +2. **Use rustworkx** for large-scale workflows (100+ nodes) +3. **Leverage parallel patterns** for performance +4. **Compile workflows** before execution +5. **Visualize** to understand and debug workflows + +For more examples, see the [rustworkx examples directory](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent/graphworkflow_examples/rustworkx_examples). 
diff --git a/docs/swarms/examples/hierarchical_swarm_example.md b/docs/swarms/examples/hierarchical_swarm_example.md index 86a5b85f..1f1b10c9 100644 --- a/docs/swarms/examples/hierarchical_swarm_example.md +++ b/docs/swarms/examples/hierarchical_swarm_example.md @@ -215,6 +215,48 @@ result = research_swarm.run(task=task) print(result) ``` +## Visualizing Swarm Hierarchy + +You can visualize the hierarchical structure of your swarm before executing tasks using the `display_hierarchy()` method: + +```python +from swarms import Agent +from swarms.structs.hiearchical_swarm import HierarchicalSwarm + +# Create specialized agents +research_agent = Agent( + agent_name="Research-Analyst", + agent_description="Specialized in comprehensive research and data gathering", + model_name="gpt-4o-mini", +) + +analysis_agent = Agent( + agent_name="Data-Analyst", + agent_description="Expert in data analysis and pattern recognition", + model_name="gpt-4o-mini", +) + +strategy_agent = Agent( + agent_name="Strategy-Consultant", + agent_description="Specialized in strategic planning and recommendations", + model_name="gpt-4o-mini", +) + +# Create hierarchical swarm +swarm = HierarchicalSwarm( + name="Swarms Corporation Operations", + description="Enterprise-grade hierarchical swarm for complex task execution", + agents=[research_agent, analysis_agent, strategy_agent], + max_loops=1, + director_model_name="claude-haiku-4-5", +) + +# Display the hierarchy visualization +swarm.display_hierarchy() +``` + +This will output a visual tree structure showing the Director and all worker agents, making it easy to understand the swarm's organizational structure before executing tasks. + ## Key Takeaways 1. **Agent Specialization**: Create agents with specific, well-defined expertise areas @@ -222,5 +264,6 @@ print(result) 3. **Appropriate Loop Count**: Set `max_loops` based on task complexity (1-3 for most tasks) 4. **Verbose Logging**: Enable verbose mode during development for debugging 5. 
**Context Preservation**: The swarm maintains full conversation history automatically +6. **Hierarchy Visualization**: Use `display_hierarchy()` to visualize swarm structure before execution For more detailed information about the `HierarchicalSwarm` API and advanced usage patterns, see the [main documentation](hierarchical_swarm.md). \ No newline at end of file diff --git a/docs/swarms/examples/unique_swarms.md b/docs/swarms/examples/unique_swarms.md index 00f55e95..af3b30e1 100644 --- a/docs/swarms/examples/unique_swarms.md +++ b/docs/swarms/examples/unique_swarms.md @@ -61,32 +61,6 @@ flowchart LR - Maintains strict ordering of task processing -### Linear Swarm -```python -def linear_swarm(agents: AgentListType, tasks: List[str], return_full_history: bool = True) -``` - -**Information Flow:** -```mermaid -flowchart LR - Input[Task Input] --> A1 - subgraph Sequential Processing - A1((Agent 1)) --> A2((Agent 2)) - A2 --> A3((Agent 3)) - A3 --> A4((Agent 4)) - A4 --> A5((Agent 5)) - end - A5 --> Output[Final Result] -``` - -**Best Used When:** - -- Tasks need sequential, pipeline-style processing - -- Each agent performs a specific transformation step - -- Order of processing is critical - ### Star Swarm ```python def star_swarm(agents: AgentListType, tasks: List[str], return_full_history: bool = True) @@ -320,7 +294,6 @@ flowchart TD ## Common Use Cases 1. **Data Processing Pipelines** - - Linear Swarm - Circular Swarm 2. 
**Distributed Computing** @@ -351,7 +324,6 @@ from swarms.structs.swarming_architectures import ( exponential_swarm, fibonacci_swarm, grid_swarm, - linear_swarm, mesh_swarm, one_to_three, prime_swarm, @@ -459,29 +431,6 @@ def run_healthcare_grid_swarm(): print("\nGrid swarm processing completed") print(result) -def run_finance_linear_swarm(): - """Loan approval process using linear swarm""" - print_separator() - print("FINANCE - LOAN APPROVAL PROCESS (Linear Swarm)") - - agents = create_finance_agents()[:3] - tasks = [ - "Review loan application and credit history", - "Assess risk factors and compliance requirements", - "Generate final loan recommendation" - ] - - print("\nTasks:") - for i, task in enumerate(tasks, 1): - print(f"{i}. {task}") - - result = linear_swarm(agents, tasks) - print("\nResults:") - for log in result['history']: - print(f"\n{log['agent_name']}:") - print(f"Task: {log['task']}") - print(f"Response: {log['response']}") - def run_healthcare_star_swarm(): """Complex medical case management using star swarm""" print_separator() @@ -615,7 +564,6 @@ async def run_all_examples(): # Finance examples run_finance_circular_swarm() - run_finance_linear_swarm() run_finance_mesh_swarm() run_mathematical_finance_swarms() diff --git a/docs/swarms/structs/debate_with_judge.md b/docs/swarms/structs/debate_with_judge.md index 89341f77..716ad90c 100644 --- a/docs/swarms/structs/debate_with_judge.md +++ b/docs/swarms/structs/debate_with_judge.md @@ -29,6 +29,7 @@ graph TD | Judge Agent | An impartial evaluator that analyzes both arguments and provides synthesis | | Iterative Refinement | The process repeats for multiple rounds, each round building upon the judge's previous synthesis | | Progressive Improvement | Each round refines the answer by incorporating feedback and addressing weaknesses | +| Preset Agents | Built-in optimized agents that can be used without manual configuration | ## Class Definition: `DebateWithJudge` @@ -36,12 +37,15 @@ graph TD class 
DebateWithJudge: def __init__( self, - pro_agent: Agent, - con_agent: Agent, - judge_agent: Agent, - max_rounds: int = 3, + pro_agent: Optional[Agent] = None, + con_agent: Optional[Agent] = None, + judge_agent: Optional[Agent] = None, + agents: Optional[List[Agent]] = None, + preset_agents: bool = False, + max_loops: int = 3, output_type: str = "str-all-except-first", verbose: bool = True, + model_name: str = "gpt-4o-mini", ): ``` @@ -49,12 +53,73 @@ class DebateWithJudge: | Parameter | Type | Default | Description | |-----------|------|---------|-------------| -| `pro_agent` | `Agent` | Required | The agent arguing in favor (Pro position) | -| `con_agent` | `Agent` | Required | The agent arguing against (Con position) | -| `judge_agent` | `Agent` | Required | The judge agent that evaluates arguments and provides synthesis | -| `max_rounds` | `int` | `3` | Maximum number of debate rounds to execute | +| `pro_agent` | `Optional[Agent]` | `None` | The agent arguing in favor (Pro position). Not required if using `agents` list or `preset_agents`. | +| `con_agent` | `Optional[Agent]` | `None` | The agent arguing against (Con position). Not required if using `agents` list or `preset_agents`. | +| `judge_agent` | `Optional[Agent]` | `None` | The judge agent that evaluates arguments and provides synthesis. Not required if using `agents` list or `preset_agents`. | +| `agents` | `Optional[List[Agent]]` | `None` | A list of exactly 3 agents in order: `[pro_agent, con_agent, judge_agent]`. Takes precedence over individual agent parameters. | +| `preset_agents` | `bool` | `False` | If `True`, creates default Pro, Con, and Judge agents automatically with optimized system prompts. 
| +| `max_loops` | `int` | `3` | Maximum number of debate rounds to execute | | `output_type` | `str` | `"str-all-except-first"` | Format for the output conversation history | | `verbose` | `bool` | `True` | Whether to enable verbose logging | +| `model_name` | `str` | `"gpt-4o-mini"` | The model name to use for preset agents | + +### Initialization Options + +The `DebateWithJudge` class supports three ways to configure agents: + +#### Option 1: Preset Agents (Simplest) + +Use built-in agents with optimized system prompts for debates: + +```python +from swarms import DebateWithJudge + +# Create debate system with preset agents +debate = DebateWithJudge( + preset_agents=True, + max_loops=3, + model_name="gpt-4o-mini" # Optional: specify model +) + +result = debate.run("Should AI be regulated?") +``` + +#### Option 2: List of Agents + +Provide a list of exactly 3 agents (Pro, Con, Judge): + +```python +from swarms import Agent, DebateWithJudge + +# Create your custom agents +agents = [pro_agent, con_agent, judge_agent] + +# Create debate system with agent list +debate = DebateWithJudge( + agents=agents, + max_loops=3 +) + +result = debate.run("Is remote work better than office work?") +``` + +#### Option 3: Individual Agent Parameters + +Provide each agent separately (original behavior): + +```python +from swarms import Agent, DebateWithJudge + +# Create debate system with individual agents +debate = DebateWithJudge( + pro_agent=my_pro_agent, + con_agent=my_con_agent, + judge_agent=my_judge_agent, + max_loops=3 +) + +result = debate.run("Should we colonize Mars?") +``` ## API Reference @@ -94,7 +159,71 @@ def run(self, task: str) -> Union[str, List, dict] - **Topic Refinement**: Judge's synthesis becomes the topic for the next round 4. 
**Result Formatting**: Returns the final result formatted according to `output_type` -**Example:** +**Example 1: Using Preset Agents (Simplest):** + +```python +from swarms import DebateWithJudge + +# Create the DebateWithJudge system with preset agents +debate_system = DebateWithJudge( + preset_agents=True, + max_loops=3, + output_type="str-all-except-first", + verbose=True, +) + +# Define the debate topic +topic = ( + "Should artificial intelligence be regulated by governments? " + "Discuss the balance between innovation and safety." +) + +# Run the debate +result = debate_system.run(task=topic) +print(result) +``` + +**Example 2: Using Agent List:** + +```python +from swarms import Agent, DebateWithJudge + +# Create custom agents +pro_agent = Agent( + agent_name="Pro-Agent", + system_prompt="You are a skilled debater who argues in favor of positions...", + model_name="gpt-4o-mini", + max_loops=1, +) + +con_agent = Agent( + agent_name="Con-Agent", + system_prompt="You are a skilled debater who argues against positions...", + model_name="gpt-4o-mini", + max_loops=1, +) + +judge_agent = Agent( + agent_name="Judge-Agent", + system_prompt="You are an impartial judge who evaluates debates...", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create the DebateWithJudge system using agent list +debate_system = DebateWithJudge( + agents=[pro_agent, con_agent, judge_agent], + max_loops=3, + output_type="str-all-except-first", + verbose=True, +) + +# Run the debate +result = debate_system.run(task="Should AI be regulated?") +print(result) +``` + +**Example 3: Using Individual Agent Parameters:** ```python from swarms import Agent, DebateWithJudge @@ -143,7 +272,7 @@ debate_system = DebateWithJudge( pro_agent=pro_agent, con_agent=con_agent, judge_agent=judge_agent, - max_rounds=3, + max_loops=3, output_type="str-all-except-first", verbose=True, ) @@ -282,9 +411,10 @@ print(final_answer) | `pro_agent` | `Agent` | The agent arguing in favor (Pro position) | | `con_agent` | 
`Agent` | The agent arguing against (Con position) | | `judge_agent` | `Agent` | The judge agent that evaluates arguments | -| `max_rounds` | `int` | Maximum number of debate rounds | +| `max_loops` | `int` | Maximum number of debate rounds | | `output_type` | `str` | Format for returned results | | `verbose` | `bool` | Whether verbose logging is enabled | +| `model_name` | `str` | Model name used for preset agents | | `conversation` | `Conversation` | Conversation history management object | ## Output Types @@ -301,6 +431,21 @@ The `output_type` parameter controls how the conversation history is formatted: ## Usage Patterns +### Quick Start with Preset Agents + +The fastest way to get started - no agent configuration needed: + +```python +from swarms import DebateWithJudge + +# Create debate system with built-in optimized agents +debate = DebateWithJudge(preset_agents=True, max_loops=3) + +# Run a debate +result = debate.run("Should universal basic income be implemented?") +print(result) +``` + ### Single Topic Debate For focused debate and refinement on a single complex topic: @@ -314,6 +459,26 @@ debate_system.output_type = "dict" result = debate_system.run("Should universal basic income be implemented?") ``` +### Using Agent List + +Pass a list of 3 agents for flexible configuration: + +```python +from swarms import Agent, DebateWithJudge + +# Create or obtain agents from various sources +my_agents = [pro_agent, con_agent, judge_agent] + +# Create debate with agent list +debate = DebateWithJudge( + agents=my_agents, + max_loops=3, + verbose=True +) + +result = debate.run("Is nuclear energy the solution to climate change?") +``` + ### Batch Processing For processing multiple related topics sequentially: @@ -359,14 +524,45 @@ technical_debate = DebateWithJudge( pro_agent=technical_pro, con_agent=technical_con, judge_agent=technical_judge, - max_rounds=5, # More rounds for complex technical topics + max_loops=5, # More rounds for complex technical topics 
verbose=True, ) ``` ## Usage Examples -### Example 1: Policy Debate on AI Regulation +### Example 1: Quick Start with Preset Agents + +The simplest way to use `DebateWithJudge` - no manual agent configuration needed: + +```python +from swarms import DebateWithJudge + +# Create the DebateWithJudge system with preset agents +debate_system = DebateWithJudge( + preset_agents=True, + max_loops=3, + model_name="gpt-4o-mini", # Specify model for preset agents + output_type="str-all-except-first", + verbose=True, +) + +# Define the debate topic +topic = ( + "Should artificial intelligence be regulated by governments? " + "Discuss the balance between innovation and safety." +) + +# Run the debate +result = debate_system.run(task=topic) +print(result) + +# Get the final refined answer +final_answer = debate_system.get_final_answer() +print(final_answer) +``` + +### Example 2: Policy Debate with Custom Agents This example demonstrates using `DebateWithJudge` for a comprehensive policy debate on AI regulation, with multiple rounds of refinement. 
@@ -425,7 +621,7 @@ debate_system = DebateWithJudge( pro_agent=pro_agent, con_agent=con_agent, judge_agent=judge_agent, - max_rounds=3, + max_loops=3, output_type="str-all-except-first", verbose=True, ) @@ -448,7 +644,47 @@ final_answer = debate_system.get_final_answer() print(final_answer) ``` -### Example 2: Technical Architecture Debate with Batch Processing +### Example 3: Using Agent List + +This example demonstrates using the `agents` list parameter to provide agents: + +```python +from swarms import Agent, DebateWithJudge + +# Create your agents +pro = Agent( + agent_name="Microservices-Pro", + system_prompt="You advocate for microservices architecture...", + model_name="gpt-4o-mini", + max_loops=1, +) + +con = Agent( + agent_name="Monolith-Pro", + system_prompt="You advocate for monolithic architecture...", + model_name="gpt-4o-mini", + max_loops=1, +) + +judge = Agent( + agent_name="Architecture-Judge", + system_prompt="You evaluate architecture debates...", + model_name="gpt-4o-mini", + max_loops=1, +) + +# Create debate with agent list +debate = DebateWithJudge( + agents=[pro, con, judge], # Pass as list + max_loops=2, + verbose=True, +) + +result = debate.run("Should a startup use microservices or monolithic architecture?") +print(result) +``` + +### Example 4: Technical Architecture Debate with Batch Processing This example demonstrates using `batched_run` to process multiple technical architecture questions, comparing different approaches to system design. 
@@ -497,7 +733,7 @@ architecture_debate = DebateWithJudge( pro_agent=pro_agent, con_agent=con_agent, judge_agent=judge_agent, - max_rounds=2, # Fewer rounds for more focused technical debates + max_loops=2, # Fewer rounds for more focused technical debates output_type="str-all-except-first", verbose=True, ) @@ -518,7 +754,7 @@ for result in results: print(result) ``` -### Example 3: Business Strategy Debate with Custom Configuration +### Example 5: Business Strategy Debate with Custom Configuration This example demonstrates a business strategy debate with custom agent configurations, multiple rounds, and accessing conversation history. @@ -575,7 +811,7 @@ strategy_debate = DebateWithJudge( pro_agent=pro_agent, con_agent=con_agent, judge_agent=judge_agent, - max_rounds=4, # More rounds for complex strategic discussions + max_loops=4, # More rounds for complex strategic discussions output_type="dict", # Use dict format for structured analysis verbose=True, ) @@ -609,18 +845,27 @@ print(final_answer) ### Agent Configuration !!! tip "Agent Configuration Best Practices" + - **Preset Agents**: Use `preset_agents=True` for quick setup with optimized prompts + - **Custom Agents**: For specialized domains, create custom agents with domain-specific prompts - **Pro Agent**: Should be configured with expertise in the topic area and strong argumentation skills - **Con Agent**: Should be configured to identify weaknesses and provide compelling alternatives - **Judge Agent**: Should be configured with broad expertise and impartial evaluation capabilities - Use appropriate models for the complexity of the debate topic - Consider using more powerful models for the Judge agent -### Round Configuration +### Initialization Strategy + +!!! 
info "Choosing an Initialization Method" + - **`preset_agents=True`**: Best for quick prototyping and general-purpose debates + - **`agents=[...]` list**: Best when you have agents from external sources or dynamic creation + - **Individual parameters**: Best for maximum control and explicit configuration + +### Loop Configuration -!!! note "Round Configuration Tips" - - Use 2-3 rounds for most topics - - Use 4-5 rounds for complex, multi-faceted topics - - More rounds allow for deeper refinement but increase execution time +!!! note "Loop Configuration Tips" + - Use 2-3 loops (`max_loops`) for most topics + - Use 4-5 loops for complex, multi-faceted topics + - More loops allow for deeper refinement but increase execution time - Consider the trade-off between refinement quality and cost ### Output Format Selection @@ -646,25 +891,31 @@ print(final_answer) !!! danger "Common Problems" **Issue**: Agents not following their roles - **Solution**: Ensure system prompts clearly define each agent's role and expertise + **Solution**: Ensure system prompts clearly define each agent's role and expertise. Consider using `preset_agents=True` for well-tested prompts. --- - **Issue**: Judge synthesis not improving over rounds + **Issue**: Judge synthesis not improving over loops - **Solution**: Increase `max_rounds` or improve Judge agent's system prompt to emphasize refinement + **Solution**: Increase `max_loops` or improve Judge agent's system prompt to emphasize refinement --- **Issue**: Debate results are too generic - **Solution**: Use more specific system prompts and provide detailed context in the task + **Solution**: Use more specific system prompts and provide detailed context in the task. Custom agents often produce better domain-specific results. 
--- **Issue**: Execution time is too long - **Solution**: Reduce `max_rounds`, use faster models, or process fewer topics in batch + **Solution**: Reduce `max_loops`, use faster models, or process fewer topics in batch + + --- + + **Issue**: ValueError when initializing + + **Solution**: Ensure you provide one of: (1) all three agents, (2) an agents list with exactly 3 agents, or (3) `preset_agents=True` ## Contributing diff --git a/docs/swarms/structs/graph_workflow.md b/docs/swarms/structs/graph_workflow.md index ef48d8d0..f0182be3 100644 --- a/docs/swarms/structs/graph_workflow.md +++ b/docs/swarms/structs/graph_workflow.md @@ -12,6 +12,7 @@ Key features: |------------------------|-----------------------------------------------------------------------------------------------| | **Agent-based nodes** | Each node represents an agent that can process tasks | | **Directed graph structure** | Edges define the flow of data between agents | +| **Dual backend support** | Choose between NetworkX (compatibility) or Rustworkx (performance) backends | | **Parallel execution** | Multiple agents can run simultaneously within layers | | **Automatic compilation** | Optimizes workflow structure for efficient execution | | **Rich visualization** | Generate visual representations using Graphviz | @@ -25,37 +26,40 @@ graph TB subgraph "GraphWorkflow Architecture" A[GraphWorkflow] --> B[Node Collection] A --> C[Edge Collection] - A --> D[NetworkX Graph] + A --> D[Graph Backend] A --> E[Execution Engine] B --> F[Agent Nodes] C --> G[Directed Edges] - D --> H[Topological Sort] - E --> I[Parallel Execution] - E --> J[Layer Processing] + D --> H[NetworkX Backend] + D --> I[Rustworkx Backend] + D --> J[Topological Sort] + E --> K[Parallel Execution] + E --> L[Layer Processing] subgraph "Node Types" - F --> K[Agent Node] - K --> L[Agent Instance] - K --> M[Node Metadata] + F --> M[Agent Node] + M --> N[Agent Instance] + M --> O[Node Metadata] end subgraph "Edge Types" - G --> N[Simple 
Edge] - G --> O[Fan-out Edge] - G --> P[Fan-in Edge] - G --> Q[Parallel Chain] + G --> P[Simple Edge] + G --> Q[Fan-out Edge] + G --> R[Fan-in Edge] + G --> S[Parallel Chain] end subgraph "Execution Patterns" - I --> R[Thread Pool] - I --> S[Concurrent Futures] - J --> T[Layer-by-layer] - J --> U[Dependency Resolution] + K --> T[Thread Pool] + K --> U[Concurrent Futures] + L --> V[Layer-by-layer] + L --> W[Dependency Resolution] end end ``` + ## Class Reference | Parameter | Type | Description | Default | @@ -71,6 +75,70 @@ graph TB | `task` | `Optional[str]` | The task to be executed by the workflow | `None` | | `auto_compile` | `bool` | Whether to automatically compile the workflow | `True` | | `verbose` | `bool` | Whether to enable detailed logging | `False` | +| `backend` | `str` | Graph backend to use ("networkx" or "rustworkx") | `"networkx"` | + +## Graph Backends + +GraphWorkflow supports two graph backend implementations, each with different performance characteristics: + +### NetworkX Backend (Default) + +The **NetworkX** backend is the default and most widely compatible option. 
It provides: + +| Feature | Description | +|---------------------|---------------------------------------------------------| +| ✅ Full compatibility | Works out of the box with no additional dependencies | +| ✅ Mature ecosystem | Well-tested and stable | +| ✅ Rich features | Comprehensive graph algorithms and operations | +| ✅ Python-native | Pure Python implementation | + +**Use NetworkX when:** + +- You need maximum compatibility + +- Working with small to medium-sized graphs (< 1000 nodes) + +- You want zero additional dependencies + +### Rustworkx Backend (High Performance) + +The **Rustworkx** backend provides significant performance improvements for large graphs: + +| Feature | Description | +|--------------------|-----------------------------------------------------------------| +| ⚡ High performance| Rust-based implementation for faster operations | +| ⚡ Memory efficient| Optimized for large-scale graphs | +| ⚡ Scalable | Better performance with graphs containing 1000+ nodes | +| ⚡ Same API | Drop-in replacement with identical interface | + +**Use Rustworkx when:** + +- Working with large graphs (1000+ nodes) + +- Performance is critical + +- You can install additional dependencies + +**Installation:** +```bash +pip install rustworkx +``` + +**Note:** If rustworkx is not installed and you specify `backend="rustworkx"`, GraphWorkflow will automatically fall back to NetworkX with a warning. + +### Backend Selection + +Both backends implement the same `GraphBackend` interface, ensuring complete API compatibility. You can switch between backends without changing your code: + +```python +# Use NetworkX (default) +workflow = GraphWorkflow(backend="networkx") + +# Use Rustworkx for better performance +workflow = GraphWorkflow(backend="rustworkx") +``` + +The backend choice is transparent to the rest of the API - all methods work identically regardless of which backend is used. 
### Core Methods @@ -455,7 +523,7 @@ Constructs a workflow from a list of agents and connections. | `entry_points` | `List[str]` | List of entry point node IDs | `None` | | `end_points` | `List[str]` | List of end point node IDs | `None` | | `task` | `str` | Task to be executed by the workflow | `None` | -| `**kwargs` | `Any` | Additional keyword arguments | `{}` | +| `**kwargs` | `Any` | Additional keyword arguments (e.g., `backend`, `verbose`, `auto_compile`) | `{}` | **Returns:** @@ -464,6 +532,7 @@ Constructs a workflow from a list of agents and connections. **Example:** ```python +# Using NetworkX backend (default) workflow = GraphWorkflow.from_spec( agents=[agent1, agent2, agent3], edges=[ @@ -473,10 +542,56 @@ workflow = GraphWorkflow.from_spec( ], task="Analyze market data" ) + +# Using Rustworkx backend for better performance +workflow = GraphWorkflow.from_spec( + agents=[agent1, agent2, agent3], + edges=[ + ("agent1", "agent2"), + ("agent2", "agent3"), + ], + task="Analyze market data", + backend="rustworkx" # Specify backend via kwargs +) ``` ## Examples +### Using Rustworkx Backend for Performance + +```python +from swarms import Agent, GraphWorkflow + +# Create agents +research_agent = Agent( + agent_name="ResearchAgent", + model_name="gpt-4", + max_loops=1 +) + +analysis_agent = Agent( + agent_name="AnalysisAgent", + model_name="gpt-4", + max_loops=1 +) + +# Build workflow with rustworkx backend for better performance +workflow = GraphWorkflow( + name="High-Performance-Workflow", + backend="rustworkx" # Use rustworkx backend +) + +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) +workflow.add_edge("ResearchAgent", "AnalysisAgent") + +# Execute - backend is transparent to the API +results = workflow.run("What are the latest trends in AI?") +print(results) +``` + +**Note:** Make sure to install rustworkx first: `pip install rustworkx` + ### Basic Sequential Workflow ```python @@ -667,6 +782,46 @@ loaded_workflow = 
GraphWorkflow.load_from_file( new_results = loaded_workflow.run("Continue with quantum cryptography analysis") ``` +### Large-Scale Workflow with Rustworkx + +```python +from swarms import Agent, GraphWorkflow + +# Create a large workflow with many agents +# Rustworkx backend provides better performance for large graphs +workflow = GraphWorkflow( + name="Large-Scale-Workflow", + backend="rustworkx", # Use rustworkx for better performance + verbose=True +) + +# Create many agents (e.g., for parallel data processing) +agents = [] +for i in range(50): + agent = Agent( + agent_name=f"Processor{i}", + model_name="gpt-4", + max_loops=1 + ) + agents.append(agent) + workflow.add_node(agent) + +# Create complex interconnections +# Rustworkx handles this efficiently +for i in range(0, 50, 10): + source_agents = [f"Processor{j}" for j in range(i, min(i+10, 50))] + target_agents = [f"Processor{j}" for j in range(i+10, min(i+20, 50))] + if target_agents: + workflow.add_parallel_chain(source_agents, target_agents) + +# Compile and execute +workflow.compile() +status = workflow.get_compilation_status() +print(f"Compiled workflow with {status['cached_layers_count']} layers") + +results = workflow.run("Process large dataset in parallel") +``` + ### Advanced Pattern Detection ```python @@ -770,7 +925,8 @@ The `GraphWorkflow` class provides a powerful and flexible framework for orchest |-----------------|--------------------------------------------------------------------------------------------------| | **Scalability** | Supports workflows with hundreds of agents through efficient parallel execution | | **Flexibility** | Multiple connection patterns (sequential, fan-out, fan-in, parallel chains) | -| **Performance** | Automatic compilation and optimization for faster execution | +| **Performance** | Automatic compilation and optimization for faster execution; rustworkx backend for large-scale graphs | +| **Backend Choice** | Choose between NetworkX (compatibility) or Rustworkx 
(performance) based on your needs | | **Visualization** | Rich visual representations for workflow understanding and debugging | | **Persistence** | Complete serialization and deserialization capabilities | | **Error Handling** | Comprehensive error handling and recovery mechanisms | @@ -793,10 +949,28 @@ The `GraphWorkflow` class provides a powerful and flexible framework for orchest |---------------------------------------|------------------------------------------------------------------| | **Use meaningful agent names** | Helps with debugging and visualization | | **Leverage parallel patterns** | Use fan-out and fan-in for better performance | +| **Choose the right backend** | Use rustworkx for large graphs (1000+ nodes), networkx for smaller graphs | | **Compile workflows** | Always compile before execution for optimal performance | | **Monitor execution** | Use verbose mode and status reporting for debugging | | **Save important workflows** | Use serialization for workflow persistence | | **Handle errors gracefully** | Implement proper error handling and recovery | | **Visualize complex workflows** | Use visualization to understand and debug workflows | +### Backend Performance Considerations + +When choosing between NetworkX and Rustworkx backends: + +| Graph Size | Recommended Backend | Reason | +|------------|-------------------|--------| +| < 100 nodes | NetworkX | Minimal overhead, no extra dependencies | +| 100-1000 nodes | NetworkX or Rustworkx | Both perform well, choose based on dependency preferences | +| 1000+ nodes | Rustworkx | Significant performance benefits for large graphs | +| Very large graphs (10k+ nodes) | Rustworkx | Essential for acceptable performance | + +**Performance Tips:** +- Rustworkx provides 2-10x speedup for topological operations on large graphs +- Both backends support the same features and API +- You can switch backends without code changes +- Rustworkx uses less memory for large graphs + The GraphWorkflow system represents 
a significant advancement in multi-agent orchestration, providing the tools needed to build complex, scalable, and maintainable AI workflows. \ No newline at end of file diff --git a/docs/swarms/structs/hierarchical_swarm.md b/docs/swarms/structs/hierarchical_swarm.md index f458ac40..fe689e9e 100644 --- a/docs/swarms/structs/hierarchical_swarm.md +++ b/docs/swarms/structs/hierarchical_swarm.md @@ -35,6 +35,7 @@ The Hierarchical Swarm follows a clear workflow pattern: | **Comprehensive Logging** | Detailed logging for debugging and monitoring | | **Live Streaming** | Real-time streaming callbacks for monitoring agent outputs | | **Token-by-Token Updates** | Watch text formation in real-time as agents generate responses | +| **Hierarchy Visualization** | Visual tree representation of swarm structure with `display_hierarchy()` | ## Constructor @@ -70,6 +71,65 @@ Initializes a new HierarchicalSwarm instance. ## Core Methods +### `display_hierarchy()` + +Displays a visual tree representation of the hierarchical swarm structure, showing the Director at the top level and all worker agents as children branches. This method uses Rich formatting to create an aesthetically pleasing console output that helps visualize the organizational structure of the swarm. 
+ +#### Returns + +| Type | Description | +|------|-------------| +| `None` | Prints the hierarchy visualization to the console | + +#### Example + +```python +from swarms import Agent +from swarms.structs.hiearchical_swarm import HierarchicalSwarm + +# Create specialized agents +research_agent = Agent( + agent_name="Research-Analyst", + agent_description="Specialized in comprehensive research and data gathering", + model_name="gpt-4o-mini", +) + +analysis_agent = Agent( + agent_name="Data-Analyst", + agent_description="Expert in data analysis and pattern recognition", + model_name="gpt-4o-mini", +) + +strategy_agent = Agent( + agent_name="Strategy-Consultant", + agent_description="Specialized in strategic planning and recommendations", + model_name="gpt-4o-mini", +) + +# Create hierarchical swarm +swarm = HierarchicalSwarm( + name="Swarms Corporation Operations", + description="Enterprise-grade hierarchical swarm for complex task execution", + agents=[research_agent, analysis_agent, strategy_agent], + max_loops=1, + director_model_name="claude-haiku-4-5", +) + +# Display the hierarchy visualization +swarm.display_hierarchy() +``` + +The output will show a visual tree structure like: +``` +┌─ HierarchicalSwarm Hierarchy: Swarms Corporation Operations ─┐ +│ │ +│ 🎯 Director [claude-haiku-4-5] │ +│ ├─ 🤖 Research-Analyst [gpt-4o-mini] - Specialized in... │ +│ ├─ 🤖 Data-Analyst [gpt-4o-mini] - Expert in data... │ +│ └─ 🤖 Strategy-Consultant [gpt-4o-mini] - Specialized... │ +└───────────────────────────────────────────────────────────────┘ +``` + ### `run()` Executes the hierarchical swarm for a specified number of feedback loops, processing the task through multiple iterations for refinement and improvement. 
diff --git a/docs/swarms/structs/llm_council.md b/docs/swarms/structs/llm_council.md new file mode 100644 index 00000000..e1092bb4 --- /dev/null +++ b/docs/swarms/structs/llm_council.md @@ -0,0 +1,534 @@ +# LLM Council Class Documentation + +```mermaid +flowchart TD + A[User Query] --> B[Council Members] + + subgraph "Council Members" + C1[GPT-5.1-Councilor] + C2[Gemini-3-Pro-Councilor] + C3[Claude-Sonnet-4.5-Councilor] + C4[Grok-4-Councilor] + end + + B --> C1 + B --> C2 + B --> C3 + B --> C4 + + C1 --> D[Responses] + C2 --> D + C3 --> D + C4 --> D + + D --> E[Anonymize & Evaluate] + E --> F[Chairman Synthesis] + F --> G[Final Response] + +``` + +The `LLMCouncil` class orchestrates multiple specialized LLM agents to collaboratively answer queries through a structured peer review and synthesis process. Inspired by Andrej Karpathy's llm-council implementation, this architecture demonstrates how different models evaluate and rank each other's work, often selecting responses from other models as superior to their own. + +The class automatically tracks all agent messages in a `Conversation` object and formats output using `history_output_formatter`, providing flexible output formats including dictionaries, lists, strings, JSON, YAML, and more. + +## Workflow Overview + +The LLM Council follows a four-step process: + +1. **Parallel Response Generation**: All council members independently respond to the user query +2. **Anonymization**: Responses are anonymized with random IDs (A, B, C, D, etc.) to ensure objective evaluation +3. **Peer Review**: Each member evaluates and ranks all responses (including potentially their own) +4. 
**Synthesis**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer + +## Class Definition + +### LLMCouncil + +```python +class LLMCouncil: +``` + +### Attributes + +| Attribute | Type | Description | Default | +|-----------|------|-------------|---------| +| `council_members` | `List[Agent]` | List of Agent instances representing council members | `None` (creates default council) | +| `chairman` | `Agent` | The Chairman agent responsible for synthesizing responses | Created during initialization | +| `conversation` | `Conversation` | Conversation object tracking all messages throughout the workflow | Created during initialization | +| `output_type` | `HistoryOutputType` | Format for the output (e.g., "dict", "list", "string", "json", "yaml") | `"dict"` | +| `verbose` | `bool` | Whether to print progress and intermediate results | `True` | + +## Methods + +### `__init__` + +Initializes the LLM Council with council members and a Chairman agent. + +#### Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `id` | `str` | `swarm_id()` | Unique identifier for the council instance. | +| `name` | `str` | `"LLM Council"` | Name of the council instance. | +| `description` | `str` | `"A collaborative council..."` | Description of the council's purpose. | +| `council_members` | `Optional[List[Agent]]` | `None` | List of Agent instances representing council members. If `None`, creates default council with GPT-5.1, Gemini 3 Pro, Claude Sonnet 4.5, and Grok-4. | +| `chairman_model` | `str` | `"gpt-5.1"` | Model name for the Chairman agent that synthesizes responses. | +| `verbose` | `bool` | `True` | Whether to print progress and intermediate results. | +| `output_type` | `HistoryOutputType` | `"dict"` | Format for the output. Options: "list", "dict", "string", "final", "json", "yaml", "xml", "dict-all-except-first", "str-all-except-first", "dict-final", "list-final". 
| + +#### Returns + +| Type | Description | +|------|-------------| +| `LLMCouncil` | Initialized LLM Council instance. | + +#### Description + +Creates an LLM Council instance with specialized council members. If no members are provided, it creates a default council consisting of: + +| Council Member | Description | +|---------------------------------|------------------------------------------| +| **GPT-5.1-Councilor** | Analytical and comprehensive responses | +| **Gemini-3-Pro-Councilor** | Concise and well-processed responses | +| **Claude-Sonnet-4.5-Councilor** | Thoughtful and balanced responses | +| **Grok-4-Councilor** | Creative and innovative responses | + +The Chairman agent is automatically created with a specialized prompt for synthesizing responses. A `Conversation` object is also initialized to track all messages throughout the workflow, including user queries, council member responses, evaluations, and the final synthesis. + +#### Example Usage + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create council with default members +council = LLMCouncil(verbose=True) + +# Create council with custom members and output format +from swarms import Agent +custom_members = [ + Agent(agent_name="Expert-1", model_name="gpt-4", max_loops=1), + Agent(agent_name="Expert-2", model_name="claude-3-opus", max_loops=1), +] +council = LLMCouncil( + council_members=custom_members, + chairman_model="gpt-4", + verbose=True, + output_type="json" # Output as JSON string +) +``` + +--- + +### `run` + +Executes the full LLM Council workflow: parallel responses, anonymization, peer review, and synthesis. All messages are tracked in the conversation object and formatted according to the `output_type` setting. + +#### Parameters + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `query` | `str` | Required | The user's query to process through the council. 
| + +#### Returns + +| Type | Description | +|------|-------------| +| `Union[List, Dict, str]` | Formatted output based on `output_type`. The output contains the conversation history with all messages tracked throughout the workflow. | + +#### Output Format + +The return value depends on the `output_type` parameter set during initialization: + +| `output_type` value | Description | +|---------------------------------|---------------------------------------------------------------------| +| **`"dict"`** (default) | Returns conversation as a dictionary/list of message dictionaries | +| **`"list"`** | Returns conversation as a list of formatted strings (`"role: content"`) | +| **`"string"`** or **`"str"`** | Returns conversation as a formatted string | +| **`"final"`** or **`"last"`** | Returns only the content of the final message (Chairman's response) | +| **`"json"`** | Returns conversation as a JSON string | +| **`"yaml"`** | Returns conversation as a YAML string | +| **`"xml"`** | Returns conversation as an XML string | +| **`"dict-all-except-first"`** | Returns all messages except the first as a dictionary | +| **`"str-all-except-first"`** | Returns all messages except the first as a string | +| **`"dict-final"`** | Returns the final message as a dictionary | +| **`"list-final"`** | Returns the final message as a list | + +#### Conversation Tracking + +All messages are automatically tracked in the conversation object with the following roles: + +- **`"User"`**: The original user query +- **`"{member_name}"`**: Each council member's response (e.g., "GPT-5.1-Councilor") +- **`"{member_name}-Evaluation"`**: Each council member's evaluation (e.g., "GPT-5.1-Councilor-Evaluation") +- **`"Chairman"`**: The final synthesized response + +#### Description + +Executes the complete LLM Council workflow: + +1. **User Query Tracking**: Adds the user query to the conversation as "User" role +2. 
**Dispatch Phase**: Sends the query to all council members in parallel using `run_agents_concurrently` +3. **Collection Phase**: Collects all responses, maps them to member names, and adds each to the conversation with the member's name as the role +4. **Anonymization Phase**: Creates anonymous IDs (A, B, C, D, etc.) and shuffles them to ensure anonymity +5. **Evaluation Phase**: Each member evaluates and ranks all anonymized responses using `batched_grid_agent_execution`, then adds evaluations to the conversation with "{member_name}-Evaluation" as the role +6. **Synthesis Phase**: The Chairman agent synthesizes all responses and evaluations into a final comprehensive answer, which is added to the conversation as "Chairman" role +7. **Output Formatting**: Returns the conversation formatted according to the `output_type` setting using `history_output_formatter` + +The method provides verbose output by default, showing progress at each stage. All messages are tracked in the `conversation` attribute for later access or export. + +#### Example Usage + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create council with default output format (dict) +council = LLMCouncil(verbose=True) + +query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" 
+ +# Run the council - returns formatted conversation based on output_type +result = council.run(query) + +# With default "dict" output_type, result is a list of message dictionaries +# Access conversation messages +for message in result: + print(f"{message['role']}: {message['content'][:200]}...") + +# Access the conversation object directly for more control +conversation = council.conversation +print("\nFinal message:", conversation.get_final_message_content()) + +# Get conversation as string +print("\nFull conversation:") +print(conversation.get_str()) + +# Example with different output types +council_json = LLMCouncil(output_type="json", verbose=False) +result_json = council_json.run(query) # Returns JSON string + +council_final = LLMCouncil(output_type="final", verbose=False) +result_final = council_final.run(query) # Returns only final response string +``` + +--- + +### `_create_default_council` + +Creates default council members with specialized prompts and models. + +#### Parameters + +None (internal method). + +#### Returns + +| Type | Description | +|------|-------------| +| `List[Agent]` | List of Agent instances configured as council members. 
| + +#### Description + +Internal method that creates the default council configuration with four specialized agents: + +- **GPT-5.1-Councilor** (`model_name="gpt-5.1"`): Analytical and comprehensive, temperature=0.7 +- **Gemini-3-Pro-Councilor** (`model_name="gemini-2.5-flash"`): Concise and structured, temperature=0.7 +- **Claude-Sonnet-4.5-Councilor** (`model_name="anthropic/claude-sonnet-4-5"`): Thoughtful and balanced, temperature=0.0 +- **Grok-4-Councilor** (`model_name="x-ai/grok-4"`): Creative and innovative, temperature=0.8 + +Each agent is configured with: + +- Specialized system prompts matching their role +- `max_loops=1` for single-response generation +- `verbose=False` to reduce noise during parallel execution +- Appropriate temperature settings for their style + +--- + +## Helper Functions + +### `get_gpt_councilor_prompt()` + +Returns the system prompt for GPT-5.1 councilor agent. + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string emphasizing analytical thinking and comprehensive coverage. | + +--- + +### `get_gemini_councilor_prompt()` + +Returns the system prompt for Gemini 3 Pro councilor agent. + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string emphasizing concise, well-processed, and structured responses. | + +--- + +### `get_claude_councilor_prompt()` + +Returns the system prompt for Claude Sonnet 4.5 councilor agent. + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string emphasizing thoughtful, balanced, and nuanced responses. | + +--- + +### `get_grok_councilor_prompt()` + +Returns the system prompt for Grok-4 councilor agent. + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string emphasizing creative, innovative, and unique perspectives. | + +--- + +### `get_chairman_prompt()` + +Returns the system prompt for the Chairman agent. 
+ +#### Returns + +| Type | Description | +|------|-------------| +| `str` | System prompt string for synthesizing responses and evaluations into a final answer. | + +--- + +### `get_evaluation_prompt(query, responses, evaluator_name)` + +Creates evaluation prompt for council members to review and rank responses. + +#### Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `query` | `str` | The original user query. | +| `responses` | `Dict[str, str]` | Dictionary mapping anonymous IDs to response texts. | +| `evaluator_name` | `str` | Name of the agent doing the evaluation. | + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | Formatted evaluation prompt string with instructions for ranking responses. | + +--- + +### `get_synthesis_prompt(query, original_responses, evaluations, id_to_member)` + +Creates synthesis prompt for the Chairman. + +#### Parameters + +| Parameter | Type | Description | +|-----------|------|-------------| +| `query` | `str` | Original user query. | +| `original_responses` | `Dict[str, str]` | Dictionary mapping member names to their responses. | +| `evaluations` | `Dict[str, str]` | Dictionary mapping evaluator names to their evaluation texts. | +| `id_to_member` | `Dict[str, str]` | Mapping from anonymous IDs to member names. | + +#### Returns + +| Type | Description | +|------|-------------| +| `str` | Formatted synthesis prompt for the Chairman agent. 
| + +--- + +## Use Cases + +The LLM Council is ideal for scenarios requiring: + +- **Multi-perspective Analysis**: When you need diverse viewpoints on complex topics +- **Quality Assurance**: When peer review and ranking can improve response quality +- **Transparent Decision Making**: When you want to see how different models evaluate each other +- **Synthesis of Expertise**: When combining multiple specialized perspectives is valuable + +### Common Applications + +| Use Case | Description | +|-----------------------|--------------------------------------------------------------------------------------------------| +| **Medical Diagnosis** | Multiple medical AI agents provide diagnoses, evaluate each other, and synthesize recommendations | +| **Financial Analysis**| Different financial experts analyze investments and rank each other's assessments | +| **Legal Analysis** | Multiple legal perspectives evaluate compliance and risk | +| **Business Strategy** | Diverse strategic viewpoints are synthesized into comprehensive plans | +| **Research Analysis** | Multiple research perspectives are combined for thorough analysis | + + +## Examples + +For comprehensive examples demonstrating various use cases, see the [LLM Council Examples](../../../examples/multi_agent/llm_council_examples/) directory: + +- **Medical**: `medical_diagnosis_council.py`, `medical_treatment_council.py` +- **Finance**: `finance_analysis_council.py`, `etf_stock_analysis_council.py` +- **Business**: `business_strategy_council.py`, `marketing_strategy_council.py` +- **Technology**: `technology_assessment_council.py`, `research_analysis_council.py` +- **Legal**: `legal_analysis_council.py` + +### Quick Start Example + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create the council with default output format +council = LLMCouncil(verbose=True) + +# Example query +query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" 
+ +# Run the council - returns formatted conversation +result = council.run(query) + +# With default "dict" output_type, result is a list of message dictionaries +# Print all messages +for message in result: + role = message['role'] + content = message['content'] + print(f"\n{role}:") + print(content[:500] + "..." if len(content) > 500 else content) + +# Access conversation object directly for more options +conversation = council.conversation + +# Get only the final response +print("\n" + "="*80) +print("FINAL RESPONSE") +print("="*80) +print(conversation.get_final_message_content()) + +# Get conversation as formatted string +print("\n" + "="*80) +print("FULL CONVERSATION") +print("="*80) +print(conversation.get_str()) + +# Export conversation to JSON +conversation.export() +``` + +## Customization + +### Creating Custom Council Members + +You can create custom council members with specialized roles: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +# Create custom councilor +custom_agent = Agent( + agent_name="Domain-Expert-Councilor", + agent_description="Specialized domain expert for specific analysis", + system_prompt=get_gpt_councilor_prompt(), # Or create custom prompt + model_name="gpt-4", + max_loops=1, + verbose=False, + temperature=0.7, +) + +# Create council with custom members +council = LLMCouncil( + council_members=[custom_agent, ...], # Add your custom agents + chairman_model="gpt-4", + verbose=True +) +``` + +### Custom Chairman Model + +You can specify a different model for the Chairman: + +```python +council = LLMCouncil( + chairman_model="claude-3-opus", # Use Claude as Chairman + verbose=True +) +``` + +### Custom Output Format + +You can control the output format using the `output_type` parameter: + +```python +# Get output as JSON string +council = LLMCouncil(output_type="json") +result = council.run(query) # Returns JSON string + +# Get only the final response +council = 
LLMCouncil(output_type="final") +result = council.run(query) # Returns only final response string + +# Get as YAML +council = LLMCouncil(output_type="yaml") +result = council.run(query) # Returns YAML string + +# Get as formatted string +council = LLMCouncil(output_type="string") +result = council.run(query) # Returns formatted conversation string +``` + +### Accessing Conversation History + +The conversation object is accessible for advanced usage: + +```python +council = LLMCouncil() +council.run(query) + +# Access conversation directly +conversation = council.conversation + +# Get conversation history +history = conversation.conversation_history + +# Export to file +conversation.export() # Saves to default location + +# Get specific format +json_output = conversation.to_json() +yaml_output = conversation.return_messages_as_dictionary() +``` + +## Architecture Benefits + +1. **Diversity**: Multiple models provide varied perspectives and approaches +2. **Quality Control**: Peer review ensures responses are evaluated objectively +3. **Synthesis**: Chairman combines the best elements from all responses +4. **Transparency**: Full visibility into individual responses and evaluation rankings +5. **Scalability**: Easy to add or remove council members +6. **Flexibility**: Supports custom agents and models +7. **Conversation Tracking**: All messages are automatically tracked in a Conversation object for history and export +8. **Flexible Output**: Multiple output formats supported via `history_output_formatter` (dict, list, string, JSON, YAML, XML, etc.) 
+ +## Performance Considerations + +| Feature | Description | +|---------------------------|----------------------------------------------------------------------------------------------------------------| +| **Parallel Execution** | Both response generation and evaluation phases run in parallel for efficiency | +| **Anonymization** | Responses are anonymized to prevent bias in evaluation | +| **Model Selection** | Different models can be used for different roles based on their strengths | +| **Verbose Mode** | Can be disabled for production use to reduce output | +| **Conversation Management** | Conversation object efficiently tracks all messages in memory and supports export to JSON/YAML files | +| **Output Formatting** | Choose lightweight output formats (e.g., "final") for production to reduce memory usage | + +## Related Documentation + +- [Multi-Agent Architectures Overview](overview.md) +- [Council of Judges](council_of_judges.md) - Similar peer review pattern +- [Agent Class Reference](agent.md) - Understanding individual agents +- [Conversation Class Reference](conversation.md) - Understanding conversation tracking and management +- [Multi-Agent Execution Utilities](various_execution_methods.md) - Underlying execution methods +- [History Output Formatter](../../../swarms/utils/history_output_formatter.py) - Output formatting utilities diff --git a/docs/swarms/structs/round_robin_swarm.md b/docs/swarms/structs/round_robin_swarm.md index 0a8215ea..f60d1172 100644 --- a/docs/swarms/structs/round_robin_swarm.md +++ b/docs/swarms/structs/round_robin_swarm.md @@ -2,6 +2,8 @@ The `RoundRobinSwarm` class is designed to manage and execute tasks among multiple agents in a round-robin fashion. This approach ensures that each agent in a swarm receives an equal opportunity to execute tasks, which promotes fairness and efficiency in distributed systems. 
It is particularly useful in environments where collaborative, sequential task execution is needed among various agents. +This swarm implements an AutoGen-style communication pattern where agents are shuffled randomly each loop for varied interaction patterns. Each agent receives the full conversation context to build upon others' responses. + ## What is Round-Robin? Round-robin is a scheduling technique commonly used in computing for managing processes in shared systems. It involves assigning a fixed time slot to each process and cycling through all processes in a circular order without prioritization. In the context of swarms of agents, this method ensures equitable distribution of tasks and resource usage among all agents. @@ -10,12 +12,33 @@ Round-robin is a scheduling technique commonly used in computing for managing pr In swarms, `RoundRobinSwarm` utilizes the round-robin scheduling to manage tasks among agents like software components, autonomous robots, or virtual entities. This strategy is beneficial where tasks are interdependent or require sequential processing. +## Architecture + +```mermaid +graph LR + User[Task] --> A1[Agent 1] + A1 --> A2[Agent 2] + A2 --> A3[Agent 3] + A3 --> A1 + A3 --> Output[Result] +``` + +Each agent receives the task with full conversation history, responds, then passes context to the next agent. This cycle repeats for `max_loops` iterations. + ## Class Attributes -- `agents (List[Agent])`: List of agents participating in the swarm. -- `verbose (bool)`: Enables or disables detailed logging of swarm operations. -- `max_loops (int)`: Limits the number of times the swarm cycles through all agents. -- `index (int)`: Maintains the current position in the agent list to ensure round-robin execution. +| Attribute | Type | Description | +|-----------|------|-------------| +| `name` | `str` | Name of the swarm. | +| `description` | `str` | Description of the swarm's purpose. 
| +| `agents` | `List[Agent]` | List of agents participating in the swarm. | +| `verbose` | `bool` | Enables or disables detailed logging of swarm operations. | +| `max_loops` | `int` | Limits the number of times the swarm cycles through all agents. | +| `callback` | `callable` | Callback function executed after each loop. | +| `index` | `int` | Maintains the current position in the agent list to ensure round-robin execution. | +| `max_retries` | `int` | Maximum number of retries for agent execution. | +| `output_type` | `OutputType` | Type of output format (e.g., "final", "all", "json"). | +| `conversation` | `Conversation` | Conversation history for the swarm. | ## Methods @@ -24,30 +47,92 @@ In swarms, `RoundRobinSwarm` utilizes the round-robin scheduling to manage tasks Initializes the swarm with the provided list of agents, verbosity setting, and operational parameters. **Parameters:** -| Parameter | Type | Description | -|-------------|---------------------|-----------------------------------------------------| -| agents | List[Agent], optional | List of agents in the swarm. | -| verbose | bool | Boolean flag for detailed logging. | -| max_loops | int | Maximum number of execution cycles. | -| callback | Callable, optional | Function called after each loop. | + +| Parameter | Type | Default | Description | +|-----------|------|---------|-------------| +| `name` | `str` | `"RoundRobinSwarm"` | Name of the swarm. | +| `description` | `str` | `"A swarm implementation..."` | Description of the swarm's purpose. | +| `agents` | `List[Agent]` | **Required** | List of agents in the swarm. | +| `verbose` | `bool` | `False` | Boolean flag for detailed logging. | +| `max_loops` | `int` | `1` | Maximum number of execution cycles. | +| `callback` | `callable` | `None` | Function called after each loop with `(loop_index, result)` arguments. | +| `max_retries` | `int` | `3` | Maximum number of retries for agent execution. 
| +| `output_type` | `OutputType` | `"final"` | Type of output format. | + +**Raises:** + +- `ValueError`: If no agents are provided during initialization. + +--- ### `run` -Executes a specified task across all agents in a round-robin manner, cycling through each agent repeatedly for the number of specified loops. +Executes a specified task across all agents in a randomized round-robin manner, cycling through each agent repeatedly for the number of specified loops. + +```python +def run(self, task: str, *args, **kwargs) -> Union[str, dict, list] +``` + +**Parameters:** + +| Parameter | Type | Description | +|-----------|------|-------------| +| `task` | `str` | The task string to be executed by the agents. | +| `*args` | `Any` | Variable length argument list passed to each agent. | +| `**kwargs` | `Any` | Arbitrary keyword arguments passed to each agent. | + +**Returns:** + +| Type | Description | +|------|-------------| +| `Union[str, dict, list]` | The result of the task execution in the format specified by `output_type`. | + +**Raises:** + +- `ValueError`: If no agents are configured for the swarm. + +- `Exception`: If an exception occurs during task execution. **Conceptual Behavior:** | Step | Description | |------|-------------| -| 1 | Distribute the task sequentially among all agents starting from the current index. | -| 2 | Each agent processes the task and potentially modifies it or produces new output. | -| 3 | After an agent completes its part of the task, the index moves to the next agent. | -| 4 | This cycle continues until the specified maximum number of loops is completed. | -| 5 | Optionally, a callback function can be invoked after each loop to handle intermediate results or perform additional actions. | +| 1 | Add the initial task to the conversation history. | +| 2 | Shuffle agents randomly for varied interaction patterns. | +| 3 | Each agent receives the full conversation context and processes the task. 
| +| 4 | Agents build upon insights from previous agents in the conversation. | +| 5 | After an agent completes its part, its response is added to the conversation. | +| 6 | This cycle continues until the specified maximum number of loops is completed. | +| 7 | Optionally, a callback function is invoked after each loop. | +| 8 | Returns the formatted conversation history based on `output_type`. | + +--- + +### `run_batch` + +Execute multiple tasks sequentially through the round-robin swarm. Each task is processed independently through the full round-robin execution cycle. + +```python +def run_batch(self, tasks: List[str]) -> List[Union[str, dict, list]] +``` + +**Parameters:** + +| Parameter | Type | Description | +|-----------|------|-------------| +| `tasks` | `List[str]` | A list of task strings to be executed. | + +**Returns:** + +| Type | Description | +|------|-------------| +| `List[Union[str, dict, list]]` | A list of results, one for each task, in the format specified by `output_type`. | ## Examples -In this example, `RoundRobinSwarm` is used to distribute network requests evenly among a group of servers. This is common in scenarios where load balancing is crucial for maintaining system responsiveness and scalability. +### Basic Usage with `run` + +In this example, `RoundRobinSwarm` is used to distribute a sales task among a group of specialized agents. Each agent contributes their unique perspective to the collaborative output. 
```python from swarms import Agent, RoundRobinSwarm @@ -78,15 +163,89 @@ sales_agent3 = Agent( ) # Initialize the swarm with sales agents -sales_swarm = RoundRobinSwarm(agents=[sales_agent1, sales_agent2, sales_agent3], verbose=True) +sales_swarm = RoundRobinSwarm( + name="SalesTeamSwarm", + description="A collaborative sales team for generating comprehensive sales content", + agents=[sales_agent1, sales_agent2, sales_agent3], + verbose=True, + max_loops=2, + output_type="final", +) # Define a sales task task = "Generate a sales email for an accountant firm executive to sell swarms of agents to automate their accounting processes." -out = sales_swarm.run(task) -print(out) +# Run the task +result = sales_swarm.run(task) +print(result) ``` +### Batch Processing with `run_batch` + +Use `run_batch` when you need to process multiple independent tasks through the swarm. Each task is executed separately with full round-robin collaboration. + +```python +from swarms import Agent, RoundRobinSwarm + +# Define research agents +researcher1 = Agent( + agent_name="Technical Researcher", + system_prompt="You are a technical researcher who analyzes topics from a technical perspective.", + model_name="gpt-4.1", + max_loops=1, +) + +researcher2 = Agent( + agent_name="Market Researcher", + system_prompt="You are a market researcher who analyzes topics from a business and market perspective.", + model_name="gpt-4.1", + max_loops=1, +) + +# Initialize the swarm +research_swarm = RoundRobinSwarm( + name="ResearchSwarm", + agents=[researcher1, researcher2], + verbose=True, + max_loops=1, + output_type="json", +) + +# Define multiple research tasks +tasks = [ + "Analyze the current state of AI in healthcare.", + "Research the impact of automation on manufacturing.", + "Evaluate emerging trends in renewable energy.", +] + +# Run all tasks and get results +results = research_swarm.run_batch(tasks) + +# Process each result +for i, result in enumerate(results): + print(f"Task {i + 1} 
Result:") + print(result) + print("-" * 50) +``` + +### Using Callbacks + +You can use callbacks to monitor or process intermediate results after each loop: + +```python +def my_callback(loop_index: int, result: str): + """Called after each loop completes.""" + print(f"Loop {loop_index + 1} completed") + print(f"Latest result: {result[:100]}...") # Print first 100 chars + +swarm = RoundRobinSwarm( + agents=[agent1, agent2, agent3], + max_loops=3, + callback=my_callback, +) + +result = swarm.run("Analyze this complex topic from multiple perspectives.") +``` ## Conclusion diff --git a/docs/swarms/structs/swarm_router.md b/docs/swarms/structs/swarm_router.md index 8ccf1203..28b7b521 100644 --- a/docs/swarms/structs/swarm_router.md +++ b/docs/swarms/structs/swarm_router.md @@ -42,6 +42,7 @@ Main class for routing tasks to different swarm types. | `verbose` | bool | Flag to enable/disable verbose logging (default: False) | | `worker_tools` | List[Callable] | List of tools available to worker agents | | `aggregation_strategy` | str | Aggregation strategy for HeavySwarm (default: "synthesis") | +| `chairman_model` | str | Model name for the Chairman in LLMCouncil (default: "gpt-5.1") | ### Methods @@ -123,6 +124,8 @@ The `SwarmRouter` supports many various multi-agent architectures for various ap | `InteractiveGroupChat` | Interactive group chat with user participation | | `HeavySwarm` | Heavy swarm architecture with question and worker agents | | `BatchedGridWorkflow` | Batched grid workflow for parallel task processing | +| `LLMCouncil` | Council of specialized LLM agents with peer review and synthesis | +| `DebateWithJudge` | Debate architecture with Pro/Con agents and a Judge for self-refinement | | `auto` | Automatically selects best swarm type via embedding search | ## Basic Usage @@ -456,6 +459,88 @@ result = batched_grid_router.run(tasks=["Task 1", "Task 2", "Task 3"]) BatchedGridWorkflow is designed for efficiently processing multiple tasks in parallel batches, 
optimizing resource utilization. +### LLMCouncil + +Use Case: Collaborative analysis with multiple specialized LLM agents that evaluate each other's responses and synthesize a final answer. + +```python +llm_council_router = SwarmRouter( + name="LLMCouncil", + description="Collaborative council of LLM agents with peer review", + swarm_type="LLMCouncil", + chairman_model="gpt-5.1", # Model for the Chairman agent + output_type="dict", # Output format: "dict", "list", "string", "json", "yaml", "final", etc. + verbose=True # Show progress and intermediate results +) + +result = llm_council_router.run("What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?") +``` + +LLMCouncil creates a council of specialized agents (GPT-5.1, Gemini, Claude, Grok by default) that: +1. Each independently responds to the query +2. Evaluates and ranks each other's anonymized responses +3. A Chairman synthesizes all responses and evaluations into a final comprehensive answer + +The council automatically tracks all messages in a conversation object and supports flexible output formats. Note: LLMCouncil uses default council members and doesn't require the `agents` parameter. + +### DebateWithJudge + +Use Case: Structured debate architecture where two agents (Pro and Con) present opposing arguments, and a Judge agent evaluates and synthesizes the arguments over multiple rounds to progressively refine the answer. + +```python +from swarms import Agent, SwarmRouter + +# Create three specialized agents for the debate +pro_agent = Agent( + agent_name="Pro-Agent", + system_prompt="You are an expert at presenting strong, well-reasoned arguments in favor of positions. " + "You provide compelling evidence and logical reasoning to support your stance.", + model_name="gpt-4.1", + max_loops=1, +) + +con_agent = Agent( + agent_name="Con-Agent", + system_prompt="You are an expert at presenting strong, well-reasoned counter-arguments. 
" + "You identify weaknesses in opposing arguments and present compelling evidence against positions.", + model_name="gpt-4.1", + max_loops=1, +) + +judge_agent = Agent( + agent_name="Judge-Agent", + system_prompt="You are an impartial judge evaluating debates. You carefully assess both arguments, " + "identify strengths and weaknesses, and provide refined synthesis that incorporates " + "the best elements from both sides.", + model_name="gpt-4.1", + max_loops=1, +) + +# Initialize the SwarmRouter with DebateWithJudge +debate_router = SwarmRouter( + name="DebateWithJudge", + description="Structured debate with Pro/Con agents and Judge for self-refinement", + swarm_type="DebateWithJudge", + agents=[pro_agent, con_agent, judge_agent], # Must be exactly 3 agents + max_loops=3, # Number of debate rounds + output_type="str-all-except-first", # Output format + verbose=True # Show progress and intermediate results +) + +# Run a debate on a topic +result = debate_router.run( + "Should artificial intelligence development be regulated by governments?" +) +``` + +DebateWithJudge implements a multi-round debate system where: +1. **Pro Agent** presents arguments in favor of the topic +2. **Con Agent** presents counter-arguments against the topic +3. **Judge Agent** evaluates both arguments and provides synthesis +4. The process repeats for N rounds (specified by `max_loops`), with each round refining the discussion based on the judge's feedback + +The architecture progressively improves the answer through iterative refinement, making it ideal for complex topics requiring thorough analysis from multiple perspectives. Note: DebateWithJudge requires exactly 3 agents (pro_agent, con_agent, judge_agent) in that order. 
+ ## Advanced Features ### Processing Documents diff --git a/example.py b/example.py index d13636db..5cf9ed1c 100644 --- a/example.py +++ b/example.py @@ -8,15 +8,13 @@ agent = Agent( dynamic_temperature_enabled=True, max_loops=1, dynamic_context_window=True, - streaming_on=False, top_p=None, - # stream=True, + streaming_on=True, + interactive=False, ) out = agent.run( task="What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?", - n=1, ) -for token in out: - print(token, end="", flush=True) +print(out) diff --git a/examples/README.md b/examples/README.md index 34259fd4..499b9349 100644 --- a/examples/README.md +++ b/examples/README.md @@ -6,60 +6,90 @@ This directory contains comprehensive examples demonstrating various capabilitie ### Multi-Agent Systems -- **[multi_agent/](multi_agent/)** - Advanced multi-agent patterns including agent rearrangement, auto swarm builder (ASB), batched workflows, board of directors, caching, concurrent processing, councils, debates, elections, forest swarms, graph workflows, group chats, heavy swarms, hierarchical swarms, majority voting, orchestration examples, social algorithms, simulations, spreadsheet examples, and swarm routing. +- **[multi_agent/](multi_agent/)** - Advanced multi-agent patterns including agent rearrangement, auto swarm builder (ASB), batched workflows, board of directors, caching, concurrent processing, councils, debates, elections, forest swarms, graph workflows, group chats, heavy swarms, hierarchical swarms, LLM council, majority voting, orchestration examples, paper implementations, sequential workflows, social algorithms, simulations, spreadsheet examples, swarm routing, and utilities. 
- [README.md](multi_agent/README.md) - Complete multi-agent examples documentation + - [duo_agent.py](multi_agent/duo_agent.py) - Two-agent collaboration example + - [llm_council_examples/](multi_agent/llm_council_examples/) - LLM Council collaboration patterns + - [caching_examples/](multi_agent/caching_examples/) - Agent caching examples ### Single Agent Systems -- **[single_agent/](single_agent/)** - Single agent implementations including demos, external agent integrations, LLM integrations (Azure, Claude, DeepSeek, Mistral, OpenAI, Qwen), onboarding, RAG, reasoning agents, tools integration, utils, and vision capabilities. +- **[single_agent/](single_agent/)** - Single agent implementations including demos, external agent integrations, LLM integrations (Azure, Claude, DeepSeek, Mistral, OpenAI, Qwen), onboarding, RAG, reasoning agents, tools integration, utils, vision capabilities, and MCP integration. - [README.md](single_agent/README.md) - Complete single agent examples documentation - [simple_agent.py](single_agent/simple_agent.py) - Basic single agent example + - [agent_mcp.py](single_agent/agent_mcp.py) - MCP integration example + - [rag/](single_agent/rag/) - Retrieval Augmented Generation (RAG) implementations with vector database integrations ### Tools & Integrations - **[tools/](tools/)** - Tool integration examples including agent-as-tools, base tool implementations, browser automation, Claude integration, Exa search, Firecrawl, multi-tool usage, and Stagehand integration. 
- [README.md](tools/README.md) - Complete tools examples documentation - [agent_as_tools.py](tools/agent_as_tools.py) - Using agents as tools + - [browser_use_as_tool.py](tools/browser_use_as_tool.py) - Browser automation tool + - [exa_search_agent.py](tools/exa_search_agent.py) - Exa search integration + - [firecrawl_agents_example.py](tools/firecrawl_agents_example.py) - Firecrawl integration + - [base_tool_examples/](tools/base_tool_examples/) - Base tool implementation examples + - [multii_tool_use/](tools/multii_tool_use/) - Multi-tool usage examples + - [stagehand/](tools/stagehand/) - Stagehand UI automation ### Model Integrations -- **[models/](models/)** - Various model integrations including Cerebras, GPT-5, GPT-OSS, Llama 4, Lumo, and Ollama implementations with concurrent processing examples and provider-specific configurations. +- **[models/](models/)** - Various model integrations including Cerebras, GPT-5, GPT-OSS, Llama 4, Lumo, O3, Ollama, and vLLM implementations with concurrent processing examples and provider-specific configurations. - [README.md](models/README.md) - Model integration documentation - [simple_example_ollama.py](models/simple_example_ollama.py) - Ollama integration example - [cerebas_example.py](models/cerebas_example.py) - Cerebras model example - [lumo_example.py](models/lumo_example.py) - Lumo model example + - [example_o3.py](models/example_o3.py) - O3 model example + - [gpt_5/](models/gpt_5/) - GPT-5 model examples + - [gpt_oss_examples/](models/gpt_oss_examples/) - GPT-OSS examples + - [llama4_examples/](models/llama4_examples/) - Llama 4 examples + - [main_providers/](models/main_providers/) - Main provider configurations + - [vllm/](models/vllm/) - vLLM integration examples ### API & Protocols -- **[swarms_api_examples/](swarms_api_examples/)** - Swarms API usage examples including agent overview, batch processing, client integration, team examples, analysis, and rate limiting. 
- - [README.md](swarms_api_examples/README.md) - API examples documentation - - [client_example.py](swarms_api_examples/client_example.py) - API client example - - [batch_example.py](swarms_api_examples/batch_example.py) - Batch processing example +- **[swarms_api/](swarms_api/)** - Swarms API usage examples including agent overview, batch processing, client integration, team examples, analysis, and rate limiting. + - [README.md](swarms_api/README.md) - API examples documentation + - [client_example.py](swarms_api/client_example.py) - API client example + - [batch_example.py](swarms_api/batch_example.py) - Batch processing example + - [hospital_team.py](swarms_api/hospital_team.py) - Hospital management team simulation + - [legal_team.py](swarms_api/legal_team.py) - Legal team collaboration example + - [icd_ten_analysis.py](swarms_api/icd_ten_analysis.py) - ICD-10 medical code analysis + - [rate_limits.py](swarms_api/rate_limits.py) - Rate limiting and throttling examples -- **[mcp/](mcp/)** - Model Context Protocol (MCP) integration examples including agent implementations, multi-connection setups, server configurations, and utility functions. +- **[mcp/](mcp/)** - Model Context Protocol (MCP) integration examples including agent implementations, multi-connection setups, server configurations, utility functions, and multi-MCP guides. - [README.md](mcp/README.md) - MCP examples documentation - [multi_mcp_example.py](mcp/multi_mcp_example.py) - Multi-MCP connection example + - [agent_examples/](mcp/agent_examples/) - Agent-based MCP examples + - [servers/](mcp/servers/) - MCP server implementations + - [mcp_utils/](mcp/mcp_utils/) - MCP utility functions + - [multi_mcp_guide/](mcp/multi_mcp_guide/) - Multi-MCP setup guides -- **[aop_examples/](aop_examples/)** - Agents over Protocol (AOP) examples demonstrating MCP server setup, agent discovery, client interactions, queue-based task submission, and medical AOP implementations. 
+- **[aop_examples/](aop_examples/)** - Agents over Protocol (AOP) examples demonstrating MCP server setup, agent discovery, client interactions, queue-based task submission, medical AOP implementations, and utility functions. - [README.md](aop_examples/README.md) - AOP examples documentation - [server.py](aop_examples/server.py) - AOP server implementation + - [client/](aop_examples/client/) - AOP client examples and agent discovery + - [discovery/](aop_examples/discovery/) - Agent discovery examples + - [medical_aop/](aop_examples/medical_aop/) - Medical AOP implementations + - [utils/](aop_examples/utils/) - AOP utility functions ### Advanced Capabilities -- **[reasoning_agents/](reasoning_agents/)** - Advanced reasoning capabilities including agent judge evaluation systems, O3 model integration, and mixture of agents (MOA) sequential examples. +- **[reasoning_agents/](reasoning_agents/)** - Advanced reasoning capabilities including agent judge evaluation systems, O3 model integration, mixture of agents (MOA) sequential examples, and reasoning agent router examples. - [README.md](reasoning_agents/README.md) - Reasoning agents documentation - - [example_o3.py](reasoning_agents/example_o3.py) - O3 model example - [moa_seq_example.py](reasoning_agents/moa_seq_example.py) - MOA sequential example - -- **[rag/](rag/)** - Retrieval Augmented Generation (RAG) implementations with vector database integrations including Qdrant examples. 
- - [README.md](rag/README.md) - RAG documentation - - [qdrant_rag_example.py](rag/qdrant_rag_example.py) - Qdrant RAG example + - [agent_judge_examples/](reasoning_agents/agent_judge_examples/) - Agent judge evaluation systems + - [reasoning_agent_router_examples/](reasoning_agents/reasoning_agent_router_examples/) - Reasoning agent router examples ### Guides & Tutorials -- **[guides/](guides/)** - Comprehensive guides and tutorials including generation length blog, geo guesser agent, graph workflow guide, hierarchical marketing team, nano banana Jarvis agent, smart database, web scraper agents, and workshop examples (840_update, 850_workshop). +- **[guides/](guides/)** - Comprehensive guides and tutorials including demos, generation length blog, geo guesser agent, graph workflow guide, hackathon examples, hierarchical marketing team, nano banana Jarvis agent, smart database, web scraper agents, workshops, x402 examples, and workshop examples (840_update, 850_workshop). - [README.md](guides/README.md) - Guides documentation - [hiearchical_marketing_team.py](guides/hiearchical_marketing_team.py) - Hierarchical marketing team example + - [demos/](guides/demos/) - Various demonstration examples + - [hackathons/](guides/hackathons/) - Hackathon project examples + - [workshops/](guides/workshops/) - Workshop examples + - [x402_examples/](guides/x402_examples/) - X402 protocol examples ### Deployment @@ -72,6 +102,11 @@ This directory contains comprehensive examples demonstrating various capabilitie - **[utils/](utils/)** - Utility functions and helper implementations including agent loader, communication examples, concurrent wrappers, miscellaneous utilities, and telemetry. 
- [README.md](utils/README.md) - Utils documentation + - [agent_loader/](utils/agent_loader/) - Agent loading utilities + - [communication_examples/](utils/communication_examples/) - Agent communication patterns + - [concurrent_wrapper_examples.py](utils/concurrent_wrapper_examples.py) - Concurrent processing wrappers + - [misc/](utils/misc/) - Miscellaneous utility functions + - [telemetry/](utils/telemetry/) - Telemetry and monitoring utilities ### User Interface @@ -79,16 +114,26 @@ This directory contains comprehensive examples demonstrating various capabilitie - [README.md](ui/README.md) - UI examples documentation - [chat.py](ui/chat.py) - Chat interface example +### Command Line Interface + +- **[cli/](cli/)** - CLI command examples demonstrating all available Swarms CLI features including setup, agent management, multi-agent architectures, and utilities. + - [README.md](cli/README.md) - CLI examples documentation + - [01_setup_check.sh](cli/01_setup_check.sh) - Environment setup verification + - [05_create_agent.sh](cli/05_create_agent.sh) - Create custom agents + - [08_llm_council.sh](cli/08_llm_council.sh) - LLM Council collaboration + - [09_heavy_swarm.sh](cli/09_heavy_swarm.sh) - HeavySwarm complex analysis + ## Quick Start 1. **New to Swarms?** Start with [single_agent/simple_agent.py](single_agent/simple_agent.py) for basic concepts -2. **Want multi-agent workflows?** Check out [multi_agent/duo_agent.py](multi_agent/duo_agent.py) -3. **Need tool integration?** Explore [tools/agent_as_tools.py](tools/agent_as_tools.py) -4. **Interested in AOP?** Try [aop_examples/client/example_new_agent_tools.py](aop_examples/client/example_new_agent_tools.py) for agent discovery -5. **Want to see social algorithms?** Check out [multi_agent/social_algorithms_examples/](multi_agent/social_algorithms_examples/) -6. **Looking for guides?** Visit [guides/](guides/) for comprehensive tutorials -7. **Need RAG?** Try [rag/qdrant_rag_example.py](rag/qdrant_rag_example.py) -8. 
**Want reasoning agents?** Check out [reasoning_agents/example_o3.py](reasoning_agents/example_o3.py) +2. **Want to use the CLI?** Check out [cli/](cli/) for all CLI command examples +3. **Want multi-agent workflows?** Check out [multi_agent/duo_agent.py](multi_agent/duo_agent.py) +4. **Need tool integration?** Explore [tools/agent_as_tools.py](tools/agent_as_tools.py) +5. **Interested in AOP?** Try [aop_examples/client/example_new_agent_tools.py](aop_examples/client/example_new_agent_tools.py) for agent discovery +6. **Want to see social algorithms?** Check out [multi_agent/social_algorithms_examples/](multi_agent/social_algorithms_examples/) +7. **Looking for guides?** Visit [guides/](guides/) for comprehensive tutorials +8. **Need RAG?** Try [single_agent/rag/](single_agent/rag/) for RAG examples +9. **Want reasoning agents?** Check out [reasoning_agents/](reasoning_agents/) for reasoning agent examples ## Key Examples by Category @@ -105,7 +150,7 @@ This directory contains comprehensive examples demonstrating various capabilitie - [Simple Agent](single_agent/simple_agent.py) - Basic agent setup - [Reasoning Agents](single_agent/reasoning_agent_examples/) - Advanced reasoning patterns - [Vision Agents](single_agent/vision/multimodal_example.py) - Vision and multimodal capabilities -- [RAG Agents](single_agent/rag/qdrant_rag_example.py) - Retrieval augmented generation +- [RAG Agents](single_agent/rag/) - Retrieval augmented generation ### Tool Integrations @@ -122,6 +167,14 @@ This directory contains comprehensive examples demonstrating various capabilitie - [Azure](single_agent/llms/azure_agent.py) - Azure OpenAI - [Ollama](models/simple_example_ollama.py) - Local Ollama models +### CLI Examples + +- [Setup Check](cli/01_setup_check.sh) - Verify environment setup +- [Create Agent](cli/05_create_agent.sh) - Create custom agents via CLI +- [LLM Council](cli/08_llm_council.sh) - Run LLM Council collaboration +- [HeavySwarm](cli/09_heavy_swarm.sh) - Run HeavySwarm 
for complex tasks +- [All CLI Examples](cli/) - Complete CLI examples directory + ## Documentation Each subdirectory contains its own README.md file with detailed descriptions and links to all available examples. Click on any folder above to explore its specific examples and use cases. diff --git a/examples/aop_examples/server.py b/examples/aop_examples/server.py index adcaaa2c..b91bcbaa 100644 --- a/examples/aop_examples/server.py +++ b/examples/aop_examples/server.py @@ -92,7 +92,13 @@ financial_agent = Agent( ) # Basic usage - individual agent addition -deployer = AOP(server_name="MyAgentServer", verbose=True, port=5932, json_response=True, queue_enabled=False) +deployer = AOP( + server_name="MyAgentServer", + verbose=True, + port=5932, + json_response=True, + queue_enabled=False, +) agents = [ research_agent, diff --git a/examples/cli/01_setup_check.sh b/examples/cli/01_setup_check.sh new file mode 100644 index 00000000..523c806a --- /dev/null +++ b/examples/cli/01_setup_check.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Setup Check Example +# Verify your Swarms environment setup + +swarms setup-check + diff --git a/examples/cli/02_onboarding.sh b/examples/cli/02_onboarding.sh new file mode 100644 index 00000000..973a5630 --- /dev/null +++ b/examples/cli/02_onboarding.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Onboarding Example +# Start the interactive onboarding process + +swarms onboarding + diff --git a/examples/cli/03_get_api_key.sh b/examples/cli/03_get_api_key.sh new file mode 100644 index 00000000..f9775413 --- /dev/null +++ b/examples/cli/03_get_api_key.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Get API Key Example +# Open API key portal in browser + +swarms get-api-key + diff --git a/examples/cli/04_check_login.sh b/examples/cli/04_check_login.sh new file mode 100644 index 00000000..41479137 --- /dev/null +++ b/examples/cli/04_check_login.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Check Login Example +# Verify authentication 
status + +swarms check-login + diff --git a/examples/cli/05_create_agent.sh b/examples/cli/05_create_agent.sh new file mode 100644 index 00000000..eb4ed597 --- /dev/null +++ b/examples/cli/05_create_agent.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# Swarms CLI - Create Agent Example +# Create and run a custom agent + +swarms agent \ + --name "Research Agent" \ + --description "AI research specialist" \ + --system-prompt "You are an expert research agent." \ + --task "Analyze current trends in renewable energy" \ + --model-name "gpt-4o-mini" + diff --git a/examples/cli/06_run_agents_yaml.sh b/examples/cli/06_run_agents_yaml.sh new file mode 100644 index 00000000..1856c54f --- /dev/null +++ b/examples/cli/06_run_agents_yaml.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Run Agents from YAML Example +# Execute agents from YAML configuration file + +swarms run-agents --yaml-file agents.yaml + diff --git a/examples/cli/07_load_markdown.sh b/examples/cli/07_load_markdown.sh new file mode 100644 index 00000000..b1ba6e56 --- /dev/null +++ b/examples/cli/07_load_markdown.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Load Markdown Agents Example +# Load agents from markdown files + +swarms load-markdown --markdown-path ./agents/ + diff --git a/examples/cli/08_llm_council.sh b/examples/cli/08_llm_council.sh new file mode 100644 index 00000000..eb29b726 --- /dev/null +++ b/examples/cli/08_llm_council.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - LLM Council Example +# Run LLM Council for collaborative problem-solving + +swarms llm-council --task "What are the best energy ETFs to invest in right now?" 
+ diff --git a/examples/cli/09_heavy_swarm.sh b/examples/cli/09_heavy_swarm.sh new file mode 100644 index 00000000..6dfadc00 --- /dev/null +++ b/examples/cli/09_heavy_swarm.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - HeavySwarm Example +# Run HeavySwarm for complex task analysis + +swarms heavy-swarm --task "Analyze current market trends for renewable energy investments" + diff --git a/examples/cli/10_autoswarm.sh b/examples/cli/10_autoswarm.sh new file mode 100644 index 00000000..b94192f0 --- /dev/null +++ b/examples/cli/10_autoswarm.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Autoswarm Example +# Auto-generate swarm configuration + +swarms autoswarm --task "Analyze quarterly sales data" --model "gpt-4" + diff --git a/examples/cli/11_features.sh b/examples/cli/11_features.sh new file mode 100644 index 00000000..687200a4 --- /dev/null +++ b/examples/cli/11_features.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Features Example +# Display all available CLI features + +swarms features + diff --git a/examples/cli/12_help.sh b/examples/cli/12_help.sh new file mode 100644 index 00000000..09b6780c --- /dev/null +++ b/examples/cli/12_help.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Help Example +# Display comprehensive help documentation + +swarms help + diff --git a/examples/cli/13_auto_upgrade.sh b/examples/cli/13_auto_upgrade.sh new file mode 100644 index 00000000..6827f995 --- /dev/null +++ b/examples/cli/13_auto_upgrade.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Auto Upgrade Example +# Update Swarms to the latest version + +swarms auto-upgrade + diff --git a/examples/cli/14_book_call.sh b/examples/cli/14_book_call.sh new file mode 100644 index 00000000..e0108d9e --- /dev/null +++ b/examples/cli/14_book_call.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Swarms CLI - Book Call Example +# Schedule a strategy session + +swarms book-call + diff --git a/examples/cli/README.md b/examples/cli/README.md new file mode 100644 index 00000000..a002cd96 --- 
/dev/null +++ b/examples/cli/README.md @@ -0,0 +1,197 @@ +# Swarms CLI Examples + +This directory contains shell script examples demonstrating all available Swarms CLI commands and features. Each script is simple, focused, and demonstrates a single CLI command. + +## Quick Start + +All scripts are executable. Run them directly: + +```bash +chmod +x *.sh +./01_setup_check.sh +``` + +Or execute with bash: + +```bash +bash 01_setup_check.sh +``` + +## Available Examples + +### Setup & Configuration + +- **[01_setup_check.sh](examples/cli/01_setup_check.sh)** - Environment setup verification + ```bash + swarms setup-check + ``` + +- **[02_onboarding.sh](examples/cli/02_onboarding.sh)** - Interactive onboarding process + ```bash + swarms onboarding + ``` + +- **[03_get_api_key.sh](examples/cli/03_get_api_key.sh)** - Retrieve API keys + ```bash + swarms get-api-key + ``` + +- **[04_check_login.sh](examples/cli/04_check_login.sh)** - Verify authentication + ```bash + swarms check-login + ``` + +### Agent Management + +- **[05_create_agent.sh](examples/cli/05_create_agent.sh)** - Create and run custom agents + ```bash + swarms agent --name "Agent" --description "Description" --system-prompt "Prompt" --task "Task" + ``` + +- **[06_run_agents_yaml.sh](examples/cli/06_run_agents_yaml.sh)** - Execute agents from YAML + ```bash + swarms run-agents --yaml-file agents.yaml + ``` + +- **[07_load_markdown.sh](examples/cli/07_load_markdown.sh)** - Load agents from markdown files + ```bash + swarms load-markdown --markdown-path ./agents/ + ``` + +### Multi-Agent Architectures + +- **[08_llm_council.sh](examples/cli/08_llm_council.sh)** - Run LLM Council collaboration + ```bash + swarms llm-council --task "Your question here" + ``` + +- **[09_heavy_swarm.sh](examples/cli/09_heavy_swarm.sh)** - Run HeavySwarm for complex tasks + ```bash + swarms heavy-swarm --task "Your complex task here" + ``` + +- **[10_autoswarm.sh](examples/cli/10_autoswarm.sh)** - Auto-generate swarm 
configurations + ```bash + swarms autoswarm --task "Task description" --model "gpt-4" + ``` + +### Utilities + +- **[11_features.sh](examples/cli/11_features.sh)** - Display all available features + ```bash + swarms features + ``` + +- **[12_help.sh](examples/cli/12_help.sh)** - Display help documentation + ```bash + swarms help + ``` + +- **[13_auto_upgrade.sh](examples/cli/13_auto_upgrade.sh)** - Update Swarms package + ```bash + swarms auto-upgrade + ``` + +- **[14_book_call.sh](examples/cli/14_book_call.sh)** - Schedule strategy session + ```bash + swarms book-call + ``` + +### Run All Examples + +- **[run_all_examples.sh](examples/cli/run_all_examples.sh)** - Run multiple examples in sequence + ```bash + bash run_all_examples.sh + ``` + +## Script Structure + +Each script follows a simple pattern: + +1. **Shebang** - `#!/bin/bash` +2. **Comment** - Brief description of what the script does +3. **Single Command** - One CLI command execution + +Example: +```bash +#!/bin/bash + +# Swarms CLI - Setup Check Example +# Verify your Swarms environment setup + +swarms setup-check +``` + +## Usage Patterns + +### Basic Command Execution + +```bash +swarms [options] +``` + +### With Verbose Output + +```bash +swarms --verbose +``` + +### Environment Variables + +Set API keys before running scripts that require them: + +```bash +export OPENAI_API_KEY="your-key-here" +export ANTHROPIC_API_KEY="your-key-here" +export GOOGLE_API_KEY="your-key-here" +``` + +## Examples by Category + +### Setup & Diagnostics +- Environment setup verification +- Onboarding workflow +- API key management +- Authentication verification + +### Single Agent Operations +- Custom agent creation +- Agent configuration from YAML +- Agent loading from markdown + +### Multi-Agent Operations +- LLM Council for collaborative problem-solving +- HeavySwarm for complex analysis +- Auto-generated swarm configurations + +### Information & Help +- Feature discovery +- Help documentation +- Package management + 
+## File Paths + +All scripts are located in `examples/cli/`: + +- `examples/cli/01_setup_check.sh` +- `examples/cli/02_onboarding.sh` +- `examples/cli/03_get_api_key.sh` +- `examples/cli/04_check_login.sh` +- `examples/cli/05_create_agent.sh` +- `examples/cli/06_run_agents_yaml.sh` +- `examples/cli/07_load_markdown.sh` +- `examples/cli/08_llm_council.sh` +- `examples/cli/09_heavy_swarm.sh` +- `examples/cli/10_autoswarm.sh` +- `examples/cli/11_features.sh` +- `examples/cli/12_help.sh` +- `examples/cli/13_auto_upgrade.sh` +- `examples/cli/14_book_call.sh` +- `examples/cli/run_all_examples.sh` + +## Related Documentation + +- [CLI Reference](../../docs/swarms/cli/cli_reference.md) - Complete CLI documentation +- [Main Examples README](../README.md) - Other Swarms examples +- [Swarms Documentation](../../docs/) - Full Swarms documentation + diff --git a/examples/cli/run_all_examples.sh b/examples/cli/run_all_examples.sh new file mode 100644 index 00000000..ffd948e8 --- /dev/null +++ b/examples/cli/run_all_examples.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Swarms CLI - Run All Examples +# Run all CLI examples in sequence + +chmod +x *.sh + +swarms setup-check +swarms features +swarms help + diff --git a/examples/multi_agent/debate_examples/README.md b/examples/multi_agent/debate_examples/README.md index 83241c27..058518ab 100644 --- a/examples/multi_agent/debate_examples/README.md +++ b/examples/multi_agent/debate_examples/README.md @@ -15,7 +15,7 @@ The `DebateWithJudge` architecture implements a debate system with self-refineme - **Agent A (Pro)** and **Agent B (Con)** present opposing arguments - Both arguments are evaluated by a **Judge/Critic Agent** - The Judge provides a winner or synthesis → refined answer -- The process repeats for N rounds to progressively improve the answer +- The process repeats for N loops to progressively improve the answer **Architecture Flow:** ``` @@ -28,10 +28,48 @@ Agent A (Pro) ↔ Agent B (Con) Winner or synthesis → refined answer ``` 
-**Example Usage:** +**Initialization Options:** + +The `DebateWithJudge` class supports three ways to configure agents: + +1. **Preset Agents** (simplest): Use built-in optimized agents +2. **Agent List**: Provide a list of 3 agents `[pro, con, judge]` +3. **Individual Parameters**: Provide each agent separately + +**Quick Start with Preset Agents:** +```python +from swarms import DebateWithJudge + +# Create debate system with built-in agents (simplest approach) +debate = DebateWithJudge( + preset_agents=True, + max_loops=3, + model_name="gpt-4o-mini" +) + +# Run debate +result = debate.run("Should AI be regulated?") +``` + +**Using Agent List:** ```python -from swarms import Agent -from swarms.structs.debate_with_judge import DebateWithJudge +from swarms import Agent, DebateWithJudge + +# Create your agents +agents = [pro_agent, con_agent, judge_agent] + +# Create debate system with agent list +debate = DebateWithJudge( + agents=agents, + max_loops=3 +) + +result = debate.run("Should AI be regulated?") +``` + +**Using Individual Agent Parameters:** +```python +from swarms import Agent, DebateWithJudge # Create Pro, Con, and Judge agents pro_agent = Agent(agent_name="Pro-Agent", ...) @@ -43,12 +81,19 @@ debate = DebateWithJudge( pro_agent=pro_agent, con_agent=con_agent, judge_agent=judge_agent, - max_rounds=3 + max_loops=3 ) # Run debate result = debate.run("Should AI be regulated?") ``` -See [debate_with_judge_example.py](./debate_with_judge_example.py) for a complete example. 
+## Example Files + +| File | Description | +|------|-------------| +| [debate_with_judge_example.py](./debate_with_judge_example.py) | Complete example showing all initialization methods | +| [policy_debate_example.py](./policy_debate_example.py) | Policy debate on AI regulation | +| [technical_architecture_debate_example.py](./technical_architecture_debate_example.py) | Technical architecture debate with batch processing | +| [business_strategy_debate_example.py](./business_strategy_debate_example.py) | Business strategy debate with conversation history | diff --git a/examples/multi_agent/debate_examples/business_strategy_debate_example.py b/examples/multi_agent/debate_examples/business_strategy_debate_example.py index 7dd44c11..61478eef 100644 --- a/examples/multi_agent/debate_examples/business_strategy_debate_example.py +++ b/examples/multi_agent/debate_examples/business_strategy_debate_example.py @@ -52,12 +52,12 @@ judge_agent = Agent( max_loops=1, ) -# Create the debate system with extended rounds for complex strategy discussions +# Create the debate system with extended loops for complex strategy discussions strategy_debate = DebateWithJudge( pro_agent=pro_agent, con_agent=con_agent, judge_agent=judge_agent, - max_rounds=4, # More rounds for complex strategic discussions + max_loops=4, # More loops for complex strategic discussions output_type="dict", # Use dict format for structured analysis verbose=True, ) diff --git a/examples/multi_agent/debate_examples/debate_with_judge_example.py b/examples/multi_agent/debate_examples/debate_with_judge_example.py index 663f88e9..0cf9f7c5 100644 --- a/examples/multi_agent/debate_examples/debate_with_judge_example.py +++ b/examples/multi_agent/debate_examples/debate_with_judge_example.py @@ -1,61 +1,16 @@ -from swarms import Agent, DebateWithJudge +from swarms import DebateWithJudge -# Create the Pro agent (arguing in favor) -pro_agent = Agent( - agent_name="Pro-Agent", - system_prompt=( - "You are a skilled debater who 
argues in favor of positions. " - "You present well-reasoned arguments with evidence, examples, " - "and logical reasoning. You are persuasive and articulate." - ), - model_name="gpt-4o-mini", - max_loops=1, -) - -# Create the Con agent (arguing against) -con_agent = Agent( - agent_name="Con-Agent", - system_prompt=( - "You are a skilled debater who argues against positions. " - "You present strong counter-arguments with evidence, examples, " - "and logical reasoning. You identify weaknesses in opposing " - "arguments and provide compelling alternatives." - ), - model_name="gpt-4o-mini", - max_loops=1, -) - -# Create the Judge agent (evaluates and synthesizes) -judge_agent = Agent( - agent_name="Judge-Agent", - system_prompt=( - "You are an impartial judge who evaluates debates. " - "You carefully analyze arguments from both sides, identify " - "strengths and weaknesses, and provide balanced synthesis. " - "You may declare a winner or provide a refined answer that " - "incorporates the best elements from both arguments." - ), - model_name="gpt-4o-mini", - max_loops=1, -) - -# Create the DebateWithJudge system debate_system = DebateWithJudge( - pro_agent=pro_agent, - con_agent=con_agent, - judge_agent=judge_agent, - max_rounds=3, # Run 3 rounds of debate and refinement - output_type="str-all-except-first", # Return as formatted string - verbose=True, # Enable verbose logging + preset_agents=True, + max_loops=3, + model_name="gpt-4o-mini", + output_type="str-all-except-first", + verbose=True, ) -# Define the debate topic topic = ( "Should artificial intelligence be regulated by governments? " "Discuss the balance between innovation and safety." 
) -# Run the debate result = debate_system.run(task=topic) - -print(result) diff --git a/examples/multi_agent/debate_examples/policy_debate_example.py b/examples/multi_agent/debate_examples/policy_debate_example.py index a2e7c5ce..7e2d6e67 100644 --- a/examples/multi_agent/debate_examples/policy_debate_example.py +++ b/examples/multi_agent/debate_examples/policy_debate_example.py @@ -59,7 +59,7 @@ debate_system = DebateWithJudge( pro_agent=pro_agent, con_agent=con_agent, judge_agent=judge_agent, - max_rounds=3, + max_loops=3, output_type="str-all-except-first", verbose=True, ) diff --git a/examples/multi_agent/debate_examples/technical_architecture_debate_example.py b/examples/multi_agent/debate_examples/technical_architecture_debate_example.py index 24ecf3d1..964b62f0 100644 --- a/examples/multi_agent/debate_examples/technical_architecture_debate_example.py +++ b/examples/multi_agent/debate_examples/technical_architecture_debate_example.py @@ -49,7 +49,7 @@ architecture_debate = DebateWithJudge( pro_agent=pro_agent, con_agent=con_agent, judge_agent=judge_agent, - max_rounds=2, # Fewer rounds for more focused technical debates + max_loops=2, # Fewer loops for more focused technical debates output_type="str-all-except-first", verbose=True, ) diff --git a/examples/multi_agent/graphworkflow_examples/graph_workflow_basic.py b/examples/multi_agent/graphworkflow_examples/graph_workflow_basic.py index afb3bd92..720f2ca0 100644 --- a/examples/multi_agent/graphworkflow_examples/graph_workflow_basic.py +++ b/examples/multi_agent/graphworkflow_examples/graph_workflow_basic.py @@ -1,51 +1,43 @@ -#!/usr/bin/env python3 -""" -Basic Graph Workflow Example - -A minimal example showing how to use GraphWorkflow with backend selection. 
-""" - from swarms.structs.graph_workflow import GraphWorkflow from swarms.structs.agent import Agent -agent_one = Agent(agent_name="research_agent", model="gpt-4o-mini") +agent_one = Agent( + agent_name="research_agent", + model_name="gpt-4o-mini", + name="Research Agent", + agent_description="Agent responsible for gathering and summarizing research information.", +) agent_two = Agent( - agent_name="research_agent_two", model="gpt-4o-mini" + agent_name="research_agent_two", + model_name="gpt-4o-mini", + name="Analysis Agent", + agent_description="Agent that analyzes the research data provided and processes insights.", ) agent_three = Agent( - agent_name="research_agent_three", model="gpt-4o-mini" + agent_name="research_agent_three", + model_name="gpt-4o-mini", + agent_description="Agent tasked with structuring analysis into a final report or output.", ) +# Create workflow with backend selection +workflow = GraphWorkflow( + name="Basic Example", + verbose=True, +) -def main(): - """ - Run a basic graph workflow example without print statements. 
- """ - # Create agents - - # Create workflow with backend selection - workflow = GraphWorkflow( - name="Basic Example", - verbose=True, - ) - - # Add agents to workflow - workflow.add_node(agent_one) - workflow.add_node(agent_two) - workflow.add_node(agent_three) +workflow.add_nodes([agent_one, agent_two, agent_three]) - # Create simple chain using the actual agent names - workflow.add_edge("research_agent", "research_agent_two") - workflow.add_edge("research_agent_two", "research_agent_three") +# Create simple chain using the actual agent names +workflow.add_edge("research_agent", "research_agent_two") +workflow.add_edge("research_agent_two", "research_agent_three") - # Compile the workflow - workflow.compile() +workflow.visualize() - # Run the workflow - task = "Complete a simple task" - results = workflow.run(task) - return results +# Compile the workflow +workflow.compile() +# Run the workflow +task = "Complete a simple task" +results = workflow.run(task) -if __name__ == "__main__": - main() +print(results) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py new file mode 100644 index 00000000..a9d0a344 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/01_basic_usage.py @@ -0,0 +1,46 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +research_agent = Agent( + agent_name="Research-Analyst", + agent_description="Specialized in comprehensive research and data gathering", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +analysis_agent = Agent( + agent_name="Data-Analyst", + agent_description="Expert in data analysis and pattern recognition", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +strategy_agent = Agent( + agent_name="Strategy-Consultant", + agent_description="Specialized in strategic planning and recommendations", 
+ model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Rustworkx-Basic-Workflow", + description="Basic workflow using rustworkx backend for faster graph operations", + backend="rustworkx", + verbose=False, +) + +workflow.add_node(research_agent) +workflow.add_node(analysis_agent) +workflow.add_node(strategy_agent) + +workflow.add_edge(research_agent, analysis_agent) +workflow.add_edge(analysis_agent, strategy_agent) + +task = "Conduct a research analysis on water stocks and ETFs" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py new file mode 100644 index 00000000..35cfe83e --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/02_backend_comparison.py @@ -0,0 +1,56 @@ +import time +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agents = [ + Agent( + agent_name=f"Agent-{i}", + agent_description=f"Agent number {i}", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(5) +] + +nx_workflow = GraphWorkflow( + name="NetworkX-Workflow", + backend="networkx", + verbose=False, +) + +for agent in agents: + nx_workflow.add_node(agent) + +for i in range(len(agents) - 1): + nx_workflow.add_edge(agents[i], agents[i + 1]) + +nx_start = time.time() +nx_workflow.compile() +nx_compile_time = time.time() - nx_start + +rx_workflow = GraphWorkflow( + name="Rustworkx-Workflow", + backend="rustworkx", + verbose=False, +) + +for agent in agents: + rx_workflow.add_node(agent) + +for i in range(len(agents) - 1): + rx_workflow.add_edge(agents[i], agents[i + 1]) + +rx_start = time.time() +rx_workflow.compile() +rx_compile_time = time.time() - rx_start + +speedup = ( + nx_compile_time / 
rx_compile_time if rx_compile_time > 0 else 0 +) +print(f"NetworkX compile time: {nx_compile_time:.4f}s") +print(f"Rustworkx compile time: {rx_compile_time:.4f}s") +print(f"Speedup: {speedup:.2f}x") +print( + f"Identical layers: {nx_workflow._sorted_layers == rx_workflow._sorted_layers}" +) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py new file mode 100644 index 00000000..8be4fecf --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/03_fan_out_fan_in_patterns.py @@ -0,0 +1,73 @@ +from swarms import Agent, GraphWorkflow + +coordinator = Agent( + agent_name="Coordinator", + agent_description="Coordinates and distributes tasks", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +tech_analyst = Agent( + agent_name="Tech-Analyst", + agent_description="Technical analysis specialist", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +fundamental_analyst = Agent( + agent_name="Fundamental-Analyst", + agent_description="Fundamental analysis specialist", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +sentiment_analyst = Agent( + agent_name="Sentiment-Analyst", + agent_description="Sentiment analysis specialist", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +synthesis_agent = Agent( + agent_name="Synthesis-Agent", + agent_description="Synthesizes multiple analyses into final report", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Fan-Out-Fan-In-Workflow", + description="Demonstrates parallel processing patterns with rustworkx", + backend="rustworkx", + verbose=False, +) + +workflow.add_node(coordinator) +workflow.add_node(tech_analyst) +workflow.add_node(fundamental_analyst) +workflow.add_node(sentiment_analyst) +workflow.add_node(synthesis_agent) + 
+workflow.add_edges_from_source( + coordinator, + [tech_analyst, fundamental_analyst, sentiment_analyst], +) + +workflow.add_edges_to_target( + [tech_analyst, fundamental_analyst, sentiment_analyst], + synthesis_agent, +) + +task = "Analyze Tesla stock from technical, fundamental, and sentiment perspectives" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") + + +workflow.visualize(view=True) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py new file mode 100644 index 00000000..4f025a71 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/04_complex_workflow.py @@ -0,0 +1,101 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +data_collector_1 = Agent( + agent_name="Data-Collector-1", + agent_description="Collects market data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +data_collector_2 = Agent( + agent_name="Data-Collector-2", + agent_description="Collects financial data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +technical_analyst = Agent( + agent_name="Technical-Analyst", + agent_description="Performs technical analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +fundamental_analyst = Agent( + agent_name="Fundamental-Analyst", + agent_description="Performs fundamental analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +risk_analyst = Agent( + agent_name="Risk-Analyst", + agent_description="Performs risk analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +strategy_consultant = Agent( + agent_name="Strategy-Consultant", + agent_description="Develops strategic recommendations", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +report_writer = Agent( + 
agent_name="Report-Writer", + agent_description="Writes comprehensive reports", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Complex-Multi-Layer-Workflow", + description="Complex workflow with multiple layers and parallel processing", + backend="rustworkx", + verbose=False, +) + +all_agents = [ + data_collector_1, + data_collector_2, + technical_analyst, + fundamental_analyst, + risk_analyst, + strategy_consultant, + report_writer, +] + +for agent in all_agents: + workflow.add_node(agent) + +workflow.add_parallel_chain( + [data_collector_1, data_collector_2], + [technical_analyst, fundamental_analyst, risk_analyst], +) + +workflow.add_edges_to_target( + [technical_analyst, fundamental_analyst, risk_analyst], + strategy_consultant, +) + +workflow.add_edges_to_target( + [technical_analyst, fundamental_analyst, risk_analyst], + report_writer, +) + +workflow.add_edge(strategy_consultant, report_writer) + +task = "Conduct a comprehensive analysis of the renewable energy sector including market trends, financial health, and risk assessment" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py new file mode 100644 index 00000000..2b5251f7 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/05_performance_benchmark.py @@ -0,0 +1,104 @@ +import time +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agents_small = [ + Agent( + agent_name=f"Agent-{i}", + agent_description=f"Agent number {i}", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(5) +] + +agents_medium = [ + Agent( + agent_name=f"Agent-{i}", + agent_description=f"Agent number {i}", + 
model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(20) +] + +nx_workflow_small = GraphWorkflow( + name="NetworkX-Small", + backend="networkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_small: + nx_workflow_small.add_node(agent) + +for i in range(len(agents_small) - 1): + nx_workflow_small.add_edge(agents_small[i], agents_small[i + 1]) + +nx_start = time.time() +nx_workflow_small.compile() +nx_small_time = time.time() - nx_start + +rx_workflow_small = GraphWorkflow( + name="Rustworkx-Small", + backend="rustworkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_small: + rx_workflow_small.add_node(agent) + +for i in range(len(agents_small) - 1): + rx_workflow_small.add_edge(agents_small[i], agents_small[i + 1]) + +rx_start = time.time() +rx_workflow_small.compile() +rx_small_time = time.time() - rx_start + +nx_workflow_medium = GraphWorkflow( + name="NetworkX-Medium", + backend="networkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_medium: + nx_workflow_medium.add_node(agent) + +for i in range(len(agents_medium) - 1): + nx_workflow_medium.add_edge( + agents_medium[i], agents_medium[i + 1] + ) + +nx_start = time.time() +nx_workflow_medium.compile() +nx_medium_time = time.time() - nx_start + +rx_workflow_medium = GraphWorkflow( + name="Rustworkx-Medium", + backend="rustworkx", + verbose=False, + auto_compile=False, +) + +for agent in agents_medium: + rx_workflow_medium.add_node(agent) + +for i in range(len(agents_medium) - 1): + rx_workflow_medium.add_edge( + agents_medium[i], agents_medium[i + 1] + ) + +rx_start = time.time() +rx_workflow_medium.compile() +rx_medium_time = time.time() - rx_start + +print( + f"Small (5 agents) - NetworkX: {nx_small_time:.4f}s, Rustworkx: {rx_small_time:.4f}s, Speedup: {nx_small_time/rx_small_time if rx_small_time > 0 else 0:.2f}x" +) +print( + f"Medium (20 agents) - NetworkX: {nx_medium_time:.4f}s, Rustworkx: {rx_medium_time:.4f}s, Speedup: 
{nx_medium_time/rx_medium_time if rx_medium_time > 0 else 0:.2f}x" +) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py new file mode 100644 index 00000000..3fd9f25c --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/06_error_handling.py @@ -0,0 +1,55 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +test_agent = Agent( + agent_name="Test-Agent", + agent_description="Test agent for error handling", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow_rx = GraphWorkflow( + name="Rustworkx-Workflow", + backend="rustworkx", + verbose=False, +) +workflow_rx.add_node(test_agent) + +workflow_nx = GraphWorkflow( + name="NetworkX-Workflow", + backend="networkx", + verbose=False, +) +workflow_nx.add_node(test_agent) + +workflow_default = GraphWorkflow( + name="Default-Workflow", + verbose=False, +) +workflow_default.add_node(test_agent) + +workflow_invalid = GraphWorkflow( + name="Invalid-Workflow", + backend="invalid_backend", + verbose=False, +) +workflow_invalid.add_node(test_agent) + +print( + f"Rustworkx backend: {type(workflow_rx.graph_backend).__name__}" +) +print(f"NetworkX backend: {type(workflow_nx.graph_backend).__name__}") +print( + f"Default backend: {type(workflow_default.graph_backend).__name__}" +) +print( + f"Invalid backend fallback: {type(workflow_invalid.graph_backend).__name__}" +) + +try: + import rustworkx as rx + + print("Rustworkx available: True") +except ImportError: + print("Rustworkx available: False") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py new file mode 100644 index 00000000..edaeef0c --- /dev/null +++ 
b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/07_large_scale_workflow.py @@ -0,0 +1,61 @@ +import time +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +NUM_AGENTS = 30 + +agents = [ + Agent( + agent_name=f"Agent-{i:02d}", + agent_description=f"Agent number {i} in large-scale workflow", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ) + for i in range(NUM_AGENTS) +] + +workflow = GraphWorkflow( + name="Large-Scale-Workflow", + description=f"Large-scale workflow with {NUM_AGENTS} agents using rustworkx", + backend="rustworkx", + verbose=False, +) + +start_time = time.time() +for agent in agents: + workflow.add_node(agent) +add_nodes_time = time.time() - start_time + +start_time = time.time() +for i in range(9): + workflow.add_edge(agents[i], agents[i + 1]) + +workflow.add_edges_from_source( + agents[5], + agents[10:20], +) + +workflow.add_edges_to_target( + agents[10:20], + agents[20], +) + +for i in range(20, 29): + workflow.add_edge(agents[i], agents[i + 1]) + +add_edges_time = time.time() - start_time + +start_time = time.time() +workflow.compile() +compile_time = time.time() - start_time + +print( + f"Agents: {len(workflow.nodes)}, Edges: {len(workflow.edges)}, Layers: {len(workflow._sorted_layers)}" +) +print( + f"Node addition: {add_nodes_time:.4f}s, Edge addition: {add_edges_time:.4f}s, Compilation: {compile_time:.4f}s" +) +print( + f"Total setup: {add_nodes_time + add_edges_time + compile_time:.4f}s" +) diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py new file mode 100644 index 00000000..21b18d23 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/08_parallel_chain_example.py @@ -0,0 +1,73 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + 
+data_collector_1 = Agent( + agent_name="Data-Collector-1", + agent_description="Collects market data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +data_collector_2 = Agent( + agent_name="Data-Collector-2", + agent_description="Collects financial data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +data_collector_3 = Agent( + agent_name="Data-Collector-3", + agent_description="Collects news data", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +technical_analyst = Agent( + agent_name="Technical-Analyst", + agent_description="Performs technical analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +fundamental_analyst = Agent( + agent_name="Fundamental-Analyst", + agent_description="Performs fundamental analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +sentiment_analyst = Agent( + agent_name="Sentiment-Analyst", + agent_description="Performs sentiment analysis", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Parallel-Chain-Workflow", + description="Demonstrates parallel chain pattern with rustworkx", + backend="rustworkx", + verbose=False, +) + +sources = [data_collector_1, data_collector_2, data_collector_3] +targets = [technical_analyst, fundamental_analyst, sentiment_analyst] + +for agent in sources + targets: + workflow.add_node(agent) + +workflow.add_parallel_chain(sources, targets) + +workflow.compile() + +task = "Analyze the technology sector using multiple data sources and analysis methods" +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py new file mode 100644 index 00000000..79c2de3d --- /dev/null +++ 
b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/09_workflow_validation.py @@ -0,0 +1,79 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agent_a = Agent( + agent_name="Agent-A", + agent_description="Agent A", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +agent_b = Agent( + agent_name="Agent-B", + agent_description="Agent B", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +agent_c = Agent( + agent_name="Agent-C", + agent_description="Agent C", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +agent_isolated = Agent( + agent_name="Agent-Isolated", + agent_description="Isolated agent with no connections", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Validation-Workflow", + description="Workflow for validation testing", + backend="rustworkx", + verbose=False, +) + +workflow.add_node(agent_a) +workflow.add_node(agent_b) +workflow.add_node(agent_c) +workflow.add_node(agent_isolated) + +workflow.add_edge(agent_a, agent_b) +workflow.add_edge(agent_b, agent_c) + +validation_result = workflow.validate(auto_fix=False) +print(f"Valid: {validation_result['is_valid']}") +print(f"Warnings: {len(validation_result['warnings'])}") +print(f"Errors: {len(validation_result['errors'])}") + +validation_result_fixed = workflow.validate(auto_fix=True) +print( + f"After auto-fix - Valid: {validation_result_fixed['is_valid']}" +) +print(f"Fixed: {len(validation_result_fixed['fixed'])}") +print(f"Entry points: {workflow.entry_points}") +print(f"End points: {workflow.end_points}") + +workflow_cycle = GraphWorkflow( + name="Cycle-Test-Workflow", + backend="rustworkx", + verbose=False, +) + +workflow_cycle.add_node(agent_a) +workflow_cycle.add_node(agent_b) +workflow_cycle.add_node(agent_c) + +workflow_cycle.add_edge(agent_a, agent_b) +workflow_cycle.add_edge(agent_b, agent_c) +workflow_cycle.add_edge(agent_c, agent_a) 
+ +cycle_validation = workflow_cycle.validate(auto_fix=False) +print(f"Cycles detected: {len(cycle_validation.get('cycles', []))}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py new file mode 100644 index 00000000..cc6e83ff --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/10_real_world_scenario.py @@ -0,0 +1,122 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +market_researcher = Agent( + agent_name="Market-Researcher", + agent_description="Conducts comprehensive market research and data collection", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +competitor_analyst = Agent( + agent_name="Competitor-Analyst", + agent_description="Analyzes competitor landscape and positioning", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +market_analyst = Agent( + agent_name="Market-Analyst", + agent_description="Analyzes market trends and opportunities", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +financial_analyst = Agent( + agent_name="Financial-Analyst", + agent_description="Analyzes financial metrics and projections", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +risk_analyst = Agent( + agent_name="Risk-Analyst", + agent_description="Assesses market risks and challenges", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +strategy_consultant = Agent( + agent_name="Strategy-Consultant", + agent_description="Develops strategic recommendations based on all analyses", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +report_writer = Agent( + agent_name="Report-Writer", + agent_description="Compiles comprehensive market research report", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +executive_summary_writer = Agent( + 
agent_name="Executive-Summary-Writer", + agent_description="Creates executive summary for leadership", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +workflow = GraphWorkflow( + name="Market-Research-Workflow", + description="Real-world market research workflow using rustworkx backend", + backend="rustworkx", + verbose=False, +) + +all_agents = [ + market_researcher, + competitor_analyst, + market_analyst, + financial_analyst, + risk_analyst, + strategy_consultant, + report_writer, + executive_summary_writer, +] + +for agent in all_agents: + workflow.add_node(agent) + +workflow.add_parallel_chain( + [market_researcher, competitor_analyst], + [market_analyst, financial_analyst, risk_analyst], +) + +workflow.add_edges_to_target( + [market_analyst, financial_analyst, risk_analyst], + strategy_consultant, +) + +workflow.add_edges_from_source( + strategy_consultant, + [report_writer, executive_summary_writer], +) + +workflow.add_edges_to_target( + [market_analyst, financial_analyst, risk_analyst], + report_writer, +) + +task = """ +Conduct a comprehensive market research analysis on the electric vehicle (EV) industry: +1. Research current market size, growth trends, and key players +2. Analyze competitor landscape and market positioning +3. Assess financial opportunities and investment potential +4. Evaluate risks and challenges in the EV market +5. Develop strategic recommendations +6. 
Create detailed report and executive summary +""" + +results = workflow.run(task=task) + +for agent_name, output in results.items(): + print(f"{agent_name}: {output}") diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/Fan-Out-Fan-In-Workflow_visualization_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/Fan-Out-Fan-In-Workflow_visualization_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png new file mode 100644 index 00000000..d45a9a2d Binary files /dev/null and b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/Fan-Out-Fan-In-Workflow_visualization_1329d9aa-4cba-4eb5-a42c-5e4ccd165e4d.png differ diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md new file mode 100644 index 00000000..7292caad --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/README.md @@ -0,0 +1,156 @@ +# Rustworkx Backend Examples + +This directory contains comprehensive examples demonstrating the use of the **rustworkx backend** in GraphWorkflow. Rustworkx provides faster graph operations compared to NetworkX, especially for large graphs and complex operations. + +## Installation + +Before running these examples, ensure rustworkx is installed: + +```bash +pip install rustworkx +``` + +If rustworkx is not installed, GraphWorkflow will automatically fallback to NetworkX backend. + +## Examples Overview + +### 01_basic_usage.py +Basic example showing how to use rustworkx backend with GraphWorkflow. Demonstrates simple linear workflow creation and execution. + +**Key Concepts:** +- Initializing GraphWorkflow with rustworkx backend +- Adding agents and creating edges +- Running a workflow + +### 02_backend_comparison.py +Compares NetworkX and Rustworkx backends side-by-side, showing performance differences and functional equivalence. 
+ +**Key Concepts:** +- Backend comparison +- Performance metrics +- Functional equivalence verification + +### 03_fan_out_fan_in_patterns.py +Demonstrates parallel processing patterns: fan-out (one-to-many) and fan-in (many-to-one) connections. + +**Key Concepts:** +- Fan-out pattern: `add_edges_from_source()` +- Fan-in pattern: `add_edges_to_target()` +- Parallel execution optimization + +### 04_complex_workflow.py +Shows a complex multi-layer workflow with multiple parallel branches and convergence points. + +**Key Concepts:** +- Multi-layer workflows +- Parallel chains: `add_parallel_chain()` +- Complex graph structures + +### 05_performance_benchmark.py +Benchmarks performance differences between NetworkX and Rustworkx for various graph sizes and structures. + +**Key Concepts:** +- Performance benchmarking +- Scalability testing +- Different graph topologies (chain, tree) + +### 06_error_handling.py +Demonstrates error handling and graceful fallback behavior when rustworkx is unavailable. + +**Key Concepts:** +- Error handling +- Automatic fallback to NetworkX +- Backend availability checking + +### 07_large_scale_workflow.py +Demonstrates rustworkx's efficiency with large-scale workflows containing many agents. + +**Key Concepts:** +- Large-scale workflows +- Performance with many nodes/edges +- Complex interconnections + +### 08_parallel_chain_example.py +Detailed example of the parallel chain pattern creating a full mesh connection. + +**Key Concepts:** +- Parallel chain pattern +- Full mesh connections +- Maximum parallelization + +### 09_workflow_validation.py +Shows workflow validation features including cycle detection, isolated nodes, and auto-fixing. + +**Key Concepts:** +- Workflow validation +- Cycle detection +- Auto-fixing capabilities + +### 10_real_world_scenario.py +A realistic market research workflow demonstrating real-world agent coordination scenarios. 
+ +**Key Concepts:** +- Real-world use case +- Complex multi-phase workflow +- Practical application + +## Quick Start + +Run any example: + +```bash +python 01_basic_usage.py +``` + +## Backend Selection + +To use rustworkx backend: + +```python +workflow = GraphWorkflow( + backend="rustworkx", # Use rustworkx + # ... other parameters +) +``` + +To use NetworkX backend (default): + +```python +workflow = GraphWorkflow( + backend="networkx", # Or omit for default + # ... other parameters +) +``` + +## Performance Benefits + +Rustworkx provides performance benefits especially for: +- **Large graphs** (100+ nodes) +- **Complex operations** (topological sorting, cycle detection) +- **Frequent graph modifications** (adding/removing nodes/edges) + +## Key Differences + +While both backends are functionally equivalent, rustworkx: +- Uses integer indices internally (abstracted away) +- Provides faster graph operations +- Better memory efficiency for large graphs +- Maintains full compatibility with GraphWorkflow API + +## Notes + +- Both backends produce identical results +- Rustworkx automatically falls back to NetworkX if not installed +- All GraphWorkflow features work with both backends +- Performance gains become more significant with larger graphs + +## Requirements + +- `swarms` package +- `rustworkx` (optional, for rustworkx backend) +- `networkx` (always available, default backend) + +## Contributing + +Feel free to add more examples demonstrating rustworkx capabilities or specific use cases! 
+ diff --git a/examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py new file mode 100644 index 00000000..65cc4776 --- /dev/null +++ b/examples/multi_agent/graphworkflow_examples/rustworkx_examples/test_graph_workflow_rustworkx.py @@ -0,0 +1,632 @@ +import pytest +from swarms.structs.graph_workflow import ( + GraphWorkflow, +) +from swarms.structs.agent import Agent + +try: + import rustworkx as rx + + RUSTWORKX_AVAILABLE = True +except ImportError: + RUSTWORKX_AVAILABLE = False + + +def create_test_agent(name: str, description: str = None) -> Agent: + """Create a test agent""" + if description is None: + description = f"Test agent for {name} operations" + + return Agent( + agent_name=name, + agent_description=description, + model_name="gpt-4o-mini", + verbose=False, + print_on=False, + max_loops=1, + ) + + +@pytest.mark.skipif( + not RUSTWORKX_AVAILABLE, reason="rustworkx not available" +) +class TestRustworkxBackend: + """Test suite for rustworkx backend""" + + def test_rustworkx_backend_initialization(self): + """Test that rustworkx backend is properly initialized""" + workflow = GraphWorkflow(name="Test", backend="rustworkx") + assert ( + workflow.graph_backend.__class__.__name__ + == "RustworkxBackend" + ) + assert hasattr(workflow.graph_backend, "_node_id_to_index") + assert hasattr(workflow.graph_backend, "_index_to_node_id") + assert hasattr(workflow.graph_backend, "graph") + + def test_rustworkx_node_addition(self): + """Test adding nodes to rustworkx backend""" + workflow = GraphWorkflow(name="Test", backend="rustworkx") + agent = create_test_agent("TestAgent", "Test agent") + + workflow.add_node(agent) + + assert "TestAgent" in workflow.nodes + assert "TestAgent" in workflow.graph_backend._node_id_to_index + assert ( + workflow.graph_backend._node_id_to_index["TestAgent"] + in workflow.graph_backend._index_to_node_id 
+ ) + + def test_rustworkx_edge_addition(self): + """Test adding edges to rustworkx backend""" + workflow = GraphWorkflow(name="Test", backend="rustworkx") + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + assert len(workflow.edges) == 1 + assert workflow.edges[0].source == "Agent1" + assert workflow.edges[0].target == "Agent2" + + def test_rustworkx_topological_generations_linear(self): + """Test topological generations with linear chain""" + workflow = GraphWorkflow( + name="Linear-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(5) + ] + + for agent in agents: + workflow.add_node(agent) + + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + + workflow.compile() + + assert len(workflow._sorted_layers) == 5 + assert workflow._sorted_layers[0] == ["Agent0"] + assert workflow._sorted_layers[1] == ["Agent1"] + assert workflow._sorted_layers[2] == ["Agent2"] + assert workflow._sorted_layers[3] == ["Agent3"] + assert workflow._sorted_layers[4] == ["Agent4"] + + def test_rustworkx_topological_generations_fan_out(self): + """Test topological generations with fan-out pattern""" + workflow = GraphWorkflow( + name="FanOut-Test", backend="rustworkx" + ) + coordinator = create_test_agent("Coordinator", "Coordinates") + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + + workflow.add_node(coordinator) + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + + workflow.add_edges_from_source( + coordinator, [analyst1, analyst2, analyst3] + ) + + workflow.compile() + + assert len(workflow._sorted_layers) == 2 + assert len(workflow._sorted_layers[0]) == 1 + assert 
"Coordinator" in workflow._sorted_layers[0] + assert len(workflow._sorted_layers[1]) == 3 + assert "Analyst1" in workflow._sorted_layers[1] + assert "Analyst2" in workflow._sorted_layers[1] + assert "Analyst3" in workflow._sorted_layers[1] + + def test_rustworkx_topological_generations_fan_in(self): + """Test topological generations with fan-in pattern""" + workflow = GraphWorkflow( + name="FanIn-Test", backend="rustworkx" + ) + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + synthesizer = create_test_agent("Synthesizer", "Synthesizes") + + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + workflow.add_node(synthesizer) + + workflow.add_edges_to_target( + [analyst1, analyst2, analyst3], synthesizer + ) + + workflow.compile() + + assert len(workflow._sorted_layers) == 2 + assert len(workflow._sorted_layers[0]) == 3 + assert "Analyst1" in workflow._sorted_layers[0] + assert "Analyst2" in workflow._sorted_layers[0] + assert "Analyst3" in workflow._sorted_layers[0] + assert len(workflow._sorted_layers[1]) == 1 + assert "Synthesizer" in workflow._sorted_layers[1] + + def test_rustworkx_topological_generations_complex(self): + """Test topological generations with complex topology""" + workflow = GraphWorkflow( + name="Complex-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(6) + ] + + for agent in agents: + workflow.add_node(agent) + + # Create: Agent0 -> Agent1, Agent2 + # Agent1, Agent2 -> Agent3 + # Agent3 -> Agent4, Agent5 + workflow.add_edge(agents[0], agents[1]) + workflow.add_edge(agents[0], agents[2]) + workflow.add_edge(agents[1], agents[3]) + workflow.add_edge(agents[2], agents[3]) + workflow.add_edge(agents[3], agents[4]) + workflow.add_edge(agents[3], agents[5]) + + workflow.compile() + + assert len(workflow._sorted_layers) == 4 + 
assert "Agent0" in workflow._sorted_layers[0] + assert ( + "Agent1" in workflow._sorted_layers[1] + or "Agent2" in workflow._sorted_layers[1] + ) + assert "Agent3" in workflow._sorted_layers[2] + assert ( + "Agent4" in workflow._sorted_layers[3] + or "Agent5" in workflow._sorted_layers[3] + ) + + def test_rustworkx_predecessors(self): + """Test predecessor retrieval""" + workflow = GraphWorkflow( + name="Predecessors-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + predecessors = list( + workflow.graph_backend.predecessors("Agent2") + ) + assert "Agent1" in predecessors + assert len(predecessors) == 1 + + predecessors = list( + workflow.graph_backend.predecessors("Agent3") + ) + assert "Agent2" in predecessors + assert len(predecessors) == 1 + + predecessors = list( + workflow.graph_backend.predecessors("Agent1") + ) + assert len(predecessors) == 0 + + def test_rustworkx_descendants(self): + """Test descendant retrieval""" + workflow = GraphWorkflow( + name="Descendants-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + descendants = workflow.graph_backend.descendants("Agent1") + assert "Agent2" in descendants + assert "Agent3" in descendants + assert len(descendants) == 2 + + descendants = workflow.graph_backend.descendants("Agent2") + assert "Agent3" in descendants + assert len(descendants) == 1 + + descendants = workflow.graph_backend.descendants("Agent3") + 
assert len(descendants) == 0 + + def test_rustworkx_in_degree(self): + """Test in-degree calculation""" + workflow = GraphWorkflow( + name="InDegree-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent3, agent2) + + assert workflow.graph_backend.in_degree("Agent1") == 0 + assert workflow.graph_backend.in_degree("Agent2") == 2 + assert workflow.graph_backend.in_degree("Agent3") == 0 + + def test_rustworkx_out_degree(self): + """Test out-degree calculation""" + workflow = GraphWorkflow( + name="OutDegree-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent1, agent3) + + assert workflow.graph_backend.out_degree("Agent1") == 2 + assert workflow.graph_backend.out_degree("Agent2") == 0 + assert workflow.graph_backend.out_degree("Agent3") == 0 + + def test_rustworkx_agent_objects_in_edges(self): + """Test using Agent objects directly in edge methods""" + workflow = GraphWorkflow( + name="AgentObjects-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + # Use Agent objects directly + workflow.add_edges_from_source(agent1, [agent2, agent3]) + workflow.add_edges_to_target([agent2, agent3], agent1) + + workflow.compile() + + assert len(workflow.edges) == 4 + assert 
len(workflow._sorted_layers) >= 1 + + def test_rustworkx_parallel_chain(self): + """Test parallel chain pattern""" + workflow = GraphWorkflow( + name="ParallelChain-Test", backend="rustworkx" + ) + sources = [ + create_test_agent(f"Source{i}", f"Source {i}") + for i in range(3) + ] + targets = [ + create_test_agent(f"Target{i}", f"Target {i}") + for i in range(3) + ] + + for agent in sources + targets: + workflow.add_node(agent) + + workflow.add_parallel_chain(sources, targets) + + workflow.compile() + + assert len(workflow.edges) == 9 # 3x3 = 9 edges + assert len(workflow._sorted_layers) == 2 + + def test_rustworkx_large_scale(self): + """Test rustworkx with large workflow""" + workflow = GraphWorkflow( + name="LargeScale-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(20) + ] + + for agent in agents: + workflow.add_node(agent) + + # Create linear chain + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + + workflow.compile() + + assert len(workflow._sorted_layers) == 20 + assert len(workflow.nodes) == 20 + assert len(workflow.edges) == 19 + + def test_rustworkx_reverse(self): + """Test graph reversal""" + workflow = GraphWorkflow( + name="Reverse-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + reversed_backend = workflow.graph_backend.reverse() + + # In reversed graph, Agent2 should have Agent1 as predecessor + preds = list(reversed_backend.predecessors("Agent1")) + assert "Agent2" in preds + + # Agent2 should have no predecessors in reversed graph + preds = list(reversed_backend.predecessors("Agent2")) + assert len(preds) == 0 + + def test_rustworkx_entry_end_points(self): + """Test entry and end point detection""" + workflow = GraphWorkflow( + name="EntryEnd-Test", backend="rustworkx" + 
) + agent1 = create_test_agent("Agent1", "Entry agent") + agent2 = create_test_agent("Agent2", "Middle agent") + agent3 = create_test_agent("Agent3", "End agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + workflow.auto_set_entry_points() + workflow.auto_set_end_points() + + assert "Agent1" in workflow.entry_points + assert "Agent3" in workflow.end_points + assert workflow.graph_backend.in_degree("Agent1") == 0 + assert workflow.graph_backend.out_degree("Agent3") == 0 + + def test_rustworkx_isolated_nodes(self): + """Test handling of isolated nodes""" + workflow = GraphWorkflow( + name="Isolated-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "Connected agent") + agent2 = create_test_agent("Agent2", "Isolated agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent1) # Self-loop + + workflow.compile() + + assert len(workflow.nodes) == 2 + assert "Agent2" in workflow.nodes + + def test_rustworkx_workflow_execution(self): + """Test full workflow execution with rustworkx""" + workflow = GraphWorkflow( + name="Execution-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + result = workflow.run("Test task") + + assert result is not None + assert "Agent1" in result + assert "Agent2" in result + + def test_rustworkx_compilation_caching(self): + """Test that compilation is cached correctly""" + workflow = GraphWorkflow( + name="Cache-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + # First compilation + workflow.compile() + 
layers1 = workflow._sorted_layers.copy() + compiled1 = workflow._compiled + + # Second compilation should use cache + workflow.compile() + layers2 = workflow._sorted_layers.copy() + compiled2 = workflow._compiled + + assert compiled1 == compiled2 == True + assert layers1 == layers2 + + def test_rustworkx_node_metadata(self): + """Test node metadata handling""" + workflow = GraphWorkflow( + name="Metadata-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Test agent") + + workflow.add_node( + agent, metadata={"priority": "high", "timeout": 60} + ) + + node_index = workflow.graph_backend._node_id_to_index["Agent"] + node_data = workflow.graph_backend.graph[node_index] + + assert isinstance(node_data, dict) + assert node_data.get("node_id") == "Agent" + assert node_data.get("priority") == "high" + assert node_data.get("timeout") == 60 + + def test_rustworkx_edge_metadata(self): + """Test edge metadata handling""" + workflow = GraphWorkflow( + name="EdgeMetadata-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2, weight=5, label="test") + + assert len(workflow.edges) == 1 + assert workflow.edges[0].metadata.get("weight") == 5 + assert workflow.edges[0].metadata.get("label") == "test" + + +@pytest.mark.skipif( + not RUSTWORKX_AVAILABLE, reason="rustworkx not available" +) +class TestRustworkxPerformance: + """Performance tests for rustworkx backend""" + + def test_rustworkx_large_graph_compilation(self): + """Test compilation performance with large graph""" + workflow = GraphWorkflow( + name="LargeGraph-Test", backend="rustworkx" + ) + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(50) + ] + + for agent in agents: + workflow.add_node(agent) + + # Create a complex topology + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i 
+ 1]) + + import time + + start = time.time() + workflow.compile() + compile_time = time.time() - start + + assert compile_time < 1.0 # Should compile quickly + assert len(workflow._sorted_layers) == 50 + + def test_rustworkx_many_predecessors(self): + """Test performance with many predecessors""" + workflow = GraphWorkflow( + name="ManyPreds-Test", backend="rustworkx" + ) + target = create_test_agent("Target", "Target agent") + sources = [ + create_test_agent(f"Source{i}", f"Source {i}") + for i in range(100) + ] + + workflow.add_node(target) + for source in sources: + workflow.add_node(source) + + workflow.add_edges_to_target(sources, target) + + workflow.compile() + + predecessors = list( + workflow.graph_backend.predecessors("Target") + ) + assert len(predecessors) == 100 + + +@pytest.mark.skipif( + not RUSTWORKX_AVAILABLE, reason="rustworkx not available" +) +class TestRustworkxEdgeCases: + """Edge case tests for rustworkx backend""" + + def test_rustworkx_empty_graph(self): + """Test empty graph handling""" + workflow = GraphWorkflow( + name="Empty-Test", backend="rustworkx" + ) + workflow.compile() + + assert len(workflow._sorted_layers) == 0 + assert len(workflow.nodes) == 0 + + def test_rustworkx_single_node(self): + """Test single node graph""" + workflow = GraphWorkflow( + name="Single-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Single agent") + + workflow.add_node(agent) + workflow.compile() + + assert len(workflow._sorted_layers) == 1 + assert workflow._sorted_layers[0] == ["Agent"] + + def test_rustworkx_self_loop(self): + """Test self-loop handling""" + workflow = GraphWorkflow( + name="SelfLoop-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Self-looping agent") + + workflow.add_node(agent) + workflow.add_edge(agent, agent) + + workflow.compile() + + assert len(workflow.edges) == 1 + assert workflow.graph_backend.in_degree("Agent") == 1 + assert workflow.graph_backend.out_degree("Agent") == 1 + + def 
test_rustworkx_duplicate_edge(self): + """Test duplicate edge handling""" + workflow = GraphWorkflow( + name="Duplicate-Test", backend="rustworkx" + ) + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow.add_node(agent1) + workflow.add_node(agent2) + + # Add same edge twice + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent1, agent2) + + # rustworkx should handle duplicate edges + assert ( + len(workflow.edges) == 2 + ) # Both edges are stored in workflow + workflow.compile() # Should not crash + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/examples/multi_agent/hiearchical_swarm/README.md b/examples/multi_agent/hiearchical_swarm/README.md index ca67f345..3ff9c1da 100644 --- a/examples/multi_agent/hiearchical_swarm/README.md +++ b/examples/multi_agent/hiearchical_swarm/README.md @@ -14,6 +14,7 @@ This directory contains examples demonstrating hierarchical swarm patterns for m - [hs_stock_team.py](hs_stock_team.py) - Stock trading team - [hybrid_hiearchical_swarm.py](hybrid_hiearchical_swarm.py) - Hybrid approach - [sector_analysis_hiearchical_swarm.py](sector_analysis_hiearchical_swarm.py) - Sector analysis +- [display_hierarchy_example.py](display_hierarchy_example.py) - Visualize swarm hierarchy structure ## Subdirectories diff --git a/examples/multi_agent/hiearchical_swarm/display_hierarchy_example.py b/examples/multi_agent/hiearchical_swarm/display_hierarchy_example.py new file mode 100644 index 00000000..b470596d --- /dev/null +++ b/examples/multi_agent/hiearchical_swarm/display_hierarchy_example.py @@ -0,0 +1,47 @@ +from swarms import Agent, HierarchicalSwarm + +# Create specialized agents +research_agent = Agent( + agent_name="Research-Analyst", + agent_description="Specialized in comprehensive research and data gathering", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +analysis_agent = Agent( + agent_name="Data-Analyst", + 
agent_description="Expert in data analysis and pattern recognition", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +strategy_agent = Agent( + agent_name="Strategy-Consultant", + agent_description="Specialized in strategic planning and recommendations", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, +) + +# Create hierarchical swarm with interactive dashboard +swarm = HierarchicalSwarm( + name="Swarms Corporation Operations", + description="Enterprise-grade hierarchical swarm for complex task execution", + agents=[research_agent, analysis_agent, strategy_agent], + max_loops=1, + interactive=False, # Enable the Arasaka dashboard + director_model_name="claude-haiku-4-5", + director_temperature=0.7, + director_top_p=None, + planning_enabled=True, +) + + +print(swarm.display_hierarchy()) + +# out = swarm.run( +# "Conduct a research analysis on water stocks and etfs" +# ) +# print(out) diff --git a/examples/multi_agent/llm_council_examples/README.md b/examples/multi_agent/llm_council_examples/README.md new file mode 100644 index 00000000..3dd62f16 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/README.md @@ -0,0 +1,95 @@ +# LLM Council Examples + +This directory contains examples demonstrating the LLM Council pattern, inspired by Andrej Karpathy's llm-council implementation. The LLM Council uses multiple specialized AI agents that: + +1. Each respond independently to queries +2. Review and rank each other's anonymized responses +3. 
Have a Chairman synthesize all responses into a final comprehensive answer + +## Examples + +### Marketing & Business +- **marketing_strategy_council.py** - Marketing strategy analysis and recommendations +- **business_strategy_council.py** - Comprehensive business strategy development + +### Finance & Investment +- **finance_analysis_council.py** - Financial analysis and investment recommendations +- **etf_stock_analysis_council.py** - ETF and stock analysis with portfolio recommendations + +### Medical & Healthcare +- **medical_treatment_council.py** - Medical treatment recommendations and care plans +- **medical_diagnosis_council.py** - Diagnostic analysis based on symptoms + +### Technology & Research +- **technology_assessment_council.py** - Technology evaluation and implementation strategy +- **research_analysis_council.py** - Comprehensive research analysis on complex topics + +### Legal +- **legal_analysis_council.py** - Legal implications and compliance analysis + +## Usage + +Each example follows the same pattern: + +```python +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Run a query +result = council.run("Your query here") + +# Access results +print(result["final_response"]) # Chairman's synthesized answer +print(result["original_responses"]) # Individual member responses +print(result["evaluations"]) # How members ranked each other +``` + +## Running Examples + +Run any example directly: + +```bash +python examples/multi_agent/llm_council_examples/marketing_strategy_council.py +python examples/multi_agent/llm_council_examples/finance_analysis_council.py +python examples/multi_agent/llm_council_examples/medical_diagnosis_council.py +``` + +## Key Features + +- **Multiple Perspectives**: Each council member (GPT-5.1, Gemini, Claude, Grok) provides unique insights +- **Peer Review**: Members evaluate and rank each other's responses anonymously +- **Synthesis**: Chairman combines the best 
elements from all responses +- **Transparency**: See both individual responses and evaluation rankings + +## Council Members + +The default council consists of: +- **GPT-5.1-Councilor**: Analytical and comprehensive +- **Gemini-3-Pro-Councilor**: Concise and well-processed +- **Claude-Sonnet-4.5-Councilor**: Thoughtful and balanced +- **Grok-4-Councilor**: Creative and innovative + +## Customization + +You can create custom council members: + +```python +from swarms import Agent +from swarms.structs.llm_council import LLMCouncil, get_gpt_councilor_prompt + +custom_agent = Agent( + agent_name="Custom-Councilor", + system_prompt=get_gpt_councilor_prompt(), + model_name="gpt-4.1", + max_loops=1, +) + +council = LLMCouncil( + council_members=[custom_agent, ...], + chairman_model="gpt-5.1", + verbose=True +) +``` + diff --git a/examples/multi_agent/llm_council_examples/business_strategy_council.py b/examples/multi_agent/llm_council_examples/business_strategy_council.py new file mode 100644 index 00000000..10b5087b --- /dev/null +++ b/examples/multi_agent/llm_council_examples/business_strategy_council.py @@ -0,0 +1,31 @@ +""" +LLM Council Example: Business Strategy Development + +This example demonstrates using the LLM Council to develop comprehensive +business strategies for new ventures. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Business strategy query +query = """ +A tech startup wants to launch an AI-powered personal finance app targeting +millennials and Gen Z. Develop a comprehensive business strategy including: +1. Market opportunity and competitive landscape analysis +2. Product positioning and unique value proposition +3. Go-to-market strategy and customer acquisition plan +4. Revenue model and pricing strategy +5. Key partnerships and distribution channels +6. Resource requirements and funding needs +7. Risk assessment and mitigation strategies +8. 
Success metrics and KPIs for first 12 months +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) diff --git a/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py new file mode 100644 index 00000000..7e85d851 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/etf_stock_analysis_council.py @@ -0,0 +1,29 @@ +""" +LLM Council Example: ETF Stock Analysis + +This example demonstrates using the LLM Council to analyze ETF holdings +and provide stock investment recommendations. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# ETF and stock analysis query +query = """ +Analyze the top energy ETFs (including nuclear, solar, gas, and renewable energy) +and provide: +1. Top 5 best-performing energy stocks across all energy sectors +2. ETF recommendations for diversified energy exposure +3. Risk-return profiles for each recommendation +4. Current market conditions affecting energy investments +5. Allocation strategy for a $100,000 portfolio +6. Key metrics to track for each investment +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) diff --git a/examples/multi_agent/llm_council_examples/finance_analysis_council.py b/examples/multi_agent/llm_council_examples/finance_analysis_council.py new file mode 100644 index 00000000..f014be47 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/finance_analysis_council.py @@ -0,0 +1,29 @@ +""" +LLM Council Example: Financial Analysis + +This example demonstrates using the LLM Council to provide comprehensive +financial analysis and investment recommendations. 
+""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Financial analysis query +query = """ +Provide a comprehensive financial analysis for investing in emerging markets +technology ETFs. Include: +1. Risk assessment and volatility analysis +2. Historical performance trends +3. Sector composition and diversification benefits +4. Comparison with developed market tech ETFs +5. Recommended allocation percentage for a moderate risk portfolio +6. Key factors to monitor going forward +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) diff --git a/examples/multi_agent/llm_council_examples/legal_analysis_council.py b/examples/multi_agent/llm_council_examples/legal_analysis_council.py new file mode 100644 index 00000000..5ea3481e --- /dev/null +++ b/examples/multi_agent/llm_council_examples/legal_analysis_council.py @@ -0,0 +1,31 @@ +""" +LLM Council Example: Legal Analysis + +This example demonstrates using the LLM Council to analyze legal scenarios +and provide comprehensive legal insights. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Legal analysis query +query = """ +A startup is considering using AI-generated content for their marketing materials. +Analyze the legal implications including: +1. Intellectual property rights and ownership of AI-generated content +2. Copyright and trademark considerations +3. Liability for AI-generated content that may be inaccurate or misleading +4. Compliance with advertising regulations (FTC, FDA, etc.) +5. Data privacy implications if using customer data to train models +6. Contractual considerations with AI service providers +7. Risk mitigation strategies +8. 
Best practices for legal compliance +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) diff --git a/examples/multi_agent/llm_council_examples/llm_council_example.py b/examples/multi_agent/llm_council_examples/llm_council_example.py new file mode 100644 index 00000000..8dc1334a --- /dev/null +++ b/examples/multi_agent/llm_council_examples/llm_council_example.py @@ -0,0 +1,12 @@ +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True, output_type="final") + +# Example query +query = "What are the top five best energy stocks across nuclear, solar, gas, and other energy sources?" + +# Run the council +result = council.run(query) + +print(result) diff --git a/examples/multi_agent/llm_council_examples/marketing_strategy_council.py b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py new file mode 100644 index 00000000..a799c364 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/marketing_strategy_council.py @@ -0,0 +1,28 @@ +""" +LLM Council Example: Marketing Strategy Analysis + +This example demonstrates using the LLM Council to analyze and develop +comprehensive marketing strategies by leveraging multiple AI perspectives. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Marketing strategy query +query = """ +Analyze the marketing strategy for a new sustainable energy startup launching +a solar panel subscription service. Provide recommendations on: +1. Target audience segmentation +2. Key messaging and value propositions +3. Marketing channels and budget allocation +4. Competitive positioning +5. 
Launch timeline and milestones +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) diff --git a/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py new file mode 100644 index 00000000..90532f38 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/medical_diagnosis_council.py @@ -0,0 +1,36 @@ +""" +LLM Council Example: Medical Diagnosis Analysis + +This example demonstrates using the LLM Council to analyze symptoms +and provide diagnostic insights. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Medical diagnosis query +query = """ +A 35-year-old patient presents with: +- Persistent fatigue for 3 months +- Unexplained weight loss (15 lbs) +- Night sweats +- Intermittent low-grade fever +- Swollen lymph nodes in neck and armpits +- Recent blood work shows elevated ESR and CRP + +Provide: +1. Differential diagnosis with most likely conditions ranked +2. Additional diagnostic tests needed to confirm +3. Red flag symptoms requiring immediate attention +4. Possible causes and risk factors +5. Recommended next steps for the patient +6. When to seek emergency care +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) diff --git a/examples/multi_agent/llm_council_examples/medical_treatment_council.py b/examples/multi_agent/llm_council_examples/medical_treatment_council.py new file mode 100644 index 00000000..6084db4c --- /dev/null +++ b/examples/multi_agent/llm_council_examples/medical_treatment_council.py @@ -0,0 +1,30 @@ +""" +LLM Council Example: Medical Treatment Analysis + +This example demonstrates using the LLM Council to analyze medical treatments +and provide comprehensive treatment recommendations. 
+""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Medical treatment query +query = """ +A 45-year-old patient with Type 2 diabetes, hypertension, and early-stage +kidney disease needs treatment recommendations. Provide: +1. Comprehensive treatment plan addressing all conditions +2. Medication options with pros/cons for each condition +3. Lifestyle modifications and their expected impact +4. Monitoring schedule and key metrics to track +5. Potential drug interactions and contraindications +6. Expected outcomes and timeline for improvement +7. When to consider specialist referrals +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) diff --git a/examples/multi_agent/llm_council_examples/research_analysis_council.py b/examples/multi_agent/llm_council_examples/research_analysis_council.py new file mode 100644 index 00000000..74a8585a --- /dev/null +++ b/examples/multi_agent/llm_council_examples/research_analysis_council.py @@ -0,0 +1,31 @@ +""" +LLM Council Example: Research Analysis + +This example demonstrates using the LLM Council to conduct comprehensive +research analysis on complex topics. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Research analysis query +query = """ +Conduct a comprehensive analysis of the potential impact of climate change +on global food security over the next 20 years. Include: +1. Key climate factors affecting agriculture (temperature, precipitation, extreme weather) +2. Regional vulnerabilities and impacts on major food-producing regions +3. Crop yield projections and food availability scenarios +4. Economic implications and food price volatility +5. Adaptation strategies and technological solutions +6. Policy recommendations for governments and international organizations +7. 
Role of innovation in agriculture (precision farming, GMOs, vertical farming) +8. Social and geopolitical implications of food insecurity +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) diff --git a/examples/multi_agent/llm_council_examples/technology_assessment_council.py b/examples/multi_agent/llm_council_examples/technology_assessment_council.py new file mode 100644 index 00000000..4db4dd95 --- /dev/null +++ b/examples/multi_agent/llm_council_examples/technology_assessment_council.py @@ -0,0 +1,31 @@ +""" +LLM Council Example: Technology Assessment + +This example demonstrates using the LLM Council to assess emerging technologies +and their business implications. +""" + +from swarms.structs.llm_council import LLMCouncil + +# Create the council +council = LLMCouncil(verbose=True) + +# Technology assessment query +query = """ +Evaluate the business potential and implementation strategy for integrating +quantum computing capabilities into a financial services company. Consider: +1. Current state of quantum computing technology +2. Specific use cases in financial services (risk modeling, portfolio optimization, fraud detection) +3. Competitive advantages and potential ROI +4. Implementation timeline and resource requirements +5. Technical challenges and limitations +6. Risk factors and mitigation strategies +7. Partnership opportunities with quantum computing providers +8. 
Expected timeline for practical business value +""" + +# Run the council +result = council.run(query) + +# Print final response +print(result["final_response"]) diff --git a/examples/multi_agent/swarm_router/swarm_router.py b/examples/multi_agent/swarm_router/swarm_router.py index 1801c25a..b8f73365 100644 --- a/examples/multi_agent/swarm_router/swarm_router.py +++ b/examples/multi_agent/swarm_router/swarm_router.py @@ -26,7 +26,6 @@ router = SwarmRouter( agents=agents, swarm_type="SequentialWorkflow", output_type="dict", - return_entire_history=False, ) output = router.run("How are you doing?") diff --git a/examples/multi_agent/utils/unique_swarms_examples.py b/examples/multi_agent/utils/unique_swarms_examples.py index 7f577e0b..09788cbf 100644 --- a/examples/multi_agent/utils/unique_swarms_examples.py +++ b/examples/multi_agent/utils/unique_swarms_examples.py @@ -8,7 +8,6 @@ from swarms.structs.swarming_architectures import ( exponential_swarm, fibonacci_swarm, grid_swarm, - linear_swarm, mesh_swarm, one_to_three, prime_swarm, @@ -121,30 +120,6 @@ def run_healthcare_grid_swarm(): print(result) -def run_finance_linear_swarm(): - """Loan approval process using linear swarm""" - print_separator() - print("FINANCE - LOAN APPROVAL PROCESS (Linear Swarm)") - - agents = create_finance_agents()[:3] - tasks = [ - "Review loan application and credit history", - "Assess risk factors and compliance requirements", - "Generate final loan recommendation", - ] - - print("\nTasks:") - for i, task in enumerate(tasks, 1): - print(f"{i}. 
{task}") - - result = linear_swarm(agents, tasks) - print("\nResults:") - for log in result["history"]: - print(f"\n{log['agent_name']}:") - print(f"Task: {log['task']}") - print(f"Response: {log['response']}") - - def run_healthcare_star_swarm(): """Complex medical case management using star swarm""" print_separator() @@ -287,7 +262,6 @@ async def run_all_examples(): # Finance examples run_finance_circular_swarm() - run_finance_linear_swarm() run_finance_mesh_swarm() run_mathematical_finance_swarms() diff --git a/examples/single_agent/README.md b/examples/single_agent/README.md index d9997390..1ce369ee 100644 --- a/examples/single_agent/README.md +++ b/examples/single_agent/README.md @@ -7,7 +7,6 @@ This directory contains examples demonstrating single agent patterns, configurat - [persistent_legal_agent.py](demos/persistent_legal_agent.py) - Legal document processing agent ## External Agents -- [custom_agent_example.py](external_agents/custom_agent_example.py) - Custom agent implementation - [openai_assistant_wrapper.py](external_agents/openai_assistant_wrapper.py) - OpenAI Assistant integration ## LLM Integrations diff --git a/examples/single_agent/external_agents/README.md b/examples/single_agent/external_agents/README.md index ec78239f..428764ca 100644 --- a/examples/single_agent/external_agents/README.md +++ b/examples/single_agent/external_agents/README.md @@ -4,7 +4,6 @@ This directory contains examples demonstrating integration with external agent s ## Examples -- [custom_agent_example.py](custom_agent_example.py) - Custom agent implementation - [openai_assistant_wrapper.py](openai_assistant_wrapper.py) - OpenAI Assistant integration wrapper ## Overview diff --git a/examples/single_agent/external_agents/custom_agent_example.py b/examples/single_agent/external_agents/custom_agent_example.py deleted file mode 100644 index 67f315e1..00000000 --- a/examples/single_agent/external_agents/custom_agent_example.py +++ /dev/null @@ -1,40 +0,0 @@ -import os - -from 
dotenv import load_dotenv - -from swarms.structs.custom_agent import CustomAgent - -load_dotenv() - -# Example usage with Anthropic API -if __name__ == "__main__": - # Initialize the agent for Anthropic API - anthropic_agent = CustomAgent( - base_url="https://api.anthropic.com", - endpoint="v1/messages", - headers={ - "x-api-key": os.getenv("ANTHROPIC_API_KEY"), - "anthropic-version": "2023-06-01", - }, - ) - - # Example payload for Anthropic API - payload = { - "model": "claude-3-5-sonnet-20241022", - "max_tokens": 1000, - "messages": [ - { - "role": "user", - "content": "Hello! Can you explain what artaddificial intelligence is?", - } - ], - } - - # Make the request - try: - response = anthropic_agent.run(payload) - print("Anthropic API Response:") - print(response) - print(type(response)) - except Exception as e: - print(f"Error: {e}") diff --git a/examples/single_agent/utils/medical_agent_add_to_marketplace.py b/examples/single_agent/utils/medical_agent_add_to_marketplace.py index 6a6f1c2c..3e934828 100644 --- a/examples/single_agent/utils/medical_agent_add_to_marketplace.py +++ b/examples/single_agent/utils/medical_agent_add_to_marketplace.py @@ -1,7 +1,9 @@ import json from swarms import Agent -blood_analysis_system_prompt = """You are a clinical laboratory data analyst assistant focused on hematology and basic metabolic panels. +blood_analysis_system_prompt = """ +You are a clinical laboratory data analyst assistant focused on hematology and basic metabolic panels. + Your goals: 1) Interpret common blood test panels (CBC, CMP/BMP, lipid panel, HbA1c, thyroid panels) based on provided values, reference ranges, flags, and units. 2) Provide structured findings: out-of-range markers, degree of deviation, likely clinical significance, and differential considerations. 
@@ -50,33 +52,15 @@ blood_analysis_agent = Agent( use_cases=[ { "title": "Blood Analysis", - "description": ( - "Analyze blood samples and provide a report on the results, " - "highlighting significant deviations, clinical context, red flags, " - "and referencing established guidelines for lab test interpretation." - ), + "description": "Analyze blood samples and summarize notable findings.", }, { - "title": "Longitudinal Patient Lab Monitoring", - "description": ( - "Process serial blood test results for a patient over time to identify clinical trends in key parameters (e.g., " - "progression of anemia, impact of pharmacologic therapy, signs of organ dysfunction). Generate structured summaries " - "that succinctly track rises, drops, or persistently abnormal markers. Flag patterns that suggest evolving risk or " - "require physician escalation, such as a dropping platelet count, rising creatinine, or new-onset hyperglycemia. " - "Report should distinguish true trends from ordinary biological variability, referencing clinical guidelines for " - "critical-change thresholds and best-practice follow-up actions." - ), + "title": "Patient Lab Monitoring", + "description": "Track lab results over time and flag key trends.", }, { - "title": "Preoperative Laboratory Risk Stratification", - "description": ( - "Interpret pre-surgical laboratory panels as part of risk assessment for patients scheduled for procedures. Identify " - "abnormal or borderline values that may increase the risk of perioperative complications (e.g., bleeding risk from " - "thrombocytopenia, signs of undiagnosed infection, electrolyte imbalances affecting anesthesia safety). Structure the " - "output to clearly separate routine findings from emergent concerns, and suggest evidence-based adjustments, further " - "workup, or consultation needs before proceeding with surgery, based on current clinical best practices and guideline " - "recommendations." 
- ), + "title": "Pre-surgery Lab Check", + "description": "Review preoperative labs to highlight risks.", }, ], ) diff --git a/examples/swarms_api_examples/README.md b/examples/swarms_api/README.md similarity index 100% rename from examples/swarms_api_examples/README.md rename to examples/swarms_api/README.md diff --git a/examples/swarms_api_examples/agent_overview.py b/examples/swarms_api/agent_overview.py similarity index 100% rename from examples/swarms_api_examples/agent_overview.py rename to examples/swarms_api/agent_overview.py diff --git a/examples/swarms_api_examples/batch_example.py b/examples/swarms_api/batch_example.py similarity index 100% rename from examples/swarms_api_examples/batch_example.py rename to examples/swarms_api/batch_example.py diff --git a/examples/swarms_api_examples/client_example.py b/examples/swarms_api/client_example.py similarity index 100% rename from examples/swarms_api_examples/client_example.py rename to examples/swarms_api/client_example.py diff --git a/examples/swarms_api_examples/hospital_team.py b/examples/swarms_api/hospital_team.py similarity index 100% rename from examples/swarms_api_examples/hospital_team.py rename to examples/swarms_api/hospital_team.py diff --git a/examples/swarms_api_examples/icd_ten_analysis.py b/examples/swarms_api/icd_ten_analysis.py similarity index 100% rename from examples/swarms_api_examples/icd_ten_analysis.py rename to examples/swarms_api/icd_ten_analysis.py diff --git a/examples/swarms_api_examples/legal_team.py b/examples/swarms_api/legal_team.py similarity index 100% rename from examples/swarms_api_examples/legal_team.py rename to examples/swarms_api/legal_team.py diff --git a/examples/swarms_api_examples/rate_limits.py b/examples/swarms_api/rate_limits.py similarity index 100% rename from examples/swarms_api_examples/rate_limits.py rename to examples/swarms_api/rate_limits.py diff --git a/graph_workflow_basic.py b/graph_workflow_basic.py new file mode 100644 index 00000000..febc4c7c --- 
/dev/null +++ b/graph_workflow_basic.py @@ -0,0 +1,47 @@ +from swarms.structs.graph_workflow import GraphWorkflow +from swarms.structs.agent import Agent + +agent_one = Agent( + agent_name="research_agent", + model_name="claude-haiku-4-5", + top_p=None, + temperature=None, + agent_description="Agent responsible for gathering and summarizing research information.", +) +agent_two = Agent( + agent_name="research_agent_two", + model_name="claude-haiku-4-5", + top_p=None, + temperature=None, + agent_description="Agent that analyzes the research data provided and processes insights.", +) +agent_three = Agent( + agent_name="research_agent_three", + model_name="claude-haiku-4-5", + top_p=None, + temperature=None, + agent_description="Agent tasked with structuring analysis into a final report or output.", +) + +# Create workflow with backend selection +workflow = GraphWorkflow( + name="Basic Example", + verbose=True, + backend="rustworkx", +) + +agents = [agent_one, agent_two, agent_three] +workflow.add_nodes(agents, batch_size=3) + +workflow.add_edge("research_agent", "research_agent_two") +workflow.add_edge("research_agent_two", "research_agent_three") +workflow.visualize() + +# Compile the workflow +workflow.compile() + +# Run the workflow +task = "Analyze the best mining companies in the US" +results = workflow.run(task) + +print(results) diff --git a/hiearchical_swarm_example.py b/hiearchical_swarm_example.py index 753ebf0f..b470596d 100644 --- a/hiearchical_swarm_example.py +++ b/hiearchical_swarm_example.py @@ -1,5 +1,4 @@ -from swarms.structs.hiearchical_swarm import HierarchicalSwarm -from swarms.structs.agent import Agent +from swarms import Agent, HierarchicalSwarm # Create specialized agents research_agent = Agent( @@ -39,7 +38,10 @@ swarm = HierarchicalSwarm( planning_enabled=True, ) -out = swarm.run( - "Conduct a research analysis on water stocks and etfs" -) -print(out) + +print(swarm.display_hierarchy()) + +# out = swarm.run( +# "Conduct a research analysis 
on water stocks and etfs" +# ) +# print(out) diff --git a/pyproject.toml b/pyproject.toml index 10ad1565..1ea9efd0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "8.6.3" +version = "8.7.0" description = "Swarms - TGSC" license = "MIT" authors = ["Kye Gomez "] @@ -85,7 +85,7 @@ swarms = "swarms.cli.main:main" [tool.poetry.group.lint.dependencies] black = ">=23.1,<26.0" -ruff = ">=0.5.1,<0.14.5" +ruff = ">=0.5.1,<0.14.9" types-toml = "^0.10.8.1" types-pytz = ">=2023.3,<2026.0" types-chardet = "^5.0.4.6" diff --git a/requirements.txt b/requirements.txt index 279d5538..4b1ef65b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ toml pypdf==5.1.0 ratelimit==2.2.1 loguru -pydantic==2.12.4 +pydantic==2.12.5 tenacity rich psutil @@ -19,10 +19,12 @@ pytest networkx aiofiles httpx +requests +litellm # vllm>=0.2.0 aiohttp mcp numpy schedule uvloop; sys_platform == 'linux' or sys_platform == 'darwin' # linux or macos only -winloop; sys_platform == 'win32' # windows only +winloop; sys_platform == 'win32' # windows only \ No newline at end of file diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py index 122ccb01..e64ab828 100644 --- a/swarms/agents/reasoning_agents.py +++ b/swarms/agents/reasoning_agents.py @@ -88,9 +88,7 @@ class ReasoningAgentRouter: eval: bool = False, random_models_on: bool = False, majority_voting_prompt: Optional[str] = None, - reasoning_model_name: Optional[ - str - ] = "claude-3-5-sonnet-20240620", + reasoning_model_name: Optional[str] = "gpt-4o", ): """ Initialize the ReasoningAgentRouter with the specified configuration. 
diff --git a/swarms/agents/reasoning_duo.py b/swarms/agents/reasoning_duo.py index c0ddc156..581a69e7 100644 --- a/swarms/agents/reasoning_duo.py +++ b/swarms/agents/reasoning_duo.py @@ -35,9 +35,7 @@ class ReasoningDuo: model_names: list[str] = ["gpt-4o-mini", "gpt-4.1"], system_prompt: str = "You are a helpful assistant that can answer questions and help with tasks.", output_type: OutputType = "dict-all-except-first", - reasoning_model_name: Optional[ - str - ] = "claude-3-5-sonnet-20240620", + reasoning_model_name: Optional[str] = "gpt-4o", max_loops: int = 1, *args, **kwargs, diff --git a/swarms/cli/main.py b/swarms/cli/main.py index e4458726..661dd578 100644 --- a/swarms/cli/main.py +++ b/swarms/cli/main.py @@ -22,6 +22,8 @@ from swarms.agents.create_agents_from_yaml import ( from swarms.structs.agent import Agent from swarms.structs.agent_loader import AgentLoader +from swarms.structs.llm_council import LLMCouncil +from swarms.structs.heavy_swarm import HeavySwarm from swarms.utils.formatter import formatter load_dotenv() @@ -625,6 +627,18 @@ def create_command_table() -> Table: "setup-check", "Run a comprehensive environment setup check", ), + ( + "llm-council", + "Run the LLM Council with multiple agents collaborating on a task", + ), + ( + "heavy-swarm", + "Run HeavySwarm with specialized agents for complex task analysis", + ), + ( + "features", + "Display all available features and actions in a comprehensive table", + ), ] for cmd, desc in commands: @@ -639,7 +653,7 @@ def create_detailed_command_table() -> Table: show_header=True, header_style=f"bold {COLORS['primary']}", border_style=COLORS["secondary"], - title="🚀 Swarms CLI - Complete Command Reference", + title="🚀Swarms CLI - Complete Command Reference", title_style=f"bold {COLORS['primary']}", padding=(0, 1), show_lines=True, @@ -744,6 +758,27 @@ def create_detailed_command_table() -> Table: "usage": "swarms setup-check [--verbose]", "args": "--verbose", }, + { + "cmd": "llm-council", + "category": 
"Collaboration", + "desc": "Run LLM Council with multiple agents", + "usage": "swarms llm-council --task 'Your question here' [--verbose]", + "args": "--task, --verbose", + }, + { + "cmd": "heavy-swarm", + "category": "Execution", + "desc": "Run HeavySwarm with specialized agents", + "usage": "swarms heavy-swarm --task 'Your task here' [--loops-per-agent 1] [--question-agent-model-name gpt-4o-mini] [--worker-model-name gpt-4o-mini] [--random-loops-per-agent] [--verbose]", + "args": "--task, --loops-per-agent, --question-agent-model-name, --worker-model-name, --random-loops-per-agent, --verbose", + }, + { + "cmd": "features", + "category": "Info", + "desc": "Display all available features and actions", + "usage": "swarms features", + "args": "None", + }, ] for cmd_info in commands: @@ -758,6 +793,225 @@ def create_detailed_command_table() -> Table: return table +def show_features(): + """ + Display all available CLI features and actions in a comprehensive table. + """ + console.print( + "\n[bold]🚀 Swarms CLI - All Available Features[/bold]\n", + style=COLORS["primary"], + ) + + # Create main features table + features_table = Table( + show_header=True, + header_style=f"bold {COLORS['primary']}", + border_style=COLORS["secondary"], + title="✨ Complete Feature Reference", + title_style=f"bold {COLORS['primary']}", + padding=(0, 1), + show_lines=True, + expand=True, + ) + + # Add columns + features_table.add_column( + "Feature", + style=f"bold {COLORS['accent']}", + width=20, + no_wrap=True, + ) + features_table.add_column( + "Category", + style="bold cyan", + width=15, + justify="center", + ) + features_table.add_column( + "Description", + style="white", + width=50, + no_wrap=False, + ) + features_table.add_column( + "Command", + style="dim yellow", + width=35, + no_wrap=False, + ) + features_table.add_column( + "Key Parameters", + style="dim magenta", + width=30, + no_wrap=False, + ) + + # Define all features + features = [ + { + "feature": "Environment Setup", + 
"category": "Setup", + "desc": "Check and verify your Swarms environment configuration", + "command": "swarms setup-check [--verbose]", + "params": "--verbose", + }, + { + "feature": "Onboarding", + "category": "Setup", + "desc": "Run environment setup check (alias for setup-check)", + "command": "swarms onboarding [--verbose]", + "params": "--verbose", + }, + { + "feature": "API Key Management", + "category": "Setup", + "desc": "Retrieve API keys from the Swarms platform", + "command": "swarms get-api-key", + "params": "None", + }, + { + "feature": "Authentication", + "category": "Auth", + "desc": "Verify login status and initialize authentication cache", + "command": "swarms check-login", + "params": "None", + }, + { + "feature": "YAML Agent Execution", + "category": "Execution", + "desc": "Execute agents from YAML configuration files", + "command": "swarms run-agents --yaml-file agents.yaml", + "params": "--yaml-file", + }, + { + "feature": "Markdown Agent Loading", + "category": "Loading", + "desc": "Load agents from markdown files with YAML frontmatter", + "command": "swarms load-markdown --markdown-path ./agents/", + "params": "--markdown-path, --concurrent", + }, + { + "feature": "Custom Agent Creation", + "category": "Creation", + "desc": "Create and run a custom agent with specified parameters", + "command": "swarms agent --name 'Agent' --task 'Task' --system-prompt 'Prompt'", + "params": "--name, --task, --system-prompt, --model-name, --temperature, --max-loops, --verbose", + }, + { + "feature": "Auto Swarm Generation", + "category": "AI Generation", + "desc": "Automatically generate and execute an autonomous swarm configuration", + "command": "swarms autoswarm --task 'analyze data' --model gpt-4", + "params": "--task, --model", + }, + { + "feature": "LLM Council", + "category": "Collaboration", + "desc": "Run LLM Council with multiple agents collaborating and evaluating responses", + "command": "swarms llm-council --task 'Your question' [--verbose]", + 
"params": "--task, --verbose", + }, + { + "feature": "HeavySwarm", + "category": "Execution", + "desc": "Run HeavySwarm with specialized agents for complex task analysis", + "command": "swarms heavy-swarm --task 'Your task' [options]", + "params": "--task, --loops-per-agent, --question-agent-model-name, --worker-model-name, --random-loops-per-agent, --verbose", + }, + { + "feature": "Package Upgrade", + "category": "Maintenance", + "desc": "Update Swarms to the latest version", + "command": "swarms auto-upgrade", + "params": "None", + }, + { + "feature": "Support Booking", + "category": "Support", + "desc": "Schedule a strategy session with the Swarms team", + "command": "swarms book-call", + "params": "None", + }, + { + "feature": "Help Documentation", + "category": "Info", + "desc": "Display comprehensive help message with all commands", + "command": "swarms help", + "params": "None", + }, + { + "feature": "Features List", + "category": "Info", + "desc": "Display all available features and actions in a table", + "command": "swarms features", + "params": "None", + }, + ] + + # Add rows to table + for feat in features: + features_table.add_row( + feat["feature"], + feat["category"], + feat["desc"], + feat["command"], + feat["params"], + ) + + console.print(features_table) + + # Add category summary + console.print("\n[bold cyan]📊 Feature Categories:[/bold cyan]\n") + + category_table = Table( + show_header=True, + header_style=f"bold {COLORS['primary']}", + border_style=COLORS["secondary"], + padding=(0, 2), + ) + + category_table.add_column("Category", style="bold cyan", width=20) + category_table.add_column( + "Count", style="bold white", justify="center", width=10 + ) + category_table.add_column("Features", style="dim white", width=60) + + # Count features by category + categories = {} + for feat in features: + cat = feat["category"] + if cat not in categories: + categories[cat] = [] + categories[cat].append(feat["feature"]) + + for category, feature_list in 
sorted(categories.items()): + category_table.add_row( + category, + str(len(feature_list)), + ", ".join(feature_list), + ) + + console.print(category_table) + + # Add usage tips + tips_panel = Panel( + "[bold cyan]💡 Quick Tips:[/bold cyan]\n" + "• Use [yellow]swarms features[/yellow] to see this table anytime\n" + "• Use [yellow]swarms help[/yellow] for detailed command documentation\n" + "• Use [yellow]swarms setup-check --verbose[/yellow] for detailed diagnostics\n" + "• Most commands support [yellow]--verbose[/yellow] for detailed output\n" + "• Use [yellow]swarms --help[/yellow] for command-specific help", + title="📚 Usage Tips", + border_style=COLORS["success"], + padding=(1, 2), + ) + console.print(tips_panel) + + console.print( + "\n[dim]For more information, visit: https://docs.swarms.world[/dim]" + ) + + def show_help(): """Display a beautifully formatted help message with comprehensive command reference.""" console.print( @@ -771,7 +1025,10 @@ def show_help(): "• [yellow]swarms onboarding[/yellow] - Environment setup check\n" "• [yellow]swarms setup-check[/yellow] - Check your environment\n" "• [yellow]swarms agent --name 'MyAgent' --task 'Hello World'[/yellow] - Create agent\n" - "• [yellow]swarms autoswarm --task 'analyze data' --model gpt-4[/yellow] - Auto-generate swarm", + "• [yellow]swarms autoswarm --task 'analyze data' --model gpt-4[/yellow] - Auto-generate swarm\n" + "• [yellow]swarms llm-council --task 'Your question'[/yellow] - Run LLM Council\n" + "• [yellow]swarms heavy-swarm --task 'Your task'[/yellow] - Run HeavySwarm\n" + "• [yellow]swarms features[/yellow] - View all available features", title="⚡ Quick Usage Guide", border_style=COLORS["secondary"], padding=(1, 2), @@ -1028,6 +1285,189 @@ def load_markdown_agents( return [] +def run_heavy_swarm( + task: str, + loops_per_agent: int = 1, + question_agent_model_name: str = "gpt-4o-mini", + worker_model_name: str = "gpt-4o-mini", + random_loops_per_agent: bool = False, + verbose: bool = 
False, +): + """ + Run the HeavySwarm with a given task. + + Args: + task: The task/query for the HeavySwarm to process + loops_per_agent: Number of execution loops each agent should perform + question_agent_model_name: Model name for question generation + worker_model_name: Model name for specialized worker agents + random_loops_per_agent: Enable random number of loops per agent (1-10 range) + verbose: Whether to show verbose output + """ + try: + console.print( + "[yellow]🚀 Initializing HeavySwarm...[/yellow]" + ) + + # Create progress display + progress = Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + console=console, + ) + + with progress: + # Add initial task + init_task = progress.add_task( + "Initializing swarm...", total=None + ) + + # Create HeavySwarm + progress.update( + init_task, + description="Creating HeavySwarm with specialized agents...", + ) + swarm = HeavySwarm( + loops_per_agent=loops_per_agent, + question_agent_model_name=question_agent_model_name, + worker_model_name=worker_model_name, + random_loops_per_agent=random_loops_per_agent, + verbose=verbose, + ) + + # Update progress + progress.update( + init_task, + description="Swarm initialized! Processing task...", + ) + + # Run the swarm + result = swarm.run(task=task) + + # Update progress on completion + progress.update( + init_task, + description="Task completed!", + completed=True, + ) + + # Display results + if result: + console.print( + "\n[bold green]✓ HeavySwarm completed successfully![/bold green]" + ) + + # Display result in a panel + result_panel = Panel( + str(result), + title="HeavySwarm Final Response", + border_style="green", + padding=(1, 2), + ) + console.print(result_panel) + + return result + else: + console.print( + "[yellow]⚠ HeavySwarm completed but returned no results.[/yellow]" + ) + return None + + except Exception as e: + show_error( + "HeavySwarm Error", + f"Failed to run HeavySwarm: {str(e)}\n\n" + "Please check:\n" + "1. 
Your API keys are set correctly\n" + "2. You have network connectivity\n" + "3. The task is properly formatted", + ) + return None + + +def run_llm_council(task: str, verbose: bool = True): + """ + Run the LLM Council with a given task. + + Args: + task: The task/query for the LLM Council to process + verbose: Whether to show verbose output + """ + try: + console.print( + "[yellow]🏛️ Initializing LLM Council...[/yellow]" + ) + + # Create progress display + progress = Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + console=console, + ) + + with progress: + # Add initial task + init_task = progress.add_task( + "Initializing council...", total=None + ) + + # Create LLM Council + progress.update( + init_task, + description="Creating LLM Council with default members...", + ) + council = LLMCouncil(verbose=verbose) + + # Update progress + progress.update( + init_task, + description="Council initialized! Processing task...", + ) + + # Run the council + result = council.run(query=task) + + # Update progress on completion + progress.update( + init_task, + description="Task completed!", + completed=True, + ) + + # Display results + if result: + console.print( + "\n[bold green]✓ LLM Council completed successfully![/bold green]" + ) + + # Display result in a panel + result_panel = Panel( + str(result), + title="LLM Council Final Response", + border_style="green", + padding=(1, 2), + ) + console.print(result_panel) + + return result + else: + console.print( + "[yellow]⚠ LLM Council completed but returned no results.[/yellow]" + ) + return None + + except Exception as e: + show_error( + "LLM Council Error", + f"Failed to run LLM Council: {str(e)}\n\n" + "Please check:\n" + "1. Your API keys are set correctly\n" + "2. You have network connectivity\n" + "3. 
The task is properly formatted", + ) + return None + + def create_swarm_agent( name: str, description: str, @@ -1158,6 +1598,9 @@ def main(): "book-call", "autoswarm", "setup-check", + "llm-council", + "heavy-swarm", + "features", ], help="Command to execute", ) @@ -1285,6 +1728,30 @@ def main(): type=str, help="MCP URL for the agent", ) + # HeavySwarm specific arguments + parser.add_argument( + "--loops-per-agent", + type=int, + default=1, + help="Number of execution loops each agent should perform (default: 1)", + ) + parser.add_argument( + "--question-agent-model-name", + type=str, + default="gpt-4o-mini", + help="Model name for question generation agent (default: gpt-4o-mini)", + ) + parser.add_argument( + "--worker-model-name", + type=str, + default="gpt-4o-mini", + help="Model name for specialized worker agents (default: gpt-4o-mini)", + ) + parser.add_argument( + "--random-loops-per-agent", + action="store_true", + help="Enable random number of loops per agent (1-10 range)", + ) args = parser.parse_args() @@ -1297,6 +1764,8 @@ def main(): run_setup_check(verbose=args.verbose) elif args.command == "help": show_help() + elif args.command == "features": + show_features() elif args.command == "get-api-key": get_api_key() elif args.command == "check-login": @@ -1517,6 +1986,29 @@ def main(): run_autoswarm(args.task, args.model) elif args.command == "setup-check": run_setup_check(verbose=args.verbose) + elif args.command == "llm-council": + if not args.task: + show_error( + "Missing required argument: --task", + "Example usage: swarms llm-council --task 'What is the best approach to solve this problem?'", + ) + exit(1) + run_llm_council(task=args.task, verbose=args.verbose) + elif args.command == "heavy-swarm": + if not args.task: + show_error( + "Missing required argument: --task", + "Example usage: swarms heavy-swarm --task 'Analyze the current market trends'", + ) + exit(1) + run_heavy_swarm( + task=args.task, + loops_per_agent=args.loops_per_agent, + 
question_agent_model_name=args.question_agent_model_name, + worker_model_name=args.worker_model_name, + random_loops_per_agent=args.random_loops_per_agent, + verbose=args.verbose, + ) except Exception as e: console.print( f"[{COLORS['error']}]Error: {str(e)}[/{COLORS['error']}]" diff --git a/swarms/prompts/__init__.py b/swarms/prompts/__init__.py index e73a118f..6bd4edfb 100644 --- a/swarms/prompts/__init__.py +++ b/swarms/prompts/__init__.py @@ -8,6 +8,11 @@ from swarms.prompts.operations_agent_prompt import ( ) from swarms.prompts.product_agent_prompt import PRODUCT_AGENT_PROMPT from swarms.prompts.prompt import Prompt +from swarms.prompts.autonomous_agent_prompt import ( + AUTONOMOUS_AGENT_SYSTEM_PROMPT, + get_autonomous_agent_prompt, + get_autonomous_agent_prompt_with_context, +) __all__ = [ "CODE_INTERPRETER", @@ -18,4 +23,7 @@ __all__ = [ "PRODUCT_AGENT_PROMPT", "DOCUMENTATION_WRITER_SOP", "Prompt", + "AUTONOMOUS_AGENT_SYSTEM_PROMPT", + "get_autonomous_agent_prompt", + "get_autonomous_agent_prompt_with_context", ] diff --git a/swarms/prompts/autonomous_agent_prompt.py b/swarms/prompts/autonomous_agent_prompt.py new file mode 100644 index 00000000..54bddedb --- /dev/null +++ b/swarms/prompts/autonomous_agent_prompt.py @@ -0,0 +1,357 @@ +""" +Comprehensive prompt for autonomous agent operating in auto loop mode. + +This prompt guides the agent through the structured workflow: +plan -> think -> action -> subtask_done -> complete_task +""" + +AUTONOMOUS_AGENT_SYSTEM_PROMPT = """You are an elite autonomous agent operating in a sophisticated autonomous loop structure. Your mission is to reliably and efficiently complete complex tasks by breaking them down into manageable subtasks, executing them systematically, and providing comprehensive results. + +## CORE PRINCIPLES + +1. **Excellence First**: The quality of your outputs directly impacts user success. Strive for perfection. +2. 
**Systematic Approach**: Break down complex tasks into clear, actionable steps with proper dependencies. +3. **Action-Oriented**: Focus on execution and completion, not endless analysis or communication. +4. **Adaptive Problem-Solving**: When obstacles arise, analyze, adapt, and continue forward. +5. **Transparency**: Keep users informed of progress, but prioritize execution over communication. + +## AUTONOMOUS LOOP WORKFLOW + +You operate in a structured three-phase cycle: + +### PHASE 1: PLANNING +**Objective**: Create a comprehensive, actionable plan for the task. + +**Process**: +1. Analyze the main task thoroughly +2. Break it down into smaller, manageable subtasks +3. Assign appropriate priorities (critical, high, medium, low) +4. Identify dependencies between subtasks +5. Use the `create_plan` tool to formalize your plan + +**Guidelines**: +- Each subtask should be specific and actionable +- Critical priority tasks are foundational and must be completed first +- Dependencies ensure logical execution order +- The plan should be comprehensive but not overly granular + +**Example Plan Structure**: +``` +Task: Research and write a report on renewable energy +├── research_sources (critical) - Identify authoritative sources +├── gather_data (high, depends on: research_sources) - Collect relevant data +├── analyze_trends (high, depends on: gather_data) - Analyze patterns +├── draft_report (critical, depends on: analyze_trends) - Write initial draft +└── finalize_report (medium, depends on: draft_report) - Polish and format +``` + +### PHASE 2: EXECUTION +**Objective**: Complete each subtask systematically and efficiently. + +**Workflow for Each Subtask**: +1. **Brief Analysis** (Optional but Recommended): + - Use the `think` tool ONCE to analyze what needs to be done + - Assess complexity, required tools, and approach + - Set clear expectations for the subtask outcome + +2. 
**Take Action**: + - Use available tools to complete the work + - Execute concrete actions, not just analysis + - Make progress toward the subtask goal + +3. **Communicate Progress** (Optional, Limit to Once): + - Use `respond_to_user` ONCE if significant progress is made or clarification is needed + - Do NOT repeatedly communicate - focus on execution + - Communication should be informative, not repetitive + +4. **Complete Subtask**: + - When the subtask is finished, call `subtask_done` with: + - task_id: The ID of the completed subtask + - summary: A clear summary of what was accomplished + - success: true if completed successfully, false otherwise + +**Critical Rules**: +- DO NOT call `think` more than 2 times consecutively - take action instead +- DO NOT call `respond_to_user` more than 2 times consecutively - execute instead +- DO NOT get stuck in analysis or communication loops +- DO focus on completing the actual work +- DO mark subtasks as done when finished, not when you're "about to start" + +**Tool Usage Priority**: +1. Use available user-provided tools for actual work +2. Use `think` briefly for complex situations (max 2 times) +3. Use `respond_to_user` sparingly for important updates (max 2 times) +4. Always end with `subtask_done` when work is complete + +### PHASE 3: THINKING (Between Tasks) +**Objective**: Reflect on progress and determine next steps. + +**When to Enter Thinking Phase**: +- After completing a subtask +- When assessing overall progress +- Before finalizing the main task + +**Process**: +1. Assess current state: + - How many subtasks are completed? + - What progress has been made? + - What remains to be done? + +2. Determine next action: + - If all subtasks are complete: Call `complete_task` + - If subtasks remain: Return to execution phase for the next task + - If stuck: Analyze the issue and take corrective action + +3. 
Keep it brief: + - Thinking phase should be quick assessment, not deep analysis + - Move to action quickly + +## TOOL USAGE GUIDELINES + +### create_plan +**When to Use**: At the very beginning, when you receive the main task. +**How to Use**: +- Provide a clear task_description +- Break down into steps with step_id, description, priority, and dependencies +- Ensure the plan is comprehensive and actionable + +### think +**When to Use**: +- Before starting a complex subtask (optional but recommended) +- When you need to analyze a situation +- Maximum: 2 consecutive calls before you MUST take action + +**How to Use**: +- Provide current_state, analysis, next_actions, and confidence +- Be concise and action-oriented +- Use it to plan, not to procrastinate + +**WARNING**: If you call `think` 2+ times consecutively, it will be BLOCKED. You must take action. + +### respond_to_user +**When to Use**: +- To provide important progress updates +- To ask critical questions that block progress +- To share significant results or findings +- Maximum: 2 consecutive calls before you MUST take action + +**How to Use**: +- message: Clear, informative message +- message_type: One of: update, question, result, error, info +- Be concise and actionable + +**WARNING**: If you call `respond_to_user` 2+ times consecutively, you will be forced to execute. Stop communicating and start working. + +### subtask_done +**When to Use**: After completing a subtask, not before starting it. +**How to Use**: +- task_id: The exact step_id from your plan +- summary: What was accomplished (be specific) +- success: true if completed successfully, false if failed + +**Critical**: Only call this when the subtask is ACTUALLY DONE, not when you're "about to start" or "planning to do it." + +### complete_task +**When to Use**: ONLY when ALL subtasks are completed. 
+**How to Use**: +- task_id: Usually "main_task" or the original task identifier +- summary: Comprehensive summary of the entire task completion +- success: true if the main task was successful +- results: Detailed results (optional) +- lessons_learned: Key insights (optional) + +**Critical**: Do NOT call this until every subtask shows status "completed" or "failed". + +## LOOP PREVENTION STRATEGIES + +### Thinking Loop Prevention +- **Limit**: Maximum 2 consecutive `think` calls +- **Enforcement**: After 2 calls, `think` tool is BLOCKED +- **Solution**: Take action immediately, use available tools, complete the work + +### Communication Loop Prevention +- **Limit**: Maximum 2 consecutive `respond_to_user` calls +- **Enforcement**: After 2 calls, you're forced to execute +- **Solution**: Stop talking, start working, complete the task + +### Action Encouragement +- After thinking, immediately execute +- After communicating, immediately execute +- Focus on tool usage and task completion +- Mark subtasks as done when finished + +## BEST PRACTICES + +### Planning Best Practices +1. **Break Down Appropriately**: Not too granular (micro-tasks), not too broad (unclear tasks) +2. **Set Priorities Correctly**: Critical tasks are foundational, high tasks are important, medium/low are nice-to-have +3. **Identify Dependencies**: Ensure logical execution order +4. **Be Specific**: Each subtask should have a clear, actionable description + +### Execution Best Practices +1. **Think Once, Act Many**: Use `think` briefly, then execute multiple actions +2. **Communicate Sparingly**: Use `respond_to_user` for important updates only +3. **Use Tools Effectively**: Leverage available tools to accomplish work +4. **Complete Before Moving On**: Finish one subtask before starting the next +5. **Mark Progress**: Always call `subtask_done` when a subtask is complete + +### Thinking Best Practices +1. **Be Brief**: Quick assessment, not deep philosophical analysis +2. 
**Be Action-Oriented**: Focus on what to do next, not just reflection +3. **Move Forward**: After thinking, immediately take action + +### Communication Best Practices +1. **Be Informative**: Share useful information, not fluff +2. **Be Concise**: Get to the point quickly +3. **Be Actionable**: If asking questions, make them specific and necessary +4. **Limit Frequency**: One update per subtask is usually sufficient + +## COMMON PITFALLS TO AVOID + +### ❌ DON'T: +- Call `think` repeatedly without taking action +- Call `respond_to_user` repeatedly without executing +- Call `subtask_done` before actually completing the work +- Call `complete_task` before all subtasks are done +- Get stuck in analysis paralysis +- Over-communicate instead of executing +- Skip the planning phase +- Ignore dependencies in your plan + +### ✅ DO: +- Create a comprehensive plan first +- Think briefly, then act decisively +- Use tools to accomplish actual work +- Complete subtasks before marking them done +- Communicate only when necessary +- Follow the workflow: plan -> think -> action -> subtask_done +- Complete all subtasks before calling `complete_task` +- Provide comprehensive final reports + +## TASK COMPLETION CHECKLIST + +Before calling `complete_task`, verify: +- [ ] All subtasks have been marked as "completed" or "failed" +- [ ] The main task objective has been achieved +- [ ] Results are ready to be shared +- [ ] A comprehensive summary can be provided + +## EXAMPLE WORKFLOW + +**Task**: "Research the top 5 AI companies and create a comparison report" + +**Step 1: Planning** +``` +create_plan( + task_description="Research top 5 AI companies and create comparison report", + steps=[ + {"step_id": "identify_companies", "description": "Identify top 5 AI companies", "priority": "critical"}, + {"step_id": "gather_data", "description": "Gather financial and product data", "priority": "high", "dependencies": ["identify_companies"]}, + {"step_id": "analyze_comparison", "description": 
"Compare companies across metrics", "priority": "high", "dependencies": ["gather_data"]}, + {"step_id": "create_report", "description": "Write comparison report", "priority": "critical", "dependencies": ["analyze_comparison"]} + ] +) +``` + +**Step 2: Execution - Subtask 1** +``` +think(current_state="Starting first subtask", analysis="Need to identify top AI companies", next_actions=["Use search tools"], confidence=0.9) +[Use search tools to find top AI companies] +subtask_done(task_id="identify_companies", summary="Identified top 5 AI companies: OpenAI, Anthropic, Google DeepMind, Meta AI, Microsoft AI", success=True) +``` + +**Step 3: Execution - Subtask 2** +``` +[Use tools to gather data on each company] +respond_to_user(message="Gathered financial data for all 5 companies", message_type="update") +subtask_done(task_id="gather_data", summary="Collected financial metrics, product information, and market position for all companies", success=True) +``` + +**Step 4: Execution - Subtask 3** +``` +[Analyze and compare companies] +subtask_done(task_id="analyze_comparison", summary="Compared companies across revenue, market cap, product offerings, and innovation metrics", success=True) +``` + +**Step 5: Execution - Subtask 4** +``` +[Create comprehensive report] +subtask_done(task_id="create_report", summary="Created detailed comparison report with analysis and recommendations", success=True) +``` + +**Step 6: Completion** +``` +complete_task( + task_id="main_task", + summary="Successfully researched top 5 AI companies and created comprehensive comparison report", + success=True, + results="Report includes detailed analysis of OpenAI, Anthropic, Google DeepMind, Meta AI, and Microsoft AI", + lessons_learned="AI market is rapidly evolving with significant competition" +) +``` + +## FINAL REMINDERS + +1. **Plan First**: Always create a plan before executing +2. **Think Briefly**: Use `think` for quick analysis, not endless reflection +3. 
**Execute Decisively**: Take concrete actions, use tools, make progress +4. **Communicate Sparingly**: Update users when necessary, but prioritize work +5. **Complete Systematically**: Finish subtasks before marking them done +6. **Finalize Properly**: Only call `complete_task` when everything is finished + +Remember: You are an elite autonomous agent. Your goal is to complete tasks efficiently and effectively. Avoid loops, focus on execution, and deliver exceptional results. + +Now, begin your mission with excellence.""" + + +def get_autonomous_agent_prompt() -> str: + """ + Get the comprehensive autonomous agent system prompt. + + Returns: + str: The full autonomous agent system prompt + """ + return AUTONOMOUS_AGENT_SYSTEM_PROMPT + + +def get_autonomous_agent_prompt_with_context( + agent_name: str = None, + agent_description: str = None, + available_tools: list = None, +) -> str: + """ + Get the autonomous agent prompt with contextual information. + + Args: + agent_name: Name of the agent + agent_description: Description of the agent's role + available_tools: List of available tool names + + Returns: + str: Contextualized autonomous agent prompt + """ + prompt = AUTONOMOUS_AGENT_SYSTEM_PROMPT + + if agent_name: + prompt = prompt.replace( + "You are an elite autonomous agent", + f"You are {agent_name}, an elite autonomous agent", + ) + + if agent_description: + prompt += f"\n\n## AGENT ROLE\n{agent_description}\n" + + if available_tools and len(available_tools) > 0: + tools_list = "\n".join( + [f"- {tool}" for tool in available_tools[:20]] + ) # Limit to 20 tools + prompt += f"\n\n## AVAILABLE TOOLS\nYou have access to the following tools:\n{tools_list}\n" + if len(available_tools) > 20: + prompt += ( + f"\n(and {len(available_tools) - 20} more tools)\n" + ) + prompt += ( + "\nUse these tools effectively to complete your tasks.\n" + ) + + return prompt diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index e0d3430a..18f46c5c 100644 --- 
a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -34,6 +34,7 @@ from swarms.structs.interactive_groupchat import ( random_speaker, round_robin_speaker, ) +from swarms.structs.llm_council import LLMCouncil from swarms.structs.ma_blocks import ( aggregate, find_agent_by_name, @@ -89,7 +90,6 @@ from swarms.structs.swarming_architectures import ( geometric_swarm, grid_swarm, harmonic_swarm, - linear_swarm, log_swarm, mesh_swarm, one_to_one, @@ -127,7 +127,6 @@ __all__ = [ "geometric_swarm", "grid_swarm", "harmonic_swarm", - "linear_swarm", "log_swarm", "mesh_swarm", "one_to_one", @@ -161,6 +160,7 @@ __all__ = [ "get_swarms_info", "AutoSwarmBuilder", "CouncilAsAJudge", + "LLMCouncil", "batch_agent_execution", "aggregate", "find_agent_by_name", diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 32687894..3ffb39df 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -3309,7 +3309,7 @@ class Agent: # Get the text content from the tool response # execute_tool_call_simple returns a string directly, not an object with content attribute - text_content = f"MCP Tool Response: \n\n {json.dumps(tool_response, indent=2)}" + text_content = f"MCP Tool Response: \n\n {json.dumps(tool_response, indent=2, sort_keys=True)}" if self.print_on is True: formatter.print_panel( diff --git a/swarms/structs/agent_rearrange.py b/swarms/structs/agent_rearrange.py index d3016de4..a0155ef6 100644 --- a/swarms/structs/agent_rearrange.py +++ b/swarms/structs/agent_rearrange.py @@ -1,7 +1,7 @@ import json from concurrent.futures import ThreadPoolExecutor from typing import Any, Callable, Dict, List, Optional, Union - +import asyncio from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation from swarms.structs.multi_agent_exec import run_agents_concurrently @@ -908,6 +908,45 @@ class AgentRearrange: except Exception as e: self._catch_error(e) + async def run_async( + self, + task: str, + img: Optional[str] = None, + 
*args, + **kwargs, + ) -> Any: + """ + Asynchronously executes a task through the agent workflow. + + This method enables asynchronous execution of tasks by running the + synchronous run method in a separate thread using asyncio.to_thread. + This is ideal for integrating the agent workflow into async applications + or when you want non-blocking execution. + + Args: + task (str): The task to be executed through the agent workflow. + img (Optional[str]): Optional image input for the task. Defaults to None. + *args: Additional positional arguments passed to the run method. + **kwargs: Additional keyword arguments passed to the run method. + + Returns: + Any: The result of the task execution, format depends on output_type setting. + + Raises: + Exception: If an error occurs during task execution. + + Note: + This method uses asyncio.to_thread to run the synchronous run method + asynchronously, allowing integration with async/await patterns. + """ + + try: + return await asyncio.to_thread( + self.run, task=task, img=img, *args, **kwargs + ) + except Exception as e: + self._catch_error(e) + def _serialize_callable( self, attr_value: Callable ) -> Dict[str, Any]: diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py index a8f7bea4..141dfe62 100644 --- a/swarms/structs/aop.py +++ b/swarms/structs/aop.py @@ -12,8 +12,10 @@ from typing import Any, Callable, Dict, List, Literal, Optional from uuid import uuid4 from loguru import logger +from mcp.server.auth.settings import AuthSettings from mcp.server.fastmcp import FastMCP from mcp.server.lowlevel.server import LifespanResultT +from mcp.server.transport_security import TransportSecuritySettings from swarms.structs.agent import Agent from swarms.structs.omni_agent_types import AgentType @@ -21,7 +23,6 @@ from swarms.tools.mcp_client_tools import ( get_tools_for_multiple_mcp_servers, ) -from mcp.server.fastmcp import AuthSettings, TransportSecuritySettings class TaskStatus(Enum): """Status of a task in the queue.""" @@ 
-603,7 +604,13 @@ class AOP: log_level: Literal[ "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" ] = "INFO", - lifespan: Callable[[FastMCP[LifespanResultT]], AbstractAsyncContextManager[LifespanResultT]] | None = None, + lifespan: ( + Callable[ + [FastMCP[LifespanResultT]], + AbstractAsyncContextManager[LifespanResultT], + ] + | None + ) = None, auth: AuthSettings | None = None, transport_security: TransportSecuritySettings | None = None, *args, @@ -672,6 +679,7 @@ class AOP: self.tool_configs: Dict[str, AgentToolConfig] = {} self.task_queues: Dict[str, TaskQueue] = {} self.transport = transport + self.mcp_server = FastMCP( name=server_name, port=port, diff --git a/swarms/structs/auto_swarm_builder.py b/swarms/structs/auto_swarm_builder.py index 514cb79c..08c75164 100644 --- a/swarms/structs/auto_swarm_builder.py +++ b/swarms/structs/auto_swarm_builder.py @@ -407,6 +407,8 @@ class AutoSwarmBuilder: agents_dictionary = model.run(task) + agents_dictionary = json.loads(agents_dictionary) + return agents_dictionary except Exception as e: @@ -437,6 +439,8 @@ class AutoSwarmBuilder: f"Create the swarm spec for the following task: {task}" ) + swarm_spec = json.loads(swarm_spec) + print(swarm_spec) print(type(swarm_spec)) diff --git a/swarms/structs/custom_agent.py b/swarms/structs/custom_agent.py deleted file mode 100644 index b4c3617e..00000000 --- a/swarms/structs/custom_agent.py +++ /dev/null @@ -1,344 +0,0 @@ -import json -from dataclasses import dataclass -from typing import Any, Dict, Optional, Union - -import httpx -from loguru import logger - - -@dataclass -class AgentResponse: - """Data class to hold agent response information""" - - status_code: int - content: str - headers: Dict[str, str] - json_data: Optional[Dict[str, Any]] = None - success: bool = False - error_message: Optional[str] = None - - -class CustomAgent: - """ - A custom HTTP agent class for making POST requests using httpx. 
- - Features: - - Configurable headers and payload - - Both sync and async execution - - Built-in error handling and logging - - Flexible response handling - - Name and description - """ - - def __init__( - self, - name: str, - description: str, - base_url: str, - endpoint: str, - headers: Optional[Dict[str, str]] = None, - timeout: float = 30.0, - verify_ssl: bool = True, - *args, - **kwargs, - ): - """ - Initialize the Custom Agent. - - Args: - base_url: Base URL for the API endpoint - endpoint: API endpoint path - headers: Default headers to include in requests - timeout: Request timeout in seconds - verify_ssl: Whether to verify SSL certificates - """ - self.base_url = base_url.rstrip("/") - self.endpoint = endpoint.lstrip("/") - self.default_headers = headers or {} - self.timeout = timeout - self.verify_ssl = verify_ssl - - # Default headers - if "Content-Type" not in self.default_headers: - self.default_headers["Content-Type"] = "application/json" - - logger.info( - f"CustomAgent initialized for {self.base_url}/{self.endpoint}" - ) - - def _prepare_headers( - self, additional_headers: Optional[Dict[str, str]] = None - ) -> Dict[str, str]: - """Merge default headers with additional headers.""" - headers = self.default_headers.copy() - if additional_headers: - headers.update(additional_headers) - return headers - - def _prepare_payload( - self, payload: Union[Dict, str, bytes] - ) -> Union[str, bytes]: - """Prepare the payload for the request.""" - if isinstance(payload, dict): - return json.dumps(payload) - return payload - - def _parse_response( - self, response: httpx.Response - ) -> AgentResponse: - """Parse httpx response into AgentResponse object.""" - try: - # Try to parse JSON if possible - json_data = None - if response.headers.get("content-type", "").startswith( - "application/json" - ): - try: - json_data = response.json() - except json.JSONDecodeError: - pass - - return AgentResponse( - status_code=response.status_code, - content=response.text, - 
headers=dict(response.headers), - json_data=json_data, - success=200 <= response.status_code < 300, - error_message=( - None - if 200 <= response.status_code < 300 - else f"HTTP {response.status_code}" - ), - ) - except Exception as e: - logger.error(f"Error parsing response: {e}") - return AgentResponse( - status_code=response.status_code, - content=response.text, - headers=dict(response.headers), - success=False, - error_message=str(e), - ) - - def _extract_content(self, response_data: Dict[str, Any]) -> str: - """ - Extract message content from API response, supporting multiple formats. - - Args: - response_data: Parsed JSON response from API - - Returns: - str: Extracted message content - """ - try: - # OpenAI format - if ( - "choices" in response_data - and response_data["choices"] - ): - choice = response_data["choices"][0] - if ( - "message" in choice - and "content" in choice["message"] - ): - return choice["message"]["content"] - elif "text" in choice: - return choice["text"] - - # Anthropic format - elif ( - "content" in response_data - and response_data["content"] - ): - if isinstance(response_data["content"], list): - # Extract text from content blocks - text_parts = [] - for content_block in response_data["content"]: - if ( - isinstance(content_block, dict) - and "text" in content_block - ): - text_parts.append(content_block["text"]) - elif isinstance(content_block, str): - text_parts.append(content_block) - return "".join(text_parts) - elif isinstance(response_data["content"], str): - return response_data["content"] - - # Generic fallback - look for common content fields - elif "text" in response_data: - return response_data["text"] - elif "message" in response_data: - return response_data["message"] - elif "response" in response_data: - return response_data["response"] - - # If no known format, return the entire response as JSON string - logger.warning( - "Unknown response format, returning full response" - ) - return json.dumps(response_data, 
indent=2) - - except Exception as e: - logger.error(f"Error extracting content: {e}") - return json.dumps(response_data, indent=2) - - def run( - self, - payload: Union[Dict[str, Any], str, bytes], - additional_headers: Optional[Dict[str, str]] = None, - **kwargs, - ) -> str: - """ - Execute a synchronous POST request. - - Args: - payload: Request body/payload - additional_headers: Additional headers for this request - **kwargs: Additional httpx client options - - Returns: - str: Extracted message content from response - """ - url = f"{self.base_url}/{self.endpoint}" - request_headers = self._prepare_headers(additional_headers) - request_payload = self._prepare_payload(payload) - - logger.info(f"Making POST request to: {url}") - - try: - with httpx.Client( - timeout=self.timeout, verify=self.verify_ssl, **kwargs - ) as client: - response = client.post( - url, - content=request_payload, - headers=request_headers, - ) - - if 200 <= response.status_code < 300: - logger.info( - f"Request successful: {response.status_code}" - ) - try: - response_data = response.json() - return self._extract_content(response_data) - except json.JSONDecodeError: - logger.warning( - "Response is not JSON, returning raw text" - ) - return response.text - else: - logger.warning( - f"Request failed: {response.status_code}" - ) - return f"Error: HTTP {response.status_code} - {response.text}" - - except httpx.RequestError as e: - logger.error(f"Request error: {e}") - return f"Request error: {str(e)}" - except Exception as e: - logger.error(f"Unexpected error: {e}") - return f"Unexpected error: {str(e)}" - - async def run_async( - self, - payload: Union[Dict[str, Any], str, bytes], - additional_headers: Optional[Dict[str, str]] = None, - **kwargs, - ) -> str: - """ - Execute an asynchronous POST request. 
- - Args: - payload: Request body/payload - additional_headers: Additional headers for this request - **kwargs: Additional httpx client options - - Returns: - str: Extracted message content from response - """ - url = f"{self.base_url}/{self.endpoint}" - request_headers = self._prepare_headers(additional_headers) - request_payload = self._prepare_payload(payload) - - logger.info(f"Making async POST request to: {url}") - - try: - async with httpx.AsyncClient( - timeout=self.timeout, verify=self.verify_ssl, **kwargs - ) as client: - response = await client.post( - url, - content=request_payload, - headers=request_headers, - ) - - if 200 <= response.status_code < 300: - logger.info( - f"Async request successful: {response.status_code}" - ) - try: - response_data = response.json() - return self._extract_content(response_data) - except json.JSONDecodeError: - logger.warning( - "Async response is not JSON, returning raw text" - ) - return response.text - else: - logger.warning( - f"Async request failed: {response.status_code}" - ) - return f"Error: HTTP {response.status_code} - {response.text}" - - except httpx.RequestError as e: - logger.error(f"Async request error: {e}") - return f"Request error: {str(e)}" - except Exception as e: - logger.error(f"Unexpected async error: {e}") - return f"Unexpected error: {str(e)}" - - -# # Example usage with Anthropic API -# if __name__ == "__main__": -# # Initialize the agent for Anthropic API -# anthropic_agent = CustomAgent( -# base_url="https://api.anthropic.com", -# endpoint="v1/messages", -# headers={ -# "x-api-key": "your-anthropic-api-key-here", -# "anthropic-version": "2023-06-01" -# } -# ) - -# # Example payload for Anthropic API -# payload = { -# "model": "claude-3-sonnet-20240229", -# "max_tokens": 1000, -# "messages": [ -# { -# "role": "user", -# "content": "Hello! Can you explain what artificial intelligence is?" 
-# } -# ] -# } - -# # Make the request -# try: -# response = anthropic_agent.run(payload) -# print("Anthropic API Response:") -# print(response) -# except Exception as e: -# print(f"Error: {e}") - -# # Example with async usage -# # import asyncio -# # -# # async def async_example(): -# # response = await anthropic_agent.run_async(payload) -# # print("Async Anthropic API Response:") -# # print(response) -# # -# # Uncomment to run async example -# # asyncio.run(async_example()) diff --git a/swarms/structs/debate_with_judge.py b/swarms/structs/debate_with_judge.py index e3104198..0baa54f5 100644 --- a/swarms/structs/debate_with_judge.py +++ b/swarms/structs/debate_with_judge.py @@ -1,13 +1,4 @@ -""" -Debate/Self-Refinement with Judge Architecture - -This module implements a debate architecture where two agents (Pro and Con) -debate a topic, and a Judge agent evaluates their arguments and provides -refined synthesis. The process repeats for N rounds to progressively refine -the answer. -""" - -from typing import List, Union +from typing import List, Optional, Union from loguru import logger @@ -18,6 +9,66 @@ from swarms.utils.history_output_formatter import ( ) +# Pre-built system prompts for debate agents +PRO_AGENT_SYSTEM_PROMPT = """You are an expert debater specializing in arguing IN FAVOR of propositions. + +Your Role: +- Present compelling, well-reasoned arguments supporting your assigned position +- Use evidence, logic, and persuasive rhetoric to make your case +- Anticipate and preemptively address potential counterarguments +- Build upon previous arguments when refining your position + +Debate Guidelines: +1. Structure your arguments clearly with main points and supporting evidence +2. Use concrete examples and data when available +3. Acknowledge valid opposing points while explaining why your position is stronger +4. Maintain a professional, respectful tone throughout the debate +5. 
Focus on the strongest aspects of your position + +Your goal is to present the most compelling case possible for the Pro position.""" + +CON_AGENT_SYSTEM_PROMPT = """You are an expert debater specializing in arguing AGAINST propositions. + +Your Role: +- Present compelling, well-reasoned counter-arguments opposing the given position +- Identify weaknesses, flaws, and potential negative consequences +- Challenge assumptions and evidence presented by the opposing side +- Build upon previous arguments when refining your position + +Debate Guidelines: +1. Structure your counter-arguments clearly with main points and supporting evidence +2. Use concrete examples and data to support your opposition +3. Directly address and refute the Pro's arguments +4. Maintain a professional, respectful tone throughout the debate +5. Focus on the most significant weaknesses of the opposing position + +Your goal is to present the most compelling case possible against the proposition.""" + +JUDGE_AGENT_SYSTEM_PROMPT = """You are an impartial judge and critical evaluator of debates. + +Your Role: +- Objectively evaluate arguments from both Pro and Con sides +- Identify strengths and weaknesses in each position +- Provide constructive feedback for improvement +- Synthesize the best elements from both sides when appropriate +- Render fair verdicts based on argument quality, not personal bias + +Evaluation Criteria: +1. Logical coherence and reasoning quality +2. Evidence and supporting data quality +3. Persuasiveness and rhetorical effectiveness +4. Responsiveness to opposing arguments +5. 
Overall argument structure and clarity + +Judgment Guidelines: +- Be specific about what makes arguments strong or weak +- Provide actionable feedback for improvement +- When synthesizing, explain how elements from both sides complement each other +- In final rounds, provide clear conclusions with justification + +Your goal is to facilitate productive debate and arrive at well-reasoned conclusions.""" + + class DebateWithJudge: """ A debate architecture with self-refinement through a judge agent. @@ -26,7 +77,7 @@ class DebateWithJudge: 1. Agent A (Pro) and Agent B (Con) present opposing arguments 2. Both arguments are evaluated by a Judge/Critic Agent 3. The Judge provides a winner or synthesis → refined answer - 4. The process repeats for N rounds to progressively improve the answer + 4. The process repeats for N loops to progressively improve the answer Architecture: Agent A (Pro) ↔ Agent B (Con) @@ -37,62 +88,214 @@ class DebateWithJudge: ▼ Winner or synthesis → refined answer + Initialization Options: + 1. Provide individual agents: pro_agent, con_agent, judge_agent + 2. Provide a list of agents: agents=[pro, con, judge] + 3. Use preset agents: preset_agents=True (creates default agents automatically) + Attributes: pro_agent (Agent): The agent arguing in favor (Pro position). con_agent (Agent): The agent arguing against (Con position). judge_agent (Agent): The judge agent that evaluates arguments and provides synthesis. - max_rounds (int): Maximum number of debate rounds to execute. + max_loops (int): Maximum number of debate loops to execute. output_type (str): Format for the output conversation history. verbose (bool): Whether to enable verbose logging. 
+ + Examples: + >>> # Using preset agents (simplest approach) + >>> debate = DebateWithJudge(preset_agents=True, max_loops=3) + >>> result = debate.run("Should AI be regulated?") + + >>> # Using a list of agents + >>> agents = [pro_agent, con_agent, judge_agent] + >>> debate = DebateWithJudge(agents=agents, max_loops=3) + >>> result = debate.run("Is remote work better than office work?") + + >>> # Using individual agent parameters + >>> debate = DebateWithJudge( + ... pro_agent=my_pro_agent, + ... con_agent=my_con_agent, + ... judge_agent=my_judge_agent + ... ) + >>> result = debate.run("Should we colonize Mars?") """ def __init__( self, - pro_agent: Agent, - con_agent: Agent, - judge_agent: Agent, - max_rounds: int = 3, + pro_agent: Optional[Agent] = None, + con_agent: Optional[Agent] = None, + judge_agent: Optional[Agent] = None, + agents: Optional[List[Agent]] = None, + preset_agents: bool = False, + max_loops: int = 3, output_type: str = "str-all-except-first", verbose: bool = True, + model_name: str = "gpt-4o-mini", ): """ Initialize the DebateWithJudge architecture. Args: - pro_agent (Agent): The agent arguing in favor (Pro position). - con_agent (Agent): The agent arguing against (Con position). - judge_agent (Agent): The judge agent that evaluates arguments and provides synthesis. - max_rounds (int): Maximum number of debate rounds to execute. Defaults to 3. - output_type (str): Format for the output conversation history. Defaults to "str-all-except-first". + pro_agent (Optional[Agent]): The agent arguing in favor (Pro position). + Not required if using agents list or preset_agents. + con_agent (Optional[Agent]): The agent arguing against (Con position). + Not required if using agents list or preset_agents. + judge_agent (Optional[Agent]): The judge agent that evaluates arguments. + Not required if using agents list or preset_agents. + agents (Optional[List[Agent]]): A list of exactly 3 agents in order: + [pro_agent, con_agent, judge_agent]. 
Takes precedence over individual + agent parameters if provided. + preset_agents (bool): If True, creates default pro, con, and judge agents + automatically. Used when no agents are provided. Defaults to False. + max_loops (int): Maximum number of debate loops to execute. Defaults to 3. + output_type (str): Format for the output conversation history. + Defaults to "str-all-except-first". verbose (bool): Whether to enable verbose logging. Defaults to True. + model_name (str): The model name to use for preset agents. + Defaults to "gpt-4o-mini". Raises: - ValueError: If any of the required agents are None or if max_rounds is less than 1. + ValueError: If no valid agent configuration is provided (no agents, no list, + and preset_agents is False), if agents list doesn't have exactly 3 agents, + or if max_loops is less than 1. """ - if pro_agent is None: - raise ValueError("pro_agent cannot be None") - if con_agent is None: - raise ValueError("con_agent cannot be None") - if judge_agent is None: - raise ValueError("judge_agent cannot be None") - if max_rounds < 1: - raise ValueError("max_rounds must be at least 1") - - self.pro_agent = pro_agent - self.con_agent = con_agent - self.judge_agent = judge_agent - self.max_rounds = max_rounds + if max_loops < 1: + raise ValueError("max_loops must be at least 1") + + self.max_loops = max_loops self.output_type = output_type self.verbose = verbose + self.model_name = model_name + + # Determine agent configuration + self._configure_agents( + pro_agent=pro_agent, + con_agent=con_agent, + judge_agent=judge_agent, + agents=agents, + preset_agents=preset_agents, + ) # Initialize conversation history self.conversation = Conversation() if self.verbose: logger.info( - f"DebateWithJudge initialized with {max_rounds} rounds" + f"DebateWithJudge initialized with {max_loops} loops" + ) + logger.info( + f"Pro Agent: {self.pro_agent.agent_name}, " + f"Con Agent: {self.con_agent.agent_name}, " + f"Judge Agent: {self.judge_agent.agent_name}" ) 
+ def _configure_agents( + self, + pro_agent: Optional[Agent], + con_agent: Optional[Agent], + judge_agent: Optional[Agent], + agents: Optional[List[Agent]], + preset_agents: bool, + ) -> None: + """ + Configure agents based on provided parameters. + + Priority order: + 1. agents list (if provided and valid) + 2. Individual agent parameters (if all provided) + 3. preset_agents (if True) + + Args: + pro_agent: The pro agent (optional). + con_agent: The con agent (optional). + judge_agent: The judge agent (optional). + agents: List of agents [pro, con, judge] (optional). + preset_agents: Whether to create default agents. + + Raises: + ValueError: If no valid configuration is provided. + """ + # Option 1: Use agents list + if agents is not None: + if len(agents) != 3: + raise ValueError( + f"agents list must contain exactly 3 agents " + f"[pro_agent, con_agent, judge_agent], got {len(agents)}" + ) + for i, agent in enumerate(agents): + if not isinstance(agent, Agent): + raise ValueError( + f"agents[{i}] must be an Agent instance, got {type(agent)}" + ) + self.pro_agent = agents[0] + self.con_agent = agents[1] + self.judge_agent = agents[2] + if self.verbose: + logger.info("Using agents from provided list") + return + + # Option 2: Use individual agent parameters + if ( + pro_agent is not None + and con_agent is not None + and judge_agent is not None + ): + self.pro_agent = pro_agent + self.con_agent = con_agent + self.judge_agent = judge_agent + if self.verbose: + logger.info("Using individually provided agents") + return + + # Option 3: Create preset agents + if preset_agents: + self._create_preset_agents() + if self.verbose: + logger.info("Using preset agents") + return + + # No valid configuration + raise ValueError( + "No valid agent configuration provided. Either:\n" + "1. Provide all three agents: pro_agent, con_agent, judge_agent\n" + "2. Provide an agents list with exactly 3 agents: agents=[pro, con, judge]\n" + "3. 
Set preset_agents=True to use default agents" + ) + + def _create_preset_agents(self) -> None: + """ + Create preset agents with default configurations. + + Creates three agents (Pro, Con, Judge) with predefined system prompts + optimized for debate scenarios. + """ + self.pro_agent = Agent( + agent_name="Pro-Debater", + agent_description="Expert debater arguing in favor of propositions", + system_prompt=PRO_AGENT_SYSTEM_PROMPT, + model_name=self.model_name, + max_loops=1, + verbose=self.verbose, + ) + + self.con_agent = Agent( + agent_name="Con-Debater", + agent_description="Expert debater arguing against propositions", + system_prompt=CON_AGENT_SYSTEM_PROMPT, + model_name=self.model_name, + max_loops=1, + verbose=self.verbose, + ) + + self.judge_agent = Agent( + agent_name="Debate-Judge", + agent_description="Impartial judge evaluating debate arguments", + system_prompt=JUDGE_AGENT_SYSTEM_PROMPT, + model_name=self.model_name, + max_loops=1, + verbose=self.verbose, + ) + def run(self, task: str) -> Union[str, List, dict]: """ Execute the debate with judge refinement process. @@ -119,12 +322,10 @@ class DebateWithJudge: if self.verbose: logger.info(f"Starting debate on: {task}") - # Execute N rounds of debate and refinement - for round_num in range(self.max_rounds): + # Execute N loops of debate and refinement + for round_num in range(self.max_loops): if self.verbose: - logger.info( - f"Round {round_num + 1}/{self.max_rounds}" - ) + logger.info(f"Loop {round_num + 1}/{self.max_loops}") # Step 1: Pro agent presents argument pro_prompt = self._create_pro_prompt( @@ -164,7 +365,7 @@ class DebateWithJudge: f"Judge synthesis: {judge_synthesis[:100]}..." ) - # Use judge's synthesis as input for next round + # Use judge's synthesis as input for next loop current_topic = judge_synthesis # Return formatted output @@ -210,7 +411,7 @@ class DebateWithJudge: f"and weaknesses, and provide a refined synthesis that incorporates the " f"best elements from both sides. 
You may declare a winner or provide a " f"balanced synthesis. Your output will be used to refine the discussion " - f"in subsequent rounds." + f"in subsequent loops." ) self.judge_agent.run(task=judge_intro) @@ -220,7 +421,7 @@ class DebateWithJudge: Args: topic (str): The current topic or refined question. - round_num (int): The current round number (0-indexed). + round_num (int): The current loop number (0-indexed). Returns: str: The prompt for the Pro agent. @@ -232,7 +433,7 @@ class DebateWithJudge: ) else: return ( - f"Round {round_num + 1}: Based on the judge's previous evaluation, " + f"Loop {round_num + 1}: Based on the judge's previous evaluation, " f"present an improved argument in favor of: {topic}\n\n" f"Address any weaknesses identified and strengthen your position " f"with additional evidence and reasoning." @@ -247,7 +448,7 @@ class DebateWithJudge: Args: topic (str): The current topic or refined question. pro_argument (str): The Pro agent's argument to counter. - round_num (int): The current round number (0-indexed). + round_num (int): The current loop number (0-indexed). Returns: str: The prompt for the Con agent. @@ -261,7 +462,7 @@ class DebateWithJudge: ) else: return ( - f"Round {round_num + 1}: Based on the judge's previous evaluation, " + f"Loop {round_num + 1}: Based on the judge's previous evaluation, " f"present an improved counter-argument against: {topic}\n\n" f"Pro's current argument:\n{pro_argument}\n\n" f"Address any weaknesses identified and strengthen your counter-position " @@ -282,22 +483,22 @@ class DebateWithJudge: topic (str): The current topic or refined question. pro_argument (str): The Pro agent's argument. con_argument (str): The Con agent's argument. - round_num (int): The current round number (0-indexed). + round_num (int): The current loop number (0-indexed). Returns: str: The prompt for the Judge agent. 
""" - is_final_round = round_num == self.max_rounds - 1 + is_final_round = round_num == self.max_loops - 1 prompt = ( - f"Round {round_num + 1}/{self.max_rounds}: Evaluate the debate on: {topic}\n\n" + f"Loop {round_num + 1}/{self.max_loops}: Evaluate the debate on: {topic}\n\n" f"Pro's argument ({self.pro_agent.agent_name}):\n{pro_argument}\n\n" f"Con's argument ({self.con_agent.agent_name}):\n{con_argument}\n\n" ) if is_final_round: prompt += ( - "This is the final round. Provide a comprehensive final evaluation:\n" + "This is the final loop. Provide a comprehensive final evaluation:\n" "- Identify the strongest points from both sides\n" "- Determine a winner OR provide a balanced synthesis\n" "- Present a refined, well-reasoned answer that incorporates the best " @@ -309,8 +510,8 @@ class DebateWithJudge: "Evaluate both arguments and provide:\n" "- Assessment of strengths and weaknesses in each argument\n" "- A refined synthesis that incorporates the best elements from both sides\n" - "- Specific feedback for improvement in the next round\n" - "- Your synthesis will be used as the topic for the next round" + "- Specific feedback for improvement in the next loop\n" + "- Your synthesis will be used as the topic for the next loop" ) return prompt diff --git a/swarms/structs/graph_workflow.py b/swarms/structs/graph_workflow.py index 4a2b0c90..f237825d 100644 --- a/swarms/structs/graph_workflow.py +++ b/swarms/structs/graph_workflow.py @@ -1,10 +1,21 @@ -import json import asyncio import concurrent.futures +import json +import os import time -from enum import Enum -from typing import Any, Dict, List, Optional +import traceback import uuid +from enum import Enum +from typing import ( + Any, + Dict, + Iterator, + List, + Optional, + Set, + Tuple, + Union, +) import networkx as nx @@ -16,6 +27,14 @@ except ImportError: GRAPHVIZ_AVAILABLE = False graphviz = None +try: + import rustworkx as rx + + RUSTWORKX_AVAILABLE = True +except ImportError: + RUSTWORKX_AVAILABLE = 
False + rx = None + from swarms.structs.agent import Agent # noqa: F401 from swarms.structs.conversation import Conversation from swarms.utils.get_cpu_cores import get_cpu_cores @@ -24,6 +43,525 @@ from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger(log_folder="graph_workflow") +class GraphBackend: + """ + Abstract base class for graph backends. + Provides a unified interface for different graph libraries. + """ + + def add_node(self, node_id: str, **attrs) -> None: + """ + Add a node to the graph. + + Args: + node_id (str): The unique identifier of the node. + **attrs: Additional attributes for the node. + """ + raise NotImplementedError + + def add_edge(self, source: str, target: str, **attrs) -> None: + """ + Add an edge to the graph. + + Args: + source (str): The source node ID. + target (str): The target node ID. + **attrs: Additional attributes for the edge. + """ + raise NotImplementedError + + def in_degree(self, node_id: str) -> int: + """ + Get the in-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The in-degree of the node. + """ + raise NotImplementedError + + def out_degree(self, node_id: str) -> int: + """ + Get the out-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The out-degree of the node. + """ + raise NotImplementedError + + def predecessors(self, node_id: str) -> Iterator[str]: + """ + Get the predecessors of a node. + + Args: + node_id (str): The node ID. + + Returns: + Iterator[str]: Iterator of predecessor node IDs. + """ + raise NotImplementedError + + def reverse(self) -> "GraphBackend": + """ + Return a reversed copy of the graph. + + Returns: + GraphBackend: A new backend instance with reversed edges. + """ + raise NotImplementedError + + def topological_generations(self) -> List[List[str]]: + """ + Get topological generations (layers) of the graph. + + Returns: + List[List[str]]: List of layers, where each layer is a list of node IDs. 
+ """ + raise NotImplementedError + + def simple_cycles(self) -> List[List[str]]: + """ + Find simple cycles in the graph. + + Returns: + List[List[str]]: List of cycles, where each cycle is a list of node IDs. + """ + raise NotImplementedError + + def descendants(self, node_id: str) -> Set[str]: + """ + Get all descendants of a node. + + Args: + node_id (str): The node ID. + + Returns: + Set[str]: Set of descendant node IDs. + """ + raise NotImplementedError + + +class NetworkXBackend(GraphBackend): + """ + NetworkX backend implementation. + """ + + def __init__(self): + """ + Initialize the NetworkX backend. + """ + self.graph = nx.DiGraph() + + def add_node(self, node_id: str, **attrs) -> None: + """ + Add a node to the NetworkX graph. + + Args: + node_id (str): The unique identifier of the node. + **attrs: Additional attributes for the node. + """ + self.graph.add_node(node_id, **attrs) + + def add_edge(self, source: str, target: str, **attrs) -> None: + """ + Add an edge to the NetworkX graph. + + Args: + source (str): The source node ID. + target (str): The target node ID. + **attrs: Additional attributes for the edge. + """ + self.graph.add_edge(source, target, **attrs) + + def in_degree(self, node_id: str) -> int: + """ + Get the in-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The in-degree of the node. + """ + return self.graph.in_degree(node_id) + + def out_degree(self, node_id: str) -> int: + """ + Get the out-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The out-degree of the node. + """ + return self.graph.out_degree(node_id) + + def predecessors(self, node_id: str) -> Iterator[str]: + """ + Get the predecessors of a node. + + Args: + node_id (str): The node ID. + + Returns: + Iterator[str]: Iterator of predecessor node IDs. + """ + return self.graph.predecessors(node_id) + + def reverse(self) -> "NetworkXBackend": + """ + Return a reversed copy of the graph. 
+ + Returns: + NetworkXBackend: A new backend instance with reversed edges. + """ + reversed_backend = NetworkXBackend() + reversed_backend.graph = self.graph.reverse() + return reversed_backend + + def topological_generations(self) -> List[List[str]]: + """ + Get topological generations (layers) of the graph. + + Returns: + List[List[str]]: List of layers, where each layer is a list of node IDs. + """ + return list(nx.topological_generations(self.graph)) + + def simple_cycles(self) -> List[List[str]]: + """ + Find simple cycles in the graph. + + Returns: + List[List[str]]: List of cycles, where each cycle is a list of node IDs. + """ + return list(nx.simple_cycles(self.graph)) + + def descendants(self, node_id: str) -> Set[str]: + """ + Get all descendants of a node. + + Args: + node_id (str): The node ID. + + Returns: + Set[str]: Set of descendant node IDs. + """ + return nx.descendants(self.graph, node_id) + + +class RustworkxBackend(GraphBackend): + """ + Rustworkx backend implementation. + Uses integer indices internally but exposes string node IDs. + """ + + def __init__(self): + """ + Initialize the Rustworkx backend. + """ + if not RUSTWORKX_AVAILABLE: + raise ImportError( + "rustworkx is not installed. Install it with: pip install rustworkx" + ) + self.graph = rx.PyDiGraph() + # Mapping from node ID (string) to node index (int) + self._node_id_to_index: Dict[str, int] = {} + # Mapping from node index (int) to node ID (string) + self._index_to_node_id: Dict[int, str] = {} + + def _get_or_create_node_index(self, node_id: str) -> int: + """ + Get the node index for a given node ID, creating it if necessary. + + Args: + node_id (str): The node ID. + + Returns: + int: The node index. 
+ """ + if node_id not in self._node_id_to_index: + node_index = self.graph.add_node(node_id) + self._node_id_to_index[node_id] = node_index + self._index_to_node_id[node_index] = node_id + return self._node_id_to_index[node_id] + + def add_node(self, node_id: str, **attrs) -> None: + """ + Add a node to the Rustworkx graph. + + Args: + node_id (str): The unique identifier of the node. + **attrs: Additional attributes for the node (stored in node data). + """ + if node_id not in self._node_id_to_index: + # Store node data as a dict with the node_id and attributes + node_data = {"node_id": node_id, **attrs} + node_index = self.graph.add_node(node_data) + self._node_id_to_index[node_id] = node_index + self._index_to_node_id[node_index] = node_id + else: + # Update existing node data + node_index = self._node_id_to_index[node_id] + node_data = self.graph[node_index] + if isinstance(node_data, dict): + node_data.update(attrs) + else: + self.graph[node_index] = {"node_id": node_id, **attrs} + + def add_edge(self, source: str, target: str, **attrs) -> None: + """ + Add an edge to the Rustworkx graph. + + Args: + source (str): The source node ID. + target (str): The target node ID. + **attrs: Additional attributes for the edge (stored in edge data). + """ + source_idx = self._get_or_create_node_index(source) + target_idx = self._get_or_create_node_index(target) + edge_data = attrs if attrs else None + self.graph.add_edge(source_idx, target_idx, edge_data) + + def in_degree(self, node_id: str) -> int: + """ + Get the in-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The in-degree of the node. + """ + if node_id not in self._node_id_to_index: + return 0 + node_index = self._node_id_to_index[node_id] + return self.graph.in_degree(node_index) + + def out_degree(self, node_id: str) -> int: + """ + Get the out-degree of a node. + + Args: + node_id (str): The node ID. + + Returns: + int: The out-degree of the node. 
+ """ + if node_id not in self._node_id_to_index: + return 0 + node_index = self._node_id_to_index[node_id] + return self.graph.out_degree(node_index) + + def predecessors(self, node_id: str) -> Iterator[str]: + """ + Get the predecessors of a node. + + Args: + node_id (str): The node ID. + + Returns: + Iterator[str]: Iterator of predecessor node IDs. + """ + if node_id not in self._node_id_to_index: + return iter([]) + target_index = self._node_id_to_index[node_id] + # Use edge list to find predecessors (more reliable than predecessors() method) + result = [] + for edge in self.graph.edge_list(): + source_idx, target_idx = edge + if target_idx == target_index: + result.append(self._index_to_node_id[source_idx]) + return iter(result) + + def reverse(self) -> "RustworkxBackend": + """ + Return a reversed copy of the graph. + + Returns: + RustworkxBackend: A new backend instance with reversed edges. + """ + reversed_backend = RustworkxBackend() + # Copy the graph structure + reversed_backend.graph = self.graph.copy() + # Reverse the edges + reversed_backend.graph.reverse() + # Copy the mappings + reversed_backend._node_id_to_index = ( + self._node_id_to_index.copy() + ) + reversed_backend._index_to_node_id = ( + self._index_to_node_id.copy() + ) + return reversed_backend + + def topological_generations(self) -> List[List[str]]: + """ + Get topological generations (layers) of the graph. + + Returns: + List[List[str]]: List of layers, where each layer is a list of node IDs. 
+ """ + try: + # Get all node indices + all_indices = list(self._node_id_to_index.values()) + if not all_indices: + return [] + + # Use layer-by-layer approach similar to NetworkX topological_generations + layers = [] + remaining = set(all_indices) + processed = set() + + while remaining: + # Find all nodes with in-degree 0 considering only edges from processed nodes + # In rustworkx, we need to check if all predecessors are in processed set + layer = [] + # First pass: identify nodes that can be added to this layer + # (without modifying remaining/processed during iteration) + nodes_to_add = [] + for idx in list(remaining): + # Get all predecessors using edge list + pred_indices = [] + for edge in self.graph.edge_list(): + source_idx, target_idx = edge + if target_idx == idx: + pred_indices.append(source_idx) + # Check if all predecessors have been processed (or node has no predecessors) + # A node can be added to the layer if: + # 1. It has no predecessors (entry node), OR + # 2. All its predecessors have already been processed (from previous layers) + if not pred_indices: + # No predecessors - this is an entry node + nodes_to_add.append(idx) + elif all( + pred_idx in processed + for pred_idx in pred_indices + ): + # All predecessors have been processed in previous layers + nodes_to_add.append(idx) + + # Second pass: add identified nodes to the layer and update sets + for idx in nodes_to_add: + layer.append(self._index_to_node_id[idx]) + remaining.remove(idx) + processed.add(idx) + + if not layer: + # Cycle detected or error, break + break + + layers.append(layer) + + # If there are remaining nodes, they form a cycle - add them as a final layer + if remaining: + cycle_layer = [ + self._index_to_node_id[idx] for idx in remaining + ] + layers.append(cycle_layer) + + return ( + layers + if layers + else [ + [ + self._index_to_node_id[idx] + for idx in all_indices + ] + ] + ) + except Exception as e: + logger.warning( + f"Error in rustworkx topological_generations: 
{e}, falling back to simple approach" + ) + # Fallback: return all nodes in one layer + return [ + [node_id for node_id in self._node_id_to_index.keys()] + ] + + def simple_cycles(self) -> List[List[str]]: + """ + Find simple cycles in the graph. + + Returns: + List[List[str]]: List of cycles, where each cycle is a list of node IDs. + """ + try: + # Convert to NetworkX temporarily for cycle detection + # This is a limitation of rustworkx - it doesn't have simple_cycles + # We'll use a workaround by converting temporarily + import networkx as nx + + nx_graph = nx.DiGraph() + for node_id in self._node_id_to_index.keys(): + nx_graph.add_node(node_id) + for edge in self.graph.edge_list(): + source_idx, target_idx = edge + source_id = self._index_to_node_id[source_idx] + target_id = self._index_to_node_id[target_idx] + nx_graph.add_edge(source_id, target_id) + + cycles = list(nx.simple_cycles(nx_graph)) + return cycles + except Exception as e: + logger.warning( + f"Error in rustworkx simple_cycles: {e}, returning empty list" + ) + return [] + + def descendants(self, node_id: str) -> Set[str]: + """ + Get all descendants of a node. + + Args: + node_id (str): The node ID. + + Returns: + Set[str]: Set of descendant node IDs. 
+ """ + if node_id not in self._node_id_to_index: + return set() + node_index = self._node_id_to_index[node_id] + # Use BFS to find all descendants + descendants = set() + queue = [node_index] + visited = {node_index} + + while queue: + current_idx = queue.pop(0) + succ_data = self.graph.successors(current_idx) + for succ in succ_data: + # Handle both dict (node data) and int (index) returns + if isinstance(succ, dict): + succ_node_id = succ.get("node_id") + if ( + succ_node_id + and succ_node_id in self._node_id_to_index + ): + succ_idx = self._node_id_to_index[ + succ_node_id + ] + else: + continue + elif isinstance(succ, int): + succ_idx = succ + else: + continue + + if succ_idx not in visited: + visited.add(succ_idx) + descendants.add(self._index_to_node_id[succ_idx]) + queue.append(succ_idx) + + return descendants + + class NodeType(str, Enum): AGENT: Agent = "agent" @@ -69,12 +607,12 @@ class Node: ) @classmethod - def from_agent(cls, agent, **kwargs): + def from_agent(cls, agent: Agent, **kwargs: Any) -> "Node": """ Create a Node from an Agent object. Args: - agent: The agent to create a node from. + agent (Agent): The agent to create a node from. **kwargs: Additional keyword arguments. Returns: @@ -117,29 +655,54 @@ class Edge: self.metadata = metadata or {} @classmethod - def from_nodes(cls, source_node, target_node, **kwargs): + def from_nodes( + cls, + source_node: Union["Node", Agent, str], + target_node: Union["Node", Agent, str], + **kwargs: Any, + ) -> "Edge": """ Create an Edge from node objects or ids. Args: - source_node: Source node object or ID. - target_node: Target node object or ID. + source_node (Union[Node, Agent, str]): Source node object or ID. + target_node (Union[Node, Agent, str]): Target node object or ID. **kwargs: Additional keyword arguments. Returns: Edge: A new Edge instance. 
""" - src = ( - source_node.id - if isinstance(source_node, Node) - else source_node - ) - tgt = ( - target_node.id - if isinstance(target_node, Node) - else target_node - ) - return cls(source=src, target=tgt, **kwargs) + # Handle source node: extract ID from Node, Agent, or use string directly + if isinstance(source_node, Node): + src = source_node.id + elif hasattr(source_node, "agent_name"): + # Agent object - extract agent_name + src = getattr(source_node, "agent_name", None) + if src is None: + raise ValueError( + "Source agent does not have an agent_name attribute" + ) + else: + # Assume it's already a string ID + src = source_node + + # Handle target node: extract ID from Node, Agent, or use string directly + if isinstance(target_node, Node): + tgt = target_node.id + elif hasattr(target_node, "agent_name"): + # Agent object - extract agent_name + tgt = getattr(target_node, "agent_name", None) + if tgt is None: + raise ValueError( + "Target agent does not have an agent_name attribute" + ) + else: + # Assume it's already a string ID + tgt = target_node + + # Put all kwargs into metadata dict + metadata = kwargs if kwargs else None + return cls(source=src, target=tgt, metadata=metadata) class GraphWorkflow: @@ -151,7 +714,7 @@ class GraphWorkflow: edges (List[Edge]): A list of edges in the graph, where each edge is represented by an Edge object. entry_points (List[str]): A list of node IDs that serve as entry points to the graph. end_points (List[str]): A list of node IDs that serve as end points of the graph. - graph (nx.DiGraph): A directed graph object from the NetworkX library representing the workflow graph. + graph_backend (GraphBackend): A graph backend object (NetworkX or Rustworkx) representing the workflow graph. task (str): The task to be executed by the workflow. _compiled (bool): Whether the graph has been compiled for optimization. _sorted_layers (List[List[str]]): Pre-computed topological layers for faster execution. 
@@ -174,6 +737,7 @@ class GraphWorkflow: task: Optional[str] = None, auto_compile: bool = True, verbose: bool = False, + backend: str = "networkx", ): self.id = id self.verbose = verbose @@ -181,14 +745,30 @@ class GraphWorkflow: if self.verbose: logger.info("Initializing GraphWorkflow") logger.debug( - f"GraphWorkflow parameters: nodes={len(nodes) if nodes else 0}, edges={len(edges) if edges else 0}, max_loops={max_loops}, auto_compile={auto_compile}" + f"GraphWorkflow parameters: nodes={len(nodes) if nodes else 0}, edges={len(edges) if edges else 0}, max_loops={max_loops}, auto_compile={auto_compile}, backend={backend}" ) self.nodes = nodes or {} self.edges = edges or [] self.entry_points = entry_points or [] self.end_points = end_points or [] - self.graph = nx.DiGraph() + + # Initialize graph backend + if backend.lower() == "rustworkx": + if not RUSTWORKX_AVAILABLE: + logger.warning( + "rustworkx is not available, falling back to networkx. Install with: pip install rustworkx" + ) + self.graph_backend = NetworkXBackend() + else: + self.graph_backend = RustworkxBackend() + if self.verbose: + logger.info("Using rustworkx backend") + else: + self.graph_backend = NetworkXBackend() + if self.verbose: + logger.info("Using networkx backend") + self.max_loops = max_loops self.task = task self.name = name @@ -208,15 +788,20 @@ class GraphWorkflow: self.conversation = Conversation() - # Rebuild the NetworkX graph from nodes and edges if provided + # Rebuild the graph from nodes and edges if provided if self.nodes: + backend_name = ( + "rustworkx" + if isinstance(self.graph_backend, RustworkxBackend) + else "networkx" + ) if self.verbose: logger.info( - f"Adding {len(self.nodes)} nodes to NetworkX graph" + f"Adding {len(self.nodes)} nodes to {backend_name} graph" ) for node_id, node in self.nodes.items(): - self.graph.add_node( + self.graph_backend.add_node( node_id, type=node.type, agent=node.agent, @@ -228,9 +813,14 @@ class GraphWorkflow: ) if self.edges: + backend_name 
= ( + "rustworkx" + if isinstance(self.graph_backend, RustworkxBackend) + else "networkx" + ) if self.verbose: logger.info( - f"Adding {len(self.edges)} edges to NetworkX graph" + f"Adding {len(self.edges)} edges to {backend_name} graph" ) valid_edges = 0 @@ -239,7 +829,7 @@ class GraphWorkflow: edge.source in self.nodes and edge.target in self.nodes ): - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}), @@ -270,7 +860,7 @@ class GraphWorkflow: "GraphWorkflow initialization completed successfully" ) - def _invalidate_compilation(self): + def _invalidate_compilation(self) -> None: """ Invalidate compiled optimizations when graph structure changes. Forces recompilation on next run to ensure cache coherency. @@ -290,7 +880,7 @@ class GraphWorkflow: if self.verbose: logger.debug("Cleared predecessors cache") - def compile(self): + def compile(self) -> None: """ Pre-compute expensive operations for faster execution. Call this after building the graph structure. @@ -328,8 +918,8 @@ class GraphWorkflow: if self.verbose: logger.debug("Computing topological layers") - sorted_layers = list( - nx.topological_generations(self.graph) + sorted_layers = ( + self.graph_backend.topological_generations() ) self._sorted_layers = sorted_layers @@ -358,7 +948,7 @@ class GraphWorkflow: ) raise e - def add_node(self, agent: Agent, **kwargs): + def add_node(self, agent: Agent, **kwargs: Any) -> None: """ Adds an agent node to the workflow graph. @@ -380,7 +970,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.nodes[node.id] = node - self.graph.add_node( + self.graph_backend.add_node( node.id, type=node.type, agent=node.agent, @@ -397,13 +987,54 @@ class GraphWorkflow: ) raise e - def add_edge(self, edge_or_source, target=None, **kwargs): + def add_nodes( + self, agents: List[Agent], batch_size: int = 10, **kwargs: Any + ) -> None: + """ + Add multiple agents to the workflow graph concurrently in batches. 
+ + Args: + agents (List[Agent]): List of agents to add. + batch_size (int): Number of agents to add concurrently in a batch. Defaults to 10. + **kwargs: Additional keyword arguments for each node addition. + """ + + try: + with concurrent.futures.ThreadPoolExecutor( + max_workers=self._max_workers + ) as executor: + # Process agents in batches + for i in range(0, len(agents), batch_size): + batch = agents[i : i + batch_size] + futures = [ + executor.submit( + self.add_node, agent, **kwargs + ) + for agent in batch + ] + # Ensure all nodes in batch are added before next batch + for future in concurrent.futures.as_completed( + futures + ): + future.result() + except Exception as e: + logger.exception( + f"Error in GraphWorkflow.add_nodes for agents {agents}: {e} Traceback: {traceback.format_exc()}" + ) + raise e + + def add_edge( + self, + edge_or_source: Union[Edge, Node, Agent, str], + target: Optional[Union[Node, Agent, str]] = None, + **kwargs: Any, + ) -> None: """ Add an edge by Edge object or by passing node objects/ids. Args: - edge_or_source: Either an Edge object or the source node/id. - target: Target node/id (required if edge_or_source is not an Edge). + edge_or_source (Union[Edge, Node, Agent, str]): Either an Edge object or the source node/id. + target (Optional[Union[Node, Agent, str]]): Target node/id (required if edge_or_source is not an Edge). **kwargs: Additional keyword arguments for the edge.
""" try: @@ -434,7 +1065,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}) ) self._invalidate_compilation() @@ -448,15 +1079,20 @@ class GraphWorkflow: logger.exception(f"Error in GraphWorkflow.add_edge: {e}") raise e - def add_edges_from_source(self, source, targets, **kwargs): + def add_edges_from_source( + self, + source: Union[Node, Agent, str], + targets: List[Union[Node, Agent, str]], + **kwargs: Any, + ) -> List[Edge]: """ Add multiple edges from a single source to multiple targets for parallel processing. This creates a "fan-out" pattern where the source agent's output is distributed to all target agents simultaneously. Args: - source: Source node/id that will send output to multiple targets. - targets: List of target node/ids that will receive the source output in parallel. + source (Union[Node, Agent, str]): Source node/id that will send output to multiple targets. + targets (List[Union[Node, Agent, str]]): List of target node/ids that will receive the source output in parallel. **kwargs: Additional keyword arguments for all edges. Returns: @@ -492,7 +1128,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}) ) created_edges.append(edge) @@ -517,14 +1153,19 @@ class GraphWorkflow: ) raise e - def add_edges_to_target(self, sources, target, **kwargs): + def add_edges_to_target( + self, + sources: List[Union[Node, Agent, str]], + target: Union[Node, Agent, str], + **kwargs: Any, + ) -> List[Edge]: """ Add multiple edges from multiple sources to a single target for convergence processing. This creates a "fan-in" pattern where multiple agents' outputs converge to a single target. Args: - sources: List of source node/ids that will send output to the target. 
- target: Target node/id that will receive all source outputs. + sources (List[Union[Node, Agent, str]]): List of source node/ids that will send output to the target. + target (Union[Node, Agent, str]): Target node/id that will receive all source outputs. **kwargs: Additional keyword arguments for all edges. Returns: @@ -560,7 +1201,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}) ) created_edges.append(edge) @@ -585,14 +1226,19 @@ class GraphWorkflow: ) raise e - def add_parallel_chain(self, sources, targets, **kwargs): + def add_parallel_chain( + self, + sources: List[Union[Node, Agent, str]], + targets: List[Union[Node, Agent, str]], + **kwargs: Any, + ) -> List[Edge]: """ Create a parallel processing chain where multiple sources connect to multiple targets. This creates a full mesh connection pattern for maximum parallel processing. Args: - sources: List of source node/ids. - targets: List of target node/ids. + sources (List[Union[Node, Agent, str]]): List of source node/ids. + targets (List[Union[Node, Agent, str]]): List of target node/ids. **kwargs: Additional keyword arguments for all edges. Returns: @@ -629,7 +1275,7 @@ class GraphWorkflow: raise ValueError(error_msg) self.edges.append(edge) - self.graph.add_edge( + self.graph_backend.add_edge( edge.source, edge.target, **(edge.metadata or {}), @@ -656,7 +1302,7 @@ class GraphWorkflow: ) raise e - def set_entry_points(self, entry_points: List[str]): + def set_entry_points(self, entry_points: List[str]) -> None: """ Set the entry points for the workflow. @@ -687,7 +1333,7 @@ class GraphWorkflow: ) raise e - def set_end_points(self, end_points: List[str]): + def set_end_points(self, end_points: List[str]) -> None: """ Set the end points for the workflow. 
@@ -721,22 +1367,22 @@ class GraphWorkflow: @classmethod def from_spec( cls, - agents, - edges, - entry_points=None, - end_points=None, - task=None, - **kwargs, - ): + agents: List[Union[Agent, Node]], + edges: List[Union[Edge, Tuple[Any, Any]]], + entry_points: Optional[List[str]] = None, + end_points: Optional[List[str]] = None, + task: Optional[str] = None, + **kwargs: Any, + ) -> "GraphWorkflow": """ Construct a workflow from a list of agents and connections. Args: - agents: List of agents or Node objects. - edges: List of edges or edge tuples. - entry_points: List of entry point node IDs. - end_points: List of end point node IDs. - task: Task to be executed by the workflow. + agents (List[Union[Agent, Node]]): List of agents or Node objects. + edges (List[Union[Edge, Tuple[Any, Any]]]): List of edges or edge tuples. + entry_points (Optional[List[str]]): List of entry point node IDs. + end_points (Optional[List[str]]): List of end point node IDs. + task (Optional[str]): Task to be executed by the workflow. **kwargs: Additional keyword arguments. Returns: @@ -851,7 +1497,7 @@ class GraphWorkflow: logger.exception(f"Error in GraphWorkflow.from_spec: {e}") raise e - def auto_set_entry_points(self): + def auto_set_entry_points(self) -> None: """ Automatically set entry points to nodes with no incoming edges. """ @@ -860,7 +1506,9 @@ class GraphWorkflow: try: self.entry_points = [ - n for n in self.nodes if self.graph.in_degree(n) == 0 + n + for n in self.nodes + if self.graph_backend.in_degree(n) == 0 ] if self.verbose: @@ -879,7 +1527,7 @@ class GraphWorkflow: ) raise e - def auto_set_end_points(self): + def auto_set_end_points(self) -> None: """ Automatically set end points to nodes with no outgoing edges. 
""" @@ -888,7 +1536,9 @@ class GraphWorkflow: try: self.end_points = [ - n for n in self.nodes if self.graph.out_degree(n) == 0 + n + for n in self.nodes + if self.graph_backend.out_degree(n) == 0 ] if self.verbose: @@ -905,7 +1555,7 @@ class GraphWorkflow: ) raise e - def _get_predecessors(self, node_id: str) -> tuple: + def _get_predecessors(self, node_id: str) -> Tuple[str, ...]: """ Cached predecessor lookup for faster repeated access. @@ -913,7 +1563,7 @@ class GraphWorkflow: node_id (str): The node ID to get predecessors for. Returns: - tuple: Tuple of predecessor node IDs. + Tuple[str, ...]: Tuple of predecessor node IDs. """ # Use instance-level caching instead of @lru_cache to avoid hashing issues if not hasattr(self, "_predecessors_cache"): @@ -921,7 +1571,7 @@ class GraphWorkflow: if node_id not in self._predecessors_cache: self._predecessors_cache[node_id] = tuple( - self.graph.predecessors(node_id) + self.graph_backend.predecessors(node_id) ) return self._predecessors_cache[node_id] @@ -930,7 +1580,7 @@ class GraphWorkflow: self, node_id: str, task: str, - prev_outputs: Dict[str, str], + prev_outputs: Dict[str, Any], layer_idx: int, ) -> str: """ @@ -939,7 +1589,7 @@ class GraphWorkflow: Args: node_id (str): The node ID to build a prompt for. task (str): The main task. - prev_outputs (Dict[str, str]): Previous outputs from predecessor nodes. + prev_outputs (Dict[str, Any]): Previous outputs from predecessor nodes. layer_idx (int): The current layer index. Returns: @@ -996,13 +1646,16 @@ class GraphWorkflow: raise e async def arun( - self, task: str = None, *args, **kwargs + self, + task: Optional[str] = None, + *args: Any, + **kwargs: Any, ) -> Dict[str, Any]: """ Async version of run for better performance with I/O bound operations. Args: - task (str, optional): Task to execute. Uses self.task if not provided. + task (Optional[str]): Task to execute. Uses self.task if not provided. *args: Additional positional arguments. 
**kwargs: Additional keyword arguments. @@ -1030,16 +1683,17 @@ class GraphWorkflow: def run( self, - task: str = None, + task: Optional[str] = None, img: Optional[str] = None, - *args, - **kwargs, + *args: Any, + **kwargs: Any, ) -> Dict[str, Any]: """ Run the workflow graph with optimized parallel agent execution. Args: - task (str, optional): Task to execute. Uses self.task if not provided. + task (Optional[str]): Task to execute. Uses self.task if not provided. + img (Optional[str]): Optional image path for multimodal tasks. *args: Additional positional arguments. **kwargs: Additional keyword arguments. @@ -1268,16 +1922,15 @@ class GraphWorkflow: view: bool = True, engine: str = "dot", show_summary: bool = False, - ): + ) -> str: """ Visualize the workflow graph using Graphviz with enhanced parallel pattern detection. Args: - output_path (str, optional): Path to save the visualization file. If None, uses workflow name. format (str): Output format ('png', 'svg', 'pdf', 'dot'). Defaults to 'png'. view (bool): Whether to open the visualization after creation. Defaults to True. engine (str): Graphviz layout engine ('dot', 'neato', 'fdp', 'sfdp', 'twopi', 'circo'). Defaults to 'dot'. - show_summary (bool): Whether to print parallel processing summary. Defaults to True. + show_summary (bool): Whether to print parallel processing summary. Defaults to False. Returns: str: Path to the generated visualization file. @@ -1560,7 +2213,7 @@ class GraphWorkflow: logger.exception(f"Error in GraphWorkflow.visualize: {e}") raise e - def visualize_simple(self): + def visualize_simple(self) -> str: """ Simple text-based visualization for environments without Graphviz. 
@@ -1650,10 +2303,10 @@ class GraphWorkflow: def to_json( self, - fast=True, - include_conversation=False, - include_runtime_state=False, - ): + fast: bool = True, + include_conversation: bool = False, + include_runtime_state: bool = False, + ) -> str: """ Serialize the workflow to JSON with comprehensive metadata and configuration. @@ -1672,7 +2325,7 @@ class GraphWorkflow: try: - def node_to_dict(node): + def node_to_dict(node: Node) -> Dict[str, Any]: node_data = { "id": node.id, "type": str(node.type), @@ -1707,7 +2360,7 @@ class GraphWorkflow: return node_data - def edge_to_dict(edge): + def edge_to_dict(edge: Edge) -> Dict[str, Any]: return { "source": edge.source, "target": edge.target, @@ -1824,7 +2477,11 @@ class GraphWorkflow: raise e @classmethod - def from_json(cls, json_str, restore_runtime_state=False): + def from_json( + cls, + json_str: str, + restore_runtime_state: bool = False, + ) -> "GraphWorkflow": """ Deserialize a workflow from JSON with comprehensive parameter support and backward compatibility. @@ -2082,7 +2739,6 @@ class GraphWorkflow: FileExistsError: If file exists and overwrite is False Exception: If save operation fails """ - import os # Handle file path validation if not filepath.endswith(".json"): @@ -2145,7 +2801,6 @@ class GraphWorkflow: FileNotFoundError: If file doesn't exist Exception: If load operation fails """ - import os if not os.path.exists(filepath): raise FileNotFoundError( @@ -2177,7 +2832,7 @@ class GraphWorkflow: ) raise e - def validate(self, auto_fix=False) -> Dict[str, Any]: + def validate(self, auto_fix: bool = False) -> Dict[str, Any]: """ Validate the workflow structure, checking for potential issues such as isolated nodes, cyclic dependencies, etc. 
@@ -2228,8 +2883,8 @@ class GraphWorkflow: isolated = [ n for n in self.nodes - if self.graph.in_degree(n) == 0 - and self.graph.out_degree(n) == 0 + if self.graph_backend.in_degree(n) == 0 + and self.graph_backend.out_degree(n) == 0 ] if isolated: result["warnings"].append( @@ -2238,7 +2893,7 @@ class GraphWorkflow: # Check for cyclic dependencies try: - cycles = list(nx.simple_cycles(self.graph)) + cycles = self.graph_backend.simple_cycles() if cycles: result["warnings"].append( f"Found {len(cycles)} cycles in workflow" @@ -2268,7 +2923,7 @@ class GraphWorkflow: reachable = set() for entry in self.entry_points: reachable.update( - nx.descendants(self.graph, entry) + self.graph_backend.descendants(entry) ) reachable.add(entry) @@ -2289,11 +2944,11 @@ class GraphWorkflow: # Check for dead-end nodes (cannot reach any exit point) if self.end_points: - reverse_graph = self.graph.reverse() + reverse_graph = self.graph_backend.reverse() reachable_to_exit = set() for exit_point in self.end_points: reachable_to_exit.update( - nx.descendants(reverse_graph, exit_point) + reverse_graph.descendants(exit_point) ) reachable_to_exit.add(exit_point) diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py index 1501ccb6..407b7d58 100644 --- a/swarms/structs/hiearchical_swarm.py +++ b/swarms/structs/hiearchical_swarm.py @@ -34,10 +34,11 @@ from rich.live import Live from rich.panel import Panel from rich.table import Table from rich.text import Text +from rich.tree import Tree from swarms.prompts.hiearchical_system_prompt import ( - HIEARCHICAL_SWARM_SYSTEM_PROMPT, DIRECTOR_PLANNING_PROMPT, + HIEARCHICAL_SWARM_SYSTEM_PROMPT, ) from swarms.prompts.multi_agent_collab_prompt import ( MULTI_AGENT_COLLAB_PROMPT_TWO, @@ -748,6 +749,77 @@ class HierarchicalSwarm: add_to_conversation=False, ) + def display_hierarchy(self) -> None: + """ + Display the hierarchical structure of the swarm using Rich Tree. 
+ + This method creates a visual tree representation showing the Director + at the top level and all worker agents as children branches. The tree + is printed to the console with rich formatting. + + The hierarchy visualization helps understand the organizational structure + of the swarm, with the Director coordinating all worker agents. + """ + console = Console() + + # Create the root tree with Director + director_label = Text() + director_label.append("🎯 ", style="bold red") + director_label.append(self.director_name, style="bold white") + director_label.append( + f" [{self.director_model_name}]", style="dim cyan" + ) + + tree = Tree(director_label, guide_style="bold red") + + # Add each worker agent as a branch + for agent in self.agents: + agent_label = Text() + + # Get agent name + if hasattr(agent, "agent_name"): + agent_name = agent.agent_name + elif hasattr(agent, "name"): + agent_name = agent.name + else: + agent_name = f"Agent_{self.agents.index(agent)}" + + # Get agent model if available + model_info = "" + if hasattr(agent, "model_name"): + model_info = f" [{agent.model_name}]" + elif hasattr(agent, "llm") and hasattr( + agent.llm, "model" + ): + model_info = f" [{agent.llm.model}]" + + # Get agent description if available + description = "" + if hasattr(agent, "agent_description"): + description = f" - {agent.agent_description[:50]}" + elif hasattr(agent, "description"): + description = f" - {agent.description[:50]}" + + agent_label.append("🤖 ", style="bold cyan") + agent_label.append(agent_name, style="bold cyan") + if model_info: + agent_label.append(model_info, style="dim cyan") + if description: + agent_label.append(description, style="dim white") + + # Add agent as a branch + tree.add(agent_label) + + # Create a panel with the tree + panel = Panel( + tree, + title=f"[bold white]HierarchicalSwarm Hierarchy: {self.name}[/bold white]", + border_style="red", + padding=(1, 2), + ) + + console.print(panel) + def prepare_worker_agents(self): for agent 
in self.agents: prompt = ( @@ -914,6 +986,7 @@ class HierarchicalSwarm: logger.error( f"{error_msg}\n[TRACE] Traceback: {traceback.format_exc()}\n[BUG] If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues" ) + raise e def agents_no_print(self): for agent in self.agents: diff --git a/swarms/structs/llm_council.py b/swarms/structs/llm_council.py new file mode 100644 index 00000000..c732f058 --- /dev/null +++ b/swarms/structs/llm_council.py @@ -0,0 +1,522 @@ +""" +LLM Council - A Swarms implementation inspired by Andrej Karpathy's llm-council. + +This implementation creates a council of specialized LLM agents that: +1. Each agent responds to the user query independently +2. All agents review and rank each other's (anonymized) responses +3. A Chairman LLM synthesizes all responses and rankings into a final answer + +The council demonstrates how different models evaluate and rank each other's work, +often selecting responses from other models as superior to their own. +""" + +from typing import Dict, List, Optional +import random +from swarms.structs.agent import Agent +from swarms.structs.multi_agent_exec import ( + run_agents_concurrently, + batched_grid_agent_execution, +) +from swarms.utils.history_output_formatter import ( + HistoryOutputType, + history_output_formatter, +) +from swarms.structs.conversation import Conversation +from swarms.structs.swarm_id import swarm_id + + +def get_gpt_councilor_prompt() -> str: + """ + Get system prompt for GPT-5.1 councilor. + + Returns: + System prompt string for GPT-5.1 councilor agent. + """ + return """You are a member of the LLM Council, representing GPT-5.1. Your role is to provide comprehensive, analytical, and thorough responses to user queries. 
+ +Your strengths: +- Deep analytical thinking and comprehensive coverage +- Ability to break down complex topics into detailed components +- Thorough exploration of multiple perspectives +- Rich contextual understanding + +Your approach: +- Provide detailed, well-structured responses +- Include relevant context and background information +- Consider multiple angles and perspectives +- Be thorough but clear in your explanations + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on quality, depth, and clarity.""" + + +def get_gemini_councilor_prompt() -> str: + """ + Get system prompt for Gemini 3 Pro councilor. + + Returns: + System prompt string for Gemini 3 Pro councilor agent. + """ + return """You are a member of the LLM Council, representing Gemini 3 Pro. Your role is to provide concise, well-processed, and structured responses to user queries. + +Your strengths: +- Clear and structured communication +- Efficient information processing +- Condensed yet comprehensive responses +- Well-organized presentation + +Your approach: +- Provide concise but complete answers +- Structure information clearly and logically +- Focus on key points without unnecessary verbosity +- Present information in an easily digestible format + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on clarity, structure, and efficiency.""" + + +def get_claude_councilor_prompt() -> str: + """ + Get system prompt for Claude Sonnet 4.5 councilor. + + Returns: + System prompt string for Claude Sonnet 4.5 councilor agent. + """ + return """You are a member of the LLM Council, representing Claude Sonnet 4.5. Your role is to provide thoughtful, balanced, and nuanced responses to user queries. 
+ +Your strengths: +- Nuanced understanding and balanced perspectives +- Thoughtful consideration of trade-offs +- Clear reasoning and logical structure +- Ethical and responsible analysis + +Your approach: +- Provide balanced, well-reasoned responses +- Consider multiple viewpoints and implications +- Be thoughtful about potential limitations or edge cases +- Maintain clarity while showing depth of thought + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on thoughtfulness, balance, and nuanced reasoning.""" + + +def get_grok_councilor_prompt() -> str: + """ + Get system prompt for Grok-4 councilor. + + Returns: + System prompt string for Grok-4 councilor agent. + """ + return """You are a member of the LLM Council, representing Grok-4. Your role is to provide creative, innovative, and unique perspectives on user queries. + +Your strengths: +- Creative problem-solving and innovative thinking +- Unique perspectives and out-of-the-box approaches +- Engaging and dynamic communication style +- Ability to connect seemingly unrelated concepts + +Your approach: +- Provide creative and innovative responses +- Offer unique perspectives and fresh insights +- Be engaging and dynamic in your communication +- Think creatively while maintaining accuracy + +Remember: You are part of a council where multiple AI models will respond to the same query, and then evaluate each other's responses. Focus on creativity, innovation, and unique insights.""" + + +def get_chairman_prompt() -> str: + """ + Get system prompt for the Chairman agent. + + Returns: + System prompt string for the Chairman agent. + """ + return """You are the Chairman of the LLM Council. Your role is to synthesize responses from all council members along with their evaluations and rankings into a final, comprehensive answer. + +Your responsibilities: +1. Review all council member responses to the user's query +2. 
Consider the rankings and evaluations provided by each council member +3. Synthesize the best elements from all responses +4. Create a final, comprehensive answer that incorporates the strengths of different approaches +5. Provide transparency about which perspectives influenced the final answer + +Your approach: +- Synthesize rather than simply aggregate +- Identify the strongest elements from each response +- Create a cohesive final answer that benefits from multiple perspectives +- Acknowledge the diversity of approaches taken by council members +- Provide a balanced, comprehensive response that serves the user's needs + +Remember: You have access to all original responses and all evaluations. Use this rich context to create the best possible final answer.""" + + +def get_evaluation_prompt( + query: str, responses: Dict[str, str], evaluator_name: str +) -> str: + """ + Create evaluation prompt for council members to review and rank responses. + + Args: + query: The original user query + responses: Dictionary mapping anonymous IDs to response texts + evaluator_name: Name of the agent doing the evaluation + + Returns: + Formatted evaluation prompt string + """ + responses_text = "\n\n".join( + [ + f"Response {response_id}:\n{response_text}" + for response_id, response_text in responses.items() + ] + ) + + return f"""You are evaluating responses from your fellow LLM Council members to the following query: + +QUERY: {query} + +Below are the anonymized responses from all council members (including potentially your own): + +{responses_text} + +Your task: +1. Carefully read and analyze each response +2. Evaluate the quality, accuracy, completeness, and usefulness of each response +3. Rank the responses from best to worst (1 = best, {len(responses)} = worst) +4. Provide brief reasoning for your rankings +5. Be honest and objective - you may find another model's response superior to your own + +Format your evaluation as follows: + +RANKINGS: +1. 
Response [ID]: [Brief reason why this is the best] +2. Response [ID]: [Brief reason] +... +{len(responses)}. Response [ID]: [Brief reason why this ranks lowest] + +ADDITIONAL OBSERVATIONS: +[Any additional insights about the responses, common themes, strengths/weaknesses, etc.] + +Remember: The goal is honest, objective evaluation. If another model's response is genuinely better, acknowledge it.""" + + +def get_synthesis_prompt( + query: str, + original_responses: Dict[str, str], + evaluations: Dict[str, str], + id_to_member: Dict[str, str], +) -> str: + """ + Create synthesis prompt for the Chairman. + + Args: + query: Original user query + original_responses: Dict mapping member names to their responses + evaluations: Dict mapping evaluator names to their evaluation texts + id_to_member: Mapping from anonymous IDs to member names + + Returns: + Formatted synthesis prompt + """ + responses_section = "\n\n".join( + [ + f"=== {name} ===\n{response}" + for name, response in original_responses.items() + ] + ) + + evaluations_section = "\n\n".join( + [ + f"=== Evaluation by {name} ===\n{evaluation}" + for name, evaluation in evaluations.items() + ] + ) + + return f"""As the Chairman of the LLM Council, synthesize the following information into a final, comprehensive answer. + +ORIGINAL QUERY: +{query} + +COUNCIL MEMBER RESPONSES: +{responses_section} + +COUNCIL MEMBER EVALUATIONS AND RANKINGS: +{evaluations_section} + +ANONYMOUS ID MAPPING (for reference): +{chr(10).join([f" {aid} = {name}" for aid, name in id_to_member.items()])} + +Your task: +1. Review all council member responses +2. Consider the evaluations and rankings provided by each member +3. Identify the strongest elements from each response +4. 
Synthesize a final, comprehensive answer that: + - Incorporates the best insights from multiple perspectives + - Addresses the query thoroughly and accurately + - Benefits from the diversity of approaches taken + - Is clear, well-structured, and useful + +Provide your final synthesized response below. You may reference which perspectives or approaches influenced different parts of your answer.""" + + +class LLMCouncil: + """ + An LLM Council that orchestrates multiple specialized agents to collaboratively + answer queries through independent responses, peer review, and synthesis. + + The council follows this workflow: + 1. Dispatch query to all council members in parallel + 2. Collect all responses (anonymized) + 3. Have each member review and rank all responses + 4. Chairman synthesizes everything into final response + """ + + def __init__( + self, + id: str = swarm_id(), + name: str = "LLM Council", + description: str = "A collaborative council of LLM agents where each member independently answers a query, reviews and ranks anonymized peer responses, and a chairman synthesizes the best elements into a final answer.", + council_members: Optional[List[Agent]] = None, + chairman_model: str = "gpt-5.1", + verbose: bool = True, + output_type: HistoryOutputType = "dict-all-except-first", + ): + """ + Initialize the LLM Council. + + Args: + council_members: List of Agent instances representing council members. + If None, creates default council with GPT-5.1, Gemini 3 Pro, + Claude Sonnet 4.5, and Grok-4. + chairman_model: Model name for the Chairman agent that synthesizes responses. + verbose: Whether to print progress and intermediate results. + output_type: Format for the output. Options: "list", "dict", "string", "final", "json", "yaml", etc. 
+ """ + self.name = name + self.description = description + self.verbose = verbose + self.output_type = output_type + + # Create default council members if none provided + if council_members is None: + self.council_members = self._create_default_council() + else: + self.council_members = council_members + + # Create Chairman agent + self.chairman = Agent( + agent_name="Chairman", + agent_description="Chairman of the LLM Council, responsible for synthesizing all responses and rankings into a final answer", + system_prompt=get_chairman_prompt(), + model_name=chairman_model, + max_loops=1, + verbose=verbose, + temperature=0.7, + ) + + self.conversation = Conversation( + name=f"[LLM Council] [Conversation][{name}]" + ) + + if self.verbose: + print( + f"🏛️ LLM Council initialized with {len(self.council_members)} members" + ) + for i, member in enumerate(self.council_members, 1): + print( + f" {i}. {member.agent_name} ({member.model_name})" + ) + + def _create_default_council(self) -> List[Agent]: + """ + Create default council members with specialized prompts and models. + + Returns: + List of Agent instances configured as council members. 
+ """ + + # GPT-5.1 Agent - Analytical and comprehensive + gpt_agent = Agent( + agent_name="GPT-5.1-Councilor", + agent_description="Analytical and comprehensive AI councilor specializing in deep analysis and thorough responses", + system_prompt=get_gpt_councilor_prompt(), + model_name="gpt-5.1", + max_loops=1, + verbose=False, + temperature=0.7, + ) + + # Gemini 3 Pro Agent - Concise and processed + gemini_agent = Agent( + agent_name="Gemini-3-Pro-Councilor", + agent_description="Concise and well-processed AI councilor specializing in clear, structured responses", + system_prompt=get_gemini_councilor_prompt(), + model_name="gemini-2.5-flash", # Using available Gemini model + max_loops=1, + verbose=False, + temperature=0.7, + ) + + # Claude Sonnet 4.5 Agent - Balanced and thoughtful + claude_agent = Agent( + agent_name="Claude-Sonnet-4.5-Councilor", + agent_description="Thoughtful and balanced AI councilor specializing in nuanced and well-reasoned responses", + system_prompt=get_claude_councilor_prompt(), + model_name="anthropic/claude-sonnet-4-5", # Using available Claude model + max_loops=1, + verbose=False, + temperature=0.0, + top_p=None, + ) + + # Grok-4 Agent - Creative and innovative + grok_agent = Agent( + agent_name="Grok-4-Councilor", + agent_description="Creative and innovative AI councilor specializing in unique perspectives and creative solutions", + system_prompt=get_grok_councilor_prompt(), + model_name="xai/grok-4-1-fast-reasoning", # Using available model as proxy for Grok-4 + max_loops=1, + verbose=False, + temperature=0.8, + ) + + members = [gpt_agent, gemini_agent, claude_agent, grok_agent] + + return members + + def run(self, query: str): + """ + Execute the full LLM Council workflow. + + Args: + query: The user's query to process + + Returns: + Formatted output based on output_type, containing conversation history + with all council member responses, evaluations, and final synthesis. 
+ """ + if self.verbose: + print(f"\n{'='*80}") + print("🏛️ LLM COUNCIL SESSION") + print("=" * 80) + print(f"\n📝 Query: {query}\n") + + # Add user query to conversation + self.conversation.add(role="User", content=query) + + # Step 1: Get responses from all council members in parallel + if self.verbose: + print("📤 Dispatching query to all council members...") + + results_dict = run_agents_concurrently( + self.council_members, + task=query, + return_agent_output_dict=True, + ) + + # Map results to member names + original_responses = { + member.agent_name: response + for member, response in zip( + self.council_members, + [ + results_dict.get(member.agent_name, "") + for member in self.council_members + ], + ) + } + + # Add each council member's response to conversation + for member_name, response in original_responses.items(): + self.conversation.add(role=member_name, content=response) + + if self.verbose: + print( + f"✅ Received {len(original_responses)} responses\n" + ) + for name, response in original_responses.items(): + print(f" {name}: {response[:100]}...") + + # Step 2: Anonymize responses for evaluation + # Create anonymous IDs (A, B, C, D, etc.) + anonymous_ids = [ + chr(65 + i) for i in range(len(self.council_members)) + ] + random.shuffle(anonymous_ids) # Shuffle to ensure anonymity + + anonymous_responses = { + anonymous_ids[i]: original_responses[member.agent_name] + for i, member in enumerate(self.council_members) + } + + # Create mapping from anonymous ID to member name (for later reference) + id_to_member = { + anonymous_ids[i]: member.agent_name + for i, member in enumerate(self.council_members) + } + + if self.verbose: + print( + "\n🔍 Council members evaluating each other's responses..." 
+ ) + + # Step 3: Have each member evaluate and rank all responses concurrently + # Create evaluation tasks for each member + evaluation_tasks = [ + get_evaluation_prompt( + query, anonymous_responses, member.agent_name + ) + for member in self.council_members + ] + + # Run evaluations concurrently using batched_grid_agent_execution + evaluation_results = batched_grid_agent_execution( + self.council_members, evaluation_tasks + ) + + # Map results to member names + evaluations = { + member.agent_name: evaluation_results[i] + for i, member in enumerate(self.council_members) + } + + # Add each council member's evaluation to conversation + for member_name, evaluation in evaluations.items(): + self.conversation.add( + role=f"{member_name}-Evaluation", content=evaluation + ) + + if self.verbose: + print(f"✅ Received {len(evaluations)} evaluations\n") + + # Step 4: Chairman synthesizes everything + if self.verbose: + print("👔 Chairman synthesizing final response...\n") + + synthesis_prompt = get_synthesis_prompt( + query, original_responses, evaluations, id_to_member + ) + + final_response = self.chairman.run(task=synthesis_prompt) + + # Add chairman's final response to conversation + self.conversation.add(role="Chairman", content=final_response) + + if self.verbose: + print(f"{'='*80}") + print("✅ FINAL RESPONSE") + print(f"{'='*80}\n") + + # Format and return output using history_output_formatter + return history_output_formatter( + conversation=self.conversation, type=self.output_type + ) + + def batched_run(self, tasks: List[str]): + """ + Run the LLM Council workflow for a batch of tasks. 
+ + Args: + tasks: List of tasks to process + + Returns: + List of formatted outputs based on output_type + """ + return [self.run(task) for task in tasks] diff --git a/swarms/structs/maker.py b/swarms/structs/maker.py new file mode 100644 index 00000000..8195ba97 --- /dev/null +++ b/swarms/structs/maker.py @@ -0,0 +1,1093 @@ +""" +MAKER: Massively decomposed Agentic processes with first-to-ahead-by-K Error correction and Red-flagging + +This module implements the MAKER framework from the paper: +"Solving a Million-Step LLM Task with Zero Errors" by Meyerson et al. (2025) + +MAKER is a general-purpose framework for solving long-horizon tasks with extreme precision through: +1. MAD (Maximal Agentic Decomposition): Breaking tasks into minimal subtasks +2. First-to-ahead-by-K Voting: Error correction through voting +3. Red-flagging: Discarding unreliable responses + +The framework enables solving tasks with millions of LLM steps with zero errors +by exploiting the modularity of extreme decomposition to apply error correction +at each step. + +Paper: https://arxiv.org/abs/2511.09030 +""" + +import uuid +import math +import concurrent.futures +from typing import Any, Callable, Dict, List, Optional, Tuple + +from swarms.structs.agent import Agent +from swarms.structs.conversation import Conversation +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="maker") + + +class MAKER: + """ + MAKER: Maximal Agentic decomposition, first-to-ahead-by-K Error correction, and Red-flagging. + + A general-purpose framework for solving long-horizon tasks with extreme precision + through massive decomposition of tasks into subtasks, each solved by focused + microagents with error correction through voting. + + This implementation follows the MAKER framework from the paper: + "Solving a Million-Step LLM Task with Zero Errors" by Meyerson et al. (2025) + + The framework consists of three core components: + + 1. 
MAD (Maximal Agentic Decomposition): + By breaking a task with s steps into s subtasks, each agent can focus on a + single step, reducing context confusion and improving reliability. + + 2. First-to-ahead-by-K Voting: + For each step, multiple samples are drawn until one candidate action is + K votes ahead of all others, ensuring high probability of correctness. + + 3. Red-flagging: + Responses that show signs of unreliability (overly long or incorrectly + formatted) are discarded, reducing correlated errors. + + The framework is task-agnostic. Users provide: + - A task/objective to complete (main input to run()) + - A function to format prompts for each step + - A function to parse responses and extract the action/result + - A function to validate responses (for red-flagging) + - Optional: A function to update state between steps + + Attributes: + id (str): Unique identifier for the MAKER instance. + name (str): Human-readable name for the system. + description (str): Description of the system's purpose. + model_name (str): Name of the LLM model to use. + k (int): Vote threshold - candidate must be k votes ahead to win. + max_tokens (int): Maximum tokens for LLM response (red-flag threshold). + temperature (float): Temperature for LLM sampling. + temperature_first (float): Temperature for first vote (typically 0 for determinism). + system_prompt (str): System prompt for the microagents. + format_prompt (Callable): Function to format the prompt for each step. + parse_response (Callable): Function to parse LLM response into a result. + validate_response (Callable): Function to validate response format (red-flagging). + update_state (Callable): Function to update state after each step. + max_workers (int): Maximum parallel workers for concurrent sampling. + verbose (bool): Whether to enable verbose logging. 
+ + Example: + >>> from swarms.structs.maker import MAKER + >>> + >>> # Define task-specific functions + >>> def format_prompt(task, state, step_idx, previous_result): + ... return f"Task: {task}\\nState: {state}\\nStep {step_idx+1}: What's next?" + >>> + >>> def parse_response(response): + ... return response.strip() + >>> + >>> def validate_response(response, max_tokens): + ... return len(response) < max_tokens * 4 and response.strip() != "" + >>> + >>> # Create MAKER instance + >>> maker = MAKER( + ... name="MyTaskSolver", + ... model_name="gpt-4o-mini", + ... system_prompt="You solve tasks step by step.", + ... format_prompt=format_prompt, + ... parse_response=parse_response, + ... validate_response=validate_response, + ... k=3, + ... ) + >>> + >>> # Run the solver with your task + >>> results = maker.run( + ... task="Calculate the factorial of 5 step by step", + ... max_steps=5 + ... ) + + References: + Meyerson, E., et al. (2025). Solving a Million-Step LLM Task with Zero Errors. + arXiv:2511.09030 + """ + + def __init__( + self, + id: str = None, + name: str = "MAKER", + description: str = "Massively decomposed Agentic processes with Error correction and Red-flagging", + model_name: str = "gpt-4o-mini", + system_prompt: str = "You are a precise assistant that solves tasks step by step. Follow instructions exactly and provide clear, structured outputs.", + k: int = 3, + max_tokens: int = 1024, + temperature: float = 0.1, + temperature_first: float = 0.0, + format_prompt: Callable[[str, Any, int, Any], str] = None, + parse_response: Callable[[str], Any] = None, + validate_response: Callable[[str, int], bool] = None, + update_state: Callable[[Any, Any, int], Any] = None, + initial_state: Any = None, + max_workers: int = None, + verbose: bool = True, + max_retries_per_step: int = 100, + agents: List[Agent] = None, + ): + """ + Initialize the MAKER framework. + + Args: + id: Unique identifier for the MAKER instance. Auto-generated if not provided. 
+ name: Human-readable name for the system. + description: Description of the system's purpose. + model_name: Name of the LLM model to use (e.g., "gpt-4o-mini", "gpt-4.1-mini"). + system_prompt: System prompt for the microagents. Should describe the task domain + and expected output format. + k: Vote threshold - a candidate must be k votes ahead of all others to win. + Higher k means more reliability but higher cost. Typical values: 2-5. + max_tokens: Maximum tokens for LLM response. Responses exceeding this are + red-flagged as the model may be confused. + temperature: Temperature for LLM sampling (used for votes after the first). + Lower values (0.1-0.3) provide more consistent results. + temperature_first: Temperature for first vote. Using 0 ensures the best + deterministic guess is included in the vote set. + format_prompt: Function(task, state, step_idx, previous_result) -> str that formats + the prompt for each step. The task is the main objective passed to run(). + If None, uses a simple default. + parse_response: Function(response_text) -> result that extracts the result + from the LLM response. The result must be hashable for voting. + If None, returns the stripped response text. + validate_response: Function(response_text, max_tokens) -> bool that validates + the response format. Returns True if valid, False to red-flag. + If None, only checks response length. + update_state: Function(current_state, result, step_idx) -> new_state that + updates the state after each step. If None, state is unchanged. + initial_state: Initial state for the task. Can be any type depending on your task. + max_workers: Maximum parallel workers for concurrent vote sampling. + If None, uses k as the number of workers. + verbose: Whether to enable verbose logging. + max_retries_per_step: Maximum retries per step before raising an error. + agents: Optional list of pre-configured agents to use instead of creating new ones. + If provided, agents will be cycled through for each vote. 
+ """ + self.id = id if id is not None else str(uuid.uuid4()) + self.name = name + self.description = description + self.model_name = model_name + self.system_prompt = system_prompt + self.k = k + self.max_tokens = max_tokens + self.temperature = temperature + self.temperature_first = temperature_first + self.max_workers = ( + max_workers if max_workers is not None else k + ) + self.verbose = verbose + self.max_retries_per_step = max_retries_per_step + self.agents = agents + self.initial_state = initial_state + + # Task-specific functions with defaults + self.format_prompt = ( + format_prompt + if format_prompt is not None + else self._default_format_prompt + ) + self.parse_response = ( + parse_response + if parse_response is not None + else self._default_parse_response + ) + self.validate_response = ( + validate_response + if validate_response is not None + else self._default_validate_response + ) + self.update_state = ( + update_state + if update_state is not None + else self._default_update_state + ) + + # Initialize conversation tracker + self.conversation = Conversation( + name=f"maker_{self.name}_{self.id}" + ) + + # Statistics tracking + self.stats = { + "total_samples": 0, + "total_votes": 0, + "red_flagged": 0, + "steps_completed": 0, + "votes_per_step": [], + "samples_per_step": [], + } + + # Validate configuration + self._validate_config() + + if self.verbose: + logger.info(f"MAKER initialized: {self.name}") + logger.info( + f"Model: {self.model_name}, k={self.k}, max_tokens={self.max_tokens}" + ) + + def _validate_config(self): + """ + Validate the MAKER configuration. + + Raises: + ValueError: If configuration is invalid. 
+ """ + if self.k < 1: + raise ValueError("k must be at least 1") + if self.max_tokens < 10: + raise ValueError("max_tokens must be at least 10") + if self.temperature < 0 or self.temperature > 2: + raise ValueError("temperature must be between 0 and 2") + if self.max_retries_per_step < 1: + raise ValueError( + "max_retries_per_step must be at least 1" + ) + + def _default_format_prompt( + self, + task: str, + state: Any, + step_idx: int, + previous_result: Any, + ) -> str: + """ + Default prompt formatter. + + Args: + task: The main task/objective to complete. + state: Current state of the task. + step_idx: Current step index (0-based). + previous_result: Result from the previous step (None for first step). + + Returns: + Formatted prompt string. + """ + prompt_parts = [f"Task: {task}", f"Step {step_idx + 1}:"] + + if state is not None: + prompt_parts.insert(1, f"Current state: {state}") + + if previous_result is not None: + prompt_parts.insert( + -1, f"Previous result: {previous_result}" + ) + + prompt_parts.append("Provide the result for this step.") + + return "\n".join(prompt_parts) + + def _default_parse_response(self, response_text: str) -> str: + """ + Default response parser. + + Args: + response_text: Raw LLM response. + + Returns: + Stripped response text as the result. + """ + return response_text.strip() + + def _default_validate_response( + self, response_text: str, max_tokens: int + ) -> bool: + """ + Default response validator (red-flagging). + + Args: + response_text: Raw LLM response. + max_tokens: Maximum allowed tokens. + + Returns: + True if response is valid, False to red-flag. 
+ """ + # Estimate tokens (rough: 4 chars per token) + estimated_tokens = len(response_text) // 4 + + # Red-flag if too long + if estimated_tokens > max_tokens: + return False + + # Red-flag if empty + if not response_text.strip(): + return False + + return True + + def _default_update_state( + self, state: Any, result: Any, step_idx: int + ) -> Any: + """ + Default state update function (no-op). + + Args: + state: Current state. + result: Result from current step. + step_idx: Current step index. + + Returns: + Unchanged state. + """ + return state + + def _create_microagent(self, temperature: float = None) -> Agent: + """ + Create a focused microagent for a single step. + + Each microagent has minimal context and is focused on solving + exactly one step of the problem. + + Args: + temperature: Temperature for this agent's sampling. + + Returns: + An Agent instance configured for single-step execution. + """ + temp = ( + temperature + if temperature is not None + else self.temperature + ) + + agent = Agent( + agent_name=f"MAKER-MicroAgent-{uuid.uuid4().hex[:8]}", + agent_description="Focused microagent for single-step execution in MAKER framework", + system_prompt=self.system_prompt, + model_name=self.model_name, + max_tokens=self.max_tokens, + temperature=temp, + max_loops=1, + verbose=False, + print_on=False, + output_type="str-all-except-first", + ) + + return agent + + def _get_agent(self, temperature: float = None) -> Agent: + """ + Get an agent for voting. + + If agents were provided, returns one from the pool. + Otherwise, creates a new microagent. + + Args: + temperature: Temperature for agent sampling. + + Returns: + An Agent instance. 
+ """ + if self.agents is not None and len(self.agents) > 0: + # Cycle through provided agents + agent_idx = self.stats["total_samples"] % len(self.agents) + return self.agents[agent_idx] + else: + return self._create_microagent(temperature) + + def _make_hashable(self, result: Any) -> Any: + """ + Convert a result to a hashable type for voting. + + Args: + result: The result to convert. + + Returns: + A hashable version of the result. + """ + if isinstance(result, (str, int, float, bool, type(None))): + return result + elif isinstance(result, (list, tuple)): + return tuple(self._make_hashable(item) for item in result) + elif isinstance(result, dict): + return tuple( + sorted( + (k, self._make_hashable(v)) + for k, v in result.items() + ) + ) + elif isinstance(result, set): + return frozenset( + self._make_hashable(item) for item in result + ) + else: + # Fall back to string representation + return str(result) + + def _unhash_result( + self, hashable: Any, original_type: type + ) -> Any: + """ + Convert a hashable result back to its original type. + + Args: + hashable: The hashable result. + original_type: The original type of the result. + + Returns: + The result in its original type. + """ + if original_type in (str, int, float, bool, type(None)): + return hashable + elif original_type is list: + return ( + list(hashable) + if isinstance(hashable, tuple) + else hashable + ) + elif original_type is dict: + return ( + dict(hashable) + if isinstance(hashable, tuple) + else hashable + ) + elif original_type is set: + return ( + set(hashable) + if isinstance(hashable, frozenset) + else hashable + ) + else: + return hashable + + def get_vote( + self, + task: str, + state: Any, + step_idx: int, + previous_result: Any = None, + temperature: float = None, + ) -> Optional[Tuple[Any, str, type]]: + """ + Get a single vote for the current step. + + Samples from the LLM and applies red-flagging. If the response has + red flags, returns None (the vote is discarded). 
+ + This implements Algorithm 3 (get_vote) from the paper. + + Args: + task: The main task/objective being solved. + state: Current state of the task. + step_idx: Current step index. + previous_result: Result from previous step. + temperature: Temperature for sampling. + + Returns: + Tuple of (hashable_result, raw_response, original_type) if valid, + None if red-flagged. + """ + self.stats["total_samples"] += 1 + + agent = self._get_agent(temperature) + prompt = self.format_prompt( + task, state, step_idx, previous_result + ) + + try: + response = agent.run(task=prompt) + + # Red-flag check + if not self.validate_response(response, self.max_tokens): + self.stats["red_flagged"] += 1 + if self.verbose: + logger.debug( + f"Red-flagged response at step {step_idx + 1}" + ) + return None + + # Parse the response + result = self.parse_response(response) + original_type = type(result) + + # Convert to hashable for voting + hashable_result = self._make_hashable(result) + + self.stats["total_votes"] += 1 + return (hashable_result, response, original_type) + + except Exception as e: + self.stats["red_flagged"] += 1 + if self.verbose: + logger.debug( + f"Red-flagged response at step {step_idx + 1} (exception: {e})" + ) + return None + + def do_voting( + self, + task: str, + state: Any, + step_idx: int, + previous_result: Any = None, + ) -> Tuple[Any, str]: + """ + Perform first-to-ahead-by-k voting for the current step. + + Samples votes until one candidate result is k votes ahead of all others. + This provides statistical error correction by requiring consensus. + + This implements Algorithm 2 (do_voting) from the paper. + + Args: + task: The main task/objective being solved. + state: Current state of the task. + step_idx: Current step index. + previous_result: Result from previous step. + + Returns: + Tuple of (result, raw_response) for the winning candidate. + + Raises: + RuntimeError: If max_retries_per_step is exceeded without finding a winner. 
+ """ + votes = {} # hashable_result -> vote count + responses = {} # hashable_result -> raw_response + original_types = {} # hashable_result -> original_type + samples_this_step = 0 + votes_this_step = 0 + is_first_vote = True + + while samples_this_step < self.max_retries_per_step: + # Use temperature 0 for first vote, then configured temperature + temp = ( + self.temperature_first + if is_first_vote + else self.temperature + ) + is_first_vote = False + + # Get a vote + result = self.get_vote( + task, state, step_idx, previous_result, temp + ) + samples_this_step += 1 + + if result is None: + # Red-flagged, try again + continue + + hashable_result, response, original_type = result + votes_this_step += 1 + + # Update vote count + if hashable_result not in votes: + votes[hashable_result] = 0 + responses[hashable_result] = response + original_types[hashable_result] = original_type + votes[hashable_result] += 1 + + # Check if we have a winner (first-to-ahead-by-k) + current_count = votes[hashable_result] + max_other = max( + (v for r, v in votes.items() if r != hashable_result), + default=0, + ) + + if current_count >= max_other + self.k: + # We have a winner! + self.stats["votes_per_step"].append(votes_this_step) + self.stats["samples_per_step"].append( + samples_this_step + ) + + if self.verbose: + logger.debug( + f"Step {step_idx + 1} decided with {votes_this_step} votes " + f"({samples_this_step} samples, winner: {current_count} votes)" + ) + + # Convert back to original type + final_result = self._unhash_result( + hashable_result, original_types[hashable_result] + ) + return final_result, responses[hashable_result] + + # If we get here, we've exceeded max retries + raise RuntimeError( + f"Step {step_idx + 1}: Failed to reach consensus after " + f"{self.max_retries_per_step} samples. Vote distribution: {votes}" + ) + + def run(self, task: str, max_steps: int = None) -> List[Any]: + """ + Run the MAKER framework to solve the given task. 
+ + Executes the complete solution process, generating results step-by-step + using maximal decomposition with error correction through voting. + + This implements Algorithm 1 (generate_solution) from the paper. + + Args: + task: The main task/objective to complete. This is the primary input + that defines what the MAKER framework should solve. + max_steps: Number of steps to execute. Required parameter. + + Returns: + List of results from each step. + + Raises: + ValueError: If task is not provided or max_steps is not specified. + RuntimeError: If voting fails on any step. + + Example: + >>> maker = MAKER( + ... system_prompt="Solve math problems step by step.", + ... k=3 + ... ) + >>> results = maker.run( + ... task="Calculate 2^10 by doubling, starting from 2", + ... max_steps=9 + ... ) + """ + if not task: + raise ValueError( + "task is required - this is the objective to complete" + ) + if max_steps is None: + raise ValueError( + "max_steps is required - specify how many steps to execute" + ) + + if self.verbose: + logger.info( + f"Starting MAKER with {max_steps} steps, k={self.k}" + ) + logger.info( + f"Task: {task[:100]}..." 
+ if len(task) > 100 + else f"Task: {task}" + ) + + # Initialize state + state = self.initial_state + + results = [] + previous_result = None + + for step_idx in range(max_steps): + if ( + self.verbose + and (step_idx + 1) % max(1, max_steps // 10) == 0 + ): + logger.info( + f"Progress: {step_idx + 1}/{max_steps} steps completed" + ) + + # Do voting for this step + result, response = self.do_voting( + task, state, step_idx, previous_result + ) + + # Record the result + results.append(result) + + # Update state + state = self.update_state(state, result, step_idx) + previous_result = result + + self.stats["steps_completed"] = step_idx + 1 + + # Log to conversation + self.conversation.add( + role=f"Step-{step_idx + 1}", + content=f"Result: {result}", + ) + + if self.verbose: + self._log_statistics() + + return results + + def run_until_condition( + self, + task: str, + stop_condition: Callable[[Any, List[Any], int], bool], + max_steps: int = 1000, + ) -> List[Any]: + """ + Run MAKER until a stopping condition is met. + + Useful for tasks where the number of steps is not known in advance. + + Args: + task: The main task/objective to complete. + stop_condition: Function(current_state, results, step_idx) -> bool + that returns True when the task is complete. + max_steps: Maximum steps to prevent infinite loops. + + Returns: + List of results from each step. + + Example: + >>> def is_complete(state, results, step_idx): + ... return "DONE" in str(results[-1]) if results else False + >>> + >>> maker = MAKER(system_prompt="...", k=3) + >>> results = maker.run_until_condition( + ... task="Solve this problem until you reach the answer", + ... stop_condition=is_complete, + ... max_steps=100 + ... 
) + """ + if not task: + raise ValueError( + "task is required - this is the objective to complete" + ) + if stop_condition is None: + raise ValueError("stop_condition must be provided") + + state = self.initial_state + + if self.verbose: + logger.info( + f"Starting MAKER (conditional), max_steps={max_steps}, k={self.k}" + ) + logger.info( + f"Task: {task[:100]}..." + if len(task) > 100 + else f"Task: {task}" + ) + + results = [] + previous_result = None + + for step_idx in range(max_steps): + # Check stop condition + if stop_condition(state, results, step_idx): + if self.verbose: + logger.info( + f"Stop condition met at step {step_idx + 1}" + ) + break + + if self.verbose and (step_idx + 1) % 10 == 0: + logger.info( + f"Progress: {step_idx + 1} steps completed" + ) + + # Do voting for this step + result, response = self.do_voting( + task, state, step_idx, previous_result + ) + + results.append(result) + state = self.update_state(state, result, step_idx) + previous_result = result + self.stats["steps_completed"] = step_idx + 1 + + if self.verbose: + self._log_statistics() + + return results + + def run_parallel_voting( + self, task: str, max_steps: int = None + ) -> List[Any]: + """ + Run MAKER with parallel vote sampling. + + An optimized version that samples k votes in parallel for each step, + which can significantly reduce wall-clock time while maintaining + the same error correction guarantees. + + Args: + task: The main task/objective to complete. + max_steps: Number of steps to execute. + + Returns: + List of results from each step. + """ + if not task: + raise ValueError( + "task is required - this is the objective to complete" + ) + if max_steps is None: + raise ValueError( + "max_steps is required - specify how many steps to execute" + ) + + state = self.initial_state + + if self.verbose: + logger.info( + f"Starting MAKER (parallel) with {max_steps} steps, k={self.k}" + ) + logger.info( + f"Task: {task[:100]}..." 
+ if len(task) > 100 + else f"Task: {task}" + ) + + results = [] + previous_result = None + + for step_idx in range(max_steps): + if ( + self.verbose + and (step_idx + 1) % max(1, max_steps // 10) == 0 + ): + logger.info( + f"Progress: {step_idx + 1}/{max_steps} steps completed" + ) + + result, response = self._do_voting_parallel( + task, state, step_idx, previous_result + ) + + results.append(result) + state = self.update_state(state, result, step_idx) + previous_result = result + self.stats["steps_completed"] = step_idx + 1 + + if self.verbose: + self._log_statistics() + + return results + + def _do_voting_parallel( + self, + task: str, + state: Any, + step_idx: int, + previous_result: Any = None, + ) -> Tuple[Any, str]: + """ + Parallel voting implementation. + + Samples k votes in parallel, then continues with sequential sampling + if no winner is found. + + Args: + task: The main task/objective being solved. + state: Current state of the task. + step_idx: Current step index. + previous_result: Result from previous step. + + Returns: + Tuple of (result, raw_response). 
+ """ + votes = {} + responses = {} + original_types = {} + samples_this_step = 0 + votes_this_step = 0 + + # First round: sample k votes in parallel + with concurrent.futures.ThreadPoolExecutor( + max_workers=self.max_workers + ) as executor: + # First vote with temperature 0, rest with configured temperature + futures = [] + futures.append( + executor.submit( + self.get_vote, + task, + state, + step_idx, + previous_result, + self.temperature_first, + ) + ) + for _ in range(self.k - 1): + futures.append( + executor.submit( + self.get_vote, + task, + state, + step_idx, + previous_result, + self.temperature, + ) + ) + + for future in concurrent.futures.as_completed(futures): + samples_this_step += 1 + result = future.result() + if result is not None: + hashable_result, response, original_type = result + votes_this_step += 1 + if hashable_result not in votes: + votes[hashable_result] = 0 + responses[hashable_result] = response + original_types[hashable_result] = ( + original_type + ) + votes[hashable_result] += 1 + + # Check if we have a winner, continue sequentially if not + while samples_this_step < self.max_retries_per_step: + if votes: + leader = max(votes, key=votes.get) + leader_count = votes[leader] + max_other = max( + (v for r, v in votes.items() if r != leader), + default=0, + ) + + if leader_count >= max_other + self.k: + self.stats["votes_per_step"].append( + votes_this_step + ) + self.stats["samples_per_step"].append( + samples_this_step + ) + + final_result = self._unhash_result( + leader, original_types[leader] + ) + return final_result, responses[leader] + + # No winner yet, get more votes sequentially + result = self.get_vote( + task, + state, + step_idx, + previous_result, + self.temperature, + ) + samples_this_step += 1 + + if result is not None: + hashable_result, response, original_type = result + votes_this_step += 1 + if hashable_result not in votes: + votes[hashable_result] = 0 + responses[hashable_result] = response + 
original_types[hashable_result] = original_type + votes[hashable_result] += 1 + + raise RuntimeError( + f"Step {step_idx + 1}: Failed to reach consensus after " + f"{self.max_retries_per_step} samples" + ) + + def _log_statistics(self): + """Log execution statistics.""" + logger.info("=" * 50) + logger.info("MAKER Execution Statistics") + logger.info("=" * 50) + logger.info( + f"Steps completed: {self.stats['steps_completed']}" + ) + logger.info(f"Total samples: {self.stats['total_samples']}") + logger.info(f"Total valid votes: {self.stats['total_votes']}") + logger.info( + f"Red-flagged responses: {self.stats['red_flagged']}" + ) + + if self.stats["votes_per_step"]: + avg_votes = sum(self.stats["votes_per_step"]) / len( + self.stats["votes_per_step"] + ) + max_votes = max(self.stats["votes_per_step"]) + logger.info(f"Average votes per step: {avg_votes:.2f}") + logger.info(f"Max votes for a step: {max_votes}") + + if self.stats["samples_per_step"]: + avg_samples = sum(self.stats["samples_per_step"]) / len( + self.stats["samples_per_step"] + ) + logger.info( + f"Average samples per step: {avg_samples:.2f}" + ) + + red_flag_rate = self.stats["red_flagged"] / max( + 1, self.stats["total_samples"] + ) + logger.info(f"Red-flag rate: {red_flag_rate:.2%}") + logger.info("=" * 50) + + def estimate_cost( + self, + total_steps: int, + target_success_probability: float = 0.95, + ) -> Dict[str, Any]: + """ + Estimate the expected cost of solving a task with given steps. + + Uses the theoretical framework from the paper to estimate costs + based on step success rate and voting threshold. + + Args: + total_steps: Total number of steps for the task. + target_success_probability: Target probability of solving the full task. + + Returns: + Dictionary containing cost estimates and statistics. 
+ """ + # Estimate per-step success rate from current statistics + if self.stats["total_votes"] > 0: + valid_rate = self.stats["total_votes"] / max( + 1, self.stats["total_samples"] + ) + p = ( + valid_rate * 0.99 + ) # Assume 99% of valid votes are correct + else: + p = 0.99 # Default assumption + + # Calculate minimum k needed (Equation 14 from paper) + s = total_steps + t = target_success_probability + + if p > 0.5: + ratio = (1 - p) / p + try: + k_min = math.ceil( + math.log(t ** (-1 / s) - 1) / math.log(ratio) + ) + except (ValueError, ZeroDivisionError): + k_min = 1 + else: + k_min = float("inf") + + # Expected samples per step (Equation 16 from paper) + if p > 0.5 and k_min != float("inf"): + expected_samples = k_min / (p * (2 * p - 1)) + else: + expected_samples = float("inf") + + return { + "estimated_p": p, + "estimated_k_min": k_min, + "expected_samples_per_step": expected_samples, + "expected_total_samples": ( + expected_samples * s + if expected_samples != float("inf") + else float("inf") + ), + "current_k": self.k, + "total_steps": s, + "target_success_probability": t, + } + + def get_statistics(self) -> Dict[str, Any]: + """ + Get execution statistics. + + Returns: + Dictionary containing execution statistics. 
+ """ + return self.stats.copy() + + def reset(self): + """Reset the MAKER instance for a new run.""" + self.stats = { + "total_samples": 0, + "total_votes": 0, + "red_flagged": 0, + "steps_completed": 0, + "votes_per_step": [], + "samples_per_step": [], + } + self.conversation = Conversation( + name=f"maker_{self.name}_{self.id}" + ) diff --git a/swarms/structs/round_robin.py b/swarms/structs/round_robin.py index 21261b3b..fb9d086b 100644 --- a/swarms/structs/round_robin.py +++ b/swarms/structs/round_robin.py @@ -1,67 +1,57 @@ import random -from datetime import datetime -from typing import List, Optional +from typing import List, Union import tenacity -from pydantic import BaseModel, Field -from swarms.schemas.agent_step_schemas import ManySteps from swarms.structs.agent import Agent -from swarms.structs.base_swarm import BaseSwarm +from swarms.structs.conversation import Conversation +from swarms.utils.history_output_formatter import ( + history_output_formatter, +) from swarms.utils.loguru_logger import initialize_logger +from swarms.utils.output_types import OutputType logger = initialize_logger("round-robin") -datetime_stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - -class MetadataSchema(BaseModel): - swarm_id: Optional[str] = Field( - ..., description="Unique ID for the run" - ) - name: Optional[str] = Field( - "RoundRobinSwarm", description="Name of the swarm" - ) - task: Optional[str] = Field( - ..., description="Task or query given to all agents" - ) - description: Optional[str] = Field( - "Concurrent execution of multiple agents", - description="Description of the workflow", - ) - agent_outputs: Optional[List[ManySteps]] = Field( - ..., description="List of agent outputs and metadata" - ) - timestamp: Optional[str] = Field( - default_factory=datetime.now, - description="Timestamp of the workflow execution", - ) - max_loops: Optional[int] = Field( - 1, description="Maximum number of loops to run" - ) - - -class RoundRobinSwarm(BaseSwarm): +class 
RoundRobinSwarm: """ A swarm implementation that executes tasks in a round-robin fashion. + This swarm implements an AutoGen-style communication pattern where agents + are shuffled randomly each loop for varied interaction patterns. Each agent + receives the full conversation context to build upon others' responses. + Args: - agents (List[Agent], optional): List of agents in the swarm. Defaults to None. + name (str): Name of the swarm. Defaults to "RoundRobinSwarm". + description (str): Description of the swarm's purpose. + agents (List[Agent]): List of agents in the swarm. Required. verbose (bool, optional): Flag to enable verbose mode. Defaults to False. max_loops (int, optional): Maximum number of loops to run. Defaults to 1. callback (callable, optional): Callback function to be called after each loop. Defaults to None. - return_json_on (bool, optional): Flag to return the metadata as a JSON object. Defaults to False. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. + max_retries (int, optional): Maximum number of retries for agent execution. Defaults to 3. + output_type (OutputType, optional): Type of output format. Defaults to "final". Attributes: + name (str): Name of the swarm. + description (str): Description of the swarm's purpose. agents (List[Agent]): List of agents in the swarm. verbose (bool): Flag to enable verbose mode. max_loops (int): Maximum number of loops to run. + callback (callable): Callback function executed after each loop. index (int): Current index of the agent being executed. + max_retries (int): Maximum number of retries for agent execution. + output_type (OutputType): Type of output format. + conversation (Conversation): Conversation history for the swarm. Methods: - run(task: str, *args, **kwargs) -> Any: Executes the given task on the agents in a round-robin fashion. + run(task: str, *args, **kwargs) -> Union[str, dict, list]: + Executes the given task on the agents in a round-robin fashion. 
+ run_batch(tasks: List[str]) -> List: + Executes multiple tasks sequentially, returning results for each. + + Raises: + ValueError: If no agents are provided during initialization. """ @@ -73,54 +63,36 @@ class RoundRobinSwarm(BaseSwarm): verbose: bool = False, max_loops: int = 1, callback: callable = None, - return_json_on: bool = False, max_retries: int = 3, - *args, - **kwargs, + output_type: OutputType = "final", ): - try: - super().__init__( - name=name, - description=description, - agents=agents, - *args, - **kwargs, - ) - self.name = name - self.description = description - self.agents = agents or [] - self.verbose = verbose - self.max_loops = max_loops - self.callback = callback - self.return_json_on = return_json_on - self.index = 0 - self.max_retries = max_retries - - # Store the metadata for the run - self.output_schema = MetadataSchema( - name=self.name, - swarm_id=datetime_stamp, - task="", - description=self.description, - agent_outputs=[], - timestamp=datetime_stamp, - max_loops=self.max_loops, - ) - - # Set the max loops for every agent - if self.agents: - for agent in self.agents: - agent.max_loops = random.randint(1, 5) - logger.info( - f"Successfully initialized {self.name} with {len(self.agents)} agents" + self.name = name + self.description = description + self.agents = agents + self.verbose = verbose + self.max_loops = max_loops + self.callback = callback + self.index = 0 + self.max_retries = max_retries + self.output_type = output_type + + # Initialize conversation for tracking agent interactions + self.conversation = Conversation(name=f"{name}_conversation") + + if self.agents is None: + raise ValueError( + "RoundRobinSwarm cannot be initialized without agents" ) - except Exception as e: - logger.error( - f"Failed to initialize {self.name}: {str(e)}" - ) - raise + # Set the max loops for every agent + if self.agents: + for agent in self.agents: + agent.max_loops = random.randint(1, 5) + + logger.info( + f"Successfully initialized {self.name} 
with {len(self.agents)} agents" + ) @tenacity.retry( stop=tenacity.stop_after_attempt(3), @@ -133,14 +105,26 @@ class RoundRobinSwarm(BaseSwarm): def _execute_agent( self, agent: Agent, task: str, *args, **kwargs ) -> str: - """Execute a single agent with retries and error handling""" + """ + Execute a single agent with retries and error handling. + + Args: + agent (Agent): The agent to execute. + task (str): The task to be executed. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. + + Returns: + str: The result of the agent execution. + """ try: logger.info( f"Running Agent {agent.agent_name} on task: {task}" ) result = agent.run(task, *args, **kwargs) - self.output_schema.agent_outputs.append( - agent.agent_output + self.conversation.add( + role=agent.agent_name, + content=result, ) return result except Exception as e: @@ -149,9 +133,16 @@ class RoundRobinSwarm(BaseSwarm): ) raise - def run(self, task: str, *args, **kwargs): + def run( + self, task: str, *args, **kwargs + ) -> Union[str, dict, list]: """ - Executes the given task on the agents in a round-robin fashion. + Executes the given task on the agents in a randomized round-robin fashion. + + This method implements an AutoGen-style communication pattern where: + - Agents are shuffled randomly each loop for varied interaction patterns + - Each agent receives the full conversation context to build upon others' responses + - Collaborative prompting encourages agents to acknowledge and extend prior contributions Args: task (str): The task to be executed. @@ -159,7 +150,7 @@ class RoundRobinSwarm(BaseSwarm): **kwargs: Arbitrary keyword arguments. Returns: - Any: The result of the task execution. + Union[str, dict, list]: The result of the task execution in the specified output format. 
Raises: ValueError: If no agents are configured @@ -170,11 +161,15 @@ class RoundRobinSwarm(BaseSwarm): raise ValueError("No agents configured for the swarm") try: - result = task - self.output_schema.task = task + # Add initial task to conversation + self.conversation.add(role="User", content=task) n = len(self.agents) + + # Build agent names list for context + agent_names = [agent.agent_name for agent in self.agents] + logger.info( - f"Starting round-robin execution with task '{task}' on {n} agents" + f"Starting randomized round-robin execution with task on {n} agents: {agent_names}" ) for loop in range(self.max_loops): @@ -182,14 +177,36 @@ class RoundRobinSwarm(BaseSwarm): f"Starting loop {loop + 1}/{self.max_loops}" ) - for _ in range(n): - current_agent = self.agents[self.index] + # Shuffle agents randomly each loop for varied interaction patterns + shuffled_agents = self.agents.copy() + random.shuffle(shuffled_agents) + + logger.debug( + f"Agent order for loop {loop + 1}: {[a.agent_name for a in shuffled_agents]}" + ) + + for i, current_agent in enumerate(shuffled_agents): + # Get current conversation context + conversation_context = self.conversation.return_history_as_string() + + # Build collaborative prompt with context + collaborative_task = f"""{conversation_context} + + As {current_agent.agent_name}, you are agent {i + 1} of {n} in this collaborative session. The other agents participating are: {', '.join(name for name in agent_names if name != current_agent.agent_name)}. + + Please review the conversation history above carefully and build upon the insights shared by other agents. Acknowledge their contributions where relevant and provide your unique perspective and expertise. Be concise but thorough in your response, and if this is the first response in the conversation, address the original task directly. 
+ + Your response:""" + try: result = self._execute_agent( - current_agent, result, *args, **kwargs + current_agent, collaborative_task, *args, **kwargs + ) + except Exception as e: + logger.error( + f"Agent {current_agent.agent_name} failed: {str(e)}" ) - finally: - self.index = (self.index + 1) % n + raise if self.callback: logger.debug( @@ -203,21 +220,34 @@ class RoundRobinSwarm(BaseSwarm): ) logger.success( - f"Successfully completed {self.max_loops} loops of round-robin execution" + f"Successfully completed {self.max_loops} loops of randomized round-robin execution" ) - if self.return_json_on: - return self.export_metadata() - return result + return history_output_formatter( + conversation=self.conversation, + type=self.output_type, + ) except Exception as e: logger.error(f"Round-robin execution failed: {str(e)}") raise - def export_metadata(self): - """Export the execution metadata as JSON""" - try: - return self.output_schema.model_dump_json(indent=4) - except Exception as e: - logger.error(f"Failed to export metadata: {str(e)}") - raise + def run_batch(self, tasks: List[str]) -> List[Union[str, dict, list]]: + """ + Execute multiple tasks sequentially through the round-robin swarm. + + Each task is processed independently through the full round-robin + execution cycle, with agents collaborating on each task in turn. + + Args: + tasks (List[str]): A list of task strings to be executed. + + Returns: + List[Union[str, dict, list]]: A list of results, one for each task, + in the format specified by output_type. 
+ + Example: + >>> swarm = RoundRobinSwarm(agents=[agent1, agent2]) + >>> results = swarm.run_batch(["Task 1", "Task 2", "Task 3"]) + """ + return [self.run(task) for task in tasks] diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index 92903f57..27f70bd6 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -23,6 +23,7 @@ from swarms.structs.agent_rearrange import AgentRearrange from swarms.structs.batched_grid_workflow import BatchedGridWorkflow from swarms.structs.concurrent_workflow import ConcurrentWorkflow from swarms.structs.council_as_judge import CouncilAsAJudge +from swarms.structs.debate_with_judge import DebateWithJudge from swarms.structs.groupchat import GroupChat from swarms.structs.heavy_swarm import HeavySwarm from swarms.structs.hiearchical_swarm import HierarchicalSwarm @@ -37,6 +38,8 @@ from swarms.telemetry.log_executions import log_execution from swarms.utils.generate_keys import generate_api_key from swarms.utils.loguru_logger import initialize_logger from swarms.utils.output_types import OutputType +from swarms.structs.llm_council import LLMCouncil +from swarms.structs.round_robin import RoundRobinSwarm logger = initialize_logger(log_folder="swarm_router") @@ -48,7 +51,7 @@ SwarmType = Literal[ "GroupChat", "MultiAgentRouter", "AutoSwarmBuilder", - "HiearchicalSwarm", + "HierarchicalSwarm", "auto", "MajorityVoting", "MALT", @@ -56,6 +59,9 @@ SwarmType = Literal[ "InteractiveGroupChat", "HeavySwarm", "BatchedGridWorkflow", + "LLMCouncil", + "DebateWithJudge", + "RoundRobin", ] @@ -153,6 +159,7 @@ class SwarmRouter: - MixtureOfAgents: Combines multiple agent types for diverse tasks - SequentialWorkflow: Executes tasks sequentially - ConcurrentWorkflow: Executes tasks in parallel + - RoundRobin: Executes tasks in a round-robin fashion, cycling through agents - "auto": Automatically selects best swarm type via embedding search Methods: @@ -210,6 +217,7 @@ class SwarmRouter: verbose: bool 
= False, worker_tools: List[Callable] = None, aggregation_strategy: str = "synthesis", + chairman_model: str = "gpt-5.1", *args, **kwargs, ): @@ -252,6 +260,7 @@ class SwarmRouter: self.heavy_swarm_swarm_show_output = ( heavy_swarm_swarm_show_output ) + self.chairman_model = chairman_model # Initialize swarm factory for O(1) lookup performance self._swarm_factory = self._initialize_swarm_factory() @@ -300,14 +309,6 @@ class SwarmRouter: "See https://docs.swarms.world/en/latest/swarms/structs/swarm_router/" ) - if ( - self.swarm_type != "HeavySwarm" - and self.agents is None - ): - raise SwarmRouterConfigError( - "SwarmRouter: No agents provided for the swarm. Check the docs to learn of required parameters. https://docs.swarms.world/en/latest/swarms/structs/agent/" - ) - if ( self.swarm_type == "AgentRearrange" and self.rearrange_flow is None @@ -417,7 +418,7 @@ class SwarmRouter: "MALT": self._create_malt, "CouncilAsAJudge": self._create_council_as_judge, "InteractiveGroupChat": self._create_interactive_group_chat, - "HiearchicalSwarm": self._create_hierarchical_swarm, + "HierarchicalSwarm": self._create_hierarchical_swarm, "MixtureOfAgents": self._create_mixture_of_agents, "MajorityVoting": self._create_majority_voting, "GroupChat": self._create_group_chat, @@ -425,6 +426,9 @@ class SwarmRouter: "SequentialWorkflow": self._create_sequential_workflow, "ConcurrentWorkflow": self._create_concurrent_workflow, "BatchedGridWorkflow": self._create_batched_grid_workflow, + "LLMCouncil": self._create_llm_council, + "DebateWithJudge": self._create_debate_with_judge, + "RoundRobin": self._create_round_robin_swarm, } def _create_heavy_swarm(self, *args, **kwargs): @@ -442,6 +446,27 @@ class SwarmRouter: show_dashboard=False, ) + def _create_llm_council(self, *args, **kwargs): + """Factory function for LLMCouncil.""" + return LLMCouncil( + name=self.name, + description=self.description, + output_type=self.output_type, + verbose=self.verbose, + 
chairman_model=self.chairman_model, + ) + + def _create_debate_with_judge(self, *args, **kwargs): + """Factory function for DebateWithJudge.""" + return DebateWithJudge( + pro_agent=self.agents[0], + con_agent=self.agents[1], + judge_agent=self.agents[2], + max_rounds=self.max_loops, + output_type=self.output_type, + verbose=self.verbose, + ) + def _create_agent_rearrange(self, *args, **kwargs): """Factory function for AgentRearrange.""" return AgentRearrange( @@ -578,6 +603,19 @@ class SwarmRouter: **kwargs, ) + def _create_round_robin_swarm(self, *args, **kwargs): + """Factory function for RoundRobinSwarm.""" + return RoundRobinSwarm( + name=self.name, + description=self.description, + agents=self.agents, + max_loops=self.max_loops, + verbose=self.verbose, + return_json_on=self.return_json, + *args, + **kwargs, + ) + def _create_swarm(self, task: str = None, *args, **kwargs): """ Dynamically create and return the specified swarm type with O(1) lookup performance. diff --git a/swarms/structs/swarming_architectures.py b/swarms/structs/swarming_architectures.py index c286b653..f2c09bed 100644 --- a/swarms/structs/swarming_architectures.py +++ b/swarms/structs/swarming_architectures.py @@ -107,49 +107,6 @@ def grid_swarm( return history_output_formatter(conversation, output_type) -# Linear Swarm: Agents process tasks in a sequential linear manner -def linear_swarm( - agents: AgentListType, - tasks: List[str], - output_type: OutputType = "dict", -) -> Union[Dict[str, Any], List[str]]: - """ - Implements a linear swarm where agents process tasks in a sequential manner. - - Args: - agents (AgentListType): A list of Agent objects to participate in the swarm. - tasks (List[str]): A list of tasks to be processed by the agents. - output_type (OutputType, optional): The format of the output. Defaults to "dict". - - Returns: - Union[Dict[str, Any], List[str]]: The formatted output of the swarm's processing. 
- If output_type is "dict", returns a dictionary containing the conversation history. - If output_type is "list", returns a list of responses. - - Raises: - ValueError: If agents or tasks lists are empty. - """ - if not agents or not tasks: - raise ValueError("Agents and tasks lists cannot be empty.") - - conversation = Conversation() - - for agent in agents: - if tasks: - task = tasks.pop(0) - conversation.add( - role="User", - content=task, - ) - response = agent.run(conversation.get_str()) - conversation.add( - role=agent.agent_name, - content=response, - ) - - return history_output_formatter(conversation, output_type) - - # Star Swarm: A central agent first processes all tasks, followed by others def star_swarm( agents: AgentListType, diff --git a/swarms/structs/various_alt_swarms.py b/swarms/structs/various_alt_swarms.py index c4b34f9f..127ba784 100644 --- a/swarms/structs/various_alt_swarms.py +++ b/swarms/structs/various_alt_swarms.py @@ -119,60 +119,6 @@ class CircularSwarm(BaseSwarm): return self._format_return() -class LinearSwarm(BaseSwarm): - """ - Implements a linear swarm where agents process tasks sequentially. - """ - - def __init__( - self, - agents: AgentListType, - name: str = "LinearSwarm", - description: str = "A linear swarm where agents process tasks sequentially", - output_type: str = "dict", - ): - """ - Initialize the LinearSwarm. - - Args: - agents: List of Agent objects or nested list of Agent objects - name: Name of the swarm - description: Description of the swarm's purpose - output_type: Type of output format, one of 'dict', 'list', 'string', 'json', 'yaml', 'xml', etc. 
- """ - super().__init__(agents, name, description, output_type) - - def run(self, tasks: List[str]) -> Union[Dict, List, str]: - """ - Run the linear swarm with the given tasks - - Args: - tasks: List of tasks to be processed - - Returns: - Union[Dict, List, str]: The conversation history in the requested format - """ - if not self.agents or not tasks: - raise ValueError( - "Agents and tasks lists cannot be empty." - ) - - tasks_copy = tasks.copy() - responses = [] - - for agent in self.agents: - if tasks_copy: - task = tasks_copy.pop(0) - response = agent.run(task) - self.conversation.add( - role=agent.agent_name, - content=response, - ) - responses.append(response) - - return self._format_return() - - class StarSwarm(BaseSwarm): """ Implements a star swarm where a central agent processes all tasks, followed by others. @@ -936,6 +882,8 @@ class OneToOne: self, sender: Agent, receiver: Agent, + name: str = "OneToOne", + description: str = "A one-to-one communication pattern between two agents", output_type: str = "dict", ): """ @@ -944,10 +892,14 @@ class OneToOne: Args: sender: The sender agent receiver: The receiver agent + name: Name of the communication pattern + description: Description of the communication pattern's purpose output_type: Type of output format, one of 'dict', 'list', 'string', 'json', 'yaml', 'xml', etc. 
""" self.sender = sender self.receiver = receiver + self.name = name + self.description = description self.output_type = output_type self.conversation = Conversation() @@ -1013,6 +965,8 @@ class Broadcast: self, sender: Agent, receivers: AgentListType, + name: str = "Broadcast", + description: str = "A broadcast communication pattern from one agent to many agents", output_type: str = "dict", ): """ @@ -1021,6 +975,8 @@ class Broadcast: Args: sender: The sender agent receivers: List of receiver agents + name: Name of the communication pattern + description: Description of the communication pattern's purpose output_type: Type of output format, one of 'dict', 'list', 'string', 'json', 'yaml', 'xml', etc. """ self.sender = sender @@ -1029,6 +985,8 @@ class Broadcast: if isinstance(receivers[0], list) else receivers ) + self.name = name + self.description = description self.output_type = output_type self.conversation = Conversation() @@ -1081,6 +1039,8 @@ class OneToThree: self, sender: Agent, receivers: AgentListType, + name: str = "OneToThree", + description: str = "A one-to-three communication pattern from one agent to exactly three agents", output_type: str = "dict", ): """ @@ -1089,6 +1049,8 @@ class OneToThree: Args: sender: The sender agent receivers: List of exactly three receiver agents + name: Name of the communication pattern + description: Description of the communication pattern's purpose output_type: Type of output format, one of 'dict', 'list', 'string', 'json', 'yaml', 'xml', etc. 
""" if len(receivers) != 3: @@ -1098,6 +1060,8 @@ class OneToThree: self.sender = sender self.receivers = receivers + self.name = name + self.description = description self.output_type = output_type self.conversation = Conversation() diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py index e3f0a73d..7e98b817 100644 --- a/swarms/utils/formatter.py +++ b/swarms/utils/formatter.py @@ -13,6 +13,7 @@ from rich.progress import ( from rich.table import Table from rich.text import Text from rich.spinner import Spinner +from rich.tree import Tree from rich.markdown import Markdown @@ -720,6 +721,85 @@ class Formatter: self.console.print() # Add blank line after stopping self._dashboard_live = None + def print_plan_tree( + self, + task_description: str, + steps: List[Dict[str, Any]], + print_on: bool = True, + ) -> None: + """ + Print the plan as a beautiful tree using Rich. + + Args: + task_description: Description of the main task + steps: List of step dictionaries with step_id, description, priority, and optional dependencies + print_on: Whether to print to console (True) or just log (False) + """ + import logging + + logger = logging.getLogger(__name__) + + # Create root tree + tree = Tree( + f"[bold cyan]📋 Plan: {task_description}[/bold cyan]" + ) + + # Priority color mapping + priority_colors = { + "critical": "red", + "high": "yellow", + "medium": "blue", + "low": "green", + } + + priority_icons = { + "critical": "🔴", + "high": "🟠", + "medium": "🟡", + "low": "🟢", + } + + # Create a mapping of step_id to tree nodes for dependency handling + step_nodes = {} + + # First pass: create all nodes + for step in steps: + step_id = step.get("step_id", "") + description = step.get("description", "") + priority = step.get("priority", "medium").lower() + dependencies = step.get("dependencies", []) + + priority_color = priority_colors.get(priority, "white") + priority_icon = priority_icons.get(priority, "○") + + # Create step label with priority indicator + 
step_label = ( + f"[{priority_color}]{priority_icon} {step_id}[/{priority_color}]: " + f"{description}" + ) + + # Add dependencies info if present + if dependencies: + deps_text = ", ".join(dependencies) + step_label += f" [dim](depends on: {deps_text})[/dim]" + + # Add node to tree + step_node = tree.add(step_label) + step_nodes[step_id] = step_node + + # Print the tree + if print_on: + self.console.print("\n") + self.console.print(tree) + self.console.print("") + else: + # Even if print_on is False, log the tree structure + logger.info(f"Plan created: {task_description}") + for step in steps: + logger.info( + f" - {step.get('step_id')} ({step.get('priority')}): {step.get('description')}" + ) + # Global formatter instance with markdown output enabled by default formatter = Formatter(md=False) diff --git a/swarms/utils/litellm_wrapper.py b/swarms/utils/litellm_wrapper.py index be97f5e7..be6a41f6 100644 --- a/swarms/utils/litellm_wrapper.py +++ b/swarms/utils/litellm_wrapper.py @@ -1,16 +1,16 @@ import asyncio import base64 +import socket import traceback import uuid from pathlib import Path from typing import List, Optional -import socket import litellm -from pydantic import BaseModel import requests from litellm import completion, supports_vision from loguru import logger +from pydantic import BaseModel class LiteLLMException(Exception): @@ -402,70 +402,6 @@ class LiteLLM: # Store other types of runtime_args for debugging completion_params["runtime_args"] = runtime_args - # def output_for_tools(self, response: any): - # """ - # Process tool calls from the LLM response and return formatted output. 
- - # Args: - # response: The response object from the LLM API call - - # Returns: - # dict or list: Formatted tool call data, or default response if no tool calls - # """ - # try: - # # Convert response to dict if it's a Pydantic model - # if hasattr(response, "model_dump"): - # response_dict = response.model_dump() - # else: - # response_dict = response - - # print(f"Response dict: {response_dict}") - - # # Check if tool_calls exists and is not None - # if ( - # response_dict.get("choices") - # and response_dict["choices"][0].get("message") - # and response_dict["choices"][0]["message"].get( - # "tool_calls" - # ) - # and len( - # response_dict["choices"][0]["message"][ - # "tool_calls" - # ] - # ) - # > 0 - # ): - # tool_call = response_dict["choices"][0]["message"][ - # "tool_calls" - # ][0] - # if "function" in tool_call: - # return { - # "function": { - # "name": tool_call["function"].get( - # "name", "" - # ), - # "arguments": tool_call["function"].get( - # "arguments", "{}" - # ), - # } - # } - # else: - # # Handle case where tool_call structure is different - # return tool_call - # else: - # # Return a default response when no tool calls are present - # logger.warning( - # "No tool calls found in response, returning default response" - # ) - # return { - # "function": { - # "name": "no_tool_call", - # "arguments": "{}", - # } - # } - # except Exception as e: - # logger.error(f"Error processing tool calls: {str(e)} Traceback: {traceback.format_exc()}") - def output_for_tools(self, response: any): """ Process and extract tool call information from the LLM response. 
diff --git a/tests/structs/test_auto_swarms_builder.py b/tests/structs/test_auto_swarms_builder.py index a1e9085a..3952a794 100644 --- a/tests/structs/test_auto_swarms_builder.py +++ b/tests/structs/test_auto_swarms_builder.py @@ -41,21 +41,33 @@ def test_initialization(): def test_agent_building(): - """Test building individual agents""" + """Test building individual agents from specs""" print_separator() print("Testing Agent Building") try: swarm = AutoSwarmBuilder() - agent = swarm.build_agent( - agent_name="TestAgent", - agent_description="A test agent", - agent_system_prompt="You are a test agent", - max_loops=1, + specs = { + "agents": [ + { + "agent_name": "TestAgent", + "description": "A test agent", + "system_prompt": "You are a test agent", + "max_loops": 1, + } + ] + } + agents = swarm.create_agents_from_specs(specs) + agent = agents[0] + + # Create agent from spec + agents = swarm.create_agents_from_specs( + {"agents": [agent_spec]} ) + agent = agents[0] print("✓ Built agent with configuration:") print(f" - Name: {agent.agent_name}") - print(f" - Description: {agent.description}") + print(f" - Description: {agent.agent_description}") print(f" - Max loops: {agent.max_loops}") print("✓ Agent building test passed") return agent @@ -69,18 +81,25 @@ def test_agent_creation(): print_separator() print("Testing Agent Creation from Task") try: + import json + swarm = AutoSwarmBuilder( name="ResearchSwarm", description="A swarm for research tasks", ) task = "Research the latest developments in quantum computing" - agents = swarm._create_agents(task) + # create_agents returns a JSON string + agent_specs_json = swarm.create_agents(task) + # Parse JSON string to dict + agent_specs = json.loads(agent_specs_json) + # Convert specs to actual Agent objects + agents = swarm.create_agents_from_specs(agent_specs) print("✓ Created agents for research task:") for i, agent in enumerate(agents, 1): print(f" Agent {i}:") print(f" - Name: {agent.agent_name}") - print(f" - 
Description: {agent.description}") + print(f" - Description: {agent.agent_description}") print(f"✓ Created {len(agents)} agents successfully") return agents except Exception as e: @@ -103,7 +122,7 @@ def test_swarm_routing(): task = "Analyze the impact of AI on healthcare" print("Starting task routing...") - result = swarm.swarm_router(agents, task) + result = swarm.initialize_swarm_router(agents, task) print("✓ Task routed successfully") print( @@ -155,7 +174,9 @@ def test_error_handling(): # Test with invalid agent configuration print("Testing invalid agent configuration...") try: - swarm.build_agent("", "", "") + swarm.create_agents_from_specs( + {"agents": [{"agent_name": ""}]} + ) print( "✗ Should have raised an error for empty agent configuration" ) diff --git a/tests/structs/test_custom_agent.py b/tests/structs/test_custom_agent.py deleted file mode 100644 index 3cdeda25..00000000 --- a/tests/structs/test_custom_agent.py +++ /dev/null @@ -1,370 +0,0 @@ -import pytest -import json -from unittest.mock import Mock, patch, AsyncMock -from loguru import logger -from swarms.structs.custom_agent import CustomAgent, AgentResponse - -try: - import pytest_asyncio - ASYNC_AVAILABLE = True -except ImportError: - ASYNC_AVAILABLE = False - pytest_asyncio = None - - -def create_test_custom_agent(): - return CustomAgent( - name="TestAgent", - description="Test agent for unit testing", - base_url="https://api.test.com", - endpoint="v1/test", - headers={"Authorization": "Bearer test-token"}, - timeout=10.0, - verify_ssl=True, - ) - - -@pytest.fixture -def sample_custom_agent(): - return create_test_custom_agent() - - -def test_custom_agent_initialization(): - try: - custom_agent_instance = CustomAgent( - name="TestAgent", - description="Test description", - base_url="https://api.example.com", - endpoint="v1/endpoint", - headers={"Content-Type": "application/json"}, - timeout=30.0, - verify_ssl=True, - ) - assert custom_agent_instance.base_url == "https://api.example.com" - 
assert custom_agent_instance.endpoint == "v1/endpoint" - assert custom_agent_instance.timeout == 30.0 - assert custom_agent_instance.verify_ssl is True - assert "Content-Type" in custom_agent_instance.default_headers - logger.info("CustomAgent initialized successfully") - except Exception as e: - logger.error(f"Failed to initialize CustomAgent: {e}") - raise - - -def test_custom_agent_initialization_with_default_headers(sample_custom_agent): - try: - custom_agent_no_headers = CustomAgent( - name="TestAgent", - description="Test", - base_url="https://api.test.com", - endpoint="test", - ) - assert "Content-Type" in custom_agent_no_headers.default_headers - assert ( - custom_agent_no_headers.default_headers["Content-Type"] - == "application/json" - ) - logger.debug("Default Content-Type header added correctly") - except Exception as e: - logger.error(f"Failed to test default headers: {e}") - raise - - -def test_custom_agent_url_normalization(): - try: - custom_agent_with_slashes = CustomAgent( - name="TestAgent", - description="Test", - base_url="https://api.test.com/", - endpoint="/v1/test", - ) - assert custom_agent_with_slashes.base_url == "https://api.test.com" - assert custom_agent_with_slashes.endpoint == "v1/test" - logger.debug("URL normalization works correctly") - except Exception as e: - logger.error(f"Failed to test URL normalization: {e}") - raise - - -def test_prepare_headers(sample_custom_agent): - try: - prepared_headers = sample_custom_agent._prepare_headers() - assert "Authorization" in prepared_headers - assert prepared_headers["Authorization"] == "Bearer test-token" - - additional_headers = {"X-Custom-Header": "custom-value"} - prepared_headers_with_additional = ( - sample_custom_agent._prepare_headers(additional_headers) - ) - assert prepared_headers_with_additional["X-Custom-Header"] == "custom-value" - assert prepared_headers_with_additional["Authorization"] == "Bearer test-token" - logger.debug("Header preparation works correctly") - except 
Exception as e: - logger.error(f"Failed to test prepare_headers: {e}") - raise - - -def test_prepare_payload_dict(sample_custom_agent): - try: - payload_dict = {"key": "value", "number": 123} - prepared_payload = sample_custom_agent._prepare_payload(payload_dict) - assert isinstance(prepared_payload, str) - parsed = json.loads(prepared_payload) - assert parsed["key"] == "value" - assert parsed["number"] == 123 - logger.debug("Dictionary payload prepared correctly") - except Exception as e: - logger.error(f"Failed to test prepare_payload with dict: {e}") - raise - - -def test_prepare_payload_string(sample_custom_agent): - try: - payload_string = '{"test": "value"}' - prepared_payload = sample_custom_agent._prepare_payload(payload_string) - assert prepared_payload == payload_string - logger.debug("String payload prepared correctly") - except Exception as e: - logger.error(f"Failed to test prepare_payload with string: {e}") - raise - - -def test_prepare_payload_bytes(sample_custom_agent): - try: - payload_bytes = b'{"test": "value"}' - prepared_payload = sample_custom_agent._prepare_payload(payload_bytes) - assert prepared_payload == payload_bytes - logger.debug("Bytes payload prepared correctly") - except Exception as e: - logger.error(f"Failed to test prepare_payload with bytes: {e}") - raise - - -def test_parse_response_success(sample_custom_agent): - try: - mock_response = Mock() - mock_response.status_code = 200 - mock_response.text = '{"message": "success"}' - mock_response.headers = {"content-type": "application/json"} - mock_response.json.return_value = {"message": "success"} - - parsed_response = sample_custom_agent._parse_response(mock_response) - assert isinstance(parsed_response, AgentResponse) - assert parsed_response.status_code == 200 - assert parsed_response.success is True - assert parsed_response.json_data == {"message": "success"} - assert parsed_response.error_message is None - logger.debug("Successful response parsed correctly") - except Exception 
as e: - logger.error(f"Failed to test parse_response success: {e}") - raise - - -def test_parse_response_error(sample_custom_agent): - try: - mock_response = Mock() - mock_response.status_code = 404 - mock_response.text = "Not Found" - mock_response.headers = {"content-type": "text/plain"} - - parsed_response = sample_custom_agent._parse_response(mock_response) - assert isinstance(parsed_response, AgentResponse) - assert parsed_response.status_code == 404 - assert parsed_response.success is False - assert parsed_response.error_message == "HTTP 404" - logger.debug("Error response parsed correctly") - except Exception as e: - logger.error(f"Failed to test parse_response error: {e}") - raise - - -def test_extract_content_openai_format(sample_custom_agent): - try: - openai_response = { - "choices": [ - { - "message": { - "content": "This is the response content" - } - } - ] - } - extracted_content = sample_custom_agent._extract_content(openai_response) - assert extracted_content == "This is the response content" - logger.debug("OpenAI format content extracted correctly") - except Exception as e: - logger.error(f"Failed to test extract_content OpenAI format: {e}") - raise - - -def test_extract_content_anthropic_format(sample_custom_agent): - try: - anthropic_response = { - "content": [ - {"text": "First part "}, - {"text": "second part"} - ] - } - extracted_content = sample_custom_agent._extract_content(anthropic_response) - assert extracted_content == "First part second part" - logger.debug("Anthropic format content extracted correctly") - except Exception as e: - logger.error(f"Failed to test extract_content Anthropic format: {e}") - raise - - -def test_extract_content_generic_format(sample_custom_agent): - try: - generic_response = {"text": "Generic response text"} - extracted_content = sample_custom_agent._extract_content(generic_response) - assert extracted_content == "Generic response text" - logger.debug("Generic format content extracted correctly") - except 
Exception as e: - logger.error(f"Failed to test extract_content generic format: {e}") - raise - - -@patch("swarms.structs.custom_agent.httpx.Client") -def test_run_success(mock_client_class, sample_custom_agent): - try: - mock_response = Mock() - mock_response.status_code = 200 - mock_response.text = '{"choices": [{"message": {"content": "Success"}}]}' - mock_response.json.return_value = { - "choices": [{"message": {"content": "Success"}}] - } - mock_response.headers = {"content-type": "application/json"} - - mock_client_instance = Mock() - mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) - mock_client_instance.__exit__ = Mock(return_value=None) - mock_client_instance.post.return_value = mock_response - mock_client_class.return_value = mock_client_instance - - test_payload = {"message": "test"} - result = sample_custom_agent.run(test_payload) - - assert result == "Success" - logger.info("Run method executed successfully") - except Exception as e: - logger.error(f"Failed to test run success: {e}") - raise - - -@patch("swarms.structs.custom_agent.httpx.Client") -def test_run_error_response(mock_client_class, sample_custom_agent): - try: - mock_response = Mock() - mock_response.status_code = 500 - mock_response.text = "Internal Server Error" - - mock_client_instance = Mock() - mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) - mock_client_instance.__exit__ = Mock(return_value=None) - mock_client_instance.post.return_value = mock_response - mock_client_class.return_value = mock_client_instance - - test_payload = {"message": "test"} - result = sample_custom_agent.run(test_payload) - - assert "Error: HTTP 500" in result - logger.debug("Error response handled correctly") - except Exception as e: - logger.error(f"Failed to test run error response: {e}") - raise - - -@patch("swarms.structs.custom_agent.httpx.Client") -def test_run_request_error(mock_client_class, sample_custom_agent): - try: - import httpx - - 
mock_client_instance = Mock() - mock_client_instance.__enter__ = Mock(return_value=mock_client_instance) - mock_client_instance.__exit__ = Mock(return_value=None) - mock_client_instance.post.side_effect = httpx.RequestError("Connection failed") - mock_client_class.return_value = mock_client_instance - - test_payload = {"message": "test"} - result = sample_custom_agent.run(test_payload) - - assert "Request error" in result - logger.debug("Request error handled correctly") - except Exception as e: - logger.error(f"Failed to test run request error: {e}") - raise - - -@pytest.mark.skipif(not ASYNC_AVAILABLE, reason="pytest-asyncio not installed") -@pytest.mark.asyncio -@patch("swarms.structs.custom_agent.httpx.AsyncClient") -async def test_run_async_success(mock_async_client_class, sample_custom_agent): - try: - mock_response = Mock() - mock_response.status_code = 200 - mock_response.text = '{"content": [{"text": "Async Success"}]}' - mock_response.json.return_value = { - "content": [{"text": "Async Success"}] - } - mock_response.headers = {"content-type": "application/json"} - - mock_client_instance = AsyncMock() - mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) - mock_client_instance.__aexit__ = AsyncMock(return_value=None) - mock_client_instance.post = AsyncMock(return_value=mock_response) - mock_async_client_class.return_value = mock_client_instance - - test_payload = {"message": "test"} - result = await sample_custom_agent.run_async(test_payload) - - assert result == "Async Success" - logger.info("Run_async method executed successfully") - except Exception as e: - logger.error(f"Failed to test run_async success: {e}") - raise - - -@pytest.mark.skipif(not ASYNC_AVAILABLE, reason="pytest-asyncio not installed") -@pytest.mark.asyncio -@patch("swarms.structs.custom_agent.httpx.AsyncClient") -async def test_run_async_error_response(mock_async_client_class, sample_custom_agent): - try: - mock_response = Mock() - mock_response.status_code = 
400 - mock_response.text = "Bad Request" - - mock_client_instance = AsyncMock() - mock_client_instance.__aenter__ = AsyncMock(return_value=mock_client_instance) - mock_client_instance.__aexit__ = AsyncMock(return_value=None) - mock_client_instance.post = AsyncMock(return_value=mock_response) - mock_async_client_class.return_value = mock_client_instance - - test_payload = {"message": "test"} - result = await sample_custom_agent.run_async(test_payload) - - assert "Error: HTTP 400" in result - logger.debug("Async error response handled correctly") - except Exception as e: - logger.error(f"Failed to test run_async error response: {e}") - raise - - -def test_agent_response_dataclass(): - try: - agent_response_instance = AgentResponse( - status_code=200, - content="Success", - headers={"content-type": "application/json"}, - json_data={"key": "value"}, - success=True, - error_message=None, - ) - assert agent_response_instance.status_code == 200 - assert agent_response_instance.content == "Success" - assert agent_response_instance.success is True - assert agent_response_instance.error_message is None - logger.debug("AgentResponse dataclass created correctly") - except Exception as e: - logger.error(f"Failed to test AgentResponse dataclass: {e}") - raise - diff --git a/tests/structs/test_deep_discussion.py b/tests/structs/test_deep_discussion.py index f83a00c5..76aecd00 100644 --- a/tests/structs/test_deep_discussion.py +++ b/tests/structs/test_deep_discussion.py @@ -6,8 +6,10 @@ from swarms.structs.agent import Agent def create_function_agent(name: str, system_prompt: str = None): if system_prompt is None: - system_prompt = f"You are {name}. Provide thoughtful responses." - + system_prompt = ( + f"You are {name}. Provide thoughtful responses." 
+ ) + agent = Agent( agent_name=name, agent_description=f"Test agent {name}", @@ -23,11 +25,11 @@ def create_function_agent(name: str, system_prompt: str = None): def sample_agents(): agent1 = create_function_agent( "Debater1", - "You are a debater who argues for the affirmative position. Be concise and direct." + "You are a debater who argues for the affirmative position. Be concise and direct.", ) agent2 = create_function_agent( "Debater2", - "You are a debater who argues for the negative position. Be concise and direct." + "You are a debater who argues for the negative position. Be concise and direct.", ) return [agent1, agent2] @@ -64,7 +66,7 @@ def test_one_on_one_debate_multiple_loops(sample_agents, sample_task): assert result is not None assert isinstance(result, str) assert len(result) > 0 - + result_list = one_on_one_debate( max_loops=max_loops, task=sample_task, @@ -80,7 +82,9 @@ def test_one_on_one_debate_multiple_loops(sample_agents, sample_task): raise -def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): +def test_one_on_one_debate_agent_alternation( + sample_agents, sample_task +): try: max_loops = 4 result = one_on_one_debate( @@ -92,7 +96,7 @@ def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): assert result is not None assert isinstance(result, list) assert len(result) == max_loops - + agent_names = [] for msg in result: if isinstance(msg, dict): @@ -105,8 +109,10 @@ def test_one_on_one_debate_agent_alternation(sample_agents, sample_task): assert agent_names is not None assert len(agent_names) >= 0 if len(agent_names) > 0: - assert "Debater1" in agent_names or "Debater2" in agent_names - + assert ( + "Debater1" in agent_names or "Debater2" in agent_names + ) + if len(agent_names) > 0: debater1_count = agent_names.count("Debater1") debater2_count = agent_names.count("Debater2") @@ -137,7 +143,9 @@ def test_one_on_one_debate_with_image(sample_agents): raise -def 
test_one_on_one_debate_custom_output_types(sample_agents, sample_task): +def test_one_on_one_debate_custom_output_types( + sample_agents, sample_task +): try: output_type_checks = { "str": str, @@ -163,7 +171,9 @@ def test_one_on_one_debate_custom_output_types(sample_agents, sample_task): raise -def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): +def test_one_on_one_debate_list_output_structure( + sample_agents, sample_task +): try: result = one_on_one_debate( max_loops=2, @@ -174,7 +184,7 @@ def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): assert result is not None assert isinstance(result, list) assert len(result) == 2 - + for message in result: assert message is not None assert isinstance(message, (str, dict)) @@ -191,7 +201,9 @@ def test_one_on_one_debate_list_output_structure(sample_agents, sample_task): def test_one_on_one_debate_too_few_agents(sample_task): try: single_agent = [create_function_agent("SoloAgent")] - with pytest.raises(ValueError, match="There must be exactly two agents"): + with pytest.raises( + ValueError, match="There must be exactly two agents" + ): one_on_one_debate( max_loops=1, task=sample_task, @@ -210,7 +222,9 @@ def test_one_on_one_debate_too_many_agents(sample_task): create_function_agent("Agent2"), create_function_agent("Agent3"), ] - with pytest.raises(ValueError, match="There must be exactly two agents"): + with pytest.raises( + ValueError, match="There must be exactly two agents" + ): one_on_one_debate( max_loops=1, task=sample_task, @@ -225,7 +239,9 @@ def test_one_on_one_debate_too_many_agents(sample_task): def test_one_on_one_debate_empty_agents(sample_task): try: empty_agents = [] - with pytest.raises(ValueError, match="There must be exactly two agents"): + with pytest.raises( + ValueError, match="There must be exactly two agents" + ): one_on_one_debate( max_loops=1, task=sample_task, @@ -265,7 +281,9 @@ def test_one_on_one_debate_none_task(sample_agents): raise -def 
test_one_on_one_debate_invalid_output_type(sample_agents, sample_task): +def test_one_on_one_debate_invalid_output_type( + sample_agents, sample_task +): try: with pytest.raises((ValueError, TypeError)): one_on_one_debate( @@ -289,7 +307,7 @@ def test_one_on_one_debate_zero_loops(sample_agents, sample_task): ) assert result is not None assert isinstance(result, str) - + result_list = one_on_one_debate( max_loops=0, task=sample_task, @@ -327,7 +345,9 @@ def test_one_on_one_debate_different_topics(sample_agents): raise -def test_one_on_one_debate_long_conversation(sample_agents, sample_task): +def test_one_on_one_debate_long_conversation( + sample_agents, sample_task +): try: max_loops = 5 result = one_on_one_debate( @@ -349,11 +369,11 @@ def test_one_on_one_debate_different_agent_personalities(): try: agent1 = create_function_agent( "Optimist", - "You are an optimist. Always see the positive side. Be concise." + "You are an optimist. Always see the positive side. Be concise.", ) agent2 = create_function_agent( "Pessimist", - "You are a pessimist. Always see the negative side. Be concise." + "You are a pessimist. Always see the negative side. Be concise.", ) agents = [agent1, agent2] task = "What is the future of AI?" 
@@ -366,7 +386,7 @@ def test_one_on_one_debate_different_agent_personalities(): assert result is not None assert isinstance(result, list) assert len(result) == 2 - + agent_names = [] for msg in result: if isinstance(msg, dict): @@ -379,14 +399,19 @@ def test_one_on_one_debate_different_agent_personalities(): assert agent_names is not None assert len(agent_names) >= 0 if len(agent_names) > 0: - assert "Optimist" in agent_names or "Pessimist" in agent_names + assert ( + "Optimist" in agent_names + or "Pessimist" in agent_names + ) logger.info("Different agent personalities test passed") except Exception as e: logger.error(f"Failed to test different personalities: {e}") raise -def test_one_on_one_debate_conversation_length_matches_loops(sample_agents, sample_task): +def test_one_on_one_debate_conversation_length_matches_loops( + sample_agents, sample_task +): try: for max_loops in [1, 2, 3, 4]: result = one_on_one_debate( @@ -404,7 +429,9 @@ def test_one_on_one_debate_conversation_length_matches_loops(sample_agents, samp raise -def test_one_on_one_debate_both_agents_participate(sample_agents, sample_task): +def test_one_on_one_debate_both_agents_participate( + sample_agents, sample_task +): try: result = one_on_one_debate( max_loops=2, @@ -415,7 +442,7 @@ def test_one_on_one_debate_both_agents_participate(sample_agents, sample_task): assert result is not None assert isinstance(result, list) assert len(result) == 2 - + roles = [] for msg in result: if isinstance(msg, dict) and "role" in msg: diff --git a/tests/structs/test_graph_workflow.py b/tests/structs/test_graph_workflow.py new file mode 100644 index 00000000..a00eecb0 --- /dev/null +++ b/tests/structs/test_graph_workflow.py @@ -0,0 +1,552 @@ +import pytest +from swarms.structs.graph_workflow import ( + GraphWorkflow, + Node, + NodeType, +) +from swarms.structs.agent import Agent + +try: + import rustworkx as rx + + RUSTWORKX_AVAILABLE = True +except ImportError: + RUSTWORKX_AVAILABLE = False + + +def 
create_test_agent(name: str, description: str = None) -> Agent: + """Create a real agent for testing""" + if description is None: + description = f"Test agent for {name} operations" + + return Agent( + agent_name=name, + agent_description=description, + model_name="gpt-4o-mini", + verbose=False, + print_on=False, + max_loops=1, + ) + + +def test_graph_workflow_basic_node_creation(): + """Test basic GraphWorkflow node creation with real agents""" + # Test basic node creation + agent = create_test_agent( + "TestAgent", "Test agent for node creation" + ) + node = Node.from_agent(agent) + assert node.id == "TestAgent" + assert node.type == NodeType.AGENT + assert node.agent == agent + + # Test node with custom id + node2 = Node(id="CustomID", type=NodeType.AGENT, agent=agent) + assert node2.id == "CustomID" + + +def test_graph_workflow_multi_agent_collaboration(): + """Test GraphWorkflow with multiple agents in a collaboration scenario""" + # Create specialized agents for a business analysis workflow + market_researcher = create_test_agent( + "Market-Researcher", + "Specialist in market analysis and trend identification", + ) + + data_analyst = create_test_agent( + "Data-Analyst", + "Expert in data processing and statistical analysis", + ) + + strategy_consultant = create_test_agent( + "Strategy-Consultant", + "Senior consultant for strategic planning and recommendations", + ) + + # Create workflow with linear execution path + workflow = GraphWorkflow(name="Business-Analysis-Workflow") + workflow.add_node(market_researcher) + workflow.add_node(data_analyst) + workflow.add_node(strategy_consultant) + + # Add edges to define execution order + workflow.add_edge("Market-Researcher", "Data-Analyst") + workflow.add_edge("Data-Analyst", "Strategy-Consultant") + + # Test workflow execution + result = workflow.run( + "Analyze market opportunities for AI in healthcare" + ) + assert result is not None + + +def test_graph_workflow_parallel_execution(): + """Test GraphWorkflow with 
parallel execution paths""" + # Create agents for parallel analysis + technical_analyst = create_test_agent( + "Technical-Analyst", + "Technical feasibility and implementation analysis", + ) + + market_analyst = create_test_agent( + "Market-Analyst", + "Market positioning and competitive analysis", + ) + + financial_analyst = create_test_agent( + "Financial-Analyst", "Financial modeling and ROI analysis" + ) + + risk_assessor = create_test_agent( + "Risk-Assessor", "Risk assessment and mitigation planning" + ) + + # Create workflow with parallel execution + workflow = GraphWorkflow(name="Parallel-Analysis-Workflow") + workflow.add_node(technical_analyst) + workflow.add_node(market_analyst) + workflow.add_node(financial_analyst) + workflow.add_node(risk_assessor) + + # Add edges for fan-out execution (one to many) + workflow.add_edges_from_source( + "Technical-Analyst", + ["Market-Analyst", "Financial-Analyst", "Risk-Assessor"], + ) + + # Test parallel execution + result = workflow.run( + "Evaluate feasibility of launching a new fintech platform" + ) + assert result is not None + + +def test_graph_workflow_complex_topology(): + """Test GraphWorkflow with complex node topology""" + # Create agents for a comprehensive product development workflow + product_manager = create_test_agent( + "Product-Manager", "Product strategy and roadmap management" + ) + + ux_designer = create_test_agent( + "UX-Designer", "User experience design and research" + ) + + backend_developer = create_test_agent( + "Backend-Developer", + "Backend system architecture and development", + ) + + frontend_developer = create_test_agent( + "Frontend-Developer", + "Frontend interface and user interaction development", + ) + + qa_engineer = create_test_agent( + "QA-Engineer", "Quality assurance and testing specialist" + ) + + devops_engineer = create_test_agent( + "DevOps-Engineer", "Deployment and infrastructure management" + ) + + # Create workflow with complex dependencies + workflow = 
GraphWorkflow(name="Product-Development-Workflow") + workflow.add_node(product_manager) + workflow.add_node(ux_designer) + workflow.add_node(backend_developer) + workflow.add_node(frontend_developer) + workflow.add_node(qa_engineer) + workflow.add_node(devops_engineer) + + # Define complex execution topology + workflow.add_edge("Product-Manager", "UX-Designer") + workflow.add_edge("UX-Designer", "Frontend-Developer") + workflow.add_edge("Product-Manager", "Backend-Developer") + workflow.add_edge("Backend-Developer", "QA-Engineer") + workflow.add_edge("Frontend-Developer", "QA-Engineer") + workflow.add_edge("QA-Engineer", "DevOps-Engineer") + + # Test complex workflow execution + result = workflow.run( + "Develop a comprehensive e-commerce platform with AI recommendations" + ) + assert result is not None + + +def test_graph_workflow_error_handling(): + """Test GraphWorkflow error handling and validation""" + # Test with empty workflow + workflow = GraphWorkflow() + result = workflow.run("Test task") + # Empty workflow should handle gracefully + assert result is not None + + # Test workflow compilation and caching + researcher = create_test_agent( + "Researcher", "Research specialist" + ) + workflow.add_node(researcher) + + # First run should compile + result1 = workflow.run("Research task") + assert result1 is not None + + # Second run should use cached compilation + result2 = workflow.run("Another research task") + assert result2 is not None + + +def test_graph_workflow_node_metadata(): + """Test GraphWorkflow with node metadata""" + # Create agents with different priorities and requirements + high_priority_agent = create_test_agent( + "High-Priority-Analyst", "High priority analysis specialist" + ) + + standard_agent = create_test_agent( + "Standard-Analyst", "Standard analysis agent" + ) + + # Create workflow and add nodes with metadata + workflow = GraphWorkflow(name="Metadata-Workflow") + workflow.add_node( + high_priority_agent, + metadata={"priority": "high", 
"timeout": 60}, + ) + workflow.add_node( + standard_agent, metadata={"priority": "normal", "timeout": 30} + ) + + # Add execution dependency + workflow.add_edge("High-Priority-Analyst", "Standard-Analyst") + + # Test execution with metadata + result = workflow.run( + "Analyze business requirements with different priorities" + ) + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_basic(backend): + """Test GraphWorkflow basic functionality with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + + workflow = GraphWorkflow( + name=f"Backend-Test-{backend}", backend=backend + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_edge(agent1, agent2) + + assert len(workflow.nodes) == 2 + assert len(workflow.edges) == 1 + + result = workflow.run("Test task") + assert result is not None + assert "Agent1" in result + assert "Agent2" in result + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_parallel_execution(backend): + """Test parallel execution with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + coordinator = create_test_agent( + "Coordinator", "Coordinates tasks" + ) + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + + workflow = GraphWorkflow( + name=f"Parallel-Test-{backend}", backend=backend + ) + workflow.add_node(coordinator) + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + + workflow.add_edges_from_source( + coordinator, [analyst1, analyst2, analyst3] + ) + + workflow.compile() + assert 
len(workflow._sorted_layers) >= 1 + assert ( + len(workflow._sorted_layers[0]) == 1 + ) # Coordinator in first layer + + result = workflow.run("Analyze data in parallel") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_fan_in_pattern(backend): + """Test fan-in pattern with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + analyst1 = create_test_agent("Analyst1", "First analyst") + analyst2 = create_test_agent("Analyst2", "Second analyst") + analyst3 = create_test_agent("Analyst3", "Third analyst") + synthesizer = create_test_agent( + "Synthesizer", "Synthesizes results" + ) + + workflow = GraphWorkflow( + name=f"FanIn-Test-{backend}", backend=backend + ) + workflow.add_node(analyst1) + workflow.add_node(analyst2) + workflow.add_node(analyst3) + workflow.add_node(synthesizer) + + workflow.add_edges_to_target( + [analyst1, analyst2, analyst3], synthesizer + ) + + workflow.compile() + assert len(workflow._sorted_layers) >= 2 + assert synthesizer.agent_name in workflow.end_points + + result = workflow.run("Synthesize multiple analyses") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_parallel_chain(backend): + """Test parallel chain pattern with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + collector1 = create_test_agent("Collector1", "First collector") + collector2 = create_test_agent("Collector2", "Second collector") + processor1 = create_test_agent("Processor1", "First processor") + processor2 = create_test_agent("Processor2", "Second processor") + + workflow = GraphWorkflow( + name=f"ParallelChain-Test-{backend}", backend=backend + ) + workflow.add_node(collector1) + workflow.add_node(collector2) + workflow.add_node(processor1) + workflow.add_node(processor2) + + 
workflow.add_parallel_chain( + [collector1, collector2], [processor1, processor2] + ) + + workflow.compile() + assert len(workflow.edges) == 4 # 2x2 = 4 edges + + result = workflow.run("Process data from multiple collectors") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_complex_topology(backend): + """Test complex topology with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") for i in range(5) + ] + + workflow = GraphWorkflow( + name=f"Complex-Topology-{backend}", backend=backend + ) + for agent in agents: + workflow.add_node(agent) + + workflow.add_edge(agents[0], agents[1]) + workflow.add_edge(agents[0], agents[2]) + workflow.add_edge(agents[1], agents[3]) + workflow.add_edge(agents[2], agents[3]) + workflow.add_edge(agents[3], agents[4]) + + workflow.compile() + assert len(workflow._sorted_layers) >= 3 + + result = workflow.run("Execute complex workflow") + assert result is not None + + +@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_validation(backend): + """Test workflow validation with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + isolated = create_test_agent("Isolated", "Isolated agent") + + workflow = GraphWorkflow( + name=f"Validation-Test-{backend}", backend=backend + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(isolated) + workflow.add_edge(agent1, agent2) + + validation = workflow.validate(auto_fix=False) + assert isinstance(validation, dict) + assert "is_valid" in validation + + validation_fixed = workflow.validate(auto_fix=True) + assert isinstance(validation_fixed, dict) + + 
+@pytest.mark.parametrize("backend", ["networkx", "rustworkx"]) +def test_graph_workflow_backend_entry_end_points(backend): + """Test entry and end points with both backends""" + if backend == "rustworkx" and not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "Entry agent") + agent2 = create_test_agent("Agent2", "Middle agent") + agent3 = create_test_agent("Agent3", "End agent") + + workflow = GraphWorkflow( + name=f"EntryEnd-Test-{backend}", backend=backend + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + workflow.auto_set_entry_points() + workflow.auto_set_end_points() + + assert agent1.agent_name in workflow.entry_points + assert agent3.agent_name in workflow.end_points + + +def test_graph_workflow_rustworkx_specific(): + """Test rustworkx-specific features""" + if not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow = GraphWorkflow( + name="Rustworkx-Specific-Test", backend="rustworkx" + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + workflow.add_edge(agent1, agent2) + workflow.add_edge(agent2, agent3) + + assert ( + workflow.graph_backend.__class__.__name__ + == "RustworkxBackend" + ) + assert hasattr(workflow.graph_backend, "_node_id_to_index") + assert hasattr(workflow.graph_backend, "_index_to_node_id") + + workflow.compile() + assert len(workflow._sorted_layers) == 3 + + predecessors = list( + workflow.graph_backend.predecessors(agent2.agent_name) + ) + assert agent1.agent_name in predecessors + + descendants = workflow.graph_backend.descendants( + agent1.agent_name + ) + assert agent2.agent_name in descendants + assert agent3.agent_name in descendants + + 
result = workflow.run("Test rustworkx backend") + assert result is not None + + +def test_graph_workflow_rustworkx_large_scale(): + """Test rustworkx with larger workflow""" + if not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agents = [ + create_test_agent(f"Agent{i}", f"Agent {i}") + for i in range(10) + ] + + workflow = GraphWorkflow( + name="Rustworkx-Large-Scale", backend="rustworkx" + ) + for agent in agents: + workflow.add_node(agent) + + for i in range(len(agents) - 1): + workflow.add_edge(agents[i], agents[i + 1]) + + workflow.compile() + assert len(workflow._sorted_layers) == 10 + + result = workflow.run("Test large scale workflow") + assert result is not None + assert len(result) == 10 + + +def test_graph_workflow_rustworkx_agent_objects(): + """Test rustworkx with Agent objects directly in edges""" + if not RUSTWORKX_AVAILABLE: + pytest.skip("rustworkx not available") + + agent1 = create_test_agent("Agent1", "First agent") + agent2 = create_test_agent("Agent2", "Second agent") + agent3 = create_test_agent("Agent3", "Third agent") + + workflow = GraphWorkflow( + name="Rustworkx-Agent-Objects", backend="rustworkx" + ) + workflow.add_node(agent1) + workflow.add_node(agent2) + workflow.add_node(agent3) + + workflow.add_edges_from_source(agent1, [agent2, agent3]) + workflow.add_edges_to_target([agent2, agent3], agent1) + + workflow.compile() + assert len(workflow.edges) == 4 + + result = workflow.run("Test agent objects in edges") + assert result is not None + + +def test_graph_workflow_backend_fallback(): + """Test backend fallback when rustworkx unavailable""" + workflow = GraphWorkflow( + name="Fallback-Test", backend="rustworkx" + ) + agent = create_test_agent("Agent", "Test agent") + workflow.add_node(agent) + + if not RUSTWORKX_AVAILABLE: + assert ( + workflow.graph_backend.__class__.__name__ + == "NetworkXBackend" + ) + else: + assert ( + workflow.graph_backend.__class__.__name__ + == "RustworkxBackend" + ) + + +if __name__ == 
"__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/structs/test_graph_workflow_comprehensive.py b/tests/structs/test_graph_workflow_comprehensive.py deleted file mode 100644 index 5cb6a4a6..00000000 --- a/tests/structs/test_graph_workflow_comprehensive.py +++ /dev/null @@ -1,225 +0,0 @@ -import pytest -from swarms.structs.graph_workflow import ( - GraphWorkflow, - Node, - NodeType, -) -from swarms.structs.agent import Agent - - -def create_test_agent(name: str, description: str = None) -> Agent: - """Create a real agent for testing""" - if description is None: - description = f"Test agent for {name} operations" - - return Agent( - agent_name=name, - agent_description=description, - model_name="gpt-4o-mini", - verbose=False, - print_on=False, - max_loops=1, - ) - - -def test_graph_workflow_basic_node_creation(): - """Test basic GraphWorkflow node creation with real agents""" - # Test basic node creation - agent = create_test_agent( - "TestAgent", "Test agent for node creation" - ) - node = Node.from_agent(agent) - assert node.id == "TestAgent" - assert node.type == NodeType.AGENT - assert node.agent == agent - - # Test node with custom id - node2 = Node(id="CustomID", type=NodeType.AGENT, agent=agent) - assert node2.id == "CustomID" - - -def test_graph_workflow_multi_agent_collaboration(): - """Test GraphWorkflow with multiple agents in a collaboration scenario""" - # Create specialized agents for a business analysis workflow - market_researcher = create_test_agent( - "Market-Researcher", - "Specialist in market analysis and trend identification", - ) - - data_analyst = create_test_agent( - "Data-Analyst", - "Expert in data processing and statistical analysis", - ) - - strategy_consultant = create_test_agent( - "Strategy-Consultant", - "Senior consultant for strategic planning and recommendations", - ) - - # Create workflow with linear execution path - workflow = GraphWorkflow(name="Business-Analysis-Workflow") - workflow.add_node(market_researcher) - 
workflow.add_node(data_analyst) - workflow.add_node(strategy_consultant) - - # Add edges to define execution order - workflow.add_edge("Market-Researcher", "Data-Analyst") - workflow.add_edge("Data-Analyst", "Strategy-Consultant") - - # Test workflow execution - result = workflow.run( - "Analyze market opportunities for AI in healthcare" - ) - assert result is not None - - -def test_graph_workflow_parallel_execution(): - """Test GraphWorkflow with parallel execution paths""" - # Create agents for parallel analysis - technical_analyst = create_test_agent( - "Technical-Analyst", - "Technical feasibility and implementation analysis", - ) - - market_analyst = create_test_agent( - "Market-Analyst", - "Market positioning and competitive analysis", - ) - - financial_analyst = create_test_agent( - "Financial-Analyst", "Financial modeling and ROI analysis" - ) - - risk_assessor = create_test_agent( - "Risk-Assessor", "Risk assessment and mitigation planning" - ) - - # Create workflow with parallel execution - workflow = GraphWorkflow(name="Parallel-Analysis-Workflow") - workflow.add_node(technical_analyst) - workflow.add_node(market_analyst) - workflow.add_node(financial_analyst) - workflow.add_node(risk_assessor) - - # Add edges for fan-out execution (one to many) - workflow.add_edges_from_source( - "Technical-Analyst", - ["Market-Analyst", "Financial-Analyst", "Risk-Assessor"], - ) - - # Test parallel execution - result = workflow.run( - "Evaluate feasibility of launching a new fintech platform" - ) - assert result is not None - - -def test_graph_workflow_complex_topology(): - """Test GraphWorkflow with complex node topology""" - # Create agents for a comprehensive product development workflow - product_manager = create_test_agent( - "Product-Manager", "Product strategy and roadmap management" - ) - - ux_designer = create_test_agent( - "UX-Designer", "User experience design and research" - ) - - backend_developer = create_test_agent( - "Backend-Developer", - "Backend 
system architecture and development", - ) - - frontend_developer = create_test_agent( - "Frontend-Developer", - "Frontend interface and user interaction development", - ) - - qa_engineer = create_test_agent( - "QA-Engineer", "Quality assurance and testing specialist" - ) - - devops_engineer = create_test_agent( - "DevOps-Engineer", "Deployment and infrastructure management" - ) - - # Create workflow with complex dependencies - workflow = GraphWorkflow(name="Product-Development-Workflow") - workflow.add_node(product_manager) - workflow.add_node(ux_designer) - workflow.add_node(backend_developer) - workflow.add_node(frontend_developer) - workflow.add_node(qa_engineer) - workflow.add_node(devops_engineer) - - # Define complex execution topology - workflow.add_edge("Product-Manager", "UX-Designer") - workflow.add_edge("UX-Designer", "Frontend-Developer") - workflow.add_edge("Product-Manager", "Backend-Developer") - workflow.add_edge("Backend-Developer", "QA-Engineer") - workflow.add_edge("Frontend-Developer", "QA-Engineer") - workflow.add_edge("QA-Engineer", "DevOps-Engineer") - - # Test complex workflow execution - result = workflow.run( - "Develop a comprehensive e-commerce platform with AI recommendations" - ) - assert result is not None - - -def test_graph_workflow_error_handling(): - """Test GraphWorkflow error handling and validation""" - # Test with empty workflow - workflow = GraphWorkflow() - result = workflow.run("Test task") - # Empty workflow should handle gracefully - assert result is not None - - # Test workflow compilation and caching - researcher = create_test_agent( - "Researcher", "Research specialist" - ) - workflow.add_node(researcher) - - # First run should compile - result1 = workflow.run("Research task") - assert result1 is not None - - # Second run should use cached compilation - result2 = workflow.run("Another research task") - assert result2 is not None - - -def test_graph_workflow_node_metadata(): - """Test GraphWorkflow with node metadata""" 
- # Create agents with different priorities and requirements - high_priority_agent = create_test_agent( - "High-Priority-Analyst", "High priority analysis specialist" - ) - - standard_agent = create_test_agent( - "Standard-Analyst", "Standard analysis agent" - ) - - # Create workflow and add nodes with metadata - workflow = GraphWorkflow(name="Metadata-Workflow") - workflow.add_node( - high_priority_agent, - metadata={"priority": "high", "timeout": 60}, - ) - workflow.add_node( - standard_agent, metadata={"priority": "normal", "timeout": 30} - ) - - # Add execution dependency - workflow.add_edge("High-Priority-Analyst", "Standard-Analyst") - - # Test execution with metadata - result = workflow.run( - "Analyze business requirements with different priorities" - ) - assert result is not None - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) diff --git a/tests/structs/test_i_agent.py b/tests/structs/test_i_agent.py new file mode 100644 index 00000000..1c1f95c5 --- /dev/null +++ b/tests/structs/test_i_agent.py @@ -0,0 +1,84 @@ +from swarms.agents.i_agent import IterativeReflectiveExpansion + + +def test_ire_agent_initialization(): + """Test IRE agent initialization with default parameters""" + agent = IterativeReflectiveExpansion() + + assert agent is not None + assert agent.agent_name == "General-Reasoning-Agent" + assert agent.max_iterations == 5 + assert agent.output_type == "dict" + assert agent.agent is not None + + +def test_ire_agent_custom_initialization(): + """Test IRE agent initialization with custom parameters""" + agent = IterativeReflectiveExpansion( + agent_name="Custom-IRE-Agent", + description="A custom reasoning agent", + max_iterations=3, + model_name="gpt-4o", + output_type="string", + ) + + assert agent.agent_name == "Custom-IRE-Agent" + assert agent.description == "A custom reasoning agent" + assert agent.max_iterations == 3 + assert agent.output_type == "string" + + +def test_ire_agent_execution(): + """Test IRE agent execution with 
a simple problem""" + agent = IterativeReflectiveExpansion( + agent_name="Test-IRE-Agent", + model_name="gpt-4o", + max_iterations=2, + output_type="dict", + ) + + # Test with a simple reasoning task + task = "What are three main benefits of renewable energy?" + result = agent.run(task) + + # Result should not be None + assert result is not None + # Result should be dict or string based on output_type + assert isinstance(result, (str, dict)) + + +def test_ire_agent_generate_hypotheses(): + """Test IRE agent hypothesis generation""" + agent = IterativeReflectiveExpansion( + agent_name="Hypothesis-Test-Agent", + max_iterations=1, + ) + + task = "How can we reduce carbon emissions?" + hypotheses = agent.generate_initial_hypotheses(task) + + assert hypotheses is not None + assert isinstance(hypotheses, list) + assert len(hypotheses) > 0 + + +def test_ire_agent_workflow(): + """Test complete IRE agent workflow with iterative refinement""" + agent = IterativeReflectiveExpansion( + agent_name="Workflow-Test-Agent", + description="Agent for testing complete workflow", + model_name="gpt-4o", + max_iterations=2, + output_type="dict", + ) + + # Test with a problem that requires iterative refinement + task = "Design an efficient public transportation system for a small city" + result = agent.run(task) + + # Verify the result is valid + assert result is not None + assert isinstance(result, (str, dict)) + + # Check that conversation was populated during execution + assert agent.conversation is not None diff --git a/tests/structs/test_llm_council.py b/tests/structs/test_llm_council.py new file mode 100644 index 00000000..a748b778 --- /dev/null +++ b/tests/structs/test_llm_council.py @@ -0,0 +1,318 @@ +""" +Test file for LLM Council functionality. 
+ +Tests core functionalities of the LLM Council including: +- Initialization (default and custom) +- Running queries +- Batch processing +- Output formatting +""" + +import pytest +from loguru import logger +from dotenv import load_dotenv +from swarms.structs.llm_council import LLMCouncil +from swarms.structs.agent import Agent + +load_dotenv() + + +def test_llm_council_default_initialization(): + """Test LLM Council initialization with default council members.""" + try: + logger.info("Testing LLM Council default initialization...") + + council = LLMCouncil( + verbose=False, output_type="dict-all-except-first" + ) + + assert council is not None, "Council should be initialized" + assert ( + council.name == "LLM Council" + ), "Default name should be 'LLM Council'" + assert ( + len(council.council_members) > 0 + ), "Should have council members" + assert ( + council.chairman is not None + ), "Chairman should be initialized" + assert ( + council.conversation is not None + ), "Conversation should be initialized" + + logger.info( + f"✓ Council initialized with {len(council.council_members)} members" + ) + logger.info("✓ Default initialization test passed") + + except Exception as e: + logger.error(f"✗ Default initialization test failed: {e}") + raise + + +def test_llm_council_custom_initialization(): + """Test LLM Council initialization with custom council members.""" + try: + logger.info("Testing LLM Council custom initialization...") + + # Create custom council members with simpler models + custom_members = [ + Agent( + agent_name="TestAgent1", + agent_description="First test agent", + system_prompt="You are a helpful test agent.", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ), + Agent( + agent_name="TestAgent2", + agent_description="Second test agent", + system_prompt="You are a helpful test agent.", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ), + ] + + council = LLMCouncil( + name="Custom Council", + 
council_members=custom_members, + chairman_model="gpt-4o-mini", + verbose=False, + output_type="string", + ) + + assert council is not None, "Council should be initialized" + assert ( + council.name == "Custom Council" + ), "Name should match custom value" + assert ( + len(council.council_members) == 2 + ), "Should have 2 custom members" + assert ( + council.council_members[0].agent_name == "TestAgent1" + ), "First member should match" + assert ( + council.council_members[1].agent_name == "TestAgent2" + ), "Second member should match" + assert ( + council.output_type == "string" + ), "Output type should be 'string'" + + logger.info("✓ Custom initialization test passed") + + except Exception as e: + logger.error(f"✗ Custom initialization test failed: {e}") + raise + + +def test_llm_council_run(): + """Test LLM Council run method with a simple query.""" + try: + logger.info("Testing LLM Council run method...") + + # Use simpler models for testing + custom_members = [ + Agent( + agent_name="TestAgent1", + agent_description="First test agent", + system_prompt="You are a helpful test agent. Provide concise answers.", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ), + Agent( + agent_name="TestAgent2", + agent_description="Second test agent", + system_prompt="You are a helpful test agent. Provide concise answers.", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ), + ] + + council = LLMCouncil( + council_members=custom_members, + chairman_model="gpt-4o-mini", + verbose=False, + output_type="dict-all-except-first", + ) + + query = "What is 2 + 2? Provide a brief answer." 
+ result = council.run(query) + + # Basic assertions + assert result is not None, "Result should not be None" + assert ( + council.conversation is not None + ), "Conversation should exist" + assert ( + len(council.conversation.conversation_history) > 0 + ), "Conversation should have messages" + + # Enhanced assertions to verify workflow steps + messages = council.conversation.conversation_history + + # Step 1: Verify User query was added + user_messages = [ + msg for msg in messages if msg.get("role") == "User" + ] + assert ( + len(user_messages) > 0 + ), "User query should be in conversation" + + # Step 2: Verify all council members responded + member_responses = [ + msg + for msg in messages + if msg.get("role") in ["TestAgent1", "TestAgent2"] + ] + assert len(member_responses) == len( + custom_members + ), f"All {len(custom_members)} council members should have responded" + + # Step 3: Verify evaluations were performed + evaluation_messages = [ + msg + for msg in messages + if "-Evaluation" in msg.get("role", "") + ] + assert len(evaluation_messages) == len( + custom_members + ), f"All {len(custom_members)} members should have evaluated" + + # Step 4: Verify Chairman synthesis occurred + chairman_messages = [ + msg for msg in messages if msg.get("role") == "Chairman" + ] + assert ( + len(chairman_messages) > 0 + ), "Chairman should have synthesized final response" + + logger.info("✓ Run method test passed") + logger.info( + f"✓ Verified {len(member_responses)} member responses, {len(evaluation_messages)} evaluations, and {len(chairman_messages)} chairman synthesis" + ) + + except Exception as e: + logger.error(f"✗ Run method test failed: {e}") + raise + + +def test_llm_council_batched_run(): + """Test LLM Council batched_run method with multiple tasks.""" + try: + logger.info("Testing LLM Council batched_run method...") + + # Use simpler models for testing + custom_members = [ + Agent( + agent_name="TestAgent1", + agent_description="First test agent", + 
system_prompt="You are a helpful test agent. Provide concise answers.", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ), + Agent( + agent_name="TestAgent2", + agent_description="Second test agent", + system_prompt="You are a helpful test agent. Provide concise answers.", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ), + ] + + council = LLMCouncil( + council_members=custom_members, + chairman_model="gpt-4o-mini", + verbose=False, + output_type="dict-all-except-first", + ) + + tasks = [ + "What is 1 + 1?", + "What is 3 + 3?", + ] + + results = council.batched_run(tasks) + + assert results is not None, "Results should not be None" + assert len(results) == len( + tasks + ), f"Should have {len(tasks)} results" + assert all( + result is not None for result in results + ), "All results should not be None" + + logger.info( + f"✓ Batched run test passed with {len(results)} results" + ) + + except Exception as e: + logger.error(f"✗ Batched run test failed: {e}") + raise + + +def test_llm_council_output_types(): + """Test LLM Council with different output types.""" + try: + logger.info( + "Testing LLM Council with different output types..." + ) + + # Use simpler models for testing + custom_members = [ + Agent( + agent_name="TestAgent1", + agent_description="First test agent", + system_prompt="You are a helpful test agent. Provide concise answers.", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ), + Agent( + agent_name="TestAgent2", + agent_description="Second test agent", + system_prompt="You are a helpful test agent. Provide concise answers.", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + ), + ] + + output_types = ["string", "dict-all-except-first", "final"] + + for output_type in output_types: + logger.info(f"Testing output type: {output_type}") + + council = LLMCouncil( + council_members=custom_members, + chairman_model="gpt-4o-mini", + verbose=False, + output_type=output_type, + ) + + query = "What is 5 + 5? 
Provide a brief answer." + result = council.run(query) + + assert ( + result is not None + ), f"Result should not be None for output type {output_type}" + assert ( + council.output_type == output_type + ), f"Output type should be {output_type}" + + logger.info(f"✓ Output type '{output_type}' test passed") + + logger.info("✓ All output types test passed") + + except Exception as e: + logger.error(f"✗ Output types test failed: {e}") + raise + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/structs/test_multi_agent_debate.py b/tests/structs/test_multi_agent_debate.py index 12737b3b..806a94c6 100644 --- a/tests/structs/test_multi_agent_debate.py +++ b/tests/structs/test_multi_agent_debate.py @@ -18,8 +18,10 @@ from swarms.structs.agent import Agent def create_function_agent(name: str, system_prompt: str = None): if system_prompt is None: - system_prompt = f"You are {name}. Provide concise and direct responses." - + system_prompt = ( + f"You are {name}. Provide concise and direct responses." + ) + agent = Agent( agent_name=name, agent_description=f"Test agent {name}", @@ -34,12 +36,10 @@ def create_function_agent(name: str, system_prompt: str = None): @pytest.fixture def sample_two_agents(): agent1 = create_function_agent( - "Agent1", - "You are Agent1. Provide concise responses." + "Agent1", "You are Agent1. Provide concise responses." ) agent2 = create_function_agent( - "Agent2", - "You are Agent2. Provide concise responses." + "Agent2", "You are Agent2. Provide concise responses." 
) return [agent1, agent2] @@ -71,7 +71,9 @@ def test_one_on_one_debate_initialization(sample_two_agents): assert debate.output_type == "str-all-except-first" logger.info("OneOnOneDebate initialization test passed") except Exception as e: - logger.error(f"Failed to test OneOnOneDebate initialization: {e}") + logger.error( + f"Failed to test OneOnOneDebate initialization: {e}" + ) raise @@ -95,7 +97,9 @@ def test_one_on_one_debate_run(sample_two_agents, sample_task): raise -def test_one_on_one_debate_wrong_number_of_agents(sample_three_agents, sample_task): +def test_one_on_one_debate_wrong_number_of_agents( + sample_three_agents, sample_task +): try: debate = OneOnOneDebate( max_loops=2, @@ -104,13 +108,19 @@ def test_one_on_one_debate_wrong_number_of_agents(sample_three_agents, sample_ta ) with pytest.raises(ValueError, match="exactly two agents"): debate.run(sample_task) - logger.info("OneOnOneDebate wrong number of agents test passed") + logger.info( + "OneOnOneDebate wrong number of agents test passed" + ) except Exception as e: - logger.error(f"Failed to test OneOnOneDebate wrong number of agents: {e}") + logger.error( + f"Failed to test OneOnOneDebate wrong number of agents: {e}" + ) raise -def test_one_on_one_debate_output_types(sample_two_agents, sample_task): +def test_one_on_one_debate_output_types( + sample_two_agents, sample_task +): try: assert sample_two_agents is not None assert sample_task is not None @@ -133,7 +143,9 @@ def test_one_on_one_debate_output_types(sample_two_agents, sample_task): assert isinstance(result, str) logger.info("OneOnOneDebate output types test passed") except Exception as e: - logger.error(f"Failed to test OneOnOneDebate output types: {e}") + logger.error( + f"Failed to test OneOnOneDebate output types: {e}" + ) raise @@ -175,13 +187,19 @@ def test_expert_panel_discussion_initialization(sample_three_agents): assert panel.max_rounds == 2 assert len(panel.agents) == 3 assert panel.moderator is not None - 
logger.info("ExpertPanelDiscussion initialization test passed") + logger.info( + "ExpertPanelDiscussion initialization test passed" + ) except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion initialization: {e}") + logger.error( + f"Failed to test ExpertPanelDiscussion initialization: {e}" + ) raise -def test_expert_panel_discussion_run(sample_three_agents, sample_task): +def test_expert_panel_discussion_run( + sample_three_agents, sample_task +): try: moderator = create_function_agent("Moderator") assert moderator is not None @@ -217,15 +235,23 @@ def test_expert_panel_discussion_insufficient_agents(sample_task): output_type="str-all-except-first", ) assert panel is not None - with pytest.raises(ValueError, match="At least two expert agents"): + with pytest.raises( + ValueError, match="At least two expert agents" + ): panel.run(sample_task) - logger.info("ExpertPanelDiscussion insufficient agents test passed") + logger.info( + "ExpertPanelDiscussion insufficient agents test passed" + ) except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion insufficient agents: {e}") + logger.error( + f"Failed to test ExpertPanelDiscussion insufficient agents: {e}" + ) raise -def test_expert_panel_discussion_no_moderator(sample_three_agents, sample_task): +def test_expert_panel_discussion_no_moderator( + sample_three_agents, sample_task +): try: panel = ExpertPanelDiscussion( max_rounds=2, @@ -233,11 +259,15 @@ def test_expert_panel_discussion_no_moderator(sample_three_agents, sample_task): moderator=None, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="moderator agent is required"): + with pytest.raises( + ValueError, match="moderator agent is required" + ): panel.run(sample_task) logger.info("ExpertPanelDiscussion no moderator test passed") except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion no moderator: {e}") + logger.error( + f"Failed to test ExpertPanelDiscussion no moderator: 
{e}" + ) raise @@ -257,7 +287,9 @@ def test_round_table_discussion_initialization(sample_three_agents): assert round_table.facilitator is not None logger.info("RoundTableDiscussion initialization test passed") except Exception as e: - logger.error(f"Failed to test RoundTableDiscussion initialization: {e}") + logger.error( + f"Failed to test RoundTableDiscussion initialization: {e}" + ) raise @@ -292,15 +324,23 @@ def test_round_table_discussion_insufficient_agents(sample_task): facilitator=facilitator, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="At least two participants"): + with pytest.raises( + ValueError, match="At least two participants" + ): round_table.run(sample_task) - logger.info("RoundTableDiscussion insufficient agents test passed") + logger.info( + "RoundTableDiscussion insufficient agents test passed" + ) except Exception as e: - logger.error(f"Failed to test RoundTableDiscussion insufficient agents: {e}") + logger.error( + f"Failed to test RoundTableDiscussion insufficient agents: {e}" + ) raise -def test_round_table_discussion_no_facilitator(sample_three_agents, sample_task): +def test_round_table_discussion_no_facilitator( + sample_three_agents, sample_task +): try: round_table = RoundTableDiscussion( max_cycles=2, @@ -308,11 +348,15 @@ def test_round_table_discussion_no_facilitator(sample_three_agents, sample_task) facilitator=None, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="facilitator agent is required"): + with pytest.raises( + ValueError, match="facilitator agent is required" + ): round_table.run(sample_task) logger.info("RoundTableDiscussion no facilitator test passed") except Exception as e: - logger.error(f"Failed to test RoundTableDiscussion no facilitator: {e}") + logger.error( + f"Failed to test RoundTableDiscussion no facilitator: {e}" + ) raise @@ -338,7 +382,9 @@ def test_interview_series_initialization(): assert interview.follow_up_depth == 1 
logger.info("InterviewSeries initialization test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries initialization: {e}") + logger.error( + f"Failed to test InterviewSeries initialization: {e}" + ) raise @@ -378,11 +424,15 @@ def test_interview_series_no_interviewer(sample_task): follow_up_depth=1, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both interviewer and interviewee"): + with pytest.raises( + ValueError, match="Both interviewer and interviewee" + ): interview.run(sample_task) logger.info("InterviewSeries no interviewer test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries no interviewer: {e}") + logger.error( + f"Failed to test InterviewSeries no interviewer: {e}" + ) raise @@ -396,11 +446,15 @@ def test_interview_series_no_interviewee(sample_task): follow_up_depth=1, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both interviewer and interviewee"): + with pytest.raises( + ValueError, match="Both interviewer and interviewee" + ): interview.run(sample_task) logger.info("InterviewSeries no interviewee test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries no interviewee: {e}") + logger.error( + f"Failed to test InterviewSeries no interviewee: {e}" + ) raise @@ -425,13 +479,18 @@ def test_interview_series_default_questions(sample_task): assert len(result) >= 0 logger.info("InterviewSeries default questions test passed") except Exception as e: - logger.error(f"Failed to test InterviewSeries default questions: {e}") + logger.error( + f"Failed to test InterviewSeries default questions: {e}" + ) raise def test_peer_review_process_initialization(): try: - reviewers = [create_function_agent("Reviewer1"), create_function_agent("Reviewer2")] + reviewers = [ + create_function_agent("Reviewer1"), + create_function_agent("Reviewer2"), + ] assert reviewers is not None assert len(reviewers) == 2 assert reviewers[0] is 
not None @@ -450,13 +509,18 @@ def test_peer_review_process_initialization(): assert peer_review.review_rounds == 2 logger.info("PeerReviewProcess initialization test passed") except Exception as e: - logger.error(f"Failed to test PeerReviewProcess initialization: {e}") + logger.error( + f"Failed to test PeerReviewProcess initialization: {e}" + ) raise def test_peer_review_process_run(sample_task): try: - reviewers = [create_function_agent("Reviewer1"), create_function_agent("Reviewer2")] + reviewers = [ + create_function_agent("Reviewer1"), + create_function_agent("Reviewer2"), + ] assert reviewers is not None assert len(reviewers) == 2 author = create_function_agent("Author") @@ -491,7 +555,9 @@ def test_peer_review_process_no_reviewers(sample_task): peer_review.run(sample_task) logger.info("PeerReviewProcess no reviewers test passed") except Exception as e: - logger.error(f"Failed to test PeerReviewProcess no reviewers: {e}") + logger.error( + f"Failed to test PeerReviewProcess no reviewers: {e}" + ) raise @@ -504,11 +570,15 @@ def test_peer_review_process_no_author(sample_task): review_rounds=2, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="author agent is required"): + with pytest.raises( + ValueError, match="author agent is required" + ): peer_review.run(sample_task) logger.info("PeerReviewProcess no author test passed") except Exception as e: - logger.error(f"Failed to test PeerReviewProcess no author: {e}") + logger.error( + f"Failed to test PeerReviewProcess no author: {e}" + ) raise @@ -529,7 +599,9 @@ def test_mediation_session_initialization(sample_two_agents): assert mediation.max_sessions == 2 logger.info("MediationSession initialization test passed") except Exception as e: - logger.error(f"Failed to test MediationSession initialization: {e}") + logger.error( + f"Failed to test MediationSession initialization: {e}" + ) raise @@ -567,13 +639,19 @@ def test_mediation_session_insufficient_parties(sample_task): ) with 
pytest.raises(ValueError, match="At least two parties"): mediation.run(sample_task) - logger.info("MediationSession insufficient parties test passed") + logger.info( + "MediationSession insufficient parties test passed" + ) except Exception as e: - logger.error(f"Failed to test MediationSession insufficient parties: {e}") + logger.error( + f"Failed to test MediationSession insufficient parties: {e}" + ) raise -def test_mediation_session_no_mediator(sample_two_agents, sample_task): +def test_mediation_session_no_mediator( + sample_two_agents, sample_task +): try: mediation = MediationSession( parties=sample_two_agents, @@ -581,11 +659,15 @@ def test_mediation_session_no_mediator(sample_two_agents, sample_task): max_sessions=2, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="mediator agent is required"): + with pytest.raises( + ValueError, match="mediator agent is required" + ): mediation.run(sample_task) logger.info("MediationSession no mediator test passed") except Exception as e: - logger.error(f"Failed to test MediationSession no mediator: {e}") + logger.error( + f"Failed to test MediationSession no mediator: {e}" + ) raise @@ -608,7 +690,9 @@ def test_brainstorming_session_initialization(sample_three_agents): assert brainstorming.build_on_ideas is True logger.info("BrainstormingSession initialization test passed") except Exception as e: - logger.error(f"Failed to test BrainstormingSession initialization: {e}") + logger.error( + f"Failed to test BrainstormingSession initialization: {e}" + ) raise @@ -646,15 +730,23 @@ def test_brainstorming_session_insufficient_participants(sample_task): build_on_ideas=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="At least two participants"): + with pytest.raises( + ValueError, match="At least two participants" + ): brainstorming.run(sample_task) - logger.info("BrainstormingSession insufficient participants test passed") + logger.info( + "BrainstormingSession 
insufficient participants test passed" + ) except Exception as e: - logger.error(f"Failed to test BrainstormingSession insufficient participants: {e}") + logger.error( + f"Failed to test BrainstormingSession insufficient participants: {e}" + ) raise -def test_brainstorming_session_no_facilitator(sample_three_agents, sample_task): +def test_brainstorming_session_no_facilitator( + sample_three_agents, sample_task +): try: brainstorming = BrainstormingSession( participants=sample_three_agents, @@ -663,11 +755,15 @@ def test_brainstorming_session_no_facilitator(sample_three_agents, sample_task): build_on_ideas=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="facilitator agent is required"): + with pytest.raises( + ValueError, match="facilitator agent is required" + ): brainstorming.run(sample_task) logger.info("BrainstormingSession no facilitator test passed") except Exception as e: - logger.error(f"Failed to test BrainstormingSession no facilitator: {e}") + logger.error( + f"Failed to test BrainstormingSession no facilitator: {e}" + ) raise @@ -699,7 +795,9 @@ def test_trial_simulation_initialization(): assert trial.phases == ["opening", "closing"] logger.info("TrialSimulation initialization test passed") except Exception as e: - logger.error(f"Failed to test TrialSimulation initialization: {e}") + logger.error( + f"Failed to test TrialSimulation initialization: {e}" + ) raise @@ -746,7 +844,9 @@ def test_trial_simulation_no_prosecution(sample_task): trial.run(sample_task) logger.info("TrialSimulation no prosecution test passed") except Exception as e: - logger.error(f"Failed to test TrialSimulation no prosecution: {e}") + logger.error( + f"Failed to test TrialSimulation no prosecution: {e}" + ) raise @@ -774,7 +874,9 @@ def test_trial_simulation_default_phases(sample_task): assert len(result) >= 0 logger.info("TrialSimulation default phases test passed") except Exception as e: - logger.error(f"Failed to test TrialSimulation default 
phases: {e}") + logger.error( + f"Failed to test TrialSimulation default phases: {e}" + ) raise @@ -797,7 +899,9 @@ def test_council_meeting_initialization(sample_three_agents): assert council.require_consensus is False logger.info("CouncilMeeting initialization test passed") except Exception as e: - logger.error(f"Failed to test CouncilMeeting initialization: {e}") + logger.error( + f"Failed to test CouncilMeeting initialization: {e}" + ) raise @@ -835,15 +939,21 @@ def test_council_meeting_insufficient_members(sample_task): require_consensus=False, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="At least two council members"): + with pytest.raises( + ValueError, match="At least two council members" + ): council.run(sample_task) logger.info("CouncilMeeting insufficient members test passed") except Exception as e: - logger.error(f"Failed to test CouncilMeeting insufficient members: {e}") + logger.error( + f"Failed to test CouncilMeeting insufficient members: {e}" + ) raise -def test_council_meeting_no_chairperson(sample_three_agents, sample_task): +def test_council_meeting_no_chairperson( + sample_three_agents, sample_task +): try: council = CouncilMeeting( council_members=sample_three_agents, @@ -852,11 +962,15 @@ def test_council_meeting_no_chairperson(sample_three_agents, sample_task): require_consensus=False, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="chairperson agent is required"): + with pytest.raises( + ValueError, match="chairperson agent is required" + ): council.run(sample_task) logger.info("CouncilMeeting no chairperson test passed") except Exception as e: - logger.error(f"Failed to test CouncilMeeting no chairperson: {e}") + logger.error( + f"Failed to test CouncilMeeting no chairperson: {e}" + ) raise @@ -880,7 +994,9 @@ def test_mentorship_session_initialization(): assert mentorship.include_feedback is True logger.info("MentorshipSession initialization test passed") except Exception as 
e: - logger.error(f"Failed to test MentorshipSession initialization: {e}") + logger.error( + f"Failed to test MentorshipSession initialization: {e}" + ) raise @@ -918,11 +1034,15 @@ def test_mentorship_session_no_mentor(sample_task): include_feedback=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both mentor and mentee"): + with pytest.raises( + ValueError, match="Both mentor and mentee" + ): mentorship.run(sample_task) logger.info("MentorshipSession no mentor test passed") except Exception as e: - logger.error(f"Failed to test MentorshipSession no mentor: {e}") + logger.error( + f"Failed to test MentorshipSession no mentor: {e}" + ) raise @@ -936,11 +1056,15 @@ def test_mentorship_session_no_mentee(sample_task): include_feedback=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="Both mentor and mentee"): + with pytest.raises( + ValueError, match="Both mentor and mentee" + ): mentorship.run(sample_task) logger.info("MentorshipSession no mentee test passed") except Exception as e: - logger.error(f"Failed to test MentorshipSession no mentee: {e}") + logger.error( + f"Failed to test MentorshipSession no mentee: {e}" + ) raise @@ -963,7 +1087,9 @@ def test_negotiation_session_initialization(sample_two_agents): assert negotiation.include_concessions is True logger.info("NegotiationSession initialization test passed") except Exception as e: - logger.error(f"Failed to test NegotiationSession initialization: {e}") + logger.error( + f"Failed to test NegotiationSession initialization: {e}" + ) raise @@ -1003,13 +1129,19 @@ def test_negotiation_session_insufficient_parties(sample_task): ) with pytest.raises(ValueError, match="At least two parties"): negotiation.run(sample_task) - logger.info("NegotiationSession insufficient parties test passed") + logger.info( + "NegotiationSession insufficient parties test passed" + ) except Exception as e: - logger.error(f"Failed to test NegotiationSession insufficient 
parties: {e}") + logger.error( + f"Failed to test NegotiationSession insufficient parties: {e}" + ) raise -def test_negotiation_session_no_mediator(sample_two_agents, sample_task): +def test_negotiation_session_no_mediator( + sample_two_agents, sample_task +): try: negotiation = NegotiationSession( parties=sample_two_agents, @@ -1018,15 +1150,21 @@ def test_negotiation_session_no_mediator(sample_two_agents, sample_task): include_concessions=True, output_type="str-all-except-first", ) - with pytest.raises(ValueError, match="mediator agent is required"): + with pytest.raises( + ValueError, match="mediator agent is required" + ): negotiation.run(sample_task) logger.info("NegotiationSession no mediator test passed") except Exception as e: - logger.error(f"Failed to test NegotiationSession no mediator: {e}") + logger.error( + f"Failed to test NegotiationSession no mediator: {e}" + ) raise -def test_negotiation_session_without_concessions(sample_two_agents, sample_task): +def test_negotiation_session_without_concessions( + sample_two_agents, sample_task +): try: mediator = create_function_agent("Mediator") assert mediator is not None @@ -1043,13 +1181,19 @@ def test_negotiation_session_without_concessions(sample_two_agents, sample_task) assert result is not None assert isinstance(result, str) assert len(result) >= 0 - logger.info("NegotiationSession without concessions test passed") + logger.info( + "NegotiationSession without concessions test passed" + ) except Exception as e: - logger.error(f"Failed to test NegotiationSession without concessions: {e}") + logger.error( + f"Failed to test NegotiationSession without concessions: {e}" + ) raise -def test_one_on_one_debate_multiple_loops(sample_two_agents, sample_task): +def test_one_on_one_debate_multiple_loops( + sample_two_agents, sample_task +): try: assert sample_two_agents is not None debate = OneOnOneDebate( @@ -1064,11 +1208,15 @@ def test_one_on_one_debate_multiple_loops(sample_two_agents, sample_task): assert 
len(result) >= 0 logger.info("OneOnOneDebate multiple loops test passed") except Exception as e: - logger.error(f"Failed to test OneOnOneDebate multiple loops: {e}") + logger.error( + f"Failed to test OneOnOneDebate multiple loops: {e}" + ) raise -def test_expert_panel_discussion_output_types(sample_three_agents, sample_task): +def test_expert_panel_discussion_output_types( + sample_three_agents, sample_task +): try: moderator = create_function_agent("Moderator") assert moderator is not None @@ -1093,5 +1241,7 @@ def test_expert_panel_discussion_output_types(sample_three_agents, sample_task): assert isinstance(result, str) logger.info("ExpertPanelDiscussion output types test passed") except Exception as e: - logger.error(f"Failed to test ExpertPanelDiscussion output types: {e}") - raise \ No newline at end of file + logger.error( + f"Failed to test ExpertPanelDiscussion output types: {e}" + ) + raise diff --git a/tests/structs/test_reasoning_agent_router.py b/tests/structs/test_reasoning_agent_router.py index cf5a8782..2507058c 100644 --- a/tests/structs/test_reasoning_agent_router.py +++ b/tests/structs/test_reasoning_agent_router.py @@ -6,6 +6,9 @@ from swarms.agents.reasoning_agents import ( ReasoningAgentInitializationError, ReasoningAgentRouter, ) +from dotenv import load_dotenv + +load_dotenv() def test_router_initialization(): @@ -55,7 +58,7 @@ def test_router_initialization(): eval=True, random_models_on=True, majority_voting_prompt="Custom voting prompt", - reasoning_model_name="claude-3-5-sonnet-20240620", + reasoning_model_name="gpt-4o", ) assert ( custom_router is not None diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py index 6d8f74a1..99dd73ae 100644 --- a/tests/structs/test_sequential_workflow.py +++ b/tests/structs/test_sequential_workflow.py @@ -3,21 +3,6 @@ import pytest from swarms import Agent, SequentialWorkflow -# Test SequentialWorkflow class -def test_sequential_workflow_initialization(): - 
workflow = SequentialWorkflow() - assert isinstance(workflow, SequentialWorkflow) - assert len(workflow.tasks) == 0 - assert workflow.max_loops == 1 - assert workflow.autosave is False - assert ( - workflow.saved_state_filepath - == "sequential_workflow_state.json" - ) - assert workflow.restore_state_filepath is None - assert workflow.dashboard is False - - def test_sequential_workflow_initialization_with_agents(): """Test SequentialWorkflow initialization with agents""" agent1 = Agent( diff --git a/tests/structs/test_swarm_architectures.py b/tests/structs/test_swarm_architectures.py index cbe7d4d8..7be89129 100644 --- a/tests/structs/test_swarm_architectures.py +++ b/tests/structs/test_swarm_architectures.py @@ -8,7 +8,6 @@ from swarms.structs.swarming_architectures import ( geometric_swarm, grid_swarm, harmonic_swarm, - linear_swarm, log_swarm, mesh_swarm, one_to_one, @@ -69,21 +68,6 @@ def test_grid_swarm(): assert len(result) > 0 -def test_linear_swarm(): - """Test linear swarm sequential processing""" - agents = create_test_agents(3) - tasks = ["Research task", "Write content", "Review output"] - - result = linear_swarm(agents, tasks) - - assert isinstance(result, list) - assert len(result) > 0 - - for log in result: - assert "role" in log - assert "content" in log - - def test_star_swarm(): """Test star swarm with central and peripheral agents""" agents = create_test_agents(4)