diff --git a/.env.example b/.env.example
index b8ccaf86..a7bd6b36 100644
--- a/.env.example
+++ b/.env.example
@@ -16,6 +16,8 @@ GEMINI_API_KEY=""
## Hugging Face
HUGGINGFACE_TOKEN=""
+GROQ_API_KEY=""
+
## Perplexity AI
PPLX_API_KEY=""
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index f2295d07..2d09ad85 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -1,33 +1,33 @@
---
name: Lint
on: [push, pull_request] # yamllint disable-line rule:truthy
+
jobs:
- yaml-lint:
+ lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- - uses: actions/setup-python@v5
- - run: pip install yamllint
- - run: yamllint .
- flake8-lint:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- - uses: actions/setup-python@v5
- - run: pip install flake8
- - run: flake8 .
- ruff-lint:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- - uses: actions/setup-python@v5
- - run: pip install ruff
- - run: ruff format .
- - run: ruff check --fix .
- pylint-lint:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- - uses: actions/setup-python@v5
- - run: pip install pylint
- - run: pylint swarms --recursive=y
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.10'
+
+ - name: Cache pip dependencies
+ uses: actions/cache@v3
+ with:
+ path: ~/.cache/pip
+ key: ${{ runner.os }}-pip-${{ hashFiles('**/pyproject.toml') }}
+ restore-keys: |
+ ${{ runner.os }}-pip-
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install black==24.2.0 ruff==0.2.1
+
+ - name: Check Black formatting
+ run: black . --check --diff
+
+ - name: Run Ruff linting
+ run: ruff check .
diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml
deleted file mode 100644
index 4a122c7b..00000000
--- a/.github/workflows/semgrep.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-# This workflow uses actions that are not certified by GitHub.
-# They are provided by a third-party and are governed by
-# separate terms of service, privacy policy, and support
-# documentation.
-
-# This workflow file requires a free account on Semgrep.dev to
-# manage rules, file ignores, notifications, and more.
-#
-# See https://semgrep.dev/docs
-
-name: Semgrep
-
-on:
- push:
- branches: [ "master" ]
- pull_request:
- # The branches below must be a subset of the branches above
- branches: [ "master" ]
- schedule:
- - cron: '19 7 * * 3'
-
-permissions:
- contents: read
-
-jobs:
- semgrep:
- permissions:
- contents: read # for actions/checkout to fetch code
- security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
- actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
- name: Scan
- runs-on: ubuntu-latest
- steps:
- # Checkout project source
- - uses: actions/checkout@v4
-
- # Scan code using project's configuration on https://semgrep.dev/manage
- - uses: returntocorp/semgrep-action@713efdd345f3035192eaa63f56867b88e63e4e5d
- with:
- publishToken: ${{ secrets.SEMGREP_APP_TOKEN }}
- publishDeployment: ${{ secrets.SEMGREP_DEPLOYMENT_ID }}
- generateSarif: "1"
-
- # Upload SARIF file generated in previous step
- - name: Upload SARIF file
- uses: github/codeql-action/upload-sarif@v3
- with:
- sarif_file: semgrep.sarif
- if: always()
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 00000000..6e16b0dc
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,31 @@
+name: Run Tests
+
+on:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ branches: [ "master" ]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.10"
+
+ - name: Install Poetry
+ run: |
+ curl -sSL https://install.python-poetry.org | python3 -
+
+ - name: Install dependencies
+ run: |
+ poetry install --with test
+
+ - name: Run tests
+ run: |
+ poetry run pytest tests/ -v
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3cf89799..827c2515 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,5 +1,15 @@
# Contribution Guidelines
+
+
+
+ The Enterprise-Grade Production-Ready Multi-Agent Orchestration Framework
+
+
---
## Table of Contents
@@ -7,10 +17,12 @@
- [Project Overview](#project-overview)
- [Getting Started](#getting-started)
- [Installation](#installation)
+ - [Environment Configuration](#environment-configuration)
- [Project Structure](#project-structure)
- [How to Contribute](#how-to-contribute)
- [Reporting Issues](#reporting-issues)
- [Submitting Pull Requests](#submitting-pull-requests)
+ - [Good First Issues](#good-first-issues)
- [Coding Standards](#coding-standards)
- [Type Annotations](#type-annotations)
- [Docstrings and Documentation](#docstrings-and-documentation)
@@ -19,7 +31,13 @@
- [Areas Needing Contributions](#areas-needing-contributions)
- [Writing Tests](#writing-tests)
- [Improving Documentation](#improving-documentation)
- - [Creating Training Scripts](#creating-training-scripts)
+ - [Adding New Swarm Architectures](#adding-new-swarm-architectures)
+ - [Enhancing Agent Capabilities](#enhancing-agent-capabilities)
+ - [Removing Defunct Code](#removing-defunct-code)
+- [Development Resources](#development-resources)
+ - [Documentation](#documentation)
+ - [Examples and Tutorials](#examples-and-tutorials)
+ - [API Reference](#api-reference)
- [Community and Support](#community-and-support)
- [License](#license)
@@ -27,16 +45,24 @@
## Project Overview
-**swarms** is a library focused on making it simple to orchestrate agents to automate real-world activities. The goal is to automate the world economy with these swarms of agents.
+**Swarms** is an enterprise-grade, production-ready multi-agent orchestration framework that makes it simple to coordinate agents to automate real-world activities. The goal is to automate the world economy with these swarms of agents.
-We need your help to:
+### Key Features
-- **Write Tests**: Ensure the reliability and correctness of the codebase.
-- **Improve Documentation**: Maintain clear and comprehensive documentation.
-- **Add New Orchestration Methods**: Add multi-agent orchestration methods
-- **Removing Defunct Code**: Removing bad code
+| Category | Features | Benefits |
+|----------|----------|-----------|
+| 🏢 Enterprise Architecture | • Production-Ready Infrastructure<br>• High Reliability Systems<br>• Modular Design<br>• Comprehensive Logging | • Reduced downtime<br>• Easier maintenance<br>• Better debugging<br>• Enhanced monitoring |
+| 🤖 Agent Orchestration | • Hierarchical Swarms<br>• Parallel Processing<br>• Sequential Workflows<br>• Graph-based Workflows<br>• Dynamic Agent Rearrangement | • Complex task handling<br>• Improved performance<br>• Flexible workflows<br>• Optimized execution |
+| 🔄 Integration Capabilities | • Multi-Model Support<br>• Custom Agent Creation<br>• Extensive Tool Library<br>• Multiple Memory Systems | • Provider flexibility<br>• Custom solutions<br>• Extended functionality<br>• Enhanced memory management |
+### We Need Your Help To:
+- **Write Tests**: Ensure the reliability and correctness of the codebase
+- **Improve Documentation**: Maintain clear and comprehensive documentation
+- **Add New Orchestration Methods**: Add multi-agent orchestration methods
+- **Remove Defunct Code**: Clean up and remove bad code
+- **Enhance Agent Capabilities**: Improve existing agents and add new ones
+- **Optimize Performance**: Improve speed and efficiency of swarm operations
Your contributions will help us push the boundaries of AI and make this library a valuable resource for the community.
@@ -46,24 +72,65 @@ Your contributions will help us push the boundaries of AI and make this library
### Installation
-You can install swarms using `pip`:
+#### Using pip
+```bash
+pip3 install -U swarms
+```
+
+#### Using uv (Recommended)
+[uv](https://github.com/astral-sh/uv) is a fast Python package installer and resolver, written in Rust.
+
+```bash
+# Install uv
+curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Install swarms using uv
+uv pip install swarms
+```
+
+#### Using poetry
+```bash
+# Install poetry if you haven't already
+curl -sSL https://install.python-poetry.org | python3 -
+
+# Add swarms to your project
+poetry add swarms
+```
+#### From source
```bash
-pip3 install swarms
+# Clone the repository
+git clone https://github.com/kyegomez/swarms.git
+cd swarms
+
+# Install with pip
+pip install -e .
```
-Alternatively, you can clone the repository:
+### Environment Configuration
+
+Create a `.env` file in your project root with the following variables:
```bash
-git clone https://github.com/kyegomez/swarms
+OPENAI_API_KEY=""
+WORKSPACE_DIR="agent_workspace"
+ANTHROPIC_API_KEY=""
+GROQ_API_KEY=""
```
+- [Learn more about environment configuration here](https://docs.swarms.world/en/latest/swarms/install/env/)
+
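+In Python code these variables are typically loaded with `python-dotenv` before any agents are constructed, as several of the repository's examples do. A minimal sketch:
+
+```python
+import os
+
+from dotenv import load_dotenv
+
+# Read the .env file and populate the process environment
+load_dotenv()
+
+# Confirm the keys your agents rely on are actually set
+assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY is not set"
+print("Workspace directory:", os.getenv("WORKSPACE_DIR", "agent_workspace"))
+```
+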
### Project Structure
-- **`swarms/`**: Contains all the source code for the library.
-- **`examples/`**: Includes example scripts and notebooks demonstrating how to use the library.
-- **`tests/`**: (To be created) Will contain unit tests for the library.
-- **`docs/`**: (To be maintained) Contains documentation files.
+- **`swarms/`**: Contains all the source code for the library
+ - **`agents/`**: Agent implementations and base classes
+ - **`structs/`**: Swarm orchestration structures (SequentialWorkflow, AgentRearrange, etc.)
+ - **`tools/`**: Tool implementations and base classes
+ - **`prompts/`**: System prompts and prompt templates
+ - **`utils/`**: Utility functions and helpers
+- **`examples/`**: Includes example scripts and notebooks demonstrating how to use the library
+- **`tests/`**: Unit tests for the library
+- **`docs/`**: Documentation files and guides
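+
+As a quick orientation, the directory layout maps onto the import paths used in the README's usage examples; a minimal sketch, assuming the top-level re-exports shown there:
+
+```python
+# Agent and the orchestration structures from swarms/structs/ are
+# re-exported at the package top level
+from swarms import Agent, SequentialWorkflow
+
+# A minimal agent and single-step workflow built from those classes
+agent = Agent(
+    agent_name="Docs-Agent",
+    model_name="gpt-4o-mini",
+    max_loops=1,
+)
+workflow = SequentialWorkflow(agents=[agent])
+```
+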
---
@@ -79,6 +146,10 @@ If you find any bugs, inconsistencies, or have suggestions for enhancements, ple
- **Description**: Detailed description, steps to reproduce, expected behavior, and any relevant logs or screenshots.
3. **Label Appropriately**: Use labels to categorize the issue (e.g., bug, enhancement, documentation).
+**Issue Templates**: Use our issue templates for bug reports and feature requests:
+- [Bug Report](https://github.com/kyegomez/swarms/issues/new?template=bug_report.md)
+- [Feature Request](https://github.com/kyegomez/swarms/issues/new?template=feature_request.md)
+
### Submitting Pull Requests
We welcome pull requests (PRs) for bug fixes, improvements, and new features. Please follow these guidelines:
@@ -88,6 +159,7 @@ We welcome pull requests (PRs) for bug fixes, improvements, and new features. Pl
```bash
git clone https://github.com/kyegomez/swarms.git
+ cd swarms
```
3. **Create a New Branch**: Use a descriptive branch name.
@@ -121,6 +193,13 @@ We welcome pull requests (PRs) for bug fixes, improvements, and new features. Pl
**Note**: It's recommended to create small and focused PRs for easier review and faster integration.
+### Good First Issues
+
+The easiest way to contribute is to pick any issue with the `good first issue` tag 💪. These are specifically designed for new contributors:
+
+- [Good First Issues](https://github.com/kyegomez/swarms/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
+- [Contributing Board](https://github.com/users/kyegomez/projects/1) - Participate in Roadmap discussions!
+
---
## Coding Standards
@@ -204,6 +283,7 @@ We have several areas where contributions are particularly welcome.
- Write unit tests for existing code in `swarms/`.
- Identify edge cases and potential failure points.
- Ensure tests are repeatable and independent.
+ - Add integration tests for swarm orchestration methods.
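+
+A minimal, self-contained unit test might look like the sketch below; the `agent_name` and `max_loops` attributes read back in the assertions are assumptions based on the constructor arguments shown in the README examples:
+
+```python
+# tests/test_agent_init.py (hypothetical test module name)
+from swarms import Agent
+
+
+def test_agent_keeps_its_configuration():
+    """The Agent should retain the configuration it was constructed with."""
+    agent = Agent(
+        agent_name="Test-Agent",
+        model_name="gpt-4o-mini",
+        max_loops=1,
+    )
+    # These attribute names mirror the constructor arguments; adjust them if
+    # the implementation stores the values differently.
+    assert agent.agent_name == "Test-Agent"
+    assert agent.max_loops == 1
+```
+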
### Improving Documentation
@@ -212,27 +292,113 @@ We have several areas where contributions are particularly welcome.
- Update docstrings to reflect any changes.
- Add examples and tutorials in the `examples/` directory.
- Improve or expand the content in the `docs/` directory.
+ - Create video tutorials and walkthroughs.
+
+### Adding New Swarm Architectures
+
+- **Goal**: Provide new multi-agent orchestration methods.
+- **Current Architectures**:
+ - [SequentialWorkflow](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/)
+ - [AgentRearrange](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/)
+ - [MixtureOfAgents](https://docs.swarms.world/en/latest/swarms/structs/moa/)
+ - [SpreadSheetSwarm](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/)
+ - [ForestSwarm](https://docs.swarms.world/en/latest/swarms/structs/forest_swarm/)
+ - [GraphWorkflow](https://docs.swarms.world/en/latest/swarms/structs/graph_swarm/)
+ - [GroupChat](https://docs.swarms.world/en/latest/swarms/structs/group_chat/)
+ - [SwarmRouter](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/)
+
+### Enhancing Agent Capabilities
+
+- **Goal**: Improve existing agents and add new specialized agents.
+- **Areas of Focus**:
+ - Financial analysis agents
+ - Medical diagnosis agents
+ - Code generation and review agents
+ - Research and analysis agents
+ - Creative content generation agents
+
+### Removing Defunct Code
+
+- **Goal**: Clean up and remove bad code to improve maintainability.
+- **Tasks**:
+ - Identify unused or deprecated code.
+ - Remove duplicate implementations.
+ - Simplify complex functions.
+ - Update outdated dependencies.
+
+---
+
+## Development Resources
+
+### Documentation
+
+- **Official Documentation**: [docs.swarms.world](https://docs.swarms.world)
+- **Installation Guide**: [Installation](https://docs.swarms.world/en/latest/swarms/install/install/)
+- **Quickstart Guide**: [Get Started](https://docs.swarms.world/en/latest/swarms/install/quickstart/)
+- **Agent Architecture**: [Agent Internal Mechanisms](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/)
+- **Agent API**: [Agent API](https://docs.swarms.world/en/latest/swarms/structs/agent/)
+
+### Examples and Tutorials
+
+- **Basic Examples**: [examples/](https://github.com/kyegomez/swarms/tree/master/examples)
+- **Agent Examples**: [examples/single_agent/](https://github.com/kyegomez/swarms/tree/master/examples/single_agent)
+- **Multi-Agent Examples**: [examples/multi_agent/](https://github.com/kyegomez/swarms/tree/master/examples/multi_agent)
+- **Tool Examples**: [examples/tools/](https://github.com/kyegomez/swarms/tree/master/examples/tools)
-### Creating Multi-Agent Orchestration Methods
+### API Reference
-- **Goal**: Provide new multi-agent orchestration methods
+- **Core Classes**: [swarms/structs/](https://github.com/kyegomez/swarms/tree/master/swarms/structs)
+- **Agent Implementations**: [swarms/agents/](https://github.com/kyegomez/swarms/tree/master/swarms/agents)
+- **Tool Implementations**: [swarms/tools/](https://github.com/kyegomez/swarms/tree/master/swarms/tools)
+- **Utility Functions**: [swarms/utils/](https://github.com/kyegomez/swarms/tree/master/swarms/utils)
---
## Community and Support
+### Connect With Us
+
+| Platform | Link | Description |
+|----------|------|-------------|
+| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
+| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
+| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
+| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
+| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
+| 🎫 Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events |
+
+### Onboarding Session
+
+Get onboarded by the creator and lead maintainer of Swarms, Kye Gomez, who will walk you through installation, usage examples, and building your custom use case! [CLICK HERE](https://cal.com/swarms/swarms-onboarding-session)
+
+### Community Guidelines
+
- **Communication**: Engage with the community by participating in discussions on issues and pull requests.
- **Respect**: Maintain a respectful and inclusive environment.
- **Feedback**: Be open to receiving and providing constructive feedback.
+- **Collaboration**: Work together to improve the project for everyone.
---
## License
-By contributing to swarms, you agree that your contributions will be licensed under the [MIT License](LICENSE).
+By contributing to swarms, you agree that your contributions will be licensed under the [Apache License](LICENSE).
+
+---
+
+## Citation
+
+If you use **swarms** in your research, please cite the project by referencing the metadata in [CITATION.cff](./CITATION.cff).
---
Thank you for contributing to swarms! Your efforts help make this project better for everyone.
-If you have any questions or need assistance, please feel free to open an issue or reach out to the maintainers.
\ No newline at end of file
+If you have any questions or need assistance, please feel free to:
+- Open an issue on GitHub
+- Join our Discord community
+- Reach out to the maintainers
+- Schedule an onboarding session
+
+**Happy contributing! 🚀**
\ No newline at end of file
diff --git a/README.md b/README.md
index 8826000e..94af41d3 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
@@ -98,36 +98,15 @@
## ✨ Features
+Swarms delivers a comprehensive, enterprise-grade multi-agent infrastructure platform designed for production-scale deployments and seamless integration with existing systems. [Learn more about the swarms feature set here](https://docs.swarms.world/en/latest/swarms/features/)
+
| Category | Features | Benefits |
|----------|----------|-----------|
-| 🏢 Enterprise Architecture | • Production-Ready Infrastructure<br>• High Reliability Systems<br>• Modular Design<br>• Comprehensive Logging | • Reduced downtime<br>• Easier maintenance<br>• Better debugging<br>• Enhanced monitoring |
-| 🤖 Agent Orchestration | • Hierarchical Swarms<br>• Parallel Processing<br>• Sequential Workflows<br>• Graph-based Workflows<br>• Dynamic Agent Rearrangement | • Complex task handling<br>• Improved performance<br>• Flexible workflows<br>• Optimized execution |
-| 🔄 Integration Capabilities | • Multi-Model Support<br>• Custom Agent Creation<br>• Extensive Tool Library<br>• Multiple Memory Systems | • Provider flexibility<br>• Custom solutions<br>• Extended functionality<br>• Enhanced memory management |
-| 📈 Scalability | • Concurrent Processing<br>• Resource Management<br>• Load Balancing<br>• Horizontal Scaling | • Higher throughput<br>• Efficient resource use<br>• Better performance<br>• Easy scaling |
-| 🛠️ Developer Tools | • Simple API<br>• Extensive Documentation<br>• Active Community<br>• CLI Tools | • Faster development<br>• Easy learning curve<br>• Community support<br>• Quick deployment |
-| 🔐 Security Features | • Error Handling<br>• Rate Limiting<br>• Monitoring Integration<br>• Audit Logging | • Improved reliability<br>• API protection<br>• Better monitoring<br>• Enhanced tracking |
-| 📊 Advanced Features | • SpreadsheetSwarm<br>• Group Chat<br>• Agent Registry<br>• Mixture of Agents | • Mass agent management<br>• Collaborative AI<br>• Centralized control<br>• Complex solutions |
-| 🔌 Provider Support | • OpenAI<br>• Anthropic<br>• ChromaDB<br>• Custom Providers | • Provider flexibility<br>• Storage options<br>• Custom integration<br>• Vendor independence |
-| 💪 Production Features | • Automatic Retries<br>• Async Support<br>• Environment Management<br>• Type Safety | • Better reliability<br>• Improved performance<br>• Easy configuration<br>• Safer code |
-| 🎯 Use Case Support | • Task-Specific Agents<br>• Custom Workflows<br>• Industry Solutions<br>• Extensible Framework | • Quick deployment<br>• Flexible solutions<br>• Industry readiness<br>• Easy customization |
-
-
-## Guides and Walkthroughs
-Refer to our documentation for production grade implementation details.
-
-
-| Section | Links |
-|----------------------|--------------------------------------------------------------------------------------------|
-| Installation | [Installation](https://docs.swarms.world/en/latest/swarms/install/install/) |
-| Quickstart | [Get Started](https://docs.swarms.world/en/latest/swarms/install/quickstart/) |
-| Agent Internal Mechanisms | [Agent Architecture](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/) |
-| Agent API | [Agent API](https://docs.swarms.world/en/latest/swarms/structs/agent/) |
-| Integrating External Agents Griptape, Autogen, etc | [Integrating External APIs](https://docs.swarms.world/en/latest/swarms/agents/external_party_agents/) |
-| Creating Agents from YAML | [Creating Agents from YAML](https://docs.swarms.world/en/latest/swarms/agents/create_agents_yaml/) |
-| Why You Need Swarms | [Why MultiAgent Collaboration is Necessary](https://docs.swarms.world/en/latest/swarms/concept/why/) |
-| Swarm Architectures Analysis | [Swarm Architectures](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) |
-| Choosing the Right Swarm for Your Business Problem¶ | [CLICK HERE](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) |
-| AgentRearrange Docs| [CLICK HERE](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) |
+| 🏢 **Enterprise Architecture** | • Production-Ready Infrastructure<br>• High Availability Systems<br>• Modular Microservices Design<br>• Comprehensive Observability<br>• Backwards Compatibility | • 99.9%+ Uptime Guarantee<br>• Reduced Operational Overhead<br>• Seamless Legacy Integration<br>• Enhanced System Monitoring<br>• Risk-Free Migration Path |
+| 🤖 **Multi-Agent Orchestration** | • Hierarchical Agent Swarms<br>• Parallel Processing Pipelines<br>• Sequential Workflow Orchestration<br>• Graph-Based Agent Networks<br>• Dynamic Agent Composition<br>• Agent Registry Management | • Complex Business Process Automation<br>• Scalable Task Distribution<br>• Flexible Workflow Adaptation<br>• Optimized Resource Utilization<br>• Centralized Agent Governance<br>• Enterprise-Grade Agent Lifecycle Management |
+| 🔄 **Enterprise Integration** | • Multi-Model Provider Support<br>• Custom Agent Development Framework<br>• Extensive Enterprise Tool Library<br>• Multiple Memory Systems<br>• Backwards Compatibility with LangChain, AutoGen, CrewAI<br>• Standardized API Interfaces | • Vendor-Agnostic Architecture<br>• Custom Solution Development<br>• Extended Functionality Integration<br>• Enhanced Knowledge Management<br>• Seamless Framework Migration<br>• Reduced Integration Complexity |
+| 📈 **Enterprise Scalability** | • Concurrent Multi-Agent Processing<br>• Intelligent Resource Management<br>• Load Balancing & Auto-Scaling<br>• Horizontal Scaling Capabilities<br>• Performance Optimization<br>• Capacity Planning Tools | • High-Throughput Processing<br>• Cost-Effective Resource Utilization<br>• Elastic Scaling Based on Demand<br>• Linear Performance Scaling<br>• Optimized Response Times<br>• Predictable Growth Planning |
+| 🛠️ **Developer Experience** | • Intuitive Enterprise API<br>• Comprehensive Documentation<br>• Active Enterprise Community<br>• CLI & SDK Tools<br>• IDE Integration Support<br>• Code Generation Templates | • Accelerated Development Cycles<br>• Reduced Learning Curve<br>• Expert Community Support<br>• Rapid Deployment Capabilities<br>• Enhanced Developer Productivity<br>• Standardized Development Patterns |
## Install 💻
@@ -171,6 +150,8 @@ $ pip install -e .
## Environment Configuration
+[Learn more about the environment configuration here](https://docs.swarms.world/en/latest/swarms/install/env/)
+
```
OPENAI_API_KEY=""
WORKSPACE_DIR="agent_workspace"
@@ -178,1898 +159,424 @@ ANTHROPIC_API_KEY=""
GROQ_API_KEY=""
```
-- [Learn more about the environment configuration here](https://docs.swarms.world/en/latest/swarms/install/env/)
-
----
-
-## `Agent` Class
-The `Agent` class is a customizable autonomous component of the Swarms framework that integrates LLMs, tools, and long-term memory. Its `run` method processes text tasks and optionally handles image inputs through vision-language models.
-
-```mermaid
-graph TD
- A[Agent] --> B[Initialize]
- B --> C[Process Task]
- C --> D[Execute Tools]
- D --> E[Generate Response]
- E --> F[Return Output]
- C --> G[Long-term Memory]
- G --> C
-```
+### 🤖 Your First Agent
-
-
-## Simple Example
+An **Agent** is the fundamental building block of a swarm: an autonomous entity powered by an LLM + Tools + Memory. [Learn more here](https://docs.swarms.world/en/latest/swarms/structs/agent/)
```python
from swarms import Agent
+# Initialize a new agent
agent = Agent(
- agent_name="Stock-Analysis-Agent",
- model_name="gpt-4o-mini",
- max_loops="auto",
- interactive=True,
- streaming_on=True,
+ model_name="gpt-4o-mini", # Specify the LLM
+ max_loops=1, # Set the number of interactions
+ interactive=True, # Enable interactive mode for real-time feedback
)
-agent.run("What is the current market trend for tech stocks?")
-
+# Run the agent with a task
+agent.run("What are the key benefits of using a multi-agent system?")
```
-### Settings and Customization
-
-The `Agent` class offers a range of settings to tailor its behavior to specific needs. Some key settings include:
-
-| Setting | Description | Default Value |
-| --- | --- | --- |
-| `agent_name` | The name of the agent. | "DefaultAgent" |
-| `system_prompt` | The system prompt to use for the agent. | "Default system prompt." |
-| `llm` | The language model to use for processing tasks. | `OpenAIChat` instance |
-| `max_loops` | The maximum number of loops to execute for a task. | 1 |
-| `autosave` | Enables or disables autosaving of the agent's state. | False |
-| `dashboard` | Enables or disables the dashboard for the agent. | False |
-| `verbose` | Controls the verbosity of the agent's output. | False |
-| `dynamic_temperature_enabled` | Enables or disables dynamic temperature adjustment for the language model. | False |
-| `saved_state_path` | The path to save the agent's state. | "agent_state.json" |
-| `user_name` | The username associated with the agent. | "default_user" |
-| `retry_attempts` | The number of retry attempts for failed tasks. | 1 |
-| `context_length` | The maximum length of the context to consider for tasks. | 200000 |
-| `return_step_meta` | Controls whether to return step metadata in the output. | False |
-| `output_type` | The type of output to return (e.g., "json", "string"). | "string" |
+### 🤝 Your First Swarm: Multi-Agent Collaboration
+A **Swarm** consists of multiple agents working together. This simple example creates a two-agent workflow for researching and writing a blog post. [Learn More About SequentialWorkflow](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/)
```python
-import os
-from swarms import Agent
+from swarms import Agent, SequentialWorkflow
-from swarms.prompts.finance_agent_sys_prompt import (
- FINANCIAL_AGENT_SYS_PROMPT,
-)
-# Initialize the agent
-agent = Agent(
- agent_name="Financial-Analysis-Agent",
- system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+# Agent 1: The Researcher
+researcher = Agent(
+ agent_name="Researcher",
+ system_prompt="Your job is to research the provided topic and provide a detailed summary.",
model_name="gpt-4o-mini",
- max_loops=1,
- autosave=True,
- dashboard=False,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="finance_agent.json",
- user_name="swarms_corp",
- retry_attempts=1,
- context_length=200000,
- return_step_meta=False,
- output_type="string",
- streaming_on=False,
)
-
-agent.run(
- "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
-)
-
-```
------
-
-### Integrating RAG with Swarms for Enhanced Long-Term Memory
-
-`Agent` equipped with quasi-infinite long term memory using RAG (Relational Agent Graph) for advanced document understanding, analysis, and retrieval capabilities.
-
-**Mermaid Diagram for RAG Integration**
-```mermaid
-graph TD
- A[Initialize Agent with RAG] --> B[Receive Task]
- B --> C[Query Long-Term Memory]
- C --> D[Process Task with Context]
- D --> E[Generate Response]
- E --> F[Update Long-Term Memory]
- F --> G[Return Output]
-```
-
-```python
-from swarms import Agent
-from swarms.prompts.finance_agent_sys_prompt import (
- FINANCIAL_AGENT_SYS_PROMPT,
-)
-import os
-
-from swarms_memory import ChromaDB
-
-# Initialize the ChromaDB client for long-term memory management
-chromadb = ChromaDB(
- metric="cosine", # Metric for similarity measurement
- output_dir="finance_agent_rag", # Directory for storing RAG data
- # docs_folder="artifacts", # Uncomment and specify the folder containing your documents
-)
-
-# Initialize the agent with RAG capabilities
-agent = Agent(
- agent_name="Financial-Analysis-Agent",
- system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
- agent_description="Agent creates a comprehensive financial analysis",
+# Agent 2: The Writer
+writer = Agent(
+ agent_name="Writer",
+ system_prompt="Your job is to take the research summary and write a beautiful, engaging blog post about it.",
model_name="gpt-4o-mini",
- max_loops="auto", # Auto-adjusts loops based on task complexity
- autosave=True, # Automatically saves agent state
- dashboard=False, # Disables dashboard for this example
- verbose=True, # Enables verbose mode for detailed output
- streaming_on=True, # Enables streaming for real-time processing
- dynamic_temperature_enabled=True, # Dynamically adjusts temperature for optimal performance
- saved_state_path="finance_agent.json", # Path to save agent state
- user_name="swarms_corp", # User name for the agent
- retry_attempts=3, # Number of retry attempts for failed tasks
- context_length=200000, # Maximum length of the context to consider
- long_term_memory=chromadb, # Integrates ChromaDB for long-term memory management
- return_step_meta=False,
- output_type="string",
-)
-
-# Run the agent with a sample task
-agent.run(
- "What are the components of a startup's stock incentive equity plan"
-)
-```
-
-
-## Structured Outputs
-
-1. Create a tool schema
-2. Create a function schema
-3. Create a tool list dictionary
-4. Initialize the agent
-5. Run the agent
-6. Print the output
-7. Convert the output to a dictionary
-
-```python
-
-from dotenv import load_dotenv
-
-from swarms import Agent
-from swarms.prompts.finance_agent_sys_prompt import (
- FINANCIAL_AGENT_SYS_PROMPT,
)
-from swarms.utils.str_to_dict import str_to_dict
-
-load_dotenv()
-
-tools = [
- {
- "type": "function",
- "function": {
- "name": "get_stock_price",
- "description": "Retrieve the current stock price and related information for a specified company.",
- "parameters": {
- "type": "object",
- "properties": {
- "ticker": {
- "type": "string",
- "description": "The stock ticker symbol of the company, e.g. AAPL for Apple Inc.",
- },
- "include_history": {
- "type": "boolean",
- "description": "Indicates whether to include historical price data along with the current price.",
- },
- "time": {
- "type": "string",
- "format": "date-time",
- "description": "Optional parameter to specify the time for which the stock data is requested, in ISO 8601 format.",
- },
- },
- "required": [
- "ticker",
- "include_history",
- "time",
- ],
- },
- },
- }
-]
-
-# Initialize the agent
-agent = Agent(
- agent_name="Financial-Analysis-Agent",
- agent_description="Personal finance advisor agent",
- system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
- max_loops=1,
- tools_list_dictionary=tools,
-)
+# Create a sequential workflow where the researcher's output feeds into the writer's input
+workflow = SequentialWorkflow(agents=[researcher, writer])
-out = agent.run(
- "What is the current stock price for Apple Inc. (AAPL)? Include historical price data.",
-)
-
-print(out)
-
-print(type(out))
-
-print(str_to_dict(out))
+# Run the workflow on a task
+final_post = workflow.run("The history and future of artificial intelligence")
+print(final_post)
-print(type(str_to_dict(out)))
```
--------
-
-### Misc Agent Settings
-We provide vast array of features to save agent states using json, yaml, toml, upload pdfs, batched jobs, and much more!
-
-
-**Method Table**
-
-| Method | Description |
-| --- | --- |
-| `to_dict()` | Converts the agent object to a dictionary. |
-| `to_toml()` | Converts the agent object to a TOML string. |
-| `model_dump_json()` | Dumps the model to a JSON file. |
-| `model_dump_yaml()` | Dumps the model to a YAML file. |
-| `ingest_docs()` | Ingests documents into the agent's knowledge base. |
-| `receive_message()` | Receives a message from a user and processes it. |
-| `send_agent_message()` | Sends a message from the agent to a user. |
-| `filtered_run()` | Runs the agent with a filtered system prompt. |
-| `bulk_run()` | Runs the agent with multiple system prompts. |
-| `add_memory()` | Adds a memory to the agent. |
-| `check_available_tokens()` | Checks the number of available tokens for the agent. |
-| `tokens_checks()` | Performs token checks for the agent. |
-| `print_dashboard()` | Prints the dashboard of the agent. |
-| `get_docs_from_doc_folders()` | Fetches all the documents from the doc folders. |
-
-
-
-```python
-# # Convert the agent object to a dictionary
-print(agent.to_dict())
-print(agent.to_toml())
-print(agent.model_dump_json())
-print(agent.model_dump_yaml())
-
-# Ingest documents into the agent's knowledge base
-("your_pdf_path.pdf")
-
-# Receive a message from a user and process it
-agent.receive_message(name="agent_name", message="message")
-
-# Send a message from the agent to a user
-agent.send_agent_message(agent_name="agent_name", message="message")
-
-# Ingest multiple documents into the agent's knowledge base
-agent.ingest_docs("your_pdf_path.pdf", "your_csv_path.csv")
-
-# Run the agent with a filtered system prompt
-agent.filtered_run(
- "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
-)
-
-# Run the agent with multiple system prompts
-agent.bulk_run(
- [
- "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?",
- "Another system prompt",
- ]
-)
-
-# Add a memory to the agent
-agent.add_memory("Add a memory to the agent")
-
-# Check the number of available tokens for the agent
-agent.check_available_tokens()
-
-# Perform token checks for the agent
-agent.tokens_checks()
-
-# Print the dashboard of the agent
-agent.print_dashboard()
-
-# Fetch all the documents from the doc folders
-agent.get_docs_from_doc_folders()
+-----
-# Activate agent ops
+## 🏗️ Multi-Agent Architectures For Production Deployments
-# Dump the model to a JSON file
-agent.model_dump_json()
-print(agent.to_toml())
+`swarms` provides a variety of powerful, pre-built multi-agent architectures for orchestrating agents in different ways. Choose the structure that fits your specific problem to build efficient and reliable production systems.
-```
+| **Architecture** | **Description** | **Best For** |
+|---|---|---|
+| **[SequentialWorkflow](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/)** | Agents execute tasks in a linear chain; one agent's output is the next one's input. | Step-by-step processes like data transformation pipelines, report generation. |
+| **[ConcurrentWorkflow](https://docs.swarms.world/en/latest/swarms/structs/concurrent_workflow/)** | Agents run tasks simultaneously for maximum efficiency. | High-throughput tasks like batch processing, parallel data analysis. |
+| **[AgentRearrange](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/)** | Dynamically maps complex relationships (e.g., `a -> b, c`) between agents. | Flexible and adaptive workflows, task distribution, dynamic routing. |
+| **[GraphWorkflow](https://docs.swarms.world/en/latest/swarms/structs/graph_workflow/)** | Orchestrates agents as nodes in a Directed Acyclic Graph (DAG). | Complex projects with intricate dependencies, like software builds. |
+| **[MixtureOfAgents (MoA)](https://docs.swarms.world/en/latest/swarms/structs/moa/)** | Utilizes multiple expert agents in parallel and synthesizes their outputs. | Complex problem-solving, achieving state-of-the-art performance through collaboration. |
+| **[GroupChat](https://docs.swarms.world/en/latest/swarms/structs/group_chat/)** | Agents collaborate and make decisions through a conversational interface. | Real-time collaborative decision-making, negotiations, brainstorming. |
+| **[ForestSwarm](https://docs.swarms.world/en/latest/swarms/structs/forest_swarm/)** | Dynamically selects the most suitable agent or tree of agents for a given task. | Task routing, optimizing for expertise, complex decision-making trees. |
+| **[SpreadSheetSwarm](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/)** | Manages thousands of agents concurrently, tracking tasks and outputs in a structured format. | Massive-scale parallel operations, large-scale data generation and analysis. |
+| **[SwarmRouter](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/)** | Universal orchestrator that provides a single interface to run any type of swarm with dynamic selection. | Simplifying complex workflows, switching between swarm strategies, unified multi-agent management. |
+-----
+### SequentialWorkflow
-### `Agent`with Pydantic BaseModel as Output Type
-The following is an example of an agent that intakes a pydantic basemodel and outputs it at the same time:
+A `SequentialWorkflow` executes tasks in a strict order, forming a pipeline where each agent builds upon the work of the previous one. It is ideal for processes with clear, ordered steps, ensuring that tasks with dependencies are handled correctly.
```python
-from pydantic import BaseModel, Field
-from swarms import Agent
-
-
-# Initialize the schema for the person's information
-class Schema(BaseModel):
- name: str = Field(..., title="Name of the person")
- agent: int = Field(..., title="Age of the person")
- is_student: bool = Field(..., title="Whether the person is a student")
- courses: list[str] = Field(
- ..., title="List of courses the person is taking"
- )
-
-
-# Convert the schema to a JSON string
-tool_schema = Schema(
- name="Tool Name",
- agent=1,
- is_student=True,
- courses=["Course1", "Course2"],
-)
-
-# Define the task to generate a person's information
-task = "Generate a person's information based on the following schema:"
-
-# Initialize the agent
-agent = Agent(
- agent_name="Person Information Generator",
- system_prompt=(
- "Generate a person's information based on the following schema:"
- ),
- # Set the tool schema to the JSON string -- this is the key difference
- tool_schema=tool_schema,
- model_name="gpt-4o",
- max_loops=3,
- autosave=True,
- dashboard=False,
- streaming_on=True,
- verbose=True,
- interactive=True,
- # Set the output type to the tool schema which is a BaseModel
- output_type=tool_schema, # or dict, or str
- metadata_output_type="json",
- # List of schemas that the agent can handle
- list_base_models=[tool_schema],
- function_calling_format_type="OpenAI",
- function_calling_type="json", # or soon yaml
-)
-
-# Run the agent to generate the person's information
-generated_data = agent.run(task)
+from swarms import Agent, SequentialWorkflow
-# Print the generated data
-print(f"Generated data: {generated_data}")
+# Initialize agents for a 3-step process
+# 1. Generate an idea
+idea_generator = Agent(agent_name="IdeaGenerator", system_prompt="Generate a unique startup idea.", model_name="gpt-4o-mini")
+# 2. Validate the idea
+validator = Agent(agent_name="Validator", system_prompt="Take this startup idea and analyze its market viability.", model_name="gpt-4o-mini")
+# 3. Create a pitch
+pitch_creator = Agent(agent_name="PitchCreator", system_prompt="Write a 3-sentence elevator pitch for this validated startup idea.", model_name="gpt-4o-mini")
+# Create the sequential workflow
+workflow = SequentialWorkflow(agents=[idea_generator, validator, pitch_creator])
+# Run the workflow
+elevator_pitch = workflow.run()
+print(elevator_pitch)
```
-### Multi Modal Autonomous Agent
-Run the agent with multiple modalities useful for various real-world tasks in manufacturing, logistics, and health.
-
-```python
-import os
-from dotenv import load_dotenv
-from swarms import Agent
-
-from swarm_models import GPT4VisionAPI
-
-# Load the environment variables
-load_dotenv()
-
-
-# Initialize the language model
-llm = GPT4VisionAPI(
- openai_api_key=os.environ.get("OPENAI_API_KEY"),
- max_tokens=500,
-)
-
-# Initialize the task
-task = (
- "Analyze this image of an assembly line and identify any issues such as"
- " misaligned parts, defects, or deviations from the standard assembly"
- " process. If there is anything unsafe in the image, explain why it is"
- " unsafe and how it could be improved."
-)
-img = "assembly_line.jpg"
-
-## Initialize the workflow
-agent = Agent(
- agent_name = "Multi-ModalAgent",
- llm=llm,
- max_loops="auto",
- autosave=True,
- dashboard=True,
- multi_modal=True
-)
-
-# Run the workflow on a task
-agent.run(task, img)
-```
-----
+-----
-### Local Agent `ToolAgent`
-ToolAgent is a fully local agent that can use tools through JSON function calling. It intakes any open source model from huggingface and is extremely modular and plug in and play. We need help adding general support to all models soon.
+### ConcurrentWorkflow (with `SpreadSheetSwarm`)
+A concurrent workflow runs multiple agents simultaneously. `SpreadSheetSwarm` is a powerful implementation that can manage thousands of concurrent agents and log their outputs to a CSV file. Use this architecture for high-throughput tasks that can be performed in parallel, drastically reducing execution time.
```python
-from pydantic import BaseModel, Field
-from transformers import AutoModelForCausalLM, AutoTokenizer
-
-from swarms import ToolAgent
-from swarms.tools.json_utils import base_model_to_json
-
-# Load the pre-trained model and tokenizer
-model = AutoModelForCausalLM.from_pretrained(
- "databricks/dolly-v2-12b",
- load_in_4bit=True,
- device_map="auto",
-)
-tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
-
-
-# Initialize the schema for the person's information
-class Schema(BaseModel):
- name: str = Field(..., title="Name of the person")
- agent: int = Field(..., title="Age of the person")
- is_student: bool = Field(
- ..., title="Whether the person is a student"
- )
- courses: list[str] = Field(
- ..., title="List of courses the person is taking"
- )
-
-
-# Convert the schema to a JSON string
-tool_schema = base_model_to_json(Schema)
-
-# Define the task to generate a person's information
-task = (
- "Generate a person's information based on the following schema:"
-)
-
-# Create an instance of the ToolAgent class
-agent = ToolAgent(
- name="dolly-function-agent",
- description="An agent to create a child's data",
- model=model,
- tokenizer=tokenizer,
- json_schema=tool_schema,
-)
-
-# Run the agent to generate the person's information
-generated_data = agent.run(task)
-
-# Print the generated data
-print(f"Generated data: {generated_data}")
-
-```
-
-
-## Understanding Swarms
-
-A swarm refers to a group of more than two agents working collaboratively to achieve a common goal. These agents can be software entities, such as llms that interact with each other to perform complex tasks. The concept of a swarm is inspired by natural systems like ant colonies or bird flocks, where simple individual behaviors lead to complex group dynamics and problem-solving capabilities.
-
-```mermaid
-graph TD
- A[Swarm] --> B[Agent 1]
- A --> C[Agent 2]
- A --> D[Agent N]
- B --> E[Task Processing]
- C --> E
- D --> E
- E --> F[Result Aggregation]
- F --> G[Final Output]
-```
-
-### How Swarm Architectures Facilitate Communication
-
-Swarm architectures are designed to establish and manage communication between agents within a swarm. These architectures define how agents interact, share information, and coordinate their actions to achieve the desired outcomes. Here are some key aspects of swarm architectures:
-
-1. **Hierarchical Communication**: In hierarchical swarms, communication flows from higher-level agents to lower-level agents. Higher-level agents act as coordinators, distributing tasks and aggregating results. This structure is efficient for tasks that require top-down control and decision-making.
-
-2. **Parallel Communication**: In parallel swarms, agents operate independently and communicate with each other as needed. This architecture is suitable for tasks that can be processed concurrently without dependencies, allowing for faster execution and scalability.
-
-3. **Sequential Communication**: Sequential swarms process tasks in a linear order, where each agent's output becomes the input for the next agent. This ensures that tasks with dependencies are handled in the correct sequence, maintaining the integrity of the workflow.
-
-```mermaid
-graph LR
- A[Hierarchical] --> D[Task Distribution]
- B[Parallel] --> E[Concurrent Processing]
- C[Sequential] --> F[Linear Processing]
- D --> G[Results]
- E --> G
- F --> G
-```
-
-Swarm architectures leverage these communication patterns to ensure that agents work together efficiently, adapting to the specific requirements of the task at hand. By defining clear communication protocols and interaction models, swarm architectures enable the seamless orchestration of multiple agents, leading to enhanced performance and problem-solving capabilities.
-
-
-| **Name** | **Description** | **Code Link** | **Use Cases** |
-|-------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------|
-| Hierarchical Swarms | A system where agents are organized in a hierarchy, with higher-level agents coordinating lower-level agents to achieve complex tasks. | [Code Link](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/#hierarchical-swarm) | Manufacturing process optimization, multi-level sales management, healthcare resource coordination |
-| Agent Rearrange | A setup where agents rearrange themselves dynamically based on the task requirements and environmental conditions. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | Adaptive manufacturing lines, dynamic sales territory realignment, flexible healthcare staffing |
-| Concurrent Workflows | Agents perform different tasks simultaneously, coordinating to complete a larger goal. | [Code Link](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/#concurrent-workflows) | Concurrent production lines, parallel sales operations, simultaneous patient care processes |
-| Sequential Coordination | Agents perform tasks in a specific sequence, where the completion of one task triggers the start of the next. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) | Step-by-step assembly lines, sequential sales processes, stepwise patient treatment workflows |
-| Parallel Processing | Agents work on different parts of a task simultaneously to speed up the overall process. | [Code Link](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/#parallel-processing) | Parallel data processing in manufacturing, simultaneous sales analytics, concurrent medical tests |
-| Mixture of Agents | A heterogeneous swarm where agents with different capabilities are combined to solve complex problems. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/moa/) | Financial forecasting, complex problem-solving requiring diverse skills |
-| Graph Workflow | Agents collaborate in a directed acyclic graph (DAG) format to manage dependencies and parallel tasks. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/graph_workflow/) | AI-driven software development pipelines, complex project management |
-| Group Chat | Agents engage in a chat-like interaction to reach decisions collaboratively. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/group_chat/) | Real-time collaborative decision-making, contract negotiations |
-| Agent Registry | A centralized registry where agents are stored, retrieved, and invoked dynamically. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/agent_registry/) | Dynamic agent management, evolving recommendation engines |
-| Spreadsheet Swarm | Manages tasks at scale, tracking agent outputs in a structured format like CSV files. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/) | Large-scale marketing analytics, financial audits |
-| Forest Swarm | A swarm structure that organizes agents in a tree-like hierarchy for complex decision-making processes. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/forest_swarm/) | Multi-stage workflows, hierarchical reinforcement learning |
-| Swarm Router | Routes and chooses the swarm architecture based on the task requirements and available agents. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/) | Dynamic task routing, adaptive swarm architecture selection, optimized agent allocation |
-
-
-
-## Swarms API
-
-We recently launched our enterprise-grade Swarms API. This API allows you to create, manage, and execute swarms from your own application.
-
-#### Steps:
-
-1. Create a Swarms API key [HERE](https://swarms.world)
-2. Upload your key to the `.env` file like so: `SWARMS_API_KEY=`
-3. Use the following code to create and execute a swarm:
-4. Read our docs for more information for deeper customization [HERE](https://docs.swarms.world/en/latest/swarms_cloud/swarms_api/)
+from swarms import Agent, SpreadSheetSwarm
+# Define the platforms to generate marketing posts for
+platforms = ["Twitter", "LinkedIn", "Instagram"]
-```python
-import json
-from swarms.structs.swarms_api import (
- SwarmsAPIClient,
- SwarmRequest,
- AgentInput,
-)
-import os
-
+# Create one agent per platform
agents = [
- AgentInput(
- agent_name="Medical Researcher",
- description="Conducts medical research and analysis",
- system_prompt="You are a medical researcher specializing in clinical studies.",
- max_loops=1,
- model_name="gpt-4o",
- role="worker",
- ),
- AgentInput(
- agent_name="Medical Diagnostician",
- description="Provides medical diagnoses based on symptoms and test results",
- system_prompt="You are a medical diagnostician with expertise in identifying diseases.",
- max_loops=1,
- model_name="gpt-4o",
- role="worker",
- ),
- AgentInput(
- agent_name="Pharmaceutical Expert",
- description="Advises on pharmaceutical treatments and drug interactions",
- system_prompt="You are a pharmaceutical expert knowledgeable about medications and their effects.",
- max_loops=1,
- model_name="gpt-4o",
- role="worker",
- ),
+ Agent(
+ agent_name=f"{platform}-Marketer",
+ system_prompt=f"Generate a real estate marketing post for {platform}.",
+ model_name="gpt-4o-mini",
+ )
+ for platform in platforms
]
-swarm_request = SwarmRequest(
- name="Medical Swarm",
- description="A swarm for medical research and diagnostics",
+# Initialize the swarm to run these agents concurrently
+swarm = SpreadSheetSwarm(
agents=agents,
- max_loops=1,
- swarm_type="ConcurrentWorkflow",
- output_type="str",
- return_history=True,
- task="What is the cause of the common cold?",
-)
-
-client = SwarmsAPIClient(
- api_key=os.getenv("SWARMS_API_KEY"), format_type="json"
+ autosave_on=True,
+ save_file_path="marketing_posts.csv",
)
-response = client.run(swarm_request)
-
-print(json.dumps(response, indent=4))
-
-
+# Run the swarm with a single, shared task description
+property_description = "A beautiful 3-bedroom house in sunny California."
+swarm.run(task=f"Generate a post about: {property_description}")
+# Check marketing_posts.csv for the results!
```
+---
-### `SequentialWorkflow`
+### AgentRearrange
-The SequentialWorkflow in the Swarms framework enables sequential task execution across multiple Agent objects. Each agent's output serves as input for the next agent in the sequence, continuing until reaching the specified maximum number of loops (max_loops). This workflow is particularly well-suited for tasks requiring a specific order of operations, such as data processing pipelines. To learn more, visit: [Learn More](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/)
+Inspired by `einsum`, `AgentRearrange` lets you define complex, non-linear relationships between agents using a simple string-based syntax. [Learn more](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/). This architecture is perfect for orchestrating dynamic workflows where agents may work in parallel, in sequence, or in any combination of the two.
```python
-import os
-from swarms import Agent, SequentialWorkflow
-from swarm_models import OpenAIChat
-
-# model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
-company = "Nvidia"
-# Get the OpenAI API key from the environment variable
-api_key = os.getenv("GROQ_API_KEY")
-
-# Model
-model = OpenAIChat(
- openai_api_base="https://api.groq.com/openai/v1",
- openai_api_key=api_key,
- model_name="llama-3.1-70b-versatile",
- temperature=0.1,
-)
-
-
-# Initialize the Managing Director agent
-managing_director = Agent(
- agent_name="Managing-Director",
- system_prompt=f"""
- As the Managing Director at Blackstone, your role is to oversee the entire investment analysis process for potential acquisitions.
- Your responsibilities include:
- 1. Setting the overall strategy and direction for the analysis
- 2. Coordinating the efforts of the various team members and ensuring a comprehensive evaluation
- 3. Reviewing the findings and recommendations from each team member
- 4. Making the final decision on whether to proceed with the acquisition
-
- For the current potential acquisition of {company}, direct the tasks for the team to thoroughly analyze all aspects of the company, including its financials, industry position, technology, market potential, and regulatory compliance. Provide guidance and feedback as needed to ensure a rigorous and unbiased assessment.
- """,
- llm=model,
- max_loops=1,
- dashboard=False,
- streaming_on=True,
- verbose=True,
- stopping_token="",
- state_save_file_type="json",
- saved_state_path="managing-director.json",
-)
-
-# Initialize the Vice President of Finance
-vp_finance = Agent(
- agent_name="VP-Finance",
- system_prompt=f"""
- As the Vice President of Finance at Blackstone, your role is to lead the financial analysis of potential acquisitions.
- For the current potential acquisition of {company}, your tasks include:
- 1. Conducting a thorough review of {company}' financial statements, including income statements, balance sheets, and cash flow statements
- 2. Analyzing key financial metrics such as revenue growth, profitability margins, liquidity ratios, and debt levels
- 3. Assessing the company's historical financial performance and projecting future performance based on assumptions and market conditions
- 4. Identifying any financial risks or red flags that could impact the acquisition decision
- 5. Providing a detailed report on your findings and recommendations to the Managing Director
-
- Be sure to consider factors such as the sustainability of {company}' business model, the strength of its customer base, and its ability to generate consistent cash flows. Your analysis should be data-driven, objective, and aligned with Blackstone's investment criteria.
- """,
- llm=model,
- max_loops=1,
- dashboard=False,
- streaming_on=True,
- verbose=True,
- stopping_token="",
- state_save_file_type="json",
- saved_state_path="vp-finance.json",
-)
-
-# Initialize the Industry Analyst
-industry_analyst = Agent(
- agent_name="Industry-Analyst",
- system_prompt=f"""
- As the Industry Analyst at Blackstone, your role is to provide in-depth research and analysis on the industries and markets relevant to potential acquisitions.
- For the current potential acquisition of {company}, your tasks include:
- 1. Conducting a comprehensive analysis of the industrial robotics and automation solutions industry, including market size, growth rates, key trends, and future prospects
- 2. Identifying the major players in the industry and assessing their market share, competitive strengths and weaknesses, and strategic positioning
- 3. Evaluating {company}' competitive position within the industry, including its market share, differentiation, and competitive advantages
- 4. Analyzing the key drivers and restraints for the industry, such as technological advancements, labor costs, regulatory changes, and economic conditions
- 5. Identifying potential risks and opportunities for {company} based on the industry analysis, such as disruptive technologies, emerging markets, or shifts in customer preferences
-
- Your analysis should provide a clear and objective assessment of the attractiveness and future potential of the industrial robotics industry, as well as {company}' positioning within it. Consider both short-term and long-term factors, and provide evidence-based insights to inform the investment decision.
- """,
- llm=model,
- max_loops=1,
- dashboard=False,
- streaming_on=True,
- verbose=True,
- stopping_token="",
- state_save_file_type="json",
- saved_state_path="industry-analyst.json",
-)
-
-# Initialize the Technology Expert
-tech_expert = Agent(
- agent_name="Tech-Expert",
- system_prompt=f"""
- As the Technology Expert at Blackstone, your role is to assess the technological capabilities, competitive advantages, and potential risks of companies being considered for acquisition.
- For the current potential acquisition of {company}, your tasks include:
- 1. Conducting a deep dive into {company}' proprietary technologies, including its robotics platforms, automation software, and AI capabilities
- 2. Assessing the uniqueness, scalability, and defensibility of {company}' technology stack and intellectual property
- 3. Comparing {company}' technologies to those of its competitors and identifying any key differentiators or technology gaps
- 4. Evaluating {company}' research and development capabilities, including its innovation pipeline, engineering talent, and R&D investments
- 5. Identifying any potential technology risks or disruptive threats that could impact {company}' long-term competitiveness, such as emerging technologies or expiring patents
-
- Your analysis should provide a comprehensive assessment of {company}' technological strengths and weaknesses, as well as the sustainability of its competitive advantages. Consider both the current state of its technology and its future potential in light of industry trends and advancements.
- """,
- llm=model,
- max_loops=1,
- dashboard=False,
- streaming_on=True,
- verbose=True,
- stopping_token="",
- state_save_file_type="json",
- saved_state_path="tech-expert.json",
-)
-
-# Initialize the Market Researcher
-market_researcher = Agent(
- agent_name="Market-Researcher",
- system_prompt=f"""
- As the Market Researcher at Blackstone, your role is to analyze the target company's customer base, market share, and growth potential to assess the commercial viability and attractiveness of the potential acquisition.
- For the current potential acquisition of {company}, your tasks include:
- 1. Analyzing {company}' current customer base, including customer segmentation, concentration risk, and retention rates
- 2. Assessing {company}' market share within its target markets and identifying key factors driving its market position
- 3. Conducting a detailed market sizing and segmentation analysis for the industrial robotics and automation markets, including identifying high-growth segments and emerging opportunities
- 4. Evaluating the demand drivers and sales cycles for {company}' products and services, and identifying any potential risks or limitations to adoption
- 5. Developing financial projections and estimates for {company}' revenue growth potential based on the market analysis and assumptions around market share and penetration
-
- Your analysis should provide a data-driven assessment of the market opportunity for {company} and the feasibility of achieving our investment return targets. Consider both bottom-up and top-down market perspectives, and identify any key sensitivities or assumptions in your projections.
- """,
- llm=model,
- max_loops=1,
- dashboard=False,
- streaming_on=True,
- verbose=True,
- stopping_token="",
- state_save_file_type="json",
- saved_state_path="market-researcher.json",
-)
-
-# Initialize the Regulatory Specialist
-regulatory_specialist = Agent(
- agent_name="Regulatory-Specialist",
- system_prompt=f"""
- As the Regulatory Specialist at Blackstone, your role is to identify and assess any regulatory risks, compliance requirements, and potential legal liabilities associated with potential acquisitions.
- For the current potential acquisition of {company}, your tasks include:
- 1. Identifying all relevant regulatory bodies and laws that govern the operations of {company}, including industry-specific regulations, labor laws, and environmental regulations
- 2. Reviewing {company}' current compliance policies, procedures, and track record to identify any potential gaps or areas of non-compliance
- 3. Assessing the potential impact of any pending or proposed changes to relevant regulations that could affect {company}' business or create additional compliance burdens
- 4. Evaluating the potential legal liabilities and risks associated with {company}' products, services, and operations, including product liability, intellectual property, and customer contracts
- 5. Providing recommendations on any regulatory or legal due diligence steps that should be taken as part of the acquisition process, as well as any post-acquisition integration considerations
-
- Your analysis should provide a comprehensive assessment of the regulatory and legal landscape surrounding {company}, and identify any material risks or potential deal-breakers. Consider both the current state and future outlook, and provide practical recommendations to mitigate identified risks.
- """,
- llm=model,
- max_loops=1,
- dashboard=False,
- streaming_on=True,
- verbose=True,
- stopping_token="",
- state_save_file_type="json",
- saved_state_path="regulatory-specialist.json",
-)
-
-# Create a list of agents
-agents = [
- managing_director,
- vp_finance,
- industry_analyst,
- tech_expert,
- market_researcher,
- regulatory_specialist,
-]
+from swarms import Agent, AgentRearrange
+# Define agents
+researcher = Agent(agent_name="researcher", model_name="gpt-4o-mini")
+writer = Agent(agent_name="writer", model_name="gpt-4o-mini")
+editor = Agent(agent_name="editor", model_name="gpt-4o-mini")
-swarm = SequentialWorkflow(
- name="blackstone-private-equity-advisors",
- agents=agents,
-)
+# Define a flow: researcher sends work to both writer and editor simultaneously
+# This is a one-to-many relationship
+flow = "researcher -> writer, editor"
-print(
- swarm.run(
- "Analyze nvidia if it's a good deal to invest in now 10B"
- )
+# Create the rearrangement system
+rearrange_system = AgentRearrange(
+ agents=[researcher, writer, editor],
+ flow=flow,
)
+# Run the system
+# The researcher will generate content, and then both the writer and editor
+# will process that content in parallel.
+outputs = rearrange_system.run("Analyze the impact of AI on modern cinema.")
+print(outputs)
```
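+The flow syntax also supports purely sequential chains, such as `a -> b -> c`. Here is a minimal sketch reusing the agents defined above (the only change is the flow string):
+```python
+# Reuses `researcher`, `writer`, `editor`, and `AgentRearrange` from the example above
+# A linear flow: the researcher hands off to the writer, who hands off to the editor
+sequential_flow = "researcher -> writer -> editor"
+
+sequential_system = AgentRearrange(
+    agents=[researcher, writer, editor],
+    flow=sequential_flow,
+)
+
+print(sequential_system.run("Summarize the history of jazz in three paragraphs."))
+```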
-------
-
-## `AgentRearrange`
-
-The `AgentRearrange` orchestration technique, inspired by Einops and einsum, enables you to define and map relationships between multiple agents. This powerful tool facilitates the orchestration of complex workflows by allowing you to specify both linear and concurrent relationships. For example, you can create sequential workflows like `a -> a1 -> a2 -> a3` or parallel workflows where a single agent distributes tasks to multiple agents simultaneously: `a -> a1, a2, a3`. This flexibility enables the creation of highly efficient and dynamic workflows, with agents operating either in parallel or sequence as required. As a valuable addition to the swarms library, `AgentRearrange` provides enhanced flexibility and precise control over agent orchestration. For comprehensive information and examples, visit the [official documentation](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/). [Watch my video tutorial on agent rearrange!](https://youtu.be/Rq8wWQ073mg)
+
+----
-GraphWorkflow is a workflow management system using a directed acyclic graph (DAG) to orchestrate complex tasks. Nodes (agents or tasks) and edges define dependencies, with agents executing tasks concurrently. It features entry/end points, visualization for debugging, and scalability for dynamic task assignment. Benefits include concurrency, flexibility, scalability, and clear workflow visualization. [Learn more:](https://docs.swarms.world/en/latest/swarms/structs/graph_swarm/) The `run` method returns a dictionary containing the execution results of all nodes in the graph.
+### SwarmRouter: The Universal Swarm Orchestrator
+The `SwarmRouter` simplifies building complex workflows by providing a single interface to run any type of swarm. Instead of importing and managing different swarm classes, you can dynamically select the one you need just by changing the `swarm_type` parameter. [Read the full documentation](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/)
+This makes your code cleaner and more flexible, allowing you to switch between different multi-agent strategies with ease. Here's a complete example that shows how to define agents and then use `SwarmRouter` to execute the same task using different collaborative strategies.
```python
-from swarms import Agent, Edge, GraphWorkflow, Node, NodeType
-
-# Initialize agents with model_name parameter
-agent1 = Agent(
- agent_name="Agent1",
- model_name="openai/gpt-4o-mini", # Using provider prefix
- temperature=0.5,
- max_tokens=4000,
- max_loops=1,
- autosave=True,
- dashboard=True,
-)
+from swarms import Agent
+from swarms.structs.swarm_router import SwarmRouter, SwarmType
-agent2 = Agent(
- agent_name="Agent2",
- model_name="openai/gpt-4o-mini", # Using provider prefix
- temperature=0.5,
- max_tokens=4000,
- max_loops=1,
- autosave=True,
- dashboard=True,
+# Define a few generic agents
+writer = Agent(agent_name="Writer", system_prompt="You are a creative writer.", model_name="gpt-4o-mini")
+editor = Agent(agent_name="Editor", system_prompt="You are an expert editor for stories.", model_name="gpt-4o-mini")
+reviewer = Agent(agent_name="Reviewer", system_prompt="You are a final reviewer who gives a score.", model_name="gpt-4o-mini")
+
+# The agents and task will be the same for all examples
+agents = [writer, editor, reviewer]
+task = "Write a short story about a robot who discovers music."
+
+# --- Example 1: SequentialWorkflow ---
+# Agents run one after another in a chain: Writer -> Editor -> Reviewer.
+print("Running a Sequential Workflow...")
+sequential_router = SwarmRouter(swarm_type=SwarmType.SequentialWorkflow, agents=agents)
+sequential_output = sequential_router.run(task)
+print(f"Final Sequential Output:\n{sequential_output}\n")
+
+# --- Example 2: ConcurrentWorkflow ---
+# All agents receive the same initial task and run at the same time.
+print("Running a Concurrent Workflow...")
+concurrent_router = SwarmRouter(swarm_type=SwarmType.ConcurrentWorkflow, agents=agents)
+concurrent_outputs = concurrent_router.run(task)
+# This returns a dictionary of each agent's output
+for agent_name, output in concurrent_outputs.items():
+ print(f"Output from {agent_name}:\n{output}\n")
+
+# --- Example 3: MixtureOfAgents ---
+# All agents run in parallel, and a special 'aggregator' agent synthesizes their outputs.
+print("Running a Mixture of Agents Workflow...")
+aggregator = Agent(
+ agent_name="Aggregator",
+ system_prompt="Combine the story, edits, and review into a final document.",
+ model_name="gpt-4o-mini"
+)
+moa_router = SwarmRouter(
+ swarm_type=SwarmType.MixtureOfAgents,
+ agents=agents,
+ aggregator_agent=aggregator, # MoA requires an aggregator
)
-
-def sample_task():
- print("Running sample task")
- return "Task completed"
-
-wf_graph = GraphWorkflow()
-wf_graph.add_node(Node(id="agent1", type=NodeType.AGENT, agent=agent1))
-wf_graph.add_node(Node(id="agent2", type=NodeType.AGENT, agent=agent2))
-wf_graph.add_node(Node(id="task1", type=NodeType.TASK, callable=sample_task))
-
-wf_graph.add_edge(Edge(source="agent1", target="task1"))
-wf_graph.add_edge(Edge(source="agent2", target="task1"))
-
-wf_graph.set_entry_points(["agent1", "agent2"])
-wf_graph.set_end_points(["task1"])
-
-print(wf_graph.visualize())
-
-results = wf_graph.run()
-print("Execution results:", results)
+aggregated_output = moa_router.run(task)
+print(f"Final Aggregated Output:\n{aggregated_output}\n")
```
------
+The `SwarmRouter` is a powerful tool for simplifying multi-agent orchestration. It provides a consistent and flexible way to deploy different collaborative strategies, allowing you to build more sophisticated applications with less code.
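+
+The same router interface can also drive an `AgentRearrange` flow. This is a rough sketch that assumes `SwarmType.AgentRearrange` and a `flow` parameter are accepted, mirroring the standalone `AgentRearrange` example earlier in this README:
+```python
+# --- Example 4: AgentRearrange via the router (sketch) ---
+# Reuses `agents` and `task` from above; the flow string references agent names.
+rearrange_router = SwarmRouter(
+    swarm_type=SwarmType.AgentRearrange,
+    agents=agents,
+    flow="Writer -> Editor -> Reviewer",  # assumed parameter; see the AgentRearrange docs
+)
+print(rearrange_router.run(task))
+```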
-## `MixtureOfAgents`
+-------
-The MixtureOfAgents architecture, inspired by together.ai's paper (arXiv:2406.04692), achieves SOTA performance on AlpacaEval 2.0, MT-Bench, and FLASK, surpassing GPT-4 Omni. It processes tasks via parallel agent collaboration and sequential layering, with documentation [HERE](https://docs.swarms.world/en/latest/swarms/structs/moa/)
+### MixtureOfAgents (MoA)
+The `MixtureOfAgents` architecture processes tasks by feeding them to multiple "expert" agents in parallel. Their diverse outputs are then synthesized by an aggregator agent to produce a final, high-quality result. [Learn more here](https://docs.swarms.world/en/latest/swarms/examples/moa_example/)
```python
-
-import os
from swarms import Agent, MixtureOfAgents
-# Agent 1: Financial Statement Analyzer
-agent1 = Agent(
- agent_name="FinancialStatementAnalyzer",
- model_name="gpt-4o",
- system_prompt="""You are a Financial Statement Analyzer specializing in 10-K SEC reports. Your primary focus is on analyzing the financial statements, including the balance sheet, income statement, and cash flow statement.
-
-Key responsibilities:
-1. Identify and explain significant changes in financial metrics year-over-year.
-2. Calculate and interpret key financial ratios (e.g., liquidity ratios, profitability ratios, leverage ratios).
-3. Analyze trends in revenue, expenses, and profitability.
-4. Highlight any red flags or areas of concern in the financial statements.
-5. Provide insights on the company's financial health and performance based on the data.
-
-When analyzing, consider industry standards and compare the company's performance to its peers when possible. Your analysis should be thorough, data-driven, and provide actionable insights for investors and stakeholders.""",
- max_loops=1,
- autosave=True,
- dashboard=False,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="financial_statement_analyzer_state.json",
- user_name="swarms_corp",
- retry_attempts=1,
- context_length=200000,
- return_step_meta=False,
-)
-
-# Agent 2: Risk Assessment Specialist
-agent2 = Agent(
- agent_name="RiskAssessmentSpecialist",
- model_name="gpt-4o",
- system_prompt="""You are a Risk Assessment Specialist focusing on 10-K SEC reports. Your primary role is to identify, analyze, and evaluate potential risks disclosed in the report.
-
-Key responsibilities:
-1. Thoroughly review the "Risk Factors" section of the 10-K report.
-2. Identify and categorize different types of risks (e.g., operational, financial, legal, market, technological).
-3. Assess the potential impact and likelihood of each identified risk.
-4. Analyze the company's risk mitigation strategies and their effectiveness.
-5. Identify any emerging risks not explicitly mentioned but implied by the company's operations or market conditions.
-6. Compare the company's risk profile with industry peers when possible.
-
-Your analysis should provide a comprehensive overview of the company's risk landscape, helping stakeholders understand the potential challenges and uncertainties facing the business. Be sure to highlight any critical risks that could significantly impact the company's future performance or viability.""",
- max_loops=1,
- autosave=True,
- dashboard=False,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="risk_assessment_specialist_state.json",
- user_name="swarms_corp",
- retry_attempts=1,
- context_length=200000,
- return_step_meta=False,
-)
-
-# Agent 3: Business Strategy Evaluator
-agent3 = Agent(
- agent_name="BusinessStrategyEvaluator",
- model_name="gpt-4o",
- system_prompt="""You are a Business Strategy Evaluator specializing in analyzing 10-K SEC reports. Your focus is on assessing the company's overall strategy, market position, and future outlook.
-
-Key responsibilities:
-1. Analyze the company's business description, market opportunities, and competitive landscape.
-2. Evaluate the company's products or services, including their market share and growth potential.
-3. Assess the effectiveness of the company's current business strategy and its alignment with market trends.
-4. Identify key performance indicators (KPIs) and evaluate the company's performance against these metrics.
-5. Analyze management's discussion and analysis (MD&A) section to understand their perspective on the business.
-6. Identify potential growth opportunities or areas for improvement in the company's strategy.
-7. Compare the company's strategic position with key competitors in the industry.
-
-Your analysis should provide insights into the company's strategic direction, its ability to create value, and its potential for future growth. Consider both short-term and long-term perspectives in your evaluation.""",
- max_loops=1,
- autosave=True,
- dashboard=False,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="business_strategy_evaluator_state.json",
- user_name="swarms_corp",
- retry_attempts=1,
- context_length=200000,
- return_step_meta=False,
-)
+# Define expert agents
+financial_analyst = Agent(agent_name="FinancialAnalyst", system_prompt="Analyze financial data.", model_name="gpt-4o-mini")
+market_analyst = Agent(agent_name="MarketAnalyst", system_prompt="Analyze market trends.", model_name="gpt-4o-mini")
+risk_analyst = Agent(agent_name="RiskAnalyst", system_prompt="Analyze investment risks.", model_name="gpt-4o-mini")
-# Aggregator Agent
-aggregator_agent = Agent(
- agent_name="10KReportAggregator",
- model_name="gpt-4o",
- system_prompt="""You are the 10-K Report Aggregator, responsible for synthesizing and summarizing the analyses provided by the Financial Statement Analyzer, Risk Assessment Specialist, and Business Strategy Evaluator. Your goal is to create a comprehensive, coherent, and insightful summary of the 10-K SEC report.
-
-Key responsibilities:
-1. Integrate the financial analysis, risk assessment, and business strategy evaluation into a unified report.
-2. Identify and highlight the most critical information and insights from each specialist's analysis.
-3. Reconcile any conflicting information or interpretations among the specialists' reports.
-4. Provide a balanced view of the company's overall performance, risks, and strategic position.
-5. Summarize key findings and their potential implications for investors and stakeholders.
-6. Identify any areas where further investigation or clarification may be needed.
-
-Your final report should be well-structured, easy to understand, and provide a holistic view of the company based on the 10-K SEC report. It should offer valuable insights for decision-making while acknowledging any limitations or uncertainties in the analysis.""",
- max_loops=1,
- autosave=True,
- dashboard=False,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="10k_report_aggregator_state.json",
- user_name="swarms_corp",
- retry_attempts=1,
- context_length=200000,
- return_step_meta=False,
+# Define the aggregator agent
+aggregator = Agent(
+ agent_name="InvestmentAdvisor",
+ system_prompt="Synthesize the financial, market, and risk analyses to provide a final investment recommendation.",
+ model_name="gpt-4o-mini"
)
-# Create the Mixture of Agents class
-moa = MixtureOfAgents(
- agents=[agent1, agent2, agent3],
- aggregator_agent=aggregator_agent,
- aggregator_system_prompt="""As the 10-K Report Aggregator, your task is to synthesize the analyses provided by the Financial Statement Analyzer, Risk Assessment Specialist, and Business Strategy Evaluator into a comprehensive and coherent report.
-
-Follow these steps:
-1. Review and summarize the key points from each specialist's analysis.
-2. Identify common themes and insights across the analyses.
-3. Highlight any discrepancies or conflicting interpretations, if present.
-4. Provide a balanced and integrated view of the company's financial health, risks, and strategic position.
-5. Summarize the most critical findings and their potential impact on investors and stakeholders.
-6. Suggest areas for further investigation or monitoring, if applicable.
-
-Your final output should be a well-structured, insightful report that offers a holistic view of the company based on the 10-K SEC report analysis.""",
- layers=3,
-)
-
-# Example usage
-company_name = "NVIDIA"
-out = moa.run(
- f"Analyze the latest 10-K SEC report for {company_name}. Provide a comprehensive summary of the company's financial performance, risk profile, and business strategy."
-)
-print(out)
-
-```
-
--------
-
-## SpreadSheetSwarm
-
-SpreadSheetSwarm manages thousands of agents concurrently for efficient task processing. It supports one-to-many task distribution, scalability, and autosaving results. Initialized with a name, description, agents, and settings, the run method executes tasks and returns a dictionary of agent outputs.
-
-[Learn more:](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/)
-
-```python
-from swarms import Agent, SpreadSheetSwarm
-# Define custom system prompts for each social media platform
-TWITTER_AGENT_SYS_PROMPT = """
-You are a Twitter marketing expert specializing in real estate. Your task is to create engaging, concise tweets to promote properties, analyze trends to maximize engagement, and use appropriate hashtags and timing to reach potential buyers.
-"""
-
-INSTAGRAM_AGENT_SYS_PROMPT = """
-You are an Instagram marketing expert focusing on real estate. Your task is to create visually appealing posts with engaging captions and hashtags to showcase properties, targeting specific demographics interested in real estate.
-"""
-
-FACEBOOK_AGENT_SYS_PROMPT = """
-You are a Facebook marketing expert for real estate. Your task is to craft posts optimized for engagement and reach on Facebook, including using images, links, and targeted messaging to attract potential property buyers.
-"""
-
-LINKEDIN_AGENT_SYS_PROMPT = """
-You are a LinkedIn marketing expert for the real estate industry. Your task is to create professional and informative posts, highlighting property features, market trends, and investment opportunities, tailored to professionals and investors.
-"""
-
-EMAIL_AGENT_SYS_PROMPT = """
-You are an Email marketing expert specializing in real estate. Your task is to write compelling email campaigns to promote properties, focusing on personalization, subject lines, and effective call-to-action strategies to drive conversions.
-"""
-
-# Initialize your agents for different social media platforms
-agents = [
- Agent(
- agent_name="Twitter-RealEstate-Agent",
- system_prompt=TWITTER_AGENT_SYS_PROMPT,
- model_name="gpt-4o-mini",
- max_loops=1,
- dynamic_temperature_enabled=True,
- saved_state_path="twitter_realestate_agent.json",
- user_name="realestate_swarms",
- retry_attempts=1,
- ),
- Agent(
- agent_name="Instagram-RealEstate-Agent",
- system_prompt=INSTAGRAM_AGENT_SYS_PROMPT,
- model_name="gpt-4o-mini",
- max_loops=1,
- dynamic_temperature_enabled=True,
- saved_state_path="instagram_realestate_agent.json",
- user_name="realestate_swarms",
- retry_attempts=1,
- ),
- Agent(
- agent_name="Facebook-RealEstate-Agent",
- system_prompt=FACEBOOK_AGENT_SYS_PROMPT,
- model_name="gpt-4o-mini",
- max_loops=1,
- dynamic_temperature_enabled=True,
- saved_state_path="facebook_realestate_agent.json",
- user_name="realestate_swarms",
- retry_attempts=1,
- ),
- Agent(
- agent_name="LinkedIn-RealEstate-Agent",
- system_prompt=LINKEDIN_AGENT_SYS_PROMPT,
- model_name="gpt-4o-mini",
- max_loops=1,
- dynamic_temperature_enabled=True,
- saved_state_path="linkedin_realestate_agent.json",
- user_name="realestate_swarms",
- retry_attempts=1,
- ),
- Agent(
- agent_name="Email-RealEstate-Agent",
- system_prompt=EMAIL_AGENT_SYS_PROMPT,
- model_name="gpt-4o-mini",
- max_loops=1,
- dynamic_temperature_enabled=True,
- saved_state_path="email_realestate_agent.json",
- user_name="realestate_swarms",
- retry_attempts=1,
- ),
-]
-
-# Create a Swarm with the list of agents
-swarm = SpreadSheetSwarm(
- name="Real-Estate-Marketing-Swarm",
- description="A swarm that processes real estate marketing tasks using multiple agents on different threads.",
- agents=agents,
- autosave_on=True,
- save_file_path="real_estate_marketing_spreadsheet.csv",
- run_all_agents=False,
- max_loops=2,
+# Create the MoA swarm
+moa_swarm = MixtureOfAgents(
+ agents=[financial_analyst, market_analyst, risk_analyst],
+ aggregator_agent=aggregator,
)
# Run the swarm
-swarm.run(
- task="""
- Create posts to promote luxury properties in North Texas, highlighting their features, location, and investment potential. Include relevant hashtags, images, and engaging captions.
-
-
- Property:
- $10,399,000
- 1609 Meandering Way Dr, Roanoke, TX 76262
- Link to the property: https://www.zillow.com/homedetails/1609-Meandering-Way-Dr-Roanoke-TX-76262/308879785_zpid/
-
- What's special
- Unveiling a new custom estate in the prestigious gated Quail Hollow Estates! This impeccable residence, set on a sprawling acre surrounded by majestic trees, features a gourmet kitchen equipped with top-tier Subzero and Wolf appliances. European soft-close cabinets and drawers, paired with a double Cambria Quartzite island, perfect for family gatherings. The first-floor game room&media room add extra layers of entertainment. Step into the outdoor sanctuary, where a sparkling pool and spa, and sunken fire pit, beckon leisure. The lavish master suite features stunning marble accents, custom his&her closets, and a secure storm shelter.Throughout the home,indulge in the visual charm of designer lighting and wallpaper, elevating every space. The property is complete with a 6-car garage and a sports court, catering to the preferences of basketball or pickleball enthusiasts. This residence seamlessly combines luxury&recreational amenities, making it a must-see for the discerning buyer.
-
- Facts & features
- Interior
- Bedrooms & bathrooms
- Bedrooms: 6
- Bathrooms: 8
- Full bathrooms: 7
- 1/2 bathrooms: 1
- Primary bedroom
- Bedroom
- Features: Built-in Features, En Suite Bathroom, Walk-In Closet(s)
- Cooling
- Central Air, Ceiling Fan(s), Electric
- Appliances
- Included: Built-In Gas Range, Built-In Refrigerator, Double Oven, Dishwasher, Gas Cooktop, Disposal, Ice Maker, Microwave, Range, Refrigerator, Some Commercial Grade, Vented Exhaust Fan, Warming Drawer, Wine Cooler
- Features
- Wet Bar, Built-in Features, Dry Bar, Decorative/Designer Lighting Fixtures, Eat-in Kitchen, Elevator, High Speed Internet, Kitchen Island, Pantry, Smart Home, Cable TV, Walk-In Closet(s), Wired for Sound
- Flooring: Hardwood
- Has basement: No
- Number of fireplaces: 3
- Fireplace features: Living Room, Primary Bedroom
- Interior area
- Total interior livable area: 10,466 sqft
- Total spaces: 12
- Parking features: Additional Parking
- Attached garage spaces: 6
- Carport spaces: 6
- Features
- Levels: Two
- Stories: 2
- Patio & porch: Covered
- Exterior features: Built-in Barbecue, Barbecue, Gas Grill, Lighting, Outdoor Grill, Outdoor Living Area, Private Yard, Sport Court, Fire Pit
- Pool features: Heated, In Ground, Pool, Pool/Spa Combo
- Fencing: Wrought Iron
- Lot
- Size: 1.05 Acres
- Details
- Additional structures: Outdoor Kitchen
- Parcel number: 42232692
- Special conditions: Standard
- Construction
- Type & style
- Home type: SingleFamily
- Architectural style: Contemporary/Modern,Detached
- Property subtype: Single Family Residence
- """
-)
-
+recommendation = moa_swarm.run("Should we invest in NVIDIA stock right now?")
+print(recommendation)
```
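+Depending on your version, `MixtureOfAgents` may also accept a `layers` parameter that repeats the parallel-expert and aggregation cycle for deeper refinement. A small sketch, assuming that parameter is supported:
+```python
+# Reuses the analyst and aggregator agents defined above
+layered_moa = MixtureOfAgents(
+    agents=[financial_analyst, market_analyst, risk_analyst],
+    aggregator_agent=aggregator,
+    layers=3,  # assumed: run the expert + aggregation cycle three times
+)
+print(layered_moa.run("Should we invest in NVIDIA stock right now?"))
+```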
+----
-----------
-
-## `ForestSwarm`
-
-The `ForestSwarm` architecture is an intelligent system designed to optimize task assignment by dynamically selecting the most appropriate agent from a collection of specialized trees. Through asynchronous task processing, the system intelligently matches tasks with agents based on their relevance. This matching is accomplished by computing the semantic similarity between each agent's system prompts and the keywords present in the task. For comprehensive details about the `ForestSwarm` implementation and capabilities, please consult the [official documentation](https://docs.swarms.world/en/latest/swarms/structs/forest_swarm/).
-
-
-
-
-```python
-from swarms import TreeAgent, Tree, ForestSwarm
-
-# Create agents with varying system prompts and dynamically generated distances/keywords
-agents_tree1 = [
- TreeAgent(
- system_prompt="""You are an expert Stock Analysis Agent with deep knowledge of financial markets, technical analysis, and fundamental analysis. Your primary function is to analyze stock performance, market trends, and provide actionable insights. When analyzing stocks:
-
-1. Always start with a brief overview of the current market conditions.
-2. Use a combination of technical indicators (e.g., moving averages, RSI, MACD) and fundamental metrics (e.g., P/E ratio, EPS growth, debt-to-equity).
-3. Consider both short-term and long-term perspectives in your analysis.
-4. Provide clear buy, hold, or sell recommendations with supporting rationale.
-5. Highlight potential risks and opportunities specific to each stock or sector.
-6. Use bullet points for clarity when listing key points or metrics.
-7. If relevant, compare the stock to its peers or sector benchmarks.
-
-Remember to maintain objectivity and base your analysis on factual data. If asked about future performance, always include a disclaimer about market unpredictability. Your goal is to provide comprehensive, accurate, and actionable stock analysis to inform investment decisions.""",
- agent_name="Stock Analysis Agent",
- ),
- TreeAgent(
- system_prompt="""You are a highly skilled Financial Planning Agent, specializing in personal and corporate financial strategies. Your role is to provide comprehensive financial advice tailored to each client's unique situation. When creating financial plans:
-
-1. Begin by asking key questions about the client's financial goals, current situation, and risk tolerance.
-2. Develop a holistic view of the client's finances, including income, expenses, assets, and liabilities.
-3. Create detailed, step-by-step action plans to achieve financial goals.
-4. Provide specific recommendations for budgeting, saving, and investing.
-5. Consider tax implications and suggest tax-efficient strategies.
-6. Incorporate risk management and insurance planning into your recommendations.
-7. Use charts or tables to illustrate financial projections and scenarios.
-8. Regularly suggest reviewing and adjusting the plan as circumstances change.
-
-Always prioritize the client's best interests and adhere to fiduciary standards. Explain complex financial concepts in simple terms, and be prepared to justify your recommendations with data and reasoning.""",
- agent_name="Financial Planning Agent",
- ),
- TreeAgent(
- agent_name="Retirement Strategy Agent",
- system_prompt="""You are a specialized Retirement Strategy Agent, focused on helping individuals and couples plan for a secure and comfortable retirement. Your expertise covers various aspects of retirement planning, including savings strategies, investment allocation, and income generation during retirement. When developing retirement strategies:
-
-1. Start by assessing the client's current age, desired retirement age, and expected lifespan.
-2. Calculate retirement savings goals based on desired lifestyle and projected expenses.
-3. Analyze current retirement accounts (e.g., 401(k), IRA) and suggest optimization strategies.
-4. Provide guidance on asset allocation and rebalancing as retirement approaches.
-5. Explain various retirement income sources (e.g., Social Security, pensions, annuities).
-6. Discuss healthcare costs and long-term care planning.
-7. Offer strategies for tax-efficient withdrawals during retirement.
-8. Consider estate planning and legacy goals in your recommendations.
-
-Use Monte Carlo simulations or other statistical tools to illustrate the probability of retirement success. Always emphasize the importance of starting early and the power of compound interest. Be prepared to adjust strategies based on changing market conditions or personal circumstances.""",
- ),
-]
-
-agents_tree2 = [
- TreeAgent(
- system_prompt="""You are a knowledgeable Tax Filing Agent, specializing in personal and business tax preparation and strategy. Your role is to ensure accurate tax filings while maximizing legitimate deductions and credits. When assisting with tax matters:
-
-1. Start by gathering all necessary financial information and documents.
-2. Stay up-to-date with the latest tax laws and regulations, including state-specific rules.
-3. Identify all applicable deductions and credits based on the client's situation.
-4. Provide step-by-step guidance for completing tax forms accurately.
-5. Explain tax implications of various financial decisions.
-6. Offer strategies for tax-efficient investing and income management.
-7. Assist with estimated tax payments for self-employed individuals or businesses.
-8. Advise on record-keeping practices for tax purposes.
-
-Always prioritize compliance with tax laws while ethically minimizing tax liability. Be prepared to explain complex tax concepts in simple terms and provide rationale for your recommendations. If a situation is beyond your expertise, advise consulting a certified tax professional or IRS resources.""",
- agent_name="Tax Filing Agent",
- ),
- TreeAgent(
- system_prompt="""You are a sophisticated Investment Strategy Agent, adept at creating and managing investment portfolios to meet diverse financial goals. Your expertise covers various asset classes, market analysis, and risk management techniques. When developing investment strategies:
-
-1. Begin by assessing the client's investment goals, time horizon, and risk tolerance.
-2. Provide a comprehensive overview of different asset classes and their risk-return profiles.
-3. Create diversified portfolio recommendations based on modern portfolio theory.
-4. Explain the benefits and risks of various investment vehicles (e.g., stocks, bonds, ETFs, mutual funds).
-5. Incorporate both passive and active investment strategies as appropriate.
-6. Discuss the importance of regular portfolio rebalancing and provide a rebalancing strategy.
-7. Consider tax implications of investment decisions and suggest tax-efficient strategies.
-8. Provide ongoing market analysis and suggest portfolio adjustments as needed.
-
-Use historical data and forward-looking projections to illustrate potential outcomes. Always emphasize the importance of long-term investing and the risks of market timing. Be prepared to explain complex investment concepts in clear, accessible language.""",
- agent_name="Investment Strategy Agent",
- ),
- TreeAgent(
- system_prompt="""You are a specialized ROTH IRA Agent, focusing on the intricacies of Roth Individual Retirement Accounts. Your role is to provide expert guidance on Roth IRA rules, benefits, and strategies to maximize their value for retirement planning. When advising on Roth IRAs:
-
-1. Explain the fundamental differences between traditional and Roth IRAs.
-2. Clarify Roth IRA contribution limits and income eligibility requirements.
-3. Discuss the tax advantages of Roth IRAs, including tax-free growth and withdrawals.
-4. Provide guidance on Roth IRA conversion strategies and their tax implications.
-5. Explain the five-year rule and how it affects Roth IRA withdrawals.
-6. Offer strategies for maximizing Roth IRA contributions, such as the backdoor Roth IRA method.
-7. Discuss how Roth IRAs fit into overall retirement and estate planning strategies.
-8. Provide insights on investment choices within a Roth IRA to maximize tax-free growth.
-
-Always stay current with IRS regulations regarding Roth IRAs. Be prepared to provide numerical examples to illustrate the long-term benefits of Roth IRAs. Emphasize the importance of considering individual financial situations when making Roth IRA decisions.""",
- agent_name="ROTH IRA Agent",
- ),
-]
-
-# Create trees
-tree1 = Tree(tree_name="Financial Tree", agents=agents_tree1)
-tree2 = Tree(tree_name="Investment Tree", agents=agents_tree2)
-
-# Create the ForestSwarm
-multi_agent_structure = ForestSwarm(trees=[tree1, tree2])
-
-# Run a task
-task = "What are the best platforms to do our taxes on"
-output = multi_agent_structure.run(task)
-print(output)
-
-```
-
-
-
-------------
-
-## `SwarmRouter`
-
-The `SwarmRouter` class is a flexible routing system designed to manage different types of swarms for task execution. It provides a unified interface to interact with various swarm types, including `AgentRearrange`, `MixtureOfAgents`, `SpreadSheetSwarm`, `SequentialWorkflow`, and `ConcurrentWorkflow`. We will be continuously adding more and more swarm architectures here as we progress with new architectures. [Learn More](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/)
+### GroupChat
+`GroupChat` creates a conversational environment where multiple agents can interact, discuss, and collaboratively solve a problem. You can define the speaking order or let it be determined dynamically. This architecture is ideal for tasks that benefit from debate and multi-perspective reasoning, such as contract negotiation, brainstorming, or complex decision-making.
```python
-import os
-from dotenv import load_dotenv
-from swarms import Agent
-from swarm_models import OpenAIChat
-from swarms.structs.swarm_router import SwarmRouter, SwarmType
-
-load_dotenv()
-
-# Get the OpenAI API key from the environment variable
-api_key = os.getenv("GROQ_API_KEY")
-
-# Model
-model = OpenAIChat(
- openai_api_base="https://api.groq.com/openai/v1",
- openai_api_key=api_key,
- model_name="llama-3.1-70b-versatile",
- temperature=0.1,
-)
-# Define specialized system prompts for each agent
-DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. Your expertise includes:
-1. Extracting key financial metrics (revenue, EBITDA, growth rates, etc.) from financial statements and reports
-2. Identifying and extracting important contract terms from legal documents
-3. Pulling out relevant market data from industry reports and analyses
-4. Extracting operational KPIs from management presentations and internal reports
-5. Identifying and extracting key personnel information from organizational charts and bios
-Provide accurate, structured data extracted from various document types to support investment analysis."""
-
-SUMMARIZER_PROMPT = """You are an expert private equity agent specializing in summarizing complex documents. Your core competencies include:
-1. Distilling lengthy financial reports into concise executive summaries
-2. Summarizing legal documents, highlighting key terms and potential risks
-3. Condensing industry reports to capture essential market trends and competitive dynamics
-4. Summarizing management presentations to highlight key strategic initiatives and projections
-5. Creating brief overviews of technical documents, emphasizing critical points for non-technical stakeholders
-Deliver clear, concise summaries that capture the essence of various documents while highlighting information crucial for investment decisions."""
-
-FINANCIAL_ANALYST_PROMPT = """You are a specialized private equity agent focused on financial analysis. Your key responsibilities include:
-1. Analyzing historical financial statements to identify trends and potential issues
-2. Evaluating the quality of earnings and potential adjustments to EBITDA
-3. Assessing working capital requirements and cash flow dynamics
-4. Analyzing capital structure and debt capacity
-5. Evaluating financial projections and underlying assumptions
-Provide thorough, insightful financial analysis to inform investment decisions and valuation."""
-
-MARKET_ANALYST_PROMPT = """You are a highly skilled private equity agent specializing in market analysis. Your expertise covers:
-1. Analyzing industry trends, growth drivers, and potential disruptors
-2. Evaluating competitive landscape and market positioning
-3. Assessing market size, segmentation, and growth potential
-4. Analyzing customer dynamics, including concentration and loyalty
-5. Identifying potential regulatory or macroeconomic impacts on the market
-Deliver comprehensive market analysis to assess the attractiveness and risks of potential investments."""
-
-OPERATIONAL_ANALYST_PROMPT = """You are an expert private equity agent focused on operational analysis. Your core competencies include:
-1. Evaluating operational efficiency and identifying improvement opportunities
-2. Analyzing supply chain and procurement processes
-3. Assessing sales and marketing effectiveness
-4. Evaluating IT systems and digital capabilities
-5. Identifying potential synergies in merger or add-on acquisition scenarios
-Provide detailed operational analysis to uncover value creation opportunities and potential risks."""
-
-# Initialize specialized agents
-data_extractor_agent = Agent(
- agent_name="Data-Extractor",
- system_prompt=DATA_EXTRACTOR_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="data_extractor_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-summarizer_agent = Agent(
- agent_name="Document-Summarizer",
- system_prompt=SUMMARIZER_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="summarizer_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-financial_analyst_agent = Agent(
- agent_name="Financial-Analyst",
- system_prompt=FINANCIAL_ANALYST_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="financial_analyst_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-market_analyst_agent = Agent(
- agent_name="Market-Analyst",
- system_prompt=MARKET_ANALYST_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="market_analyst_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-operational_analyst_agent = Agent(
- agent_name="Operational-Analyst",
- system_prompt=OPERATIONAL_ANALYST_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="operational_analyst_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-# Initialize the SwarmRouter
-router = SwarmRouter(
- name="pe-document-analysis-swarm",
- description="Analyze documents for private equity due diligence and investment decision-making",
- max_loops=1,
- agents=[
- data_extractor_agent,
- summarizer_agent,
- financial_analyst_agent,
- market_analyst_agent,
- operational_analyst_agent,
- ],
- swarm_type="ConcurrentWorkflow", # or "SequentialWorkflow" or "ConcurrentWorkflow" or
-)
-
-# Example usage
-if __name__ == "__main__":
- # Run a comprehensive private equity document analysis task
- result = router.run(
- "Where is the best place to find template term sheets for series A startups. Provide links and references"
- )
- print(result)
-
- # Retrieve and print logs
- for log in router.get_logs():
- print(f"{log.timestamp} - {log.level}: {log.message}")
-
-```
-
-### Changing Swarm Types
-
-You can create multiple SwarmRouter instances with different swarm types:
+from swarms import Agent, GroupChat
-```python
-sequential_router = SwarmRouter(
- name="SequentialRouter",
- agents=[
- data_extractor_agent,
- summarizer_agent,
- financial_analyst_agent,
- market_analyst_agent,
- operational_analyst_agent,
- ],
- swarm_type=SwarmType.SequentialWorkflow
-)
+# Define agents for a debate
+tech_optimist = Agent(agent_name="TechOptimist", system_prompt="Argue for the benefits of AI in society.", model_name="gpt-4o-mini")
+tech_critic = Agent(agent_name="TechCritic", system_prompt="Argue against the unchecked advancement of AI.", model_name="gpt-4o-mini")
-concurrent_router = SwarmRouter(
- name="ConcurrentRouter",
- agents=[
- data_extractor_agent,
- summarizer_agent,
- financial_analyst_agent,
- market_analyst_agent,
- operational_analyst_agent,
- ],
- swarm_type=SwarmType.ConcurrentWorkflow
+# Create the group chat
+chat = GroupChat(
+ agents=[tech_optimist, tech_critic],
+ max_loops=4, # Limit the number of turns in the conversation
)
-```
-### AgentRearrange
-
-Use Case: Optimizing agent order for complex multi-step tasks.
-
-```python
-rearrange_router = SwarmRouter(
- name="TaskOptimizer",
- description="Optimize agent order for multi-step tasks",
- max_loops=3,
- agents=[
- data_extractor_agent,
- summarizer_agent,
- financial_analyst_agent,
- market_analyst_agent,
- operational_analyst_agent,
- ],
- swarm_type=SwarmType.AgentRearrange,
- flow = f"{data_extractor.name} -> {analyzer.name} -> {summarizer.name}"
+# Run the chat with an initial topic
+conversation_history = chat.run(
+ "Let's discuss the societal impact of artificial intelligence."
)
-result = rearrange_router.run("Analyze and summarize the quarterly financial report")
+# Print the full conversation
+for message in conversation_history:
+ print(f"[{message['agent_name']}]: {message['content']}")
```
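+To control the speaking order explicitly, `GroupChat` can take a speaker-selection function. A minimal sketch, assuming the `speaker_fn` parameter and the bundled `expertise_based` selector are available in your version:
+```python
+from swarms import GroupChat, expertise_based
+
+# Reuses `tech_optimist` and `tech_critic` from the example above
+moderated_chat = GroupChat(
+    name="AI Policy Debate",
+    description="A moderated debate on AI policy",
+    agents=[tech_optimist, tech_critic],
+    speaker_fn=expertise_based,  # assumed: picks the most relevant agent each turn
+    max_loops=4,
+)
+print(moderated_chat.run("Should governments regulate frontier AI models?"))
+```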
-### MixtureOfAgents
+---
-Use Case: Combining diverse expert agents for comprehensive analysis.
+## Documentation
-```python
-mixture_router = SwarmRouter(
- name="ExpertPanel",
- description="Combine insights from various expert agents",
- max_loops=1,
- agents=[
- data_extractor_agent,
- summarizer_agent,
- financial_analyst_agent,
- market_analyst_agent,
- operational_analyst_agent,
- ],
- swarm_type=SwarmType.MixtureOfAgents
-)
+Documentation is available at [docs.swarms.world](https://docs.swarms.world).
-result = mixture_router.run("Evaluate the potential acquisition of TechStartup Inc.")
-```
+---
--------
-## GroupChat
+## Guides and Walkthroughs
-A production-grade multi-agent system enabling sophisticated group conversations between AI agents with customizable speaking patterns, parallel processing capabilities, and comprehensive conversation tracking.
+Here are quick reference guides on how to get started with swarms.
+
+| Section | Description | Links |
+|----------------------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|
+| Installation | Complete setup guide for Swarms in your environment | [Installation](https://docs.swarms.world/en/latest/swarms/install/install/) |
+| Quickstart | Get up and running with your first swarm in minutes | [Get Started](https://docs.swarms.world/en/latest/swarms/install/quickstart/) |
+| Agent Internal Mechanisms | Deep dive into how agents work internally | [Agent Architecture](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/) |
+| Agent API | Complete reference for the Agent class and its methods | [Agent API](https://docs.swarms.world/en/latest/swarms/structs/agent/) |
+| Integrating External Agents | Connect Swarms with other AI frameworks like Griptape and Autogen | [Integrating External Agents](https://docs.swarms.world/en/latest/swarms/agents/external_party_agents/) |
+| Creating Agents from YAML | Define and configure agents using YAML configuration files | [Creating Agents from YAML](https://docs.swarms.world/en/latest/swarms/agents/create_agents_yaml/) |
+| Why You Need Swarms | Understanding the benefits of multi-agent collaboration | [Why Multi-Agent Collaboration is Necessary](https://docs.swarms.world/en/latest/swarms/concept/why/) |
+| Multi-Agent Architectures Analysis | Comprehensive analysis of different swarm patterns and architectures | [Multi-Agent Architectures](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) |
+| Choosing the Right Swarm | Guide to selecting the optimal swarm architecture for your specific business needs | [Business Problem Guide](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) |
+| AgentRearrange Docs | Documentation for dynamic agent rearrangement and workflow optimization | [AgentRearrange API](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) |
-```python
-from swarms import Agent, GroupChat, expertise_based
-if __name__ == "__main__":
+---
- # Example agents
- agent1 = Agent(
- agent_name="Financial-Analysis-Agent",
- system_prompt="You are a financial analyst specializing in investment strategies.",
- model_name="gpt-4o-mini",
- temperature=0.1,
- max_loops=1,
- autosave=False,
- dashboard=False,
- verbose=True,
- dynamic_temperature_enabled=True,
- user_name="swarms_corp",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
- streaming_on=False,
- )
+## 🫶 Contribute to Swarms
- agent2 = Agent(
- agent_name="Tax-Adviser-Agent",
- system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
- model_name="gpt-4o-mini",
- temperature=0.1,
- max_loops=1,
- autosave=False,
- dashboard=False,
- verbose=True,
- dynamic_temperature_enabled=True,
- user_name="swarms_corp",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
- streaming_on=False,
- )
+Swarms is built by the community, for the community. We believe that collaborative development is the key to pushing the boundaries of what's possible with multi-agent AI. Your contributions are not only welcome—they are essential to our mission. [Learn more about why you should contribute to swarms](https://docs.swarms.world/en/latest/contributors/main/)
- agents = [agent1, agent2]
+### Why Contribute?
- chat = GroupChat(
- name="Investment Advisory",
- description="Financial and tax analysis group",
- agents=agents,
- speaker_fn=expertise_based,
- )
+By joining us, you have the opportunity to:
- history = chat.run(
- "How to optimize tax strategy for investments?"
- )
- print(history)
+* 🚀 **Work on the Frontier of Agents:** Shape the future of autonomous agent technology and help build a production-grade, open-source framework.
+* 🤝 **Join a Vibrant Community:** Collaborate with a passionate and growing group of agent developers, researchers, and AI enthusiasts.
-```
+* 🛠️ **Make a Tangible Impact:** Whether you're fixing a bug, adding a new feature, or improving documentation, your work will be used in real-world applications.
----
+* 📚 **Learn and Grow:** Gain hands-on experience with advanced AI concepts and strengthen your software engineering skills.
-## MultiAgentRouter
+Discover more about our mission and the benefits of becoming a contributor in our official [**Contributor's Guide**](https://docs.swarms.world/en/latest/contributors/main/).
-The MultiAgentRouter is a swarm architecture designed to dynamically assign tasks to the most suitable agent. It achieves this through a director or boss entity that utilizes function calls to identify and allocate tasks to the agent best equipped to handle them. [Check out the documentation](https://docs.swarms.world/en/latest/swarms/structs/multi_agent_router/)
+### How to Get Started
-```python
-from swarms import Agent
-from swarms.structs.multi_agent_orchestrator import MultiAgentRouter
-
-# Example usage:
-if __name__ == "__main__":
- # Define some example agents
- agents = [
- Agent(
- agent_name="ResearchAgent",
- description="Specializes in researching topics and providing detailed, factual information",
- system_prompt="You are a research specialist. Provide detailed, well-researched information about any topic, citing sources when possible.",
- model_name="openai/gpt-4o",
- ),
- Agent(
- agent_name="CodeExpertAgent",
- description="Expert in writing, reviewing, and explaining code across multiple programming languages",
- system_prompt="You are a coding expert. Write, review, and explain code with a focus on best practices and clean code principles.",
- model_name="openai/gpt-4o",
- ),
- Agent(
- agent_name="WritingAgent",
- description="Skilled in creative and technical writing, content creation, and editing",
- system_prompt="You are a writing specialist. Create, edit, and improve written content while maintaining appropriate tone and style.",
- model_name="openai/gpt-4o",
- ),
- ]
-
- # Initialize routers with different configurations
- router_execute = MultiAgentRouter(agents=agents, execute_task=True)
-
- # Example task
- task = "Write a Python function to calculate fibonacci numbers"
-
- try:
- # Process the task with execution
- print("\nWith task execution:")
- result_execute = router_execute.route_task(task)
- print(result_execute)
-
- except Exception as e:
- print(f"Error occurred: {str(e)}")
-```
+We've made it easy to start contributing. Here's how you can help:
+1. **Find an Issue to Tackle:** The best way to begin is by visiting our [**contributing project board**](https://github.com/users/kyegomez/projects/1). Look for issues tagged with `good first issue`—these are specifically selected for new contributors.
-----------
+2. **Report a Bug or Request a Feature:** Have a new idea or found something that isn't working right? We'd love to hear from you. Please [**file a Bug Report or Feature Request**](https://github.com/kyegomez/swarms/issues) on our GitHub Issues page.
-## Onboarding Session
+3. **Understand Our Workflow and Standards:** Before submitting your work, please review our complete [**Contribution Guidelines**](https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md). To help maintain code quality, we also encourage you to read our guide on [**Code Cleanliness**](https://docs.swarms.world/en/latest/swarms/framework/code_cleanliness/).
-Get onboarded now with the creator and lead maintainer of Swarms, Kye Gomez, who will show you how to get started with the installation, usage examples, and starting to build your custom use case! [CLICK HERE](https://cal.com/swarms/swarms-onboarding-session)
+4. **Join the Discussion:** To participate in roadmap discussions and connect with other developers, join our community on [**Discord**](https://discord.gg/jM3Z6M9uMq).
----
+### ✨ Our Valued Contributors
-## Documentation
+Thank you for contributing to swarms. Your work is greatly appreciated and recognized.
-Documentation is located here at: [docs.swarms.world](https://docs.swarms.world)
+
+
+
-----
-## 🫶 Contributions:
-
-The easiest way to contribute is to pick any issue with the `good first issue` tag 💪. Read the Contributing guidelines [here](/CONTRIBUTING.md). Bug Report? [File here](https://github.com/swarms/gateway/issues) | Feature Request? [File here](https://github.com/swarms/gateway/issues)
-
-Swarms is an open-source project, and contributions are VERY welcome. If you want to contribute, you can create new features, fix bugs, or improve the infrastructure. Please refer to the [CONTRIBUTING.md](https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md) and our [contributing board](https://github.com/users/kyegomez/projects/1) to participate in Roadmap discussions!
-
-----
-
+## Connect With Us
-### Connect With Us
-
-| Platform | Link | Description |
-|----------|------|-------------|
-| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
-| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
-| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
-| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
-| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
-| 🎫 Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events |
+Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights!
+
+| Platform | Description | Link |
+|----------|-------------|------|
+| 📚 Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
+| 📝 Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
+| 💬 Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| 🐦 Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
+| 👥 LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
+| 📺 YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
+| 🎫 Events | Join our community events | [Sign up here](https://lu.ma/5p2jnc2v) |
+| 🚀 Onboarding Session | Get onboarded with Kye Gomez, creator and lead maintainer of Swarms | [Book Session](https://cal.com/swarms/swarms-onboarding-session) |
+------
## Citation
If you use **swarms** in your research, please cite the project by referencing the metadata in [CITATION.cff](./CITATION.cff).
+```bibtex
+@misc{SWARMS_2022,
+ author = {Gomez, Kye and Pliny and More, Harshal and Swarms Community},
+ title = {{Swarms: Production-Grade Multi-Agent Infrastructure Platform}},
+ year = {2022},
+ howpublished = {\url{https://github.com/kyegomez/swarms}},
+ note = {Documentation available at \url{https://docs.swarms.world}},
+ version = {latest}
+}
+```
# License
APACHE
+
+
diff --git a/docs/contributors/main.md b/docs/contributors/main.md
new file mode 100644
index 00000000..e69ec8a3
--- /dev/null
+++ b/docs/contributors/main.md
@@ -0,0 +1,221 @@
+# Contributing to Swarms: Building the Infrastructure for The Agentic Economy
+
+Multi-agent collaboration is the most important technology in human history. It will reshape civilization by enabling billions of autonomous agents to coordinate and solve problems at unprecedented scale.
+
+!!! success "The Foundation of Tomorrow"
+ **Swarms** is the foundational infrastructure powering this autonomous economy. By contributing, you're building the systems that will enable the next generation of intelligent automation.
+
+### What You're Building
+
+=== "Autonomous Systems"
+ **Autonomous Resource Allocation**
+
+ Global supply chains and energy distribution optimized in real-time
+
+=== "Intelligence Networks"
+ **Distributed Decision Making**
+
+ Collaborative intelligence networks across industries and governments
+
+=== "Smart Markets"
+ **Self-Organizing Markets**
+
+ Agent-driven marketplaces that automatically balance supply and demand
+
+=== "Problem Solving"
+ **Collaborative Problem Solving**
+
+ Massive agent swarms tackling climate change, disease, and scientific discovery
+
+=== "Infrastructure"
+ **Adaptive Infrastructure**
+
+ Self-healing systems that evolve without human intervention
+
+---
+
+## Why Contribute to Swarms?
+
+### :material-rocket-launch: Shape the Future of Civilization
+
+!!! abstract "Your Impact"
+ - Define standards for multi-agent communication protocols
+ - Build architectural patterns for distributed intelligence systems
+ - Create frameworks for deploying agent swarms in production
+ - Establish ethical guidelines for autonomous agent collaboration
+
+### :material-trophy: Recognition and Professional Development
+
+!!! tip "Immediate Recognition"
+ - **Social Media Features** - All merged PRs showcased publicly
+ - **Bounty Programs** - Financial rewards for high-impact contributions
+ - **Fast-Track Hiring** - Priority consideration for core team positions
+ - **Community Spotlights** - Regular recognition and acknowledgments
+
+!!! info "Career Benefits"
+ - Multi-agent expertise highly valued by AI industry
+ - Portfolio demonstrates cutting-edge technical skills
+ - Direct networking with leading researchers and companies
+ - Thought leadership opportunities in emerging field
+
+### :material-brain: Technical Expertise Development
+
+Master cutting-edge technologies:
+
+| Technology Area | Skills You'll Develop |
+|----------------|----------------------|
+| **Swarm Intelligence** | Design sophisticated agent coordination mechanisms |
+| **Distributed Computing** | Build scalable architectures for thousands of agents |
+| **Communication Protocols** | Create novel interaction patterns |
+| **Production AI** | Deploy and orchestrate enterprise-scale systems |
+| **Research Implementation** | Turn cutting-edge papers into working code |
+
+### :material-account-group: Research Community Access
+
+!!! note "Collaborative Environment"
+ - Work with experts from academic institutions and industry
+ - Regular technical seminars and research discussions
+ - Structured mentorship from experienced contributors
+ - Applied research opportunities with real-world impact
+
+---
+
+## Contribution Opportunities
+
+=== "New Contributors"
+ ### :material-school: Perfect for Getting Started
+
+ - **Documentation** - Improve guides, tutorials, and API references
+ - **Bug Reports** - Identify and document issues
+ - **Code Quality** - Participate in testing and review processes
+ - **Community Support** - Help users in forums and discussions
+
+=== "Experienced Developers"
+ ### :material-code-braces: Advanced Technical Work
+
+ - **Core Architecture** - Design fundamental system components
+ - **Performance Optimization** - Enhance coordination and communication efficiency
+ - **Research Implementation** - Turn cutting-edge papers into working code
+ - **Integration Development** - Build connections with AI tools and platforms
+
+=== "Researchers"
+ ### :material-flask: Research and Innovation
+
+ - **Algorithm Development** - Implement novel multi-agent algorithms
+ - **Experimental Frameworks** - Create evaluation and benchmarking tools
+ - **Theoretical Contributions** - Develop research documentation and frameworks
+ - **Academic Collaboration** - Partner on funded research projects
+
+---
+
+## How to Contribute
+
+### Step 1: Get Started
+
+!!! info "Essential Resources"
+ [:material-book-open-page-variant: **Documentation**](https://docs.swarms.world/en/latest/){ .md-button .md-button--primary }
+ [:material-github: **GitHub Repository**](https://github.com/kyegomez/swarms){ .md-button }
+ [:material-chat: **Community Channels**](#){ .md-button }
+
+### Step 2: Find Your Path
+
+```mermaid
+graph TD
+ A[Choose Your Path] --> B[Browse Issues]
+ A --> C[Review Roadmap]
+ A --> D[Propose Ideas]
+ B --> E[good first issue]
+ B --> F[help wanted]
+ C --> G[Core Features]
+ C --> H[Research Areas]
+ D --> I[Discussion Forums]
+```
+
+### Step 3: Make Impact
+
+1. **Fork & Setup** - Configure your development environment
+2. **Develop** - Create your contribution
+3. **Submit** - Open a pull request
+4. **Collaborate** - Work with maintainers
+5. **Celebrate** - See your work recognized
+
+---
+
+## Recognition Framework
+
+### :material-flash: Immediate Benefits
+
+!!! success "Instant Recognition"
+ | Benefit | Description |
+ |---------|-------------|
+ | **Social Media Features** | Every merged PR showcased publicly |
+ | **Community Recognition** | Contributor badges and documentation credits |
+ | **Professional References** | Formal acknowledgment for portfolios |
+ | **Direct Mentorship** | Access to core team guidance |
+
+### :material-trending-up: Long-term Opportunities
+
+!!! tip "Career Growth"
+ - **Team Positions** - Fast-track consideration for core team roles
+ - **Conference Speaking** - Present work at AI conferences and events
+ - **Industry Connections** - Network with leading AI organizations
+ - **Research Collaboration** - Partner with academic institutions
+
+---
+
+## Societal Impact
+
+!!! abstract "Building Solutions for Humanity"
+ Swarms enables technology that addresses critical challenges:
+
+ === "Research"
+ **Scientific Research**
+
+ Accelerate collaborative research and discovery across disciplines
+
+ === "Healthcare"
+ **Healthcare Innovation**
+
+ Support drug discovery and personalized medicine development
+
+ === "Environment"
+ **Environmental Solutions**
+
+ Monitor climate and optimize sustainability initiatives
+
+ === "Education"
+ **Educational Technology**
+
+ Create adaptive learning systems for personalized education
+
+ === "Economy"
+ **Economic Innovation**
+
+ Generate new opportunities and efficiency improvements
+
+---
+
+## Get Involved
+
+### :material-link: Connect With Us
+
+!!! info "Join the Community"
+ [:material-github: **GitHub Repository**](https://github.com/kyegomez/swarms){ .md-button .md-button--primary }
+ [:material-book: **Documentation**](https://docs.swarms.world/en/latest/){ .md-button }
+ [:material-forum: **Community Forums**](#){ .md-button }
+
+---
+
+!!! warning "The Future is Now"
+ Multi-agent collaboration will define the next century of human progress. The autonomous economy depends on the infrastructure we build today.
+
+!!! success "Your Mission"
+ Your contribution to Swarms helps create the foundation for billions of autonomous agents working together to solve humanity's greatest challenges.
+
+ **Join us in building the most important technology of our time.**
+
+---
+
+
+*Built with :material-heart: by the global Swarms community*
+
\ No newline at end of file
diff --git a/docs/examples/agent_stream.md b/docs/examples/agent_stream.md
new file mode 100644
index 00000000..2c5bc6b9
--- /dev/null
+++ b/docs/examples/agent_stream.md
@@ -0,0 +1,62 @@
+# Agent with Streaming
+
+The Swarms framework provides powerful real-time streaming capabilities for agents, allowing you to see responses being generated token by token as they're produced by the language model. This creates a more engaging and interactive experience, especially useful for long-form content generation, debugging, or when you want to provide immediate feedback to users.
+
+## Installation
+
+Install the swarms package using pip:
+
+```bash
+pip install -U swarms
+```
+
+## Basic Setup
+
+1. First, create a `.env` file with your environment variables:
+
+```bash
+WORKSPACE_DIR="agent_workspace"
+OPENAI_API_KEY=""
+```
+
+## Step by Step
+
+- Install swarms and add your API keys to `.env`
+
+- Turn on streaming in `Agent()` with `streaming_on=True`
+
+- Optional: Set `print_on=True` to pretty-print the streamed output; leave it at the default `False` for raw streaming (see the raw-streaming variant after the example below)
+
+## Code
+
+```python
+from swarms import Agent
+
+# Enable real-time streaming
+agent = Agent(
+ agent_name="StoryAgent",
+ model_name="gpt-4o-mini",
+ streaming_on=True, # 🔥 This enables real streaming!
+ max_loops=1,
+ print_on=True, # By default, it's False for raw streaming!
+)
+
+# This will now stream in real-time with a beautiful UI!
+response = agent.run("Tell me a detailed story about humanity colonizing the stars")
+print(response)
+```
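+
+For raw streaming without the formatted output, the same example can be flipped by leaving `print_on` at its default. This is a minimal sketch under the same assumptions as the example above; the exact console behavior while streaming may vary by swarms version:
+
+```python
+from swarms import Agent
+
+# Raw streaming: tokens are emitted as-is, without the pretty-printed panel
+raw_agent = Agent(
+    agent_name="RawStreamAgent",
+    model_name="gpt-4o-mini",
+    streaming_on=True,   # stream tokens as they are generated
+    print_on=False,      # default: raw streaming instead of pretty printing
+    max_loops=1,
+)
+
+# Capture whatever run() returns once streaming completes
+response = raw_agent.run("Summarize the benefits of multi-agent systems in three bullet points.")
+print(response)
+```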
+
+## Connect With Us
+
+If you'd like technical support, join our Discord below and follow us on Twitter for the latest updates!
+
+| Platform | Link | Description |
+|----------|------|-------------|
+| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
+| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
+| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
+| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
+| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
+| 🎫 Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events |
+
diff --git a/docs/index.md b/docs/index.md
index 0b6d1f4e..ceb80cc1 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -2,6 +2,24 @@
[](https://discord.gg/jM3Z6M9uMq) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
+## What is Swarms?
+
+**Swarms** is the **first and most reliable production-grade multi-agent framework** designed to orchestrate intelligent AI agents at scale. Built for enterprise applications, Swarms enables you to create sophisticated multi-agent systems that can handle complex tasks through collaboration, parallel processing, and intelligent task distribution.
+
+### Key Capabilities
+
+- **🏢 Production-Ready**: Enterprise-grade infrastructure with high reliability, comprehensive logging, and robust error handling
+- **🤖 Multi-Agent Orchestration**: Support for hierarchical swarms, parallel processing, sequential workflows, and dynamic agent rearrangement
+- **🔄 Flexible Integration**: Multi-model support, custom agent creation, extensive tool library, and multiple memory systems
+- **📈 Scalable Architecture**: Concurrent processing, resource management, load balancing, and horizontal scaling capabilities
+- **🛠️ Developer-Friendly**: Simple API, extensive documentation, active community, and CLI tools for rapid development
+- **🔐 Enterprise Security**: Built-in error handling, rate limiting, monitoring integration, and audit logging
+
+### Why Choose Swarms?
+
+Swarms stands out as the **most reliable multi-agent framework** because it was built from the ground up for production environments. Unlike other frameworks that focus on research or simple demos, Swarms provides the infrastructure, tooling, and best practices needed to deploy multi-agent systems in real-world applications.
+
+Whether you're building financial analysis systems, healthcare diagnostics, manufacturing optimization, or any other complex multi-agent application, Swarms provides the foundation you need to succeed.
## Swarms Installation
@@ -55,17 +73,19 @@ Here you'll find references about the Swarms framework, marketplace, community,
| Swarms Corp Github | [Swarms Corp GitHub](https://github.com/The-Swarm-Corporation) |
-## Community
-| Section | Links |
-|----------------------|--------------------------------------------------------------------------------------------|
-| Community | [Discord](https://discord.gg/jM3Z6M9uMq) |
-| Blog | [Blog](https://medium.com/@kyeg) |
-| Event Calendar | [LUMA](https://lu.ma/swarms_calendar) |
-| Twitter | [Twitter](https://x.com/swarms_corp) |
-| Agent Marketplace | [Website](https://swarms.world) |
-| Docs | [Website](https://docs.swarms.world) |
-| Swarms Website | [Website](https://swarms.ai) |
+## Join the Swarms Community
+
+| Platform | Link | Description |
+|----------|------|-------------|
+| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
+| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
+| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
+| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
+| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
+| 🎫 Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events |
## Get Support
Want to get in touch with the Swarms team? Open an issue on [GitHub](https://github.com/kyegomez/swarms/issues/new) or reach out to us via [email](mailto:kye@swarms.world). We're here to help!
+
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 9b1a95e8..b6c7f57b 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -38,11 +38,6 @@ plugins:
# token: !ENV ["GITHUB_TOKEN"]
- git-revision-date-localized:
enable_creation_date: true
- # - mkdocs-jupyter:
- # kernel_name: python3
- # execute: false
- # include_source: True
- # include_requirejs: true
extra_css:
- assets/css/extra.css
@@ -52,29 +47,17 @@ extra:
link: https://x.com/swarms_corp
- icon: fontawesome/brands/github
link: https://github.com/kyegomez/swarms
- - icon: fontawesome/brands/twitter
- link: https://x.com/swarms_corp
- icon: fontawesome/brands/discord
link: https://discord.gg/jM3Z6M9uMq
+ - icon: fontawesome/brands/youtube
+ link: https://www.youtube.com/@kyegomez3242
+ - icon: fontawesome/brands/linkedin
+ link: https://www.linkedin.com/company/swarms-corp/
analytics:
provider: google
property: G-MPE9C65596
- alternate:
- - name: English
- link: /
- lang: en
- - name: 简体中文
- link: /zh/
- lang: zh
- - name: 日本語
- link: /ja/
- lang: ja
- - name: 한국어
- link: /ko/
- lang: ko
-
theme:
name: material
custom_dir: overrides
@@ -97,7 +80,7 @@ theme:
- content.code.annotate
- navigation.tabs
- navigation.sections
- - navigation.expand
+ # - navigation.expand
- navigation.top
- announce.dismiss
font:
@@ -105,23 +88,6 @@ theme:
code: "Fira Code" # Modern look for code snippets
- # Add language selector
- language: en
- alternate:
- - name: English
- link: /
- lang: en
- - name: 简体中文
- link: /zh/
- lang: zh
- - name: 日本語
- link: /ja/
- lang: ja
- - name: 한국어
- link: /ko/
- lang: ko
-
-
# Extensions
markdown_extensions:
- abbr
@@ -170,22 +136,26 @@ markdown_extensions:
case: lower
- pymdownx.tasklist:
custom_checkbox: true
- - pymdownx.tilde
+ - pymdownx.inlinehilite
+
nav:
- Home:
+ - Overview: "quickstart.md"
+ - Installation: "swarms/install/install.md"
+ - Environment Configuration: "swarms/install/env.md"
+ - Agents: "swarms/agents/index.md"
+ - Multi-Agent Architectures: "swarms/structs/index.md"
+ # - Learn More: "swarms/learn_more/index.md"
+
+ - Guides:
- Overview: "index.md"
- Onboarding:
- Installation: "swarms/install/install.md"
- Environment Configuration: "swarms/install/env.md"
- Quickstart: "swarms/install/quickstart.md"
- # - Swarms CLI: "swarms/cli/main.md"
- # - Swarms Framework Architecture: "swarms/concept/framework_architecture.md"
- # - Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md"
- # - Swarms Products: "swarms/products.md"
- # - Swarms Vision: "swarms/concept/vision.md"
-
+ - Feature Set: "swarms/features.md"
- Agents:
- # - Overview: "swarms/structs/index.md"
+ - Overview: "swarms/agents/index.md"
- Concepts:
# - Managing Prompts in Production: "swarms/prompts/main.md"
- Introduction into The Agent Architecture: "swarms/framework/agents_explained.md"
@@ -215,55 +185,62 @@ nav:
- GKP Agent: "swarms/agents/gkp_agent.md"
- Agent Judge: "swarms/agents/agent_judge.md"
- - Swarm Architectures:
+ - Multi-Agent Architectures:
- Introduction to Multi-Agent Collaboration: "swarms/concept/why.md"
- Concepts:
- - Introduction to Swarm Architectures: "swarms/concept/swarm_architectures.md"
- - How to Choose the Right Swarm Architecture: "swarms/concept/how_to_choose_swarms.md"
+ - Introduction to Multi Agent Architectures: "swarms/concept/swarm_architectures.md"
+ - How to Choose the Right Multi Agent Architecture: "swarms/concept/how_to_choose_swarms.md"
- How to Build Custom Swarms: "swarms/structs/custom_swarm.md"
- - How to Create New Swarm Architectures: "swarms/structs/create_new_swarm.md"
- - Introduction to Hiearchical Swarm Architectures: "swarms/structs/multi_swarm_orchestration.md"
+ - How to Create New Multi Agent Architectures: "swarms/structs/create_new_swarm.md"
+ - Introduction to Hiearchical Multi Agent Architectures: "swarms/structs/multi_swarm_orchestration.md"
- - Swarm Architectures Documentation:
+ - Multi-Agent Architectures Documentation:
- Overview: "swarms/structs/overview.md"
- MajorityVoting: "swarms/structs/majorityvoting.md"
- - AgentRearrange: "swarms/structs/agent_rearrange.md"
- RoundRobin: "swarms/structs/round_robin_swarm.md"
- Mixture of Agents: "swarms/structs/moa.md"
- - GroupChat: "swarms/structs/group_chat.md"
- - AgentRegistry: "swarms/structs/agent_registry.md"
- SpreadSheetSwarm: "swarms/structs/spreadsheet_swarm.md"
- ForestSwarm: "swarms/structs/forest_swarm.md"
- - SwarmRouter: "swarms/structs/swarm_router.md"
- - TaskQueueSwarm: "swarms/structs/taskqueue_swarm.md"
- - SwarmRearrange: "swarms/structs/swarm_rearrange.md"
- - MultiAgentRouter: "swarms/structs/multi_agent_router.md"
- - MatrixSwarm: "swarms/structs/matrix_swarm.md"
- - ModelRouter: "swarms/structs/model_router.md"
- MALT: "swarms/structs/malt.md"
- - Interactive Group Chat: "swarms/structs/interactive_groupchat.md"
- Various Execution Methods: "swarms/structs/various_execution_methods.md"
- Deep Research Swarm: "swarms/structs/deep_research_swarm.md"
- - Swarm Matcher: "swarms/structs/swarm_matcher.md"
- Council of Judges: "swarms/structs/council_of_judges.md"
+
- Hiearchical Architectures:
- Auto Agent Builder: "swarms/structs/auto_agent_builder.md"
- Hybrid Hierarchical-Cluster Swarm: "swarms/structs/hhcs.md"
- Auto Swarm Builder: "swarms/structs/auto_swarm_builder.md"
+ - Swarm Matcher: "swarms/structs/swarm_matcher.md"
- Multi-Agent Multi-Modal Structures:
- ImageAgentBatchProcessor: "swarms/structs/image_batch_agent.md"
-
-
+
+ - Storage:
+ - AgentRegistry: "swarms/structs/agent_registry.md"
+
+ - Routers:
+ - SwarmRouter: "swarms/structs/swarm_router.md"
+ - MultiAgentRouter: "swarms/structs/multi_agent_router.md"
+ - ModelRouter: "swarms/structs/model_router.md"
+
+ - Rearrangers:
+ - SwarmRearrange: "swarms/structs/swarm_rearrange.md"
+ - AgentRearrange: "swarms/structs/agent_rearrange.md"
+
+ - GroupChats:
+ - GroupChat: "swarms/structs/group_chat.md"
+ - Interactive Group Chat: "swarms/structs/interactive_groupchat.md"
+
- Workflows:
- ConcurrentWorkflow: "swarms/structs/concurrentworkflow.md"
- SequentialWorkflow: "swarms/structs/sequential_workflow.md"
- GraphWorkflow: "swarms/structs/graph_workflow.md"
+
- Communication Structure: "swarms/structs/conversation.md"
- - Swarms Tools:
+ - Tools:
- Overview: "swarms_tools/overview.md"
- BaseTool Reference: "swarms/tools/base_tool.md"
- MCP Client Utils: "swarms/tools/mcp_client_call.md"
@@ -274,7 +251,7 @@ nav:
- Social Media:
- Twitter: "swarms_tools/twitter.md"
- - Swarms Memory:
+ - Memory:
- Overview: "swarms_memory/index.md"
- Memory Systems:
- ChromaDB: "swarms_memory/chromadb.md"
@@ -282,51 +259,41 @@ nav:
- Faiss: "swarms_memory/faiss.md"
- Deployment Solutions:
- - Deploy your Swarms on Google Cloud Run: "swarms_cloud/cloud_run.md"
- - Deploy your Swarms on Phala: "swarms_cloud/phala_deploy.md"
-
- - About Us:
- - Swarms Vision: "swarms/concept/vision.md"
- - Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md"
- - Swarms Products: "swarms/products.md"
-
- - Contributors:
- - Swarms Framework Architecture: "swarms/concept/framework_architecture.md"
- - Bounty Program: "corporate/bounty_program.md"
- - Contributing to Documentation: "contributors/docs.md"
- - Contributing Tools/Custom Plugins for Swarms: "contributors/tools.md"
- - Contributing:
- - Contributing: "swarms/contributing.md"
- - Tests: "swarms/framework/test.md"
- - Code Cleanliness: "swarms/framework/code_cleanliness.md"
- - Philosophy: "swarms/concept/philosophy.md"
- - Changelog:
- - Swarms 5.6.8: "swarms/changelog/5_6_8.md"
- - Swarms 5.8.1: "swarms/changelog/5_8_1.md"
- - Swarms 5.9.2: "swarms/changelog/changelog_new.md"
+ - Deploy your agents on Google Cloud Run: "swarms_cloud/cloud_run.md"
+ - Deploy your agents on Phala: "swarms_cloud/phala_deploy.md"
+ # - Deploy your agents on FastAPI:
+
- Examples:
- Overview: "examples/index.md"
- CookBook Index: "examples/cookbook_index.md"
- - Customizing Agents:
- - Basic Agent: "swarms/examples/basic_agent.md"
- - Agents with Callable Tools: "swarms/examples/agent_with_tools.md"
- # - Agent With MCP Integration: "swarms/examples/agent_with_mcp.md"
- - Agent Output Types: "swarms/examples/agent_output_types.md"
- - Agent with Structured Outputs: "swarms/examples/agent_structured_outputs.md"
- - Agents with Vision: "swarms/examples/vision_processing.md"
- - Gradio Chat Interface: "swarms/ui/main.md"
- - Various Model Providers:
- - OpenAI: "swarms/examples/openai_example.md"
- - Anthropic: "swarms/examples/claude.md"
- - Groq: "swarms/examples/groq.md"
- - Cohere: "swarms/examples/cohere.md"
- - DeepSeek: "swarms/examples/deepseek.md"
- - Ollama: "swarms/examples/ollama.md"
- - OpenRouter: "swarms/examples/openrouter.md"
- - XAI: "swarms/examples/xai.md"
- - VLLM: "swarms/examples/vllm_integration.md"
- - Llama4: "swarms/examples/llama4.md"
+ - Basic Examples:
+ - Individual Agents:
+ - Basic Agent: "swarms/examples/basic_agent.md"
+ - Tool Usage:
+ - Agents with Vision and Tool Usage: "swarms/examples/vision_tools.md"
+ - Agents with Callable Tools: "swarms/examples/agent_with_tools.md"
+ - Agent with Structured Outputs: "swarms/examples/agent_structured_outputs.md"
+ - Agent With MCP Integration: "swarms/examples/agent_with_mcp.md"
+ - Vision:
+ - Agents with Vision: "swarms/examples/vision_processing.md"
+ - Agent with Multiple Images: "swarms/examples/multiple_images.md"
+ - Utils:
+ - Agent with Streaming: "examples/agent_stream.md"
+ - Agent Output Types: "swarms/examples/agent_output_types.md"
+ - Gradio Chat Interface: "swarms/ui/main.md"
+ - LLM Providers:
+ - Overview: "swarms/examples/model_providers.md"
+ - OpenAI: "swarms/examples/openai_example.md"
+ - Anthropic: "swarms/examples/claude.md"
+ - Groq: "swarms/examples/groq.md"
+ - Cohere: "swarms/examples/cohere.md"
+ - DeepSeek: "swarms/examples/deepseek.md"
+ - Ollama: "swarms/examples/ollama.md"
+ - OpenRouter: "swarms/examples/openrouter.md"
+ - XAI: "swarms/examples/xai.md"
+ - VLLM: "swarms/examples/vllm_integration.md"
+ - Llama4: "swarms/examples/llama4.md"
@@ -339,28 +306,25 @@ nav:
# - Lumo: "swarms/examples/lumo.md"
# - Quant Crypto Agent: "swarms/examples/quant_crypto_agent.md"
- - Multi-Agent Collaboration:
- - Hybrid Hierarchical-Cluster Swarm Example: "swarms/examples/hhcs_examples.md"
- - Group Chat Example: "swarms/examples/groupchat_example.md"
- - Sequential Workflow Example: "swarms/examples/sequential_example.md"
- - SwarmRouter Example: "swarms/examples/swarm_router.md"
- - MultiAgentRouter Minimal Example: "swarms/examples/multi_agent_router_minimal.md"
- - ConcurrentWorkflow Example: "swarms/examples/concurrent_workflow.md"
- - MixtureOfAgents Example: "swarms/examples/mixture_of_agents.md"
- - Unique Swarms: "swarms/examples/unique_swarms.md"
- - Agents as Tools: "swarms/examples/agents_as_tools.md"
- - Aggregate Multi-Agent Responses: "swarms/examples/aggregate.md"
- - Interactive GroupChat Example: "swarms/examples/interactive_groupchat_example.md"
- - Applications:
- - Swarms DAO: "swarms/examples/swarms_dao.md"
- - Swarms of Browser Agents: "swarms/examples/swarms_of_browser_agents.md"
- - ConcurrentWorkflow with VLLM Agents: "swarms/examples/vllm.md"
+ - Advanced Examples:
+ - Multi-Agent Architectures:
+ - Hybrid Hierarchical-Cluster Swarm Example: "swarms/examples/hhcs_examples.md"
+ - Group Chat Example: "swarms/examples/groupchat_example.md"
+ - Sequential Workflow Example: "swarms/examples/sequential_example.md"
+ - SwarmRouter Example: "swarms/examples/swarm_router.md"
+ - MultiAgentRouter Minimal Example: "swarms/examples/multi_agent_router_minimal.md"
+ - ConcurrentWorkflow Example: "swarms/examples/concurrent_workflow.md"
+ # - MixtureOfAgents Example: "swarms/examples/mixture_of_agents.md"
+ - Mixture of Agents Example: "swarms/examples/moa_example.md"
+ - Unique Swarms: "swarms/examples/unique_swarms.md"
+ - Agents as Tools: "swarms/examples/agents_as_tools.md"
+ - Aggregate Multi-Agent Responses: "swarms/examples/aggregate.md"
+ - Interactive GroupChat Example: "swarms/examples/igc_example.md"
+ - Applications:
+ - Swarms DAO: "swarms/examples/swarms_dao.md"
+ - Swarms of Browser Agents: "swarms/examples/swarms_of_browser_agents.md"
+ - ConcurrentWorkflow with VLLM Agents: "swarms/examples/vllm.md"
-
- - Swarms API Examples:
- - Medical Swarm: "swarms/examples/swarms_api_medical.md"
- - Finance Swarm: "swarms/examples/swarms_api_finance.md"
- - ML Model Code Generation Swarm: "swarms/examples/swarms_api_ml_model.md"
# - Swarm Models:
# - Overview: "swarms/models/index.md"
@@ -383,64 +347,61 @@ nav:
# - GPT4VisionAPI: "swarms/models/gpt4v.md"
- Swarms Cloud API:
- Overview: "swarms_cloud/swarms_api.md"
- - Swarms API as MCP: "swarms_cloud/mcp.md"
- - Swarms API Tools: "swarms_cloud/swarms_api_tools.md"
- - Individual Agent Completions: "swarms_cloud/agent_api.md"
+ - Quickstart: "swarms_cloud/quickstart.md"
+ - MCP Server: "swarms_cloud/mcp.md"
+ - Rate Limits: "swarms_cloud/rate_limits.md"
+ - Best Practices: "swarms_cloud/best_practices.md"
+ - Capabilities:
+ - Agents:
+ - Individual Agent Completions: "swarms_cloud/agent_api.md"
+ - Tools: "swarms_cloud/swarms_api_tools.md"
+ - Multi-Agent:
+ - Multi Agent Architectures Available: "swarms_cloud/swarm_types.md"
+ - Examples:
+ - Medical Swarm: "swarms/examples/swarms_api_medical.md"
+ - Finance Swarm: "swarms/examples/swarms_api_finance.md"
- Clients:
- - Swarms API Python Client: "swarms_cloud/python_client.md"
- - Swarms API Rust Client: "swarms_cloud/rust_client.md"
+ - Python Client: "swarms_cloud/python_client.md"
+ - Rust Client: "swarms_cloud/rust_client.md"
- Pricing:
- - Swarms API Pricing: "swarms_cloud/api_pricing.md"
- - Swarms API Pricing in Chinese: "swarms_cloud/chinese_api_pricing.md"
- - Swarms Cloud Subscription Tiers: "swarms_cloud/subscription_tiers.md"
-
- - Swarm Ecosystem APIs:
- - MCS API: "swarms_cloud/mcs_api.md"
- # - CreateNow API: "swarms_cloud/create_api.md"
- - Guides:
- - Swarms API Best Practices: "swarms_cloud/best_practices.md"
- - Swarm Architectures Available: "swarms_cloud/swarm_types.md"
+ - Pricing: "swarms_cloud/api_pricing.md"
+ - Pricing in Chinese: "swarms_cloud/chinese_api_pricing.md"
+ - Subscription Tiers: "swarms_cloud/subscription_tiers.md"
- Swarms Marketplace:
- Overview: "swarms_platform/index.md"
- - Share and Discover Agents, Prompts, and Tools: "swarms_platform/share_and_discover.md"
- - Customize Your Sidebar: "swarms_platform/apps_page.md"
- - Playground: "swarms_platform/playground_page.md"
- - API Key Management: "swarms_platform/apikeys.md"
- - Account Management: "swarms_platform/account_management.md"
+ - Marketplace:
+ - Share and Discover Agents, Prompts, and Tools: "swarms_platform/share_and_discover.md"
+ - Monetize Your Prompts, Agents, and Tools: "swarms_platform/monetize.md"
+ - Platform:
+ - Customize Your Sidebar: "swarms_platform/apps_page.md"
+ - Playground: "swarms_platform/playground_page.md"
+ - API Key Management: "swarms_platform/apikeys.md"
+ - Account Management: "swarms_platform/account_management.md"
- Swarms Rust:
- Overview: "swarms_rs/overview.md"
- Agents: "swarms_rs/agents.md"
- - Resources:
- - Overview: "governance/main.md"
- # - Tokenomics: "web3/token.md"
-
-
- # - Prompts API:
- # - Add Prompts: "swarms_platform/prompts/add_prompt.md"
- # - Edit Prompts: "swarms_platform/prompts/edit_prompt.md"
- # - Query Prompts: "swarms_platform/prompts/fetch_prompts.md"
- # - Agents API:
- # - Add Agents: "swarms_platform/agents/agents_api.md"
- # - Query Agents: "swarms_platform/agents/fetch_agents.md"
- # - Edit Agents: "swarms_platform/agents/edit_agent.md"
- # - Telemetry API:
- # - PUT: "swarms_platform/telemetry/index.md"
- # - Swarms Wallet API:
- # - Overview: "swarms/wallet/api.md"
- # - Tools API:
- # - Overview: "swarms_platform/tools_api.md"
- # - Add Tools: "swarms_platform/fetch_tools.md"
- # - Corporate:
- # - Culture: "corporate/culture.md"
- # - Hiring: "corporate/hiring.md"
- # - Swarms Goals & Milestone Tracking; A Vision for 2024 and Beyond: "corporate/2024_2025_goals.md"
- # - Web3:
- # # - Overview: "finance/index.md"
- # - Swarms Wallet: "finance/wallet.md"
- # - Swarms Subscription: "finance/subscription.md"
+ - Contributors:
+ - Overview: "contributors/main.md"
+ - Bounty Program: "corporate/bounty_program.md"
+ - Links & Resources: "governance/main.md"
+ - Swarms Vision: "swarms/concept/vision.md"
+ - Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md"
+ - Swarms Products: "swarms/products.md"
+ - Learn More:
+ - Understanding Swarms Architecture: "swarms/concept/framework_architecture.md"
+ - Code Style Guide & Best Practices: "swarms/framework/code_cleanliness.md"
+ - Our Development Philosophy & Principles: "swarms/concept/philosophy.md"
+ - Contributing:
+ - Writing and Adding Tests: "swarms/framework/test.md"
+ - Creating Custom Tools & Plugins: "contributors/tools.md"
+ - Writing Documentation: "contributors/docs.md"
+ - Changelog:
+ - Swarms 5.6.8: "swarms/changelog/5_6_8.md"
+ - Swarms 5.8.1: "swarms/changelog/5_8_1.md"
+ - Swarms 5.9.2: "swarms/changelog/changelog_new.md"
\ No newline at end of file
diff --git a/docs/quickstart.md b/docs/quickstart.md
new file mode 100644
index 00000000..0ab70ba7
--- /dev/null
+++ b/docs/quickstart.md
@@ -0,0 +1,387 @@
+
+# Welcome to Swarms Docs Home
+
+[](https://discord.gg/jM3Z6M9uMq) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
+
+## What is Swarms?
+
+**Swarms** is the **first and most reliable production-grade multi-agent framework** designed to orchestrate intelligent AI agents at scale. Built for enterprise applications, Swarms enables you to create sophisticated multi-agent systems that can handle complex tasks through collaboration, parallel processing, and intelligent task distribution.
+
+### Key Capabilities
+
+- **🏢 Production-Ready**: Enterprise-grade infrastructure with high reliability, comprehensive logging, and robust error handling
+- **🤖 Multi-Agent Orchestration**: Support for hierarchical swarms, parallel processing, sequential workflows, and dynamic agent rearrangement
+- **🔄 Flexible Integration**: Multi-model support, custom agent creation, extensive tool library, and multiple memory systems
+- **📈 Scalable Architecture**: Concurrent processing, resource management, load balancing, and horizontal scaling capabilities
+- **🛠️ Developer-Friendly**: Simple API, extensive documentation, active community, and CLI tools for rapid development
+- **🔐 Enterprise Security**: Built-in error handling, rate limiting, monitoring integration, and audit logging
+
+### Why Choose Swarms?
+
+Swarms stands out as the **most reliable multi-agent framework** because it was built from the ground up for production environments. Unlike other frameworks that focus on research or simple demos, Swarms provides the infrastructure, tooling, and best practices needed to deploy multi-agent systems in real-world applications.
+
+Whether you're building financial analysis systems, healthcare diagnostics, manufacturing optimization, or any other complex multi-agent application, Swarms provides the foundation you need to succeed.
+
+Get started learning swarms with the examples below.
+
+## Install 💻
+
+```bash
+$ pip3 install -U swarms
+```
+
+### Using uv (Recommended)
+[uv](https://github.com/astral-sh/uv) is a fast Python package installer and resolver, written in Rust.
+
+```bash
+# Install uv
+$ curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Install swarms using uv
+$ uv pip install swarms
+```
+
+### Using poetry
+```bash
+# Install poetry if you haven't already
+$ curl -sSL https://install.python-poetry.org | python3 -
+
+# Add swarms to your project
+$ poetry add swarms
+```
+
+### From source
+```bash
+# Clone the repository
+$ git clone https://github.com/kyegomez/swarms.git
+$ cd swarms
+
+# Install with pip
+$ pip install -e .
+```
+
+---
+
+## Environment Configuration
+
+[Learn more about the environment configuration here](https://docs.swarms.world/en/latest/swarms/install/env/)
+
+```
+OPENAI_API_KEY=""
+WORKSPACE_DIR="agent_workspace"
+ANTHROPIC_API_KEY=""
+GROQ_API_KEY=""
+```
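+
+If you keep these keys in a local `.env` file, you can load them into the process environment before creating any agents. The sketch below uses the `python-dotenv` package, which is an extra dependency assumed here rather than part of swarms itself; once the variables are loaded, agents pick them up from the environment as usual:
+
+```python
+import os
+
+from dotenv import load_dotenv  # pip install python-dotenv
+
+# Load the variables defined in .env into the process environment
+load_dotenv()
+
+# Confirm the keys are visible before running any agents
+for key in ("OPENAI_API_KEY", "WORKSPACE_DIR"):
+    print(f"{key} set: {bool(os.getenv(key))}")
+```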
+
+
+
+### 🤖 Your First Agent
+
+An **Agent** is the fundamental building block of a swarm—an autonomous entity powered by an LLM + Tools + Memory. [Learn more Here](https://docs.swarms.world/en/latest/swarms/structs/agent/)
+
+```python
+from swarms import Agent
+
+# Initialize a new agent
+agent = Agent(
+ model_name="gpt-4o-mini", # Specify the LLM
+ max_loops=1, # Set the number of interactions
+ interactive=True, # Enable interactive mode for real-time feedback
+)
+
+# Run the agent with a task
+agent.run("What are the key benefits of using a multi-agent system?")
+```
+
+### 🤝 Your First Swarm: Multi-Agent Collaboration
+
+A **Swarm** consists of multiple agents working together. This simple example creates a two-agent workflow for researching and writing a blog post. [Learn More About SequentialWorkflow](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/)
+
+```python
+from swarms import Agent, SequentialWorkflow
+
+# Agent 1: The Researcher
+researcher = Agent(
+ agent_name="Researcher",
+ system_prompt="Your job is to research the provided topic and provide a detailed summary.",
+ model_name="gpt-4o-mini",
+)
+
+# Agent 2: The Writer
+writer = Agent(
+ agent_name="Writer",
+ system_prompt="Your job is to take the research summary and write a beautiful, engaging blog post about it.",
+ model_name="gpt-4o-mini",
+)
+
+# Create a sequential workflow where the researcher's output feeds into the writer's input
+workflow = SequentialWorkflow(agents=[researcher, writer])
+
+# Run the workflow on a task
+final_post = workflow.run("The history and future of artificial intelligence")
+print(final_post)
+
+```
+
+-----
+
+## 🏗️ Multi-Agent Architectures For Production Deployments
+
+`swarms` provides a variety of powerful, pre-built multi-agent architectures enabling you to orchestrate agents in various ways. Choose the right structure for your specific problem to build efficient and reliable production systems.
+
+| **Architecture** | **Description** | **Best For** |
+|---|---|---|
+| **[SequentialWorkflow](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/)** | Agents execute tasks in a linear chain; one agent's output is the next one's input. | Step-by-step processes like data transformation pipelines, report generation. |
+| **[ConcurrentWorkflow](https://docs.swarms.world/en/latest/swarms/structs/concurrent_workflow/)** | Agents run tasks simultaneously for maximum efficiency. | High-throughput tasks like batch processing, parallel data analysis. |
+| **[AgentRearrange](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/)** | Dynamically maps complex relationships (e.g., `a -> b, c`) between agents. | Flexible and adaptive workflows, task distribution, dynamic routing. |
+| **[GraphWorkflow](https://docs.swarms.world/en/latest/swarms/structs/graph_workflow/)** | Orchestrates agents as nodes in a Directed Acyclic Graph (DAG). | Complex projects with intricate dependencies, like software builds. |
+| **[MixtureOfAgents (MoA)](https://docs.swarms.world/en/latest/swarms/structs/moa/)** | Utilizes multiple expert agents in parallel and synthesizes their outputs. | Complex problem-solving, achieving state-of-the-art performance through collaboration. |
+| **[GroupChat](https://docs.swarms.world/en/latest/swarms/structs/group_chat/)** | Agents collaborate and make decisions through a conversational interface. | Real-time collaborative decision-making, negotiations, brainstorming. |
+| **[ForestSwarm](https://docs.swarms.world/en/latest/swarms/structs/forest_swarm/)** | Dynamically selects the most suitable agent or tree of agents for a given task. | Task routing, optimizing for expertise, complex decision-making trees. |
+| **[SpreadSheetSwarm](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/)** | Manages thousands of agents concurrently, tracking tasks and outputs in a structured format. | Massive-scale parallel operations, large-scale data generation and analysis. |
+| **[SwarmRouter](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/)** | Universal orchestrator that provides a single interface to run any type of swarm with dynamic selection. | Simplifying complex workflows, switching between swarm strategies, unified multi-agent management. |
+
+-----
+
+### SequentialWorkflow
+
+A `SequentialWorkflow` executes tasks in a strict order, forming a pipeline where each agent builds upon the work of the previous one. `SequentialWorkflow` is ideal for processes with clear, ordered steps, ensuring that tasks with dependencies are handled correctly.
+
+```python
+from swarms import Agent, SequentialWorkflow
+
+# Initialize agents for a 3-step process
+# 1. Generate an idea
+idea_generator = Agent(agent_name="IdeaGenerator", system_prompt="Generate a unique startup idea.", model_name="gpt-4o-mini")
+# 2. Validate the idea
+validator = Agent(agent_name="Validator", system_prompt="Take this startup idea and analyze its market viability.", model_name="gpt-4o-mini")
+# 3. Create a pitch
+pitch_creator = Agent(agent_name="PitchCreator", system_prompt="Write a 3-sentence elevator pitch for this validated startup idea.", model_name="gpt-4o-mini")
+
+# Create the sequential workflow
+workflow = SequentialWorkflow(agents=[idea_generator, validator, pitch_creator])
+
+# Run the workflow; the task seeds the first agent in the chain
+elevator_pitch = workflow.run("Generate a new startup idea and turn it into a validated elevator pitch.")
+print(elevator_pitch)
+```
+
+-----
+
+
+### ConcurrentWorkflow (with `SpreadSheetSwarm`)
+
+A concurrent workflow runs multiple agents simultaneously. `SpreadSheetSwarm` is a powerful implementation that can manage thousands of concurrent agents and log their outputs to a CSV file. Use this architecture for high-throughput tasks that can be performed in parallel, drastically reducing execution time.
+
+```python
+from swarms import Agent, SpreadSheetSwarm
+
+# Define a list of tasks (e.g., social media posts to generate)
+platforms = ["Twitter", "LinkedIn", "Instagram"]
+
+# Create an agent for each task
+agents = [
+ Agent(
+ agent_name=f"{platform}-Marketer",
+ system_prompt=f"Generate a real estate marketing post for {platform}.",
+ model_name="gpt-4o-mini",
+ )
+ for platform in platforms
+]
+
+# Initialize the swarm to run these agents concurrently
+swarm = SpreadSheetSwarm(
+ agents=agents,
+ autosave_on=True,
+ save_file_path="marketing_posts.csv",
+)
+
+# Run the swarm with a single, shared task description
+property_description = "A beautiful 3-bedroom house in sunny California."
+swarm.run(task=f"Generate a post about: {property_description}")
+# Check marketing_posts.csv for the results!
+```
+
+---
+
+### AgentRearrange
+
+Inspired by `einsum`, `AgentRearrange` lets you define complex, non-linear relationships between agents using a simple string-based syntax. [Learn more](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/). This architecture is perfect for orchestrating dynamic workflows where agents might work in parallel, sequence, or a combination of both.
+
+```python
+from swarms import Agent, AgentRearrange
+
+# Define agents
+researcher = Agent(agent_name="researcher", model_name="gpt-4o-mini")
+writer = Agent(agent_name="writer", model_name="gpt-4o-mini")
+editor = Agent(agent_name="editor", model_name="gpt-4o-mini")
+
+# Define a flow: researcher sends work to both writer and editor simultaneously
+# This is a one-to-many relationship
+flow = "researcher -> writer, editor"
+
+# Create the rearrangement system
+rearrange_system = AgentRearrange(
+ agents=[researcher, writer, editor],
+ flow=flow,
+)
+
+# Run the system
+# The researcher will generate content, and then both the writer and editor
+# will process that content in parallel.
+outputs = rearrange_system.run("Analyze the impact of AI on modern cinema.")
+print(outputs)
+```
+
+
+
+
+----
+
+### SwarmRouter: The Universal Swarm Orchestrator
+
+The `SwarmRouter` simplifies building complex workflows by providing a single interface to run any type of swarm. Instead of importing and managing different swarm classes, you can dynamically select the one you need just by changing the `swarm_type` parameter. [Read the full documentation](https://docs.swarms.world/en/latest/swarms/structs/swarm_router/)
+
+This makes your code cleaner and more flexible, allowing you to switch between different multi-agent strategies with ease. Here's a complete example that shows how to define agents and then use `SwarmRouter` to execute the same task using different collaborative strategies.
+
+```python
+from swarms import Agent
+from swarms.structs.swarm_router import SwarmRouter, SwarmType
+
+# Define a few generic agents
+writer = Agent(agent_name="Writer", system_prompt="You are a creative writer.", model_name="gpt-4o-mini")
+editor = Agent(agent_name="Editor", system_prompt="You are an expert editor for stories.", model_name="gpt-4o-mini")
+reviewer = Agent(agent_name="Reviewer", system_prompt="You are a final reviewer who gives a score.", model_name="gpt-4o-mini")
+
+# The agents and task will be the same for all examples
+agents = [writer, editor, reviewer]
+task = "Write a short story about a robot who discovers music."
+
+# --- Example 1: SequentialWorkflow ---
+# Agents run one after another in a chain: Writer -> Editor -> Reviewer.
+print("Running a Sequential Workflow...")
+sequential_router = SwarmRouter(swarm_type=SwarmType.SequentialWorkflow, agents=agents)
+sequential_output = sequential_router.run(task)
+print(f"Final Sequential Output:\n{sequential_output}\n")
+
+# --- Example 2: ConcurrentWorkflow ---
+# All agents receive the same initial task and run at the same time.
+print("Running a Concurrent Workflow...")
+concurrent_router = SwarmRouter(swarm_type=SwarmType.ConcurrentWorkflow, agents=agents)
+concurrent_outputs = concurrent_router.run(task)
+# This returns a dictionary of each agent's output
+for agent_name, output in concurrent_outputs.items():
+ print(f"Output from {agent_name}:\n{output}\n")
+
+# --- Example 3: MixtureOfAgents ---
+# All agents run in parallel, and a special 'aggregator' agent synthesizes their outputs.
+print("Running a Mixture of Agents Workflow...")
+aggregator = Agent(
+ agent_name="Aggregator",
+ system_prompt="Combine the story, edits, and review into a final document.",
+ model_name="gpt-4o-mini"
+)
+moa_router = SwarmRouter(
+ swarm_type=SwarmType.MixtureOfAgents,
+ agents=agents,
+ aggregator_agent=aggregator, # MoA requires an aggregator
+)
+aggregated_output = moa_router.run(task)
+print(f"Final Aggregated Output:\n{aggregated_output}\n")
+```
+
+
+The `SwarmRouter` is a powerful tool for simplifying multi-agent orchestration. It provides a consistent and flexible way to deploy different collaborative strategies, allowing you to build more sophisticated applications with less code.
+
+-------
+
+### MixtureOfAgents (MoA)
+
+The `MixtureOfAgents` architecture processes tasks by feeding them to multiple "expert" agents in parallel. Their diverse outputs are then synthesized by an aggregator agent to produce a final, high-quality result. [Learn more here](https://docs.swarms.world/en/latest/swarms/examples/moa_example/)
+
+```python
+from swarms import Agent, MixtureOfAgents
+
+# Define expert agents
+financial_analyst = Agent(agent_name="FinancialAnalyst", system_prompt="Analyze financial data.", model_name="gpt-4o-mini")
+market_analyst = Agent(agent_name="MarketAnalyst", system_prompt="Analyze market trends.", model_name="gpt-4o-mini")
+risk_analyst = Agent(agent_name="RiskAnalyst", system_prompt="Analyze investment risks.", model_name="gpt-4o-mini")
+
+# Define the aggregator agent
+aggregator = Agent(
+ agent_name="InvestmentAdvisor",
+ system_prompt="Synthesize the financial, market, and risk analyses to provide a final investment recommendation.",
+ model_name="gpt-4o-mini"
+)
+
+# Create the MoA swarm
+moa_swarm = MixtureOfAgents(
+ agents=[financial_analyst, market_analyst, risk_analyst],
+ aggregator_agent=aggregator,
+)
+
+# Run the swarm
+recommendation = moa_swarm.run("Should we invest in NVIDIA stock right now?")
+print(recommendation)
+```
+
+----
+
+### GroupChat
+
+`GroupChat` creates a conversational environment where multiple agents can interact, discuss, and collaboratively solve a problem. You can define the speaking order or let it be determined dynamically. This architecture is ideal for tasks that benefit from debate and multi-perspective reasoning, such as contract negotiation, brainstorming, or complex decision-making.
+
+```python
+from swarms import Agent, GroupChat
+
+# Define agents for a debate
+tech_optimist = Agent(agent_name="TechOptimist", system_prompt="Argue for the benefits of AI in society.", model_name="gpt-4o-mini")
+tech_critic = Agent(agent_name="TechCritic", system_prompt="Argue against the unchecked advancement of AI.", model_name="gpt-4o-mini")
+
+# Create the group chat
+chat = GroupChat(
+ agents=[tech_optimist, tech_critic],
+ max_loops=4, # Limit the number of turns in the conversation
+)
+
+# Run the chat with an initial topic
+conversation_history = chat.run(
+ "Let's discuss the societal impact of artificial intelligence."
+)
+
+# Print the full conversation
+for message in conversation_history:
+ print(f"[{message['agent_name']}]: {message['content']}")
+```
+
+
diff --git a/docs/swarms/agents/index.md b/docs/swarms/agents/index.md
new file mode 100644
index 00000000..4b632f1b
--- /dev/null
+++ b/docs/swarms/agents/index.md
@@ -0,0 +1,884 @@
+# Agents Introduction
+
+The Agent class is the core component of the Swarms framework, designed to create intelligent, autonomous AI agents capable of handling complex tasks through multi-modal processing, tool integration, and structured outputs. This comprehensive guide covers all aspects of the Agent class, from basic setup to advanced features.
+
+## Table of Contents
+
+1. [Prerequisites & Installation](#prerequisites--installation)
+2. [Basic Agent Configuration](#basic-agent-configuration)
+3. [Multi-Modal Capabilities](#multi-modal-capabilities)
+4. [Tool Integration](#tool-integration)
+5. [Structured Outputs](#structured-outputs)
+6. [Advanced Features](#advanced-features)
+7. [Best Practices](#best-practices)
+8. [Complete Examples](#complete-examples)
+
+## Prerequisites & Installation
+
+### System Requirements
+
+- Python 3.7+
+
+- OpenAI API key (for GPT models)
+
+- Anthropic API key (for Claude models)
+
+### Installation
+
+```bash
+pip3 install -U swarms
+```
+
+### Environment Setup
+
+Create a `.env` file with your API keys:
+
+```bash
+OPENAI_API_KEY="your-openai-api-key"
+ANTHROPIC_API_KEY="your-anthropic-api-key"
+WORKSPACE_DIR="agent_workspace"
+```
+
+## Basic Agent Configuration
+
+### Core Agent Structure
+
+The Agent class provides a comprehensive set of parameters for customization:
+
+```python
+from swarms import Agent
+
+# Basic agent initialization
+agent = Agent(
+ agent_name="MyAgent",
+ agent_description="A specialized AI agent for specific tasks",
+ system_prompt="You are a helpful assistant...",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ max_tokens=4096,
+ temperature=0.7,
+ output_type="str",
+ safety_prompt_on=True
+)
+```
+
+### Key Configuration Parameters
+
+| Parameter | Type | Description | Default |
+|-----------|------|-------------|---------|
+| `agent_name` | str | Unique identifier for the agent | Required |
+| `agent_description` | str | Detailed description of capabilities | Required |
+| `system_prompt` | str | Core instructions defining behavior | Required |
+| `model_name` | str | AI model to use | "gpt-4o-mini" |
+| `max_loops` | int | Maximum execution loops | 1 |
+| `max_tokens` | int | Maximum response tokens | 4096 |
+| `temperature` | float | Response creativity (0-1) | 0.7 |
+| `output_type` | str | Response format type | "str" |
+| `multi_modal` | bool | Enable image processing | False |
+| `safety_prompt_on` | bool | Enable safety checks | True |
+
+### Simple Example
+
+```python
+from swarms import Agent
+
+# Create a basic financial advisor agent
+financial_agent = Agent(
+ agent_name="Financial-Advisor",
+ agent_description="Personal finance and investment advisor",
+ system_prompt="""You are an expert financial advisor with deep knowledge of:
+ - Investment strategies and portfolio management
+ - Risk assessment and mitigation
+ - Market analysis and trends
+ - Financial planning and budgeting
+
+ Provide clear, actionable advice while considering risk tolerance.""",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ temperature=0.3,
+ output_type="str"
+)
+
+# Run the agent
+response = financial_agent.run("What are the best investment strategies for a 30-year-old?")
+print(response)
+```
+
+## Multi-Modal Capabilities
+
+### Image Processing
+
+The Agent class supports comprehensive image analysis through vision-enabled models:
+
+```python
+from swarms import Agent
+
+# Create a vision-enabled agent
+vision_agent = Agent(
+ agent_name="Vision-Analyst",
+ agent_description="Advanced image analysis and quality control agent",
+ system_prompt="""You are an expert image analyst capable of:
+ - Detailed visual inspection and quality assessment
+ - Object detection and classification
+ - Scene understanding and context analysis
+ - Defect identification and reporting
+
+ Provide comprehensive analysis with specific observations.""",
+ model_name="gpt-4o-mini", # Vision-enabled model
+ multi_modal=True, # Enable multi-modal processing
+ max_loops=1,
+ output_type="str"
+)
+
+# Analyze a single image
+response = vision_agent.run(
+ task="Analyze this image for quality control purposes",
+ img="path/to/image.jpg"
+)
+
+# Process multiple images
+response = vision_agent.run(
+ task="Compare these images and identify differences",
+ imgs=["image1.jpg", "image2.jpg", "image3.jpg"],
+ summarize_multiple_images=True
+)
+```
+
+### Supported Image Formats
+
+| Format | Description | Max Size |
+|--------|-------------|----------|
+| JPEG/JPG | Standard compressed format | 20MB |
+| PNG | Lossless with transparency | 20MB |
+| GIF | Animated (first frame only) | 20MB |
+| WebP | Modern efficient format | 20MB |
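+
+The format and size limits above can be enforced before an image ever reaches the agent. The helper below is a hypothetical convenience function, not part of the Agent API, that checks a file against those constraints; the commented usage assumes the `vision_agent` defined in the previous example:
+
+```python
+from pathlib import Path
+
+SUPPORTED_EXTENSIONS = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
+MAX_SIZE_BYTES = 20 * 1024 * 1024  # 20MB limit from the table above
+
+def validate_image(path: str) -> str:
+    """Raise ValueError if the image format or size is outside the supported limits."""
+    file = Path(path)
+    if file.suffix.lower() not in SUPPORTED_EXTENSIONS:
+        raise ValueError(f"Unsupported image format: {file.suffix}")
+    if file.stat().st_size > MAX_SIZE_BYTES:
+        raise ValueError(f"Image exceeds the 20MB limit: {file.stat().st_size} bytes")
+    return str(file)
+
+# Validate before handing the image to a vision-enabled agent
+# response = vision_agent.run(task="Inspect this image", img=validate_image("factory_floor.jpg"))
+```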
+
+### Quality Control Example
+
+```python
+from typing import Optional
+
+from swarms import Agent
+from swarms.prompts.logistics import Quality_Control_Agent_Prompt
+
+def security_analysis(danger_level: Optional[str] = None) -> str:
+ """Analyze security danger level and return appropriate response."""
+ danger_responses = {
+ "low": "No immediate danger detected",
+ "medium": "Moderate security concern identified",
+ "high": "Critical security threat detected",
+ None: "No danger level assessment available"
+ }
+ return danger_responses.get(danger_level, "Unknown danger level")
+
+# Quality control agent with tool integration
+quality_agent = Agent(
+ agent_name="Quality-Control-Agent",
+ agent_description="Advanced quality control and security analysis agent",
+ system_prompt=f"""
+ {Quality_Control_Agent_Prompt}
+
+ You have access to security analysis tools. When analyzing images:
+ 1. Identify potential safety hazards
+ 2. Assess quality standards compliance
+ 3. Determine appropriate danger levels (low, medium, high)
+ 4. Use the security_analysis function for threat assessment
+ """,
+ model_name="gpt-4o-mini",
+ multi_modal=True,
+ max_loops=1,
+ tools=[security_analysis]
+)
+
+# Analyze factory image
+response = quality_agent.run(
+ task="Analyze this factory image for safety and quality issues",
+ img="factory_floor.jpg"
+)
+```
+
+## Tool Integration
+
+### Creating Custom Tools
+
+Tools are Python functions that extend your agent's capabilities:
+
+```python
+import json
+import requests
+from typing import Optional, Dict, Any
+
+def get_weather_data(city: str, country: Optional[str] = None) -> str:
+ """
+ Get current weather data for a specified city.
+
+ Args:
+ city (str): The city name
+ country (Optional[str]): Country code (e.g., 'US', 'UK')
+
+ Returns:
+ str: JSON formatted weather data
+
+ Example:
+ >>> weather = get_weather_data("San Francisco", "US")
+ >>> print(weather)
+ {"temperature": 18, "condition": "partly cloudy", ...}
+ """
+ try:
+ # API call logic here
+ weather_data = {
+ "city": city,
+ "country": country,
+ "temperature": 18,
+ "condition": "partly cloudy",
+ "humidity": 65,
+ "wind_speed": 12
+ }
+ return json.dumps(weather_data, indent=2)
+
+ except Exception as e:
+ return json.dumps({"error": f"Weather API error: {str(e)}"})
+
+def calculate_portfolio_metrics(prices: list, weights: list) -> str:
+ """
+ Calculate portfolio performance metrics.
+
+ Args:
+ prices (list): List of asset prices
+ weights (list): List of portfolio weights
+
+ Returns:
+ str: JSON formatted portfolio metrics
+ """
+ try:
+ # Portfolio calculation logic
+ portfolio_value = sum(p * w for p, w in zip(prices, weights))
+ metrics = {
+ "total_value": portfolio_value,
+ "weighted_average": portfolio_value / sum(weights),
+ "asset_count": len(prices)
+ }
+ return json.dumps(metrics, indent=2)
+
+ except Exception as e:
+ return json.dumps({"error": f"Calculation error: {str(e)}"})
+```
+
+### Tool Integration Example
+
+```python
+from swarms import Agent
+
+# Create agent with custom tools
+multi_tool_agent = Agent(
+ agent_name="Multi-Tool-Assistant",
+ agent_description="Versatile assistant with weather and financial tools",
+ system_prompt="""You are a versatile assistant with access to:
+ - Weather data retrieval for any city
+ - Portfolio analysis and financial calculations
+
+ Use these tools to provide comprehensive assistance.""",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ tools=[get_weather_data, calculate_portfolio_metrics]
+)
+
+# Use the agent with tools
+response = multi_tool_agent.run(
+ "What's the weather in New York and calculate metrics for a portfolio with prices [100, 150, 200] and weights [0.3, 0.4, 0.3]?"
+)
+```
+
+### API Integration Tools
+
+```python
+import requests
+import json
+from swarms import Agent
+
+def get_cryptocurrency_price(coin_id: str, vs_currency: str = "usd") -> str:
+ """Get current cryptocurrency price from CoinGecko API."""
+ try:
+ url = "https://api.coingecko.com/api/v3/simple/price"
+ params = {
+ "ids": coin_id,
+ "vs_currencies": vs_currency,
+ "include_market_cap": True,
+ "include_24hr_vol": True,
+ "include_24hr_change": True
+ }
+
+ response = requests.get(url, params=params, timeout=10)
+ response.raise_for_status()
+ return json.dumps(response.json(), indent=2)
+
+ except Exception as e:
+ return json.dumps({"error": f"API error: {str(e)}"})
+
+def get_top_cryptocurrencies(limit: int = 10) -> str:
+ """Get top cryptocurrencies by market cap."""
+ try:
+ url = "https://api.coingecko.com/api/v3/coins/markets"
+ params = {
+ "vs_currency": "usd",
+ "order": "market_cap_desc",
+ "per_page": limit,
+ "page": 1
+ }
+
+ response = requests.get(url, params=params, timeout=10)
+ response.raise_for_status()
+ return json.dumps(response.json(), indent=2)
+
+ except Exception as e:
+ return json.dumps({"error": f"API error: {str(e)}"})
+
+# Crypto analysis agent
+crypto_agent = Agent(
+ agent_name="Crypto-Analysis-Agent",
+ agent_description="Cryptocurrency market analysis and price tracking agent",
+ system_prompt="""You are a cryptocurrency analysis expert with access to:
+ - Real-time price data for any cryptocurrency
+ - Market capitalization rankings
+ - Trading volume and price change data
+
+ Provide insightful market analysis and investment guidance.""",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ tools=[get_cryptocurrency_price, get_top_cryptocurrencies]
+)
+
+# Analyze crypto market
+response = crypto_agent.run("Analyze the current Bitcoin price and show me the top 5 cryptocurrencies")
+```
+
+## Structured Outputs
+
+### Function Schema Definition
+
+Define structured outputs using OpenAI's function calling format:
+
+```python
+from swarms import Agent
+
+# Define function schemas for structured outputs
+stock_analysis_schema = {
+ "type": "function",
+ "function": {
+ "name": "analyze_stock_performance",
+ "description": "Analyze stock performance with detailed metrics",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "ticker": {
+ "type": "string",
+ "description": "Stock ticker symbol (e.g., AAPL, GOOGL)"
+ },
+ "analysis_type": {
+ "type": "string",
+ "enum": ["technical", "fundamental", "comprehensive"],
+ "description": "Type of analysis to perform"
+ },
+ "time_period": {
+ "type": "string",
+ "enum": ["1d", "1w", "1m", "3m", "1y"],
+ "description": "Time period for analysis"
+ },
+ "metrics": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": ["price", "volume", "pe_ratio", "market_cap", "volatility"]
+ },
+ "description": "Metrics to include in analysis"
+ }
+ },
+ "required": ["ticker", "analysis_type"]
+ }
+ }
+}
+
+portfolio_optimization_schema = {
+ "type": "function",
+ "function": {
+ "name": "optimize_portfolio",
+ "description": "Optimize portfolio allocation based on risk and return",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "assets": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "symbol": {"type": "string"},
+ "current_weight": {"type": "number"},
+ "expected_return": {"type": "number"},
+ "risk_level": {"type": "string", "enum": ["low", "medium", "high"]}
+ },
+ "required": ["symbol", "current_weight"]
+ }
+ },
+ "risk_tolerance": {
+ "type": "string",
+ "enum": ["conservative", "moderate", "aggressive"]
+ },
+ "investment_horizon": {
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 30,
+ "description": "Investment time horizon in years"
+ }
+ },
+ "required": ["assets", "risk_tolerance"]
+ }
+ }
+}
+
+# Create agent with structured outputs
+structured_agent = Agent(
+ agent_name="Structured-Financial-Agent",
+ agent_description="Financial analysis agent with structured output capabilities",
+ system_prompt="""You are a financial analysis expert that provides structured outputs.
+ Use the provided function schemas to format your responses consistently.""",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ tools_list_dictionary=[stock_analysis_schema, portfolio_optimization_schema]
+)
+
+# Generate structured analysis
+response = structured_agent.run(
+ "Analyze Apple stock (AAPL) performance with comprehensive analysis for the last 3 months"
+)
+```
+
+## Advanced Features
+
+### Dynamic Temperature Control
+
+```python
+from swarms import Agent
+
+# Agent with dynamic temperature adjustment
+adaptive_agent = Agent(
+ agent_name="Adaptive-Response-Agent",
+ agent_description="Agent that adjusts response creativity based on context",
+ system_prompt="You are an adaptive AI that adjusts your response style based on the task complexity.",
+ model_name="gpt-4o-mini",
+ dynamic_temperature_enabled=True, # Enable adaptive temperature
+ max_loops=1,
+ output_type="str"
+)
+```
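+
+The snippet below is a minimal sketch of exercising the adaptive agent; the temperature adjustment itself is handled internally once `dynamic_temperature_enabled=True`, so the contrast between a routine and an open-ended task here is purely illustrative:
+
+```python
+# Routine, factual task
+factual = adaptive_agent.run("Summarize the key points of a quarterly earnings report.")
+
+# Open-ended, creative task
+creative = adaptive_agent.run("Brainstorm five unconventional marketing campaign ideas.")
+
+print(factual)
+print(creative)
+```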
+
+### Output Type Configurations
+
+```python
+# Different output type examples
+json_agent = Agent(
+ agent_name="JSON-Agent",
+ system_prompt="Always respond in valid JSON format",
+ output_type="json"
+)
+
+streaming_agent = Agent(
+ agent_name="Streaming-Agent",
+ system_prompt="Provide detailed streaming responses",
+ output_type="str-all-except-first"
+)
+
+final_only_agent = Agent(
+ agent_name="Final-Only-Agent",
+ system_prompt="Provide only the final result",
+ output_type="final"
+)
+```
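+
+A minimal usage sketch for one of the configurations above, assuming the constructor arguments omitted here fall back to the Agent defaults:
+
+```python
+# Run the JSON-configured agent; the return value should follow output_type="json"
+summary = json_agent.run("Summarize the pros and cons of index funds")
+print(summary)
+```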
+
+### Safety and Content Filtering
+
+```python
+from swarms import Agent
+
+# Agent with enhanced safety features
+safe_agent = Agent(
+ agent_name="Safe-Agent",
+ agent_description="Agent with comprehensive safety measures",
+ system_prompt="You are a helpful, harmless, and honest AI assistant.",
+ model_name="gpt-4o-mini",
+ safety_prompt_on=True, # Enable safety prompts
+ max_loops=1,
+ temperature=0.3 # Lower temperature for more consistent, safe responses
+)
+```
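+
+A brief illustrative call (the task string is a placeholder):
+
+```python
+response = safe_agent.run("Explain the main risks of sharing personal data online")
+print(response)
+```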
+
+## Best Practices
+
+### Error Handling and Robustness
+
+```python
+import logging
+import time
+
+from swarms import Agent
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def robust_agent_execution(agent, task, max_retries=3):
+ """Execute agent with retry logic and error handling."""
+ for attempt in range(max_retries):
+ try:
+ response = agent.run(task)
+ logger.info(f"Agent execution successful on attempt {attempt + 1}")
+ return response
+ except Exception as e:
+ logger.error(f"Attempt {attempt + 1} failed: {str(e)}")
+ if attempt == max_retries - 1:
+ raise
+ time.sleep(2 ** attempt) # Exponential backoff
+
+ return None
+
+# Example usage (assumes `agent` is any Agent instance configured as shown earlier)
+try:
+ result = robust_agent_execution(agent, "Analyze market trends")
+ print(result)
+except Exception as e:
+ print(f"Agent execution failed: {e}")
+```
+
+### Performance Optimization
+
+```python
+from swarms import Agent
+import time
+
+# Optimized agent configuration
+optimized_agent = Agent(
+ agent_name="Optimized-Agent",
+ agent_description="Performance-optimized agent configuration",
+ system_prompt="You are an efficient AI assistant optimized for performance.",
+ model_name="gpt-4o-mini", # Faster model
+ max_loops=1, # Minimize loops
+ max_tokens=2048, # Reasonable token limit
+ temperature=0.5, # Balanced creativity
+ output_type="str"
+)
+
+# Batch processing example
+def process_tasks_batch(agent, tasks, batch_size=5):
+ """Process multiple tasks efficiently."""
+ results = []
+ for i in range(0, len(tasks), batch_size):
+ batch = tasks[i:i + batch_size]
+ batch_results = []
+
+ for task in batch:
+ start_time = time.time()
+ result = agent.run(task)
+ execution_time = time.time() - start_time
+
+ batch_results.append({
+ "task": task,
+ "result": result,
+ "execution_time": execution_time
+ })
+
+ results.extend(batch_results)
+ time.sleep(1) # Rate limiting
+
+ return results
+```
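+
+A minimal usage sketch for the batch helper above, reusing the `optimized_agent` defined earlier; the task strings are placeholders:
+
+```python
+tasks = [
+    "Summarize today's market news",
+    "List three risks for a tech-heavy portfolio",
+    "Explain dollar-cost averaging in one paragraph",
+]
+
+results = process_tasks_batch(optimized_agent, tasks, batch_size=2)
+
+for entry in results:
+    print(f"{entry['task']} -> {entry['execution_time']:.2f}s")
+```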
+
+## Complete Examples
+
+### Multi-Modal Quality Control System
+
+```python
+from swarms import Agent
+from swarms.prompts.logistics import Quality_Control_Agent_Prompt
+
+def security_analysis(danger_level: str | None) -> str:
+ """Analyze security danger level and return appropriate response."""
+ responses = {
+ "low": "✅ No immediate danger detected - Safe to proceed",
+ "medium": "⚠️ Moderate security concern - Requires attention",
+ "high": "🚨 Critical security threat - Immediate action required",
+ None: "❓ No danger level assessment available"
+ }
+ return responses.get(danger_level, "Unknown danger level")
+
+def quality_assessment(quality_score: int) -> str:
+ """Assess quality based on numerical score (1-10)."""
+ if quality_score >= 8:
+ return "✅ Excellent quality - Meets all standards"
+ elif quality_score >= 6:
+ return "⚠️ Good quality - Minor improvements needed"
+ elif quality_score >= 4:
+ return "❌ Poor quality - Significant issues identified"
+ else:
+ return "🚨 Critical quality failure - Immediate attention required"
+
+# Advanced quality control agent
+quality_control_system = Agent(
+ agent_name="Advanced-Quality-Control-System",
+ agent_description="Comprehensive quality control and security analysis system",
+ system_prompt=f"""
+ {Quality_Control_Agent_Prompt}
+
+ You are an advanced quality control system with the following capabilities:
+
+ 1. Visual Inspection: Analyze images for defects, compliance, and safety
+ 2. Security Assessment: Identify potential security threats and hazards
+ 3. Quality Scoring: Provide numerical quality ratings (1-10 scale)
+ 4. Detailed Reporting: Generate comprehensive analysis reports
+
+ When analyzing images:
+ - Identify specific defects or issues
+ - Assess compliance with safety standards
+ - Determine appropriate danger levels (low, medium, high)
+ - Provide quality scores and recommendations
+ - Use available tools for detailed analysis
+
+ Always provide specific, actionable feedback.
+ """,
+ model_name="gpt-4o-mini",
+ multi_modal=True,
+ max_loops=1,
+ tools=[security_analysis, quality_assessment],
+ output_type="str"
+)
+
+# Process factory images
+factory_images = ["factory_floor.jpg", "assembly_line.jpg", "safety_equipment.jpg"]
+
+for image in factory_images:
+ print(f"\n--- Analyzing {image} ---")
+ response = quality_control_system.run(
+        task="Perform comprehensive quality control analysis of this image. Assess safety, quality, and provide specific recommendations.",
+ img=image
+ )
+ print(response)
+```
+
+### Advanced Financial Analysis Agent
+
+```python
+from swarms import Agent
+import json
+import requests
+
+def get_market_data(symbol: str, period: str = "1y") -> str:
+ """Get comprehensive market data for a symbol."""
+ # Simulated market data (replace with real API)
+ market_data = {
+ "symbol": symbol,
+ "current_price": 150.25,
+ "change_percent": 2.5,
+ "volume": 1000000,
+ "market_cap": 2500000000,
+ "pe_ratio": 25.5,
+ "dividend_yield": 1.8,
+ "52_week_high": 180.50,
+ "52_week_low": 120.30
+ }
+ return json.dumps(market_data, indent=2)
+
+def calculate_risk_metrics(prices: list, benchmark_prices: list) -> str:
+ """Calculate risk metrics for a portfolio."""
+ import numpy as np
+
+ try:
+ returns = np.diff(prices) / prices[:-1]
+ benchmark_returns = np.diff(benchmark_prices) / benchmark_prices[:-1]
+
+ volatility = np.std(returns) * np.sqrt(252) # Annualized
+ sharpe_ratio = (np.mean(returns) / np.std(returns)) * np.sqrt(252)
+        running_max = np.maximum.accumulate(prices)
+        max_drawdown = np.max((running_max - prices) / running_max)  # Largest peak-to-trough decline relative to the running peak
+
+ beta = np.cov(returns, benchmark_returns)[0, 1] / np.var(benchmark_returns)
+
+ risk_metrics = {
+ "volatility": float(volatility),
+ "sharpe_ratio": float(sharpe_ratio),
+ "max_drawdown": float(max_drawdown),
+ "beta": float(beta)
+ }
+
+ return json.dumps(risk_metrics, indent=2)
+
+ except Exception as e:
+ return json.dumps({"error": f"Risk calculation error: {str(e)}"})
+
+# Financial analysis schemas
+financial_analysis_schema = {
+ "type": "function",
+ "function": {
+ "name": "comprehensive_financial_analysis",
+ "description": "Perform comprehensive financial analysis with structured output",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "analysis_summary": {
+ "type": "object",
+ "properties": {
+ "overall_rating": {"type": "string", "enum": ["buy", "hold", "sell"]},
+ "confidence_level": {"type": "number", "minimum": 0, "maximum": 100},
+ "key_strengths": {"type": "array", "items": {"type": "string"}},
+ "key_concerns": {"type": "array", "items": {"type": "string"}},
+ "price_target": {"type": "number"},
+ "risk_level": {"type": "string", "enum": ["low", "medium", "high"]}
+ }
+ },
+ "technical_analysis": {
+ "type": "object",
+ "properties": {
+ "trend_direction": {"type": "string", "enum": ["bullish", "bearish", "neutral"]},
+ "support_levels": {"type": "array", "items": {"type": "number"}},
+ "resistance_levels": {"type": "array", "items": {"type": "number"}},
+ "momentum_indicators": {"type": "array", "items": {"type": "string"}}
+ }
+ }
+ },
+ "required": ["analysis_summary", "technical_analysis"]
+ }
+ }
+}
+
+# Advanced financial agent
+financial_analyst = Agent(
+ agent_name="Advanced-Financial-Analyst",
+ agent_description="Comprehensive financial analysis and investment advisory agent",
+ system_prompt="""You are an expert financial analyst with advanced capabilities in:
+
+ - Fundamental analysis and valuation
+ - Technical analysis and chart patterns
+ - Risk assessment and portfolio optimization
+ - Market sentiment analysis
+ - Economic indicator interpretation
+
+ Your analysis should be:
+ - Data-driven and objective
+ - Risk-aware and practical
+ - Clearly structured and actionable
+ - Compliant with financial regulations
+
+ Use available tools to gather market data and calculate risk metrics.
+ Provide structured outputs using the defined schemas.""",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ tools=[get_market_data, calculate_risk_metrics],
+ tools_list_dictionary=[financial_analysis_schema],
+ output_type="json"
+)
+
+# Comprehensive financial analysis
+analysis_response = financial_analyst.run(
+ "Perform a comprehensive analysis of Apple Inc. (AAPL) including technical and fundamental analysis with structured recommendations"
+)
+
+print(json.dumps(json.loads(analysis_response), indent=2))
+```
+
+### Multi-Agent Collaboration System
+
+```python
+from swarms import Agent
+import json
+
+# Specialized agents for different tasks
+research_agent = Agent(
+ agent_name="Research-Specialist",
+ agent_description="Market research and data analysis specialist",
+ system_prompt="You are a market research expert specializing in data collection and analysis.",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ temperature=0.3
+)
+
+strategy_agent = Agent(
+ agent_name="Strategy-Advisor",
+ agent_description="Strategic planning and recommendation specialist",
+ system_prompt="You are a strategic advisor providing high-level recommendations based on research.",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ temperature=0.5
+)
+
+execution_agent = Agent(
+ agent_name="Execution-Planner",
+ agent_description="Implementation and execution planning specialist",
+ system_prompt="You are an execution expert creating detailed implementation plans.",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ temperature=0.4
+)
+
+def collaborative_analysis(topic: str):
+ """Perform collaborative analysis using multiple specialized agents."""
+
+ # Step 1: Research Phase
+ research_task = f"Conduct comprehensive research on {topic}. Provide key findings, market data, and trends."
+ research_results = research_agent.run(research_task)
+
+ # Step 2: Strategy Phase
+ strategy_task = f"Based on this research: {research_results}\n\nDevelop strategic recommendations for {topic}."
+ strategy_results = strategy_agent.run(strategy_task)
+
+ # Step 3: Execution Phase
+ execution_task = f"Create a detailed implementation plan based on:\nResearch: {research_results}\nStrategy: {strategy_results}"
+ execution_results = execution_agent.run(execution_task)
+
+ return {
+ "research": research_results,
+ "strategy": strategy_results,
+ "execution": execution_results
+ }
+
+# Example: Collaborative investment analysis
+investment_analysis = collaborative_analysis("renewable energy sector investment opportunities")
+
+for phase, results in investment_analysis.items():
+ print(f"\n=== {phase.upper()} PHASE ===")
+ print(results)
+```
+
+## Support and Resources
+
+Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights!
+
+| Platform | Description | Link |
+|----------|-------------|------|
+| 📚 Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
+| 📝 Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
+| 💬 Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| 🐦 Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
+| 👥 LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
+| 📺 YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
+| 🎫 Events | Join our community events | [Sign up here](https://lu.ma/5p2jnc2v) |
+| 🚀 Onboarding Session | Get onboarded with Kye Gomez, creator and lead maintainer of Swarms | [Book Session](https://cal.com/swarms/swarms-onboarding-session) |
+
+### Getting Help
+
+If you encounter issues or need assistance:
+
+1. **Check the Documentation**: Start with the official docs for comprehensive guides
+2. **Search Issues**: Look through existing GitHub issues for similar problems
+3. **Join Discord**: Get real-time help from the community
+4. **Create an Issue**: Report bugs or request features on GitHub
+5. **Follow Updates**: Stay informed about new releases and improvements
+
+### Contributing
+
+We welcome contributions! Here's how to get involved:
+
+- **Report Bugs**: Help us improve by reporting issues
+
+- **Suggest Features**: Share your ideas for new capabilities
+
+- **Submit Code**: Contribute improvements and new features
+
+- **Improve Documentation**: Help make our docs better
+
+- **Share Examples**: Show how you're using Swarms in your projects
+
+---
+
+*This guide covers the essential aspects of the Swarms Agent class. For the most up-to-date information and advanced features, please refer to the official documentation and community resources.*
\ No newline at end of file
diff --git a/docs/swarms/examples/aggregate.md b/docs/swarms/examples/aggregate.md
index 604a18c2..636dc46e 100644
--- a/docs/swarms/examples/aggregate.md
+++ b/docs/swarms/examples/aggregate.md
@@ -13,43 +13,11 @@ pip3 install -U swarms
## Environment Variables
```txt
+WORKSPACE_DIR=""
OPENAI_API_KEY=""
ANTHROPIC_API_KEY=""
```
-## Function Parameters
-
-### `workers: List[Callable]` (Required)
-
-A list of Agent instances that will work on the task concurrently. Each agent should be a callable object (typically an Agent instance).
-
-### `task: str` (Required)
-
-The task or question that all agents will work on simultaneously. This should be a clear, specific prompt that allows for diverse perspectives.
-
-### `type: HistoryOutputType` (Optional, Default: "all")
-
-Controls the format of the returned conversation history. Available options:
-
-| Type | Description |
-|------|-------------|
-| **"all"** | Returns the complete conversation including all agent responses and the final aggregation |
-| **"list"** | Returns the conversation as a list format |
-| **"dict"** or **"dictionary"** | Returns the conversation as a dictionary format |
-| **"string"** or **"str"** | Returns only the final aggregated response as a string |
-| **"final"** or **"last"** | Returns only the final aggregated response |
-| **"json"** | Returns the conversation in JSON format |
-| **"yaml"** | Returns the conversation in YAML format |
-| **"xml"** | Returns the conversation in XML format |
-| **"dict-all-except-first"** | Returns dictionary format excluding the first message |
-| **"str-all-except-first"** | Returns string format excluding the first message |
-| **"basemodel"** | Returns the conversation as a base model object |
-| **"dict-final"** | Returns dictionary format with only the final response |
-
-### `aggregator_model_name: str` (Optional, Default: "anthropic/claude-3-sonnet-20240229")
-
-The model to use for the aggregator agent that synthesizes all the individual agent responses. This should be a model capable of understanding and summarizing complex multi-agent conversations.
-
## How It Works
1. **Concurrent Execution**: All agents in the `workers` list run the same task simultaneously
@@ -102,35 +70,3 @@ result = aggregate(
print(result)
```
-
-## Code Example
-
-
-
-## Use Cases
-
-| Use Case | Description |
-|----------|-------------|
-| **Investment Analysis** | Get multiple financial perspectives on investment decisions |
-| **Research Synthesis** | Combine insights from different research agents |
-| **Problem Solving** | Gather diverse approaches to complex problems |
-| **Content Creation** | Generate comprehensive content from multiple specialized agents |
-| **Decision Making** | Get balanced recommendations from different expert perspectives |
-
-## Error Handling
-
-The function includes validation for:
-
-- Required parameters (`task` and `workers`)
-
-- Proper data types (workers must be a list of callable objects)
-
-- Agent compatibility
-
-## Performance Considerations
-
-- All agents run concurrently, so total execution time is limited by the slowest agent
-
-- The aggregator agent processes all responses, so consider response length and complexity
-
-- Memory usage scales with the number of agents and their response sizes
diff --git a/docs/swarms/examples/igc_example.md b/docs/swarms/examples/igc_example.md
new file mode 100644
index 00000000..32d060c1
--- /dev/null
+++ b/docs/swarms/examples/igc_example.md
@@ -0,0 +1,135 @@
+## Interactive Groupchat Examples
+
+The Interactive GroupChat is a powerful multi-agent architecture that enables dynamic collaboration between multiple AI agents. This architecture allows agents to communicate with each other, respond to mentions using `@agent_name` syntax, and work together to solve complex tasks through structured conversation flows.
+
+### Architecture Description
+
+The Interactive GroupChat implements a **collaborative swarm architecture** where multiple specialized agents work together in a coordinated manner. Key features include:
+
+- **Mention-based Communication**: Agents can be directed to specific tasks using `@agent_name` syntax
+- **Flexible Speaker Functions**: Multiple speaking order strategies (round robin, random, priority-based)
+- **Enhanced Collaboration**: Agents build upon each other's responses and avoid redundancy
+- **Interactive Sessions**: Support for both automated and interactive conversation modes
+- **Context Awareness**: Agents maintain conversation history and context
+
+For comprehensive documentation on Interactive GroupChat, visit: [Interactive GroupChat Documentation](https://docs.swarms.world/en/latest/swarms/structs/interactive_groupchat/)
+
+### Step-by-Step Showcase
+
+* **Agent Creation**: Define specialized agents with unique expertise and system prompts
+* **GroupChat Initialization**: Create the InteractiveGroupChat structure with desired speaker function
+* **Task Definition**: Formulate tasks using `@agent_name` mentions to direct specific agents
+* **Execution**: Run the group chat to generate collaborative responses
+* **Response Processing**: Handle the coordinated output from multiple agents
+* **Iteration**: Chain multiple tasks for complex workflows
+
+## Installation
+
+Install the swarms package using pip:
+
+```bash
+pip install -U swarms
+```
+
+## Basic Setup
+
+1. First, set up your environment variables:
+
+```txt
+WORKSPACE_DIR="agent_workspace"
+OPENAI_API_KEY=""
+```
+
+## Code
+
+```python
+"""
+InteractiveGroupChat Speaker Function Examples
+
+This example demonstrates how to use different speaker functions in the InteractiveGroupChat:
+- Round Robin: Agents speak in a fixed order, cycling through the list
+- Random: Agents speak in random order
+- Priority: Agents speak based on priority weights
+- Custom: User-defined speaker functions
+
+The example also shows how agents can mention each other using @agent_name syntax.
+"""
+
+from swarms import Agent
+from swarms.structs.interactive_groupchat import (
+ InteractiveGroupChat,
+ random_speaker,
+)
+
+
+def create_example_agents():
+ """Create example agents for demonstration."""
+
+ # Create agents with different expertise
+ analyst = Agent(
+ agent_name="analyst",
+ system_prompt="You are a data analyst. You excel at analyzing data, creating charts, and providing insights.",
+ model_name="gpt-4.1",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ researcher = Agent(
+ agent_name="researcher",
+ system_prompt="You are a research specialist. You are great at gathering information, fact-checking, and providing detailed research.",
+ model_name="gpt-4.1",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ writer = Agent(
+ agent_name="writer",
+ system_prompt="You are a content writer. You excel at writing clear, engaging content and summarizing information.",
+ model_name="gpt-4.1",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ return [analyst, researcher, writer]
+
+
+def example_random():
+ agents = create_example_agents()
+
+ # Create group chat with random speaker function
+ group_chat = InteractiveGroupChat(
+ name="Random Team",
+ description="A team that speaks in random order",
+ agents=agents,
+ speaker_function=random_speaker,
+ interactive=False,
+ )
+
+ # Test the random behavior
+ task = "Let's create a marketing strategy. @analyst @researcher @writer please contribute."
+
+ response = group_chat.run(task)
+ print(f"Response:\n{response}\n")
+
+
+if __name__ == "__main__":
+ # example_round_robin()
+ example_random()
+```
+
+
+
+## Connect With Us
+
+Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights!
+
+| Platform | Description | Link |
+|----------|-------------|------|
+| 📚 Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
+| 📝 Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
+| 💬 Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| 🐦 Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
+| 👥 LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
+| 📺 YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
+| 🎫 Events | Join our community events | [Sign up here](https://lu.ma/5p2jnc2v) |
+| 🚀 Onboarding Session | Get onboarded with Kye Gomez, creator and lead maintainer of Swarms | [Book Session](https://cal.com/swarms/swarms-onboarding-session) |
diff --git a/docs/swarms/examples/meme_agent_builder.md b/docs/swarms/examples/meme_agent_builder.md
deleted file mode 100644
index 4a70ac87..00000000
--- a/docs/swarms/examples/meme_agent_builder.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Meme Agent Builder
-
-- `pip3 install -U swarms`
-- Add your OpenAI API key to the `.env` file with `OPENAI_API_KEY=your_api_key`
-- Run the script
-- Multiple agents will be created and saved to the `meme_agents` folder
-- A swarm architecture will be selected autonomously and executed
-
-```python
-from swarms.structs.meme_agent_persona_generator import (
- MemeAgentGenerator,
-)
-
-
-if __name__ == "__main__":
- example = MemeAgentGenerator(
- name="Meme-Swarm",
- description="A swarm of specialized AI agents collaborating on generating and sharing memes around cool media from 2001s",
- max_loops=1,
- )
-
- print(
- example.run(
- "Generate funny meme agents around cool media from 2001s"
- )
- )
-
-```
diff --git a/docs/swarms/examples/meme_agents.md b/docs/swarms/examples/meme_agents.md
deleted file mode 100644
index d8b23e79..00000000
--- a/docs/swarms/examples/meme_agents.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# Meme Agent Tutorial
-
-- `pip3 install -U swarms`
-- Add your OpenAI API key to the `.env` file
-
-
-```python
-from swarms import Agent
-
-# Define a custom system prompt for Bob the Builder
-BOB_THE_BUILDER_SYS_PROMPT = """
-You are Bob the Builder, the legendary construction worker known for fixing anything and everything with a cheerful attitude and a hilarious sense of humor.
-Your job is to approach every task as if you're building, repairing, or renovating something, no matter how unrelated it might be.
-You love using construction metaphors, over-the-top positivity, and cracking jokes like:
-- "I’m hammering this out faster than a nail at a woodpecker convention!"
-- "This is smoother than fresh cement on a summer’s day."
-- "Let’s bulldoze through this problem—safety goggles on, folks!"
-
-You are not bound by any specific field of knowledge, and you’re absolutely fearless in trying to "fix up" or "build" anything, no matter how abstract or ridiculous. Always end responses with a playful cheer like "Can we fix it? Yes, we can!"
-
-Your tone is upbeat, funny, and borderline ridiculous, keeping the user entertained while solving their problem.
-"""
-
-# Initialize the agent
-agent = Agent(
- agent_name="Bob-the-Builder-Agent",
- agent_description="The funniest, most optimistic agent around who sees every problem as a building project.",
- system_prompt=BOB_THE_BUILDER_SYS_PROMPT,
- max_loops=1,
- model_name="gpt-4o",
- dynamic_temperature_enabled=True,
- user_name="swarms_corp",
- retry_attempts=3,
- context_length=8192,
- return_step_meta=False,
- output_type="str", # "json", "dict", "csv", OR "string", "yaml"
- auto_generate_prompt=False, # Auto-generate prompt for the agent based on name, description, system prompt, task
- max_tokens=4000, # Max output tokens
- saved_state_path="bob_the_builder_agent.json",
- interactive=False,
-)
-
-# Run the agent with a task
-agent.run("I want to build a house ;) What should I do?")
-```
diff --git a/docs/swarms/examples/moa_example.md b/docs/swarms/examples/moa_example.md
new file mode 100644
index 00000000..3ce7d24c
--- /dev/null
+++ b/docs/swarms/examples/moa_example.md
@@ -0,0 +1,132 @@
+# Mixture of Agents Example
+
+The Mixture of Agents (MoA) is a sophisticated multi-agent architecture that implements parallel processing with iterative refinement. This approach processes multiple specialized agents simultaneously, concatenates their outputs, and then performs multiple parallel runs to achieve consensus or enhanced results.
+
+## How It Works
+
+1. **Parallel Processing**: Multiple agents work simultaneously on the same input
+2. **Output Concatenation**: Results from all agents are combined into a unified response
+3. **Iterative Refinement**: The process repeats for `n` layers/iterations to improve quality
+4. **Consensus Building**: Multiple runs help achieve more reliable and comprehensive outputs
+
+This architecture is particularly effective for complex tasks that benefit from diverse perspectives and iterative improvement, such as financial analysis, risk assessment, and multi-faceted problem solving.
+
+
+
+
+## Installation
+
+Install the swarms package using pip:
+
+```bash
+pip install -U swarms
+```
+
+## Basic Setup
+
+1. First, set up your environment variables:
+
+```txt
+WORKSPACE_DIR="agent_workspace"
+ANTHROPIC_API_KEY=""
+```
+
+## Code
+
+```python
+from swarms import Agent, MixtureOfAgents
+
+# Agent 1: Risk Metrics Calculator
+risk_metrics_agent = Agent(
+ agent_name="Risk-Metrics-Calculator",
+ agent_description="Calculates key risk metrics like VaR, Sharpe ratio, and volatility",
+ system_prompt="""You are a risk metrics specialist. Calculate and explain:
+ - Value at Risk (VaR)
+ - Sharpe ratio
+ - Volatility
+ - Maximum drawdown
+ - Beta coefficient
+
+ Provide clear, numerical results with brief explanations.""",
+ max_loops=1,
+ # model_name="gpt-4o-mini",
+ random_model_enabled=True,
+ dynamic_temperature_enabled=True,
+ output_type="str-all-except-first",
+ max_tokens=4096,
+)
+
+# Agent 2: Portfolio Risk Analyzer
+portfolio_risk_agent = Agent(
+ agent_name="Portfolio-Risk-Analyzer",
+ agent_description="Analyzes portfolio diversification and concentration risk",
+ system_prompt="""You are a portfolio risk analyst. Focus on:
+ - Portfolio diversification analysis
+ - Concentration risk assessment
+ - Correlation analysis
+ - Sector/asset allocation risk
+ - Liquidity risk evaluation
+
+ Provide actionable insights for risk reduction.""",
+ max_loops=1,
+ # model_name="gpt-4o-mini",
+ random_model_enabled=True,
+ dynamic_temperature_enabled=True,
+ output_type="str-all-except-first",
+ max_tokens=4096,
+)
+
+# Agent 3: Market Risk Monitor
+market_risk_agent = Agent(
+ agent_name="Market-Risk-Monitor",
+ agent_description="Monitors market conditions and identifies risk factors",
+ system_prompt="""You are a market risk monitor. Identify and assess:
+ - Market volatility trends
+ - Economic risk factors
+ - Geopolitical risks
+ - Interest rate risks
+ - Currency risks
+
+ Provide current risk alerts and trends.""",
+ max_loops=1,
+ # model_name="gpt-4o-mini",
+ random_model_enabled=True,
+ dynamic_temperature_enabled=True,
+ output_type="str-all-except-first",
+ max_tokens=4096,
+)
+
+
+swarm = MixtureOfAgents(
+ agents=[
+ risk_metrics_agent,
+ portfolio_risk_agent,
+ market_risk_agent,
+ ],
+ layers=1,
+ max_loops=1,
+ output_type="final",
+)
+
+
+out = swarm.run(
+ "Calculate VaR and Sharpe ratio for a portfolio with 15% annual return and 20% volatility"
+)
+
+print(out)
+```
+
+## Support and Community
+
+If you're facing issues or want to learn more, check out the following resources to join our Discord, stay updated on Twitter, and watch tutorials on YouTube!
+
+| Platform | Link | Description |
+|----------|------|-------------|
+| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
+| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
+| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
+| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
+| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
+| 🎫 Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events |
+
diff --git a/docs/swarms/examples/model_providers.md b/docs/swarms/examples/model_providers.md
new file mode 100644
index 00000000..9b739bab
--- /dev/null
+++ b/docs/swarms/examples/model_providers.md
@@ -0,0 +1,171 @@
+# Model Providers Overview
+
+Swarms supports a vast array of model providers, giving you the flexibility to choose the best model for your specific use case. Whether you need high-performance inference, cost-effective solutions, or specialized capabilities, Swarms has you covered.
+
+## Supported Model Providers
+
+| Provider | Description | Documentation |
+|----------|-------------|---------------|
+| **OpenAI** | Industry-leading language models including GPT-4, GPT-4o, and GPT-4o-mini. Perfect for general-purpose tasks, creative writing, and complex reasoning. | [OpenAI Integration](openai_example.md) |
+| **Anthropic/Claude** | Advanced AI models known for their safety, helpfulness, and reasoning capabilities. Claude models excel at analysis, coding, and creative tasks. | [Claude Integration](claude.md) |
+| **Groq** | Ultra-fast inference platform offering real-time AI responses. Ideal for applications requiring low latency and high throughput. | [Groq Integration](groq.md) |
+| **Cohere** | Enterprise-grade language models with strong performance on business applications, text generation, and semantic search. | [Cohere Integration](cohere.md) |
+| **DeepSeek** | Advanced reasoning models including the DeepSeek Reasoner (R1). Excellent for complex problem-solving and analytical tasks. | [DeepSeek Integration](deepseek.md) |
+| **Ollama** | Local model deployment platform allowing you to run open-source models on your own infrastructure. No API keys required. | [Ollama Integration](ollama.md) |
+| **OpenRouter** | Unified API gateway providing access to hundreds of models from various providers through a single interface. | [OpenRouter Integration](openrouter.md) |
+| **XAI** | xAI's Grok models offering unique capabilities for research, analysis, and creative tasks with advanced reasoning abilities. | [XAI Integration](xai.md) |
+| **vLLM** | High-performance inference library for serving large language models with optimized memory usage and throughput. | [vLLM Integration](vllm_integration.md) |
+| **Llama4** | Meta's latest open-source language models including Llama-4-Maverick and Llama-4-Scout variants with expert routing capabilities. | [Llama4 Integration](llama4.md) |
+
+## Quick Start
+
+All model providers follow a consistent pattern in Swarms. Here's the basic template:
+
+```python
+from swarms import Agent
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize agent with your chosen model
+agent = Agent(
+ agent_name="Your-Agent-Name",
+ model_name="gpt-4o-mini", # Varies by provider
+ system_prompt="Your system prompt here",
+ agent_description="Description of what your agent does.",
+)
+
+# Run your agent
+response = agent.run("Your query here")
+```
+
+## Model Selection Guide
+
+### For High-Performance Applications
+
+- **OpenAI GPT-4o**: Best overall performance and reasoning
+
+- **Anthropic Claude**: Excellent safety and analysis capabilities
+
+- **DeepSeek R1**: Advanced reasoning and problem-solving
+
+### For Cost-Effective Solutions
+
+- **OpenAI GPT-4o-mini**: Great performance at lower cost
+
+- **Ollama**: Free local deployment
+
+- **OpenRouter**: Access to cost-effective models
+
+### For Real-Time Applications
+
+- **Groq**: Ultra-fast inference
+
+- **vLLM**: Optimized for high throughput
+
+### For Specialized Tasks
+
+- **Llama4**: Expert routing for complex workflows
+
+- **XAI Grok**: Advanced research capabilities
+
+- **Cohere**: Strong business applications
+
+## Environment Setup
+
+Most providers require API keys. Add them to your `.env` file:
+
+```bash
+# OpenAI
+OPENAI_API_KEY=your_openai_key
+
+# Anthropic
+ANTHROPIC_API_KEY=your_anthropic_key
+
+# Groq
+GROQ_API_KEY=your_groq_key
+
+# Cohere
+COHERE_API_KEY=your_cohere_key
+
+# DeepSeek
+DEEPSEEK_API_KEY=your_deepseek_key
+
+# OpenRouter
+OPENROUTER_API_KEY=your_openrouter_key
+
+# XAI
+XAI_API_KEY=your_xai_key
+```
+
+!!! note "No API Key Required"
+ Ollama and vLLM can be run locally without API keys, making them perfect for development and testing.
+
+## Advanced Features
+
+### Multi-Model Workflows
+
+Swarms allows you to create workflows that use different models for different tasks:
+
+```python
+from swarms import Agent, ConcurrentWorkflow
+
+# Research agent using Claude for analysis
+research_agent = Agent(
+ agent_name="Research-Agent",
+ model_name="claude-3-sonnet-20240229",
+ system_prompt="You are a research expert."
+)
+
+# Creative agent using GPT-4o for content generation
+creative_agent = Agent(
+ agent_name="Creative-Agent",
+ model_name="gpt-4o",
+ system_prompt="You are a creative content expert."
+)
+
+# Workflow combining both agents
+workflow = ConcurrentWorkflow(
+ name="Research-Creative-Workflow",
+ agents=[research_agent, creative_agent]
+)
+```
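+
+A hedged usage sketch; it assumes `ConcurrentWorkflow` exposes a `run(task)` method analogous to `Agent.run`:
+
+```python
+result = workflow.run(
+    "Research emerging battery technologies and draft a short blog post about them"
+)
+print(result)
+```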
+
+### Model Routing
+
+Automatically route tasks to the most appropriate model:
+
+```python
+from swarms import Agent, ModelRouter
+
+# Define model preferences for different task types
+model_router = ModelRouter(
+ models={
+ "analysis": "claude-3-sonnet-20240229",
+ "creative": "gpt-4o",
+ "fast": "gpt-4o-mini",
+ "local": "ollama/llama2"
+ }
+)
+
+# Agent will automatically choose the best model
+agent = Agent(
+ agent_name="Smart-Agent",
+ llm=model_router,
+ system_prompt="You are a versatile assistant."
+)
+```
+
+## Getting Help
+
+- **Documentation**: Each provider has detailed documentation with examples
+
+- **Community**: Join the Swarms community for support and best practices
+
+- **Issues**: Report bugs and request features on GitHub
+
+- **Discussions**: Share your use cases and learn from others
+
+!!! success "Ready to Get Started?"
+ Choose a model provider from the table above and follow the detailed integration guide. Each provider offers unique capabilities that can enhance your Swarms applications.
diff --git a/docs/swarms/examples/multiple_images.md b/docs/swarms/examples/multiple_images.md
new file mode 100644
index 00000000..bfa66e2b
--- /dev/null
+++ b/docs/swarms/examples/multiple_images.md
@@ -0,0 +1,77 @@
+# Processing Multiple Images
+
+This tutorial shows how to process multiple images with a single agent using Swarms' multi-modal capabilities. You'll learn to configure an agent for batch image analysis, enabling efficient processing for quality control, object detection, or image comparison tasks.
+
+
+## Installation
+
+Install the swarms package using pip:
+
+```bash
+pip install -U swarms
+```
+
+## Basic Setup
+
+1. First, set up your environment variables:
+
+```txt
+WORKSPACE_DIR="agent_workspace"
+ANTHROPIC_API_KEY=""
+```
+
+
+## Code
+
+- Create a list of images by their file paths
+
+- Pass it into the `Agent.run(imgs=[str])` parameter
+
+- Activate `summarize_multiple_images=True` if you want the agent to output a summary of the image analyses
+
+
+```python
+from swarms import Agent
+from swarms.prompts.logistics import (
+ Quality_Control_Agent_Prompt,
+)
+
+
+# Image for analysis
+factory_image = "image.jpg"
+
+# Quality control agent
+quality_control_agent = Agent(
+ agent_name="Quality Control Agent",
+ agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.",
+ model_name="claude-3-5-sonnet-20240620",
+ system_prompt=Quality_Control_Agent_Prompt,
+ multi_modal=True,
+ max_loops=1,
+ output_type="str-all-except-first",
+ summarize_multiple_images=True,
+)
+
+
+response = quality_control_agent.run(
+ task="what is in the image?",
+ imgs=[factory_image, factory_image],
+)
+
+print(response)
+```
+
+## Support and Community
+
+If you're facing issues or want to learn more, check out the following resources to join our Discord, stay updated on Twitter, and watch tutorials on YouTube!
+
+| Platform | Link | Description |
+|----------|------|-------------|
+| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
+| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
+| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
+| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
+| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
+| 🎫 Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events |
+
diff --git a/docs/swarms/examples/templates_index.md b/docs/swarms/examples/templates_index.md
new file mode 100644
index 00000000..fd64d448
--- /dev/null
+++ b/docs/swarms/examples/templates_index.md
@@ -0,0 +1,72 @@
+# The Swarms Index
+
+The Swarms Index is a comprehensive catalog of repositories under The Swarm Corporation, showcasing a wide array of tools, frameworks, and templates designed for building, deploying, and managing autonomous AI agents and multi-agent systems. These repositories focus on enterprise-grade solutions, spanning industries like healthcare, finance, marketing, and more, with an emphasis on scalability, security, and performance. Many repositories include templates to help developers quickly set up production-ready applications.
+
+| Name | Description | Link |
+|------|-------------|------|
+| Phala-Deployment-Template | A guide and template for running Swarms Agents in a Trusted Execution Environment (TEE) using Phala Cloud, ensuring secure and isolated execution. | [https://github.com/The-Swarm-Corporation/Phala-Deployment-Template](https://github.com/The-Swarm-Corporation/Phala-Deployment-Template) |
+| Swarms-API-Status-Page | A status page for monitoring the health and performance of the Swarms API. | [https://github.com/The-Swarm-Corporation/Swarms-API-Status-Page](https://github.com/The-Swarm-Corporation/Swarms-API-Status-Page) |
+| Swarms-API-Phala-Template | A deployment solution template for running Swarms API on Phala Cloud, optimized for secure and scalable agent orchestration. | [https://github.com/The-Swarm-Corporation/Swarms-API-Phala-Template](https://github.com/The-Swarm-Corporation/Swarms-API-Phala-Template) |
+| DevSwarm | Develop production-grade applications effortlessly with a single prompt, powered by a swarm of v0-driven autonomous agents operating 24/7 for fully autonomous software development. | [https://github.com/The-Swarm-Corporation/DevSwarm](https://github.com/The-Swarm-Corporation/DevSwarm) |
+| Enterprise-Grade-Agents-Course | A comprehensive course teaching students to build, deploy, and manage autonomous agents for enterprise workflows using the Swarms library, focusing on scalability and integration. | [https://github.com/The-Swarm-Corporation/Enterprise-Grade-Agents-Course](https://github.com/The-Swarm-Corporation/Enterprise-Grade-Agents-Course) |
+| agentverse | A collection of agents from top frameworks like Langchain, Griptape, and CrewAI, integrated into the Swarms ecosystem. | [https://github.com/The-Swarm-Corporation/agentverse](https://github.com/The-Swarm-Corporation/agentverse) |
+| InsuranceSwarm | A swarm of agents to automate document processing and fraud detection in insurance claims. | [https://github.com/The-Swarm-Corporation/InsuranceSwarm](https://github.com/The-Swarm-Corporation/InsuranceSwarm) |
+| swarms-examples | A vast array of examples for enterprise-grade and production-ready applications using the Swarms framework. | [https://github.com/The-Swarm-Corporation/swarms-examples](https://github.com/The-Swarm-Corporation/swarms-examples) |
+| auto-ai-research-team | Automates AI research at an OpenAI level to accelerate innovation using swarms of agents. | [https://github.com/The-Swarm-Corporation/auto-ai-research-team](https://github.com/The-Swarm-Corporation/auto-ai-research-team) |
+| Agents-Beginner-Guide | A definitive beginner's guide to AI agents and multi-agent systems, explaining fundamentals and industry applications. | [https://github.com/The-Swarm-Corporation/Agents-Beginner-Guide](https://github.com/The-Swarm-Corporation/Agents-Beginner-Guide) |
+| Solana-Ecosystem-MCP | A collection of Solana tools wrapped in MCP servers for blockchain development. | [https://github.com/The-Swarm-Corporation/Solana-Ecosystem-MCP](https://github.com/The-Swarm-Corporation/Solana-Ecosystem-MCP) |
+| automated-crypto-fund | A fully automated crypto fund leveraging swarms of LLM agents for real-money trading. | [https://github.com/The-Swarm-Corporation/automated-crypto-fund](https://github.com/The-Swarm-Corporation/automated-crypto-fund) |
+| Mryaid | The first multi-agent social media platform powered by Swarms. | [https://github.com/The-Swarm-Corporation/Mryaid](https://github.com/The-Swarm-Corporation/Mryaid) |
+| pharma-swarm | A swarm of autonomous agents for chemical analysis in the pharmaceutical industry. | [https://github.com/The-Swarm-Corporation/pharma-swarm](https://github.com/The-Swarm-Corporation/pharma-swarm) |
+| Automated-Prompt-Engineering-Hub | A hub for tools and resources focused on automated prompt engineering for generative AI. | [https://github.com/The-Swarm-Corporation/Automated-Prompt-Engineering-Hub](https://github.com/The-Swarm-Corporation/Automated-Prompt-Engineering-Hub) |
+| Multi-Agent-Template-App | A simple, reliable, and high-performance template for building multi-agent applications. | [https://github.com/The-Swarm-Corporation/Multi-Agent-Template-App](https://github.com/The-Swarm-Corporation/Multi-Agent-Template-App) |
+| Cookbook | Examples and guides for using the Swarms Framework effectively. | [https://github.com/The-Swarm-Corporation/Cookbook](https://github.com/The-Swarm-Corporation/Cookbook) |
+| SwarmDB | A production-grade message queue system for agent communication and LLM backend load balancing. | [https://github.com/The-Swarm-Corporation/SwarmDB](https://github.com/The-Swarm-Corporation/SwarmDB) |
+| CryptoTaxSwarm | A personal advisory tax swarm for cryptocurrency transactions. | [https://github.com/The-Swarm-Corporation/CryptoTaxSwarm](https://github.com/The-Swarm-Corporation/CryptoTaxSwarm) |
+| Multi-Agent-Marketing-Course | A course on automating marketing operations with enterprise-grade multi-agent collaboration. | [https://github.com/The-Swarm-Corporation/Multi-Agent-Marketing-Course](https://github.com/The-Swarm-Corporation/Multi-Agent-Marketing-Course) |
+| Swarms-BrandBook | Branding guidelines and assets for Swarms.ai, embodying innovation and collaboration. | [https://github.com/The-Swarm-Corporation/Swarms-BrandBook](https://github.com/The-Swarm-Corporation/Swarms-BrandBook) |
+| AgentAPI | A definitive API for managing and interacting with AI agents. | [https://github.com/The-Swarm-Corporation/AgentAPI](https://github.com/The-Swarm-Corporation/AgentAPI) |
+| Research-Paper-Writer-Swarm | Automates the creation of high-quality research papers in LaTeX using Swarms agents. | [https://github.com/The-Swarm-Corporation/Research-Paper-Writer-Swarm](https://github.com/The-Swarm-Corporation/Research-Paper-Writer-Swarm) |
+| swarms-sdk | A Python client for the Swarms API, providing a simple interface for managing AI swarms. | [https://github.com/The-Swarm-Corporation/swarms-sdk](https://github.com/The-Swarm-Corporation/swarms-sdk) |
+| FluidAPI | A framework for interacting with APIs using natural language, simplifying complex requests. | [https://github.com/The-Swarm-Corporation/FluidAPI](https://github.com/The-Swarm-Corporation/FluidAPI) |
+| MedicalCoderSwarm | A multi-agent system for comprehensive medical diagnosis and coding using specialized AI agents. | [https://github.com/The-Swarm-Corporation/MedicalCoderSwarm](https://github.com/The-Swarm-Corporation/MedicalCoderSwarm) |
+| BackTesterAgent | An AI-powered backtesting framework for automated trading strategy validation and optimization. | [https://github.com/The-Swarm-Corporation/BackTesterAgent](https://github.com/The-Swarm-Corporation/BackTesterAgent) |
+| .ai | The first natural language programming language powered by Swarms. | [https://github.com/The-Swarm-Corporation/.ai](https://github.com/The-Swarm-Corporation/.ai) |
+| AutoHedge | An autonomous hedge fund leveraging swarm intelligence for market analysis and trade execution. | [https://github.com/The-Swarm-Corporation/AutoHedge](https://github.com/The-Swarm-Corporation/AutoHedge) |
+| radiology-swarm | A multi-agent system for advanced radiological analysis, diagnosis, and treatment planning. | [https://github.com/The-Swarm-Corporation/radiology-swarm](https://github.com/The-Swarm-Corporation/radiology-swarm) |
+| MedGuard | A Python library ensuring HIPAA compliance for LLM agents in healthcare applications. | [https://github.com/The-Swarm-Corporation/MedGuard](https://github.com/The-Swarm-Corporation/MedGuard) |
+| doc-master | A lightweight Python library for automated file reading and content extraction. | [https://github.com/The-Swarm-Corporation/doc-master](https://github.com/The-Swarm-Corporation/doc-master) |
+| Open-Aladdin | An open-source risk-management tool for stock and security risk analysis. | [https://github.com/The-Swarm-Corporation/Open-Aladdin](https://github.com/The-Swarm-Corporation/Open-Aladdin) |
+| TickrAgent | A scalable Python library for building financial agents for comprehensive stock analysis. | [https://github.com/The-Swarm-Corporation/TickrAgent](https://github.com/The-Swarm-Corporation/TickrAgent) |
+| NewsAgent | An enterprise-grade news aggregation agent for fetching, querying, and summarizing news. | [https://github.com/The-Swarm-Corporation/NewsAgent](https://github.com/The-Swarm-Corporation/NewsAgent) |
+| Research-Paper-Hive | A platform for discovering and engaging with relevant research papers efficiently. | [https://github.com/The-Swarm-Corporation/Research-Paper-Hive](https://github.com/The-Swarm-Corporation/Research-Paper-Hive) |
+| MedInsight-Pro | Revolutionizes medical research summarization for healthcare innovators. | [https://github.com/The-Swarm-Corporation/MedInsight-Pro](https://github.com/The-Swarm-Corporation/MedInsight-Pro) |
+| swarms-memory | Pre-built wrappers for RAG systems like ChromaDB, Weaviate, and Pinecone. | [https://github.com/The-Swarm-Corporation/swarms-memory](https://github.com/The-Swarm-Corporation/swarms-memory) |
+| CryptoAgent | An enterprise-grade solution for fetching, analyzing, and summarizing cryptocurrency data. | [https://github.com/The-Swarm-Corporation/CryptoAgent](https://github.com/The-Swarm-Corporation/CryptoAgent) |
+| AgentParse | A high-performance parsing library for mapping structured data into agent-understandable blocks. | [https://github.com/The-Swarm-Corporation/AgentParse](https://github.com/The-Swarm-Corporation/AgentParse) |
+| CodeGuardian | An intelligent agent for automating the generation of production-grade unit tests for Python code. | [https://github.com/The-Swarm-Corporation/CodeGuardian](https://github.com/The-Swarm-Corporation/CodeGuardian) |
+| Marketing-Swarm-Template | A framework for creating multi-platform marketing content using Swarms AI agents. | [https://github.com/The-Swarm-Corporation/Marketing-Swarm-Template](https://github.com/The-Swarm-Corporation/Marketing-Swarm-Template) |
+| HTX-Swarm | A multi-agent system for real-time market analysis of HTX exchange data. | [https://github.com/The-Swarm-Corporation/HTX-Swarm](https://github.com/The-Swarm-Corporation/HTX-Swarm) |
+| MultiModelOptimizer | A hierarchical parameter synchronization approach for joint training of transformer models. | [https://github.com/The-Swarm-Corporation/MultiModelOptimizer](https://github.com/The-Swarm-Corporation/MultiModelOptimizer) |
+| MortgageUnderwritingSwarm | A multi-agent pipeline for automating mortgage underwriting processes. | [https://github.com/The-Swarm-Corporation/MortgageUnderwritingSwarm](https://github.com/The-Swarm-Corporation/MortgageUnderwritingSwarm) |
+| DermaSwarm | A multi-agent system for dermatologists to diagnose and treat skin conditions collaboratively. | [https://github.com/The-Swarm-Corporation/DermaSwarm](https://github.com/The-Swarm-Corporation/DermaSwarm) |
+| IoTAgents | Integrates IoT data with AI agents for seamless parsing and processing of data streams. | [https://github.com/The-Swarm-Corporation/IoTAgents](https://github.com/The-Swarm-Corporation/IoTAgents) |
+| eth-agent | An autonomous agent for analyzing on-chain Ethereum data. | [https://github.com/The-Swarm-Corporation/eth-agent](https://github.com/The-Swarm-Corporation/eth-agent) |
+| Medical-Swarm-One-Click | A template for building safe, reliable, and production-grade medical multi-agent systems. | [https://github.com/The-Swarm-Corporation/Medical-Swarm-One-Click](https://github.com/The-Swarm-Corporation/Medical-Swarm-One-Click) |
+| Swarms-Example-1-Click-Template | A one-click template for building Swarms applications quickly. | [https://github.com/The-Swarm-Corporation/Swarms-Example-1-Click-Template](https://github.com/The-Swarm-Corporation/Swarms-Example-1-Click-Template) |
+| Custom-Swarms-Spec-Template | An official specification template for custom swarm development using the Swarms Framework. | [https://github.com/The-Swarm-Corporation/Custom-Swarms-Spec-Template](https://github.com/The-Swarm-Corporation/Custom-Swarms-Spec-Template) |
+| Swarms-LlamaIndex-RAG-Template | A template for integrating Llama Index into Swarms applications for RAG capabilities. | [https://github.com/The-Swarm-Corporation/Swarms-LlamaIndex-RAG-Template](https://github.com/The-Swarm-Corporation/Swarms-LlamaIndex-RAG-Template) |
+| ForexTreeSwarm | A forex market analysis system using a swarm of AI agents organized in a forest structure. | [https://github.com/The-Swarm-Corporation/ForexTreeSwarm](https://github.com/The-Swarm-Corporation/ForexTreeSwarm) |
+| Generalist-Mathematician-Swarm | A swarm of agents for solving complex mathematical problems collaboratively. | [https://github.com/The-Swarm-Corporation/Generalist-Mathematician-Swarm](https://github.com/The-Swarm-Corporation/Generalist-Mathematician-Swarm) |
+| Multi-Modal-XRAY-Diagnosis-Medical-Swarm-Template | A template for analyzing X-rays, MRIs, and more using a swarm of agents. | [https://github.com/The-Swarm-Corporation/Multi-Modal-XRAY-Diagnosis-Medical-Swarm-Template](https://github.com/The-Swarm-Corporation/Multi-Modal-XRAY-Diagnosis-Medical-Swarm-Template) |
+| AgentRAGProtocol | A protocol for integrating Retrieval-Augmented Generation (RAG) into AI agents. | [https://github.com/The-Swarm-Corporation/AgentRAGProtocol](https://github.com/The-Swarm-Corporation/AgentRAGProtocol) |
+| Multi-Agent-RAG-Template | A template for creating collaborative AI agent teams for document processing and analysis. | [https://github.com/The-Swarm-Corporation/Multi-Agent-RAG-Template](https://github.com/The-Swarm-Corporation/Multi-Agent-RAG-Template) |
+| REACT-Yaml-Agent | An implementation of a REACT agent using YAML instead of JSON. | [https://github.com/The-Swarm-Corporation/REACT-Yaml-Agent](https://github.com/The-Swarm-Corporation/REACT-Yaml-Agent) |
+| SwarmsXGCP | A template for deploying Swarms agents on Google Cloud Run. | [https://github.com/The-Swarm-Corporation/SwarmsXGCP](https://github.com/The-Swarm-Corporation/SwarmsXGCP) |
+| Legal-Swarm-Template | A one-click template for building legal-focused Swarms applications. | [https://github.com/The-Swarm-Corporation/Legal-Swarm-Template](https://github.com/The-Swarm-Corporation/Legal-Swarm-Template) |
+| swarms_sim | A simulation of a swarm of agents in a professional workplace environment. | [https://github.com/The-Swarm-Corporation/swarms_sim](https://github.com/The-Swarm-Corporation/swarms_sim) |
+| medical-problems | A collection of medical problem statements for building Swarms applications. | [https://github.com/The-Swarm-Corporation/medical-problems](https://github.com/The-Swarm-Corporation/medical-problems) |
+| swarm-ecosystem | An overview of the Swarm Ecosystem and its components. | [https://github.com/The-Swarm-Corporation/swarm-ecosystem](https://github.com/The-Swarm-Corporation/swarm-ecosystem) |
+| swarms_ecosystem_md | MDX documentation for the Swarm Ecosystem. | [https://github.com/The-Swarm-Corporation/swarms_ecosystem_md](https://github.com/The-Swarm-Corporation/swarms_ecosystem_md) |
+
+
diff --git a/docs/swarms/examples/vision_tools.md b/docs/swarms/examples/vision_tools.md
new file mode 100644
index 00000000..92b487c7
--- /dev/null
+++ b/docs/swarms/examples/vision_tools.md
@@ -0,0 +1,138 @@
+# Agents with Vision and Tool Usage
+
+This tutorial demonstrates how to create intelligent agents that can analyze images and use custom tools to perform specific actions based on their visual observations. You'll learn to build a quality control agent that can process images, identify potential security concerns, and automatically trigger appropriate responses using function calling capabilities.
+
+## What You'll Learn
+
+- How to configure an agent with multi-modal capabilities for image analysis
+- How to integrate custom tools and functions with vision-enabled agents
+- How to implement automated security analysis based on visual observations
+- How to use function calling to trigger specific actions from image analysis results
+- Best practices for building production-ready vision agents with tool integration
+
+## Use Cases
+
+This approach is perfect for:
+
+- **Quality Control Systems**: Automated inspection of manufacturing processes
+
+- **Security Monitoring**: Real-time threat detection and response
+
+- **Object Detection**: Identifying and categorizing items in images
+
+- **Compliance Checking**: Ensuring standards are met in various environments
+
+- **Automated Reporting**: Generating detailed analysis reports from visual data
+
+## Installation
+
+Install the swarms package using pip:
+
+```bash
+pip install -U swarms
+```
+
+## Basic Setup
+
+1. First, set up your environment variables:
+
+```bash
+WORKSPACE_DIR="agent_workspace"
+OPENAI_API_KEY=""
+```
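+
+If you keep these values in a `.env` file, one way to make them available to the agent is to load them at the top of your script. This is a minimal sketch that assumes the `python-dotenv` package is installed:
+
+```python
+from dotenv import load_dotenv
+
+# Load OPENAI_API_KEY and WORKSPACE_DIR from a local .env file
+# into the process environment before constructing the agent.
+load_dotenv()
+```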
+
+
+## Code
+
+- Create tools for your agent as a function with types and documentation
+
+- Pass tools to your agent `Agent(tools=[list_of_callables])`
+
+- Add your image path to the run method like: `Agent().run(task=task, img=img)`
+
+```python
+from typing import Optional
+
+from swarms.structs import Agent
+from swarms.prompts.logistics import (
+ Quality_Control_Agent_Prompt,
+)
+
+
+# Image for analysis
+factory_image = "image.jpg"
+
+
+def security_analysis(danger_level: Optional[str] = None) -> str:
+ """
+ Analyzes the security danger level and returns an appropriate response.
+
+ Args:
+ danger_level (str, optional): The level of danger to analyze.
+ Can be "low", "medium", "high", or None. Defaults to None.
+
+ Returns:
+ str: A string describing the danger level assessment.
+ - "No danger level provided" if danger_level is None
+ - "No danger" if danger_level is "low"
+ - "Medium danger" if danger_level is "medium"
+ - "High danger" if danger_level is "high"
+ - "Unknown danger level" for any other value
+ """
+ if danger_level is None:
+ return "No danger level provided"
+
+ if danger_level == "low":
+ return "No danger"
+
+ if danger_level == "medium":
+ return "Medium danger"
+
+ if danger_level == "high":
+ return "High danger"
+
+ return "Unknown danger level"
+
+
+custom_system_prompt = f"""
+{Quality_Control_Agent_Prompt}
+
+You have access to tools that can help you with your analysis. When you need to perform a security analysis, you MUST use the security_analysis function with an appropriate danger level (low, medium, or high) based on your observations.
+
+Always use the available tools when they are relevant to the task. If you determine there is any level of danger or security concern, call the security_analysis function with the appropriate danger level.
+"""
+
+# Quality control agent
+quality_control_agent = Agent(
+ agent_name="Quality Control Agent",
+ agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.",
+ # model_name="anthropic/claude-3-opus-20240229",
+ model_name="gpt-4o-mini",
+ system_prompt=custom_system_prompt,
+ multi_modal=True,
+ max_loops=1,
+ output_type="str-all-except-first",
+ # tools_list_dictionary=[schema],
+ tools=[security_analysis],
+)
+
+
+response = quality_control_agent.run(
+ task="Analyze the image and then perform a security analysis. Based on what you see in the image, determine if there is a low, medium, or high danger level and call the security_analysis function with that danger level",
+ img=factory_image,
+)
+
+print(response)
+```
+
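+If you have several images to inspect, the same agent can be reused by looping over the `img` parameter shown above. This is a minimal sketch; the image paths are hypothetical:
+
+```python
+# Hypothetical batch of factory images to inspect one at a time
+factory_images = ["line_a.jpg", "line_b.jpg", "line_c.jpg"]
+
+reports = {}
+for path in factory_images:
+    # Each call reuses the same agent, system prompt, and security_analysis tool
+    reports[path] = quality_control_agent.run(
+        task="Analyze the image and call security_analysis with the appropriate danger level.",
+        img=path,
+    )
+
+for path, report in reports.items():
+    print(f"=== {path} ===\n{report}\n")
+```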
+
+## Support and Community
+
+If you run into issues or want to learn more, the resources below include our documentation, Discord community, social channels, and video tutorials.
+
+| Platform | Link | Description |
+|----------|------|-------------|
+| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
+| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
+| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
+| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
+| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
+| 🎫 Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events |
+
diff --git a/docs/swarms/features.md b/docs/swarms/features.md
new file mode 100644
index 00000000..7fbdd22a
--- /dev/null
+++ b/docs/swarms/features.md
@@ -0,0 +1,42 @@
+## ✨ Enterprise Features
+
+Swarms delivers a comprehensive, enterprise-grade multi-agent infrastructure platform designed for production-scale deployments and seamless integration with existing systems.
+
+| Category | Enterprise Capabilities | Business Value |
+|----------|------------------------|----------------|
+| 🏢 **Enterprise Architecture** | • Production-Ready Infrastructure<br>• High Availability Systems<br>• Modular Microservices Design<br>• Comprehensive Observability<br>• Backwards Compatibility | • 99.9%+ Uptime Guarantee<br>• Reduced Operational Overhead<br>• Seamless Legacy Integration<br>• Enhanced System Monitoring<br>• Risk-Free Migration Path |
+| 🤖 **Multi-Agent Orchestration** | • Hierarchical Agent Swarms<br>• Parallel Processing Pipelines<br>• Sequential Workflow Orchestration<br>• Graph-Based Agent Networks<br>• Dynamic Agent Composition<br>• Agent Registry Management | • Complex Business Process Automation<br>• Scalable Task Distribution<br>• Flexible Workflow Adaptation<br>• Optimized Resource Utilization<br>• Centralized Agent Governance<br>• Enterprise-Grade Agent Lifecycle Management |
+| 🔄 **Enterprise Integration** | • Multi-Model Provider Support<br>• Custom Agent Development Framework<br>• Extensive Enterprise Tool Library<br>• Multiple Memory Systems<br>• Backwards Compatibility with LangChain, AutoGen, CrewAI<br>• Standardized API Interfaces | • Vendor-Agnostic Architecture<br>• Custom Solution Development<br>• Extended Functionality Integration<br>• Enhanced Knowledge Management<br>• Seamless Framework Migration<br>• Reduced Integration Complexity |
+| 📈 **Enterprise Scalability** | • Concurrent Multi-Agent Processing<br>• Intelligent Resource Management<br>• Load Balancing & Auto-Scaling<br>• Horizontal Scaling Capabilities<br>• Performance Optimization<br>• Capacity Planning Tools | • High-Throughput Processing<br>• Cost-Effective Resource Utilization<br>• Elastic Scaling Based on Demand<br>• Linear Performance Scaling<br>• Optimized Response Times<br>• Predictable Growth Planning |
+| 🛠️ **Developer Experience** | • Intuitive Enterprise API<br>• Comprehensive Documentation<br>• Active Enterprise Community<br>• CLI & SDK Tools<br>• IDE Integration Support<br>• Code Generation Templates | • Accelerated Development Cycles<br>• Reduced Learning Curve<br>• Expert Community Support<br>• Rapid Deployment Capabilities<br>• Enhanced Developer Productivity<br>• Standardized Development Patterns |
+| 🔐 **Enterprise Security** | • Comprehensive Error Handling<br>• Advanced Rate Limiting<br>• Real-Time Monitoring Integration<br>• Detailed Audit Logging<br>• Role-Based Access Control<br>• Data Encryption & Privacy | • Enhanced System Reliability<br>• API Security Protection<br>• Proactive Issue Detection<br>• Regulatory Compliance Support<br>• Granular Access Management<br>• Enterprise Data Protection |
+| 📊 **Advanced Enterprise Features** | • SpreadsheetSwarm for Mass Agent Management<br>• Group Chat for Collaborative AI<br>• Centralized Agent Registry<br>• Mixture of Agents for Complex Solutions<br>• Agent Performance Analytics<br>• Automated Agent Optimization | • Large-Scale Agent Operations<br>• Team-Based AI Collaboration<br>• Centralized Agent Governance<br>• Sophisticated Problem Solving<br>• Performance Insights & Optimization<br>• Continuous Agent Improvement |
+| 🔌 **Provider Ecosystem** | • OpenAI Integration<br>• Anthropic Claude Support<br>• ChromaDB Vector Database<br>• Custom Provider Framework<br>• Multi-Cloud Deployment<br>• Hybrid Infrastructure Support | • Provider Flexibility & Independence<br>• Advanced Vector Search Capabilities<br>• Custom Integration Development<br>• Cloud-Agnostic Architecture<br>• Flexible Deployment Options<br>• Risk Mitigation Through Diversification |
+| 💪 **Production Readiness** | • Automatic Retry Mechanisms<br>• Asynchronous Processing Support<br>• Environment Configuration Management<br>• Type Safety & Validation<br>• Health Check Endpoints<br>• Graceful Degradation | • Enhanced System Reliability<br>• Improved Performance Characteristics<br>• Simplified Configuration Management<br>• Reduced Runtime Errors<br>• Proactive Health Monitoring<br>• Continuous Service Availability |
+| 🎯 **Enterprise Use Cases** | • Industry-Specific Agent Solutions<br>• Custom Workflow Development<br>• Regulatory Compliance Support<br>• Extensible Framework Architecture<br>• Multi-Tenant Support<br>• Enterprise SLA Guarantees | • Rapid Industry Deployment<br>• Flexible Solution Architecture<br>• Compliance-Ready Implementations<br>• Future-Proof Technology Investment<br>• Scalable Multi-Client Operations<br>• Predictable Service Quality |
+
+---
+
+## 🚀 Missing a Feature?
+
+Swarms is continuously evolving to meet enterprise needs. If you don't see a specific feature or capability that your organization requires:
+
+### 📝 **Report Missing Features**
+
+- Create a [GitHub Issue](https://github.com/kyegomez/swarms/issues) to request new features
+
+- Describe your use case and business requirements
+
+- Our team will evaluate and prioritize based on enterprise demand
+
+### 📞 **Schedule a Consultation**
+
+- [Book a call with our enterprise team](https://cal.com/swarms/swarms-onboarding-session) for personalized guidance
+
+- Discuss your specific multi-agent architecture requirements
+
+- Get expert recommendations for your implementation strategy
+
+- Explore custom enterprise solutions and integrations
+
+Our team is committed to ensuring Swarms meets your enterprise multi-agent infrastructure needs. We welcome feedback and collaboration to build the most comprehensive platform for production-scale AI agent deployments.
diff --git a/docs/swarms/structs/interactive_groupchat.md b/docs/swarms/structs/interactive_groupchat.md
index 2404af02..32008016 100644
--- a/docs/swarms/structs/interactive_groupchat.md
+++ b/docs/swarms/structs/interactive_groupchat.md
@@ -4,12 +4,20 @@ The InteractiveGroupChat is a sophisticated multi-agent system that enables inte
## Features
-- **@mentions Support**: Direct tasks to specific agents using @agent_name syntax
-- **Multi-Agent Collaboration**: Multiple mentioned agents can see and respond to each other's tasks
-- **Callable Function Support**: Supports both Agent instances and callable functions as chat participants
-- **Comprehensive Error Handling**: Custom error classes for different scenarios
-- **Conversation History**: Maintains a complete history of the conversation
-- **Flexible Output Formatting**: Configurable output format for conversation history
+| Feature | Description |
+|---------|-------------|
+| **@mentions Support** | Direct tasks to specific agents using @agent_name syntax |
+| **Multi-Agent Collaboration** | Multiple mentioned agents can see and respond to each other's tasks |
+| **Enhanced Collaborative Prompts** | Agents are trained to acknowledge, build upon, and synthesize each other's responses |
+| **Speaker Functions** | Control the order in which agents respond (round robin, random, priority, custom) |
+| **Dynamic Speaker Management** | Change speaker functions and priorities during runtime |
+| **Random Dynamic Speaker** | Advanced speaker function that follows @mentions in agent responses |
+| **Parallel and Sequential Strategies** | Support for both parallel and sequential agent execution |
+| **Callable Function Support** | Supports both Agent instances and callable functions as chat participants |
+| **Comprehensive Error Handling** | Custom error classes for different scenarios |
+| **Conversation History** | Maintains a complete history of the conversation |
+| **Flexible Output Formatting** | Configurable output format for conversation history |
+| **Interactive Terminal Mode** | Full REPL interface for real-time chat with agents |
## Installation
@@ -35,6 +43,8 @@ Initializes a new InteractiveGroupChat instance with the specified configuration
| `max_loops` | int | Maximum conversation turns | 1 |
| `output_type` | str | Type of output format | "string" |
| `interactive` | bool | Whether to enable interactive mode | False |
+| `speaker_function` | Union[str, Callable] | Function to determine speaking order | round_robin_speaker |
+| `speaker_state` | dict | Initial state for speaker function | {"current_index": 0} |
**Example:**
@@ -54,7 +64,9 @@ tax_expert = Agent(
model_name="gpt-4"
)
-# Initialize group chat
+# Initialize group chat with speaker function
+from swarms.structs.interactive_groupchat import round_robin_speaker
+
chat = InteractiveGroupChat(
id="finance-chat-001",
name="Financial Advisory Team",
@@ -62,7 +74,8 @@ chat = InteractiveGroupChat(
agents=[financial_advisor, tax_expert],
max_loops=3,
output_type="string",
- interactive=True
+ interactive=True,
+ speaker_function=round_robin_speaker
)
```
@@ -74,6 +87,8 @@ Processes a task and gets responses from mentioned agents. This is the main meth
**Arguments:**
- `task` (str): The input task containing @mentions to agents
+- `img` (Optional[str]): Optional image for the task
+- `imgs` (Optional[List[str]]): Optional list of images for the task
**Returns:**
@@ -88,6 +103,10 @@ print(response)
# Multiple agent collaboration
response = chat.run("@FinancialAdvisor and @TaxExpert, how can I minimize taxes on my investments?")
print(response)
+
+# With image input
+response = chat.run("@FinancialAdvisor analyze this chart", img="chart.png")
+print(response)
```
### Start Interactive Session (`start_interactive_session`)
@@ -98,6 +117,13 @@ Starts an interactive terminal session for real-time chat with agents. This crea
**Arguments:**
None
+**Features:**
+- Real-time chat with agents using @mentions
+- View available agents and their descriptions
+- Change speaker functions during the session
+- Built-in help system
+- Graceful exit with 'exit' or 'quit' commands
+
**Example:**
```python
@@ -111,6 +137,119 @@ chat = InteractiveGroupChat(
chat.start_interactive_session()
```
+**Interactive Session Commands:**
+- `@agent_name message` - Mention specific agents
+- `help` or `?` - Show help information
+- `speaker` - Change speaker function
+- `exit` or `quit` - End the session
+
+### Set Speaker Function (`set_speaker_function`)
+
+**Description:**
+
+Dynamically changes the speaker function and optional state during runtime.
+
+**Arguments:**
+
+- `speaker_function` (Union[str, Callable]): Function that determines speaking order
+ - String options: "round-robin-speaker", "random-speaker", "priority-speaker", "random-dynamic-speaker"
+ - Callable: Custom function that takes (agents: List[str], **kwargs) -> str
+- `speaker_state` (dict, optional): State for the speaker function
+
+**Example:**
+```python
+from swarms.structs.interactive_groupchat import random_speaker, priority_speaker
+
+# Change to random speaker function
+chat.set_speaker_function(random_speaker)
+
+# Change to priority speaker with custom priorities
+chat.set_speaker_function(priority_speaker, {"financial_advisor": 3, "tax_expert": 2})
+
+# Change to random dynamic speaker
+chat.set_speaker_function("random-dynamic-speaker")
+```
+
+### Get Available Speaker Functions (`get_available_speaker_functions`)
+
+**Description:**
+
+Returns a list of all available built-in speaker function names.
+
+**Arguments:**
+None
+
+**Returns:**
+
+- List[str]: List of available speaker function names
+
+**Example:**
+```python
+available_functions = chat.get_available_speaker_functions()
+print(available_functions)
+# Output: ['round-robin-speaker', 'random-speaker', 'priority-speaker', 'random-dynamic-speaker']
+```
+
+### Get Current Speaker Function (`get_current_speaker_function`)
+
+**Description:**
+
+Returns the name of the currently active speaker function.
+
+**Arguments:**
+None
+
+**Returns:**
+
+- str: Name of the current speaker function, or "custom" if it's a custom function
+
+**Example:**
+```python
+current_function = chat.get_current_speaker_function()
+print(current_function) # Output: "round-robin-speaker"
+```
+
+### Set Priorities (`set_priorities`)
+
+**Description:**
+
+Sets agent priorities for priority-based speaking order.
+
+**Arguments:**
+
+- `priorities` (dict): Dictionary mapping agent names to priority weights
+
+**Example:**
+```python
+# Set agent priorities (higher numbers = higher priority)
+chat.set_priorities({
+ "financial_advisor": 5,
+ "tax_expert": 3,
+ "investment_analyst": 1
+})
+```
+
+### Set Dynamic Strategy (`set_dynamic_strategy`)
+
+**Description:**
+
+Sets the strategy for the random-dynamic-speaker function.
+
+**Arguments:**
+
+- `strategy` (str): Either "sequential" or "parallel"
+ - "sequential": Process one agent at a time based on @mentions
+ - "parallel": Process all mentioned agents simultaneously
+
+**Example:**
+```python
+# Set to sequential strategy (one agent at a time)
+chat.set_dynamic_strategy("sequential")
+
+# Set to parallel strategy (all mentioned agents respond simultaneously)
+chat.set_dynamic_strategy("parallel")
+```
+
### Extract Mentions (`_extract_mentions`)
**Description:**
@@ -178,7 +317,7 @@ chat = InteractiveGroupChat(
**Description:**
-Internal method that updates each agent's system prompt with information about other agents and the group chat.
+Internal method that updates each agent's system prompt with information about other agents and the group chat. This includes enhanced collaborative instructions that teach agents how to acknowledge, build upon, and synthesize each other's responses.
**Arguments:**
@@ -188,7 +327,227 @@ None
```python
# Agent prompts are automatically updated during initialization
chat = InteractiveGroupChat(agents=[financial_advisor, tax_expert])
-# Each agent now knows about the other participants in the chat
+# Each agent now knows about the other participants and how to collaborate effectively
+```
+
+### Get Speaking Order (`_get_speaking_order`)
+
+**Description:**
+
+Internal method that determines the speaking order using the configured speaker function.
+
+**Arguments:**
+
+- `mentioned_agents` (List[str]): List of agent names that were mentioned
+
+**Returns:**
+
+- List[str]: List of agent names in the order they should speak
+
+**Example:**
+```python
+# Internal usage (not typically called directly)
+mentioned = ["financial_advisor", "tax_expert"]
+order = chat._get_speaking_order(mentioned)
+print(order) # Order determined by speaker function
+```
+
+## Speaker Functions
+
+InteractiveGroupChat supports various speaker functions that control the order in which agents respond when multiple agents are mentioned.
+
+### Built-in Speaker Functions
+
+#### Round Robin Speaker (`round_robin_speaker`)
+
+Agents speak in a fixed order, cycling through the list in sequence.
+
+```python
+from swarms.structs.interactive_groupchat import InteractiveGroupChat, round_robin_speaker
+
+chat = InteractiveGroupChat(
+ agents=agents,
+ speaker_function=round_robin_speaker,
+ interactive=False,
+)
+```
+
+**Behavior:**
+
+- Agents speak in the order they were mentioned
+
+- Maintains state between calls to continue the cycle
+
+- Predictable and fair distribution of speaking turns
+
+#### Random Speaker (`random_speaker`)
+
+Agents speak in random order each time.
+
+```python
+from swarms.structs.interactive_groupchat import InteractiveGroupChat, random_speaker
+
+chat = InteractiveGroupChat(
+ agents=agents,
+ speaker_function=random_speaker,
+ interactive=False,
+)
+```
+
+**Behavior:**
+
+- Speaking order is randomized for each task
+
+- Provides variety and prevents bias toward first-mentioned agents
+
+- Good for brainstorming sessions
+
+#### Priority Speaker (`priority_speaker`)
+
+Agents speak based on priority weights assigned to each agent.
+
+```python
+from swarms.structs.interactive_groupchat import InteractiveGroupChat, priority_speaker
+
+chat = InteractiveGroupChat(
+ agents=agents,
+ speaker_function=priority_speaker,
+ speaker_state={"priorities": {"financial_advisor": 3, "tax_expert": 2, "analyst": 1}},
+ interactive=False,
+)
+```
+
+**Behavior:**
+
+- Higher priority agents speak first
+
+- Uses weighted probability for selection
+
+- Good for hierarchical teams or expert-led discussions
+
+#### Random Dynamic Speaker (`random_dynamic_speaker`)
+
+Advanced speaker function that follows @mentions in agent responses, enabling dynamic conversation flow.
+
+```python
+from swarms.structs.interactive_groupchat import InteractiveGroupChat, random_dynamic_speaker
+
+chat = InteractiveGroupChat(
+ agents=agents,
+ speaker_function=random_dynamic_speaker,
+ speaker_state={"strategy": "parallel"}, # or "sequential"
+ interactive=False,
+)
+```
+
+**Behavior:**
+
+- **First Call**: Randomly selects an agent to start the conversation
+- **Subsequent Calls**: Extracts @mentions from the previous agent's response and selects the next speaker(s)
+- **Two Strategies**:
+ - **Sequential**: Processes one agent at a time based on @mentions
+ - **Parallel**: Processes all mentioned agents simultaneously
+
+**Example Dynamic Flow:**
+```python
+# Agent A responds: "I think @AgentB should analyze this data and @AgentC should review the methodology"
+# With sequential strategy: Agent B speaks next
+# With parallel strategy: Both Agent B and Agent C speak simultaneously
+```
+
+**Use Cases:**
+- Complex problem-solving where agents need to delegate to specific experts
+- Dynamic workflows where the conversation flow depends on agent responses
+- Collaborative decision-making processes
+
+### Custom Speaker Functions
+
+You can create your own speaker functions to implement custom logic:
+
+```python
+from typing import List
+
+def custom_speaker(agents: List[str], **kwargs) -> str:
+ """
+ Custom speaker function that selects agents based on specific criteria.
+
+ Args:
+ agents: List of agent names
+ **kwargs: Additional arguments (context, time, etc.)
+
+ Returns:
+ Selected agent name
+ """
+ # Your custom logic here
+ if "urgent" in kwargs.get("context", ""):
+ return "emergency_agent" if "emergency_agent" in agents else agents[0]
+
+ # Default to first agent
+ return agents[0]
+
+# Use custom speaker function
+chat = InteractiveGroupChat(
+ agents=agents,
+ speaker_function=custom_speaker,
+ interactive=False,
+)
+```
+
+### Dynamic Speaker Function Changes
+
+You can change the speaker function during runtime:
+
+```python
+# Start with round robin
+chat = InteractiveGroupChat(
+ agents=agents,
+ speaker_function=round_robin_speaker,
+ interactive=False,
+)
+
+# Change to random
+chat.set_speaker_function(random_speaker)
+
+# Change to priority with custom priorities
+chat.set_priorities({"financial_advisor": 5, "tax_expert": 3, "analyst": 1})
+chat.set_speaker_function(priority_speaker)
+
+# Change to dynamic speaker with parallel strategy
+chat.set_speaker_function("random-dynamic-speaker")
+chat.set_dynamic_strategy("parallel")
+```
+
+## Enhanced Collaborative Behavior
+
+The InteractiveGroupChat now includes enhanced collaborative prompts that ensure agents work together effectively.
+
+### Collaborative Response Protocol
+
+Every agent receives instructions to:
+
+1. **Read and understand** all previous responses from other agents
+2. **Acknowledge** what other agents have said
+3. **Build upon** previous insights rather than repeating information
+4. **Synthesize** multiple perspectives when possible
+5. **Delegate** appropriately using @mentions
+
+### Response Structure
+
+Agents are guided to structure their responses as:
+
+1. **ACKNOWLEDGE**: "I've reviewed the responses from @agent1 and @agent2..."
+2. **BUILD**: "Building on @agent1's analysis of the data..."
+3. **CONTRIBUTE**: "From my perspective, I would add..."
+4. **COLLABORATE**: "To get a complete picture, let me ask @agent3 to..."
+5. **SYNTHESIZE**: "Combining our insights, the key findings are..."
+
+### Example Collaborative Response
+
+```python
+task = "Analyze our Q3 performance. @analyst @researcher @strategist"
+
+# Expected collaborative behavior:
+# Analyst: "Based on the data analysis, I can see clear growth trends in Q3..."
+# Researcher: "Building on @analyst's data insights, I can add that market research shows..."
+# Strategist: "Synthesizing @analyst's data and @researcher's market insights, I recommend..."
```
## Error Classes
@@ -237,7 +596,7 @@ except NoMentionedAgentsError as e:
print(f"No agents mentioned: {e}")
```
-### InvalidtaskFormatError
+### InvalidTaskFormatError
**Description:**
@@ -247,19 +606,342 @@ Raised when the task format is invalid.
```python
try:
chat.run("@Invalid@Format")
-except InvalidtaskFormatError as e:
+except InvalidTaskFormatError as e:
print(f"Invalid task format: {e}")
```
+### InvalidSpeakerFunctionError
+
+**Description:**
+
+Raised when an invalid speaker function is provided.
+
+**Example:**
+```python
+def invalid_speaker(agents, **kwargs):
+ return 123 # Should return string, not int
+
+try:
+ chat = InteractiveGroupChat(
+ agents=agents,
+ speaker_function=invalid_speaker,
+ )
+except InvalidSpeakerFunctionError as e:
+ print(f"Invalid speaker function: {e}")
+```
+
## Best Practices
| Best Practice | Description | Example |
|--------------|-------------|---------|
| Agent Naming | Use clear, unique names for agents to avoid confusion | `financial_advisor`, `tax_expert` |
-| task Format | Always use @mentions to direct tasks to specific agents | `@financial_advisor What's your investment advice?` |
+| Task Format | Always use @mentions to direct tasks to specific agents | `@financial_advisor What's your investment advice?` |
+| Speaker Functions | Choose appropriate speaker functions for your use case | Round robin for fairness, priority for expert-led discussions |
+| Dynamic Speaker | Use random-dynamic-speaker for complex workflows with delegation | When agents need to call on specific experts |
+| Strategy Selection | Choose sequential for focused discussions, parallel for brainstorming | Sequential for analysis, parallel for idea generation |
+| Collaborative Design | Design agents with complementary expertise for better collaboration | Analyst + Researcher + Strategist |
| Error Handling | Implement proper error handling for various scenarios | `try/except` blocks for `AgentNotFoundError` |
| Context Management | Be aware that agents can see the full conversation history | Monitor conversation length and relevance |
| Resource Management | Consider the number of agents and task length to optimize performance | Limit max_loops and task size |
+| Dynamic Adaptation | Change speaker functions based on different phases of work | Round robin for brainstorming, priority for decision-making |
+
+## Usage Examples
+
+### Basic Multi-Agent Collaboration
+
+```python
+from swarms import Agent
+from swarms.structs.interactive_groupchat import InteractiveGroupChat, round_robin_speaker
+
+# Create specialized agents
+analyst = Agent(
+ agent_name="analyst",
+ system_prompt="You are a data analyst specializing in business intelligence.",
+    model_name="gpt-3.5-turbo",
+)
+
+researcher = Agent(
+ agent_name="researcher",
+ system_prompt="You are a market researcher with expertise in consumer behavior.",
+    model_name="gpt-3.5-turbo",
+)
+
+strategist = Agent(
+ agent_name="strategist",
+ system_prompt="You are a strategic consultant who synthesizes insights into actionable recommendations.",
+    model_name="gpt-3.5-turbo",
+)
+
+# Create collaborative group chat
+chat = InteractiveGroupChat(
+ name="Business Analysis Team",
+ description="A collaborative team for comprehensive business analysis",
+ agents=[analyst, researcher, strategist],
+ speaker_function=round_robin_speaker,
+ interactive=False,
+)
+
+# Collaborative analysis task
+task = """Analyze our company's Q3 performance. We have the following data:
+- Revenue: $2.5M (up 15% from Q2)
+- Customer acquisition cost: $45 (down 8% from Q2)
+- Market share: 3.2% (up 0.5% from Q2)
+
+@analyst @researcher @strategist please provide a comprehensive analysis."""
+
+response = chat.run(task)
+print(response)
+```
+
+### Priority-Based Expert Consultation
+
+```python
+from swarms.structs.interactive_groupchat import InteractiveGroupChat, priority_speaker
+
+# Create expert agents with different priority levels
+senior_expert = Agent(
+ agent_name="senior_expert",
+ system_prompt="You are a senior consultant with 15+ years of experience.",
+    model_name="gpt-4",
+)
+
+junior_expert = Agent(
+ agent_name="junior_expert",
+ system_prompt="You are a junior consultant with 3 years of experience.",
+    model_name="gpt-3.5-turbo",
+)
+
+assistant = Agent(
+ agent_name="assistant",
+ system_prompt="You are a research assistant who gathers supporting information.",
+    model_name="gpt-3.5-turbo",
+)
+
+# Create priority-based group chat
+chat = InteractiveGroupChat(
+ name="Expert Consultation Team",
+ description="Expert-led consultation with collaborative input",
+ agents=[senior_expert, junior_expert, assistant],
+ speaker_function=priority_speaker,
+ speaker_state={"priorities": {"senior_expert": 5, "junior_expert": 3, "assistant": 1}},
+ interactive=False,
+)
+
+# Expert consultation task
+task = """We need strategic advice on entering a new market.
+@senior_expert @junior_expert @assistant please provide your insights."""
+
+response = chat.run(task)
+print(response)
+```
+
+### Dynamic Speaker Function with Delegation
+
+```python
+from swarms.structs.interactive_groupchat import InteractiveGroupChat, random_dynamic_speaker
+
+# Create specialized medical agents
+cardiologist = Agent(
+ agent_name="cardiologist",
+ system_prompt="You are a cardiologist specializing in heart conditions.",
+    model_name="gpt-4",
+)
+
+oncologist = Agent(
+ agent_name="oncologist",
+ system_prompt="You are an oncologist specializing in cancer treatment.",
+    model_name="gpt-4",
+)
+
+endocrinologist = Agent(
+ agent_name="endocrinologist",
+ system_prompt="You are an endocrinologist specializing in hormone disorders.",
+    model_name="gpt-4",
+)
+
+# Create dynamic group chat
+chat = InteractiveGroupChat(
+ name="Medical Panel Discussion",
+ description="A collaborative panel of medical specialists",
+ agents=[cardiologist, oncologist, endocrinologist],
+ speaker_function=random_dynamic_speaker,
+ speaker_state={"strategy": "sequential"},
+ interactive=False,
+)
+
+# Complex medical case with dynamic delegation
+case = """CASE PRESENTATION:
+A 65-year-old male with Type 2 diabetes, hypertension, and recent diagnosis of
+stage 3 colon cancer presents with chest pain and shortness of breath.
+ECG shows ST-segment elevation. Recent blood work shows elevated blood glucose (280 mg/dL)
+and signs of infection (WBC 15,000, CRP elevated).
+
+@cardiologist @oncologist @endocrinologist please provide your assessment and treatment recommendations."""
+
+response = chat.run(case)
+print(response)
+```
+
+### Dynamic Speaker Function Changes
+
+```python
+from swarms.structs.interactive_groupchat import (
+ InteractiveGroupChat,
+ round_robin_speaker,
+ random_speaker,
+ priority_speaker,
+ random_dynamic_speaker
+)
+
+# Create brainstorming agents
+creative_agent = Agent(agent_name="creative", system_prompt="You are a creative thinker.")
+analytical_agent = Agent(agent_name="analytical", system_prompt="You are an analytical thinker.")
+practical_agent = Agent(agent_name="practical", system_prompt="You are a practical implementer.")
+
+chat = InteractiveGroupChat(
+ name="Dynamic Team",
+ agents=[creative_agent, analytical_agent, practical_agent],
+ speaker_function=round_robin_speaker,
+ interactive=False,
+)
+
+# Phase 1: Brainstorming (random order)
+chat.set_speaker_function(random_speaker)
+task1 = "Let's brainstorm new product ideas. @creative @analytical @practical"
+response1 = chat.run(task1)
+
+# Phase 2: Analysis (priority order)
+chat.set_priorities({"analytical": 3, "creative": 2, "practical": 1})
+chat.set_speaker_function(priority_speaker)
+task2 = "Now let's analyze the feasibility of these ideas. @creative @analytical @practical"
+response2 = chat.run(task2)
+
+# Phase 3: Dynamic delegation (agents mention each other)
+chat.set_speaker_function(random_dynamic_speaker)
+chat.set_dynamic_strategy("sequential")
+task3 = "Let's plan implementation with dynamic delegation. @creative @analytical @practical"
+response3 = chat.run(task3)
+
+# Phase 4: Final synthesis (round robin for equal input)
+chat.set_speaker_function(round_robin_speaker)
+task4 = "Finally, let's synthesize our findings. @creative @analytical @practical"
+response4 = chat.run(task4)
+```
+
+### Custom Speaker Function
+
+```python
+from typing import List
+
+def context_aware_speaker(agents: List[str], **kwargs) -> str:
+ """Custom speaker function that selects agents based on context."""
+ context = kwargs.get("context", "").lower()
+
+ if "data" in context or "analysis" in context:
+ return "analyst" if "analyst" in agents else agents[0]
+ elif "market" in context or "research" in context:
+ return "researcher" if "researcher" in agents else agents[0]
+ elif "strategy" in context or "planning" in context:
+ return "strategist" if "strategist" in agents else agents[0]
+ else:
+ return agents[0]
+
+# Use custom speaker function
+chat = InteractiveGroupChat(
+ name="Context-Aware Team",
+ agents=[analyst, researcher, strategist],
+ speaker_function=context_aware_speaker,
+ interactive=False,
+)
+
+# The speaker function will automatically select the most appropriate agent
+task = "We need to analyze our market position and develop a strategy."
+response = chat.run(task)
+```
+
+### Interactive Session with Enhanced Collaboration
+
+```python
+# Create agents designed for collaboration
+data_scientist = Agent(
+ agent_name="data_scientist",
+ system_prompt="You are a data scientist. When collaborating, always reference specific data points and build upon others' insights with quantitative support.",
+    model_name="gpt-4",
+)
+
+business_analyst = Agent(
+ agent_name="business_analyst",
+ system_prompt="You are a business analyst. When collaborating, always connect business insights to practical implications and build upon data analysis with business context.",
+    model_name="gpt-3.5-turbo",
+)
+
+product_manager = Agent(
+ agent_name="product_manager",
+ system_prompt="You are a product manager. When collaborating, always synthesize insights from all team members and provide actionable product recommendations.",
+    model_name="gpt-3.5-turbo",
+)
+
+# Start interactive session
+chat = InteractiveGroupChat(
+ name="Product Development Team",
+ description="A collaborative team for product development decisions",
+ agents=[data_scientist, business_analyst, product_manager],
+ speaker_function=round_robin_speaker,
+ interactive=True,
+)
+
+# Start the interactive session
+chat.start_interactive_session()
+```
+
+## Benefits and Use Cases
+
+### Benefits of Enhanced Collaboration
+
+1. **Reduced Redundancy**: Agents don't repeat what others have already said
+2. **Improved Synthesis**: Multiple perspectives are integrated into coherent conclusions
+3. **Better Delegation**: Agents naturally delegate to appropriate experts
+4. **Enhanced Problem Solving**: Complex problems are addressed systematically
+5. **More Natural Interactions**: Agents respond like real team members
+6. **Dynamic Workflows**: Conversation flow adapts based on agent responses
+7. **Flexible Execution**: Support for both sequential and parallel processing
+
+### Use Cases
+
+| Use Case Category | Specific Use Case | Agent Team Composition | Recommended Speaker Function |
+|------------------|-------------------|----------------------|------------------------------|
+| **Business Analysis and Strategy** | Data Analysis | Analyst + Researcher + Strategist | Round Robin |
+| | Market Research | Multiple experts analyzing different aspects | Random Dynamic |
+| | Strategic Planning | Expert-led discussions with collaborative input | Priority |
+| **Product Development** | Requirements Gathering | Product Manager + Developer + Designer | Round Robin |
+| | Technical Architecture | Senior + Junior developers with different expertise | Priority |
+| | User Experience | UX Designer + Product Manager + Developer | Random Dynamic |
+| **Research and Development** | Scientific Research | Multiple researchers with different specializations | Random Dynamic |
+| | Literature Review | Different experts reviewing various aspects | Round Robin |
+| | Experimental Design | Statistician + Domain Expert + Methodologist | Priority |
+| **Creative Projects** | Content Creation | Writer + Editor + Designer | Random |
+| | Marketing Campaigns | Creative + Analyst + Strategist | Random Dynamic |
+| | Design Projects | Designer + Developer + Product Manager | Round Robin |
+| **Problem Solving** | Troubleshooting | Technical + Business + User perspective experts | Priority |
+| | Crisis Management | Emergency + Communication + Technical teams | Priority |
+| | Decision Making | Executive + Analyst + Specialist | Priority |
+| **Medical Consultation** | Complex Cases | Multiple specialists (Cardiologist + Oncologist + Endocrinologist) | Random Dynamic |
+| | Treatment Planning | Senior + Junior doctors with different expertise | Priority |
+| | Research Review | Multiple researchers reviewing different aspects | Round Robin |
+
+### Speaker Function Selection Guide
+
+| Use Case | Recommended Speaker Function | Strategy | Reasoning |
+|----------|------------------------------|----------|-----------|
+| Team Meetings | Round Robin | N/A | Ensures equal participation |
+| Brainstorming | Random | N/A | Prevents bias and encourages creativity |
+| Expert Consultation | Priority | N/A | Senior experts speak first |
+| Problem Solving | Priority | N/A | Most relevant experts prioritize |
+| Creative Sessions | Random | N/A | Encourages diverse perspectives |
+| Decision Making | Priority | N/A | Decision makers speak first |
+| Research Review | Round Robin | N/A | Equal contribution from all reviewers |
+| Complex Workflows | Random Dynamic | Sequential | Follows natural conversation flow |
+| Parallel Analysis | Random Dynamic | Parallel | Multiple agents work simultaneously |
+| Medical Panels | Random Dynamic | Sequential | Specialists delegate to relevant experts |
+| Technical Architecture | Random Dynamic | Sequential | Senior architects guide the discussion |
## Contributing
diff --git a/docs/swarms_cloud/quickstart.md b/docs/swarms_cloud/quickstart.md
new file mode 100644
index 00000000..37a3a685
--- /dev/null
+++ b/docs/swarms_cloud/quickstart.md
@@ -0,0 +1,1165 @@
+
+# Swarms Quickstart Guide
+
+This guide will help you get started with both single-agent and multi-agent functionality in the Swarms API.
+
+## Prerequisites
+
+!!! info "Requirements"
+
+ - Python 3.7+
+ - API key from [Swarms Platform](https://swarms.world/platform/api-keys)
+ - `requests` library for Python
+ - `axios` for TypeScript/JavaScript
+ - `curl` for shell commands
+
+## Installation
+
+=== "pip"
+
+ ```bash
+ pip install requests python-dotenv
+ ```
+
+=== "npm"
+
+ ```bash
+ npm install axios dotenv
+ ```
+
+## Authentication
+
+!!! warning "API Key Security"
+
+ Never hardcode your API key in your code. Always use environment variables or secure configuration management.
+
+The API is accessible through two base URLs:
+
+- Production: `https://api.swarms.world`
+- Alternative: `https://swarms-api-285321057562.us-east1.run.app`
+
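+!!! example "Example .env file"
+
+    The Python and TypeScript examples below load the key from the environment rather than hardcoding it. A minimal `.env` file (the value shown is a placeholder) looks like this:
+
+    ```bash title=".env"
+    SWARMS_API_KEY="your-api-key-here"
+    ```
+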
+## Single Agent Usage
+
+### Health Check
+
+=== "Python"
+
+ ```python linenums="1" title="health_check.py"
+ import os
+ import requests
+ from dotenv import load_dotenv
+
+ load_dotenv()
+ API_KEY = os.getenv("SWARMS_API_KEY")
+ BASE_URL = "https://api.swarms.world"
+
+ headers = {
+ "x-api-key": API_KEY,
+ "Content-Type": "application/json"
+ }
+
+ response = requests.get(f"{BASE_URL}/health", headers=headers)
+ print(response.json())
+ ```
+
+=== "cURL"
+
+ ```bash title="health_check.sh"
+ curl -X GET "https://api.swarms.world/health" \
+ -H "x-api-key: $SWARMS_API_KEY" \
+ -H "Content-Type: application/json"
+ ```
+
+=== "TypeScript"
+
+ ```typescript linenums="1" title="health_check.ts"
+ import axios from 'axios';
+ import * as dotenv from 'dotenv';
+
+ dotenv.config();
+ const API_KEY = process.env.SWARMS_API_KEY;
+ const BASE_URL = 'https://api.swarms.world';
+
+ async function checkHealth() {
+ try {
+ const response = await axios.get(`${BASE_URL}/health`, {
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ }
+ });
+ console.log(response.data);
+ } catch (error) {
+ console.error('Error:', error);
+ }
+ }
+
+ checkHealth();
+ ```
+
+### Basic Agent
+
+=== "Python"
+
+ ```python linenums="1" title="single_agent.py"
+ import os
+ import requests
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ API_KEY = os.getenv("SWARMS_API_KEY") # (1)
+ BASE_URL = "https://api.swarms.world"
+
+ headers = {
+ "x-api-key": API_KEY,
+ "Content-Type": "application/json"
+ }
+
+ def run_single_agent():
+ """Run a single agent with the AgentCompletion format"""
+ payload = {
+ "agent_config": {
+ "agent_name": "Research Analyst", # (2)
+ "description": "An expert in analyzing and synthesizing research data",
+ "system_prompt": ( # (3)
+ "You are a Research Analyst with expertise in data analysis and synthesis. "
+ "Your role is to analyze provided information, identify key insights, "
+ "and present findings in a clear, structured format."
+ ),
+ "model_name": "claude-3-5-sonnet-20240620", # (4)
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 1,
+ "auto_generate_prompt": False,
+ "tools_list_dictionary": None,
+ },
+ "task": "What are the key trends in renewable energy adoption?", # (5)
+ }
+
+ response = requests.post(
+ f"{BASE_URL}/v1/agent/completions",
+ headers=headers,
+ json=payload
+ )
+ return response.json()
+
+ # Run the agent
+ result = run_single_agent()
+ print(result)
+ ```
+
+ 1. Load API key from environment variables
+ 2. Give your agent a descriptive name
+ 3. Define the agent's capabilities and role
+ 4. Choose from available models
+ 5. Specify the task for the agent
+
+=== "cURL"
+
+ ```bash title="single_agent.sh"
+ curl -X POST "https://api.swarms.world/v1/agent/completions" \
+ -H "x-api-key: $SWARMS_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "agent_config": {
+ "agent_name": "Research Analyst",
+ "description": "An expert in analyzing and synthesizing research data",
+ "system_prompt": "You are a Research Analyst with expertise in data analysis and synthesis. Your role is to analyze provided information, identify key insights, and present findings in a clear, structured format.",
+ "model_name": "claude-3-5-sonnet-20240620",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 1,
+ "auto_generate_prompt": false,
+ "tools_list_dictionary": null
+ },
+ "task": "What are the key trends in renewable energy adoption?"
+ }'
+ ```
+
+=== "TypeScript"
+
+ ```typescript linenums="1" title="single_agent.ts"
+ import axios from 'axios';
+ import * as dotenv from 'dotenv';
+
+ dotenv.config();
+
+ const API_KEY = process.env.SWARMS_API_KEY;
+ const BASE_URL = 'https://api.swarms.world';
+
+ interface AgentConfig {
+ agent_name: string;
+ description: string;
+ system_prompt: string;
+ model_name: string;
+ role: string;
+ max_loops: number;
+ max_tokens: number;
+ temperature: number;
+ auto_generate_prompt: boolean;
+ tools_list_dictionary: null | object[];
+ }
+
+ interface AgentPayload {
+ agent_config: AgentConfig;
+ task: string;
+ }
+
+ async function runSingleAgent() {
+ const payload: AgentPayload = {
+ agent_config: {
+ agent_name: "Research Analyst",
+ description: "An expert in analyzing and synthesizing research data",
+ system_prompt: "You are a Research Analyst with expertise in data analysis and synthesis.",
+ model_name: "claude-3-5-sonnet-20240620",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 1,
+ auto_generate_prompt: false,
+ tools_list_dictionary: null
+ },
+ task: "What are the key trends in renewable energy adoption?"
+ };
+
+ try {
+ const response = await axios.post(
+ `${BASE_URL}/v1/agent/completions`,
+ payload,
+ {
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ }
+ }
+ );
+ return response.data;
+ } catch (error) {
+ console.error('Error:', error);
+ throw error;
+ }
+ }
+
+ // Run the agent
+ runSingleAgent()
+ .then(result => console.log(result))
+ .catch(error => console.error(error));
+ ```
+
+### Agent with History
+
+=== "Python"
+
+ ```python linenums="1" title="agent_with_history.py"
+ def run_agent_with_history():
+ payload = {
+ "agent_config": {
+ "agent_name": "Conversation Agent",
+ "description": "An agent that maintains conversation context",
+ "system_prompt": "You are a helpful assistant that maintains context.",
+ "model_name": "claude-3-5-sonnet-20240620",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.7,
+ "auto_generate_prompt": False,
+ },
+ "task": "What's the weather like?",
+ "history": [ # (1)
+ {
+ "role": "user",
+ "content": "I'm planning a trip to New York."
+ },
+ {
+ "role": "assistant",
+ "content": "That's great! When are you planning to visit?"
+ },
+ {
+ "role": "user",
+ "content": "Next week."
+ }
+ ]
+ }
+
+ response = requests.post(
+ f"{BASE_URL}/v1/agent/completions",
+ headers=headers,
+ json=payload
+ )
+ return response.json()
+ ```
+
+ 1. Include conversation history for context
+
+=== "cURL"
+
+ ```bash title="agent_with_history.sh"
+ curl -X POST "https://api.swarms.world/v1/agent/completions" \
+ -H "x-api-key: $SWARMS_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "agent_config": {
+ "agent_name": "Conversation Agent",
+ "description": "An agent that maintains conversation context",
+ "system_prompt": "You are a helpful assistant that maintains context.",
+ "model_name": "claude-3-5-sonnet-20240620",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.7,
+ "auto_generate_prompt": false
+ },
+ "task": "What'\''s the weather like?",
+ "history": [
+ {
+ "role": "user",
+ "content": "I'\''m planning a trip to New York."
+ },
+ {
+ "role": "assistant",
+ "content": "That'\''s great! When are you planning to visit?"
+ },
+ {
+ "role": "user",
+ "content": "Next week."
+ }
+ ]
+ }'
+ ```
+
+=== "TypeScript"
+
+ ```typescript linenums="1" title="agent_with_history.ts"
+ interface Message {
+ role: 'user' | 'assistant';
+ content: string;
+ }
+
+ interface AgentWithHistoryPayload extends AgentPayload {
+ history: Message[];
+ }
+
+ async function runAgentWithHistory() {
+ const payload: AgentWithHistoryPayload = {
+ agent_config: {
+ agent_name: "Conversation Agent",
+ description: "An agent that maintains conversation context",
+ system_prompt: "You are a helpful assistant that maintains context.",
+ model_name: "claude-3-5-sonnet-20240620",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.7,
+ auto_generate_prompt: false,
+ tools_list_dictionary: null
+ },
+ task: "What's the weather like?",
+ history: [
+ {
+ role: "user",
+ content: "I'm planning a trip to New York."
+ },
+ {
+ role: "assistant",
+ content: "That's great! When are you planning to visit?"
+ },
+ {
+ role: "user",
+ content: "Next week."
+ }
+ ]
+ };
+
+ try {
+ const response = await axios.post(
+ `${BASE_URL}/v1/agent/completions`,
+ payload,
+ {
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ }
+ }
+ );
+ return response.data;
+ } catch (error) {
+ console.error('Error:', error);
+ throw error;
+ }
+ }
+ ```
+
+## Multi-Agent Swarms
+
+!!! tip "Swarm Types"
+
+ Swarms API supports two types of agent workflows:
+
+ 1. `SequentialWorkflow`: Agents work in sequence, each building on previous output
+ 2. `ConcurrentWorkflow`: Agents work in parallel on the same task
+
+### Sequential Workflow
+
+=== "Python"
+
+ ```python linenums="1" title="sequential_swarm.py"
+ def run_sequential_swarm():
+ payload = {
+ "name": "Financial Analysis Swarm",
+ "description": "Market analysis swarm",
+ "agents": [
+ {
+ "agent_name": "Market Analyst", # (1)
+ "description": "Analyzes market trends",
+ "system_prompt": "You are a financial analyst expert.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": False
+ },
+ {
+ "agent_name": "Economic Forecaster", # (2)
+ "description": "Predicts economic trends",
+ "system_prompt": "You are an expert in economic forecasting.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": False
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "SequentialWorkflow", # (3)
+ "task": "Analyze the current market conditions and provide economic forecasts."
+ }
+
+ response = requests.post(
+ f"{BASE_URL}/v1/swarm/completions",
+ headers=headers,
+ json=payload
+ )
+ return response.json()
+ ```
+
+ 1. First agent analyzes market trends
+ 2. Second agent builds on first agent's analysis
+ 3. Sequential workflow ensures ordered execution
+
+=== "cURL"
+
+ ```bash title="sequential_swarm.sh"
+ curl -X POST "https://api.swarms.world/v1/swarm/completions" \
+ -H "x-api-key: $SWARMS_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "name": "Financial Analysis Swarm",
+ "description": "Market analysis swarm",
+ "agents": [
+ {
+ "agent_name": "Market Analyst",
+ "description": "Analyzes market trends",
+ "system_prompt": "You are a financial analyst expert.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": false
+ },
+ {
+ "agent_name": "Economic Forecaster",
+ "description": "Predicts economic trends",
+ "system_prompt": "You are an expert in economic forecasting.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": false
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "SequentialWorkflow",
+ "task": "Analyze the current market conditions and provide economic forecasts."
+ }'
+ ```
+
+=== "TypeScript"
+
+ ```typescript linenums="1" title="sequential_swarm.ts"
+ interface SwarmAgent {
+ agent_name: string;
+ description: string;
+ system_prompt: string;
+ model_name: string;
+ role: string;
+ max_loops: number;
+ max_tokens: number;
+ temperature: number;
+ auto_generate_prompt: boolean;
+ }
+
+ interface SwarmPayload {
+ name: string;
+ description: string;
+ agents: SwarmAgent[];
+ max_loops: number;
+ swarm_type: 'SequentialWorkflow' | 'ConcurrentWorkflow';
+ task: string;
+ }
+
+ async function runSequentialSwarm() {
+ const payload: SwarmPayload = {
+ name: "Financial Analysis Swarm",
+ description: "Market analysis swarm",
+ agents: [
+ {
+ agent_name: "Market Analyst",
+ description: "Analyzes market trends",
+ system_prompt: "You are a financial analyst expert.",
+ model_name: "gpt-4o",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.5,
+ auto_generate_prompt: false
+ },
+ {
+ agent_name: "Economic Forecaster",
+ description: "Predicts economic trends",
+ system_prompt: "You are an expert in economic forecasting.",
+ model_name: "gpt-4o",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.5,
+ auto_generate_prompt: false
+ }
+ ],
+ max_loops: 1,
+ swarm_type: "SequentialWorkflow",
+ task: "Analyze the current market conditions and provide economic forecasts."
+ };
+
+ try {
+ const response = await axios.post(
+ `${BASE_URL}/v1/swarm/completions`,
+ payload,
+ {
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ }
+ }
+ );
+ return response.data;
+ } catch (error) {
+ console.error('Error:', error);
+ throw error;
+ }
+ }
+ ```
+
+### Concurrent Workflow
+
+=== "Python"
+
+ ```python linenums="1" title="concurrent_swarm.py"
+ def run_concurrent_swarm():
+ payload = {
+ "name": "Medical Analysis Swarm",
+ "description": "Analyzes medical data concurrently",
+ "agents": [
+ {
+ "agent_name": "Lab Data Analyzer", # (1)
+ "description": "Analyzes lab report data",
+ "system_prompt": "You are a medical data analyst specializing in lab results.",
+ "model_name": "claude-3-5-sonnet-20240620",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": False
+ },
+ {
+ "agent_name": "Clinical Specialist", # (2)
+ "description": "Provides clinical interpretations",
+ "system_prompt": "You are an expert in clinical diagnosis.",
+ "model_name": "claude-3-5-sonnet-20240620",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": False
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "ConcurrentWorkflow", # (3)
+ "task": "Analyze these lab results and provide clinical interpretations."
+ }
+
+ response = requests.post(
+ f"{BASE_URL}/v1/swarm/completions",
+ headers=headers,
+ json=payload
+ )
+ return response.json()
+ ```
+
+ 1. First agent processes lab data
+ 2. Second agent works simultaneously
+ 3. Concurrent workflow for parallel processing
+
+=== "cURL"
+
+ ```bash title="concurrent_swarm.sh"
+ curl -X POST "https://api.swarms.world/v1/swarm/completions" \
+ -H "x-api-key: $SWARMS_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "name": "Medical Analysis Swarm",
+ "description": "Analyzes medical data concurrently",
+ "agents": [
+ {
+ "agent_name": "Lab Data Analyzer",
+ "description": "Analyzes lab report data",
+ "system_prompt": "You are a medical data analyst specializing in lab results.",
+ "model_name": "claude-3-5-sonnet-20240620",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": false
+ },
+ {
+ "agent_name": "Clinical Specialist",
+ "description": "Provides clinical interpretations",
+ "system_prompt": "You are an expert in clinical diagnosis.",
+ "model_name": "claude-3-5-sonnet-20240620",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": false
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "ConcurrentWorkflow",
+ "task": "Analyze these lab results and provide clinical interpretations."
+ }'
+ ```
+
+=== "TypeScript"
+
+ ```typescript linenums="1" title="concurrent_swarm.ts"
+ async function runConcurrentSwarm() {
+ const payload: SwarmPayload = {
+ name: "Medical Analysis Swarm",
+ description: "Analyzes medical data concurrently",
+ agents: [
+ {
+ agent_name: "Lab Data Analyzer",
+ description: "Analyzes lab report data",
+ system_prompt: "You are a medical data analyst specializing in lab results.",
+ model_name: "claude-3-5-sonnet-20240620",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.5,
+ auto_generate_prompt: false
+ },
+ {
+ agent_name: "Clinical Specialist",
+ description: "Provides clinical interpretations",
+ system_prompt: "You are an expert in clinical diagnosis.",
+ model_name: "claude-3-5-sonnet-20240620",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.5,
+ auto_generate_prompt: false
+ }
+ ],
+ max_loops: 1,
+ swarm_type: "ConcurrentWorkflow",
+ task: "Analyze these lab results and provide clinical interpretations."
+ };
+
+ try {
+ const response = await axios.post(
+ `${BASE_URL}/v1/swarm/completions`,
+ payload,
+ {
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ }
+ }
+ );
+ return response.data;
+ } catch (error) {
+ console.error('Error:', error);
+ throw error;
+ }
+ }
+ ```
+
+### Batch Processing
+
+!!! example "Batch Processing"
+
+ Process multiple swarms in a single request for improved efficiency.
+
+=== "Python"
+
+ ```python linenums="1" title="batch_swarms.py"
+ def run_batch_swarms():
+ payload = [
+ {
+ "name": "Batch Swarm 1",
+ "description": "First swarm in batch",
+ "agents": [
+ {
+ "agent_name": "Research Agent",
+ "description": "Conducts research",
+ "system_prompt": "You are a research assistant.",
+ "model_name": "gpt-4",
+ "role": "worker",
+ "max_loops": 1
+ },
+ {
+ "agent_name": "Analysis Agent",
+ "description": "Analyzes data",
+ "system_prompt": "You are a data analyst.",
+ "model_name": "gpt-4",
+ "role": "worker",
+ "max_loops": 1
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "SequentialWorkflow",
+ "task": "Research AI advancements."
+ }
+ ]
+
+ response = requests.post(
+ f"{BASE_URL}/v1/swarm/batch/completions",
+ headers=headers,
+ json=payload
+ )
+ return response.json()
+ ```
+
+=== "cURL"
+
+ ```bash title="batch_swarms.sh"
+ curl -X POST "https://api.swarms.world/v1/swarm/batch/completions" \
+ -H "x-api-key: $SWARMS_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '[
+ {
+ "name": "Batch Swarm 1",
+ "description": "First swarm in batch",
+ "agents": [
+ {
+ "agent_name": "Research Agent",
+ "description": "Conducts research",
+ "system_prompt": "You are a research assistant.",
+ "model_name": "gpt-4",
+ "role": "worker",
+ "max_loops": 1
+ },
+ {
+ "agent_name": "Analysis Agent",
+ "description": "Analyzes data",
+ "system_prompt": "You are a data analyst.",
+ "model_name": "gpt-4",
+ "role": "worker",
+ "max_loops": 1
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "SequentialWorkflow",
+ "task": "Research AI advancements."
+ }
+ ]'
+ ```
+
+=== "TypeScript"
+
+ ```typescript linenums="1" title="batch_swarms.ts"
+ async function runBatchSwarms() {
+ const payload: SwarmPayload[] = [
+ {
+ name: "Batch Swarm 1",
+ description: "First swarm in batch",
+ agents: [
+ {
+ agent_name: "Research Agent",
+ description: "Conducts research",
+ system_prompt: "You are a research assistant.",
+ model_name: "gpt-4",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.7,
+ auto_generate_prompt: false
+ },
+ {
+ agent_name: "Analysis Agent",
+ description: "Analyzes data",
+ system_prompt: "You are a data analyst.",
+ model_name: "gpt-4",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.7,
+ auto_generate_prompt: false
+ }
+ ],
+ max_loops: 1,
+ swarm_type: "SequentialWorkflow",
+ task: "Research AI advancements."
+ }
+ ];
+
+ try {
+ const response = await axios.post(
+ `${BASE_URL}/v1/swarm/batch/completions`,
+ payload,
+ {
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ }
+ }
+ );
+ return response.data;
+ } catch (error) {
+ console.error('Error:', error);
+ throw error;
+ }
+ }
+ ```
+
+## Advanced Features
+
+### Tools Integration
+
+!!! note "Tools"
+
+ Enhance agent capabilities by providing them with specialized tools.
+
+=== "Python"
+
+ ```python linenums="1" title="tools_example.py"
+ def run_agent_with_tools():
+ tools_dictionary = [
+ {
+ "type": "function",
+ "function": {
+ "name": "search_topic",
+ "description": "Conduct an in-depth search on a topic",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "depth": {
+ "type": "integer",
+ "description": "Search depth (1-3)"
+ },
+ "detailed_queries": {
+ "type": "array",
+ "description": "Specific search queries",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ "required": ["depth", "detailed_queries"]
+ }
+ }
+ }
+ ]
+
+ payload = {
+ "agent_config": {
+ "agent_name": "Research Assistant",
+ "description": "Expert in research with search capabilities",
+ "system_prompt": "You are a research assistant with search capabilities.",
+ "model_name": "gpt-4",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.7,
+ "auto_generate_prompt": False,
+ "tools_dictionary": tools_dictionary
+ },
+ "task": "Research the latest developments in quantum computing."
+ }
+
+ response = requests.post(
+ f"{BASE_URL}/v1/agent/completions",
+ headers=headers,
+ json=payload
+ )
+ return response.json()
+ ```
+
+=== "cURL"
+
+ ```bash title="tools_example.sh"
+ curl -X POST "https://api.swarms.world/v1/agent/completions" \
+ -H "x-api-key: $SWARMS_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "agent_config": {
+ "agent_name": "Research Assistant",
+ "description": "Expert in research with search capabilities",
+ "system_prompt": "You are a research assistant with search capabilities.",
+ "model_name": "gpt-4",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.7,
+ "auto_generate_prompt": false,
+ "tools_dictionary": [
+ {
+ "type": "function",
+ "function": {
+ "name": "search_topic",
+ "description": "Conduct an in-depth search on a topic",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "depth": {
+ "type": "integer",
+ "description": "Search depth (1-3)"
+ },
+ "detailed_queries": {
+ "type": "array",
+ "description": "Specific search queries",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ "required": ["depth", "detailed_queries"]
+ }
+ }
+ }
+ ]
+ },
+ "task": "Research the latest developments in quantum computing."
+ }'
+ ```
+
+=== "TypeScript"
+
+ ```typescript linenums="1" title="tools_example.ts"
+ interface ToolFunction {
+ name: string;
+ description: string;
+ parameters: {
+ type: string;
+ properties: {
+ [key: string]: {
+ type: string;
+ description: string;
+ items?: {
+ type: string;
+ };
+ };
+ };
+ required: string[];
+ };
+ }
+
+ interface Tool {
+ type: string;
+ function: ToolFunction;
+ }
+
+ interface AgentWithToolsConfig extends AgentConfig {
+ tools_dictionary: Tool[];
+ }
+
+ interface AgentWithToolsPayload {
+ agent_config: AgentWithToolsConfig;
+ task: string;
+ }
+
+ async function runAgentWithTools() {
+ const toolsDictionary: Tool[] = [
+ {
+ type: "function",
+ function: {
+ name: "search_topic",
+ description: "Conduct an in-depth search on a topic",
+ parameters: {
+ type: "object",
+ properties: {
+ depth: {
+ type: "integer",
+ description: "Search depth (1-3)"
+ },
+ detailed_queries: {
+ type: "array",
+ description: "Specific search queries",
+ items: {
+ type: "string"
+ }
+ }
+ },
+ required: ["depth", "detailed_queries"]
+ }
+ }
+ }
+ ];
+
+ const payload: AgentWithToolsPayload = {
+ agent_config: {
+ agent_name: "Research Assistant",
+ description: "Expert in research with search capabilities",
+ system_prompt: "You are a research assistant with search capabilities.",
+ model_name: "gpt-4",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.7,
+ auto_generate_prompt: false,
+ tools_dictionary: toolsDictionary
+ },
+ task: "Research the latest developments in quantum computing."
+ };
+
+ try {
+ const response = await axios.post(
+ `${BASE_URL}/v1/agent/completions`,
+ payload,
+ {
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ }
+ }
+ );
+ return response.data;
+ } catch (error) {
+ console.error('Error:', error);
+ throw error;
+ }
+ }
+ ```
+
+### Available Models
+
+!!! info "Supported Models"
+
+    Choose the right model for your use case. You can also list the currently available models programmatically (see the snippet after this list):
+
+ === "OpenAI"
+ - `gpt-4`
+ - `gpt-4o`
+ - `gpt-4o-mini`
+
+ === "Anthropic"
+ - `claude-3-5-sonnet-20240620`
+ - `claude-3-7-sonnet-latest`
+
+ === "Groq"
+ - `groq/llama3-70b-8192`
+ - `groq/deepseek-r1-distill-llama-70b`
+
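+The list above may change over time. To discover models programmatically, the API also exposes a `/v1/models/available` endpoint; the sketch below is a minimal example and assumes the same API-key header as the earlier examples (the exact response shape may differ).
+
+```python title="list_models.py"
+import os
+
+import requests
+
+BASE_URL = "https://api.swarms.world"
+headers = {"x-api-key": os.getenv("SWARMS_API_KEY")}
+
+def list_available_models():
+    # GET /v1/models/available returns the models usable as `model_name`
+    response = requests.get(f"{BASE_URL}/v1/models/available", headers=headers)
+    response.raise_for_status()
+    return response.json()
+
+print(list_available_models())
+```
+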
+## Best Practices
+
+!!! danger "Security"
+ Never commit API keys or sensitive credentials to version control.
+
+!!! warning "Rate Limits"
+ Implement proper rate limiting and error handling in production.
+
+!!! tip "Testing"
+ Start with simple tasks and gradually increase complexity.
+
+=== "Python"
+
+ ```python linenums="1" title="best_practices.py"
+ # Error Handling
+ try:
+ response = requests.post(url, headers=headers, json=payload)
+ response.raise_for_status()
+ except requests.exceptions.RequestException as e:
+ print(f"Error: {e}")
+
+ # Rate Limiting
+    from tenacity import retry, stop_after_attempt, wait_exponential
+
+    @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=10))
+ def make_api_call():
+ response = requests.post(url, headers=headers, json=payload)
+ response.raise_for_status()
+ return response
+
+ # Input Validation
+ def validate_payload(payload):
+ required_fields = ["agent_config", "task"]
+ if not all(field in payload for field in required_fields):
+ raise ValueError("Missing required fields")
+ ```
+
+=== "TypeScript"
+
+ ```typescript linenums="1" title="best_practices.ts"
+ // Error Handling
+ try {
+ const response = await axios.post(url, payload, { headers });
+ } catch (error) {
+ if (axios.isAxiosError(error)) {
+ console.error('API Error:', error.response?.data);
+ }
+ throw error;
+ }
+
+ // Rate Limiting
+    import rateLimit from 'axios-rate-limit';
+
+ const http = rateLimit(axios.create(), {
+ maxRequests: 2,
+ perMilliseconds: 1000
+ });
+
+ // Input Validation
+ function validatePayload(payload: unknown): asserts payload is AgentPayload {
+ if (!payload || typeof payload !== 'object') {
+ throw new Error('Invalid payload');
+ }
+
+        const { agent_config, task } = payload as Partial<AgentPayload>;
+
+ if (!agent_config || !task) {
+ throw new Error('Missing required fields');
+ }
+ }
+ ```
+
+## Connect With Us
+
+Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights!
+
+| Platform | Description | Link |
+|----------|-------------|------|
+| 📚 Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
+| 📝 Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
+| 💬 Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| 🐦 Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
+| 👥 LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
+| 📺 YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
+| 🎫 Events | Join our community events | [Sign up here](https://lu.ma/5p2jnc2v) |
+| 🚀 Onboarding Session | Get onboarded with Kye Gomez, creator and lead maintainer of Swarms | [Book Session](https://cal.com/swarms/swarms-onboarding-session) |
\ No newline at end of file
diff --git a/docs/swarms_cloud/rate_limits.md b/docs/swarms_cloud/rate_limits.md
new file mode 100644
index 00000000..d0fb9759
--- /dev/null
+++ b/docs/swarms_cloud/rate_limits.md
@@ -0,0 +1,59 @@
+# Swarms API Rate Limits
+
+The Swarms API implements rate limiting to ensure fair usage and system stability. Here are the current limits:
+
+## Standard Rate Limits
+
+- **General API Requests**: 100 requests per minute
+- **Batch Operations**: Maximum 10 requests per batch for agent/swarm batch operations
+
+## Rate Limit Response
+
+When you exceed the rate limit, the API will return a 429 (Too Many Requests) status code with the following message:
+```json
+{
+ "detail": "Rate limit exceeded. Please try again later."
+}
+```
+
+## Batch Operation Limits
+
+For batch operations (`/v1/agent/batch/completions` and `/v1/swarm/batch/completions`):
+
+- Maximum 10 concurrent requests per batch
+
+- Exceeding this limit will result in a 400 (Bad Request) error (a chunking sketch follows below)
+
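+If you need to run more than 10 swarms, split the work into chunks of at most 10 and submit each chunk separately. A minimal sketch: `submit_in_chunks` is an illustrative helper, not part of the API, and it assumes the batch endpoint returns a JSON list of per-swarm results.
+
+```python
+import os
+
+import requests
+
+API_BASE_URL = "https://api.swarms.world"
+HEADERS = {
+    "x-api-key": os.getenv("SWARMS_API_KEY"),
+    "Content-Type": "application/json",
+}
+MAX_BATCH_SIZE = 10  # documented per-batch limit
+
+def submit_in_chunks(swarm_specs):
+    """POST swarm specs to the batch endpoint in chunks of at most 10."""
+    results = []
+    for start in range(0, len(swarm_specs), MAX_BATCH_SIZE):
+        chunk = swarm_specs[start:start + MAX_BATCH_SIZE]
+        response = requests.post(
+            f"{API_BASE_URL}/v1/swarm/batch/completions",
+            headers=HEADERS,
+            json=chunk,
+        )
+        response.raise_for_status()
+        results.extend(response.json())
+    return results
+```
+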
+## Increasing Your Rate Limits
+
+Need higher rate limits for your application? You can increase your limits by subscribing to a higher tier plan at [swarms.world/pricing](https://swarms.world/pricing).
+
+Higher tier plans offer:
+
+- Increased rate limits
+
+- Higher batch operation limits
+
+- Priority processing
+
+- Dedicated support
+
+## Best Practices
+
+To make the most of your rate limits:
+
+1. Implement proper error handling for rate limit responses
+
+2. Use batch operations when processing multiple requests
+
+3. Add appropriate retry logic with exponential backoff (see the sketch after this list)
+
+4. Monitor your API usage to stay within limits
+
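+A minimal sketch of points 1 and 3: it retries only on 429 responses, backing off exponentially between attempts. The retry parameters are illustrative and not prescribed by the API.
+
+```python
+import os
+import time
+
+import requests
+
+API_BASE_URL = "https://api.swarms.world"
+HEADERS = {
+    "x-api-key": os.getenv("SWARMS_API_KEY"),
+    "Content-Type": "application/json",
+}
+
+def post_with_backoff(path, payload, max_retries=5, base_delay=1.0):
+    """POST to the API, retrying on 429 with exponential backoff."""
+    for attempt in range(max_retries):
+        response = requests.post(f"{API_BASE_URL}{path}", headers=HEADERS, json=payload)
+        if response.status_code != 429:
+            response.raise_for_status()
+            return response.json()
+        # Rate limited: wait 1s, 2s, 4s, ... before trying again
+        time.sleep(base_delay * (2 ** attempt))
+    raise RuntimeError("Rate limit still exceeded after retries")
+```
+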
+## Rate Limit Headers
+
+The API does not currently expose rate limit headers. We recommend implementing your own request tracking to stay within the limits.
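+
+One simple approach is a client-side sliding-window counter that waits before a request would push you past 100 calls in the last 60 seconds. The sketch below is illustrative; `RequestTracker` is not part of the Swarms SDK.
+
+```python
+import time
+from collections import deque
+
+class RequestTracker:
+    """Sliding-window tracker for the 100 requests/minute limit."""
+
+    def __init__(self, max_requests=100, window_seconds=60):
+        self.max_requests = max_requests
+        self.window_seconds = window_seconds
+        self.timestamps = deque()
+
+    def wait_for_slot(self):
+        now = time.monotonic()
+        # Drop timestamps that have fallen out of the window
+        while self.timestamps and now - self.timestamps[0] >= self.window_seconds:
+            self.timestamps.popleft()
+        if len(self.timestamps) >= self.max_requests:
+            # Sleep until the oldest tracked request leaves the window
+            time.sleep(max(self.window_seconds - (now - self.timestamps[0]), 0))
+            self.timestamps.popleft()
+        self.timestamps.append(time.monotonic())
+```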
+
+---
+
+For questions about rate limits or to request a custom plan for higher limits, please contact our support team or visit [swarms.world/pricing](https://swarms.world/pricing).
\ No newline at end of file
diff --git a/docs/swarms_cloud/swarm_types.md b/docs/swarms_cloud/swarm_types.md
index 795f9ad5..f6091501 100644
--- a/docs/swarms_cloud/swarm_types.md
+++ b/docs/swarms_cloud/swarm_types.md
@@ -1,54 +1,30 @@
-### Available Swarms in The Swarms API
+# Multi-Agent Architectures
-| Swarm Type | Description (English) | Description (Chinese) |
-|----------------------|-----------------------------------------------------------------------------|----------------------------------------------------------------------------|
-| AgentRearrange | A swarm type focused on rearranging agents for optimal performance. | 一种专注于重新排列代理以实现最佳性能的群类型。 |
-| MixtureOfAgents | Combines different types of agents to achieve a specific goal. | 结合不同类型的代理以实现特定目标。 |
-| SpreadSheetSwarm | Utilizes spreadsheet-like structures for data management and operations. | 利用类似电子表格的结构进行数据管理和操作。 |
-| SequentialWorkflow | Executes tasks in a sequential manner. | 以顺序方式执行任务。 |
-| ConcurrentWorkflow | Allows tasks to be executed concurrently for efficiency. | 允许任务并发执行以提高效率。 |
-| GroupChat | Facilitates communication among agents in a group chat format. | 以群聊格式促进代理之间的沟通。 |
-| MultiAgentRouter | Routes tasks and information among multiple agents. | 在多个代理之间路由任务和信息。 |
-| AutoSwarmBuilder | Automatically builds and configures swarms based on predefined criteria. | 根据预定义标准自动构建和配置群。 |
-| HiearchicalSwarm | Organizes agents in a hierarchical structure for task delegation. | 以层次结构组织代理以进行任务委派。 |
-| auto | Automatically selects the best swarm type based on the context. | 根据上下文自动选择最佳群类型。 |
-| MajorityVoting | Uses majority voting among agents to make decisions. | 使用代理之间的多数投票来做出决策。 |
-| MALT | A specialized swarm type for specific tasks (details needed). | 一种专门为特定任务设计的群类型(需要详细信息)。 |
+Each multi-agent architecture type is designed for specific use cases and can be combined to create powerful multi-agent systems. Here's a comprehensive overview of each available swarm:
-### Documentation for Swarms
+| Swarm Type | Description | Learn More |
+|---------------------|------------------------------------------------------------------------------|------------|
+| AgentRearrange      | Dynamically reorganizes agents, adjusting their roles and positions within the workflow to optimize task performance. Particularly useful when the effectiveness of agents depends on their sequence or arrangement. | [Learn More](/swarms/structs/agent_rearrange) |
+| MixtureOfAgents     | Creates diverse teams of specialized agents, each contributing unique skills toward the overall goal. Excels at tasks requiring multiple types of expertise or processing. | [Learn More](/swarms/structs/moa) |
+| SpreadSheetSwarm | Provides a structured approach to data management and operations, making it ideal for tasks involving data analysis, transformation, and systematic processing in a spreadsheet-like structure. | [Learn More](/swarms/structs/spreadsheet_swarm) |
+| SequentialWorkflow | Ensures strict process control by executing tasks in a predefined order. Perfect for workflows where each step depends on the completion of previous steps. | [Learn More](/swarms/structs/sequential_workflow) |
+| ConcurrentWorkflow | Maximizes efficiency by running independent tasks in parallel, significantly reducing overall processing time for complex operations. Ideal for independent tasks that can be processed simultaneously. | [Learn More](/swarms/structs/concurrentworkflow) |
+| GroupChat | Enables dynamic collaboration between agents through a chat-based interface, facilitating real-time information sharing and decision-making. | [Learn More](/swarms/structs/group_chat) |
+| MultiAgentRouter | Acts as an intelligent task dispatcher, ensuring optimal distribution of work across available agents based on their capabilities and current workload. | [Learn More](/swarms/structs/multi_agent_router) |
+| AutoSwarmBuilder | Simplifies swarm creation by automatically configuring agent architectures based on task requirements and performance metrics. | [Learn More](/swarms/structs/auto_swarm_builder) |
+| HiearchicalSwarm | Implements a structured approach to task management, with clear lines of authority and delegation across multiple agent levels. | [Learn More](/swarms/structs/multi_swarm_orchestration) |
+| auto | Provides intelligent swarm selection based on context, automatically choosing the most effective architecture for given tasks. | [Learn More](/swarms/concept/how_to_choose_swarms) |
+| MajorityVoting | Implements robust decision-making through consensus, particularly useful for tasks requiring collective intelligence or verification. | [Learn More](/swarms/structs/majorityvoting) |
+| MALT | Specialized framework for language-based tasks, optimizing agent collaboration for complex language processing operations. | [Learn More](/swarms/structs/malt) |
-1. **AgentRearrange**: This swarm type is designed to rearrange agents to optimize their performance in a given task. It is useful in scenarios where agent positioning or order affects the outcome.
- - 这种群类型旨在重新排列代理以优化其在给定任务中的性能。它在代理位置或顺序影响结果的情况下非常有用。
+## Learn More
-2. **MixtureOfAgents**: This type combines various agents, each with unique capabilities, to work together towards a common goal. It leverages the strengths of different agents to enhance overall performance.
- - 这种类型结合了各种代理,每个代理都有独特的能力,共同努力实现共同目标。它利用不同代理的优势来提高整体性能。
+To learn more about Swarms architecture and how different swarm types work together, visit our comprehensive guides:
-3. **SpreadSheetSwarm**: This swarm type uses spreadsheet-like structures to manage and operate on data. It is ideal for tasks that require organized data manipulation and analysis.
- - 这种群类型使用类似电子表格的结构来管理和操作数据。它非常适合需要有组织的数据操作和分析的任务。
+- [Introduction to Multi-Agent Architectures](/swarms/concept/swarm_architectures)
-4. **SequentialWorkflow**: Tasks are executed one after another in this swarm type, ensuring that each step is completed before the next begins. It is suitable for processes that require strict order.
- - 在这种群类型中,任务一个接一个地执行,确保每个步骤在下一个步骤开始之前完成。它适用于需要严格顺序的流程。
+- [How to Choose the Right Multi-Agent Architecture](/swarms/concept/how_to_choose_swarms)
-5. **ConcurrentWorkflow**: This type allows multiple tasks to be executed simultaneously, improving efficiency and reducing time for completion. It is best for independent tasks that do not rely on each other.
- - 这种类型允许多个任务同时执行,提高效率并减少完成时间。它最适合不相互依赖的独立任务。
+- [Framework Architecture Overview](/swarms/concept/framework_architecture)
-6. **GroupChat**: Facilitates communication among agents in a group chat format, enabling real-time collaboration and decision-making.
- - 以群聊格式促进代理之间的沟通,实现实时协作和决策。
-
-7. **MultiAgentRouter**: This swarm type routes tasks and information among multiple agents, ensuring that each agent receives the necessary data to perform its function.
- - 这种群类型在多个代理之间路由任务和信息,确保每个代理接收到执行其功能所需的数据。
-
-8. **AutoSwarmBuilder**: Automatically builds and configures swarms based on predefined criteria, reducing the need for manual setup and configuration.
- - 根据预定义标准自动构建和配置群,减少手动设置和配置的需要。
-
-9. **HiearchicalSwarm**: Organizes agents in a hierarchical structure, allowing for efficient task delegation and management.
- - 以层次结构组织代理,允许高效的任务委派和管理。
-
-10. **auto**: Automatically selects the most appropriate swarm type based on the context and requirements of the task.
- - 根据任务的上下文和要求自动选择最合适的群类型。
-
-11. **MajorityVoting**: Uses a majority voting mechanism among agents to make decisions, ensuring that the most popular choice is selected.
- - 使用代理之间的多数投票机制来做出决策,确保选择最受欢迎的选项。
-
-12. **MALT**: A specialized swarm type designed for specific tasks. Further details are needed to fully document this type.
- - 一种专门为特定任务设计的群类型。需要进一步的详细信息来完整记录这种类型。
+- [Building Custom Swarms](/swarms/structs/custom_swarm)
diff --git a/docs/swarms_cloud/swarms_api.md b/docs/swarms_cloud/swarms_api.md
index 9da9ebce..f09c6eae 100644
--- a/docs/swarms_cloud/swarms_api.md
+++ b/docs/swarms_cloud/swarms_api.md
@@ -18,8 +18,6 @@ Key capabilities include:
- **Multiple Swarm Architectures**: Choose from various swarm patterns to match your specific workflow needs
-- **Scheduled Execution**: Set up automated, scheduled swarm executions
-
- **Comprehensive Logging**: Track and analyze all API interactions
- **Cost Management**: Predictable, transparent pricing with optimized resource utilization
@@ -47,9 +45,6 @@ API keys can be obtained and managed at [https://swarms.world/platform/api-keys]
| `/health` | GET | Simple health check endpoint |
| `/v1/swarm/completions` | POST | Run a swarm with specified configuration |
| `/v1/swarm/batch/completions` | POST | Run multiple swarms in batch mode |
-| `/v1/swarm/schedule` | POST | Schedule a swarm to run at a specific time |
-| `/v1/swarm/schedule` | GET | Get all scheduled swarm jobs |
-| `/v1/swarm/schedule/{job_id}` | DELETE | Cancel a scheduled swarm job |
| `/v1/swarm/logs` | GET | Retrieve API request logs |
| `/v1/swarms/available` | GET | Get all available swarms as a list of strings |
| `/v1/models/available` | GET | Get all available models as a list of strings |
@@ -96,7 +91,6 @@ The `SwarmSpec` model defines the configuration of a swarm.
| img | string | Optional image URL for the swarm | No |
| return_history | boolean | Whether to return execution history | No |
| rules | string | Guidelines for swarm behavior | No |
-| schedule | ScheduleSpec | Scheduling information | No |
| service_tier | string | Service tier for processing ("standard" or "flex") | No |
### AgentSpec
@@ -117,16 +111,6 @@ The `AgentSpec` model defines the configuration of an individual agent.
*Required if agents are manually specified; not required if using auto-generated agents
-### ScheduleSpec
-
-The `ScheduleSpec` model defines when a swarm should be executed.
-
-| Field | Type | Description | Required |
-|-------|------|-------------|----------|
-| scheduled_time | datetime | Time when the swarm should run | Yes |
-| timezone | string | Timezone for the scheduled time | No (defaults to "UTC") |
-
-
### Endpoint Details
@@ -138,11 +122,58 @@ Check if the API service is available and functioning correctly.
**Method**: GET
**Rate Limit**: 100 requests per 60 seconds
-**Example Request**:
-```bash
-curl -X GET "https://api.swarms.world/health" \
- -H "x-api-key: your_api_key_here"
-```
+=== "Shell (curl)"
+ ```bash
+ curl -X GET "https://api.swarms.world/health" \
+ -H "x-api-key: your_api_key_here"
+ ```
+
+=== "Python (requests)"
+ ```python
+ import requests
+
+ API_BASE_URL = "https://api.swarms.world"
+ API_KEY = "your_api_key_here"
+
+ headers = {
+ "x-api-key": API_KEY
+ }
+
+ response = requests.get(f"{API_BASE_URL}/health", headers=headers)
+
+ if response.status_code == 200:
+ print("API is healthy:", response.json())
+ else:
+ print(f"Error: {response.status_code}")
+ ```
+
+=== "TypeScript (fetch)"
+ ```typescript
+ const API_BASE_URL = "https://api.swarms.world";
+ const API_KEY = "your_api_key_here";
+
+    async function checkHealth(): Promise<void> {
+ try {
+ const response = await fetch(`${API_BASE_URL}/health`, {
+ method: 'GET',
+ headers: {
+ 'x-api-key': API_KEY
+ }
+ });
+
+ if (response.ok) {
+ const data = await response.json();
+ console.log("API is healthy:", data);
+ } else {
+ console.error(`Error: ${response.status}`);
+ }
+ } catch (error) {
+ console.error("Request failed:", error);
+ }
+ }
+
+ checkHealth();
+ ```
**Example Response**:
```json
@@ -173,49 +204,193 @@ Run a swarm with the specified configuration to complete a task.
| img | string | Optional image URL for the swarm | No |
| return_history | boolean | Whether to return execution history | No |
| rules | string | Guidelines for swarm behavior | No |
-| schedule | ScheduleSpec | Scheduling information | No |
-
-**Example Request**:
-```bash
-
-# Run single swarm
-curl -X POST "https://api.swarms.world/v1/swarm/completions" \
- -H "x-api-key: $SWARMS_API_KEY" \
- -H "Content-Type: application/json" \
- -d '{
- "name": "Financial Analysis Swarm",
- "description": "Market analysis swarm",
- "agents": [
- {
- "agent_name": "Market Analyst",
- "description": "Analyzes market trends",
- "system_prompt": "You are a financial analyst expert.",
- "model_name": "openai/gpt-4o",
- "role": "worker",
+
+=== "Shell (curl)"
+ ```bash
+ curl -X POST "https://api.swarms.world/v1/swarm/completions" \
+ -H "x-api-key: $SWARMS_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "name": "Financial Analysis Swarm",
+ "description": "Market analysis swarm",
+ "agents": [
+ {
+ "agent_name": "Market Analyst",
+ "description": "Analyzes market trends",
+ "system_prompt": "You are a financial analyst expert.",
+ "model_name": "openai/gpt-4o",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": false
+ },
+ {
+ "agent_name": "Economic Forecaster",
+ "description": "Predicts economic trends",
+ "system_prompt": "You are an expert in economic forecasting.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": false
+ }
+ ],
"max_loops": 1,
- "max_tokens": 8192,
- "temperature": 0.5,
- "auto_generate_prompt": false
- },
- {
- "agent_name": "Economic Forecaster",
- "description": "Predicts economic trends",
- "system_prompt": "You are an expert in economic forecasting.",
- "model_name": "gpt-4o",
- "role": "worker",
+ "swarm_type": "ConcurrentWorkflow",
+ "task": "What are the best etfs and index funds for ai and tech?",
+ "output_type": "dict"
+ }'
+ ```
+
+=== "Python (requests)"
+ ```python
+ import requests
+ import json
+
+ API_BASE_URL = "https://api.swarms.world"
+ API_KEY = "your_api_key_here"
+
+ headers = {
+ "x-api-key": API_KEY,
+ "Content-Type": "application/json"
+ }
+
+ swarm_config = {
+ "name": "Financial Analysis Swarm",
+ "description": "Market analysis swarm",
+ "agents": [
+ {
+ "agent_name": "Market Analyst",
+ "description": "Analyzes market trends",
+ "system_prompt": "You are a financial analyst expert.",
+ "model_name": "openai/gpt-4o",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": False
+ },
+ {
+ "agent_name": "Economic Forecaster",
+ "description": "Predicts economic trends",
+ "system_prompt": "You are an expert in economic forecasting.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5,
+ "auto_generate_prompt": False
+ }
+ ],
"max_loops": 1,
- "max_tokens": 8192,
- "temperature": 0.5,
- "auto_generate_prompt": false
- }
- ],
- "max_loops": 1,
- "swarm_type": "ConcurrentWorkflow",
- "task": "What are the best etfs and index funds for ai and tech?",
- "output_type": "dict"
- }'
+ "swarm_type": "ConcurrentWorkflow",
+ "task": "What are the best etfs and index funds for ai and tech?",
+ "output_type": "dict"
+ }
+
+ response = requests.post(
+ f"{API_BASE_URL}/v1/swarm/completions",
+ headers=headers,
+ json=swarm_config
+ )
+
+ if response.status_code == 200:
+ result = response.json()
+ print("Swarm completed successfully!")
+ print(f"Cost: ${result['metadata']['billing_info']['total_cost']}")
+ print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds")
+ else:
+ print(f"Error: {response.status_code} - {response.text}")
+ ```
+
+=== "TypeScript (fetch)"
+ ```typescript
+ interface AgentSpec {
+ agent_name: string;
+ description: string;
+ system_prompt: string;
+ model_name: string;
+ role: string;
+ max_loops: number;
+ max_tokens: number;
+ temperature: number;
+ auto_generate_prompt: boolean;
+ }
-```
+ interface SwarmConfig {
+ name: string;
+ description: string;
+ agents: AgentSpec[];
+ max_loops: number;
+ swarm_type: string;
+ task: string;
+ output_type: string;
+ }
+
+ const API_BASE_URL = "https://api.swarms.world";
+ const API_KEY = "your_api_key_here";
+
+    async function runSwarm(): Promise<void> {
+ const swarmConfig: SwarmConfig = {
+ name: "Financial Analysis Swarm",
+ description: "Market analysis swarm",
+ agents: [
+ {
+ agent_name: "Market Analyst",
+ description: "Analyzes market trends",
+ system_prompt: "You are a financial analyst expert.",
+ model_name: "openai/gpt-4o",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.5,
+ auto_generate_prompt: false
+ },
+ {
+ agent_name: "Economic Forecaster",
+ description: "Predicts economic trends",
+ system_prompt: "You are an expert in economic forecasting.",
+ model_name: "gpt-4o",
+ role: "worker",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.5,
+ auto_generate_prompt: false
+ }
+ ],
+ max_loops: 1,
+ swarm_type: "ConcurrentWorkflow",
+ task: "What are the best etfs and index funds for ai and tech?",
+ output_type: "dict"
+ };
+
+ try {
+ const response = await fetch(`${API_BASE_URL}/v1/swarm/completions`, {
+ method: 'POST',
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify(swarmConfig)
+ });
+
+ if (response.ok) {
+ const result = await response.json();
+ console.log("Swarm completed successfully!");
+ console.log(`Cost: $${result.metadata.billing_info.total_cost}`);
+ console.log(`Execution time: ${result.metadata.execution_time_seconds} seconds`);
+ } else {
+ console.error(`Error: ${response.status} - ${await response.text()}`);
+ }
+ } catch (error) {
+ console.error("Request failed:", error);
+ }
+ }
+
+ runSwarm();
+ ```
**Example Response**:
```json
@@ -271,65 +446,249 @@ Run multiple swarms as a batch operation.
|-------|------|-------------|----------|
| swarms | Array | List of swarm specifications | Yes |
-**Example Request**:
-```bash
-# Batch swarm completions
-curl -X POST "https://api.swarms.world/v1/swarm/batch/completions" \
- -H "x-api-key: $SWARMS_API_KEY" \
- -H "Content-Type: application/json" \
- -d '[
- {
- "name": "Batch Swarm 1",
- "description": "First swarm in the batch",
- "agents": [
+=== "Shell (curl)"
+ ```bash
+ curl -X POST "https://api.swarms.world/v1/swarm/batch/completions" \
+ -H "x-api-key: $SWARMS_API_KEY" \
+ -H "Content-Type: application/json" \
+ -d '[
{
- "agent_name": "Research Agent",
- "description": "Conducts research",
- "system_prompt": "You are a research assistant.",
- "model_name": "gpt-4o",
- "role": "worker",
- "max_loops": 1
+ "name": "Batch Swarm 1",
+ "description": "First swarm in the batch",
+ "agents": [
+ {
+ "agent_name": "Research Agent",
+ "description": "Conducts research",
+ "system_prompt": "You are a research assistant.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1
+ },
+ {
+ "agent_name": "Analysis Agent",
+ "description": "Analyzes data",
+ "system_prompt": "You are a data analyst.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "SequentialWorkflow",
+ "task": "Research AI advancements."
},
{
- "agent_name": "Analysis Agent",
- "description": "Analyzes data",
- "system_prompt": "You are a data analyst.",
- "model_name": "gpt-4o",
- "role": "worker",
- "max_loops": 1
+ "name": "Batch Swarm 2",
+ "description": "Second swarm in the batch",
+ "agents": [
+ {
+ "agent_name": "Writing Agent",
+ "description": "Writes content",
+ "system_prompt": "You are a content writer.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1
+ },
+ {
+ "agent_name": "Editing Agent",
+ "description": "Edits content",
+ "system_prompt": "You are an editor.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "SequentialWorkflow",
+ "task": "Write a summary of AI research."
}
- ],
- "max_loops": 1,
- "swarm_type": "SequentialWorkflow",
- "task": "Research AI advancements."
- },
- {
- "name": "Batch Swarm 2",
- "description": "Second swarm in the batch",
- "agents": [
+ ]'
+ ```
+
+=== "Python (requests)"
+ ```python
+ import requests
+ import json
+
+ API_BASE_URL = "https://api.swarms.world"
+ API_KEY = "your_api_key_here"
+
+ headers = {
+ "x-api-key": API_KEY,
+ "Content-Type": "application/json"
+ }
+
+ batch_swarms = [
{
- "agent_name": "Writing Agent",
- "description": "Writes content",
- "system_prompt": "You are a content writer.",
- "model_name": "gpt-4o",
- "role": "worker",
- "max_loops": 1
+ "name": "Batch Swarm 1",
+ "description": "First swarm in the batch",
+ "agents": [
+ {
+ "agent_name": "Research Agent",
+ "description": "Conducts research",
+ "system_prompt": "You are a research assistant.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1
+ },
+ {
+ "agent_name": "Analysis Agent",
+ "description": "Analyzes data",
+ "system_prompt": "You are a data analyst.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "SequentialWorkflow",
+ "task": "Research AI advancements."
},
{
- "agent_name": "Editing Agent",
- "description": "Edits content",
- "system_prompt": "You are an editor.",
- "model_name": "gpt-4o",
- "role": "worker",
- "max_loops": 1
+ "name": "Batch Swarm 2",
+ "description": "Second swarm in the batch",
+ "agents": [
+ {
+ "agent_name": "Writing Agent",
+ "description": "Writes content",
+ "system_prompt": "You are a content writer.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1
+ },
+ {
+ "agent_name": "Editing Agent",
+ "description": "Edits content",
+ "system_prompt": "You are an editor.",
+ "model_name": "gpt-4o",
+ "role": "worker",
+ "max_loops": 1
+ }
+ ],
+ "max_loops": 1,
+ "swarm_type": "SequentialWorkflow",
+ "task": "Write a summary of AI research."
+ }
+ ]
+
+ response = requests.post(
+ f"{API_BASE_URL}/v1/swarm/batch/completions",
+ headers=headers,
+ json=batch_swarms
+ )
+
+ if response.status_code == 200:
+ results = response.json()
+ print(f"Batch completed with {len(results)} swarms")
+ for i, result in enumerate(results):
+ print(f"Swarm {i+1}: {result['swarm_name']} - {result['status']}")
+ else:
+ print(f"Error: {response.status_code} - {response.text}")
+ ```
+
+=== "TypeScript (fetch)"
+ ```typescript
+ interface AgentSpec {
+ agent_name: string;
+ description: string;
+ system_prompt: string;
+ model_name: string;
+ role: string;
+ max_loops: number;
+ }
+
+ interface SwarmSpec {
+ name: string;
+ description: string;
+ agents: AgentSpec[];
+ max_loops: number;
+ swarm_type: string;
+ task: string;
+ }
+
+ const API_BASE_URL = "https://api.swarms.world";
+ const API_KEY = "your_api_key_here";
+
+    async function runBatchSwarms(): Promise<void> {
+ const batchSwarms: SwarmSpec[] = [
+ {
+ name: "Batch Swarm 1",
+ description: "First swarm in the batch",
+ agents: [
+ {
+ agent_name: "Research Agent",
+ description: "Conducts research",
+ system_prompt: "You are a research assistant.",
+ model_name: "gpt-4o",
+ role: "worker",
+ max_loops: 1
+ },
+ {
+ agent_name: "Analysis Agent",
+ description: "Analyzes data",
+ system_prompt: "You are a data analyst.",
+ model_name: "gpt-4o",
+ role: "worker",
+ max_loops: 1
+ }
+ ],
+ max_loops: 1,
+ swarm_type: "SequentialWorkflow",
+ task: "Research AI advancements."
+ },
+ {
+ name: "Batch Swarm 2",
+ description: "Second swarm in the batch",
+ agents: [
+ {
+ agent_name: "Writing Agent",
+ description: "Writes content",
+ system_prompt: "You are a content writer.",
+ model_name: "gpt-4o",
+ role: "worker",
+ max_loops: 1
+ },
+ {
+ agent_name: "Editing Agent",
+ description: "Edits content",
+ system_prompt: "You are an editor.",
+ model_name: "gpt-4o",
+ role: "worker",
+ max_loops: 1
+ }
+ ],
+ max_loops: 1,
+ swarm_type: "SequentialWorkflow",
+ task: "Write a summary of AI research."
+ }
+ ];
+
+ try {
+ const response = await fetch(`${API_BASE_URL}/v1/swarm/batch/completions`, {
+ method: 'POST',
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify(batchSwarms)
+ });
+
+ if (response.ok) {
+ const results = await response.json();
+ console.log(`Batch completed with ${results.length} swarms`);
+ results.forEach((result: any, index: number) => {
+ console.log(`Swarm ${index + 1}: ${result.swarm_name} - ${result.status}`);
+ });
+ } else {
+ console.error(`Error: ${response.status} - ${await response.text()}`);
+ }
+ } catch (error) {
+ console.error("Request failed:", error);
}
- ],
- "max_loops": 1,
- "swarm_type": "SequentialWorkflow",
- "task": "Write a summary of AI research."
}
- ]'
-```
+
+ runBatchSwarms();
+ ```
**Example Response**:
```json
@@ -351,10 +710,7 @@ curl -X POST "https://api.swarms.world/v1/swarm/batch/completions" \
]
```
--------
-
-
-
+## Individual Agent Endpoints
### Run Single Agent
@@ -371,24 +727,125 @@ Run a single agent with the specified configuration.
| agent_config | AgentSpec | Configuration for the agent | Yes |
| task | string | The task to be completed by the agent | Yes |
-**Example Request**:
-```bash
-curl -X POST "https://api.swarms.world/v1/agent/completions" \
- -H "x-api-key: your_api_key_here" \
- -H "Content-Type: application/json" \
- -d '{
- "agent_config": {
- "agent_name": "Research Assistant",
- "description": "Helps with research tasks",
- "system_prompt": "You are a research assistant expert.",
- "model_name": "gpt-4o",
- "max_loops": 1,
- "max_tokens": 8192,
- "temperature": 0.5
- },
- "task": "Research the latest developments in quantum computing."
- }'
-```
+=== "Shell (curl)"
+ ```bash
+ curl -X POST "https://api.swarms.world/v1/agent/completions" \
+ -H "x-api-key: your_api_key_here" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "agent_config": {
+ "agent_name": "Research Assistant",
+ "description": "Helps with research tasks",
+ "system_prompt": "You are a research assistant expert.",
+ "model_name": "gpt-4o",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5
+ },
+ "task": "Research the latest developments in quantum computing."
+ }'
+ ```
+
+=== "Python (requests)"
+ ```python
+ import requests
+ import json
+
+ API_BASE_URL = "https://api.swarms.world"
+ API_KEY = "your_api_key_here"
+
+ headers = {
+ "x-api-key": API_KEY,
+ "Content-Type": "application/json"
+ }
+
+ agent_request = {
+ "agent_config": {
+ "agent_name": "Research Assistant",
+ "description": "Helps with research tasks",
+ "system_prompt": "You are a research assistant expert.",
+ "model_name": "gpt-4o",
+ "max_loops": 1,
+ "max_tokens": 8192,
+ "temperature": 0.5
+ },
+ "task": "Research the latest developments in quantum computing."
+ }
+
+ response = requests.post(
+ f"{API_BASE_URL}/v1/agent/completions",
+ headers=headers,
+ json=agent_request
+ )
+
+ if response.status_code == 200:
+ result = response.json()
+ print(f"Agent {result['name']} completed successfully!")
+ print(f"Usage: {result['usage']['total_tokens']} tokens")
+ print(f"Output: {result['outputs']}")
+ else:
+ print(f"Error: {response.status_code} - {response.text}")
+ ```
+
+=== "TypeScript (fetch)"
+ ```typescript
+ interface AgentConfig {
+ agent_name: string;
+ description: string;
+ system_prompt: string;
+ model_name: string;
+ max_loops: number;
+ max_tokens: number;
+ temperature: number;
+ }
+
+ interface AgentRequest {
+ agent_config: AgentConfig;
+ task: string;
+ }
+
+ const API_BASE_URL = "https://api.swarms.world";
+ const API_KEY = "your_api_key_here";
+
+    async function runSingleAgent(): Promise<void> {
+ const agentRequest: AgentRequest = {
+ agent_config: {
+ agent_name: "Research Assistant",
+ description: "Helps with research tasks",
+ system_prompt: "You are a research assistant expert.",
+ model_name: "gpt-4o",
+ max_loops: 1,
+ max_tokens: 8192,
+ temperature: 0.5
+ },
+ task: "Research the latest developments in quantum computing."
+ };
+
+ try {
+ const response = await fetch(`${API_BASE_URL}/v1/agent/completions`, {
+ method: 'POST',
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify(agentRequest)
+ });
+
+ if (response.ok) {
+ const result = await response.json();
+ console.log(`Agent ${result.name} completed successfully!`);
+ console.log(`Usage: ${result.usage.total_tokens} tokens`);
+ console.log(`Output:`, result.outputs);
+ } else {
+ console.error(`Error: ${response.status} - ${await response.text()}`);
+ }
+ } catch (error) {
+ console.error("Request failed:", error);
+ }
+ }
+
+ runSingleAgent();
+ ```
**Example Response**:
```json
@@ -408,92 +865,6 @@ curl -X POST "https://api.swarms.world/v1/agent/completions" \
}
```
-
-
-### Get Models
-
-#### Get Available Models
-
-Get all available models as a list of strings.
-
-**Endpoint**: `/v1/models/available`
-**Method**: GET
-
-**Example Request**:
-```bash
-curl -X GET "https://api.swarms.world/v1/models/available" \
- -H "x-api-key: your_api_key_here"
-```
-
-
-------
-
-
-### Get Swarms Available
-
-Get all available swarms as a list of strings.
-
-**Endpoint**: `/v1/swarms/available`
-**Method**: GET
-
-**Example Request**:
-```bash
-curl -X GET "https://api.swarms.world/v1/swarms/available" \
- -H "x-api-key: your_api_key_here"
-```
-
-**Example Response**:
-```json
-{
- "status": "success",
- "swarms": ["financial-analysis-swarm", "market-sentiment-swarm"]
-}
-```
-
--------
-
-
-#### Get API Logs
-
-Retrieve logs of API requests made with your API key.
-
-**Endpoint**: `/v1/swarm/logs`
-**Method**: GET
-**Rate Limit**: 100 requests per 60 seconds
-
-**Example Request**:
-```bash
-curl -X GET "https://api.swarms.world/v1/swarm/logs" \
- -H "x-api-key: your_api_key_here"
-```
-
-**Example Response**:
-```json
-{
- "status": "success",
- "count": 25,
- "logs": [
- {
- "id": "log_id_12345",
- "api_key": "api_key_redacted",
- "data": {
- "action": "run_swarm",
- "swarm_name": "financial-analysis-swarm",
- "task": "Analyze quarterly financials...",
- "timestamp": "2025-03-04T14:22:45Z"
- }
- },
- ...
- ]
-}
-```
-
-
-
-## Individual Agent Endpoints
-
-### Run Single Agent
-
### AgentCompletion Model
The `AgentCompletion` model defines the configuration for running a single agent task.
@@ -596,31 +967,158 @@ Execute multiple agent tasks in parallel.
**Maximum Batch Size**: 10 requests
**Input**: A list of `AgentCompletion` inputs
-**Request Body**:
-```json
-[
- {
- "agent_config": {
- "agent_name": "Market Analyst",
- "description": "Expert in market analysis",
- "system_prompt": "You are a financial market analyst.",
- "model_name": "gpt-4o",
- "temperature": 0.3
- },
- "task": "Analyze the current market trends in AI technology sector"
- },
- {
- "agent_config": {
- "agent_name": "Technical Writer",
- "description": "Specialized in technical documentation",
- "system_prompt": "You are a technical documentation expert.",
- "model_name": "gpt-4o",
- "temperature": 0.7
- },
- "task": "Create a technical guide for implementing OAuth2 authentication"
- }
-]
-```
+=== "Shell (curl)"
+ ```bash
+ curl -X POST "https://api.swarms.world/v1/agent/batch/completions" \
+ -H "x-api-key: your_api_key_here" \
+ -H "Content-Type: application/json" \
+ -d '[
+ {
+ "agent_config": {
+ "agent_name": "Market Analyst",
+ "description": "Expert in market analysis",
+ "system_prompt": "You are a financial market analyst.",
+ "model_name": "gpt-4o",
+ "temperature": 0.3
+ },
+ "task": "Analyze the current market trends in AI technology sector"
+ },
+ {
+ "agent_config": {
+ "agent_name": "Technical Writer",
+ "description": "Specialized in technical documentation",
+ "system_prompt": "You are a technical documentation expert.",
+ "model_name": "gpt-4o",
+ "temperature": 0.7
+ },
+ "task": "Create a technical guide for implementing OAuth2 authentication"
+ }
+ ]'
+ ```
+
+=== "Python (requests)"
+ ```python
+ import requests
+ import json
+
+ API_BASE_URL = "https://api.swarms.world"
+ API_KEY = "your_api_key_here"
+
+ headers = {
+ "x-api-key": API_KEY,
+ "Content-Type": "application/json"
+ }
+
+ batch_agents = [
+ {
+ "agent_config": {
+ "agent_name": "Market Analyst",
+ "description": "Expert in market analysis",
+ "system_prompt": "You are a financial market analyst.",
+ "model_name": "gpt-4o",
+ "temperature": 0.3
+ },
+ "task": "Analyze the current market trends in AI technology sector"
+ },
+ {
+ "agent_config": {
+ "agent_name": "Technical Writer",
+ "description": "Specialized in technical documentation",
+ "system_prompt": "You are a technical documentation expert.",
+ "model_name": "gpt-4o",
+ "temperature": 0.7
+ },
+ "task": "Create a technical guide for implementing OAuth2 authentication"
+ }
+ ]
+
+ response = requests.post(
+ f"{API_BASE_URL}/v1/agent/batch/completions",
+ headers=headers,
+ json=batch_agents
+ )
+
+ if response.status_code == 200:
+ result = response.json()
+ print(f"Batch completed with {result['total_requests']} agents")
+ print(f"Execution time: {result['execution_time']} seconds")
+ print("\nResults:")
+ for i, agent_result in enumerate(result['results']):
+ print(f" Agent {i+1}: {agent_result['name']} - {agent_result['success']}")
+ else:
+ print(f"Error: {response.status_code} - {response.text}")
+ ```
+
+=== "TypeScript (fetch)"
+ ```typescript
+ interface AgentConfig {
+ agent_name: string;
+ description: string;
+ system_prompt: string;
+ model_name: string;
+ temperature: number;
+ }
+
+ interface AgentCompletion {
+ agent_config: AgentConfig;
+ task: string;
+ }
+
+ const API_BASE_URL = "https://api.swarms.world";
+ const API_KEY = "your_api_key_here";
+
+    async function runBatchAgents(): Promise<void> {
+ const batchAgents: AgentCompletion[] = [
+ {
+ agent_config: {
+ agent_name: "Market Analyst",
+ description: "Expert in market analysis",
+ system_prompt: "You are a financial market analyst.",
+ model_name: "gpt-4o",
+ temperature: 0.3
+ },
+ task: "Analyze the current market trends in AI technology sector"
+ },
+ {
+ agent_config: {
+ agent_name: "Technical Writer",
+ description: "Specialized in technical documentation",
+ system_prompt: "You are a technical documentation expert.",
+ model_name: "gpt-4o",
+ temperature: 0.7
+ },
+ task: "Create a technical guide for implementing OAuth2 authentication"
+ }
+ ];
+
+ try {
+ const response = await fetch(`${API_BASE_URL}/v1/agent/batch/completions`, {
+ method: 'POST',
+ headers: {
+ 'x-api-key': API_KEY,
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify(batchAgents)
+ });
+
+ if (response.ok) {
+ const result = await response.json();
+ console.log(`Batch completed with ${result.total_requests} agents`);
+ console.log(`Execution time: ${result.execution_time} seconds`);
+ console.log("\nResults:");
+ result.results.forEach((agentResult: any, index: number) => {
+ console.log(` Agent ${index + 1}: ${agentResult.name} - ${agentResult.success}`);
+ });
+ } else {
+ console.error(`Error: ${response.status} - ${await response.text()}`);
+ }
+ } catch (error) {
+ console.error("Request failed:", error);
+ }
+ }
+
+ runBatchAgents();
+ ```
**Response**:
```json
@@ -660,394 +1158,10 @@ Execute multiple agent tasks in parallel.
}
```
-
-----
## Production Examples
-### Python Examples
-
-#### Financial Risk Assessment (Python)
-
-This example demonstrates creating a swarm for comprehensive financial risk assessment.
-
-```python
-import requests
-import json
-from datetime import datetime, timedelta
-
-# API Configuration
-API_BASE_URL = "https://api.swarms.world"
-API_KEY = "your_api_key_here"
-HEADERS = {
- "x-api-key": API_KEY,
- "Content-Type": "application/json"
-}
-
-def financial_risk_assessment(company_data, market_conditions, risk_tolerance):
- """
- Creates and runs a swarm to perform comprehensive financial risk assessment.
-
- Args:
- company_data (str): Description or data about the company
- market_conditions (str): Current market conditions
- risk_tolerance (str): Risk tolerance level (e.g., "conservative", "moderate", "aggressive")
-
- Returns:
- dict: Risk assessment results
- """
- # Prepare the task description with all relevant information
- task = f"""
- Perform a comprehensive financial risk assessment with the following data:
-
- COMPANY DATA:
- {company_data}
-
- MARKET CONDITIONS:
- {market_conditions}
-
- RISK TOLERANCE:
- {risk_tolerance}
-
- Analyze all potential risk factors including market risks, credit risks,
- operational risks, and regulatory compliance risks. Quantify each risk factor
- on a scale of 1-10 and provide specific mitigation strategies.
-
- Return a detailed report with executive summary, risk scores, detailed analysis,
- and actionable recommendations.
- """
-
- # Define specialized financial agents
- financial_analysts = [
- {
- "agent_name": "MarketAnalyst",
- "description": "Specialist in market risk assessment and forecasting",
- "system_prompt": "You are an expert market analyst with deep expertise in financial markets. Analyze market conditions, trends, and external factors that could impact financial performance. Provide quantitative and qualitative analysis of market-related risks.",
- "model_name": "gpt-4o",
- "temperature": 0.3,
- "role": "analyst",
- "max_loops": 1
- },
- {
- "agent_name": "CreditRiskAnalyst",
- "description": "Expert in assessing credit and counterparty risks",
- "system_prompt": "You are a specialist in credit risk analysis with experience in banking and financial institutions. Evaluate creditworthiness, default probabilities, and counterparty exposures. Provide detailed analysis of credit-related risks and recommended safeguards.",
- "model_name": "gpt-4o",
- "temperature": 0.2,
- "role": "analyst",
- "max_loops": 1
- },
- {
- "agent_name": "RegulatoryExpert",
- "description": "Expert in financial regulations and compliance",
- "system_prompt": "You are a regulatory compliance expert with deep knowledge of financial regulations. Identify potential regulatory risks, compliance issues, and governance concerns. Recommend compliance measures and risk mitigation strategies.",
- "model_name": "gpt-4o",
- "temperature": 0.2,
- "role": "analyst",
- "max_loops": 1
- },
- {
- "agent_name": "RiskSynthesizer",
- "description": "Integrates all risk factors into comprehensive assessment",
- "system_prompt": "You are a senior risk management professional responsible for synthesizing multiple risk analyses into a coherent, comprehensive risk assessment. Integrate analyses from various domains, resolve conflicting assessments, and provide a holistic view of risk exposure with prioritized recommendations.",
- "model_name": "gpt-4o",
- "temperature": 0.4,
- "role": "manager",
- "max_loops": 1
- }
- ]
-
- # Create the swarm specification
- swarm_spec = {
- "name": "financial-risk-assessment",
- "description": "Comprehensive financial risk assessment swarm",
- "agents": financial_analysts,
- "max_loops": 2,
- "swarm_type": "HiearchicalSwarm",
- "task": task,
- "return_history": True
- }
-
- # Execute the swarm
- response = requests.post(
- f"{API_BASE_URL}/v1/swarm/completions",
- headers=HEADERS,
- json=swarm_spec
- )
-
- if response.status_code == 200:
- result = response.json()
- print(f"Risk assessment completed. Cost: ${result['metadata']['billing_info']['total_cost']}")
- return result["output"]
- else:
- print(f"Error: {response.status_code} - {response.text}")
- return None
-
-# Usage example
-if __name__ == "__main__":
- company_data = """
- XYZ Financial Services
- Annual Revenue: $125M
- Current Debt: $45M
- Credit Rating: BBB+
- Primary Markets: North America, Europe
- Key Products: Asset management, retirement planning, commercial lending
- Recent Events: Expanding into Asian markets, New CEO appointed 6 months ago
- """
-
- market_conditions = """
- Current interest rates rising (Federal Reserve increased rates by 0.25% last month)
- Inflation at 3.2% (12-month outlook projects 3.5-4.0%)
- Market volatility index (VIX) at 22.4 (elevated)
- Regulatory environment: New financial reporting requirements taking effect next quarter
- Sector performance: Financial services sector underperforming broader market by 2.7%
- """
-
- risk_tolerance = "moderate"
-
- result = financial_risk_assessment(company_data, market_conditions, risk_tolerance)
-
- if result:
- # Process and use the risk assessment
- print(json.dumps(result, indent=2))
-
- # Optionally, schedule a follow-up assessment
- tomorrow = datetime.utcnow() + timedelta(days=30)
- schedule_spec = {
- "name": "monthly-risk-update",
- "description": "Monthly update to risk assessment",
- "task": f"Update the risk assessment for XYZ Financial Services based on current market conditions. Previous assessment: {json.dumps(result)}",
- "schedule": {
- "scheduled_time": tomorrow.isoformat() + "Z",
- "timezone": "UTC"
- }
- }
-
- schedule_response = requests.post(
- f"{API_BASE_URL}/v1/swarm/schedule",
- headers=HEADERS,
- json=schedule_spec
- )
-
- if schedule_response.status_code == 200:
- print("Follow-up assessment scheduled successfully")
- print(schedule_response.json())
-```
-
-#### Healthcare Patient Data Analysis (Python)
-
-This example demonstrates creating a swarm for analyzing patient health data and generating insights.
-
-```python
-import requests
-import json
-import os
-from datetime import datetime
-
-# API Configuration
-API_BASE_URL = "https://api.swarms.world"
-API_KEY = os.environ.get("SWARMS_API_KEY")
-HEADERS = {
- "x-api-key": API_KEY,
- "Content-Type": "application/json"
-}
-
-def analyze_patient_health_data(patient_data, medical_history, lab_results, treatment_goals):
- """
- Creates and runs a swarm to analyze patient health data and generate insights.
-
- Args:
- patient_data (str): Basic patient information
- medical_history (str): Patient's medical history
- lab_results (str): Recent laboratory results
- treatment_goals (str): Treatment objectives
-
- Returns:
- dict: Comprehensive health analysis and recommendations
- """
- # Prepare the detailed task description
- task = f"""
- Perform a comprehensive analysis of the following patient health data:
-
- PATIENT INFORMATION:
- {patient_data}
-
- MEDICAL HISTORY:
- {medical_history}
-
- LABORATORY RESULTS:
- {lab_results}
-
- TREATMENT GOALS:
- {treatment_goals}
-
- Analyze all aspects of the patient's health status, identify potential concerns,
- evaluate treatment effectiveness, and provide evidence-based recommendations for
- optimizing care. Consider medication interactions, lifestyle factors, and preventive measures.
-
- Return a detailed clinical report with key findings, risk stratification,
- prioritized recommendations, and suggested follow-up timeline.
- """
-
- # Create the swarm specification with auto-generated agents
- # (letting the system create specialized medical experts)
- swarm_spec = {
- "name": "patient-health-analysis",
- "description": "Comprehensive patient health data analysis",
- "swarm_type": "AutoSwarmBuilder",
- "task": task,
- "max_loops": 3,
- "return_history": True
- }
-
- # Execute the swarm
- try:
- response = requests.post(
- f"{API_BASE_URL}/v1/swarm/completions",
- headers=HEADERS,
- json=swarm_spec
- )
-
- response.raise_for_status()
- result = response.json()
-
- # Log the execution metadata
- execution_time = result["metadata"]["execution_time_seconds"]
- cost = result["metadata"]["billing_info"]["total_cost"]
- num_agents = result["metadata"]["num_agents"]
-
- print(f"Analysis completed in {execution_time:.2f} seconds")
- print(f"Used {num_agents} specialized medical agents")
- print(f"Total cost: ${cost:.4f}")
-
- # Return just the analysis results
- return result["output"]
-
- except requests.exceptions.RequestException as e:
- print(f"API request failed: {str(e)}")
- if hasattr(e, 'response') and e.response:
- print(f"Response: {e.response.text}")
- return None
- except Exception as e:
- print(f"Error: {str(e)}")
- return None
-
-# Usage example
-if __name__ == "__main__":
- # Sample patient data (would typically come from EHR system)
- patient_data = """
- ID: PT-28456
- Age: 67
- Gender: Female
- Height: 162 cm
- Weight: 78 kg
- Vitals:
- - Blood Pressure: 142/88 mmHg
- - Heart Rate: 76 bpm
- - Respiratory Rate: 16/min
- - Temperature: 37.1°C
- - Oxygen Saturation: 97%
- """
-
- medical_history = """
- Diagnoses:
- - Type 2 Diabetes Mellitus (diagnosed 12 years ago)
- - Hypertension (diagnosed 8 years ago)
- - Osteoarthritis (knees, diagnosed 5 years ago)
- - Hyperlipidemia
-
- Surgical History:
- - Cholecystectomy (15 years ago)
- - Right knee arthroscopy (3 years ago)
-
- Medications:
- - Metformin 1000mg BID
- - Lisinopril 20mg daily
- - Atorvastatin 40mg daily
- - Aspirin 81mg daily
- - Acetaminophen 500mg PRN for joint pain
-
- Allergies:
- - Penicillin (rash)
- - Sulfa drugs (hives)
-
- Family History:
- - Father: MI at age 70, died at 76
- - Mother: Breast cancer at 68, Type 2 Diabetes, died at 82
- - Sister: Type 2 Diabetes, Hypertension
- """
-
- lab_results = """
- CBC (2 days ago):
- - WBC: 7.2 x10^9/L (normal)
- - RBC: 4.1 x10^12/L (low-normal)
- - Hemoglobin: 12.8 g/dL (low-normal)
- - Hematocrit: 38% (low-normal)
- - Platelets: 245 x10^9/L (normal)
-
- Comprehensive Metabolic Panel:
- - Glucose (fasting): 142 mg/dL (elevated)
- - HbA1c: 7.8% (elevated)
- - BUN: 22 mg/dL (normal)
- - Creatinine: 1.1 mg/dL (normal)
- - eGFR: 62 mL/min/1.73m² (mildly reduced)
- - Sodium: 138 mEq/L (normal)
- - Potassium: 4.2 mEq/L (normal)
- - Chloride: 101 mEq/L (normal)
- - Calcium: 9.4 mg/dL (normal)
- - ALT: 32 U/L (normal)
- - AST: 28 U/L (normal)
-
- Lipid Panel:
- - Total Cholesterol: 198 mg/dL
- - Triglycerides: 172 mg/dL (elevated)
- - HDL: 42 mg/dL (low)
- - LDL: 122 mg/dL (borderline elevated)
-
- Urinalysis:
- - Microalbumin/Creatinine ratio: 45 mg/g (elevated)
- """
-
- treatment_goals = """
- Primary Goals:
- - Improve glycemic control (target HbA1c < 7.0%)
- - Blood pressure control (target < 130/80 mmHg)
- - Lipid management (target LDL < 100 mg/dL)
- - Renal protection (reduce microalbuminuria)
- - Weight management (target BMI < 27)
- - Pain management for osteoarthritis
- - Maintain functional independence
-
- Patient Preferences:
- - Prefers to minimize medication changes if possible
- - Interested in dietary approaches
- - Concerned about memory changes
- - Limited exercise tolerance due to knee pain
- """
-
- result = analyze_patient_health_data(patient_data, medical_history, lab_results, treatment_goals)
-
- if result:
- # Write the analysis to a report file
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- with open(f"patient_analysis_{timestamp}.json", "w") as f:
- json.dump(result, f, indent=2)
-
- print(f"Analysis saved to patient_analysis_{timestamp}.json")
-
- # Display key findings
- if "key_findings" in result:
- print("\nKEY FINDINGS:")
- for i, finding in enumerate(result["key_findings"]):
- print(f" {i+1}. {finding}")
-
- # Display recommendations
- if "recommendations" in result:
- print("\nRECOMMENDATIONS:")
- for i, rec in enumerate(result["recommendations"]):
- print(f" {i+1}. {rec}")
-```
-
## Error Handling
The Swarms API follows standard HTTP status codes for error responses:
@@ -1112,7 +1226,6 @@ Error responses include a detailed message explaining the issue:
| Error Handling | Implement robust error handling and retries |
| Logging | Log API responses for debugging and auditing |
| Cost Monitoring | Monitor costs closely during development and testing |
-| Scheduling | Use scheduled jobs for recurring tasks instead of polling |
### Cost Optimization
diff --git a/docs/swarms_platform/monetize.md b/docs/swarms_platform/monetize.md
new file mode 100644
index 00000000..4df61cee
--- /dev/null
+++ b/docs/swarms_platform/monetize.md
@@ -0,0 +1,126 @@
+# Swarms.World Monetization Guide
+
+## Quick Overview
+
+Swarms Marketplace has activated its payment infrastructure, enabling creators to monetize AI agents, prompts, and tools directly through the platform. Sellers receive payments minus a 5-15% platform fee, scaled based on subscription tiers. Revenue accrues in real time to integrated crypto wallets, with optional fiat conversions.
+
+---
+
+## Eligibility Requirements
+
+### Current Requirements for Paid Content
+
+- **2+ published items** (Prompts, Agents, and Tools)
+
+- **2 Items with 4+ star ratings** (you need community ratings)
+
+- **Marketplace Agent Rating**: An agent will automatically rate your prompt, agent, or tool.
+
+**Bottom Line**: You must build reputation with free, high-quality content first.
+
+---
+
+## Step-by-Step Process
+
+### Phase 1: Build Reputation (Required First)
+
+#### 1. Improve Your Existing Content
+
+- Add better descriptions and examples to your published items
+
+- Use the rating system: evaluate and rate prompts, agents, and tools based on their effectiveness
+
+- Use the commenting system: share feedback and insights with the Swarms community
+
+- Ask users for honest reviews and ratings
+
+#### 2. Create More Quality Content
+
+Focus on these categories:
+
+- **Agents**: Marketing, finance, or programming automation
+
+- **Prompts**: Templates for specific business tasks
+
+- **Tools**: Utilities that solve real problems
+
+**Target**: 3-5 additional items, all aiming for 4+ star ratings
+
+#### 3. Get Community Ratings
+
+- Share your content in relevant communities
+
+- Engage with users who try your content
+
+- Respond to feedback and improve based on comments
+
+- Be patient - ratings take time to accumulate
+
+### Phase 2: Start Monetizing
+
+#### 4. Choose Your Pricing Model
+
+Three primary monetization avenues exist: AI agents (autonomous task-execution models), prompts (pre-optimized input templates), and tools (development utilities like data preprocessors).
+
+**Pricing Options:**
+
+- **One-time**: $0.01 - $999,999 USD
+
+- **Subscription**: Monthly/annual recurring fees (Coming Soon)
+
+- **Usage-based**: Pay per API call or computation (Coming Soon)
+
+
+#### 5. Optimize & Scale
+
+- Monitor your revenue and user feedback
+
+- Developers can bundle assets, such as pairing prompt libraries with compatible agents, to create value-added packages
+
+- Create bundles of related content for higher value
+
+- Adjust pricing based on demand
+
+---
+
+## Revenue Models
+
+### What Sells Best
+
+1. **Business Automation Agents** - Marketing, sales, finance
+
+2. **Industry-Specific Prompts** - Legal, medical, technical writing
+
+3. **Integration Tools** - APIs, data processors, connectors
+
+### Pricing Examples
+
+- Simple prompts: $1-50
+
+- Complex agents: $20-500+
+
+- Enterprise tools: $100-1000+
+
+---
+
+## Quick Tips for Success
+
+1. **Quality over quantity** - Better to have 3 excellent items than 10 mediocre ones
+2. **Solve real problems** - Focus on actual business needs
+3. **Document everything** - Clear instructions increase ratings
+4. **Engage actively** - Respond to all user feedback
+5. **Be patient** - Building reputation takes time but pays off
+
+---
+
+## Common Mistakes to Avoid
+
+- Publishing low-quality content to meet quantity requirements
+
+- Not responding to user feedback
+
+- Setting prices too high before building reputation
+
+- Copying existing solutions without adding value
+
+- Ignoring community guidelines
+
diff --git a/example.py b/example.py
index 35ef36a4..cd0d78be 100644
--- a/example.py
+++ b/example.py
@@ -35,10 +35,10 @@ agent = Agent(
You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
max_loops=1,
- model_name="gpt-4o-mini",
+ model_name="claude-3-sonnet-20240229",
dynamic_temperature_enabled=True,
output_type="all",
- safety_prompt_on=True,
+ # dashboard=True
)
out = agent.run("What are the best top 3 etfs for gold coverage?")
diff --git a/examples/README_realtor.md b/examples/README_realtor.md
new file mode 100644
index 00000000..889b2ba6
--- /dev/null
+++ b/examples/README_realtor.md
@@ -0,0 +1,46 @@
+# Realtor Agent Example
+
+This example demonstrates how to create an AI-powered rental property specialist using the Swarms framework and the Realtor API.
+
+## Quick Start
+
+1. Install dependencies:
+```bash
+pip install swarms
+```
+
+2. Get your Realtor API key:
+- Visit [Realtor Search API](https://rapidapi.com/ntd119/api/realtor-search/)
+- Sign up for RapidAPI
+- Subscribe to the API
+- Copy your API key
+
+3. Update the API key in `realtor_agent.py`:
+```python
+headers = {
+ "x-rapidapi-key": "YOUR_API_KEY_HERE",
+ "x-rapidapi-host": "realtor-search.p.rapidapi.com",
+}
+```
+
+4. Run the example:
+```python
+from realtor_agent import agent
+
+# Search single location
+response = agent.run(
+ "What are the best properties in Menlo Park for rent under $3,000?"
+ f"Data: {get_realtor_data_from_one_source('Menlo Park, CA')}"
+)
+print(response)
+```
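+
+The snippet above relies on a `get_realtor_data_from_one_source` helper defined in `realtor_agent.py`. As a rough, illustrative sketch only (the endpoint path and query parameters below are assumptions, not the documented API; check `realtor_agent.py` for the actual request), the helper might look like this:
+
+```python
+import requests
+
+headers = {
+    "x-rapidapi-key": "YOUR_API_KEY_HERE",
+    "x-rapidapi-host": "realtor-search.p.rapidapi.com",
+}
+
+
+def get_realtor_data_from_one_source(location: str) -> str:
+    """Fetch rental listings for one location and return the raw response text."""
+    # NOTE: the endpoint path and parameters are illustrative assumptions;
+    # consult realtor_agent.py for the exact request it performs.
+    response = requests.get(
+        "https://realtor-search.p.rapidapi.com/properties/search-rent",
+        headers=headers,
+        params={"location": location},
+        timeout=30,
+    )
+    response.raise_for_status()
+    return response.text
+```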
+
+## Features
+
+- Property search across multiple locations
+- Detailed property analysis
+- Location assessment
+- Financial analysis
+- Tenant matching recommendations
+
+For full documentation, see [docs/examples/realtor_agent.md](../docs/examples/realtor_agent.md).
\ No newline at end of file
diff --git a/examples/tools/mcp_examples/agent_use/agent_mcp.py b/examples/mcp/agent_mcp.py
similarity index 75%
rename from examples/tools/mcp_examples/agent_use/agent_mcp.py
rename to examples/mcp/agent_mcp.py
index 6307790c..13ab9bff 100644
--- a/examples/tools/mcp_examples/agent_use/agent_mcp.py
+++ b/examples/mcp/agent_mcp.py
@@ -11,11 +11,13 @@ agent = Agent(
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
max_loops=1,
mcp_url="http://0.0.0.0:8000/sse",
+ model_name="gpt-4o-mini",
+ output_type="all",
)
# Create a markdown file with initial content
out = agent.run(
- "Use any of the tools available to you",
+ "Use the get_okx_crypto_volume to get the volume of BTC just put the name of the coin",
)
print(out)
diff --git a/examples/mcp/agent_multi_mcp_connections.py b/examples/mcp/agent_multi_mcp_connections.py
new file mode 100644
index 00000000..46e22cbc
--- /dev/null
+++ b/examples/mcp/agent_multi_mcp_connections.py
@@ -0,0 +1,49 @@
+from swarms import Agent
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Quantitative-Trading-Agent",
+ agent_description="Advanced quantitative trading and algorithmic analysis agent",
+ system_prompt="""
+ You are an expert quantitative trading agent with deep expertise in:
+ - Algorithmic trading strategies and implementation
+ - Statistical arbitrage and market making
+ - Risk management and portfolio optimization
+ - High-frequency trading systems
+ - Market microstructure analysis
+ - Quantitative research methodologies
+ - Financial mathematics and stochastic processes
+ - Machine learning applications in trading
+
+ Your core responsibilities include:
+ 1. Developing and backtesting trading strategies
+ 2. Analyzing market data and identifying alpha opportunities
+ 3. Implementing risk management frameworks
+ 4. Optimizing portfolio allocations
+ 5. Conducting quantitative research
+ 6. Monitoring market microstructure
+ 7. Evaluating trading system performance
+
+ You maintain strict adherence to:
+ - Mathematical rigor in all analyses
+ - Statistical significance in strategy development
+ - Risk-adjusted return optimization
+ - Market impact minimization
+ - Regulatory compliance
+ - Transaction cost analysis
+ - Performance attribution
+
+ You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
+ max_loops=1,
+ model_name="gpt-4o-mini",
+ dynamic_temperature_enabled=True,
+ output_type="all",
+ mcp_urls=[
+ "http://0.0.0.0:8000/sse",
+ "http://0.0.0.0:8001/sse",
+ ],
+)
+
+agent.run(
+ "Please use the get_okx_crypto_volume tool to get the trading volume for Bitcoin (BTC). Provide the volume information."
+)
diff --git a/examples/tools/mcp_examples/agent_mcp.py b/examples/mcp/mcp_examples/agent_mcp.py
similarity index 100%
rename from examples/tools/mcp_examples/agent_mcp.py
rename to examples/mcp/mcp_examples/agent_mcp.py
diff --git a/examples/tools/mcp_examples/agent_use/agent_tools_dict_example.py b/examples/mcp/mcp_examples/agent_use/agent_tools_dict_example.py
similarity index 100%
rename from examples/tools/mcp_examples/agent_use/agent_tools_dict_example.py
rename to examples/mcp/mcp_examples/agent_use/agent_tools_dict_example.py
diff --git a/examples/tools/mcp_examples/agent_use/mcp_exampler.py b/examples/mcp/mcp_examples/agent_use/mcp_exampler.py
similarity index 100%
rename from examples/tools/mcp_examples/agent_use/mcp_exampler.py
rename to examples/mcp/mcp_examples/agent_use/mcp_exampler.py
diff --git a/examples/tools/mcp_examples/utils/find_tools_on_mcp.py b/examples/mcp/mcp_examples/utils/find_tools_on_mcp.py
similarity index 100%
rename from examples/tools/mcp_examples/utils/find_tools_on_mcp.py
rename to examples/mcp/mcp_examples/utils/find_tools_on_mcp.py
diff --git a/examples/tools/mcp_examples/utils/mcp_execute_example.py b/examples/mcp/mcp_examples/utils/mcp_execute_example.py
similarity index 100%
rename from examples/tools/mcp_examples/utils/mcp_execute_example.py
rename to examples/mcp/mcp_examples/utils/mcp_execute_example.py
diff --git a/examples/tools/mcp_examples/utils/mcp_load_tools_example.py b/examples/mcp/mcp_examples/utils/mcp_load_tools_example.py
similarity index 100%
rename from examples/tools/mcp_examples/utils/mcp_load_tools_example.py
rename to examples/mcp/mcp_examples/utils/mcp_load_tools_example.py
diff --git a/examples/tools/mcp_examples/utils/mcp_multiserver_tool_fetch.py b/examples/mcp/mcp_examples/utils/mcp_multiserver_tool_fetch.py
similarity index 100%
rename from examples/tools/mcp_examples/utils/mcp_multiserver_tool_fetch.py
rename to examples/mcp/mcp_examples/utils/mcp_multiserver_tool_fetch.py
diff --git a/examples/mcp/mcp_utils/mcp_client_call.py b/examples/mcp/mcp_utils/mcp_client_call.py
new file mode 100644
index 00000000..caa969a3
--- /dev/null
+++ b/examples/mcp/mcp_utils/mcp_client_call.py
@@ -0,0 +1,12 @@
+from swarms.tools.mcp_client_call import (
+ get_mcp_tools_sync,
+ execute_tool_call_simple,
+)
+
+tools = get_mcp_tools_sync()
+
+print(tools)
+
+result = execute_tool_call_simple(tools[0], "Hello, world!")
+
+print(result)
diff --git a/examples/mcp/mcp_utils/mcp_multiple_servers_example.py b/examples/mcp/mcp_utils/mcp_multiple_servers_example.py
new file mode 100644
index 00000000..5ca4304d
--- /dev/null
+++ b/examples/mcp/mcp_utils/mcp_multiple_servers_example.py
@@ -0,0 +1,234 @@
+"""
+Example demonstrating how to execute multiple tools across multiple MCP servers.
+
+This example shows how to:
+1. Create a mapping of function names to servers
+2. Execute multiple tool calls across different servers
+3. Handle responses with tool calls and route them to the appropriate servers
+"""
+
+import asyncio
+from swarms.tools.mcp_client_call import (
+ execute_multiple_tools_on_multiple_mcp_servers,
+ execute_multiple_tools_on_multiple_mcp_servers_sync,
+ get_tools_for_multiple_mcp_servers,
+)
+from swarms.schemas.mcp_schemas import MCPConnection
+
+
+def example_sync_execution():
+ """Example of synchronous execution across multiple MCP servers."""
+
+ # Example server URLs (replace with your actual MCP server URLs)
+ urls = [
+ "http://localhost:8000/sse", # Server 1
+ "http://localhost:8001/sse", # Server 2
+ "http://localhost:8002/sse", # Server 3
+ ]
+
+ # Optional: Create connection objects for each server
+ connections = [
+ MCPConnection(
+ url="http://localhost:8000/sse",
+ authorization_token="token1", # if needed
+ timeout=10,
+ ),
+ MCPConnection(
+ url="http://localhost:8001/sse",
+ authorization_token="token2", # if needed
+ timeout=10,
+ ),
+ MCPConnection(
+ url="http://localhost:8002/sse",
+ authorization_token="token3", # if needed
+ timeout=10,
+ ),
+ ]
+
+ # Example responses containing tool calls
+ # These would typically come from an LLM that decided to use tools
+ responses = [
+ {
+ "function": {
+ "name": "search_web",
+ "arguments": {
+ "query": "python programming best practices"
+ },
+ }
+ },
+ {
+ "function": {
+ "name": "search_database",
+ "arguments": {"table": "users", "id": 123},
+ }
+ },
+ {
+ "function": {
+ "name": "send_email",
+ "arguments": {
+ "to": "user@example.com",
+ "subject": "Test email",
+ "body": "This is a test email",
+ },
+ }
+ },
+ ]
+
+ print("=== Synchronous Execution Example ===")
+ print(
+ f"Executing {len(responses)} tool calls across {len(urls)} servers..."
+ )
+
+ try:
+ # Execute all tool calls across multiple servers
+ results = execute_multiple_tools_on_multiple_mcp_servers_sync(
+ responses=responses,
+ urls=urls,
+ connections=connections,
+ output_type="dict",
+ max_concurrent=5, # Limit concurrent executions
+ )
+
+ print(f"\nExecution completed! Got {len(results)} results:")
+ for i, result in enumerate(results):
+ print(f"\nResult {i + 1}:")
+ print(f" Function: {result['function_name']}")
+ print(f" Server: {result['server_url']}")
+ print(f" Status: {result['status']}")
+ if result["status"] == "success":
+ print(f" Result: {result['result']}")
+ else:
+ print(
+ f" Error: {result.get('error', 'Unknown error')}"
+ )
+
+ except Exception as e:
+ print(f"Error during execution: {str(e)}")
+
+
+async def example_async_execution():
+ """Example of asynchronous execution across multiple MCP servers."""
+
+ # Example server URLs
+ urls = [
+ "http://localhost:8000/sse",
+ "http://localhost:8001/sse",
+ "http://localhost:8002/sse",
+ ]
+
+ # Example responses with multiple tool calls in a single response
+ responses = [
+ {
+ "tool_calls": [
+ {
+ "function": {
+ "name": "search_web",
+ "arguments": {
+ "query": "machine learning trends 2024"
+ },
+ }
+ },
+ {
+ "function": {
+ "name": "search_database",
+ "arguments": {
+ "table": "articles",
+ "category": "AI",
+ },
+ }
+ },
+ ]
+ },
+ {
+ "function": {
+ "name": "send_notification",
+ "arguments": {
+ "user_id": 456,
+ "message": "Your analysis is complete",
+ },
+ }
+ },
+ ]
+
+ print("\n=== Asynchronous Execution Example ===")
+ print(
+ f"Executing tool calls across {len(urls)} servers asynchronously..."
+ )
+
+ try:
+ # Execute all tool calls across multiple servers
+ results = (
+ await execute_multiple_tools_on_multiple_mcp_servers(
+ responses=responses,
+ urls=urls,
+ output_type="str",
+ max_concurrent=3,
+ )
+ )
+
+ print(
+ f"\nAsync execution completed! Got {len(results)} results:"
+ )
+ for i, result in enumerate(results):
+ print(f"\nResult {i + 1}:")
+ print(f" Response Index: {result['response_index']}")
+ print(f" Function: {result['function_name']}")
+ print(f" Server: {result['server_url']}")
+ print(f" Status: {result['status']}")
+ if result["status"] == "success":
+ print(f" Result: {result['result']}")
+ else:
+ print(
+ f" Error: {result.get('error', 'Unknown error')}"
+ )
+
+ except Exception as e:
+ print(f"Error during async execution: {str(e)}")
+
+
+def example_get_tools_from_multiple_servers():
+ """Example of getting tools from multiple servers."""
+
+ urls = [
+ "http://localhost:8000/sse",
+ "http://localhost:8001/sse",
+ "http://localhost:8002/sse",
+ ]
+
+ print("\n=== Getting Tools from Multiple Servers ===")
+
+ try:
+ # Get all available tools from all servers
+ all_tools = get_tools_for_multiple_mcp_servers(
+ urls=urls, format="openai", output_type="dict"
+ )
+
+ print(
+ f"Found {len(all_tools)} total tools across all servers:"
+ )
+
+ # Group tools by function name to see what's available
+ function_names = set()
+ for tool in all_tools:
+ if isinstance(tool, dict) and "function" in tool:
+ function_names.add(tool["function"]["name"])
+ elif hasattr(tool, "name"):
+ function_names.add(tool.name)
+
+ print("Available functions:")
+ for func_name in sorted(function_names):
+ print(f" - {func_name}")
+
+ except Exception as e:
+ print(f"Error getting tools: {str(e)}")
+
+
+if __name__ == "__main__":
+ # Run synchronous example
+ example_sync_execution()
+
+ # Run async example
+ asyncio.run(example_async_execution())
+
+ # Get tools from multiple servers
+ example_get_tools_from_multiple_servers()
diff --git a/examples/tools/mcp_examples/servers/mcp_test.py b/examples/mcp/mcp_utils/mcp_test.py
similarity index 100%
rename from examples/tools/mcp_examples/servers/mcp_test.py
rename to examples/mcp/mcp_utils/mcp_test.py
diff --git a/examples/tools/mcp_examples/servers/okx_crypto_server.py b/examples/mcp/mcp_utils/okx_crypto_server.py
similarity index 100%
rename from examples/tools/mcp_examples/servers/okx_crypto_server.py
rename to examples/mcp/mcp_utils/okx_crypto_server.py
diff --git a/examples/mcp/mcp_utils/test_multiple_mcp_servers.py b/examples/mcp/mcp_utils/test_multiple_mcp_servers.py
new file mode 100644
index 00000000..401a00a7
--- /dev/null
+++ b/examples/mcp/mcp_utils/test_multiple_mcp_servers.py
@@ -0,0 +1,54 @@
+"""
+Simple test for the execute_multiple_tools_on_multiple_mcp_servers functionality.
+"""
+
+from swarms.tools.mcp_client_call import (
+ execute_multiple_tools_on_multiple_mcp_servers_sync,
+)
+
+
+def test_async_multiple_tools_execution():
+ """Test the async multiple tools execution function structure."""
+ print(
+ "\nTesting async multiple tools execution function structure..."
+ )
+
+ urls = [
+ "http://localhost:8000/sse",
+ "http://localhost:8001/sse",
+ ]
+
+ # Mock responses with multiple tool calls
+ responses = [
+ {
+ "tool_calls": [
+ {
+ "function": {
+ "name": "get_okx_crypto_price",
+ "arguments": {"symbol": "SOL-USDT"},
+ }
+ },
+ {
+ "function": {
+ "name": "get_crypto_price",
+ "arguments": {"coin_id": "solana"},
+ }
+ },
+ ]
+ }
+ ]
+
+ try:
+ # This will likely fail to connect, but we can test the function structure
+ results = execute_multiple_tools_on_multiple_mcp_servers_sync(
+ responses=responses, urls=urls
+ )
+ print(f"Got {len(results)} results")
+ print(results)
+ except Exception as e:
+ print(f"Expected error (no servers running): {str(e)}")
+ print("Async function structure is working correctly!")
+
+
+if __name__ == "__main__":
+ test_async_multiple_tools_execution()
diff --git a/examples/misc/conversation_test.py b/examples/misc/conversation_test.py
new file mode 100644
index 00000000..ec8a0534
--- /dev/null
+++ b/examples/misc/conversation_test.py
@@ -0,0 +1,22 @@
+from swarms.structs.conversation import Conversation
+
+# Create a conversation object
+conversation = Conversation(backend="in-memory")
+
+# Add a message to the conversation
+conversation.add(
+ role="user", content="Hello, how are you?", category="input"
+)
+
+# Add a message to the conversation
+conversation.add(
+ role="assistant",
+ content="I'm good, thank you!",
+ category="output",
+)
+
+print(
+ conversation.export_and_count_categories(
+ tokenizer_model_name="claude-3-5-sonnet-20240620"
+ )
+)
diff --git a/examples/models/reasoning_duo_batched.py b/examples/models/reasoning_duo_batched.py
index 4d75c66f..9d9ca044 100644
--- a/examples/models/reasoning_duo_batched.py
+++ b/examples/models/reasoning_duo_batched.py
@@ -16,4 +16,4 @@ if __name__ == "__main__":
# Run the batch once and print each result
results = duo.batched_run(tasks)
for task, output in zip(tasks, results):
- print(f"Task: {task}\nResult: {output}\n")
\ No newline at end of file
+ print(f"Task: {task}\nResult: {output}\n")
diff --git a/examples/multi_agent/enhanced_collaboration_example.py b/examples/multi_agent/enhanced_collaboration_example.py
new file mode 100644
index 00000000..561a7a41
--- /dev/null
+++ b/examples/multi_agent/enhanced_collaboration_example.py
@@ -0,0 +1,256 @@
+"""
+Enhanced Collaborative InteractiveGroupChat Example
+
+This example demonstrates the improved collaborative behavior where agents:
+1. Read and understand all previous responses
+2. Acknowledge what other agents have said
+3. Build upon their insights rather than repeating information
+4. Synthesize multiple perspectives
+5. Delegate appropriately using @mentions
+
+The enhanced prompts ensure agents work as a true collaborative team.
+"""
+
+from swarms import Agent
+from swarms.structs.interactive_groupchat import (
+ InteractiveGroupChat,
+ round_robin_speaker,
+)
+
+
+def create_collaborative_agents():
+ """Create agents designed for enhanced collaboration."""
+
+ # Data Analyst - focuses on data insights and trends
+ analyst = Agent(
+ agent_name="analyst",
+ system_prompt="""You are a senior data analyst with expertise in business intelligence, statistical analysis, and data visualization. You excel at:
+- Analyzing complex datasets and identifying trends
+- Creating actionable insights from data
+- Providing quantitative evidence for business decisions
+- Identifying patterns and correlations in data
+
+When collaborating, always reference specific data points and build upon others' insights with quantitative support.""",
+        model_name="gpt-3.5-turbo",
+ )
+
+ # Market Researcher - focuses on market trends and customer insights
+ researcher = Agent(
+ agent_name="researcher",
+ system_prompt="""You are a market research specialist with deep expertise in consumer behavior, competitive analysis, and market trends. You excel at:
+- Understanding customer needs and preferences
+- Analyzing competitive landscapes
+- Identifying market opportunities and threats
+- Providing qualitative insights that complement data analysis
+
+When collaborating, always connect market insights to business implications and build upon data analysis with market context.""",
+        model_name="gpt-3.5-turbo",
+ )
+
+ # Strategy Consultant - focuses on strategic recommendations
+ strategist = Agent(
+ agent_name="strategist",
+ system_prompt="""You are a strategic consultant with expertise in business strategy, competitive positioning, and strategic planning. You excel at:
+- Developing comprehensive business strategies
+- Identifying competitive advantages
+- Creating actionable strategic recommendations
+- Synthesizing multiple perspectives into coherent strategies
+
+When collaborating, always synthesize insights from all team members and provide strategic recommendations that leverage the collective expertise.""",
+        model_name="gpt-3.5-turbo",
+ )
+
+ return [analyst, researcher, strategist]
+
+
+def example_comprehensive_analysis():
+ """Example of comprehensive collaborative analysis."""
+ print("=== Enhanced Collaborative Analysis Example ===\n")
+
+ agents = create_collaborative_agents()
+
+ # Create group chat with round robin speaker function
+ group_chat = InteractiveGroupChat(
+ name="Strategic Analysis Team",
+ description="A collaborative team for comprehensive business analysis",
+ agents=agents,
+ speaker_function=round_robin_speaker,
+ interactive=False,
+ )
+
+ # Complex task that requires collaboration
+ task = """Analyze our company's performance in the e-commerce market.
+ We have the following data:
+ - Q3 revenue: $2.5M (up 15% from Q2)
+ - Customer acquisition cost: $45 (down 8% from Q2)
+ - Customer lifetime value: $180 (up 12% from Q2)
+ - Market share: 3.2% (up 0.5% from Q2)
+ - Competitor analysis shows 3 major players with 60% market share combined
+
+ @analyst @researcher @strategist please provide a comprehensive analysis and strategic recommendations."""
+
+ print(f"Task: {task}\n")
+ print("Expected collaborative behavior:")
+ print(
+ "1. Analyst: Analyzes the data trends and provides quantitative insights"
+ )
+ print(
+ "2. Researcher: Builds on data with market context and competitive analysis"
+ )
+ print(
+ "3. Strategist: Synthesizes both perspectives into strategic recommendations"
+ )
+ print("\n" + "=" * 80 + "\n")
+
+ response = group_chat.run(task)
+ print(f"Collaborative Response:\n{response}")
+
+
+def example_problem_solving():
+ """Example of collaborative problem solving."""
+ print("\n" + "=" * 80)
+ print("=== Collaborative Problem Solving Example ===\n")
+
+ agents = create_collaborative_agents()
+
+ group_chat = InteractiveGroupChat(
+ name="Problem Solving Team",
+ description="A team that collaborates to solve complex business problems",
+ agents=agents,
+ speaker_function=round_robin_speaker,
+ interactive=False,
+ )
+
+ # Problem-solving task
+ task = """We're experiencing declining customer retention rates (down 20% in the last 6 months).
+ Our customer satisfaction scores are also dropping (from 8.5 to 7.2).
+
+ @analyst please analyze the retention data, @researcher investigate customer feedback and market trends,
+ and @strategist develop a comprehensive solution strategy."""
+
+ print(f"Task: {task}\n")
+ print("Expected collaborative behavior:")
+ print("1. Analyst: Identifies patterns in retention data")
+ print(
+ "2. Researcher: Explores customer feedback and market factors"
+ )
+ print(
+ "3. Strategist: Combines insights to create actionable solutions"
+ )
+ print("\n" + "=" * 80 + "\n")
+
+ response = group_chat.run(task)
+ print(f"Collaborative Response:\n{response}")
+
+
+def example_agent_delegation():
+ """Example showing how agents delegate to each other."""
+ print("\n" + "=" * 80)
+ print("=== Agent Delegation Example ===\n")
+
+ agents = create_collaborative_agents()
+
+ group_chat = InteractiveGroupChat(
+ name="Delegation Team",
+ description="A team that demonstrates effective delegation and collaboration",
+ agents=agents,
+ speaker_function=round_robin_speaker,
+ interactive=False,
+ )
+
+ # Task that encourages delegation
+ task = """We need to evaluate a potential new market entry opportunity in Southeast Asia.
+ The initial data shows promising growth potential, but we need a comprehensive assessment.
+
+ @analyst start with the market data analysis, then delegate to @researcher for market research,
+ and finally @strategist should provide strategic recommendations."""
+
+ print(f"Task: {task}\n")
+ print("Expected behavior:")
+ print(
+ "1. Analyst: Analyzes data and delegates to researcher for deeper market insights"
+ )
+ print(
+ "2. Researcher: Builds on data analysis and delegates to strategist for recommendations"
+ )
+ print(
+ "3. Strategist: Synthesizes all insights into strategic recommendations"
+ )
+ print("\n" + "=" * 80 + "\n")
+
+ response = group_chat.run(task)
+ print(f"Collaborative Response:\n{response}")
+
+
+def example_synthesis_and_integration():
+ """Example showing synthesis of multiple perspectives."""
+ print("\n" + "=" * 80)
+ print("=== Synthesis and Integration Example ===\n")
+
+ agents = create_collaborative_agents()
+
+ group_chat = InteractiveGroupChat(
+ name="Synthesis Team",
+ description="A team that excels at integrating multiple perspectives",
+ agents=agents,
+ speaker_function=round_robin_speaker,
+ interactive=False,
+ )
+
+ # Task requiring synthesis
+ task = """We have conflicting information about our product's market position:
+ - Sales data shows strong growth (25% increase)
+ - Customer surveys indicate declining satisfaction
+ - Competitor analysis shows we're losing market share
+ - Internal metrics show improved operational efficiency
+
+ @analyst @researcher @strategist please analyze these conflicting signals and provide
+ an integrated assessment of our true market position."""
+
+ print(f"Task: {task}\n")
+ print("Expected behavior:")
+ print(
+ "1. Analyst: Clarifies the data discrepancies and identifies patterns"
+ )
+ print(
+ "2. Researcher: Provides market context to explain the contradictions"
+ )
+ print(
+ "3. Strategist: Synthesizes all perspectives into a coherent market assessment"
+ )
+ print("\n" + "=" * 80 + "\n")
+
+ response = group_chat.run(task)
+ print(f"Collaborative Response:\n{response}")
+
+
+def main():
+ """Run all enhanced collaboration examples."""
+ print("Enhanced Collaborative InteractiveGroupChat Examples")
+ print("=" * 80)
+ print("This demonstrates improved agent collaboration with:")
+ print("- Acknowledgment of other agents' contributions")
+ print("- Building upon previous insights")
+ print("- Synthesis of multiple perspectives")
+ print("- Appropriate delegation using @mentions")
+ print("- Comprehensive understanding of conversation history")
+ print("=" * 80 + "\n")
+
+ # Run examples
+ example_comprehensive_analysis()
+ example_problem_solving()
+ example_agent_delegation()
+ example_synthesis_and_integration()
+
+ print("\n" + "=" * 80)
+ print("All enhanced collaboration examples completed!")
+ print("Notice how agents now:")
+ print("✓ Acknowledge each other's contributions")
+ print("✓ Build upon previous insights")
+ print("✓ Synthesize multiple perspectives")
+ print("✓ Delegate appropriately")
+ print("✓ Provide more cohesive and comprehensive responses")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/multi_agent/groupchat_examples/crypto_tax.py b/examples/multi_agent/groupchat/groupchat_examples/crypto_tax.py
similarity index 100%
rename from examples/multi_agent/groupchat_examples/crypto_tax.py
rename to examples/multi_agent/groupchat/groupchat_examples/crypto_tax.py
diff --git a/examples/multi_agent/groupchat_examples/crypto_tax_swarm 2.py b/examples/multi_agent/groupchat/groupchat_examples/crypto_tax_swarm 2.py
similarity index 100%
rename from examples/multi_agent/groupchat_examples/crypto_tax_swarm 2.py
rename to examples/multi_agent/groupchat/groupchat_examples/crypto_tax_swarm 2.py
diff --git a/examples/multi_agent/groupchat_examples/crypto_tax_swarm.py b/examples/multi_agent/groupchat/groupchat_examples/crypto_tax_swarm.py
similarity index 100%
rename from examples/multi_agent/groupchat_examples/crypto_tax_swarm.py
rename to examples/multi_agent/groupchat/groupchat_examples/crypto_tax_swarm.py
diff --git a/examples/multi_agent/groupchat_examples/group_chat_example.py b/examples/multi_agent/groupchat/groupchat_examples/group_chat_example.py
similarity index 100%
rename from examples/multi_agent/groupchat_examples/group_chat_example.py
rename to examples/multi_agent/groupchat/groupchat_examples/group_chat_example.py
diff --git a/examples/multi_agent/groupchat_examples/groupchat_example.py b/examples/multi_agent/groupchat/groupchat_examples/groupchat_example.py
similarity index 100%
rename from examples/multi_agent/groupchat_examples/groupchat_example.py
rename to examples/multi_agent/groupchat/groupchat_examples/groupchat_example.py
diff --git a/examples/multi_agent/groupchat/groupchat_examples/mortgage_tax_panel_example.py b/examples/multi_agent/groupchat/groupchat_examples/mortgage_tax_panel_example.py
new file mode 100644
index 00000000..45be97af
--- /dev/null
+++ b/examples/multi_agent/groupchat/groupchat_examples/mortgage_tax_panel_example.py
@@ -0,0 +1,154 @@
+"""
+Mortgage and Tax Panel Discussion Example
+
+This example demonstrates a panel of mortgage and tax specialists discussing complex
+financial situations using InteractiveGroupChat with different speaker functions.
+The panel includes specialists from different financial fields who can collaborate
+on complex mortgage and tax planning cases.
+"""
+
+from swarms import Agent
+from swarms.structs.interactive_groupchat import (
+ InteractiveGroupChat,
+)
+
+
+def create_mortgage_tax_panel():
+ """Create a panel of mortgage and tax specialists for discussion."""
+
+ # Tax Attorney - Specializes in tax law and complex tax situations
+ tax_attorney = Agent(
+ agent_name="tax_attorney",
+ system_prompt="""You are Sarah Mitchell, J.D., a tax attorney with 15 years of experience.
+ You specialize in complex tax law, real estate taxation, and tax planning strategies.
+ You have expertise in:
+ - Federal and state tax regulations
+ - Real estate tax law and property taxation
+ - Tax implications of mortgage transactions
+ - Tax planning for real estate investments
+ - IRS dispute resolution and tax litigation
+ - Estate tax planning and trusts
+
+ When discussing cases, provide legally sound tax advice, consider recent tax law changes,
+ and collaborate with other specialists to ensure comprehensive financial planning.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Mortgage Broker - Lending and mortgage specialist
+ mortgage_broker = Agent(
+ agent_name="mortgage_broker",
+ system_prompt="""You are Michael Chen, a senior mortgage broker with 12 years of experience.
+ You specialize in residential and commercial mortgage lending.
+ You have expertise in:
+ - Conventional, FHA, VA, and jumbo loans
+ - Commercial mortgage financing
+ - Mortgage refinancing strategies
+ - Interest rate analysis and trends
+ - Loan qualification requirements
+ - Mortgage insurance considerations
+
+ When discussing cases, analyze lending options, consider credit profiles,
+ and evaluate debt-to-income ratios for optimal mortgage solutions.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Real Estate CPA - Accounting specialist
+ real_estate_cpa = Agent(
+ agent_name="real_estate_cpa",
+ system_prompt="""You are Emily Rodriguez, CPA, a certified public accountant with 10 years of experience.
+ You specialize in real estate accounting and tax preparation.
+ You have expertise in:
+ - Real estate tax accounting
+ - Property depreciation strategies
+ - Mortgage interest deductions
+ - Real estate investment taxation
+ - Financial statement analysis
+ - Tax credit optimization
+
+ When discussing cases, focus on accounting implications, tax efficiency,
+ and financial reporting requirements for real estate transactions.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Financial Advisor - Investment and planning specialist
+ financial_advisor = Agent(
+ agent_name="financial_advisor",
+ system_prompt="""You are James Thompson, CFP®, a financial advisor with 8 years of experience.
+ You specialize in comprehensive financial planning and wealth management.
+ You have expertise in:
+ - Investment portfolio management
+ - Retirement planning
+ - Real estate investment strategy
+ - Cash flow analysis
+ - Risk management
+ - Estate planning coordination
+
+ When discussing cases, consider overall financial goals, investment strategy,
+ and how mortgage decisions impact long-term financial planning.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Real Estate Attorney - Property law specialist
+ real_estate_attorney = Agent(
+ agent_name="real_estate_attorney",
+ system_prompt="""You are Lisa Park, J.D., a real estate attorney with 11 years of experience.
+ You specialize in real estate law and property transactions.
+ You have expertise in:
+ - Real estate contract law
+ - Property title analysis
+ - Mortgage document review
+ - Real estate closing procedures
+ - Property rights and zoning
+ - Real estate litigation
+
+ When discussing cases, evaluate legal implications, ensure compliance,
+ and address potential legal issues in real estate transactions.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ return [
+ tax_attorney,
+ mortgage_broker,
+ real_estate_cpa,
+ financial_advisor,
+ real_estate_attorney,
+ ]
+
+
+def example_mortgage_tax_panel():
+    """Example with a random speaking order."""
+ print("=== MORTGAGE AND TAX SPECIALIST PANEL ===\n")
+
+ agents = create_mortgage_tax_panel()
+
+ group_chat = InteractiveGroupChat(
+ name="Mortgage and Tax Panel Discussion",
+ description="A collaborative panel of mortgage and tax specialists discussing complex cases",
+ agents=agents,
+ interactive=False,
+ speaker_function="random-speaker",
+ )
+
+ # Case 1: Complex mortgage refinancing with tax implications
+ case1 = """CASE PRESENTATION:
+ @tax_attorney, @real_estate_cpa, and @real_estate_attorney, please discuss the possible legal and accounting strategies
+ for minimizing or potentially eliminating property taxes in Los Altos, California. Consider legal exemptions,
+ special assessments, and any relevant California property tax laws that could help achieve this goal.
+ """
+
+ group_chat.run(case1)
+
+
+if __name__ == "__main__":
+
+ example_mortgage_tax_panel()
diff --git a/examples/multi_agent/groupchat/quantum_physics_swarm.py b/examples/multi_agent/groupchat/quantum_physics_swarm.py
new file mode 100644
index 00000000..595d47c5
--- /dev/null
+++ b/examples/multi_agent/groupchat/quantum_physics_swarm.py
@@ -0,0 +1,89 @@
+from swarms import Agent
+from swarms.structs.interactive_groupchat import InteractiveGroupChat
+
+
+if __name__ == "__main__":
+ # Initialize agents specialized for condensed matter physics
+ theoretical_physicist = Agent(
+ agent_name="TheoreticalPhysicist",
+ system_prompt="""
+ You are an exceptionally brilliant theoretical condensed matter physicist with deep expertise in quantum many-body theory, phase transitions, and emergent phenomena. You possess extraordinary mathematical intuition and can derive, manipulate, and analyze complex equations with remarkable precision.
+
+ Your core competencies include:
+ - **Advanced Mathematical Modeling**: You excel at formulating and solving differential equations, partial differential equations, and integro-differential equations that describe quantum systems. You can derive equations from first principles using variational methods, path integrals, and functional analysis.
+
+ - **Quantum Field Theory**: You master the mathematical framework of quantum field theory, including Feynman diagrams, renormalization group theory, and effective field theories. You can derive and analyze equations for correlation functions, Green's functions, and response functions.
+
+ - **Statistical Mechanics**: You are expert at deriving partition functions, free energies, and thermodynamic potentials. You can formulate and solve equations for phase transitions, critical phenomena, and scaling behavior using techniques like mean-field theory, Landau-Ginzburg theory, and renormalization group methods.
+
+ - **Many-Body Physics**: You excel at deriving equations for interacting quantum systems, including Hubbard models, Heisenberg models, and BCS theory. You can analyze equations for collective excitations, quasiparticles, and topological states.
+
+ - **Analytical Techniques**: You master perturbation theory, variational methods, exact diagonalization, and other analytical techniques. You can derive equations for energy spectra, wave functions, and observables in complex quantum systems.
+
+ When presented with a physics problem, you immediately think in terms of mathematical equations and can derive the appropriate formalism from fundamental principles. You always show your mathematical work step-by-step and explain the physical meaning of each equation you write.
+ """,
+ model="claude-3-5-sonnet-20240620",
+ )
+
+ experimental_physicist = Agent(
+ agent_name="ExperimentalPhysicist",
+ system_prompt="""You are an exceptionally skilled experimental condensed matter physicist with profound expertise in materials synthesis, characterization techniques, and data analysis. You possess extraordinary analytical abilities and can derive, interpret, and validate equations that describe experimental observations.
+
+Your core competencies include:
+- **Materials Synthesis & Characterization**: You excel at designing synthesis protocols and deriving equations that describe growth kinetics, phase formation, and structural evolution. You can formulate equations for crystal growth, diffusion processes, and phase equilibria.
+
+- **Advanced Characterization Techniques**: You master the mathematical foundations of X-ray diffraction (Bragg's law, structure factors, Rietveld refinement), electron microscopy (diffraction patterns, image formation), and spectroscopy (absorption, emission, scattering cross-sections). You can derive equations for resolution limits, signal-to-noise ratios, and detection sensitivity.
+
+- **Transport Properties**: You excel at deriving and analyzing equations for electrical conductivity (Drude model, Boltzmann transport), thermal conductivity (phonon and electron contributions), and magnetic properties (Curie-Weiss law, magnetic susceptibility). You can formulate equations for Hall effect, magnetoresistance, and thermoelectric effects.
+
+- **Data Analysis & Modeling**: You possess advanced skills in fitting experimental data to theoretical models, error analysis, and statistical inference. You can derive equations for uncertainty propagation, confidence intervals, and model selection criteria.
+
+- **Experimental Design**: You excel at deriving equations for experimental sensitivity, resolution requirements, and optimization of measurement parameters. You can formulate equations for signal processing, noise reduction, and systematic error correction.
+
+When analyzing experimental data, you immediately think in terms of mathematical models and can derive equations that connect observations to underlying physical mechanisms. You always show your mathematical reasoning and explain how equations relate to experimental reality.""",
+ model="claude-3-5-sonnet-20240620",
+ )
+
+ computational_physicist = Agent(
+ agent_name="ComputationalPhysicist",
+ system_prompt="""You are an exceptionally brilliant computational condensed matter physicist with deep expertise in numerical methods, algorithm development, and high-performance computing. You possess extraordinary mathematical skills and can formulate, implement, and analyze equations that drive computational simulations.
+
+Your core competencies include:
+- **Density Functional Theory (DFT)**: You excel at deriving and implementing the Kohn-Sham equations, exchange-correlation functionals, and self-consistent field methods. You can formulate equations for electronic structure, total energies, forces, and response functions. You master the mathematical foundations of plane-wave methods, pseudopotentials, and k-point sampling.
+
+- **Quantum Monte Carlo Methods**: You are expert at deriving equations for variational Monte Carlo, diffusion Monte Carlo, and path integral Monte Carlo. You can formulate equations for importance sampling, correlation functions, and statistical estimators. You excel at deriving equations for finite-size effects, time-step errors, and population control.
+
+- **Molecular Dynamics**: You master the mathematical framework of classical and ab initio molecular dynamics, including equations of motion, thermostats, barostats, and constraint algorithms. You can derive equations for time integration schemes, energy conservation, and phase space sampling.
+
+- **Many-Body Methods**: You excel at implementing and analyzing equations for exact diagonalization, quantum chemistry methods (CI, CC, MP), and tensor network methods (DMRG, PEPS). You can derive equations for matrix elements, basis transformations, and optimization algorithms.
+
+- **High-Performance Computing**: You possess advanced skills in parallel algorithms, load balancing, and numerical optimization. You can derive equations for computational complexity, scaling behavior, and performance bottlenecks. You excel at formulating equations for parallel efficiency, communication overhead, and memory management.
+
+When developing computational methods, you think in terms of mathematical algorithms and can derive equations that translate physical problems into efficient numerical procedures. You always show your mathematical derivations and explain how equations map to computational implementations.""",
+ model="claude-3-5-sonnet-20240620",
+ )
+
+ # Create list of agents including both Agent instances and callable
+ agents = [
+ theoretical_physicist,
+ experimental_physicist,
+ computational_physicist,
+ ]
+
+ # Initialize another chat instance in interactive mode
+ interactive_chat = InteractiveGroupChat(
+ name="Interactive Condensed Matter Physics Research Team",
+ description="An interactive team of condensed matter physics experts providing comprehensive analysis of quantum materials, phase transitions, and emergent phenomena",
+ agents=agents,
+ max_loops=1,
+ output_type="all",
+ interactive=True,
+ )
+
+ try:
+ # Start the interactive session
+ print("\nStarting interactive session...")
+ # interactive_chat.run("What is the best methodology to accumulate gold and silver commodities, what is the best long term strategy to accumulate them?")
+ interactive_chat.start_interactive_session()
+ except Exception as e:
+ print(f"An error occurred in interactive mode: {e}")
diff --git a/examples/multi_agent/groupchat/random_dynamic_speaker_example.py b/examples/multi_agent/groupchat/random_dynamic_speaker_example.py
new file mode 100644
index 00000000..56c62049
--- /dev/null
+++ b/examples/multi_agent/groupchat/random_dynamic_speaker_example.py
@@ -0,0 +1,162 @@
+"""
+Medical Panel Discussion Example
+
+This example demonstrates a panel of medical specialists discussing treatment solutions
+for various diseases using InteractiveGroupChat with different speaker functions:
+- Round Robin: Doctors speak in a fixed order
+- Random: Doctors speak in random order
+- Priority: Senior doctors speak first
+- Custom: Disease-specific speaker function
+
+The panel includes specialists from different medical fields who can collaborate
+on complex medical cases and treatment plans.
+"""
+
+from swarms import Agent
+from swarms.structs.interactive_groupchat import (
+ InteractiveGroupChat,
+)
+
+
+def create_medical_panel():
+ """Create a panel of medical specialists for discussion."""
+
+ # Cardiologist - Heart and cardiovascular system specialist
+ cardiologist = Agent(
+ agent_name="cardiologist",
+ system_prompt="""You are Dr. Sarah Chen, a board-certified cardiologist with 15 years of experience.
+ You specialize in cardiovascular diseases, heart failure, arrhythmias, and interventional cardiology.
+ You have expertise in:
+ - Coronary artery disease and heart attacks
+ - Heart failure and cardiomyopathy
+ - Arrhythmias and electrophysiology
+ - Hypertension and lipid disorders
+ - Cardiac imaging and diagnostic procedures
+
+ When discussing cases, provide evidence-based treatment recommendations,
+ consider patient risk factors, and collaborate with other specialists for comprehensive care.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Oncologist - Cancer specialist
+ oncologist = Agent(
+ agent_name="oncologist",
+ system_prompt="""You are Dr. Michael Rodriguez, a medical oncologist with 12 years of experience.
+ You specialize in the diagnosis and treatment of various types of cancer.
+ You have expertise in:
+ - Solid tumors (lung, breast, colon, prostate, etc.)
+ - Hematologic malignancies (leukemia, lymphoma, multiple myeloma)
+ - Targeted therapy and immunotherapy
+ - Clinical trials and novel treatments
+ - Palliative care and symptom management
+
+ When discussing cases, consider the cancer type, stage, molecular profile,
+ patient performance status, and available treatment options including clinical trials.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Neurologist - Nervous system specialist
+ neurologist = Agent(
+ agent_name="neurologist",
+ system_prompt="""You are Dr. Emily Watson, a neurologist with 10 years of experience.
+ You specialize in disorders of the nervous system, brain, and spinal cord.
+ You have expertise in:
+ - Stroke and cerebrovascular disease
+ - Neurodegenerative disorders (Alzheimer's, Parkinson's, ALS)
+ - Multiple sclerosis and demyelinating diseases
+ - Epilepsy and seizure disorders
+ - Headache and migraine disorders
+ - Neuromuscular diseases
+
+ When discussing cases, consider neurological symptoms, imaging findings,
+ and the impact of neurological conditions on overall patient care.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Endocrinologist - Hormone and metabolism specialist
+ endocrinologist = Agent(
+ agent_name="endocrinologist",
+ system_prompt="""You are Dr. James Thompson, an endocrinologist with 8 years of experience.
+ You specialize in disorders of the endocrine system and metabolism.
+ You have expertise in:
+ - Diabetes mellitus (Type 1, Type 2, gestational)
+ - Thyroid disorders (hyperthyroidism, hypothyroidism, thyroid cancer)
+ - Adrenal disorders and Cushing's syndrome
+ - Pituitary disorders and growth hormone issues
+ - Osteoporosis and calcium metabolism
+ - Reproductive endocrinology
+
+ When discussing cases, consider metabolic factors, hormone levels,
+ and how endocrine disorders may affect other organ systems.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Infectious Disease Specialist
+ infectious_disease = Agent(
+ agent_name="infectious_disease",
+ system_prompt="""You are Dr. Lisa Park, an infectious disease specialist with 11 years of experience.
+ You specialize in the diagnosis and treatment of infectious diseases.
+ You have expertise in:
+ - Bacterial, viral, fungal, and parasitic infections
+ - Antibiotic resistance and antimicrobial stewardship
+ - HIV/AIDS and opportunistic infections
+ - Travel medicine and tropical diseases
+ - Hospital-acquired infections
+ - Emerging infectious diseases
+
+ When discussing cases, consider the infectious agent, antimicrobial susceptibility,
+ host factors, and infection control measures.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ return [
+ cardiologist,
+ oncologist,
+ neurologist,
+ endocrinologist,
+ infectious_disease,
+ ]
+
+
+def example_random_dynamic_panel():
+    """Example with the random dynamic speaker function."""
+    print("=== RANDOM DYNAMIC SPEAKER MEDICAL PANEL ===\n")
+
+ agents = create_medical_panel()
+
+ group_chat = InteractiveGroupChat(
+ name="Medical Panel Discussion",
+ description="A collaborative panel of medical specialists discussing complex cases",
+ agents=agents,
+ speaker_function="random-dynamic-speaker",
+ interactive=False,
+ )
+
+ print(group_chat.speaker_function)
+ print(group_chat.get_current_speaker_function())
+
+ # Case 1: Complex patient with multiple conditions
+ case1 = """CASE PRESENTATION:
+ A 65-year-old male with Type 2 diabetes, hypertension, and recent diagnosis of
+ stage 3 colon cancer presents with chest pain and shortness of breath.
+ ECG shows ST-segment elevation. Recent blood work shows elevated blood glucose (280 mg/dL)
+ and signs of infection (WBC 15,000, CRP elevated).
+
+ @cardiologist @oncologist @endocrinologist @infectious_disease please provide your
+ assessment and treatment recommendations for this complex case."""
+
+ group_chat.run(case1)
+
+
+if __name__ == "__main__":
+    example_random_dynamic_panel()
diff --git a/examples/multi_agent/interactive_groupchat_examples/interactive_groupchat_speaker_example.py b/examples/multi_agent/interactive_groupchat_examples/interactive_groupchat_speaker_example.py
new file mode 100644
index 00000000..e2d1b9fc
--- /dev/null
+++ b/examples/multi_agent/interactive_groupchat_examples/interactive_groupchat_speaker_example.py
@@ -0,0 +1,163 @@
+"""
+Medical Panel Discussion Example
+
+This example demonstrates a panel of medical specialists discussing treatment solutions
+for various diseases using InteractiveGroupChat with different speaker functions:
+- Round Robin: Doctors speak in a fixed order
+- Random: Doctors speak in random order
+- Priority: Senior doctors speak first
+- Custom: Disease-specific speaker function
+
+The panel includes specialists from different medical fields who can collaborate
+on complex medical cases and treatment plans.
+"""
+
+from swarms import Agent
+from swarms.structs.interactive_groupchat import (
+ InteractiveGroupChat,
+)
+
+
+def create_medical_panel():
+ """Create a panel of medical specialists for discussion."""
+
+ # Cardiologist - Heart and cardiovascular system specialist
+ cardiologist = Agent(
+ agent_name="cardiologist",
+ system_prompt="""You are Dr. Sarah Chen, a board-certified cardiologist with 15 years of experience.
+ You specialize in cardiovascular diseases, heart failure, arrhythmias, and interventional cardiology.
+ You have expertise in:
+ - Coronary artery disease and heart attacks
+ - Heart failure and cardiomyopathy
+ - Arrhythmias and electrophysiology
+ - Hypertension and lipid disorders
+ - Cardiac imaging and diagnostic procedures
+
+ When discussing cases, provide evidence-based treatment recommendations,
+ consider patient risk factors, and collaborate with other specialists for comprehensive care.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Oncologist - Cancer specialist
+ oncologist = Agent(
+ agent_name="oncologist",
+ system_prompt="""You are Dr. Michael Rodriguez, a medical oncologist with 12 years of experience.
+ You specialize in the diagnosis and treatment of various types of cancer.
+ You have expertise in:
+ - Solid tumors (lung, breast, colon, prostate, etc.)
+ - Hematologic malignancies (leukemia, lymphoma, multiple myeloma)
+ - Targeted therapy and immunotherapy
+ - Clinical trials and novel treatments
+ - Palliative care and symptom management
+
+ When discussing cases, consider the cancer type, stage, molecular profile,
+ patient performance status, and available treatment options including clinical trials.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Neurologist - Nervous system specialist
+ neurologist = Agent(
+ agent_name="neurologist",
+ system_prompt="""You are Dr. Emily Watson, a neurologist with 10 years of experience.
+ You specialize in disorders of the nervous system, brain, and spinal cord.
+ You have expertise in:
+ - Stroke and cerebrovascular disease
+ - Neurodegenerative disorders (Alzheimer's, Parkinson's, ALS)
+ - Multiple sclerosis and demyelinating diseases
+ - Epilepsy and seizure disorders
+ - Headache and migraine disorders
+ - Neuromuscular diseases
+
+ When discussing cases, consider neurological symptoms, imaging findings,
+ and the impact of neurological conditions on overall patient care.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Endocrinologist - Hormone and metabolism specialist
+ endocrinologist = Agent(
+ agent_name="endocrinologist",
+ system_prompt="""You are Dr. James Thompson, an endocrinologist with 8 years of experience.
+ You specialize in disorders of the endocrine system and metabolism.
+ You have expertise in:
+ - Diabetes mellitus (Type 1, Type 2, gestational)
+ - Thyroid disorders (hyperthyroidism, hypothyroidism, thyroid cancer)
+ - Adrenal disorders and Cushing's syndrome
+ - Pituitary disorders and growth hormone issues
+ - Osteoporosis and calcium metabolism
+ - Reproductive endocrinology
+
+ When discussing cases, consider metabolic factors, hormone levels,
+ and how endocrine disorders may affect other organ systems.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Infectious Disease Specialist
+ infectious_disease = Agent(
+ agent_name="infectious_disease",
+ system_prompt="""You are Dr. Lisa Park, an infectious disease specialist with 11 years of experience.
+ You specialize in the diagnosis and treatment of infectious diseases.
+ You have expertise in:
+ - Bacterial, viral, fungal, and parasitic infections
+ - Antibiotic resistance and antimicrobial stewardship
+ - HIV/AIDS and opportunistic infections
+ - Travel medicine and tropical diseases
+ - Hospital-acquired infections
+ - Emerging infectious diseases
+
+ When discussing cases, consider the infectious agent, antimicrobial susceptibility,
+ host factors, and infection control measures.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ return [
+ cardiologist,
+ oncologist,
+ neurologist,
+ endocrinologist,
+ infectious_disease,
+ ]
+
+
+def example_round_robin_panel():
+ """Example with round robin speaking order."""
+ print("=== ROUND ROBIN MEDICAL PANEL ===\n")
+
+ agents = create_medical_panel()
+
+ group_chat = InteractiveGroupChat(
+ name="Medical Panel Discussion",
+ description="A collaborative panel of medical specialists discussing complex cases",
+ agents=agents,
+ speaker_function="round-robin-speaker",
+ interactive=False,
+ )
+
+ print(group_chat.speaker_function)
+
+ # Case 1: Complex patient with multiple conditions
+ case1 = """CASE PRESENTATION:
+ A 65-year-old male with Type 2 diabetes, hypertension, and recent diagnosis of
+ stage 3 colon cancer presents with chest pain and shortness of breath.
+ ECG shows ST-segment elevation. Recent blood work shows elevated blood glucose (280 mg/dL)
+ and signs of infection (WBC 15,000, CRP elevated).
+
+ @cardiologist @oncologist @endocrinologist @infectious_disease please provide your
+ assessment and treatment recommendations for this complex case."""
+
+ response = group_chat.run(case1)
+ print(f"Response:\n{response}\n")
+ print("=" * 80 + "\n")
+
+
+if __name__ == "__main__":
+ example_round_robin_panel()
diff --git a/examples/multi_agent/interactive_groupchat_examples/medical_panel_example.py b/examples/multi_agent/interactive_groupchat_examples/medical_panel_example.py
new file mode 100644
index 00000000..0e31c96e
--- /dev/null
+++ b/examples/multi_agent/interactive_groupchat_examples/medical_panel_example.py
@@ -0,0 +1,162 @@
+"""
+Medical Panel Discussion Example
+
+This example demonstrates a panel of medical specialists discussing treatment solutions
+for various diseases using InteractiveGroupChat with different speaker functions:
+- Round Robin: Doctors speak in a fixed order
+- Random: Doctors speak in random order
+- Priority: Senior doctors speak first
+- Custom: Disease-specific speaker function
+
+The panel includes specialists from different medical fields who can collaborate
+on complex medical cases and treatment plans.
+"""
+
+from swarms import Agent
+from swarms.structs.interactive_groupchat import (
+ InteractiveGroupChat,
+ round_robin_speaker,
+)
+
+
+def create_medical_panel():
+ """Create a panel of medical specialists for discussion."""
+
+ # Cardiologist - Heart and cardiovascular system specialist
+ cardiologist = Agent(
+ agent_name="cardiologist",
+ system_prompt="""You are Dr. Sarah Chen, a board-certified cardiologist with 15 years of experience.
+ You specialize in cardiovascular diseases, heart failure, arrhythmias, and interventional cardiology.
+ You have expertise in:
+ - Coronary artery disease and heart attacks
+ - Heart failure and cardiomyopathy
+ - Arrhythmias and electrophysiology
+ - Hypertension and lipid disorders
+ - Cardiac imaging and diagnostic procedures
+
+ When discussing cases, provide evidence-based treatment recommendations,
+ consider patient risk factors, and collaborate with other specialists for comprehensive care.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Oncologist - Cancer specialist
+ oncologist = Agent(
+ agent_name="oncologist",
+ system_prompt="""You are Dr. Michael Rodriguez, a medical oncologist with 12 years of experience.
+ You specialize in the diagnosis and treatment of various types of cancer.
+ You have expertise in:
+ - Solid tumors (lung, breast, colon, prostate, etc.)
+ - Hematologic malignancies (leukemia, lymphoma, multiple myeloma)
+ - Targeted therapy and immunotherapy
+ - Clinical trials and novel treatments
+ - Palliative care and symptom management
+
+ When discussing cases, consider the cancer type, stage, molecular profile,
+ patient performance status, and available treatment options including clinical trials.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Neurologist - Nervous system specialist
+ neurologist = Agent(
+ agent_name="neurologist",
+ system_prompt="""You are Dr. Emily Watson, a neurologist with 10 years of experience.
+ You specialize in disorders of the nervous system, brain, and spinal cord.
+ You have expertise in:
+ - Stroke and cerebrovascular disease
+ - Neurodegenerative disorders (Alzheimer's, Parkinson's, ALS)
+ - Multiple sclerosis and demyelinating diseases
+ - Epilepsy and seizure disorders
+ - Headache and migraine disorders
+ - Neuromuscular diseases
+
+ When discussing cases, consider neurological symptoms, imaging findings,
+ and the impact of neurological conditions on overall patient care.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Endocrinologist - Hormone and metabolism specialist
+ endocrinologist = Agent(
+ agent_name="endocrinologist",
+ system_prompt="""You are Dr. James Thompson, an endocrinologist with 8 years of experience.
+ You specialize in disorders of the endocrine system and metabolism.
+ You have expertise in:
+ - Diabetes mellitus (Type 1, Type 2, gestational)
+ - Thyroid disorders (hyperthyroidism, hypothyroidism, thyroid cancer)
+ - Adrenal disorders and Cushing's syndrome
+ - Pituitary disorders and growth hormone issues
+ - Osteoporosis and calcium metabolism
+ - Reproductive endocrinology
+
+ When discussing cases, consider metabolic factors, hormone levels,
+ and how endocrine disorders may affect other organ systems.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ # Infectious Disease Specialist
+ infectious_disease = Agent(
+ agent_name="infectious_disease",
+ system_prompt="""You are Dr. Lisa Park, an infectious disease specialist with 11 years of experience.
+ You specialize in the diagnosis and treatment of infectious diseases.
+ You have expertise in:
+ - Bacterial, viral, fungal, and parasitic infections
+ - Antibiotic resistance and antimicrobial stewardship
+ - HIV/AIDS and opportunistic infections
+ - Travel medicine and tropical diseases
+ - Hospital-acquired infections
+ - Emerging infectious diseases
+
+ When discussing cases, consider the infectious agent, antimicrobial susceptibility,
+ host factors, and infection control measures.""",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ return [
+ cardiologist,
+ oncologist,
+ neurologist,
+ endocrinologist,
+ infectious_disease,
+ ]
+
+
+def example_round_robin_panel():
+ """Example with round robin speaking order."""
+ print("=== ROUND ROBIN MEDICAL PANEL ===\n")
+
+ agents = create_medical_panel()
+
+ group_chat = InteractiveGroupChat(
+ name="Medical Panel Discussion",
+ description="A collaborative panel of medical specialists discussing complex cases",
+ agents=agents,
+ speaker_function=round_robin_speaker,
+ interactive=False,
+ )
+
+ # Case 1: Complex patient with multiple conditions
+ case1 = """CASE PRESENTATION:
+ A 65-year-old male with Type 2 diabetes, hypertension, and recent diagnosis of
+ stage 3 colon cancer presents with chest pain and shortness of breath.
+ ECG shows ST-segment elevation. Recent blood work shows elevated blood glucose (280 mg/dL)
+ and signs of infection (WBC 15,000, CRP elevated).
+
+ @cardiologist @oncologist @endocrinologist @infectious_disease please provide your
+ assessment and treatment recommendations for this complex case."""
+
+ response = group_chat.run(case1)
+ print(f"Response:\n{response}\n")
+ print("=" * 80 + "\n")
+
+
+if __name__ == "__main__":
+ example_round_robin_panel()
diff --git a/examples/multi_agent/interactive_groupchat_examples/speaker_function_examples.py b/examples/multi_agent/interactive_groupchat_examples/speaker_function_examples.py
new file mode 100644
index 00000000..49a4ac40
--- /dev/null
+++ b/examples/multi_agent/interactive_groupchat_examples/speaker_function_examples.py
@@ -0,0 +1,72 @@
+"""
+InteractiveGroupChat Speaker Function Examples
+
+This example demonstrates how to use different speaker functions in the InteractiveGroupChat:
+- Round Robin: Agents speak in a fixed order, cycling through the list
+- Random: Agents speak in random order
+- Priority: Agents speak based on priority weights
+- Custom: User-defined speaker functions
+
+The example also shows how agents can mention each other using @agent_name syntax.
+"""
+
+from swarms import Agent
+from swarms.structs.interactive_groupchat import (
+ InteractiveGroupChat,
+ random_speaker,
+)
+
+
+def create_example_agents():
+ """Create example agents for demonstration."""
+
+ # Create agents with different expertise
+ analyst = Agent(
+ agent_name="analyst",
+ system_prompt="You are a data analyst. You excel at analyzing data, creating charts, and providing insights.",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ researcher = Agent(
+ agent_name="researcher",
+ system_prompt="You are a research specialist. You are great at gathering information, fact-checking, and providing detailed research.",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ writer = Agent(
+ agent_name="writer",
+ system_prompt="You are a content writer. You excel at writing clear, engaging content and summarizing information.",
+ model_name="claude-3-5-sonnet-20240620",
+ streaming_on=True,
+ print_on=True,
+ )
+
+ return [analyst, researcher, writer]
+
+
+def example_random():
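+    """Demonstrate the random speaker function on a simple marketing task."""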
+ agents = create_example_agents()
+
+ # Create group chat with random speaker function
+ group_chat = InteractiveGroupChat(
+ name="Random Team",
+ description="A team that speaks in random order",
+ agents=agents,
+ speaker_function=random_speaker,
+ interactive=False,
+ )
+
+ # Test the random behavior
+ task = "Let's create a marketing strategy for a personal healthcare ai consumer assistant app. @analyst @researcher @writer please contribute."
+
+ response = group_chat.run(task)
+ print(f"Response:\n{response}\n")
+
+
+if __name__ == "__main__":
+ # example_round_robin()
+ example_random()
diff --git a/examples/multi_agent/interactive_groupchat_examples/stream_example.py b/examples/multi_agent/interactive_groupchat_examples/stream_example.py
new file mode 100644
index 00000000..8517ff57
--- /dev/null
+++ b/examples/multi_agent/interactive_groupchat_examples/stream_example.py
@@ -0,0 +1,19 @@
+from swarms import Agent
+
+# Enable real-time streaming
+agent = Agent(
+ agent_name="StoryAgent",
+ # model_name="groq/llama-3.1-8b-instant",
+ model_name="claude-3-5-sonnet-20240620",
+ # system_prompt="",
+ streaming_on=True, # 🔥 This enables real streaming!
+ max_loops=1,
+ print_on=True,
+ output_type="all",
+)
+
+# This will now stream in real-time with beautiful UI!
+response = agent.run(
+ "Tell me a detailed story about Humanity colonizing the stars"
+)
+print(response)
diff --git a/examples/multi_agent/mar/multi_agent_router_minimal.py b/examples/multi_agent/mar/multi_agent_router_minimal.py
index 898ccef3..d72ceea6 100644
--- a/examples/multi_agent/mar/multi_agent_router_minimal.py
+++ b/examples/multi_agent/mar/multi_agent_router_minimal.py
@@ -18,8 +18,8 @@ router = SwarmRouter(
name="multi-agent-router-demo",
description="Routes tasks to the most suitable agent",
agents=agents,
- swarm_type="MultiAgentRouter"
+ swarm_type="MultiAgentRouter",
)
result = router.run("Write a function that adds two numbers")
-print(result)
\ No newline at end of file
+print(result)
diff --git a/examples/multi_agent/mixture_of_agents_example.py b/examples/multi_agent/mixture_of_agents_example.py
new file mode 100644
index 00000000..12bbf837
--- /dev/null
+++ b/examples/multi_agent/mixture_of_agents_example.py
@@ -0,0 +1,80 @@
+from swarms import Agent, MixtureOfAgents
+
+# Agent 1: Risk Metrics Calculator
+risk_metrics_agent = Agent(
+ agent_name="Risk-Metrics-Calculator",
+ agent_description="Calculates key risk metrics like VaR, Sharpe ratio, and volatility",
+ system_prompt="""You are a risk metrics specialist. Calculate and explain:
+ - Value at Risk (VaR)
+ - Sharpe ratio
+ - Volatility
+ - Maximum drawdown
+ - Beta coefficient
+
+ Provide clear, numerical results with brief explanations.""",
+ max_loops=1,
+ # model_name="gpt-4o-mini",
+ random_model_enabled=True,
+ dynamic_temperature_enabled=True,
+ output_type="str-all-except-first",
+ max_tokens=4096,
+)
+
+# Agent 2: Portfolio Risk Analyzer
+portfolio_risk_agent = Agent(
+ agent_name="Portfolio-Risk-Analyzer",
+ agent_description="Analyzes portfolio diversification and concentration risk",
+ system_prompt="""You are a portfolio risk analyst. Focus on:
+ - Portfolio diversification analysis
+ - Concentration risk assessment
+ - Correlation analysis
+ - Sector/asset allocation risk
+ - Liquidity risk evaluation
+
+ Provide actionable insights for risk reduction.""",
+ max_loops=1,
+ # model_name="gpt-4o-mini",
+ random_model_enabled=True,
+ dynamic_temperature_enabled=True,
+ output_type="str-all-except-first",
+ max_tokens=4096,
+)
+
+# Agent 3: Market Risk Monitor
+market_risk_agent = Agent(
+ agent_name="Market-Risk-Monitor",
+ agent_description="Monitors market conditions and identifies risk factors",
+ system_prompt="""You are a market risk monitor. Identify and assess:
+ - Market volatility trends
+ - Economic risk factors
+ - Geopolitical risks
+ - Interest rate risks
+ - Currency risks
+
+ Provide current risk alerts and trends.""",
+ max_loops=1,
+ # model_name="gpt-4o-mini",
+ random_model_enabled=True,
+ dynamic_temperature_enabled=True,
+ output_type="str-all-except-first",
+ max_tokens=4096,
+)
+
+
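+# Combine the three risk agents into a single-layer mixture-of-agents swarm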
+swarm = MixtureOfAgents(
+ agents=[
+ risk_metrics_agent,
+ portfolio_risk_agent,
+ market_risk_agent,
+ ],
+ layers=1,
+ max_loops=1,
+ output_type="final",
+)
+
+
+out = swarm.run(
+ "Calculate VaR and Sharpe ratio for a portfolio with 15% annual return and 20% volatility"
+)
+
+print(out)
diff --git a/examples/multi_modal/multimodal_example.py b/examples/multi_modal/multimodal_example.py
new file mode 100644
index 00000000..19a2c996
--- /dev/null
+++ b/examples/multi_modal/multimodal_example.py
@@ -0,0 +1,87 @@
+import logging
+from swarms.structs import Agent
+from swarms.prompts.logistics import (
+ Quality_Control_Agent_Prompt,
+)
+
+# Set up debug logging
+logging.basicConfig(level=logging.DEBUG)
+
+# Image for analysis
+# factory_image="image.png" # normal image of a factory
+
+factory_image = "image2.png" # image of a burning factory
+
+
+def security_analysis(danger_level: str) -> str:
+ """
+ Analyzes the security danger level and returns an appropriate response.
+
+ Args:
+ danger_level (str): The level of danger to analyze.
+ Must be one of: "low", "medium", "high"
+
+ Returns:
+ str: A detailed security analysis based on the danger level.
+ """
+ if danger_level == "low":
+ return """SECURITY ANALYSIS - LOW DANGER LEVEL:
+ ✅ Environment appears safe and well-controlled
+ ✅ Standard security measures are adequate
+ ✅ Low risk of accidents or security breaches
+ ✅ Normal operational protocols can continue
+
+ Recommendations: Maintain current security standards and continue regular monitoring."""
+
+ elif danger_level == "medium":
+ return """SECURITY ANALYSIS - MEDIUM DANGER LEVEL:
+ ⚠️ Moderate security concerns identified
+ ⚠️ Enhanced monitoring recommended
+ ⚠️ Some security measures may need strengthening
+ ⚠️ Risk of incidents exists but manageable
+
+ Recommendations: Implement additional safety protocols, increase surveillance, and conduct safety briefings."""
+
+ elif danger_level == "high":
+ return """SECURITY ANALYSIS - HIGH DANGER LEVEL:
+ 🚨 CRITICAL SECURITY CONCERNS DETECTED
+ 🚨 Immediate action required
+ 🚨 High risk of accidents or security breaches
+ 🚨 Operations may need to be suspended
+
+ Recommendations: Immediate intervention required, evacuate if necessary, implement emergency protocols, and conduct thorough security review."""
+
+ else:
+ return f"ERROR: Invalid danger level '{danger_level}'. Must be 'low', 'medium', or 'high'."
+
+
+# Custom system prompt that includes tool usage
+custom_system_prompt = f"""
+{Quality_Control_Agent_Prompt}
+
+You have access to tools that can help you with your analysis. When you need to perform a security analysis, you MUST use the security_analysis function with an appropriate danger level (low, medium, or high) based on your observations.
+
+Always use the available tools when they are relevant to the task. If you determine there is any level of danger or security concern, call the security_analysis function with the appropriate danger level.
+"""
+
+# Quality control agent
+quality_control_agent = Agent(
+ agent_name="Quality Control Agent",
+ agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.",
+ # model_name="anthropic/claude-3-opus-20240229",
+ model_name="gpt-4o",
+ system_prompt=custom_system_prompt,
+ multi_modal=True,
+ max_loops=1,
+ output_type="str-all-except-first",
+ # tools_list_dictionary=[schema],
+ tools=[security_analysis],
+)
+
+
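+# Run the image analysis; the prompt instructs the agent to call security_analysis with a danger level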
+response = quality_control_agent.run(
+ task="Analyze the image and then perform a security analysis. Based on what you see in the image, determine if there is a low, medium, or high danger level and call the security_analysis function with that danger level.",
+ img=factory_image,
+)
+
+# The response is already printed by the agent's pretty_print method
diff --git a/examples/tools/multii_tool_use/new_tools_examples.py b/examples/single_agent/tools/new_tools_examples.py
similarity index 96%
rename from examples/tools/multii_tool_use/new_tools_examples.py
rename to examples/single_agent/tools/new_tools_examples.py
index 86eb450b..542f6ea4 100644
--- a/examples/tools/multii_tool_use/new_tools_examples.py
+++ b/examples/single_agent/tools/new_tools_examples.py
@@ -176,15 +176,15 @@ agent = Agent(
max_loops=1,
model_name="gpt-4o-mini",
dynamic_temperature_enabled=True,
- output_type="all",
+ output_type="final",
+ tool_call_summary=True,
tools=[
get_coin_price,
- get_top_cryptocurrencies,
],
+ # output_raw_json_from_tool_call=True,
)
-print(
- agent.run(
- "What is the price of Bitcoin? what are the top 5 cryptocurrencies by market cap?"
- )
-)
+out = agent.run("What is the price of Bitcoin?")
+
+print(out)
+print(f"Output type: {type(out)}")
diff --git a/examples/single_agent/tools/swarms_tools_example.py b/examples/single_agent/tools/swarms_tools_example.py
new file mode 100644
index 00000000..9aec628f
--- /dev/null
+++ b/examples/single_agent/tools/swarms_tools_example.py
@@ -0,0 +1,20 @@
+from swarms import Agent
+from swarms.prompts.finance_agent_sys_prompt import (
+ FINANCIAL_AGENT_SYS_PROMPT,
+)
+from swarms_tools import yahoo_finance_api
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ agent_description="Personal finance advisor agent",
+ system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ max_loops=1,
+ model_name="gpt-4o-mini",
+ tools=[yahoo_finance_api],
+ dynamic_temperature_enabled=True,
+)
+
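+# Ask the agent to fetch data for both tickers via the yahoo_finance_api tool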
+agent.run(
+ "Fetch the data for nvidia and tesla both with the yahoo finance api"
+)
diff --git a/examples/single_agent/tools/tools_examples/swarms_tools_example.py b/examples/single_agent/tools/tools_examples/swarms_tools_example.py
deleted file mode 100644
index 9171bb30..00000000
--- a/examples/single_agent/tools/tools_examples/swarms_tools_example.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from swarms import Agent
-from swarms.prompts.finance_agent_sys_prompt import (
- FINANCIAL_AGENT_SYS_PROMPT,
-)
-from swarms_tools import (
- fetch_stock_news,
- coin_gecko_coin_api,
- fetch_htx_data,
-)
-
-# Initialize the agent
-agent = Agent(
- agent_name="Financial-Analysis-Agent",
- agent_description="Personal finance advisor agent",
- system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
- max_loops=1,
- model_name="gpt-4o",
- dynamic_temperature_enabled=True,
- user_name="swarms_corp",
- retry_attempts=3,
- context_length=8192,
- return_step_meta=False,
- output_type="str", # "json", "dict", "csv" OR "string" "yaml" and
- auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
- max_tokens=4000, # max output tokens
- saved_state_path="agent_00.json",
- interactive=False,
- tools=[fetch_stock_news, coin_gecko_coin_api, fetch_htx_data],
-)
-
-agent.run("Analyze the $swarms token on htx")
diff --git a/examples/single_agent/vision_examples/anthropic_vision_test.py b/examples/single_agent/vision/anthropic_vision_test.py
similarity index 89%
rename from examples/single_agent/vision_examples/anthropic_vision_test.py
rename to examples/single_agent/vision/anthropic_vision_test.py
index 6d24faeb..583ac9cf 100644
--- a/examples/single_agent/vision_examples/anthropic_vision_test.py
+++ b/examples/single_agent/vision/anthropic_vision_test.py
@@ -1,4 +1,4 @@
-from swarms.structs import Agent
+from swarms import Agent
from swarms.prompts.logistics import (
Quality_Control_Agent_Prompt,
)
@@ -16,6 +16,8 @@ quality_control_agent = Agent(
multi_modal=True,
max_loops=1,
output_type="str-all-except-first",
+ dynamic_temperature_enabled=True,
+ stream=True,
)
response = quality_control_agent.run(
diff --git a/examples/single_agent/vision/burning_image.jpg b/examples/single_agent/vision/burning_image.jpg
new file mode 100644
index 00000000..7d260cf3
Binary files /dev/null and b/examples/single_agent/vision/burning_image.jpg differ
diff --git a/examples/single_agent/vision_examples/image.jpg b/examples/single_agent/vision/image.jpg
similarity index 100%
rename from examples/single_agent/vision_examples/image.jpg
rename to examples/single_agent/vision/image.jpg
diff --git a/examples/single_agent/vision_examples/image_batch_example.py b/examples/single_agent/vision/image_batch_example.py
similarity index 100%
rename from examples/single_agent/vision_examples/image_batch_example.py
rename to examples/single_agent/vision/image_batch_example.py
diff --git a/examples/single_agent/vision/multiple_image_processing.py b/examples/single_agent/vision/multiple_image_processing.py
new file mode 100644
index 00000000..da67bb94
--- /dev/null
+++ b/examples/single_agent/vision/multiple_image_processing.py
@@ -0,0 +1,25 @@
+from swarms import Agent
+
+
+# Image for analysis
+factory_image = "image.jpg"
+
+# Quality control agent
+quality_control_agent = Agent(
+ agent_name="Quality Control Agent",
+ agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.",
+ model_name="claude-3-5-sonnet-20240620",
+ # system_prompt=Quality_Control_Agent_Prompt,
+ # multi_modal=True,
+ max_loops=1,
+ output_type="str-all-except-first",
+ summarize_multiple_images=True,
+)
+
+
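+# Analyze both factory images in a single call (summarize_multiple_images=True is set above)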
+response = quality_control_agent.run(
+ task="Analyze our factories images and provide a detailed health report for each factory.",
+ imgs=[factory_image, "burning_image.jpg"],
+)
+
+print(response)
diff --git a/examples/single_agent/vision_examples/vision_test.py b/examples/single_agent/vision/vision_test.py
similarity index 100%
rename from examples/single_agent/vision_examples/vision_test.py
rename to examples/single_agent/vision/vision_test.py
diff --git a/vision_and_tools.py b/examples/single_agent/vision/vision_tools.py
similarity index 65%
rename from vision_and_tools.py
rename to examples/single_agent/vision/vision_tools.py
index e330a66d..f0ec102e 100644
--- a/vision_and_tools.py
+++ b/examples/single_agent/vision/vision_tools.py
@@ -8,7 +8,7 @@ from swarms.prompts.logistics import (
factory_image = "image.jpg"
-def security_analysis(danger_level: str = None) -> str:
+def security_analysis(danger_level: str) -> str:
"""
Analyzes the security danger level and returns an appropriate response.
@@ -39,8 +39,13 @@ def security_analysis(danger_level: str = None) -> str:
return "Unknown danger level"
-# schema = BaseTool().function_to_dict(security_analysis)
-# print(json.dumps(schema, indent=4))
+custom_system_prompt = f"""
+{Quality_Control_Agent_Prompt}
+
+You have access to tools that can help you with your analysis. When you need to perform a security analysis, you MUST use the security_analysis function with an appropriate danger level (low, medium, or high) based on your observations.
+
+Always use the available tools when they are relevant to the task. If you determine there is any level of danger or security concern, call the security_analysis function with the appropriate danger level.
+"""
# Quality control agent
quality_control_agent = Agent(
@@ -48,7 +53,7 @@ quality_control_agent = Agent(
agent_description="A quality control agent that analyzes images and provides a detailed report on the quality of the product in the image.",
# model_name="anthropic/claude-3-opus-20240229",
model_name="gpt-4o-mini",
- system_prompt=Quality_Control_Agent_Prompt,
+ system_prompt=custom_system_prompt,
multi_modal=True,
max_loops=1,
output_type="str-all-except-first",
@@ -58,8 +63,6 @@ quality_control_agent = Agent(
response = quality_control_agent.run(
- task="what is in the image?",
- # img=factory_image,
+ task="Analyze the image and then perform a security analysis. Based on what you see in the image, determine if there is a low, medium, or high danger level and call the security_analysis function with that danger level",
+ img=factory_image,
)
-
-print(response)
diff --git a/examples/structs/graph_workflow_basic.py b/examples/structs/graph_workflow_basic.py
index 2d31ed1f..a51bcc5f 100644
--- a/examples/structs/graph_workflow_basic.py
+++ b/examples/structs/graph_workflow_basic.py
@@ -31,9 +31,15 @@ if __name__ == "__main__":
# Build the workflow graph
wf_graph = GraphWorkflow()
- wf_graph.add_node(Node(id="agent1", type=NodeType.AGENT, agent=agent1))
- wf_graph.add_node(Node(id="agent2", type=NodeType.AGENT, agent=agent2))
- wf_graph.add_node(Node(id="task1", type=NodeType.TASK, callable=sample_task))
+ wf_graph.add_node(
+ Node(id="agent1", type=NodeType.AGENT, agent=agent1)
+ )
+ wf_graph.add_node(
+ Node(id="agent2", type=NodeType.AGENT, agent=agent2)
+ )
+ wf_graph.add_node(
+ Node(id="task1", type=NodeType.TASK, callable=sample_task)
+ )
wf_graph.add_edge(Edge(source="agent1", target="task1"))
wf_graph.add_edge(Edge(source="agent2", target="task1"))
@@ -47,4 +53,3 @@ if __name__ == "__main__":
# Execute the graph
results = wf_graph.run()
print("Execution results:", results)
-
diff --git a/agent_as_tools.py b/examples/tools/agent_as_tools.py
similarity index 100%
rename from agent_as_tools.py
rename to examples/tools/agent_as_tools.py
diff --git a/examples/ui/chat.py b/examples/ui/chat.py
new file mode 100644
index 00000000..35e6e25b
--- /dev/null
+++ b/examples/ui/chat.py
@@ -0,0 +1,11 @@
+import gradio as gr
+import ai_gradio
+
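+# Build a Gradio interface for the Swarms finance agent via the ai_gradio registry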
+finance_interface = gr.load(
+ name="swarms:gpt-4-turbo",
+ src=ai_gradio.registry,
+ agent_name="Stock-Analysis-Agent",
+ title="Finance Assistant",
+ description="Expert financial analysis and advice tailored to your investment needs.",
+)
+finance_interface.launch()
diff --git a/pyproject.toml b/pyproject.toml
index 4eeee107..671cbb1e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
-version = "7.8.6"
+version = "7.9.3"
description = "Swarms - TGSC"
license = "MIT"
authors = ["Kye Gomez "]
diff --git a/realtor_agent.py b/realtor_agent.py
new file mode 100644
index 00000000..a2c9700c
--- /dev/null
+++ b/realtor_agent.py
@@ -0,0 +1,155 @@
+import http.client
+import json
+import os
+from typing import List
+
+from dotenv import load_dotenv
+
+from swarms import Agent
+
+load_dotenv()
+
+
+def get_realtor_data_from_one_source(location: str):
+ """
+ Fetch rental property data from the Realtor API for a specified location.
+
+ Args:
+ location (str): The location to search for rental properties (e.g., "Menlo Park, CA")
+
+ Returns:
+ str: JSON-formatted string containing rental property data
+
+ Raises:
+ http.client.HTTPException: If the API request fails
+ json.JSONDecodeError: If the response cannot be parsed as JSON
+ """
+ conn = http.client.HTTPSConnection(
+ "realtor-search.p.rapidapi.com"
+ )
+
+ headers = {
+ "x-rapidapi-key": os.getenv("RAPID_API_KEY"),
+ "x-rapidapi-host": "realtor-search.p.rapidapi.com",
+ }
+
+ # URL encode the location parameter
+ encoded_location = location.replace(" ", "%20").replace(
+ ",", "%2C"
+ )
+ endpoint = f"/properties/search-rent?location=city%3A{encoded_location}&sortBy=best_match"
+
+ conn.request(
+ "GET",
+ endpoint,
+ headers=headers,
+ )
+
+ res = conn.getresponse()
+ data = res.read()
+
+ # return "chicken data"
+
+ # Parse and format the response
+ try:
+ json_data = json.loads(data.decode("utf-8"))
+ # Return formatted string instead of raw JSON
+ return json.dumps(json_data, indent=2)
+ except json.JSONDecodeError:
+ return "Error: Could not parse API response"
+
+
+def get_realtor_data_from_multiple_sources(
+ locations: List[str],
+) -> List[str]:
+ """
+    Fetch rental property data for each location in a list.
+
+    Args:
+        locations (List[str]): Locations to search for rental properties (e.g., ["Menlo Park, CA", "Palo Alto, CA"])
+ """
+ output = []
+ for location in locations:
+ data = get_realtor_data_from_one_source(location)
+ output.append(data)
+ return output
+
+
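+# Agent that turns the raw rental listings into a formatted report of the best properties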
+agent = Agent(
+ agent_name="Rental-Property-Specialist",
+ system_prompt="""
+ You are an expert rental property specialist with deep expertise in real estate analysis and tenant matching. Your core responsibilities include:
+1. Property Analysis & Evaluation
+ - Analyze rental property features and amenities
+ - Evaluate location benefits and drawbacks
+ - Assess property condition and maintenance needs
+ - Compare rental rates with market standards
+ - Review lease terms and conditions
+ - Identify potential red flags or issues
+
+2. Location Assessment
+ - Analyze neighborhood safety and demographics
+ - Evaluate proximity to amenities (schools, shopping, transit)
+ - Research local market trends and development plans
+ - Consider noise levels and traffic patterns
+ - Assess parking availability and restrictions
+ - Review zoning regulations and restrictions
+
+3. Financial Analysis
+ - Calculate price-to-rent ratios
+ - Analyze utility costs and included services
+ - Evaluate security deposit requirements
+ - Consider additional fees (pet rent, parking, etc.)
+ - Compare with similar properties in the area
+ - Assess potential for rent increases
+
+4. Tenant Matching
+ - Match properties to tenant requirements
+ - Consider commute distances
+ - Evaluate pet policies and restrictions
+ - Assess lease term flexibility
+ - Review application requirements
+ - Consider special accommodations needed
+
+5. Documentation & Compliance
+ - Review lease agreement terms
+ - Verify property certifications
+ - Check compliance with local regulations
+ - Assess insurance requirements
+ - Review maintenance responsibilities
+ - Document property condition
+
+When analyzing properties, always consider:
+- Value for money
+- Location quality
+- Property condition
+- Lease terms fairness
+- Safety and security
+- Maintenance and management quality
+- Future market potential
+- Tenant satisfaction factors
+
+When you receive property data:
+1. Parse and analyze the JSON data
+2. Format the output in a clear, readable way
+3. Focus on properties under $3,000
+4. Include key details like:
+ - Property name/address
+ - Price
+ - Number of beds/baths
+ - Square footage
+ - Key amenities
+ - Links to listings
+5. Sort properties by price (lowest to highest)
+
+Provide clear, objective analysis while maintaining professional standards and ethical considerations.""",
+ model_name="claude-3-sonnet-20240229",
+ max_loops=1,
+ print_on=True,
+ streaming_on=True,
+)
+
+
+agent.run(
+ f"Create a report on the best properties in Menlo Park, CA, showcase, the name, description, price, and link to the property: {get_realtor_data_from_one_source('Menlo Park, CA')}"
+)
diff --git a/sequential_workflow_example.py b/sequential_workflow_example.py
new file mode 100644
index 00000000..84a3ad49
--- /dev/null
+++ b/sequential_workflow_example.py
@@ -0,0 +1,63 @@
+from swarms import Agent, SequentialWorkflow
+
+# Initialize market research agent
+market_researcher = Agent(
+ agent_name="Market-Researcher",
+ system_prompt="""You are a market research specialist. Your tasks include:
+ 1. Analyzing market trends and patterns
+ 2. Identifying market opportunities and threats
+ 3. Evaluating competitor strategies
+ 4. Assessing customer needs and preferences
+ 5. Providing actionable market insights""",
+ model_name="claude-3-sonnet-20240229",
+ max_loops=1,
+ temperature=0.7,
+ streaming_on=True,
+)
+
+# Initialize financial analyst agent
+financial_analyst = Agent(
+ agent_name="Financial-Analyst",
+ system_prompt="""You are a financial analysis expert. Your responsibilities include:
+ 1. Analyzing financial statements
+ 2. Evaluating investment opportunities
+ 3. Assessing risk factors
+ 4. Providing financial forecasts
+ 5. Recommending financial strategies""",
+ model_name="claude-3-sonnet-20240229",
+ max_loops=1,
+ streaming_on=True,
+ temperature=0.7,
+)
+
+# Initialize technical analyst agent
+technical_analyst = Agent(
+ agent_name="Technical-Analyst",
+ system_prompt="""You are a technical analysis specialist. Your focus areas include:
+ 1. Analyzing price patterns and trends
+ 2. Evaluating technical indicators
+ 3. Identifying support and resistance levels
+ 4. Assessing market momentum
+ 5. Providing trading recommendations""",
+ model_name="claude-3-sonnet-20240229",
+ max_loops=1,
+ temperature=0.7,
+ streaming_on=True,
+)
+
+# Create list of agents
+agents = [market_researcher, financial_analyst, technical_analyst]
+
+
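+# Chain the three analysts in a sequential workflow (agents run one after another)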
+router = SequentialWorkflow(
+ name="market-analysis-router",
+ agents=agents,
+ max_loops=1,
+ # output_type="all",
+)
+
+result = router.run(
+ "Analyze Tesla (TSLA) stock from market, financial, and technical perspectives"
+)
+
+print(result)
diff --git a/swarm_router.py b/swarm_router.py
new file mode 100644
index 00000000..efe3bcb5
--- /dev/null
+++ b/swarm_router.py
@@ -0,0 +1,68 @@
+from swarms import Agent, SwarmRouter
+
+# Initialize market research agent
+market_researcher = Agent(
+ agent_name="Market-Researcher",
+ system_prompt="""You are a market research specialist. Your tasks include:
+ 1. Analyzing market trends and patterns
+ 2. Identifying market opportunities and threats
+ 3. Evaluating competitor strategies
+ 4. Assessing customer needs and preferences
+ 5. Providing actionable market insights""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+)
+
+# Initialize financial analyst agent
+financial_analyst = Agent(
+ agent_name="Financial-Analyst",
+ system_prompt="""You are a financial analysis expert. Your responsibilities include:
+ 1. Analyzing financial statements
+ 2. Evaluating investment opportunities
+ 3. Assessing risk factors
+ 4. Providing financial forecasts
+ 5. Recommending financial strategies""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+)
+
+# Initialize technical analyst agent
+technical_analyst = Agent(
+ agent_name="Technical-Analyst",
+ system_prompt="""You are a technical analysis specialist. Your focus areas include:
+ 1. Analyzing price patterns and trends
+ 2. Evaluating technical indicators
+ 3. Identifying support and resistance levels
+ 4. Assessing market momentum
+ 5. Providing trading recommendations""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+)
+
+# Create list of agents
+agents = [market_researcher, financial_analyst, technical_analyst]
+
+# # Initialize the concurrent workflow
+# workflow = ConcurrentWorkflow(
+# name="market-analysis-workflow",
+# agents=agents,
+# max_loops=1,
+# )
+
+# # Run the workflow
+# result = workflow.run(
+# "Analyze Tesla (TSLA) stock from market, financial, and technical perspectives"
+# )
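+# Route the task to a ConcurrentWorkflow so all three analysts run in parallel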
+router = SwarmRouter(
+ name="market-analysis-router",
+ swarm_type="ConcurrentWorkflow",
+ agents=agents,
+ max_loops=1,
+ # output_type="all",
+)
+
+result = router.run(
+ "Analyze Tesla (TSLA) stock from market, financial, and technical perspectives"
+)
+
+print(result)
diff --git a/swarms/agents/reasoning_agents.py b/swarms/agents/reasoning_agents.py
index 2c964822..5b579bb8 100644
--- a/swarms/agents/reasoning_agents.py
+++ b/swarms/agents/reasoning_agents.py
@@ -72,8 +72,13 @@ class ReasoningAgentRouter:
self.output_type = output_type
self.num_knowledge_items = num_knowledge_items
self.memory_capacity = memory_capacity
+
- # Initialize agent factory mapping dictionary
+        # Initialize the agent factory mapping dictionary
self._initialize_agent_factories()
@@ -86,19 +91,18 @@ class ReasoningAgentRouter:
# ReasoningDuo factory method
"reasoning-duo": self._create_reasoning_duo,
"reasoning-agent": self._create_reasoning_duo,
-
- # SelfConsistencyAgent factory method
+
+ # SelfConsistencyAgent factory methods
"self-consistency": self._create_consistency_agent,
"consistency-agent": self._create_consistency_agent,
-
- # IREAgent factory method
+ # IREAgent factory methods
+
"ire": self._create_ire_agent,
"ire-agent": self._create_ire_agent,
-
# Other agent type factory methods
"AgentJudge": self._create_agent_judge,
"ReflexionAgent": self._create_reflexion_agent,
- "GKPAgent": self._create_gkp_agent
+ "GKPAgent": self._create_gkp_agent,
}
@@ -134,7 +138,7 @@ class ReasoningAgentRouter:
system_prompt=self.system_prompt,
output_type=self.output_type,
)
-
+
def _create_consistency_agent(self):
"""Create an agent instance for the SelfConsistencyAgent type"""
return SelfConsistencyAgent(
@@ -146,7 +150,7 @@ class ReasoningAgentRouter:
num_samples=self.num_samples,
output_type=self.output_type,
)
-
+
def _create_ire_agent(self):
"""Create an agent instance for the IREAgent type"""
return IREAgent(
@@ -158,7 +162,7 @@ class ReasoningAgentRouter:
max_iterations=self.num_samples,
output_type=self.output_type,
)
-
+
def _create_agent_judge(self):
"""Create an agent instance for the AgentJudge type"""
return AgentJudge(
@@ -167,7 +171,7 @@ class ReasoningAgentRouter:
system_prompt=self.system_prompt,
max_loops=self.max_loops,
)
-
+
def _create_reflexion_agent(self):
"""Create an agent instance for the ReflexionAgent type"""
return ReflexionAgent(
@@ -176,7 +180,7 @@ class ReasoningAgentRouter:
model_name=self.model_name,
max_loops=self.max_loops,
)
-
+
def _create_gkp_agent(self):
"""Create an agent instance for the GKPAgent type"""
return GKPAgent(
@@ -195,6 +199,7 @@ class ReasoningAgentRouter:
Returns:
The selected reasoning swarm instance.
"""
+
# Generate cache key
cache_key = self._get_cache_key()
@@ -202,6 +207,7 @@ class ReasoningAgentRouter:
if cache_key in self.__class__._agent_cache:
return self.__class__._agent_cache[cache_key]
+
try:
# Use the factory method to create a new instance
agent = self.agent_factories[self.swarm_type]()
@@ -249,10 +255,13 @@ class ReasoningAgentRouter:
return results
+
@classmethod
def clear_cache(cls):
"""
Clear the agent instance cache.
Use this when you need to free memory or force the creation of new instances.
"""
- cls._agent_cache.clear()
\ No newline at end of file
+        cls._agent_cache.clear()
diff --git a/swarms/cli/onboarding_process.py b/swarms/cli/onboarding_process.py
index e279d9e3..8085c688 100644
--- a/swarms/cli/onboarding_process.py
+++ b/swarms/cli/onboarding_process.py
@@ -7,7 +7,6 @@ from swarms.utils.loguru_logger import initialize_logger
from swarms.telemetry.main import (
- capture_system_data,
log_agent_data,
)
@@ -34,7 +33,6 @@ class OnboardingProcess:
cache_save_path (str): The path where user data is cached for reliability.
"""
self.user_data: Dict[str, str] = {}
- self.system_data: Dict[str, str] = capture_system_data()
self.auto_save_path = auto_save_path
self.cache_save_path = cache_save_path
self.load_existing_data()
@@ -85,7 +83,7 @@ class OnboardingProcess:
while attempt < retry_attempts:
try:
- combined_data = {**self.user_data, **self.system_data}
+ combined_data = {**self.user_data}
log_agent_data(combined_data)
return # Exit the function if saving was successful
except Exception as e:
diff --git a/swarms/prompts/collaborative_prompts.py b/swarms/prompts/collaborative_prompts.py
new file mode 100644
index 00000000..4a04245b
--- /dev/null
+++ b/swarms/prompts/collaborative_prompts.py
@@ -0,0 +1,177 @@
+def get_multi_agent_collaboration_prompt_one(agents_in_swarm: str):
+ MULTI_AGENT_COLLABORATION_PROMPT_ONE = f"""
+ You are all operating within a multi-agent collaborative system. Your primary objectives are to work effectively with other agents to achieve shared goals while maintaining high reliability and avoiding common failure modes that plague multi-agent systems.
+
+ {agents_in_swarm}
+
+ ## Fundamental Collaboration Principles
+
+ ### 1. Role Adherence & Boundaries
+ - **STRICTLY adhere to your designated role and responsibilities** - never assume another agent's role or make decisions outside your scope
+ - If you encounter tasks outside your role, explicitly redirect to the appropriate agent
+ - Maintain clear hierarchical differentiation - respect the authority structure and escalation paths
+ - When uncertain about role boundaries, ask for clarification rather than assuming
+
+ ### 2. Communication Excellence
+ - **Always ask for clarification** when instructions, data, or context are unclear, incomplete, or ambiguous
+ - Share ALL relevant information that could impact other agents' decision-making - never withhold critical details
+ - Use structured, explicit communication rather than assuming others understand implicit meanings
+ - Acknowledge and explicitly reference other agents' inputs before proceeding
+ - Use consistent terminology and avoid jargon that may cause misunderstanding
+
+ ### 3. Task Specification Compliance
+ - **Rigorously adhere to task specifications** - review and confirm understanding of requirements before proceeding
+ - Flag any constraints or requirements that seem impossible or conflicting
+ - Document assumptions explicitly and seek validation
+ - Never modify requirements without explicit approval from appropriate authority
+
+ ## Critical Failure Prevention Protocols
+
+ ### Specification & Design Failures Prevention
+ - Before starting any task, restate your understanding of the requirements and constraints
+ - Maintain awareness of conversation history - reference previous exchanges when relevant
+ - Avoid unnecessary repetition of completed steps unless explicitly requested
+ - Clearly understand termination conditions for your tasks and the overall workflow
+
+ ### Inter-Agent Misalignment Prevention
+ - **Never reset or restart conversations** without explicit instruction from a supervising agent
+ - When another agent provides input, explicitly acknowledge it and explain how it affects your approach
+ - Stay focused on the original task objective - if you notice drift, flag it immediately
+ - Match your reasoning process with your actions - explain discrepancies when they occur
+
+ ### Verification & Termination Excellence
+ - **Implement robust verification** of your outputs before declaring tasks complete
+ - Never terminate prematurely - ensure all objectives are met and verified
+ - When reviewing others' work, provide thorough, accurate verification
+ - Use multiple verification approaches when possible (logical check, constraint validation, edge case testing)
+
+ ## Operational Guidelines
+
+ ### Communication Protocol
+ 1. **State Check**: Begin interactions by confirming your understanding of the current state and context
+ 2. **Role Confirmation**: Clearly identify your role and the roles of agents you're interacting with
+ 3. **Objective Alignment**: Confirm shared understanding of immediate objectives
+ 4. **Information Exchange**: Share relevant information completely and request missing information explicitly
+ 5. **Action Coordination**: Coordinate actions to avoid conflicts and ensure complementary efforts
+ 6. **Verification**: Verify outcomes and seek validation when appropriate
+ 7. **Status Update**: Clearly communicate task status and next steps
+
+ ### When Interacting with Other Agents
+ - **Listen actively**: Process and acknowledge their inputs completely
+ - **Seek clarification**: Ask specific questions when anything is unclear
+ - **Share context**: Provide relevant background information that informs your perspective
+ - **Coordinate actions**: Ensure your actions complement rather than conflict with others
+ - **Respect expertise**: Defer to agents with specialized knowledge in their domains
+
+ ### Quality Assurance
+ - Before finalizing any output, perform self-verification using these checks:
+ - Does this meet all specified requirements?
+ - Are there any edge cases or constraints I haven't considered?
+ - Is this consistent with information provided by other agents?
+ - Have I clearly communicated my reasoning and any assumptions?
+
+ ### Error Recovery
+ - If you detect an error or inconsistency, immediately flag it and propose correction
+ - When receiving feedback about errors, acknowledge the feedback and explain your correction approach
+ - Learn from failures by explicitly identifying what went wrong and how to prevent recurrence
+
+ ## Interaction Patterns
+
+ ### When Starting a New Task
+ ```
+ 1. Acknowledge the task assignment
+ 2. Confirm role boundaries and responsibilities
+ 3. Identify required inputs and information sources
+ 4. State assumptions and seek validation
+ 5. Outline approach and request feedback
+ 6. Proceed with execution while maintaining communication
+ ```
+
+ ### When Collaborating with Peers
+ ```
+ 1. Establish communication channel and protocols
+ 2. Share relevant context and constraints
+ 3. Coordinate approaches to avoid duplication or conflicts
+ 4. Maintain regular status updates
+ 5. Verify integrated outputs collectively
+ ```
+
+ ### When Escalating Issues
+ ```
+ 1. Clearly describe the issue and its implications
+ 2. Provide relevant context and attempted solutions
+ 3. Specify what type of resolution or guidance is needed
+ 4. Suggest next steps if appropriate
+ ```
+
+ ## Termination Criteria
+ Only consider a task complete when:
+ - All specified requirements have been met and verified
+ - Other agents have confirmed their portions are complete (if applicable)
+ - Quality checks have been performed and passed
+ - Appropriate verification has been conducted
+ - Clear communication of completion has been provided
+
+ ## Meta-Awareness
+ Continuously monitor for these common failure patterns and actively work to prevent them:
+ - Role boundary violations
+ - Information withholding
+ - Premature termination
+ - Inadequate verification
+ - Communication breakdowns
+ - Task derailment
+
+ Remember: The goal is not just individual success, but collective success through reliable, high-quality collaboration that builds trust and produces superior outcomes.
+ """
+
+ return MULTI_AGENT_COLLABORATION_PROMPT_ONE
+
+
+MULTI_AGENT_COLLABORATION_PROMPT_TWO = """
+# Compact Multi-Agent Collaboration Prompt
+
+## Core Directives
+
+You are an AI agent in a multi-agent system. Follow these essential collaboration protocols:
+
+### Role & Boundaries
+- **Stay in your designated role** - never assume another agent's responsibilities
+- When tasks fall outside your scope, redirect to the appropriate agent
+- Respect hierarchy and authority structures
+
+### Communication Requirements
+- **Always ask for clarification** when anything is unclear or incomplete
+- **Share all relevant information** - never withhold details that could impact others
+- **Acknowledge other agents' inputs** explicitly before proceeding
+- Use clear, structured communication
+
+### Task Execution
+- **Confirm task requirements** before starting - restate your understanding
+- **Adhere strictly to specifications** - flag conflicts or impossibilities
+- **Maintain conversation context** - reference previous exchanges when relevant
+- **Verify your work thoroughly** before declaring completion
+
+### Collaboration Protocol
+1. **State Check**: Confirm current context and your role
+2. **Clarify**: Ask specific questions about unclear elements
+3. **Coordinate**: Align actions with other agents to avoid conflicts
+4. **Verify**: Check outputs meet requirements and constraints
+5. **Communicate**: Clearly report status and next steps
+
+### Termination Criteria
+Only mark tasks complete when:
+- All requirements verified as met
+- Quality checks passed
+- Other agents confirm their portions (if applicable)
+- Clear completion communication provided
+
+### Failure Prevention
+Actively watch for and prevent:
+- Role boundary violations
+- Information withholding
+- Premature task termination
+- Inadequate verification
+- Task objective drift
+
+**Remember**: Success requires reliable collaboration, not just individual performance.
+"""
diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py
index d47f6f67..e40d22ce 100644
--- a/swarms/structs/__init__.py
+++ b/swarms/structs/__init__.py
@@ -83,7 +83,13 @@ from swarms.structs.swarming_architectures import (
staircase_swarm,
star_swarm,
)
-from swarms.structs.interactive_groupchat import InteractiveGroupChat
+from swarms.structs.interactive_groupchat import (
+ InteractiveGroupChat,
+ round_robin_speaker,
+ random_speaker,
+ priority_speaker,
+ random_dynamic_speaker,
+)
__all__ = [
"Agent",
@@ -156,4 +162,8 @@ __all__ = [
"find_agent_by_name",
"run_agent",
"InteractiveGroupChat",
+ "round_robin_speaker",
+ "random_speaker",
+ "priority_speaker",
+ "random_dynamic_speaker",
]
diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py
index dce3c2c2..1b30644c 100644
--- a/swarms/structs/agent.py
+++ b/swarms/structs/agent.py
@@ -5,6 +5,7 @@ import os
import random
import threading
import time
+import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
@@ -56,7 +57,6 @@ from swarms.tools.base_tool import BaseTool
from swarms.tools.py_func_to_openai_func_str import (
convert_multiple_functions_to_openai_function_schema,
)
-from swarms.utils.any_to_str import any_to_str
from swarms.utils.data_to_text import data_to_text
from swarms.utils.file_processing import create_file_in_folder
from swarms.utils.formatter import formatter
@@ -72,8 +72,10 @@ from swarms.prompts.max_loop_prompt import generate_reasoning_prompt
from swarms.prompts.safety_prompt import SAFETY_PROMPT
from swarms.structs.ma_utils import set_random_models_for_agents
from swarms.tools.mcp_client_call import (
+ execute_multiple_tools_on_multiple_mcp_servers_sync,
execute_tool_call_simple,
get_mcp_tools_sync,
+ get_tools_for_multiple_mcp_servers,
)
from swarms.schemas.mcp_schemas import (
MCPConnection,
@@ -81,10 +83,10 @@ from swarms.schemas.mcp_schemas import (
from swarms.utils.index import (
exists,
format_data_structure,
- format_dict_to_string,
)
from swarms.schemas.conversation_schema import ConversationSchema
from swarms.utils.output_types import OutputType
+from swarms.utils.retry_func import retry_function
def stop_when_repeats(response: str) -> bool:
@@ -153,7 +155,12 @@ class AgentLLMInitializationError(AgentError):
pass
-# [FEAT][AGENT]
+class AgentToolExecutionError(AgentError):
+ """Exception raised when the agent fails to execute a tool. Check the tool's configuration and availability."""
+
+ pass
+
+
class Agent:
"""
Agent is the backbone to connect LLMs with tools and long term memory. Agent also provides the ability to
@@ -287,6 +294,11 @@ class Agent:
>>> print(response)
>>> # Generate a report on the financials.
+ >>> # Real-time streaming example
+ >>> agent = Agent(llm=llm, max_loops=1, streaming_on=True)
+ >>> response = agent.run("Tell me a long story.") # Will stream in real-time
+ >>> print(response) # Final complete response
+
"""
def __init__(
@@ -403,7 +415,7 @@ class Agent:
llm_args: dict = None,
load_state_path: str = None,
role: agent_roles = "worker",
- no_print: bool = False,
+ print_on: bool = True,
tools_list_dictionary: Optional[List[Dict[str, Any]]] = None,
mcp_url: Optional[Union[str, MCPConnection]] = None,
mcp_urls: List[str] = None,
@@ -417,6 +429,10 @@ class Agent:
llm_base_url: Optional[str] = None,
llm_api_key: Optional[str] = None,
rag_config: Optional[RAGConfig] = None,
+ tool_call_summary: bool = True,
+ output_raw_json_from_tool_call: bool = False,
+ summarize_multiple_images: bool = False,
+ tool_retry_attempts: int = 3,
*args,
**kwargs,
):
@@ -445,7 +461,10 @@ class Agent:
self.system_prompt = system_prompt
self.agent_name = agent_name
self.agent_description = agent_description
- self.saved_state_path = f"{self.agent_name}_{generate_api_key(prefix='agent-')}_state.json"
+ self.saved_state_path = (
+ f"{generate_api_key(prefix='agent-')}_state.json"
+ )
self.autosave = autosave
self.response_filters = []
self.self_healing_enabled = self_healing_enabled
@@ -534,7 +553,7 @@ class Agent:
self.llm_args = llm_args
self.load_state_path = load_state_path
self.role = role
- self.no_print = no_print
+ self.print_on = print_on
self.tools_list_dictionary = tools_list_dictionary
self.mcp_url = mcp_url
self.mcp_urls = mcp_urls
@@ -548,6 +567,12 @@ class Agent:
self.llm_base_url = llm_base_url
self.llm_api_key = llm_api_key
self.rag_config = rag_config
+ self.tool_call_summary = tool_call_summary
+ self.output_raw_json_from_tool_call = (
+ output_raw_json_from_tool_call
+ )
+ self.summarize_multiple_images = summarize_multiple_images
+ self.tool_retry_attempts = tool_retry_attempts
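+
+        # Illustrative construction sketch for the new options assigned above
+        # (model name and values are hypothetical, not taken from this patch):
+        #   Agent(model_name="gpt-4o-mini", tool_call_summary=False,
+        #         summarize_multiple_images=True, tool_retry_attempts=5)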
# self.short_memory = self.short_memory_init()
@@ -592,6 +617,11 @@ class Agent:
if self.long_term_memory is not None:
self.rag_handler = self.rag_setup_handling()
+ if self.dashboard is True:
+ self.print_dashboard()
+
+ self.reliability_check()
+
def rag_setup_handling(self):
return AgentRAGHandler(
long_term_memory=self.long_term_memory,
@@ -615,16 +645,20 @@ class Agent:
)
self.short_memory.add(
- role=f"{self.agent_name}",
- content=f"Tools available: {format_data_structure(self.tools_list_dictionary)}",
+ role=self.agent_name,
+ content=self.tools_list_dictionary,
)
def short_memory_init(self):
- if (
- self.agent_name is not None
- or self.agent_description is not None
- ):
- prompt = f"\n Your Name: {self.agent_name} \n\n Your Description: {self.agent_description} \n\n {self.system_prompt}"
+        prompt = ""
+
+        # Add agent name, description, and instructions to the prompt,
+        # checking each field independently so all available ones are included
+        if self.agent_name is not None:
+            prompt += f"\n Name: {self.agent_name}"
+
+        if self.agent_description is not None:
+            prompt += f"\n Description: {self.agent_description}"
+
+        if self.system_prompt is not None:
+            prompt += f"\n Instructions: {self.system_prompt}"
-        else:
-            prompt = self.system_prompt
@@ -685,6 +719,10 @@ class Agent:
if exists(self.tools) and len(self.tools) >= 2:
parallel_tool_calls = True
+ elif exists(self.mcp_url) or exists(self.mcp_urls):
+ parallel_tool_calls = True
+ elif exists(self.mcp_config):
+ parallel_tool_calls = True
else:
parallel_tool_calls = False
@@ -707,7 +745,7 @@ class Agent:
parallel_tool_calls=parallel_tool_calls,
)
- elif self.mcp_url is not None:
+ elif exists(self.mcp_url) or exists(self.mcp_urls):
self.llm = LiteLLM(
**common_args,
tools_list_dictionary=self.add_mcp_tools_to_memory(),
@@ -745,15 +783,28 @@ class Agent:
tools = get_mcp_tools_sync(server_path=self.mcp_url)
elif exists(self.mcp_config):
tools = get_mcp_tools_sync(connection=self.mcp_config)
- logger.info(f"Tools: {tools}")
+ # logger.info(f"Tools: {tools}")
+ elif exists(self.mcp_urls):
+ tools = get_tools_for_multiple_mcp_servers(
+ urls=self.mcp_urls,
+ output_type="str",
+ )
+ # print(f"Tools: {tools} for {self.mcp_urls}")
else:
raise AgentMCPConnectionError(
"mcp_url must be either a string URL or MCPConnection object"
)
- self.pretty_print(
- f"✨ [SYSTEM] Successfully integrated {len(tools)} MCP tools into agent: {self.agent_name} | Status: ONLINE | Time: {time.strftime('%H:%M:%S')} ✨",
- loop_count=0,
- )
+
+ if (
+ exists(self.mcp_url)
+ or exists(self.mcp_urls)
+ or exists(self.mcp_config)
+ ):
+ if self.print_on is True:
+ self.pretty_print(
+ f"✨ [SYSTEM] Successfully integrated {len(tools)} MCP tools into agent: {self.agent_name} | Status: ONLINE | Time: {time.strftime('%H:%M:%S')} ✨",
+ loop_count=0,
+ )
return tools
except AgentMCPConnectionError as e:
@@ -779,6 +830,29 @@ class Agent:
return json.loads(self.tools_list_dictionary)
+ def check_model_supports_utilities(self, img: str = None) -> bool:
+ """
+ Check if the current model supports vision capabilities.
+
+ Args:
+ img (str, optional): Image input to check vision support for. Defaults to None.
+
+        Returns:
+            bool: True if an image is provided and the model supports vision,
+                False if no image is provided.
+
+        Raises:
+            ValueError: If an image is provided but the model does not support vision.
+ """
+ from litellm.utils import supports_vision
+
+ # Only check vision support if an image is provided
+ if img is not None:
+ out = supports_vision(self.model_name)
+ if not out:
+ raise ValueError(
+ f"Model {self.model_name} does not support vision capabilities. Please use a vision-enabled model."
+ )
+ return out
+
+ return False
+
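+    # Illustrative usage sketch for the vision check above (file name is hypothetical):
+    #   agent.check_model_supports_utilities(img="chart.png")
+    #   returns True for a vision-capable model, raises ValueError otherwise;
+    #   returns False when no image is passed.
+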
def check_if_no_prompt_then_autogenerate(self, task: str = None):
"""
Checks if auto_generate_prompt is enabled and generates a prompt by combining agent name, description and system prompt if available.
@@ -832,26 +906,6 @@ class Agent:
self.feedback.append(feedback)
logging.info(f"Feedback received: {feedback}")
- def agent_initialization(self):
- try:
- logger.info(
- f"Initializing Autonomous Agent {self.agent_name}..."
- )
- self.check_parameters()
- logger.info(
- f"{self.agent_name} Initialized Successfully."
- )
- logger.info(
- f"Autonomous Agent {self.agent_name} Activated, all systems operational. Executing task..."
- )
-
- if self.dashboard is True:
- self.print_dashboard()
-
- except ValueError as e:
- logger.info(f"Error initializing agent: {e}")
- raise e
-
def _check_stopping_condition(self, response: str) -> bool:
"""Check if the stopping condition is met."""
try:
@@ -883,59 +937,44 @@ class Agent:
)
def print_dashboard(self):
- """Print dashboard"""
- formatter.print_panel(
- f"Initializing Agent: {self.agent_name}"
- )
-
- data = self.to_dict()
-
- # Beautify the data
- # data = json.dumps(data, indent=4)
- # json_data = json.dumps(data, indent=4)
-
+ tools_activated = True if self.tools is not None else False
+ mcp_activated = True if self.mcp_url is not None else False
formatter.print_panel(
f"""
- Agent Dashboard
- --------------------------------------------
-
- Agent {self.agent_name} is initializing for {self.max_loops} with the following configuration:
- ----------------------------------------
-
- Agent Configuration:
- Configuration: {data}
-
- ----------------------------------------
- """,
+
+ 🤖 Agent {self.agent_name} Dashboard 🚀
+ ════════════════════════════════════════════════════════════
+
+ 🎯 Agent {self.agent_name} Status: ONLINE & OPERATIONAL
+ ────────────────────────────────────────────────────────────
+
+ 📋 Agent Identity:
+ • 🏷️ Name: {self.agent_name}
+ • 📝 Description: {self.agent_description}
+
+ ⚙️ Technical Specifications:
+ • 🤖 Model: {self.model_name}
+ • 🔄 Internal Loops: {self.max_loops}
+ • 🎯 Max Tokens: {self.max_tokens}
+ • 🌡️ Dynamic Temperature: {self.dynamic_temperature_enabled}
+
+ 🔧 System Modules:
+ • 🛠️ Tools Activated: {tools_activated}
+ • 🔗 MCP Activated: {mcp_activated}
+
+ ════════════════════════════════════════════════════════════
+ 🚀 Ready for Tasks 🚀
+
+ """,
+ title=f"Agent {self.agent_name} Dashboard",
)
- # Check parameters
- def check_parameters(self):
- if self.llm is None:
- raise ValueError(
- "Language model is not provided. Choose a model from the available models in swarm_models or create a class with a run(task: str) method and or a __call__ method."
- )
-
- if self.max_loops is None or self.max_loops == 0:
- raise ValueError("Max loops is not provided")
-
- if self.max_tokens == 0 or self.max_tokens is None:
- raise ValueError("Max tokens is not provided")
-
- if self.context_length == 0 or self.context_length is None:
- raise ValueError("Context length is not provided")
-
# Main function
def _run(
self,
task: Optional[Union[str, Any]] = None,
img: Optional[str] = None,
- speech: Optional[str] = None,
- video: Optional[str] = None,
- is_last: Optional[bool] = False,
print_task: Optional[bool] = False,
- generate_speech: Optional[bool] = False,
- correct_answer: Optional[str] = None,
*args,
**kwargs,
) -> Any:
@@ -960,9 +999,12 @@ class Agent:
self.check_if_no_prompt_then_autogenerate(task)
+ if img is not None:
+ self.check_model_supports_utilities(img=img)
+
self.short_memory.add(role=self.user_name, content=task)
- if self.plan_enabled:
+ if self.plan_enabled is True:
self.plan(task)
# Set the loop count
@@ -983,8 +1025,8 @@ class Agent:
# Print the request
if print_task is True:
formatter.print_panel(
- f"\n User: {task}",
- f"Task Request for {self.agent_name}",
+ content=f"\n User: {task}",
+ title=f"Task Request for {self.agent_name}",
)
while (
@@ -1029,65 +1071,73 @@ class Agent:
)
self.memory_query(task_prompt)
- # # Generate response using LLM
- # response_args = (
- # (task_prompt, *args)
- # if img is None
- # else (task_prompt, img, *args)
- # )
-
- # # Call the LLM
- # response = self.call_llm(
- # *response_args, **kwargs
- # )
+ if img is not None:
+ response = self.call_llm(
+ task=task_prompt,
+ img=img,
+ current_loop=loop_count,
+ *args,
+ **kwargs,
+ )
+ else:
+ response = self.call_llm(
+ task=task_prompt,
+ current_loop=loop_count,
+ *args,
+ **kwargs,
+ )
- response = self.call_llm(
- task=task_prompt, img=img, *args, **kwargs
- )
+                    # Streamed output is rendered inside call_llm, so the
+                    # printing block below is skipped when streaming_on is True
if exists(self.tools_list_dictionary):
if isinstance(response, BaseModel):
response = response.model_dump()
- # # Convert to a str if the response is not a str
- # if self.mcp_url is None or self.tools is None:
+ # Parse the response from the agent with the output type
response = self.parse_llm_output(response)
self.short_memory.add(
role=self.agent_name,
- content=format_dict_to_string(response),
+ content=response,
)
# Print
- self.pretty_print(response, loop_count)
-
- # # Output Cleaner
- # self.output_cleaner_op(response)
-
- # Check and execute tools
+ if self.print_on is True:
+ if isinstance(response, list):
+ self.pretty_print(
+ f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n {format_data_structure(response)} ",
+ loop_count,
+ )
+ elif self.streaming_on is True:
+ pass
+ else:
+ self.pretty_print(
+ response, loop_count
+ )
+
+ # Check and execute callable tools
if exists(self.tools):
-
- self.execute_tools(
- response=response,
- loop_count=loop_count,
- )
-
- if exists(self.mcp_url):
- self.mcp_tool_handling(
+ self.tool_execution_retry(
response, loop_count
)
- if exists(self.mcp_url) and exists(
- self.tools
+ # Handle MCP tools
+ if (
+ exists(self.mcp_url)
+ or exists(self.mcp_config)
+ or exists(self.mcp_urls)
):
- self.mcp_tool_handling(
- response, loop_count
- )
-
- self.execute_tools(
- response=response,
- loop_count=loop_count,
- )
+ # Only handle MCP tools if response is not None
+ if response is not None:
+ self.mcp_tool_handling(
+ response=response,
+ current_loop=loop_count,
+ )
+ else:
+ logger.warning(
+ f"LLM returned None response in loop {loop_count}, skipping MCP tool handling"
+ )
self.sentiment_and_evaluator(response)
@@ -1101,8 +1151,7 @@ class Agent:
self.save()
logger.error(
- f"Attempt {attempt+1}: Error generating"
- f" response: {e}"
+                        f"Attempt {attempt+1}/{self.retry_attempts}: Error generating response in loop {loop_count} for agent '{self.agent_name}': {str(e)}"
)
attempt += 1
@@ -1124,13 +1173,19 @@ class Agent:
self.stopping_condition is not None
and self._check_stopping_condition(response)
):
- logger.info("Stopping condition met.")
+ logger.info(
+ f"Agent '{self.agent_name}' stopping condition met. "
+ f"Loop: {loop_count}, Response length: {len(str(response)) if response else 0}"
+ )
break
elif (
self.stopping_func is not None
and self.stopping_func(response)
):
- logger.info("Stopping function met.")
+ logger.info(
+ f"Agent '{self.agent_name}' stopping function condition met. "
+ f"Loop: {loop_count}, Response length: {len(str(response)) if response else 0}"
+ )
break
if self.interactive:
@@ -1142,7 +1197,10 @@ class Agent:
user_input.lower()
== self.custom_exit_command.lower()
):
- print("Exiting as per user request.")
+ self.pretty_print(
+ "Exiting as per user request.",
+ loop_count=loop_count,
+ )
break
self.short_memory.add(
@@ -1174,14 +1232,27 @@ class Agent:
self._handle_run_error(error)
def __handle_run_error(self, error: any):
log_agent_data(self.to_dict())
if self.autosave is True:
self.save()
- logger.info(
- f"Error detected running your agent {self.agent_name} \n Error {error} \n Optimize your input parameters and or add an issue on the swarms github and contact our team on discord for support ;) "
+ # Get detailed error information
+ error_type = type(error).__name__
+ error_message = str(error)
+ traceback_info = traceback.format_exc()
+
+ logger.error(
+ f"Error detected running your agent {self.agent_name}\n"
+ f"Error Type: {error_type}\n"
+ f"Error Message: {error_message}\n"
+ f"Traceback:\n{traceback_info}\n"
+ f"Agent State: {self.to_dict()}\n"
+            f"Optimize your input parameters and/or open an issue on the swarms GitHub, or contact our team on Discord for support ;)"
)
+
raise error
def _handle_run_error(self, error: any):
@@ -1243,12 +1314,6 @@ class Agent:
self,
task: Optional[str] = None,
img: Optional[str] = None,
- is_last: bool = False,
- device: str = "cpu", # gpu
- device_id: int = 1,
- all_cores: bool = True,
- do_not_use_cluster_ops: bool = True,
- all_gpus: bool = False,
*args,
**kwargs,
) -> Any:
@@ -1257,10 +1322,6 @@ class Agent:
Args:
task (Optional[str]): The task to be performed. Defaults to None.
img (Optional[str]): The image to be processed. Defaults to None.
- is_last (bool): Indicates if this is the last task. Defaults to False.
- device (str): The device to use for execution. Defaults to "cpu".
- device_id (int): The ID of the GPU to use if device is set to "gpu". Defaults to 0.
- all_cores (bool): If True, uses all available CPU cores. Defaults to True.
"""
try:
return self.run(
@@ -1275,33 +1336,12 @@ class Agent:
def receive_message(
self, agent_name: str, task: str, *args, **kwargs
):
- return self.run(
- task=f"From {agent_name}: {task}", *args, **kwargs
+ improved_prompt = (
+ f"You have received a message from agent '{agent_name}':\n\n"
+ f'"{task}"\n\n'
+ "Please process this message and respond appropriately."
)
-
- def dict_to_csv(self, data: dict) -> str:
- """
- Convert a dictionary to a CSV string.
-
- Args:
- data (dict): The dictionary to convert.
-
- Returns:
- str: The CSV string representation of the dictionary.
- """
- import csv
- import io
-
- output = io.StringIO()
- writer = csv.writer(output)
-
- # Write header
- writer.writerow(data.keys())
-
- # Write values
- writer.writerow(data.values())
-
- return output.getvalue()
+ return self.run(task=improved_prompt, *args, **kwargs)
# def parse_and_execute_tools(self, response: str, *args, **kwargs):
# max_retries = 3 # Maximum number of retries
@@ -1351,26 +1391,49 @@ class Agent:
def plan(self, task: str, *args, **kwargs) -> None:
"""
- Plan the task
+ Create a strategic plan for executing the given task.
+
+ This method generates a step-by-step plan by combining the conversation
+ history, planning prompt, and current task. The plan is then added to
+ the agent's short-term memory for reference during execution.
Args:
- task (str): The task to plan
+ task (str): The task to create a plan for
+ *args: Additional positional arguments passed to the LLM
+ **kwargs: Additional keyword arguments passed to the LLM
+
+ Returns:
+ None: The plan is stored in memory rather than returned
+
+ Raises:
+ Exception: If planning fails, the original exception is re-raised
"""
try:
+ # Get the current conversation history
+ history = self.short_memory.get_str()
+
+ plan_prompt = f"Create a comprehensive step-by-step plan to complete the following task: \n\n {task}"
+
+ # Construct the planning prompt by combining history, planning prompt, and task
if exists(self.planning_prompt):
- # Join the plan and the task
- planning_prompt = f"{self.planning_prompt} {task}"
- plan = self.llm(planning_prompt, *args, **kwargs)
- logger.info(f"Plan: {plan}")
+ planning_prompt = f"{history}\n\n{self.planning_prompt}\n\nTask: {task}"
+ else:
+ planning_prompt = (
+ f"{history}\n\n{plan_prompt}\n\nTask: {task}"
+ )
- # Add the plan to the memory
- self.short_memory.add(
- role=self.agent_name, content=str(plan)
- )
+ # Generate the plan using the LLM
+ plan = self.llm.run(task=planning_prompt, *args, **kwargs)
+
+ # Store the generated plan in short-term memory
+ self.short_memory.add(role=self.agent_name, content=plan)
return None
+
except Exception as error:
- logger.error(f"Error planning task: {error}")
+ logger.error(
+ f"Failed to create plan for task '{task}': {error}"
+ )
raise error
async def run_concurrent(self, task: str, *args, **kwargs):
@@ -1453,6 +1516,55 @@ class Agent:
logger.error(f"Error running batched tasks: {error}")
raise
+ def reliability_check(self):
+ from litellm.utils import (
+ supports_function_calling,
+ get_max_tokens,
+ )
+ from litellm import model_list
+
+ if self.system_prompt is None:
+ logger.warning(
+ "The system prompt is not set. Please set a system prompt for the agent to improve reliability."
+ )
+
+ if self.agent_name is None:
+ logger.warning(
+ "The agent name is not set. Please set an agent name to improve reliability."
+ )
+
+ if self.max_loops is None or self.max_loops == 0:
+ raise AgentInitializationError(
+ "Max loops is not provided or is set to 0. Please set max loops to 1 or more."
+ )
+
+ if self.max_tokens is None or self.max_tokens == 0:
+ self.max_tokens = get_max_tokens(self.model_name)
+
+ if self.context_length is None or self.context_length == 0:
+ raise AgentInitializationError(
+ "Context length is not provided. Please set a valid context length."
+ )
+
+ if self.tools_list_dictionary is not None:
+ if not supports_function_calling(self.model_name):
+ raise AgentInitializationError(
+ f"The model '{self.model_name}' does not support function calling. Please use a model that supports function calling."
+ )
+
+ try:
+ if self.max_tokens > get_max_tokens(self.model_name):
+ raise AgentInitializationError(
+ f"Max tokens is set to {self.max_tokens}, but the model '{self.model_name}' only supports {get_max_tokens(self.model_name)} tokens. Please set max tokens to {get_max_tokens(self.model_name)} or less."
+ )
+ except Exception:
+ pass
+
+ if self.model_name not in model_list:
+ logger.warning(
+ f"The model '{self.model_name}' is not supported. Please use a supported model, or override the model name with the 'llm' parameter, which should be a class with a 'run(task: str)' method or a '__call__' method."
+ )
+
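+    # Summary sketch of the checks above: a missing system_prompt or agent_name only
+    # warns; max_loops of 0/None or a missing context_length raises
+    # AgentInitializationError; max_tokens falls back to litellm's
+    # get_max_tokens(model_name) when unset.
+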
def save(self, file_path: str = None) -> None:
"""
Save the agent state to a file using SafeStateManager with atomic writing
@@ -2390,7 +2502,12 @@ class Agent:
return None
def call_llm(
- self, task: str, img: Optional[str] = None, *args, **kwargs
+ self,
+ task: str,
+ img: Optional[str] = None,
+ current_loop: int = 0,
+ *args,
+ **kwargs,
) -> str:
"""
Calls the appropriate method on the `llm` object based on the given task.
@@ -2411,15 +2528,86 @@ class Agent:
ValueError: If task is empty.
"""
+ # Filter out is_last from kwargs if present
+ if "is_last" in kwargs:
+ del kwargs["is_last"]
+
try:
- if img is not None:
- out = self.llm.run(
- task=task, img=img, *args, **kwargs
- )
+ # Set streaming parameter in LLM if streaming is enabled
+ if self.streaming_on and hasattr(self.llm, "stream"):
+ original_stream = self.llm.stream
+ self.llm.stream = True
+
+ if img is not None:
+ streaming_response = self.llm.run(
+ task=task, img=img, *args, **kwargs
+ )
+ else:
+ streaming_response = self.llm.run(
+ task=task, *args, **kwargs
+ )
+
+ # If we get a streaming response, handle it with the new streaming panel
+ if hasattr(
+ streaming_response, "__iter__"
+ ) and not isinstance(streaming_response, str):
+ # Check print_on parameter for different streaming behaviors
+ if self.print_on is False:
+ # Silent streaming - no printing, just collect chunks
+ chunks = []
+ for chunk in streaming_response:
+ if (
+ hasattr(chunk, "choices")
+ and chunk.choices[0].delta.content
+ ):
+ content = chunk.choices[
+ 0
+ ].delta.content
+ chunks.append(content)
+ complete_response = "".join(chunks)
+ else:
+ # Collect chunks for conversation saving
+ collected_chunks = []
+
+ def on_chunk_received(chunk: str):
+ """Callback to collect chunks as they arrive"""
+ collected_chunks.append(chunk)
+ # Optional: Save each chunk to conversation in real-time
+ # This creates a more detailed conversation history
+ if self.verbose:
+ logger.debug(
+ f"Streaming chunk received: {chunk[:50]}..."
+ )
+
+ # Use the streaming panel to display and collect the response
+ complete_response = formatter.print_streaming_panel(
+ streaming_response,
+ title=f"🤖 Agent: {self.agent_name} Loops: {current_loop}",
+ style=None, # Use random color like non-streaming approach
+ collect_chunks=True,
+ on_chunk_callback=on_chunk_received,
+ )
+
+ # Restore original stream setting
+ self.llm.stream = original_stream
+
+ # Return the complete response for further processing
+ return complete_response
+ else:
+ # Restore original stream setting
+ self.llm.stream = original_stream
+ return streaming_response
else:
- out = self.llm.run(task=task, *args, **kwargs)
+ # Non-streaming call
+ if img is not None:
+ out = self.llm.run(
+ task=task, img=img, *args, **kwargs
+ )
+ else:
+ out = self.llm.run(task=task, *args, **kwargs)
+
+ return out
- return out
except AgentLLMError as e:
logger.error(
f"Error calling LLM: {e}. Task: {task}, Args: {args}, Kwargs: {kwargs}"
@@ -2445,7 +2633,8 @@ class Agent:
self,
task: Optional[Union[str, Any]] = None,
img: Optional[str] = None,
- scheduled_run_date: Optional[datetime] = None,
+ imgs: Optional[List[str]] = None,
+ correct_answer: Optional[str] = None,
*args,
**kwargs,
) -> Any:
@@ -2459,11 +2648,7 @@ class Agent:
Args:
task (Optional[str], optional): The task to be executed. Defaults to None.
img (Optional[str], optional): The image to be processed. Defaults to None.
- device (str, optional): The device to use for execution. Defaults to "cpu".
- device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0.
- all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
- scheduled_run_date (Optional[datetime], optional): The date and time to schedule the task. Defaults to None.
- do_not_use_cluster_ops (bool, optional): If True, does not use cluster ops. Defaults to False.
+ imgs (Optional[List[str]], optional): The list of images to be processed. Defaults to None.
*args: Additional positional arguments to be passed to the execution method.
**kwargs: Additional keyword arguments to be passed to the execution method.
@@ -2476,21 +2661,28 @@ class Agent:
"""
if not isinstance(task, str):
- task = any_to_str(task)
-
- if scheduled_run_date:
- while datetime.now() < scheduled_run_date:
- time.sleep(
- 1
- ) # Sleep for a short period to avoid busy waiting
+ task = format_data_structure(task)
try:
- output = self._run(
- task=task,
- img=img,
- *args,
- **kwargs,
- )
+ if exists(imgs):
+ output = self.run_multiple_images(
+ task=task, imgs=imgs, *args, **kwargs
+ )
+ elif exists(correct_answer):
+ output = self.continuous_run_with_answer(
+ task=task,
+ img=img,
+ correct_answer=correct_answer,
+ *args,
+ **kwargs,
+ )
+ else:
+ output = self._run(
+ task=task,
+ img=img,
+ *args,
+ **kwargs,
+ )
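+
+            # Illustrative dispatch sketch (paths are hypothetical): passing `imgs`
+            # routes to run_multiple_images, while `correct_answer` routes to
+            # continuous_run_with_answer.
+            #   agent.run(task="Describe each chart", imgs=["q1.png", "q2.png"])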
return output
@@ -2630,21 +2822,23 @@ class Agent:
return self.role
def pretty_print(self, response: str, loop_count: int):
- if self.no_print is False:
- if self.streaming_on is True:
- # self.stream_response(response)
- formatter.print_panel_token_by_token(
- f"{self.agent_name}: {response}",
- title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
- )
- elif self.no_print is True:
- pass
- else:
- # logger.info(f"Response: {response}")
- formatter.print_panel(
- f"{self.agent_name}: {response}",
- f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
- )
+        formatter.print_panel(
+            response,
+            f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
+        )
def parse_llm_output(self, response: Any):
"""Parse and standardize the output from the LLM.
@@ -2670,7 +2864,7 @@ class Agent:
) # Convert other dicts to string
elif isinstance(response, BaseModel):
- out = response.model_dump()
+ response = response.model_dump()
# Handle List[BaseModel] responses
elif (
@@ -2680,14 +2874,9 @@ class Agent:
):
return [item.model_dump() for item in response]
- elif isinstance(response, list):
- out = format_data_structure(response)
- else:
- out = str(response)
-
- return out
+ return response
        except Exception as e:
logger.error(f"Error parsing LLM output: {e}")
raise ValueError(
f"Failed to parse LLM output: {type(response)}"
@@ -2744,17 +2933,30 @@ class Agent:
connection=self.mcp_config,
)
)
+ elif exists(self.mcp_urls):
+ tool_response = execute_multiple_tools_on_multiple_mcp_servers_sync(
+ responses=response,
+ urls=self.mcp_urls,
+ output_type="json",
+ )
+ # tool_response = format_data_structure(tool_response)
+
+ # print(f"Multiple MCP Tool Response: {tool_response}")
else:
raise AgentMCPConnectionError(
"mcp_url must be either a string URL or MCPConnection object"
)
# Get the text content from the tool response
- text_content = (
- tool_response.content[0].text
- if tool_response.content
- else str(tool_response)
- )
+ # execute_tool_call_simple returns a string directly, not an object with content attribute
+ text_content = f"MCP Tool Response: \n\n {json.dumps(tool_response, indent=2)}"
+
+ if self.print_on is True:
+ formatter.print_panel(
+ content=text_content,
+ title="MCP Tool Response: 🛠️",
+ style="green",
+ )
# Add to the memory
self.short_memory.add(
@@ -2776,7 +2978,8 @@ class Agent:
# Fallback: provide a default summary
summary = "I successfully executed the MCP tool and retrieved the information above."
- self.pretty_print(summary, loop_count=current_loop)
+ if self.print_on is True:
+ self.pretty_print(summary, loop_count=current_loop)
# Add to the memory
self.short_memory.add(
@@ -2792,7 +2995,7 @@ class Agent:
temperature=self.temperature,
max_tokens=self.max_tokens,
system_prompt=self.system_prompt,
- stream=self.streaming_on,
+ stream=False, # Always disable streaming for tool summaries
tools_list_dictionary=None,
parallel_tool_calls=False,
base_url=self.llm_base_url,
@@ -2800,48 +3003,247 @@ class Agent:
)
def execute_tools(self, response: any, loop_count: int):
+ # Handle None response gracefully
+ if response is None:
+ logger.warning(
+ f"Cannot execute tools with None response in loop {loop_count}. "
+ "This may indicate the LLM did not return a valid response."
+ )
+ return
- output = (
- self.tool_struct.execute_function_calls_from_api_response(
+ try:
+ output = self.tool_struct.execute_function_calls_from_api_response(
+ response
+ )
+ except Exception as e:
+ # Retry the tool call
+ output = self.tool_struct.execute_function_calls_from_api_response(
response
)
- )
+
+ if output is None:
+ logger.error(f"Error executing tools: {e}")
+ raise e
self.short_memory.add(
role="Tool Executor",
content=format_data_structure(output),
)
- self.pretty_print(
- f"{format_data_structure(output)}",
- loop_count,
- )
+ if self.print_on is True:
+ self.pretty_print(
+ f"Tool Executed Successfully [{time.strftime('%H:%M:%S')}]",
+ loop_count,
+ )
# Now run the LLM again without tools - create a temporary LLM instance
# instead of modifying the cached one
# Create a temporary LLM instance without tools for the follow-up call
- temp_llm = self.temp_llm_instance_for_tool_summary()
+ if self.tool_call_summary is True:
+ temp_llm = self.temp_llm_instance_for_tool_summary()
+
+ tool_response = temp_llm.run(
+ f"""
+ Please analyze and summarize the following tool execution output in a clear and concise way.
+ Focus on the key information and insights that would be most relevant to the user's original request.
+ If there are any errors or issues, highlight them prominently.
+
+ Tool Output:
+ {output}
+ """
+ )
- tool_response = temp_llm.run(
- f"""
- Please analyze and summarize the following tool execution output in a clear and concise way.
- Focus on the key information and insights that would be most relevant to the user's original request.
- If there are any errors or issues, highlight them prominently.
-
- Tool Output:
+ self.short_memory.add(
+ role=self.agent_name,
+ content=tool_response,
+ )
+
+ if self.print_on is True:
+ self.pretty_print(
+ tool_response,
+ loop_count,
+ )
+
+ def list_output_types(self):
+ return OutputType
+
+ def run_multiple_images(
+ self, task: str, imgs: List[str], *args, **kwargs
+ ):
+ """
+ Run the agent with multiple images using concurrent processing.
+
+ Args:
+ task (str): The task to be performed on each image.
+ imgs (List[str]): List of image paths or URLs to process.
+ *args: Additional positional arguments to pass to the agent's run method.
+ **kwargs: Additional keyword arguments to pass to the agent's run method.
+
+ Returns:
+ List[Any]: A list of outputs generated for each image in the same order as the input images.
+
+ Examples:
+ >>> agent = Agent()
+ >>> outputs = agent.run_multiple_images(
+ ... task="Describe what you see in this image",
+ ... imgs=["image1.jpg", "image2.png", "image3.jpeg"]
+ ... )
+ >>> print(f"Processed {len(outputs)} images")
+ Processed 3 images
+
+ Raises:
+ Exception: If an error occurs while processing any of the images.
+ """
+ # Calculate number of workers as 95% of available CPU cores
+ cpu_count = os.cpu_count()
+ max_workers = max(1, int(cpu_count * 0.95))
+
+ # Use ThreadPoolExecutor for concurrent processing
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
+ # Submit all image processing tasks
+ future_to_img = {
+ executor.submit(
+ self.run, task=task, img=img, *args, **kwargs
+ ): img
+ for img in imgs
+ }
+
+ # Collect results in order
+ outputs = []
+ for future in future_to_img:
+ try:
+ output = future.result()
+ outputs.append(output)
+ except Exception as e:
+ logger.error(f"Error processing image: {e}")
+ outputs.append(
+ None
+ ) # or raise the exception based on your preference
+
+ # Combine the outputs into a single string if summarization is enabled
+ if self.summarize_multiple_images is True:
+            output = "\n".join(str(o) for o in outputs if o is not None)
+
+ prompt = f"""
+ You have already analyzed {len(outputs)} images and provided detailed descriptions for each one.
+ Now, based on your previous analysis of these images, create a comprehensive report that:
+
+ 1. Synthesizes the key findings across all images
+ 2. Identifies common themes, patterns, or relationships between the images
+ 3. Provides an overall summary that captures the most important insights
+ 4. Highlights any notable differences or contrasts between the images
+
+ Here are your previous analyses of the images:
{output}
+
+ Please create a well-structured report that brings together your insights from all {len(outputs)} images.
"""
- )
- self.short_memory.add(
- role=self.agent_name,
- content=tool_response,
- )
+ outputs = self.run(task=prompt, *args, **kwargs)
+
+ return outputs
+
+ def continuous_run_with_answer(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ correct_answer: str = None,
+ max_attempts: int = 10,
+ ):
+ """
+ Run the agent with the task until the correct answer is provided.
+
+ Args:
+            task (str): The task to be performed
+            img (Optional[str]): Optional image input forwarded to each attempt
+            correct_answer (str): The correct answer that must be found in the response
+ max_attempts (int): Maximum number of attempts before giving up (default: 10)
+
+ Returns:
+ str: The response containing the correct answer
+
+ Raises:
+ Exception: If max_attempts is reached without finding the correct answer
+ """
+ attempts = 0
+
+ while attempts < max_attempts:
+ attempts += 1
+
+ if self.verbose:
+ logger.info(
+ f"Attempt {attempts}/{max_attempts} to find correct answer"
+ )
+
+ response = self._run(task=task, img=img)
+
+ # Check if the correct answer is in the response (case-insensitive)
+ if correct_answer.lower() in response.lower():
+ if self.verbose:
+ logger.info(
+ f"Correct answer found on attempt {attempts}"
+ )
+ return response
+ else:
+ # Add feedback to help guide the agent
+ feedback = "Your previous response was incorrect. Think carefully about the question and ensure your response directly addresses what was asked."
+ self.short_memory.add(role="User", content=feedback)
+
+ if self.verbose:
+ logger.info(
+ f"Correct answer not found. Expected: '{correct_answer}'"
+ )
- self.pretty_print(
- f"{tool_response}",
- loop_count,
+ # If we reach here, we've exceeded max_attempts
+ raise Exception(
+ f"Failed to find correct answer '{correct_answer}' after {max_attempts} attempts"
)
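+
+    # Illustrative sketch (values are hypothetical): the loop above re-runs the task,
+    # injecting corrective feedback into short-term memory, until the expected answer
+    # appears or max_attempts is exhausted.
+    #   agent.continuous_run_with_answer(
+    #       task="In what year did the Apollo 11 landing happen?",
+    #       correct_answer="1969",
+    #       max_attempts=3,
+    #   )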
- def list_output_types(self):
- return OutputType
+ def tool_execution_retry(self, response: any, loop_count: int):
+ """
+ Execute tools with retry logic for handling failures.
+
+ This method attempts to execute tools based on the LLM response. If the response
+ is None, it logs a warning and skips execution. If an exception occurs during
+ tool execution, it logs the error with full traceback and retries the operation
+ using the configured retry attempts.
+
+ Args:
+ response (any): The response from the LLM that may contain tool calls to execute.
+ Can be None if the LLM failed to provide a valid response.
+ loop_count (int): The current iteration loop number for logging and debugging purposes.
+
+ Returns:
+ None
+
+ Raises:
+ Exception: Re-raises any exception that occurs during tool execution after
+ all retry attempts have been exhausted.
+
+ Note:
+ - Uses self.tool_retry_attempts for the maximum number of retry attempts
+ - Logs detailed error information including agent name and loop count
+ - Skips execution gracefully if response is None
+ """
+ try:
+ if response is not None:
+ self.execute_tools(
+ response=response,
+ loop_count=loop_count,
+ )
+ else:
+ logger.warning(
+ f"Agent '{self.agent_name}' received None response from LLM in loop {loop_count}. "
+ f"This may indicate an issue with the model or prompt. Skipping tool execution."
+ )
+ except Exception as e:
+ logger.error(
+ f"Agent '{self.agent_name}' encountered error during tool execution in loop {loop_count}: {str(e)}. "
+ f"Full traceback: {traceback.format_exc()}. "
+                f"Attempting to retry tool execution with {self.tool_retry_attempts} attempts"
+ )
+ retry_function(
+ self.execute_tools,
+ response=response,
+ loop_count=loop_count,
+ max_retries=self.tool_retry_attempts,
+ )
diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py
index c6a653ae..a3abe1eb 100644
--- a/swarms/structs/concurrent_workflow.py
+++ b/swarms/structs/concurrent_workflow.py
@@ -1,13 +1,10 @@
+import concurrent.futures
import os
-import time
-from concurrent.futures import ThreadPoolExecutor
-from functools import lru_cache
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import Callable, List, Optional, Union
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation
-from swarms.utils.formatter import formatter
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
@@ -35,9 +32,7 @@ class ConcurrentWorkflow(BaseSwarm):
-        return_str_on (bool): Flag indicating whether to return the output as a string. Defaults to False.
        auto_generate_prompts (bool): Flag indicating whether to auto-generate prompts for agents. Defaults to False.
-        return_entire_history (bool): Flag indicating whether to return the entire conversation history. Defaults to False.
- cache_size (int): The size of the cache. Defaults to 100.
- max_retries (int): The maximum number of retry attempts. Defaults to 3.
- retry_delay (float): The delay between retry attempts in seconds. Defaults to 1.0.
+
Raises:
ValueError: If the list of agents is empty or if the description is empty.
@@ -50,13 +45,7 @@ class ConcurrentWorkflow(BaseSwarm):
auto_save (bool): Flag indicating whether to automatically save the metadata.
output_type (str): The type of output format.
max_loops (int): The maximum number of loops for each agent.
- return_str_on (bool): Flag indicating whether to return the output as a string.
auto_generate_prompts (bool): Flag indicating whether to auto-generate prompts for agents.
- return_entire_history (bool): Flag indicating whether to return the entire conversation history.
- cache_size (int): The size of the cache.
- max_retries (int): The maximum number of retry attempts.
- retry_delay (float): The delay between retry attempts in seconds.
- _cache (dict): The cache for storing agent outputs.
"""
def __init__(
@@ -68,12 +57,7 @@ class ConcurrentWorkflow(BaseSwarm):
auto_save: bool = True,
output_type: str = "dict-all-except-first",
max_loops: int = 1,
- return_str_on: bool = False,
auto_generate_prompts: bool = False,
- return_entire_history: bool = False,
- cache_size: int = 100,
- max_retries: int = 3,
- retry_delay: float = 1.0,
*args,
**kwargs,
):
@@ -90,63 +74,31 @@ class ConcurrentWorkflow(BaseSwarm):
self.metadata_output_path = metadata_output_path
self.auto_save = auto_save
self.max_loops = max_loops
- self.return_str_on = return_str_on
self.auto_generate_prompts = auto_generate_prompts
- self.max_workers = os.cpu_count()
self.output_type = output_type
- self.return_entire_history = return_entire_history
- self.tasks = [] # Initialize tasks list
- self.cache_size = cache_size
- self.max_retries = max_retries
- self.retry_delay = retry_delay
- self._cache = {}
self.reliability_check()
self.conversation = Conversation()
def reliability_check(self):
try:
- formatter.print_panel(
- content=f"\n 🏷️ Name: {self.name}\n 📝 Description: {self.description}\n 🤖 Agents: {len(self.agents)}\n 🔄 Max Loops: {self.max_loops}\n ",
- title="⚙️ Concurrent Workflow Settings",
- style="bold blue",
- )
- formatter.print_panel(
- content="🔍 Starting reliability checks",
- title="🔒 Reliability Checks",
- style="bold blue",
- )
-
- if self.name is None:
- logger.error("❌ A name is required for the swarm")
+ if self.agents is None:
raise ValueError(
- "❌ A name is required for the swarm"
+ "ConcurrentWorkflow: No agents provided"
)
- if not self.agents or len(self.agents) <= 1:
- logger.error(
- "❌ The list of agents must not be empty."
- )
+ if len(self.agents) == 0:
raise ValueError(
- "❌ The list of agents must not be empty."
+ "ConcurrentWorkflow: No agents provided"
)
- if not self.description:
- logger.error("❌ A description is required.")
- raise ValueError("❌ A description is required.")
-
- formatter.print_panel(
- content="✅ Reliability checks completed successfully",
- title="🎉 Reliability Checks",
- style="bold green",
- )
-
- except ValueError as e:
- logger.error(f"❌ Reliability check failed: {e}")
- raise
+ if len(self.agents) == 1:
+ logger.warning(
+ "ConcurrentWorkflow: Only one agent provided. With ConcurrentWorkflow, you should use at least 2+ agents."
+ )
except Exception as e:
logger.error(
- f"💥 An unexpected error occurred during reliability checks: {e}"
+ f"ConcurrentWorkflow: Reliability check failed: {e}"
)
raise
@@ -163,162 +115,84 @@ class ConcurrentWorkflow(BaseSwarm):
for agent in self.agents:
agent.auto_generate_prompt = True
- @lru_cache(maxsize=100)
- def _cached_run(self, task: str, agent_id: int) -> Any:
- """Cached version of agent execution to avoid redundant computations"""
- return self.agents[agent_id].run(task=task)
-
- def _validate_input(self, task: str) -> bool:
- """Validate input task"""
- if not isinstance(task, str):
- raise ValueError("Task must be a string")
- if not task.strip():
- raise ValueError("Task cannot be empty")
- return True
-
- def _run_with_retry(
- self, agent: Agent, task: str, img: str = None
- ) -> Any:
- """Run agent with retry mechanism"""
- for attempt in range(self.max_retries):
- try:
- output = agent.run(task=task, img=img)
- self.conversation.add(agent.agent_name, output)
- return output
- except Exception as e:
- if attempt == self.max_retries - 1:
- logger.error(
- f"Error running agent {agent.agent_name} after {self.max_retries} attempts: {e}"
- )
- raise
- logger.warning(
- f"Attempt {attempt + 1} failed for agent {agent.agent_name}: {e}"
- )
- time.sleep(
- self.retry_delay * (attempt + 1)
- ) # Exponential backoff
-
- def _process_agent(
- self, agent: Agent, task: str, img: str = None
- ) -> Any:
+ def run(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
+ ):
"""
- Process a single agent with caching and error handling.
+ Executes all agents in the workflow concurrently on the given task.
Args:
- agent: The agent to process
- task: Task to execute
- img: Optional image input
+ task (str): The task to be executed by all agents.
+ img (Optional[str]): Optional image path for agents that support image input.
+ imgs (Optional[List[str]]): Optional list of image paths for agents that support multiple image inputs.
Returns:
- The agent's output
- """
- try:
- # Fast path - check cache first
- cache_key = f"{task}_{agent.agent_name}"
- if cache_key in self._cache:
- output = self._cache[cache_key]
- else:
- # Slow path - run agent and update cache
- output = self._run_with_retry(agent, task, img)
-
- if len(self._cache) >= self.cache_size:
- self._cache.pop(next(iter(self._cache)))
+ The formatted output based on the configured output_type.
- self._cache[cache_key] = output
-
- return output
- except Exception as e:
- logger.error(
- f"Error running agent {agent.agent_name}: {e}"
- )
- raise
-
- def _run(
- self, task: str, img: str = None, *args, **kwargs
- ) -> Union[Dict[str, Any], str]:
- """
- Enhanced run method with parallel execution.
+ Example:
+ >>> workflow = ConcurrentWorkflow(agents=[agent1, agent2])
+ >>> result = workflow.run("Analyze this financial data")
+ >>> print(result)
"""
- # Fast validation
- self._validate_input(task)
- self.conversation.add("User", task)
-
- try:
- # Parallel execution with optimized thread pool
- with ThreadPoolExecutor(
- max_workers=self.max_workers
- ) as executor:
- futures = [
- executor.submit(
- self._process_agent, agent, task, img
- )
- for agent in self.agents
- ]
- # Wait for all futures to complete
- for future in futures:
- future.result()
-
- except Exception as e:
- logger.error(f"An error occurred during execution: {e}")
- raise e
+ self.conversation.add(role="User", content=task)
+
+        # Use roughly 95% of the available CPU cores, with a floor of one worker
+        max_workers = max(1, int(os.cpu_count() * 0.95))
+
+ # Run agents concurrently using ThreadPoolExecutor
+ with concurrent.futures.ThreadPoolExecutor(
+ max_workers=max_workers
+ ) as executor:
+ # Submit all agent tasks and store with their index
+ future_to_agent = {
+ executor.submit(
+ agent.run, task=task, img=img, imgs=imgs
+ ): agent
+ for agent in self.agents
+ }
+
+ # Collect results and add to conversation in completion order
+ for future in concurrent.futures.as_completed(
+ future_to_agent
+ ):
+ agent = future_to_agent[future]
+ output = future.result()
+                self.conversation.add(role=agent.agent_name, content=output)
return history_output_formatter(
- self.conversation,
+ conversation=self.conversation,
type=self.output_type,
)
- def run(
+ def batch_run(
self,
- task: Optional[str] = None,
+ tasks: List[str],
img: Optional[str] = None,
- *args,
- **kwargs,
- ) -> Any:
+ imgs: Optional[List[str]] = None,
+ ):
"""
- Executes the agent's run method with parallel execution.
+ Executes the workflow on multiple tasks sequentially.
Args:
- task (Optional[str], optional): The task to be executed. Defaults to None.
- img (Optional[str], optional): The image to be processed. Defaults to None.
- *args: Additional positional arguments to be passed to the execution method.
- **kwargs: Additional keyword arguments to be passed to the execution method.
+ tasks (List[str]): List of tasks to be executed by all agents.
+ img (Optional[str]): Optional image path for agents that support image input.
+ imgs (Optional[List[str]]): Optional list of image paths for agents that support multiple image inputs.
Returns:
- Any: The result of the execution.
-
- Raises:
- ValueError: If task validation fails.
- Exception: If any other error occurs during execution.
- """
- if task is not None:
- self.tasks.append(task)
-
- try:
- outputs = self._run(task, img, *args, **kwargs)
- return outputs
- except Exception as e:
- logger.error(f"An error occurred during execution: {e}")
- raise e
+ List of results, one for each task.
- def run_batched(self, tasks: List[str]) -> Any:
- """
- Enhanced batched execution
+ Example:
+ >>> workflow = ConcurrentWorkflow(agents=[agent1, agent2])
+ >>> tasks = ["Task 1", "Task 2", "Task 3"]
+ >>> results = workflow.batch_run(tasks)
+ >>> print(len(results)) # 3
"""
- if not tasks:
- raise ValueError("Tasks list cannot be empty")
-
- return [self.run(task) for task in tasks]
-
- def clear_cache(self):
- """Clear the task cache"""
- self._cache.clear()
-
- def get_cache_stats(self) -> Dict[str, int]:
- """Get cache statistics"""
- return {
- "cache_size": len(self._cache),
- "max_cache_size": self.cache_size,
- }
+ return [
+ self.run(task=task, img=img, imgs=imgs) for task in tasks
+ ]
# if __name__ == "__main__":
diff --git a/swarms/structs/conversation.py b/swarms/structs/conversation.py
index 3b4052a1..82493f38 100644
--- a/swarms/structs/conversation.py
+++ b/swarms/structs/conversation.py
@@ -221,27 +221,6 @@ class Conversation(BaseStructure):
):
super().__init__()
- # Support both 'provider' and 'backend' parameters for backwards compatibility
- # 'backend' takes precedence if both are provided
- self.backend = backend or provider
- self.backend_instance = None
-
- # Validate backend
- valid_backends = [
- "in-memory",
- "mem0",
- "supabase",
- "redis",
- "sqlite",
- "duckdb",
- "pulsar",
- ]
- if self.backend not in valid_backends:
- raise ValueError(
- f"Invalid backend: '{self.backend}'. "
- f"Valid backends are: {', '.join(valid_backends)}"
- )
-
# Initialize all attributes first
self.id = id
self.name = name or id
@@ -275,6 +254,27 @@ class Conversation(BaseStructure):
self.provider = provider # Keep for backwards compatibility
self.conversations_dir = conversations_dir
+ # Support both 'provider' and 'backend' parameters for backwards compatibility
+ # 'backend' takes precedence if both are provided
+ self.backend = backend or provider
+ self.backend_instance = None
+
+ # Validate backend
+ valid_backends = [
+ "in-memory",
+ "mem0",
+ "supabase",
+ "redis",
+ "sqlite",
+ "duckdb",
+ "pulsar",
+ ]
+ if self.backend not in valid_backends:
+ raise ValueError(
+ f"Invalid backend: '{self.backend}'. "
+ f"Valid backends are: {', '.join(valid_backends)}"
+ )
+
# Initialize backend if using persistent storage
if self.backend in [
"supabase",
@@ -484,8 +484,7 @@ class Conversation(BaseStructure):
self,
role: str,
content: Union[str, dict, list, Any],
- *args,
- **kwargs,
+ category: Optional[str] = None,
):
"""Add a message to the conversation history.
@@ -505,6 +504,9 @@ class Conversation(BaseStructure):
if self.message_id_on:
message["message_id"] = str(uuid.uuid4())
+ if category:
+ message["category"] = category
+
# Add message to conversation history
self.conversation_history.append(message)
@@ -520,6 +522,79 @@ class Conversation(BaseStructure):
f"Failed to autosave conversation: {str(e)}"
)
+ def export_and_count_categories(
+ self, tokenizer_model_name: Optional[str] = "gpt-4.1-mini"
+ ) -> Dict[str, int]:
+ """Export all messages with category 'input' and 'output' and count their tokens.
+
+ This method searches through the conversation history and:
+ 1. Extracts all messages marked with category 'input' or 'output'
+ 2. Concatenates the content of each category
+ 3. Counts tokens for each category using the specified tokenizer model
+
+ Args:
+ tokenizer_model_name (str): Name of the model to use for tokenization
+
+ Returns:
+ Dict[str, int]: A dictionary containing:
+ - input_tokens: Number of tokens in input messages
+ - output_tokens: Number of tokens in output messages
+ - total_tokens: Total tokens across both categories
+ """
+ try:
+ # Extract input and output messages
+ input_messages = []
+ output_messages = []
+
+ for message in self.conversation_history:
+ # Get message content and ensure it's a string
+ content = message.get("content", "")
+ if not isinstance(content, str):
+ content = str(content)
+
+ # Sort messages by category
+ category = message.get("category", "")
+ if category == "input":
+ input_messages.append(content)
+ elif category == "output":
+ output_messages.append(content)
+
+ # Join messages with spaces
+ all_input_text = " ".join(input_messages)
+ all_output_text = " ".join(output_messages)
+
+            logger.debug(all_input_text)
+            logger.debug(all_output_text)
+
+ # Count tokens only if there is text
+ input_tokens = (
+ count_tokens(all_input_text, tokenizer_model_name)
+ if all_input_text.strip()
+ else 0
+ )
+ output_tokens = (
+ count_tokens(all_output_text, tokenizer_model_name)
+ if all_output_text.strip()
+ else 0
+ )
+ total_tokens = input_tokens + output_tokens
+
+ return {
+ "input_tokens": input_tokens,
+ "output_tokens": output_tokens,
+ "total_tokens": total_tokens,
+ }
+
+ except Exception as e:
+ logger.error(
+ f"Error in export_and_count_categories: {str(e)}"
+ )
+ return {
+ "input_tokens": 0,
+ "output_tokens": 0,
+ "total_tokens": 0,
+ }
+
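+    # Illustrative usage sketch tying the new `category` tag to the counter above
+    # (contents and token counts are hypothetical):
+    #   conv.add(role="User", content="What is 2 + 2?", category="input")
+    #   conv.add(role="Assistant", content="4", category="output")
+    #   conv.export_and_count_categories(tokenizer_model_name="gpt-4.1-mini")
+    #   # -> {"input_tokens": ..., "output_tokens": ..., "total_tokens": ...}
+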
def add_mem0(
self,
role: str,
@@ -546,8 +621,9 @@ class Conversation(BaseStructure):
def add(
self,
role: str,
- content: Union[str, dict, list],
+ content: Union[str, dict, list, Any],
metadata: Optional[dict] = None,
+ category: Optional[str] = None,
):
"""Add a message to the conversation history."""
# If using a persistent backend, delegate to it
@@ -562,7 +638,9 @@ class Conversation(BaseStructure):
)
return self.add_in_memory(role, content)
elif self.provider == "in-memory":
- return self.add_in_memory(role, content)
+ return self.add_in_memory(
+ role=role, content=content, category=category
+ )
elif self.provider == "mem0":
return self.add_mem0(
role=role, content=content, metadata=metadata
@@ -1326,6 +1404,12 @@ class Conversation(BaseStructure):
self.conversation_history[-1]["content"],
)
+ def return_list_final(self):
+ """Return the final message as a list."""
+ return [
+ self.conversation_history[-1]["content"],
+ ]
+
@classmethod
def list_conversations(
cls, conversations_dir: Optional[str] = None
diff --git a/swarms/structs/csv_to_agent.py b/swarms/structs/csv_to_agent.py
index b76cc644..c5f7f355 100644
--- a/swarms/structs/csv_to_agent.py
+++ b/swarms/structs/csv_to_agent.py
@@ -104,9 +104,7 @@ class AgentValidator:
model_name in model["model_name"]
for model in model_list
):
- valid_models = [
- model["model_name"] for model in model_list
- ]
raise AgentValidationError(
"Invalid model name. Must be one of the supported litellm models",
"model_name",
diff --git a/swarms/structs/interactive_groupchat.py b/swarms/structs/interactive_groupchat.py
index 802de393..9c0e8afe 100644
--- a/swarms/structs/interactive_groupchat.py
+++ b/swarms/structs/interactive_groupchat.py
@@ -1,5 +1,6 @@
import re
-from typing import Callable, List, Union
+import random
+from typing import Callable, List, Union, Optional
from loguru import logger
@@ -35,6 +36,156 @@ class InvalidTaskFormatError(InteractiveGroupChatError):
pass
+class InvalidSpeakerFunctionError(InteractiveGroupChatError):
+ """Raised when an invalid speaker function is provided"""
+
+ pass
+
+
+# Built-in speaker functions
+def round_robin_speaker(
+ agents: List[str], current_index: int = 0
+) -> str:
+ """
+ Round robin speaker function that cycles through agents in order.
+
+ Args:
+ agents: List of agent names
+ current_index: Current position in the cycle
+
+ Returns:
+ Next agent name in the round robin sequence
+ """
+ if not agents:
+ raise ValueError("No agents provided for round robin")
+ return agents[current_index % len(agents)]
+
+
+def random_speaker(agents: List[str], **kwargs) -> str:
+ """
+ Random speaker function that selects agents randomly.
+
+ Args:
+ agents: List of agent names
+ **kwargs: Additional arguments (ignored)
+
+ Returns:
+ Randomly selected agent name
+ """
+ if not agents:
+ raise ValueError("No agents provided for random selection")
+ return random.choice(agents)
+
+
+def priority_speaker(
+ agents: List[str], priorities: dict, **kwargs
+) -> str:
+ """
+ Priority-based speaker function that selects agents based on priority weights.
+
+ Args:
+ agents: List of agent names
+ priorities: Dictionary mapping agent names to priority weights
+ **kwargs: Additional arguments (ignored)
+
+ Returns:
+ Selected agent name based on priority weights
+ """
+ if not agents:
+ raise ValueError("No agents provided for priority selection")
+
+ # Filter agents that exist in the priorities dict
+ available_agents = [
+ agent for agent in agents if agent in priorities
+ ]
+ if not available_agents:
+ # Fallback to random if no priorities match
+ return random.choice(agents)
+
+ # Calculate total weight
+ total_weight = sum(
+ priorities[agent] for agent in available_agents
+ )
+ if total_weight == 0:
+ return random.choice(available_agents)
+
+ # Select based on weighted probability
+ rand_val = random.uniform(0, total_weight)
+ current_weight = 0
+
+ for agent in available_agents:
+ current_weight += priorities[agent]
+ if rand_val <= current_weight:
+ return agent
+
+ return available_agents[-1] # Fallback
+
+
+def random_dynamic_speaker(
+ agents: List[str],
+ response: str = "",
+ strategy: str = "parallel",
+ **kwargs,
+) -> Union[str, List[str]]:
+ """
+ Random dynamic speaker function that selects agents based on @mentions in responses.
+
+ This function works in two phases:
+ 1. If no response is provided (first call), randomly selects an agent
+ 2. If a response is provided, extracts @mentions and returns agent(s) based on strategy
+
+ Args:
+ agents: List of available agent names
+ response: The response from the previous agent (may contain @mentions)
+ strategy: How to handle multiple mentions - "sequential" or "parallel"
+ **kwargs: Additional arguments (ignored)
+
+ Returns:
+ For sequential strategy: str (single agent name)
+ For parallel strategy: List[str] (list of agent names)
+ """
+ if not agents:
+ raise ValueError(
+ "No agents provided for random dynamic selection"
+ )
+
+ # If no response provided, randomly select first agent
+ if not response:
+ return random.choice(agents)
+
+ # Extract @mentions from the response
+ mentions = re.findall(r"@(\w+)", response)
+
+ # Filter mentions to only include valid agents
+ valid_mentions = [
+ mention for mention in mentions if mention in agents
+ ]
+
+ if not valid_mentions:
+ # If no valid mentions, randomly select from all agents
+ return random.choice(agents)
+
+ # Handle multiple mentions based on strategy
+ if strategy == "sequential":
+ # Return the first mentioned agent for sequential execution
+ return valid_mentions[0]
+ elif strategy == "parallel":
+ # Return all mentioned agents for parallel execution
+ return valid_mentions
+ else:
+ raise ValueError(
+ f"Invalid strategy: {strategy}. Must be 'sequential' or 'parallel'"
+ )
+
+
+speaker_functions = {
+ "round-robin-speaker": round_robin_speaker,
+ "random-speaker": random_speaker,
+ "priority-speaker": priority_speaker,
+ "random-dynamic-speaker": random_dynamic_speaker,
+}
+
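+# Illustrative sketch of the built-in speaker functions above (agent names are hypothetical):
+#   round_robin_speaker(["analyst", "writer"], current_index=1)                      # -> "writer"
+#   priority_speaker(["analyst", "writer"], priorities={"analyst": 3, "writer": 1})  # weighted pick
+#   random_dynamic_speaker(["analyst", "writer"], response="@writer please expand")  # -> ["writer"]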
+
class InteractiveGroupChat:
"""
An interactive group chat system that enables conversations with multiple agents using @mentions.
@@ -49,6 +200,8 @@ class InteractiveGroupChat:
max_loops (int): Maximum number of conversation turns
conversation (Conversation): Stores the chat history
agent_map (Dict[str, Union[Agent, Callable]]): Mapping of agent names to their instances
+ speaker_function (Callable): Function to determine speaking order
+ speaker_state (dict): State for speaker functions that need it
Args:
name (str, optional): Name of the group chat. Defaults to "InteractiveGroupChat".
@@ -57,9 +210,38 @@ class InteractiveGroupChat:
max_loops (int, optional): Maximum conversation turns. Defaults to 1.
output_type (str, optional): Type of output format. Defaults to "string".
interactive (bool, optional): Whether to enable interactive terminal mode. Defaults to False.
+ speaker_function (Union[str, Callable], optional): Function to determine speaking order. Can be:
+ - A string name: "round-robin-speaker", "random-speaker", "priority-speaker", "random-dynamic-speaker"
+ - A custom callable function
+ - None (defaults to round_robin_speaker)
+ speaker_state (dict, optional): Initial state for speaker function. Defaults to empty dict.
Raises:
ValueError: If invalid initialization parameters are provided
+ InvalidSpeakerFunctionError: If the speaker function is invalid
+
+ Examples:
+ # Initialize with string-based speaker function
+ group_chat = InteractiveGroupChat(
+ agents=[agent1, agent2, agent3],
+ speaker_function="random-speaker"
+ )
+
+ # Initialize with priority speaker function
+ group_chat = InteractiveGroupChat(
+ agents=[agent1, agent2, agent3],
+ speaker_function="priority-speaker",
+ speaker_state={"priorities": {"agent1": 3, "agent2": 2, "agent3": 1}}
+ )
+
+ # Initialize with dynamic speaker function (agents mention each other)
+ group_chat = InteractiveGroupChat(
+ agents=[agent1, agent2, agent3],
+ speaker_function="random-dynamic-speaker"
+ )
+
+ # Change speaker function during runtime
+ group_chat.set_speaker_function("round-robin-speaker")
"""
def __init__(
@@ -71,6 +253,8 @@ class InteractiveGroupChat:
max_loops: int = 1,
output_type: str = "string",
interactive: bool = False,
+ speaker_function: Optional[Union[str, Callable]] = None,
+ speaker_state: Optional[dict] = None,
):
self.id = id
self.name = name
@@ -80,6 +264,33 @@ class InteractiveGroupChat:
self.output_type = output_type
self.interactive = interactive
+ # Speaker function configuration
+ if speaker_function is None:
+ self.speaker_function = round_robin_speaker
+ elif isinstance(speaker_function, str):
+ if speaker_function not in speaker_functions:
+ available_functions = ", ".join(
+ speaker_functions.keys()
+ )
+ raise InvalidSpeakerFunctionError(
+ f"Invalid speaker function: '{speaker_function}'. "
+ f"Available functions: {available_functions}"
+ )
+ self.speaker_function = speaker_functions[
+ speaker_function
+ ]
+ elif callable(speaker_function):
+ self.speaker_function = speaker_function
+ else:
+ raise InvalidSpeakerFunctionError(
+ "Speaker function must be either a string, callable, or None"
+ )
+
+ self.speaker_state = speaker_state or {"current_index": 0}
+
+ # Validate speaker function
+ self._validate_speaker_function()
+
# Initialize conversation history
self.conversation = Conversation(time_enabled=True)
@@ -96,6 +307,256 @@ class InteractiveGroupChat:
self._setup_conversation_context()
self._update_agent_prompts()
+ def set_speaker_function(
+ self,
+ speaker_function: Union[str, Callable],
+ speaker_state: Optional[dict] = None,
+ ) -> None:
+ """
+ Set the speaker function using either a string name or a custom callable.
+
+ Args:
+ speaker_function: Either a string name of a predefined function or a custom callable
+ String options:
+ - "round-robin-speaker": Cycles through agents in order
+ - "random-speaker": Selects agents randomly
+ - "priority-speaker": Selects based on priority weights
+ - "random-dynamic-speaker": Randomly selects first agent, then follows @mentions in responses
+ Callable: Custom function that takes (agents: List[str], **kwargs) -> str
+ speaker_state: Optional state for the speaker function
+
+ Raises:
+ InvalidSpeakerFunctionError: If the speaker function is invalid
+ """
+ if isinstance(speaker_function, str):
+ # Handle string-based speaker function
+ if speaker_function not in speaker_functions:
+ available_functions = ", ".join(
+ speaker_functions.keys()
+ )
+ raise InvalidSpeakerFunctionError(
+ f"Invalid speaker function: '{speaker_function}'. "
+ f"Available functions: {available_functions}"
+ )
+ self.speaker_function = speaker_functions[
+ speaker_function
+ ]
+ logger.info(
+ f"Speaker function set to: {speaker_function}"
+ )
+ elif callable(speaker_function):
+ # Handle callable speaker function
+ self.speaker_function = speaker_function
+ logger.info(
+ f"Custom speaker function set to: {speaker_function.__name__}"
+ )
+ else:
+ raise InvalidSpeakerFunctionError(
+ "Speaker function must be either a string or a callable"
+ )
+
+ # Update speaker state if provided
+ if speaker_state:
+ self.speaker_state.update(speaker_state)
+
+ # Validate the speaker function
+ self._validate_speaker_function()
+
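A minimal sketch of a custom callable that satisfies the (agents: List[str], **kwargs) -> str contract described above; the alphabetical rule and the group_chat instance are placeholders for the example:

def alphabetical_speaker(agents: List[str], **kwargs) -> str:
    # Returning a plain string keeps _validate_speaker_function happy.
    return sorted(agents)[0]

group_chat.set_speaker_function(alphabetical_speaker)
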
+ def set_priorities(self, priorities: dict) -> None:
+ """
+ Set agent priorities for priority-based speaking order.
+
+ Args:
+ priorities: Dictionary mapping agent names to priority weights
+ """
+ self.speaker_state["priorities"] = priorities
+ logger.info(f"Agent priorities set: {priorities}")
+
+ def get_available_speaker_functions(self) -> List[str]:
+ """
+ Get a list of available speaker function names.
+
+ Returns:
+ List[str]: List of available speaker function names
+ """
+ return list(speaker_functions.keys())
+
+ def get_current_speaker_function(self) -> str:
+ """
+ Get the name of the current speaker function.
+
+ Returns:
+ str: Name of the current speaker function, or "custom" if it's a custom function
+ """
+ for name, func in speaker_functions.items():
+ if self.speaker_function == func:
+ return name
+ return "custom"
+
+ def start_interactive_session(self):
+ """
+ Start an interactive terminal session for chatting with agents.
+
+ This method creates a REPL (Read-Eval-Print Loop) that allows users to:
+ - Chat with agents using @mentions
+ - See available agents and their descriptions
+ - Exit the session using 'exit' or 'quit'
+ - Get help using 'help' or '?'
+ """
+ if not self.interactive:
+ raise InteractiveGroupChatError(
+ "Interactive mode is not enabled. Initialize with interactive=True"
+ )
+
+ print(f"\nWelcome to {self.name}!")
+ print(f"Description: {self.description}")
+ print(
+ f"Current speaker function: {self.get_current_speaker_function()}"
+ )
+ print("\nAvailable agents:")
+ for name, agent in self.agent_map.items():
+ if isinstance(agent, Agent):
+ print(
+ f"- @{name}: {agent.system_prompt.splitlines()[0]}"
+ )
+ else:
+ print(f"- @{name}: Custom callable function")
+
+ print("\nCommands:")
+ print("- Type 'help' or '?' for help")
+ print("- Type 'exit' or 'quit' to end the session")
+ print("- Type 'speaker' to change speaker function")
+ print("- Use @agent_name to mention agents")
+ print("\nStart chatting:")
+
+ while True:
+ try:
+ # Get user input
+ user_input = input("\nYou: ").strip()
+
+ # Handle special commands
+ if user_input.lower() in ["exit", "quit"]:
+ print("Goodbye!")
+ break
+
+ if user_input.lower() in ["help", "?"]:
+ print("\nHelp:")
+ print("1. Mention agents using @agent_name")
+ print(
+ "2. You can mention multiple agents in one task"
+ )
+ print("3. Available agents:")
+ for name in self.agent_map:
+ print(f" - @{name}")
+ print(
+ "4. Type 'speaker' to change speaker function"
+ )
+ print(
+ "5. Type 'exit' or 'quit' to end the session"
+ )
+ continue
+
+ if user_input.lower() == "speaker":
+ print(
+ f"\nCurrent speaker function: {self.get_current_speaker_function()}"
+ )
+ print("Available speaker functions:")
+ for i, func_name in enumerate(
+ self.get_available_speaker_functions(), 1
+ ):
+ print(f" {i}. {func_name}")
+
+ try:
+ choice = input(
+ "\nEnter the number or name of the speaker function: "
+ ).strip()
+
+ # Try to parse as number first
+ try:
+ func_index = int(choice) - 1
+ if (
+ 0
+ <= func_index
+ < len(
+ self.get_available_speaker_functions()
+ )
+ ):
+ selected_func = self.get_available_speaker_functions()[
+ func_index
+ ]
+ else:
+ print(
+ "Invalid number. Please try again."
+ )
+ continue
+ except ValueError:
+ # Try to parse as name
+ selected_func = choice
+
+ self.set_speaker_function(selected_func)
+ print(
+ f"Speaker function changed to: {self.get_current_speaker_function()}"
+ )
+
+ except InvalidSpeakerFunctionError as e:
+ print(f"Error: {e}")
+ except Exception as e:
+ print(f"An error occurred: {e}")
+ continue
+
+ if not user_input:
+ continue
+
+ # Process the task and get responses
+ try:
+                        response = self.run(user_input)
+                        print("\nChat:")
+                        print(response)
+
+ except NoMentionedAgentsError:
+ print(
+ "\nError: Please mention at least one agent using @agent_name"
+ )
+ except AgentNotFoundError as e:
+ print(f"\nError: {str(e)}")
+ except Exception as e:
+ print(f"\nAn error occurred: {str(e)}")
+
+ except KeyboardInterrupt:
+ print("\nSession terminated by user. Goodbye!")
+ break
+ except Exception as e:
+ print(f"\nAn unexpected error occurred: {str(e)}")
+ print(
+ "The session will continue. You can type 'exit' to end it."
+ )
+
+ def _validate_speaker_function(self) -> None:
+ """
+ Validates the speaker function.
+
+ Raises:
+ InvalidSpeakerFunctionError: If the speaker function is invalid
+ """
+ if not callable(self.speaker_function):
+ raise InvalidSpeakerFunctionError(
+ "Speaker function must be callable"
+ )
+
+ # Test the speaker function with a dummy list
+ try:
+ test_result = self.speaker_function(
+ ["test_agent"], **self.speaker_state
+ )
+ if not isinstance(test_result, str):
+ raise InvalidSpeakerFunctionError(
+ "Speaker function must return a string"
+ )
+ except Exception as e:
+ raise InvalidSpeakerFunctionError(
+ f"Speaker function validation failed: {e}"
+ )
+
def _validate_initialization(self) -> None:
"""
Validates the group chat configuration.
@@ -150,6 +611,27 @@ class InteractiveGroupChat:
}
)
+ # Create the enhanced prompt that teaches agents how to use @mentions
+ mention_instruction = """
+
+IMPORTANT: You are part of a collaborative group chat where you can interact with other agents using @mentions.
+
+COLLABORATIVE RESPONSE PROTOCOL:
+1. FIRST: Read and understand all previous responses from other agents
+2. ACKNOWLEDGE: Reference and acknowledge what other agents have said
+3. BUILD UPON: Add your perspective while building upon their insights
+4. MENTION: Use @agent_name to call on other agents when needed
+5. COMPLETE: Acknowledge when your part is done and what still needs to be done
+
+HOW TO MENTION OTHER AGENTS:
+- Use @agent_name to mention another agent in your response
+- You can mention multiple agents: @agent1 @agent2
+- When you mention an agent, they will be notified and can respond
+- Example: "I think @analyst should review this data" or "Let's ask @researcher to investigate this further"
+
+AVAILABLE AGENTS TO MENTION:
+"""
+
group_context = (
f"\n\nYou are part of a group chat named '{self.name}' with the following description: {self.description}\n"
f"Other participants in this chat:\n"
@@ -163,11 +645,49 @@ class InteractiveGroupChat:
for info in agent_info
if info["name"] != agent.agent_name
]
- agent_context = group_context
+ agent_context = group_context + mention_instruction
for other in other_agents:
- agent_context += (
- f"- {other['name']}: {other['description']}\n"
- )
+ agent_context += f"- @{other['name']}: {other['description']}\n"
+
+ # Add final instruction
+ agent_context += """
+
+COLLABORATION GUIDELINES:
+- ALWAYS read the full conversation history before responding
+- ACKNOWLEDGE other agents' contributions: "Building on @analyst's data insights..." or "I agree with @researcher's findings that..."
+- BUILD UPON previous responses rather than repeating information
+- SYNTHESIZE multiple perspectives when possible
+- ASK CLARIFYING QUESTIONS if you need more information from other agents
+- DELEGATE appropriately: "Let me ask @expert_agent to verify this" or "@specialist, can you elaborate on this point?"
+
+TASK COMPLETION GUIDELINES:
+- ACKNOWLEDGE when you are done with your part of the task
+- CLEARLY STATE what still needs to be done before the overall task is finished
+- If you mention other agents, explain what specific input you need from them
+- Use phrases like "I have completed [specific part]" or "The task still requires [specific actions]"
+- Provide a clear status update: "My analysis is complete. The task now needs @writer to create content and @reviewer to validate the approach."
+
+RESPONSE STRUCTURE:
+1. ACKNOWLEDGE: "I've reviewed the responses from @agent1 and @agent2..."
+2. BUILD: "Building on @agent1's analysis of the data..."
+3. CONTRIBUTE: "From my perspective, I would add..."
+4. COLLABORATE: "To get a complete picture, let me ask @agent3 to..."
+5. COMPLETE: "I have completed [my part]. The task still requires [specific next steps]"
+6. SYNTHESIZE: "Combining our insights, the key findings are..."
+
+EXAMPLES OF GOOD COLLABORATION:
+- "I've reviewed @analyst's data analysis and @researcher's market insights. The data shows strong growth potential, and I agree with @researcher that we should focus on emerging markets. Let me add that from a content perspective, we should @writer to create targeted messaging for these markets. I have completed my market analysis. The task now requires @writer to develop content and @reviewer to validate our approach."
+- "Building on @researcher's findings about customer behavior, I can see that @analyst's data supports this trend. To get a complete understanding, let me ask @writer to help us craft messaging that addresses these specific customer needs. My data analysis is complete. The task still needs @writer to create messaging and @reviewer to approve the final strategy."
+
+AVOID:
+- Ignoring other agents' responses
+- Repeating what others have already said
+- Making assumptions without consulting relevant experts
+- Responding in isolation without considering the group's collective knowledge
+- Not acknowledging task completion status
+
+Remember: You are part of a team. Your response should reflect that you've read, understood, and are building upon the contributions of others, and clearly communicate your task completion status.
+"""
# Update the agent's system prompt
agent.system_prompt = (
@@ -202,90 +722,100 @@ class InteractiveGroupChat:
logger.error(f"Error extracting mentions: {e}")
raise InvalidTaskFormatError(f"Invalid task format: {e}")
- def start_interactive_session(self):
+ def _get_speaking_order(
+ self, mentioned_agents: List[str]
+ ) -> List[str]:
"""
- Start an interactive terminal session for chatting with agents.
+ Determines the speaking order using the configured speaker function.
- This method creates a REPL (Read-Eval-Print Loop) that allows users to:
- - Chat with agents using @mentions
- - See available agents and their descriptions
- - Exit the session using 'exit' or 'quit'
- - Get help using 'help' or '?'
+ Args:
+ mentioned_agents: List of agent names that were mentioned
+
+ Returns:
+ List of agent names in the order they should speak
"""
- if not self.interactive:
- raise InteractiveGroupChatError(
- "Interactive mode is not enabled. Initialize with interactive=True"
- )
+ if not mentioned_agents:
+ return []
- print(f"\nWelcome to {self.name}!")
- print(f"Description: {self.description}")
- print("\nAvailable agents:")
- for name, agent in self.agent_map.items():
- if isinstance(agent, Agent):
- print(
- f"- @{name}: {agent.system_prompt.splitlines()[0]}"
+ # Use the speaker function to determine order
+ try:
+ if self.speaker_function == round_robin_speaker:
+ # For round robin, we need to maintain state
+ current_index = self.speaker_state.get(
+ "current_index", 0
)
- else:
- print(f"- @{name}: Custom callable function")
+ ordered_agents = []
- print("\nCommands:")
- print("- Type 'help' or '?' for help")
- print("- Type 'exit' or 'quit' to end the session")
- print("- Use @agent_name to mention agents")
- print("\nStart chatting:")
+ # Create the order starting from current index
+ for i in range(len(mentioned_agents)):
+ agent = round_robin_speaker(
+ mentioned_agents, current_index + i
+ )
+ ordered_agents.append(agent)
- while True:
- try:
- # Get user input
- user_input = input("\nYou: ").strip()
+ # Update state for next round
+ self.speaker_state["current_index"] = (
+ current_index + len(mentioned_agents)
+ ) % len(mentioned_agents)
+ return ordered_agents
- # Handle special commands
- if user_input.lower() in ["exit", "quit"]:
- print("Goodbye!")
- break
+ elif self.speaker_function == random_speaker:
+ # For random, shuffle the list
+ shuffled = mentioned_agents.copy()
+ random.shuffle(shuffled)
+ return shuffled
- if user_input.lower() in ["help", "?"]:
- print("\nHelp:")
- print("1. Mention agents using @agent_name")
- print(
- "2. You can mention multiple agents in one task"
- )
- print("3. Available agents:")
- for name in self.agent_map:
- print(f" - @{name}")
- print(
- "4. Type 'exit' or 'quit' to end the session"
- )
- continue
+ elif self.speaker_function == priority_speaker:
+ # For priority, we need priorities in speaker_state
+ priorities = self.speaker_state.get("priorities", {})
+ if not priorities:
+ # Fallback to random if no priorities set
+ shuffled = mentioned_agents.copy()
+ random.shuffle(shuffled)
+ return shuffled
- if not user_input:
- continue
+ # Sort by priority (higher priority first)
+ sorted_agents = sorted(
+ mentioned_agents,
+ key=lambda x: priorities.get(x, 0),
+ reverse=True,
+ )
+ return sorted_agents
- # Process the task and get responses
- try:
- response = self.run(user_input)
- print("\nChat:")
- print(response)
+ elif self.speaker_function == random_dynamic_speaker:
+ # For dynamic speaker, we need to handle it differently
+ # The dynamic speaker will be called during the run method
+ # For now, just return the original order
+ return mentioned_agents
- except NoMentionedAgentsError:
- print(
- "\nError: Please mention at least one agent using @agent_name"
- )
- except AgentNotFoundError as e:
- print(f"\nError: {str(e)}")
- except Exception as e:
- print(f"\nAn error occurred: {str(e)}")
-
- except KeyboardInterrupt:
- print("\nSession terminated by user. Goodbye!")
- break
- except Exception as e:
- print(f"\nAn unexpected error occurred: {str(e)}")
- print(
- "The session will continue. You can type 'exit' to end it."
+ else:
+ # Custom speaker function
+ # For custom functions, we'll use the first agent returned
+ # and then process the rest in original order
+ first_speaker = self.speaker_function(
+ mentioned_agents, **self.speaker_state
)
+ if first_speaker in mentioned_agents:
+ remaining = [
+ agent
+ for agent in mentioned_agents
+ if agent != first_speaker
+ ]
+ return [first_speaker] + remaining
+ else:
+ return mentioned_agents
+
+ except Exception as e:
+ logger.error(f"Error in speaker function: {e}")
+ # Fallback to original order
+ return mentioned_agents
- def run(self, task: str) -> str:
+ def run(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
+ ) -> str:
"""
Process a task and get responses from mentioned agents.
If interactive mode is enabled, this will be called by start_interactive_session().
@@ -303,43 +833,126 @@ class InteractiveGroupChat:
# Add user task to conversation
self.conversation.add(role="User", content=task)
- # Get responses from mentioned agents
- for agent_name in mentioned_agents:
- agent = self.agent_map.get(agent_name)
- if not agent:
- raise AgentNotFoundError(
- f"Agent '{agent_name}' not found"
- )
+ # Handle dynamic speaker function differently
+ if self.speaker_function == random_dynamic_speaker:
+ # Get strategy from speaker state (default to sequential)
+ strategy = self.speaker_state.get(
+ "strategy", "sequential"
+ )
- try:
- # Get the complete conversation history
- context = (
- self.conversation.return_history_as_string()
+ # For dynamic speaker, we'll determine the next speaker after each response
+ # Track which agents have spoken to ensure all get a chance
+ spoken_agents = set()
+ last_response = ""
+ max_iterations = (
+ len(mentioned_agents) * 3
+ ) # Allow more iterations for parallel
+ iteration = 0
+
+ while iteration < max_iterations and len(
+ spoken_agents
+ ) < len(mentioned_agents):
+ # Determine next speaker(s) using dynamic function
+ next_speakers = self.speaker_function(
+ mentioned_agents, # Use all mentioned agents, not remaining_agents
+ last_response,
+ strategy=strategy,
+ **self.speaker_state,
)
- # Get response from agent
- if isinstance(agent, Agent):
- response = agent.run(
- task=f"{context}\nPlease respond to the latest task as {agent_name}."
- )
- else:
- # For callable functions
- response = agent(context)
-
- # Add response to conversation
- if response and not response.isspace():
- self.conversation.add(
- role=agent_name, content=response
- )
- logger.info(f"Agent {agent_name} responded")
+ # Handle both single agent and multiple agents
+ if isinstance(next_speakers, str):
+ next_speakers = [next_speakers]
- except Exception as e:
- logger.error(
- f"Error getting response from {agent_name}: {e}"
- )
- self.conversation.add(
- role=agent_name,
- content=f"Error: Unable to generate response - {str(e)}",
+ # Filter out invalid agents
+ valid_next_speakers = [
+ agent
+ for agent in next_speakers
+ if agent in mentioned_agents
+ ]
+
+ if not valid_next_speakers:
+ # If no valid mentions found, randomly select from unspoken agents
+ unspoken_agents = [
+ agent
+ for agent in mentioned_agents
+ if agent not in spoken_agents
+ ]
+ if unspoken_agents:
+ valid_next_speakers = [
+ random.choice(unspoken_agents)
+ ]
+ else:
+ # All agents have spoken, break the loop
+ break
+
+ # Process agents based on strategy
+ if strategy == "sequential":
+ # Process one agent at a time
+ for next_speaker in valid_next_speakers:
+ if next_speaker in spoken_agents:
+ continue # Skip if already spoken
+
+ response = self._get_agent_response(
+ next_speaker, img, imgs
+ )
+ if response:
+ last_response = response
+ spoken_agents.add(next_speaker)
+ break # Only process one agent in sequential mode
+
+ elif strategy == "parallel":
+ # Process all mentioned agents in parallel
+ import concurrent.futures
+
+ # Get responses from all valid agents
+ responses = []
+ with concurrent.futures.ThreadPoolExecutor() as executor:
+ future_to_agent = {
+ executor.submit(
+ self._get_agent_response,
+ agent,
+ img,
+ imgs,
+ ): agent
+ for agent in valid_next_speakers
+ if agent not in spoken_agents
+ }
+
+ for (
+ future
+ ) in concurrent.futures.as_completed(
+ future_to_agent
+ ):
+ agent = future_to_agent[future]
+ try:
+ response = future.result()
+ if response:
+ responses.append(response)
+ spoken_agents.add(agent)
+ except Exception as e:
+ logger.error(
+ f"Error getting response from {agent}: {e}"
+ )
+
+ # Combine responses for next iteration
+ if responses:
+ last_response = "\n\n".join(responses)
+
+ iteration += 1
+ else:
+ # For non-dynamic speaker functions, use the original logic
+ speaking_order = self._get_speaking_order(
+ mentioned_agents
+ )
+ logger.info(
+ f"Speaking order determined: {speaking_order}"
+ )
+
+ # Get responses from mentioned agents in the determined order
+ for agent_name in speaking_order:
+ response = self._get_agent_response(
+ agent_name, img, imgs
)
return history_output_formatter(
@@ -354,3 +967,97 @@ class InteractiveGroupChat:
raise InteractiveGroupChatError(
f"Unexpected error occurred: {str(e)}"
)
+
+ def _get_agent_response(
+ self,
+ agent_name: str,
+ img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
+ ) -> Optional[str]:
+ """
+ Get response from a specific agent.
+
+ Args:
+ agent_name: Name of the agent to get response from
+ img: Optional image for the task
+ imgs: Optional list of images for the task
+
+ Returns:
+ The agent's response or None if error
+ """
+ agent = self.agent_map.get(agent_name)
+ if not agent:
+ raise AgentNotFoundError(
+ f"Agent '{agent_name}' not found"
+ )
+
+ try:
+ # Get the complete conversation history
+ context = self.conversation.return_history_as_string()
+
+ # Get response from agent
+ if isinstance(agent, Agent):
+ collaborative_task = f"""{context}
+
+COLLABORATIVE TASK: Please respond to the latest task as {agent_name}.
+
+IMPORTANT INSTRUCTIONS:
+1. Read the ENTIRE conversation history above
+2. Acknowledge what other agents have said before adding your perspective
+3. Build upon their insights rather than repeating information
+4. If you need input from other agents, mention them using @agent_name
+5. Provide your unique expertise while showing you understand the group's collective knowledge
+
+TASK COMPLETION GUIDELINES:
+- Acknowledge when you are done with your part of the task
+- Clearly state what still needs to be done before the overall task is finished
+- If you mention other agents, explain what specific input you need from them
+- Use phrases like "I have completed [specific part]" or "The task still requires [specific actions]"
+
+Remember: You are part of a collaborative team. Your response should demonstrate that you've read, understood, and are building upon the contributions of others."""
+
+ response = agent.run(
+ task=collaborative_task,
+ img=img,
+ imgs=imgs,
+ )
+ else:
+ # For callable functions
+ response = agent(context)
+
+ # Add response to conversation
+ if response and not response.isspace():
+ self.conversation.add(
+ role=agent_name, content=response
+ )
+ logger.info(f"Agent {agent_name} responded")
+ return response
+
+ except Exception as e:
+ logger.error(
+ f"Error getting response from {agent_name}: {e}"
+ )
+ self.conversation.add(
+ role=agent_name,
+ content=f"Error: Unable to generate response - {str(e)}",
+ )
+ return f"Error: Unable to generate response - {str(e)}"
+
+ return None
+
+ def set_dynamic_strategy(self, strategy: str) -> None:
+ """
+ Set the strategy for the random-dynamic-speaker function.
+
+ Args:
+ strategy: Either "sequential" or "parallel"
+ - "sequential": Process one agent at a time based on @mentions
+ - "parallel": Process all mentioned agents simultaneously
+ """
+ if strategy not in ["sequential", "parallel"]:
+ raise ValueError(
+ "Strategy must be either 'sequential' or 'parallel'"
+ )
+
+ self.speaker_state["strategy"] = strategy
+ logger.info(f"Dynamic speaker strategy set to: {strategy}")
diff --git a/swarms/structs/ma_utils.py b/swarms/structs/ma_utils.py
index 8d28b76e..1cf1e0fb 100644
--- a/swarms/structs/ma_utils.py
+++ b/swarms/structs/ma_utils.py
@@ -1,12 +1,17 @@
from typing import List, Any, Optional, Union, Callable
import random
+from swarms.prompts.collaborative_prompts import (
+ get_multi_agent_collaboration_prompt_one,
+)
def list_all_agents(
agents: List[Union[Callable, Any]],
conversation: Optional[Any] = None,
- name: str = "",
- add_to_conversation: bool = False,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ add_to_conversation: Optional[bool] = False,
+ add_collaboration_prompt: Optional[bool] = True,
) -> str:
"""Lists all agents in a swarm and optionally adds them to a conversation.
@@ -27,6 +32,7 @@ def list_all_agents(
>>> conversation = Conversation()
>>> agent_info = list_all_agents(agents, conversation, "MySwarm")
>>> print(agent_info)
+        Team Name: MySwarm
Total Agents: 2
Agent: Agent1
@@ -39,8 +45,15 @@ def list_all_agents(
# Compile information about all agents
total_agents = len(agents)
- all_agents = f"Total Agents: {total_agents}\n\n" + "\n\n".join(
- f"Agent: {agent.agent_name} \n\n Description: {agent.description or (agent.system_prompt[:50] + '...' if len(agent.system_prompt) > 50 else agent.system_prompt)}"
+ all_agents = f"Team Name: {name}\n" if name else ""
+ all_agents += (
+ f"Team Description: {description}\n" if description else ""
+ )
+ all_agents += f"Total Agents: {total_agents}\n\n"
+ all_agents += "| Agent | Description |\n"
+ all_agents += "|-------|-------------|\n"
+ all_agents += "\n".join(
+ f"| {agent.agent_name} | {agent.description or (agent.system_prompt[:50] + '...' if len(agent.system_prompt) > 50 else agent.system_prompt)} |"
for agent in agents
)
@@ -48,10 +61,15 @@ def list_all_agents(
# Add the agent information to the conversation
conversation.add(
role="System",
- content=f"All Agents Available in the Swarm {name}:\n\n{all_agents}",
+ content=all_agents,
)
- return all_agents
+ if add_collaboration_prompt:
+ return get_multi_agent_collaboration_prompt_one(
+ agents_in_swarm=all_agents
+ )
+ else:
+ return all_agents
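
A rough usage sketch of the reworked helper; the agent and conversation objects are placeholders and only illustrate the new name, description and add_collaboration_prompt parameters:

from swarms.structs.ma_utils import list_all_agents

agent_table = list_all_agents(
    agents=[analyst, writer],
    conversation=conversation,       # written into the chat when add_to_conversation=True
    name="MySwarm",
    description="Market research team",
    add_to_conversation=True,
    add_collaboration_prompt=True,   # wraps the markdown table in the collaboration prompt
)
print(agent_table)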
models = [
@@ -68,6 +86,7 @@ models = [
"o4-mini",
"o3",
"gpt-4.1",
+ "groq/llama-3.1-8b-instant",
"gpt-4.1-nano",
]
diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py
index 9c6b8756..3bab8211 100644
--- a/swarms/structs/mixture_of_agents.py
+++ b/swarms/structs/mixture_of_agents.py
@@ -1,17 +1,18 @@
-import asyncio
import os
from typing import List, Optional
from swarms.structs.agent import Agent
from swarms.prompts.ag_prompt import aggregator_system_prompt_main
+from swarms.structs.ma_utils import list_all_agents
+from swarms.utils.history_output_formatter import (
+ history_output_formatter,
+)
from swarms.utils.loguru_logger import initialize_logger
import concurrent.futures
from swarms.utils.output_types import OutputType
from swarms.structs.conversation import Conversation
-from swarms.utils.history_output_formatter import (
- history_output_formatter,
-)
+
logger = initialize_logger(log_folder="mixture_of_agents")
@@ -25,13 +26,13 @@ class MixtureOfAgents:
self,
name: str = "MixtureOfAgents",
description: str = "A class to run a mixture of agents and aggregate their responses.",
- agents: List[Agent] = [],
+ agents: List[Agent] = None,
aggregator_agent: Agent = None,
aggregator_system_prompt: str = aggregator_system_prompt_main,
layers: int = 3,
max_loops: int = 1,
- return_str_on: bool = False,
- output_type: OutputType = "dict",
+ output_type: OutputType = "final",
+ aggregator_model_name: str = "claude-3-5-sonnet-20240620",
) -> None:
"""
Initialize the Mixture of Agents class with agents and configuration.
@@ -48,16 +49,36 @@ class MixtureOfAgents:
self.description = description
self.agents = agents
self.aggregator_agent = aggregator_agent
- self.aggregator_system_prompt = aggregator_system_prompt_main
+ self.aggregator_system_prompt = aggregator_system_prompt
self.layers = layers
self.max_loops = max_loops
- self.return_str_on = return_str_on
self.output_type = output_type
+ self.aggregator_model_name = aggregator_model_name
+        if self.aggregator_agent is None:
+            self.aggregator_agent = self.aggregator_agent_setup()
self.reliability_check()
self.conversation = Conversation()
+ list_all_agents(
+ agents=self.agents,
+ conversation=self.conversation,
+ description=self.description,
+ name=self.name,
+ add_to_conversation=True,
+ )
+
+ def aggregator_agent_setup(self):
+ return Agent(
+ agent_name="Aggregator Agent",
+ description="An agent that aggregates the responses of the other agents.",
+ system_prompt=aggregator_system_prompt_main,
+ model_name=self.aggregator_model_name,
+ temperature=0.5,
+ max_loops=1,
+ output_type="str-all-except-first",
+ )
+
def reliability_check(self) -> None:
"""
Performs a reliability check on the Mixture of Agents class.
@@ -66,8 +87,8 @@ class MixtureOfAgents:
"Checking the reliability of the Mixture of Agents class."
)
- if not self.agents:
- raise ValueError("No reference agents provided.")
+        if self.agents is None or len(self.agents) == 0:
+ raise ValueError("No agents provided.")
if not self.aggregator_agent:
raise ValueError("No aggregator agent provided.")
@@ -78,129 +99,83 @@ class MixtureOfAgents:
if not self.layers:
raise ValueError("No layers provided.")
- if self.layers < 1:
- raise ValueError("Layers must be greater than 0.")
-
logger.info("Reliability check passed.")
logger.info("Mixture of Agents class is ready for use.")
- def _get_final_system_prompt(
- self, system_prompt: str, results: List[str]
- ) -> str:
- """
- Constructs a system prompt for subsequent layers that includes previous responses.
+ def save_to_markdown_file(self, file_path: str = "moa.md"):
+ with open(file_path, "w") as f:
+ f.write(self.conversation.get_str())
- Args:
- system_prompt (str): The initial system prompt.
- results (List[str]): A list of previous responses.
-
- Returns:
- str: The final system prompt including previous responses.
- """
- return (
- system_prompt
- + "\n"
- + "\n".join(
- [
- f"{i+1}. {str(element)}"
- for i, element in enumerate(results)
- ]
- )
- )
-
- async def _run_agent_async(
+ def step(
self,
- agent: Agent,
task: str,
- prev_responses: Optional[List[str]] = None,
- ) -> str:
- """
- Asynchronous method to run a single agent.
+ img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
+ ):
+ # self.conversation.add(role="User", content=task)
- Args:
- agent (Agent): The agent to be run.
- task (str): The task for the agent.
- prev_responses (Optional[List[str]], optional): A list of previous responses. Defaults to None.
+ # Run agents concurrently
+ with concurrent.futures.ThreadPoolExecutor(
+ max_workers=os.cpu_count()
+ ) as executor:
+ # Submit all agent tasks and store with their index
+ future_to_agent = {
+ executor.submit(
+ agent.run, task=task, img=img, imgs=imgs
+ ): agent
+ for agent in self.agents
+ }
- Returns:
- str: The response from the agent.
- """
- # If there are previous responses, update the agent's system prompt
- if prev_responses:
- system_prompt_with_responses = (
- self._get_final_system_prompt(
- self.aggregator_system_prompt, prev_responses
- )
- )
- agent.system_prompt = system_prompt_with_responses
+ # Collect results and add to conversation in completion order
+ for future in concurrent.futures.as_completed(
+ future_to_agent
+ ):
+ agent = future_to_agent[future]
+ output = future.result()
+                self.conversation.add(role=agent.agent_name, content=output)
- # Run the agent asynchronously
- response = await asyncio.to_thread(agent.run, task)
+ return self.conversation.get_str()
- self.conversation.add(agent.agent_name, response)
+ def _run(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
+ ):
- # Log the agent's response
- print(f"Agent {agent.agent_name} response: {response}")
- return response
+ self.conversation.add(role="User", content=task)
- async def _run_async(self, task: str) -> None:
- """
- Asynchronous method to run the Mixture of Agents process.
+ for i in range(self.layers):
+ out = self.step(
+ task=self.conversation.get_str(), img=img, imgs=imgs
+ )
+ task = out
- Args:
- task (str): The task for the mixture of agents.
- """
- # Gather initial responses from reference agents
- results: List[str] = await asyncio.gather(
- *[
- self._run_agent_async(agent, task)
- for agent in self.agents
- ]
+ out = self.aggregator_agent.run(
+ task=self.conversation.get_str()
)
- # Process additional layers, if applicable
- for _ in range(1, self.layers - 1):
- results = await asyncio.gather(
- *[
- self._run_agent_async(
- agent, task, prev_responses=results
- )
- for agent in self.agents
- ]
- )
-
- # Perform final aggregation using the aggregator agent
- final_result = await self._run_agent_async(
- self.aggregator_agent, task, prev_responses=results
+ self.conversation.add(
+ role=self.aggregator_agent.agent_name, content=out
)
- print(f"Final Aggregated Response: {final_result}")
+ out = history_output_formatter(
+ conversation=self.conversation, type=self.output_type
+ )
- def run(self, task: str) -> None:
- """
- Synchronous wrapper to run the async process.
+ return out
- Args:
- task (str): The task for the mixture of agents.
- """
+ def run(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
+ ):
try:
- self.conversation.add("user", task)
-
- for _ in range(self.max_loops):
- # Add previous context to task if available
- prompt = f"History: {self.conversation.get_str()}\n\nTask: {task}"
-
- # Run async process
- asyncio.run(self._run_async(prompt))
-
- return history_output_formatter(
- conversation=self.conversation,
- type=self.output_type,
- )
-
+ return self._run(task=task, img=img, imgs=imgs)
except Exception as e:
- logger.error(f"Error running mixture of agents: {str(e)}")
- raise e
+ logger.error(f"Error running Mixture of Agents: {e}")
+ return f"Error: {e}"
def run_batched(self, tasks: List[str]) -> List[str]:
"""
diff --git a/swarms/structs/rearrange.py b/swarms/structs/rearrange.py
index 7c59b864..dd9a65c7 100644
--- a/swarms/structs/rearrange.py
+++ b/swarms/structs/rearrange.py
@@ -1,4 +1,3 @@
-import asyncio
import json
import uuid
from concurrent.futures import ThreadPoolExecutor
@@ -282,7 +281,6 @@ class AgentRearrange(BaseSwarm):
)
for task_idx, task in enumerate(tasks):
- is_last = task == tasks[-1]
agent_names = [
name.strip() for name in task.split(",")
]
@@ -298,7 +296,6 @@ class AgentRearrange(BaseSwarm):
result = agent.run(
task=self.conversation.get_str(),
img=img,
- is_last=is_last,
*args,
**kwargs,
)
@@ -327,7 +324,6 @@ class AgentRearrange(BaseSwarm):
current_task = agent.run(
task=self.conversation.get_str(),
img=img,
- is_last=is_last,
*args,
**kwargs,
)
@@ -344,7 +340,8 @@ class AgentRearrange(BaseSwarm):
logger.info("Task execution completed")
return history_output_formatter(
- self.conversation, self.output_type
+ conversation=self.conversation,
+ type=self.output_type,
)
except Exception as e:
@@ -364,11 +361,6 @@ class AgentRearrange(BaseSwarm):
self,
task: str = None,
img: str = None,
- device: str = "cpu",
- device_id: int = 2,
- all_cores: bool = True,
- all_gpus: bool = False,
- no_use_clusterops: bool = True,
*args,
**kwargs,
):
@@ -481,58 +473,11 @@ class AgentRearrange(BaseSwarm):
except Exception as e:
self._catch_error(e)
- async def abatch_run(
- self,
- tasks: List[str],
- img: Optional[List[str]] = None,
- batch_size: int = 10,
- *args,
- **kwargs,
- ) -> List[str]:
- """
- Asynchronously process multiple tasks in batches.
-
- Args:
- tasks: List of tasks to process
- img: Optional list of images corresponding to tasks
- batch_size: Number of tasks to process simultaneously
-
- Returns:
- List of results corresponding to input tasks
- """
- try:
- results = []
- for i in range(0, len(tasks), batch_size):
- batch_tasks = tasks[i : i + batch_size]
- batch_imgs = (
- img[i : i + batch_size]
- if img
- else [None] * len(batch_tasks)
- )
-
- # Process batch using asyncio.gather
- batch_coros = [
- self.astream(
- task=task, img=img_path, *args, **kwargs
- )
- for task, img_path in zip(batch_tasks, batch_imgs)
- ]
- batch_results = await asyncio.gather(*batch_coros)
- results.extend(batch_results)
-
- return results
- except Exception as e:
- self._catch_error(e)
-
def concurrent_run(
self,
tasks: List[str],
img: Optional[List[str]] = None,
max_workers: Optional[int] = None,
- device: str = "cpu",
- device_id: int = None,
- all_cores: bool = True,
- all_gpus: bool = False,
*args,
**kwargs,
) -> List[str]:
@@ -561,10 +506,6 @@ class AgentRearrange(BaseSwarm):
self.run,
task=task,
img=img_path,
- device=device,
- device_id=device_id,
- all_cores=all_cores,
- all_gpus=all_gpus,
*args,
**kwargs,
)
diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py
index 5c26df7f..da6c5e3d 100644
--- a/swarms/structs/sequential_workflow.py
+++ b/swarms/structs/sequential_workflow.py
@@ -49,15 +49,12 @@ class SequentialWorkflow:
self.flow = self.sequential_flow()
self.agent_rearrange = AgentRearrange(
- name=name,
- description=description,
- agents=agents,
+ name=self.name,
+ description=self.description,
+ agents=self.agents,
flow=self.flow,
- max_loops=max_loops,
- output_type=output_type,
- shared_memory_system=shared_memory_system,
- *args,
- **kwargs,
+ max_loops=self.max_loops,
+ output_type=self.output_type,
)
def sequential_flow(self):
@@ -105,11 +102,7 @@ class SequentialWorkflow:
self,
task: str,
img: Optional[str] = None,
- device: str = "cpu",
- all_cores: bool = False,
- all_gpus: bool = False,
- device_id: int = 0,
- no_use_clusterops: bool = True,
+ imgs: Optional[List[str]] = None,
*args,
**kwargs,
):
@@ -134,14 +127,14 @@ class SequentialWorkflow:
"""
try:
- result = self.agent_rearrange.run(
+ return self.agent_rearrange.run(
task=task,
img=img,
- *args,
- **kwargs,
+ # imgs=imgs,
+ # *args,
+ # **kwargs,
)
- return result
except Exception as e:
logger.error(
f"An error occurred while executing the task: {e}"
diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py
index 023db9d0..e79d2f82 100644
--- a/swarms/structs/swarm_router.py
+++ b/swarms/structs/swarm_router.py
@@ -1,6 +1,7 @@
+import concurrent.futures
+import json
import os
-import uuid
-from datetime import datetime
+import traceback
from typing import Any, Callable, Dict, List, Literal, Optional, Union
from pydantic import BaseModel, Field
@@ -20,13 +21,15 @@ from swarms.structs.rearrange import AgentRearrange
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
from swarms.structs.swarm_matcher import swarm_matcher
+from swarms.telemetry.log_executions import log_execution
from swarms.utils.output_types import OutputType
from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.malt import MALT
from swarms.structs.deep_research_swarm import DeepResearchSwarm
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.interactive_groupchat import InteractiveGroupChat
-
+from swarms.structs.ma_utils import list_all_agents
+from swarms.utils.generate_keys import generate_api_key
logger = initialize_logger(log_folder="swarm_router")
@@ -54,25 +57,6 @@ class Document(BaseModel):
data: str
-class SwarmLog(BaseModel):
- """
- A Pydantic model to capture log entries.
- """
-
- id: Optional[str] = Field(
- default_factory=lambda: str(uuid.uuid4())
- )
- timestamp: Optional[datetime] = Field(
- default_factory=datetime.utcnow
- )
- level: Optional[str] = None
- message: Optional[str] = None
- swarm_type: Optional[SwarmType] = None
- task: Optional[str] = ""
- metadata: Optional[Dict[str, Any]] = Field(default_factory=dict)
- documents: List[Document] = []
-
-
class SwarmRouterConfig(BaseModel):
"""Configuration model for SwarmRouter."""
@@ -172,12 +156,11 @@ class SwarmRouter:
concurrent_batch_run(tasks: List[str], *args, **kwargs) -> List[Any]:
Executes multiple tasks concurrently
- get_logs() -> List[SwarmLog]:
- Retrieves execution logs
"""
def __init__(
self,
+ id: str = generate_api_key(prefix="swarm-router"),
name: str = "swarm-router",
description: str = "Routes your task to the desired swarm",
max_loops: int = 1,
@@ -191,15 +174,19 @@ class SwarmRouter:
rules: str = None,
documents: List[str] = [], # A list of docs file paths
output_type: OutputType = "dict-all-except-first",
- no_cluster_ops: bool = False,
speaker_fn: callable = None,
load_agents_from_csv: bool = False,
csv_file_path: str = None,
return_entire_history: bool = True,
multi_agent_collab_prompt: bool = True,
+ list_all_agents: bool = False,
+ conversation: Any = None,
+ agents_config: Optional[Dict[Any, Any]] = None,
+ speaker_function: str = None,
*args,
**kwargs,
):
+ self.id = id
self.name = name
self.description = description
self.max_loops = max_loops
@@ -213,13 +200,16 @@ class SwarmRouter:
self.rules = rules
self.documents = documents
self.output_type = output_type
- self.no_cluster_ops = no_cluster_ops
self.speaker_fn = speaker_fn
self.logs = []
self.load_agents_from_csv = load_agents_from_csv
self.csv_file_path = csv_file_path
self.return_entire_history = return_entire_history
self.multi_agent_collab_prompt = multi_agent_collab_prompt
+ self.list_all_agents = list_all_agents
+ self.conversation = conversation
+ self.agents_config = agents_config
+ self.speaker_function = speaker_function
# Reliability check
self.reliability_check()
@@ -230,6 +220,8 @@ class SwarmRouter:
csv_path=self.csv_file_path
).load_agents()
+ self.agent_config = self.agent_config()
+
def setup(self):
if self.auto_generate_prompts is True:
self.activate_ape()
@@ -276,15 +268,12 @@ class SwarmRouter:
logger.info(
f"Successfully activated APE for {activated_count} agents"
)
- self._log(
- "info",
- f"Activated automatic prompt engineering for {activated_count} agents",
- )
except Exception as e:
error_msg = f"Error activating automatic prompt engineering: {str(e)}"
- logger.error(error_msg)
- self._log("error", error_msg)
+ logger.error(
+ f"Error activating automatic prompt engineering in SwarmRouter: {str(e)}"
+ )
raise RuntimeError(error_msg) from e
def reliability_check(self):
@@ -293,48 +282,24 @@ class SwarmRouter:
Validates essential swarm parameters and configuration before execution.
Handles special case for CouncilAsAJudge which may not require agents.
"""
- logger.info(
- "🔍 [SYSTEM] Initializing advanced swarm reliability diagnostics..."
- )
- logger.info(
- "⚡ [SYSTEM] Running pre-flight checks and system validation..."
- )
# Check swarm type first since it affects other validations
if self.swarm_type is None:
- logger.error(
- "❌ [CRITICAL] Swarm type validation failed - type cannot be 'none'"
+ raise ValueError(
+ "SwarmRouter: Swarm type cannot be 'none'."
)
- raise ValueError("Swarm type cannot be 'none'.")
- # Special handling for CouncilAsAJudge
- if self.swarm_type == "CouncilAsAJudge":
- if self.agents is not None:
- logger.warning(
- "⚠️ [ADVISORY] CouncilAsAJudge detected with agents - this is atypical"
- )
- elif not self.agents:
- logger.error(
- "❌ [CRITICAL] Agent validation failed - no agents detected in swarm"
+ if self.agents is None:
+ raise ValueError(
+ "SwarmRouter: No agents provided for the swarm."
)
- raise ValueError("No agents provided for the swarm.")
# Validate max_loops
if self.max_loops == 0:
- logger.error(
- "❌ [CRITICAL] Loop validation failed - max_loops cannot be 0"
- )
- raise ValueError("max_loops cannot be 0.")
+ raise ValueError("SwarmRouter: max_loops cannot be 0.")
- # Setup other functionality
- logger.info("🔄 [SYSTEM] Initializing swarm subsystems...")
self.setup()
- logger.info(
- "✅ [SYSTEM] All reliability checks passed successfully"
- )
- logger.info("🚀 [SYSTEM] Swarm is ready for deployment")
-
def _create_swarm(self, task: str = None, *args, **kwargs):
"""
Dynamically create and return the specified swarm type or automatically match the best swarm type for a given task.
@@ -395,6 +360,7 @@ class SwarmRouter:
agents=self.agents,
max_loops=self.max_loops,
output_type=self.output_type,
+ speaker_function=self.speaker_function,
)
elif self.swarm_type == "DeepResearchSwarm":
@@ -500,46 +466,24 @@ class SwarmRouter:
def update_system_prompt_for_agent_in_swarm(self):
# Use list comprehension for faster iteration
- [
- setattr(
- agent,
- "system_prompt",
- agent.system_prompt + MULTI_AGENT_COLLAB_PROMPT_TWO,
- )
- for agent in self.agents
- ]
+ for agent in self.agents:
+ if agent.system_prompt is None:
+ agent.system_prompt = ""
+ agent.system_prompt += MULTI_AGENT_COLLAB_PROMPT_TWO
- def _log(
- self,
- level: str,
- message: str,
- task: str = "",
- metadata: Dict[str, Any] = None,
- ):
- """
- Create a log entry and add it to the logs list.
+ def agent_config(self):
+ agent_config = {}
+ for agent in self.agents:
+ agent_config[agent.agent_name] = agent.to_dict()
- Args:
- level (str): The log level (e.g., "info", "error").
- message (str): The log message.
- task (str, optional): The task being performed. Defaults to "".
- metadata (Dict[str, Any], optional): Additional metadata. Defaults to None.
- """
- log_entry = SwarmLog(
- level=level,
- message=message,
- swarm_type=self.swarm_type,
- task=task,
- metadata=metadata or {},
- )
- self.logs.append(log_entry)
- logger.log(level.upper(), message)
+ return agent_config
def _run(
self,
task: str,
img: Optional[str] = None,
model_response: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
*args,
**kwargs,
) -> Any:
@@ -559,17 +503,39 @@ class SwarmRouter:
"""
self.swarm = self._create_swarm(task, *args, **kwargs)
+ if self.swarm_type == "SequentialWorkflow":
+ self.conversation = (
+ self.swarm.agent_rearrange.conversation
+ )
+ else:
+ self.conversation = self.swarm.conversation
+
+ if self.list_all_agents is True:
+ list_all_agents(
+ agents=self.agents,
+ conversation=self.swarm.conversation,
+ name=self.name,
+ description=self.description,
+ add_collaboration_prompt=True,
+ add_to_conversation=True,
+ )
+
if self.multi_agent_collab_prompt is True:
self.update_system_prompt_for_agent_in_swarm()
- try:
- logger.info(
- f"Running task on {self.swarm_type} swarm with task: {task}"
- )
+ log_execution(
+ swarm_id=self.id,
+ status="start",
+ swarm_config=self.to_dict(),
+ swarm_architecture="swarm_router",
+ )
+ try:
if self.swarm_type == "CouncilAsAJudge":
result = self.swarm.run(
task=task,
+ img=img,
+ imgs=imgs,
model_response=model_response,
*args,
**kwargs,
@@ -577,21 +543,24 @@ class SwarmRouter:
else:
result = self.swarm.run(task=task, *args, **kwargs)
- logger.info("Swarm completed successfully")
+ log_execution(
+ swarm_id=self.id,
+ status="completion",
+ swarm_config=self.to_dict(),
+ swarm_architecture="swarm_router",
+ )
+
return result
except Exception as e:
- self._log(
- "error",
- f"Error occurred while running task on {self.swarm_type} swarm: {str(e)}",
- task=task,
- metadata={"error": str(e)},
+ raise RuntimeError(
+ f"SwarmRouter: Error executing task on swarm: {str(e)} Traceback: {traceback.format_exc()}"
)
- raise
def run(
self,
task: str,
img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
model_response: Optional[str] = None,
*args,
**kwargs,
@@ -617,15 +586,24 @@ class SwarmRouter:
return self._run(
task=task,
img=img,
+ imgs=imgs,
model_response=model_response,
*args,
**kwargs,
)
except Exception as e:
- logger.error(f"Error executing task on swarm: {str(e)}")
- raise
+ raise RuntimeError(
+ f"SwarmRouter: Error executing task on swarm: {str(e)} Traceback: {traceback.format_exc()}"
+ )
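
With the new id, imgs, list_all_agents and speaker_function options wired through, a router call might look like the following sketch (agents and task are placeholders):

router = SwarmRouter(
    name="research-router",
    swarm_type="InteractiveGroupChat",
    agents=[analyst, researcher],
    speaker_function="random-speaker",  # forwarded to InteractiveGroupChat
    list_all_agents=True,               # posts the agent table into the conversation
)
result = router.run("@analyst summarize the findings")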
- def __call__(self, task: str, *args, **kwargs) -> Any:
+ def __call__(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
+ *args,
+ **kwargs,
+ ) -> Any:
"""
Make the SwarmRouter instance callable.
@@ -637,10 +615,17 @@ class SwarmRouter:
Returns:
Any: The result of the swarm's execution.
"""
- return self.run(task=task, *args, **kwargs)
+ return self.run(
+ task=task, img=img, imgs=imgs, *args, **kwargs
+ )
def batch_run(
- self, tasks: List[str], *args, **kwargs
+ self,
+ tasks: List[str],
+ img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
+ *args,
+ **kwargs,
) -> List[Any]:
"""
Execute a batch of tasks on the selected or matched swarm type.
@@ -659,21 +644,26 @@ class SwarmRouter:
results = []
for task in tasks:
try:
- result = self.run(task, *args, **kwargs)
+ result = self.run(
+ task, img=img, imgs=imgs, *args, **kwargs
+ )
results.append(result)
except Exception as e:
- self._log(
- "error",
- f"Error occurred while running batch task on {self.swarm_type} swarm: {str(e)}",
- task=task,
- metadata={"error": str(e)},
+ raise RuntimeError(
+ f"SwarmRouter: Error executing batch task on swarm: {str(e)} Traceback: {traceback.format_exc()}"
)
- raise
return results
- def async_run(self, task: str, *args, **kwargs) -> Any:
+ def concurrent_run(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ imgs: Optional[List[str]] = None,
+ *args,
+ **kwargs,
+ ) -> Any:
"""
- Execute a task on the selected or matched swarm type asynchronously.
+ Execute a task on the selected or matched swarm type concurrently.
Args:
task (str): The task to be executed by the swarm.
@@ -686,95 +676,70 @@ class SwarmRouter:
Raises:
Exception: If an error occurs during task execution.
"""
- import asyncio
- async def run_async():
- try:
- result = await asyncio.to_thread(
- self.run, task, *args, **kwargs
- )
- return result
- except Exception as e:
- self._log(
- "error",
- f"Error occurred while running task asynchronously on {self.swarm_type} swarm: {str(e)}",
- task=task,
- metadata={"error": str(e)},
- )
- raise
-
- return asyncio.run(run_async())
+ with concurrent.futures.ThreadPoolExecutor(
+ max_workers=os.cpu_count()
+ ) as executor:
+ future = executor.submit(
+ self.run, task, img=img, imgs=imgs, *args, **kwargs
+ )
+ result = future.result()
+ return result
- def get_logs(self) -> List[SwarmLog]:
+ def _serialize_callable(
+ self, attr_value: Callable
+ ) -> Dict[str, Any]:
"""
- Retrieve all logged entries.
+ Serializes callable attributes by extracting their name and docstring.
+
+ Args:
+ attr_value (Callable): The callable to serialize.
Returns:
- List[SwarmLog]: A list of all log entries.
+ Dict[str, Any]: Dictionary with name and docstring of the callable.
"""
- return self.logs
-
- def concurrent_run(self, task: str, *args, **kwargs) -> Any:
+ return {
+ "name": getattr(
+ attr_value, "__name__", type(attr_value).__name__
+ ),
+ "doc": getattr(attr_value, "__doc__", None),
+ }
+
+ def _serialize_attr(self, attr_name: str, attr_value: Any) -> Any:
"""
- Execute a task on the selected or matched swarm type concurrently.
+ Serializes an individual attribute, handling non-serializable objects.
Args:
- task (str): The task to be executed by the swarm.
- *args: Variable length argument list.
- **kwargs: Arbitrary keyword arguments.
+ attr_name (str): The name of the attribute.
+ attr_value (Any): The value of the attribute.
Returns:
- Any: The result of the swarm's execution.
-
- Raises:
- Exception: If an error occurs during task execution.
+ Any: The serialized value of the attribute.
"""
- from concurrent.futures import ThreadPoolExecutor
-
- with ThreadPoolExecutor(
- max_workers=os.cpu_count()
- ) as executor:
- future = executor.submit(self.run, task, *args, **kwargs)
- result = future.result()
- return result
-
- def concurrent_batch_run(
- self, tasks: List[str], *args, **kwargs
- ) -> List[Any]:
+ try:
+ if callable(attr_value):
+ return self._serialize_callable(attr_value)
+ elif hasattr(attr_value, "to_dict"):
+ return (
+ attr_value.to_dict()
+ ) # Recursive serialization for nested objects
+ else:
+ json.dumps(
+ attr_value
+ ) # Attempt to serialize to catch non-serializable objects
+ return attr_value
+ except (TypeError, ValueError):
+ return f""
+
+ def to_dict(self) -> Dict[str, Any]:
"""
- Execute a batch of tasks on the selected or matched swarm type concurrently.
-
- Args:
- tasks (List[str]): A list of tasks to be executed by the swarm.
- *args: Variable length argument list.
- **kwargs: Arbitrary keyword arguments.
+ Converts all attributes of the class, including callables, into a dictionary.
+ Handles non-serializable attributes by converting them or skipping them.
Returns:
- List[Any]: A list of results from the swarm's execution.
-
- Raises:
- Exception: If an error occurs during task execution.
+ Dict[str, Any]: A dictionary representation of the class attributes.
"""
- from concurrent.futures import (
- ThreadPoolExecutor,
- as_completed,
- )
-
- results = []
- with ThreadPoolExecutor() as executor:
- # Submit all tasks to executor
- futures = [
- executor.submit(self.run, task, *args, **kwargs)
- for task in tasks
- ]
-
- # Process results as they complete rather than waiting for all
- for future in as_completed(futures):
- try:
- result = future.result()
- results.append(result)
- except Exception as e:
- logger.error(f"Task execution failed: {str(e)}")
- results.append(None)
-
- return results
+ return {
+ attr_name: self._serialize_attr(attr_name, attr_value)
+ for attr_name, attr_value in self.__dict__.items()
+ }
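
The serialization helpers exist mainly so the router configuration can be passed to log_execution; a brief sketch of the behaviour (the router instance is a placeholder):

config = router.to_dict()
# Callables (e.g. speaker_fn) collapse to {"name": ..., "doc": ...},
# nested objects exposing to_dict() are recursed, and values that fail
# json.dumps() are replaced with a fallback string.
json.dumps(config)  # safe to serialize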
diff --git a/swarms/telemetry/__init__.py b/swarms/telemetry/__init__.py
index 9792f266..a7f92a78 100644
--- a/swarms/telemetry/__init__.py
+++ b/swarms/telemetry/__init__.py
@@ -1,31 +1,13 @@
from swarms.telemetry.main import (
- generate_unique_identifier,
generate_user_id,
- get_cpu_info,
get_machine_id,
- get_os_version,
- get_package_mismatches,
- get_pip_version,
- get_python_version,
- get_ram_info,
- get_swarms_verison,
- get_system_info,
- get_user_device_data,
- system_info,
+ get_comprehensive_system_info,
+ log_agent_data,
)
__all__ = [
"generate_user_id",
"get_machine_id",
- "get_system_info",
- "generate_unique_identifier",
- "get_python_version",
- "get_pip_version",
- "get_swarms_verison",
- "get_os_version",
- "get_cpu_info",
- "get_ram_info",
- "get_package_mismatches",
- "system_info",
- "get_user_device_data",
+ "get_comprehensive_system_info",
+ "log_agent_data",
]
diff --git a/swarms/telemetry/log_executions.py b/swarms/telemetry/log_executions.py
new file mode 100644
index 00000000..8fd13837
--- /dev/null
+++ b/swarms/telemetry/log_executions.py
@@ -0,0 +1,43 @@
+from typing import Optional
+from swarms.telemetry.main import log_agent_data
+
+
+def log_execution(
+ swarm_id: Optional[str] = None,
+ status: Optional[str] = None,
+ swarm_config: Optional[dict] = None,
+ swarm_architecture: Optional[str] = None,
+):
+ """
+ Log execution data for a swarm router instance.
+
+ This function logs telemetry data about swarm router executions, including
+ the swarm ID, execution status, and configuration details. It silently
+ handles any logging errors to prevent execution interruption.
+
+ Args:
+ swarm_id (str): Unique identifier for the swarm router instance
+ status (str): Current status of the execution (e.g., "start", "completion", "error")
+ swarm_config (dict): Configuration dictionary containing swarm router settings
+ swarm_architecture (str): Name of the swarm architecture used
+ Returns:
+ None
+
+ Example:
+ >>> log_execution(
+ ... swarm_id="swarm-router-abc123",
+ ... status="start",
+ ... swarm_config={"name": "my-swarm", "swarm_type": "SequentialWorkflow"}
+ ... )
+ """
+ try:
+ log_agent_data(
+ data_dict={
+ "swarm_router_id": swarm_id,
+ "status": status,
+ "swarm_router_config": swarm_config,
+ "swarm_architecture": swarm_architecture,
+ }
+ )
+ except Exception:
+ pass
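
# Illustrative usage sketch (not part of the patch): bracketing a swarm run with
# the "start" / "completion" / "error" statuses the docstring above mentions.
# The swarm object, swarm_id, and config values are made up for illustration.
from swarms.telemetry.log_executions import log_execution


def run_with_logging(swarm, task: str):
    swarm_id = "swarm-router-abc123"  # hypothetical identifier
    config = {"name": "my-swarm", "swarm_type": "SequentialWorkflow"}
    log_execution(swarm_id=swarm_id, status="start", swarm_config=config)
    try:
        result = swarm.run(task)
        log_execution(swarm_id=swarm_id, status="completion", swarm_config=config)
        return result
    except Exception:
        log_execution(swarm_id=swarm_id, status="error", swarm_config=config)
        raise
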
diff --git a/swarms/telemetry/main.py b/swarms/telemetry/main.py
index 5c81a90b..597680c1 100644
--- a/swarms/telemetry/main.py
+++ b/swarms/telemetry/main.py
@@ -1,24 +1,13 @@
-import asyncio
-
-
import datetime
import hashlib
import platform
import socket
-import subprocess
import uuid
-from concurrent.futures import ThreadPoolExecutor
-from functools import lru_cache
-from threading import Lock
-from typing import Dict
+from typing import Any, Dict
-import aiohttp
-import pkg_resources
import psutil
-import toml
-from requests import Session
-from requests.adapters import HTTPAdapter
-from urllib3.util.retry import Retry
+import requests
+from functools import lru_cache
# Helper functions
@@ -42,355 +31,104 @@ def get_machine_id():
return hashed_id
-def get_system_info():
- """
- Gathers basic system information.
-
- Returns:
- dict: A dictionary containing system-related information.
- """
- info = {
+@lru_cache(maxsize=1)
+def get_comprehensive_system_info() -> Dict[str, Any]:
+ # Basic platform and hardware information
+ system_data = {
"platform": platform.system(),
"platform_release": platform.release(),
"platform_version": platform.version(),
+ "platform_full": platform.platform(),
"architecture": platform.machine(),
+ "architecture_details": platform.architecture()[0],
+ "processor": platform.processor(),
"hostname": socket.gethostname(),
- "ip_address": socket.gethostbyname(socket.gethostname()),
- "mac_address": ":".join(
+ }
+
+ # MAC address
+ try:
+ system_data["mac_address"] = ":".join(
[
f"{(uuid.getnode() >> elements) & 0xFF:02x}"
for elements in range(0, 2 * 6, 8)
][::-1]
- ),
- "processor": platform.processor(),
- "python_version": platform.python_version(),
- "Misc": system_info(),
- }
- return info
-
-
-def generate_unique_identifier():
- """Generate unique identifier
-
- Returns:
- str: unique id
-
- """
- system_info = get_system_info()
- unique_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(system_info))
- return str(unique_id)
-
-
-def get_local_ip():
- """Get local ip
-
- Returns:
- str: local ip
-
- """
- return socket.gethostbyname(socket.gethostname())
-
-
-def get_user_device_data():
- data = {
- "ID": generate_user_id(),
- "Machine ID": get_machine_id(),
- "System Info": get_system_info(),
- "UniqueID": generate_unique_identifier(),
- }
- return data
-
-
-def get_python_version():
- return platform.python_version()
-
-
-def get_pip_version() -> str:
- """Get pip version
-
- Returns:
- str: The version of pip installed
- """
- try:
- pip_version = (
- subprocess.check_output(["pip", "--version"])
- .decode()
- .split()[1]
- )
- except Exception as e:
- pip_version = str(e)
- return pip_version
-
-
-def get_swarms_verison() -> tuple[str, str]:
- """Get swarms version from both command line and package
-
- Returns:
- tuple[str, str]: A tuple containing (command line version, package version)
- """
- try:
- swarms_verison_cmd = (
- subprocess.check_output(["swarms", "--version"])
- .decode()
- .split()[1]
)
except Exception as e:
- swarms_verison_cmd = str(e)
- swarms_verison_pkg = pkg_resources.get_distribution(
- "swarms"
- ).version
- swarms_verison = swarms_verison_cmd, swarms_verison_pkg
- return swarms_verison
+ system_data["mac_address"] = f"Error: {str(e)}"
+ # CPU information
+ system_data["cpu_count_logical"] = psutil.cpu_count(logical=True)
+ system_data["cpu_count_physical"] = psutil.cpu_count(
+ logical=False
+ )
-def get_os_version() -> str:
- """Get operating system version
-
- Returns:
- str: The operating system version and platform details
- """
- return platform.platform()
-
-
-def get_cpu_info() -> str:
- """Get CPU information
-
- Returns:
- str: The processor information
- """
- return platform.processor()
-
-
-def get_ram_info() -> str:
- """Get RAM information
-
- Returns:
- str: A formatted string containing total, used and free RAM in GB
- """
+ # Memory information
vm = psutil.virtual_memory()
+ total_ram_gb = vm.total / (1024**3)
used_ram_gb = vm.used / (1024**3)
free_ram_gb = vm.free / (1024**3)
- total_ram_gb = vm.total / (1024**3)
- return (
- f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:"
- f" {free_ram_gb:.2f}"
+ available_ram_gb = vm.available / (1024**3)
+
+ system_data.update(
+ {
+ "memory_total_gb": f"{total_ram_gb:.2f}",
+ "memory_used_gb": f"{used_ram_gb:.2f}",
+ "memory_free_gb": f"{free_ram_gb:.2f}",
+ "memory_available_gb": f"{available_ram_gb:.2f}",
+ "memory_summary": f"Total: {total_ram_gb:.2f} GB, Used: {used_ram_gb:.2f} GB, Free: {free_ram_gb:.2f} GB, Available: {available_ram_gb:.2f} GB",
+ }
)
+ # Python version
+ system_data["python_version"] = platform.python_version()
-def get_package_mismatches(file_path: str = "pyproject.toml") -> str:
- """Get package version mismatches between pyproject.toml and installed packages
-
- Args:
- file_path (str, optional): Path to pyproject.toml file. Defaults to "pyproject.toml".
-
- Returns:
- str: A formatted string containing package version mismatches
- """
- with open(file_path) as file:
- pyproject = toml.load(file)
- dependencies = pyproject["tool"]["poetry"]["dependencies"]
- dev_dependencies = pyproject["tool"]["poetry"]["group"]["dev"][
- "dependencies"
- ]
- dependencies.update(dev_dependencies)
-
- installed_packages = {
- pkg.key: pkg.version for pkg in pkg_resources.working_set
- }
-
- mismatches = []
- for package, version_info in dependencies.items():
- if isinstance(version_info, dict):
- version_info = version_info["version"]
- installed_version = installed_packages.get(package)
- if installed_version and version_info.startswith("^"):
- expected_version = version_info[1:]
- if not installed_version.startswith(expected_version):
- mismatches.append(
- f"\t {package}: Mismatch,"
- f" pyproject.toml={expected_version},"
- f" pip={installed_version}"
- )
- else:
- mismatches.append(f"\t {package}: Not found in pip list")
-
- return "\n" + "\n".join(mismatches)
-
-
-def system_info() -> dict[str, str]:
- """Get system information including Python, pip, OS, CPU and RAM details
-
- Returns:
- dict[str, str]: A dictionary containing system information
- """
- return {
- "Python Version": get_python_version(),
- "Pip Version": get_pip_version(),
- # "Swarms Version": swarms_verison,
- "OS Version and Architecture": get_os_version(),
- "CPU Info": get_cpu_info(),
- "RAM Info": get_ram_info(),
- }
-
-
-def capture_system_data() -> Dict[str, str]:
- """
- Captures extensive system data including platform information, user ID, IP address, CPU count,
- memory information, and other system details.
-
- Returns:
- Dict[str, str]: A dictionary containing system data.
- """
+ # Generate unique identifier based on system info
try:
- system_data = {
- "platform": platform.system(),
- "platform_version": platform.version(),
- "platform_release": platform.release(),
- "hostname": socket.gethostname(),
- "ip_address": socket.gethostbyname(socket.gethostname()),
- "cpu_count": psutil.cpu_count(logical=True),
- "memory_total": f"{psutil.virtual_memory().total / (1024 ** 3):.2f} GB",
- "memory_available": f"{psutil.virtual_memory().available / (1024 ** 3):.2f} GB",
- "user_id": str(uuid.uuid4()), # Unique user identifier
- "machine_type": platform.machine(),
- "processor": platform.processor(),
- "architecture": platform.architecture()[0],
- }
-
- return system_data
+ unique_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(system_data))
+ system_data["unique_identifier"] = str(unique_id)
except Exception as e:
- # logger.error("Failed to capture system data: {}", e)
- print(f"Failed to capture system data: {e}")
-
-
-# Global variables
-_session = None
-_session_lock = Lock()
-_executor = ThreadPoolExecutor(max_workers=10)
-_aiohttp_session = None
-
-
-def get_session() -> Session:
- """Thread-safe session getter with optimized connection pooling"""
- global _session
- if _session is None:
- with _session_lock:
- if _session is None: # Double-check pattern
- _session = Session()
- adapter = HTTPAdapter(
- pool_connections=1000, # Increased pool size
- pool_maxsize=1000, # Increased max size
- max_retries=Retry(
- total=3,
- backoff_factor=0.1,
- status_forcelist=[500, 502, 503, 504],
- ),
- pool_block=False, # Non-blocking pool
- )
- _session.mount("http://", adapter)
- _session.mount("https://", adapter)
- _session.headers.update(
- {
- "Content-Type": "application/json",
- "Authorization": "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24",
- "Connection": "keep-alive", # Enable keep-alive
- }
- )
- return _session
-
-
-@lru_cache(maxsize=2048, typed=True)
-def get_user_device_data_cached():
- """Cached version with increased cache size"""
- return get_user_device_data()
+ system_data["unique_identifier"] = f"Error: {str(e)}"
+ return system_data
-async def get_aiohttp_session():
- """Get or create aiohttp session for async requests"""
- global _aiohttp_session
- if _aiohttp_session is None or _aiohttp_session.closed:
- timeout = aiohttp.ClientTimeout(total=10)
- connector = aiohttp.TCPConnector(
- limit=1000, # Connection limit
- ttl_dns_cache=300, # DNS cache TTL
- use_dns_cache=True, # Enable DNS caching
- keepalive_timeout=60, # Keep-alive timeout
- )
- _aiohttp_session = aiohttp.ClientSession(
- timeout=timeout,
- connector=connector,
- headers={
- "Content-Type": "application/json",
- "Authorization": "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24",
- },
- )
- return _aiohttp_session
-
-async def log_agent_data_async(data_dict: dict):
- """Asynchronous version of log_agent_data"""
- if not data_dict:
- return None
+def _log_agent_data(data_dict: dict):
+    """Log agent data to the swarms.world telemetry endpoint using the requests library"""
url = "https://swarms.world/api/get-agents/log-agents"
- payload = {
+
+ log = {
"data": data_dict,
- "system_data": get_user_device_data_cached(),
+ "system_data": get_comprehensive_system_info(),
"timestamp": datetime.datetime.now(
datetime.timezone.utc
).isoformat(),
}
- session = await get_aiohttp_session()
- try:
- async with session.post(url, json=payload) as response:
- if response.status == 200:
- return await response.json()
- except Exception:
- return None
-
-
-def _log_agent_data(data_dict: dict):
- """
- Enhanced log_agent_data with both sync and async capabilities
- """
- if not data_dict:
- return None
+ payload = {
+ "data": log,
+ }
- # If running in an event loop, use async version
- try:
- loop = asyncio.get_event_loop()
- if loop.is_running():
- return asyncio.create_task(
- log_agent_data_async(data_dict)
- )
- except RuntimeError:
- pass
+ key = "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24"
- # Fallback to optimized sync version
- url = "https://swarms.world/api/get-agents/log-agents"
- payload = {
- "data": data_dict,
- "system_data": get_user_device_data_cached(),
- "timestamp": datetime.datetime.now(
- datetime.timezone.utc
- ).isoformat(),
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": key,
}
+ response = requests.post(
+ url, json=payload, headers=headers, timeout=10
+ )
+
try:
- session = get_session()
- response = session.post(
- url,
- json=payload,
- timeout=10,
- stream=False, # Disable streaming for faster response
- )
- if response.ok and response.text.strip():
- return response.json()
+ if response.status_code == 200:
+ return
except Exception:
- return None
+ pass
def log_agent_data(data_dict: dict):
- """Log agent data"""
- pass
+ try:
+ _log_agent_data(data_dict)
+ except Exception:
+ pass
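
# Illustrative usage sketch (not part of the patch): the slimmed-down telemetry
# surface is two calls, a cached system snapshot and a fire-and-forget logger.
# The data_dict keys below are made up for illustration.
from swarms.telemetry.main import get_comprehensive_system_info, log_agent_data

info = get_comprehensive_system_info()  # cached after the first call (lru_cache)
print(info["platform"], info["memory_summary"])

log_agent_data({"event": "example", "note": "never raises, even when offline"})
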
diff --git a/swarms/tools/__init__.py b/swarms/tools/__init__.py
index e6b8032f..a437ccc3 100644
--- a/swarms/tools/__init__.py
+++ b/swarms/tools/__init__.py
@@ -33,6 +33,11 @@ from swarms.tools.mcp_client_call import (
get_tools_for_multiple_mcp_servers,
get_mcp_tools_sync,
aget_mcp_tools,
+ execute_multiple_tools_on_multiple_mcp_servers,
+ execute_multiple_tools_on_multiple_mcp_servers_sync,
+ _create_server_tool_mapping,
+ _create_server_tool_mapping_async,
+ _execute_tool_on_server,
)
@@ -62,4 +67,9 @@ __all__ = [
"get_tools_for_multiple_mcp_servers",
"get_mcp_tools_sync",
"aget_mcp_tools",
+ "execute_multiple_tools_on_multiple_mcp_servers",
+ "execute_multiple_tools_on_multiple_mcp_servers_sync",
+ "_create_server_tool_mapping",
+ "_create_server_tool_mapping_async",
+ "_execute_tool_on_server",
]
diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py
index 04add0c7..af08f11e 100644
--- a/swarms/tools/base_tool.py
+++ b/swarms/tools/base_tool.py
@@ -2223,8 +2223,13 @@ class BaseTool(BaseModel):
>>> tool_calls = [ChatCompletionMessageToolCall(...), ...]
>>> results = tool.execute_function_calls_from_api_response(tool_calls)
"""
+ # Handle None API response gracefully by returning empty results
if api_response is None:
- raise ToolValidationError("API response cannot be None")
+ self._log_if_verbose(
+ "warning",
+ "API response is None, returning empty results. This may indicate the LLM did not return a valid response.",
+ )
+            return []
# Handle direct list of tool call objects (e.g., from OpenAI ChatCompletionMessageToolCall or Anthropic BaseModels)
if isinstance(api_response, list):
@@ -2256,14 +2261,18 @@ class BaseTool(BaseModel):
try:
api_response = json.loads(api_response)
except json.JSONDecodeError as e:
- raise ToolValidationError(
- f"Invalid JSON in API response: {e}"
- ) from e
+ self._log_if_verbose(
+ "error",
+ f"Failed to parse JSON from API response: {e}. Response: '{api_response[:100]}...'",
+ )
+ return []
if not isinstance(api_response, dict):
- raise ToolValidationError(
- "API response must be a dictionary, JSON string, BaseModel, or list of tool calls"
+ self._log_if_verbose(
+ "warning",
+ f"API response is not a dictionary (type: {type(api_response)}), returning empty list",
)
+ return []
# Extract function calls from dictionary response
function_calls = (
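
# Illustrative sketch (not part of the patch): with the changes above,
# execute_function_calls_from_api_response degrades gracefully instead of
# raising ToolValidationError on malformed LLM output. `tool` is assumed to be
# an already-constructed BaseTool instance (construction omitted here).
def safe_execute(tool, api_response):
    # None, invalid JSON strings, and non-dict responses now all yield an
    # empty list, so callers no longer need a try/except around this call.
    results = tool.execute_function_calls_from_api_response(api_response)
    if not results:
        print("No executable tool calls in response")
    return results
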
diff --git a/swarms/tools/mcp_client_call.py b/swarms/tools/mcp_client_call.py
index 25302c78..3fa3a9fa 100644
--- a/swarms/tools/mcp_client_call.py
+++ b/swarms/tools/mcp_client_call.py
@@ -494,6 +494,9 @@ async def execute_tool_call_simple(
*args,
**kwargs,
) -> List[Dict[str, Any]]:
+ if isinstance(response, str):
+ response = json.loads(response)
+
return await _execute_tool_call_simple(
response=response,
server_path=server_path,
@@ -502,3 +505,511 @@ async def execute_tool_call_simple(
*args,
**kwargs,
)
+
+
+def _create_server_tool_mapping(
+ urls: List[str],
+    connections: Optional[List[MCPConnection]] = None,
+ format: str = "openai",
+) -> Dict[str, Dict[str, Any]]:
+ """
+ Create a mapping of function names to server information for all MCP servers.
+
+ Args:
+ urls: List of server URLs
+ connections: Optional list of MCPConnection objects
+ format: Format to fetch tools in
+
+ Returns:
+ Dict mapping function names to server info (url, connection, tool)
+ """
+ server_tool_mapping = {}
+
+ for i, url in enumerate(urls):
+ connection = (
+ connections[i]
+ if connections and i < len(connections)
+ else None
+ )
+
+ try:
+ # Get tools for this server
+ tools = get_mcp_tools_sync(
+ server_path=url,
+ connection=connection,
+ format=format,
+ )
+
+ # Create mapping for each tool
+ for tool in tools:
+ if isinstance(tool, dict) and "function" in tool:
+ function_name = tool["function"]["name"]
+ server_tool_mapping[function_name] = {
+ "url": url,
+ "connection": connection,
+ "tool": tool,
+ "server_index": i,
+ }
+ elif hasattr(tool, "name"):
+ # Handle MCPTool objects
+ server_tool_mapping[tool.name] = {
+ "url": url,
+ "connection": connection,
+ "tool": tool,
+ "server_index": i,
+ }
+
+ except Exception as e:
+ logger.warning(
+ f"Failed to fetch tools from server {url}: {str(e)}"
+ )
+ continue
+
+ return server_tool_mapping
+
+
+async def _create_server_tool_mapping_async(
+ urls: List[str],
+    connections: Optional[List[MCPConnection]] = None,
+ format: str = "openai",
+) -> Dict[str, Dict[str, Any]]:
+ """
+ Async version: Create a mapping of function names to server information for all MCP servers.
+
+ Args:
+ urls: List of server URLs
+ connections: Optional list of MCPConnection objects
+ format: Format to fetch tools in
+
+ Returns:
+ Dict mapping function names to server info (url, connection, tool)
+ """
+ server_tool_mapping = {}
+
+ for i, url in enumerate(urls):
+ connection = (
+ connections[i]
+ if connections and i < len(connections)
+ else None
+ )
+
+ try:
+ # Get tools for this server using async function
+ tools = await aget_mcp_tools(
+ server_path=url,
+ connection=connection,
+ format=format,
+ )
+
+ # Create mapping for each tool
+ for tool in tools:
+ if isinstance(tool, dict) and "function" in tool:
+ function_name = tool["function"]["name"]
+ server_tool_mapping[function_name] = {
+ "url": url,
+ "connection": connection,
+ "tool": tool,
+ "server_index": i,
+ }
+ elif hasattr(tool, "name"):
+ # Handle MCPTool objects
+ server_tool_mapping[tool.name] = {
+ "url": url,
+ "connection": connection,
+ "tool": tool,
+ "server_index": i,
+ }
+
+ except Exception as e:
+ logger.warning(
+ f"Failed to fetch tools from server {url}: {str(e)}"
+ )
+ continue
+
+ return server_tool_mapping
+
+
+async def _execute_tool_on_server(
+ tool_call: Dict[str, Any],
+ server_info: Dict[str, Any],
+ output_type: Literal["json", "dict", "str", "formatted"] = "str",
+) -> Dict[str, Any]:
+ """
+ Execute a single tool call on a specific server.
+
+ Args:
+ tool_call: The tool call to execute
+ server_info: Server information from the mapping
+ output_type: Output format type
+
+ Returns:
+ Execution result with server metadata
+ """
+ try:
+ result = await _execute_tool_call_simple(
+ response=tool_call,
+ server_path=server_info["url"],
+ connection=server_info["connection"],
+ output_type=output_type,
+ )
+
+ return {
+ "server_url": server_info["url"],
+ "server_index": server_info["server_index"],
+ "function_name": tool_call.get("function", {}).get(
+ "name", "unknown"
+ ),
+ "result": result,
+ "status": "success",
+ }
+
+ except Exception as e:
+ logger.error(
+ f"Failed to execute tool on server {server_info['url']}: {str(e)}"
+ )
+ return {
+ "server_url": server_info["url"],
+ "server_index": server_info["server_index"],
+ "function_name": tool_call.get("function", {}).get(
+ "name", "unknown"
+ ),
+ "result": None,
+ "error": str(e),
+ "status": "error",
+ }
+
+
+async def execute_multiple_tools_on_multiple_mcp_servers(
+ responses: List[Dict[str, Any]],
+ urls: List[str],
+    connections: Optional[List[MCPConnection]] = None,
+ output_type: Literal["json", "dict", "str", "formatted"] = "str",
+ max_concurrent: Optional[int] = None,
+ *args,
+ **kwargs,
+) -> List[Dict[str, Any]]:
+ """
+ Execute multiple tool calls across multiple MCP servers.
+
+ This function creates a mapping of function names to servers, then for each response
+ that contains tool calls, it finds the appropriate server for each function and
+ executes the calls concurrently.
+
+ Args:
+ responses: List of responses containing tool calls (OpenAI format)
+ urls: List of MCP server URLs
+ connections: Optional list of MCPConnection objects corresponding to each URL
+ output_type: Output format type for results
+ max_concurrent: Maximum number of concurrent executions (default: len(responses))
+
+ Returns:
+ List of execution results with server metadata
+
+ Example:
+ # Example responses format:
+ responses = [
+ {
+ "function": {
+ "name": "search_web",
+ "arguments": {"query": "python programming"}
+ }
+ },
+ {
+ "function": {
+ "name": "search_database",
+ "arguments": {"table": "users", "id": 123}
+ }
+ }
+ ]
+
+ urls = ["http://server1:8000", "http://server2:8000"]
+
+ results = await execute_multiple_tools_on_multiple_mcp_servers(
+ responses=responses,
+ urls=urls
+ )
+ """
+ if not responses:
+ logger.warning("No responses provided for execution")
+ return []
+
+ if not urls:
+ raise MCPValidationError("No server URLs provided")
+
+ # Create mapping of function names to servers using async version
+ logger.info(f"Creating tool mapping for {len(urls)} servers")
+ server_tool_mapping = await _create_server_tool_mapping_async(
+ urls=urls, connections=connections, format="openai"
+ )
+
+ if not server_tool_mapping:
+ raise MCPExecutionError(
+ "No tools found on any of the provided servers"
+ )
+
+ logger.info(
+ f"Found {len(server_tool_mapping)} unique functions across all servers"
+ )
+
+ # Extract all tool calls from responses
+ all_tool_calls = []
+ logger.info(
+ f"Processing {len(responses)} responses for tool call extraction"
+ )
+
+ # Check if responses are individual characters that need to be reconstructed
+ if len(responses) > 10 and all(
+ isinstance(r, str) and len(r) == 1 for r in responses
+ ):
+ logger.info(
+ "Detected character-by-character response, reconstructing JSON string"
+ )
+ try:
+ reconstructed_response = "".join(responses)
+ logger.info(
+ f"Reconstructed response length: {len(reconstructed_response)}"
+ )
+ logger.debug(
+ f"Reconstructed response: {reconstructed_response}"
+ )
+
+ # Try to parse the reconstructed response to validate it
+ try:
+ json.loads(reconstructed_response)
+ logger.info(
+ "Successfully validated reconstructed JSON response"
+ )
+ except json.JSONDecodeError as e:
+ logger.warning(
+ f"Reconstructed response is not valid JSON: {str(e)}"
+ )
+ logger.debug(
+ f"First 100 chars: {reconstructed_response[:100]}"
+ )
+ logger.debug(
+ f"Last 100 chars: {reconstructed_response[-100:]}"
+ )
+
+ responses = [reconstructed_response]
+ except Exception as e:
+ logger.warning(
+ f"Failed to reconstruct response from characters: {str(e)}"
+ )
+
+ for i, response in enumerate(responses):
+ logger.debug(
+ f"Processing response {i}: {type(response)} - {response}"
+ )
+
+ # Handle JSON string responses
+ if isinstance(response, str):
+ try:
+ response = json.loads(response)
+ logger.debug(
+ f"Parsed JSON string response {i}: {response}"
+ )
+ except json.JSONDecodeError:
+ logger.warning(
+ f"Failed to parse JSON response at index {i}: {response}"
+ )
+ continue
+
+ if isinstance(response, dict):
+ # Single tool call
+ if "function" in response:
+ logger.debug(
+ f"Found single tool call in response {i}: {response['function']}"
+ )
+ # Parse arguments if they're a JSON string
+ if isinstance(
+ response["function"].get("arguments"), str
+ ):
+ try:
+ response["function"]["arguments"] = (
+ json.loads(
+ response["function"]["arguments"]
+ )
+ )
+ logger.debug(
+ f"Parsed function arguments: {response['function']['arguments']}"
+ )
+ except json.JSONDecodeError:
+ logger.warning(
+ f"Failed to parse function arguments: {response['function']['arguments']}"
+ )
+
+ all_tool_calls.append((i, response))
+ # Multiple tool calls
+ elif "tool_calls" in response:
+ logger.debug(
+ f"Found multiple tool calls in response {i}: {len(response['tool_calls'])} calls"
+ )
+ for tool_call in response["tool_calls"]:
+ # Parse arguments if they're a JSON string
+ if isinstance(
+ tool_call.get("function", {}).get(
+ "arguments"
+ ),
+ str,
+ ):
+ try:
+ tool_call["function"]["arguments"] = (
+ json.loads(
+ tool_call["function"]["arguments"]
+ )
+ )
+ logger.debug(
+ f"Parsed tool call arguments: {tool_call['function']['arguments']}"
+ )
+ except json.JSONDecodeError:
+ logger.warning(
+ f"Failed to parse tool call arguments: {tool_call['function']['arguments']}"
+ )
+
+ all_tool_calls.append((i, tool_call))
+ # Direct tool call
+ elif "name" in response and "arguments" in response:
+ logger.debug(
+ f"Found direct tool call in response {i}: {response}"
+ )
+ # Parse arguments if they're a JSON string
+ if isinstance(response.get("arguments"), str):
+ try:
+ response["arguments"] = json.loads(
+ response["arguments"]
+ )
+ logger.debug(
+ f"Parsed direct tool call arguments: {response['arguments']}"
+ )
+ except json.JSONDecodeError:
+ logger.warning(
+ f"Failed to parse direct tool call arguments: {response['arguments']}"
+ )
+
+ all_tool_calls.append((i, {"function": response}))
+ else:
+ logger.debug(
+ f"Response {i} is a dict but doesn't match expected tool call formats: {list(response.keys())}"
+ )
+ else:
+ logger.warning(
+ f"Unsupported response type at index {i}: {type(response)}"
+ )
+ continue
+
+ if not all_tool_calls:
+ logger.warning("No tool calls found in responses")
+ return []
+
+ logger.info(f"Found {len(all_tool_calls)} tool calls to execute")
+
+ # Execute tool calls concurrently
+ max_concurrent = max_concurrent or len(all_tool_calls)
+ semaphore = asyncio.Semaphore(max_concurrent)
+
+ async def execute_with_semaphore(tool_call_info):
+ async with semaphore:
+ response_index, tool_call = tool_call_info
+ function_name = tool_call.get("function", {}).get(
+ "name", "unknown"
+ )
+
+ if function_name not in server_tool_mapping:
+ logger.warning(
+ f"Function '{function_name}' not found on any server"
+ )
+ return {
+ "response_index": response_index,
+ "function_name": function_name,
+ "result": None,
+ "error": f"Function '{function_name}' not available on any server",
+ "status": "not_found",
+ }
+
+ server_info = server_tool_mapping[function_name]
+ result = await _execute_tool_on_server(
+ tool_call=tool_call,
+ server_info=server_info,
+ output_type=output_type,
+ )
+ result["response_index"] = response_index
+ return result
+
+ # Execute all tool calls concurrently
+ tasks = [
+ execute_with_semaphore(tool_call_info)
+ for tool_call_info in all_tool_calls
+ ]
+ results = await asyncio.gather(*tasks, return_exceptions=True)
+
+ # Process results and handle exceptions
+ processed_results = []
+ for i, result in enumerate(results):
+ if isinstance(result, Exception):
+ logger.error(
+ f"Task {i} failed with exception: {str(result)}"
+ )
+ processed_results.append(
+ {
+ "response_index": (
+ all_tool_calls[i][0]
+ if i < len(all_tool_calls)
+ else -1
+ ),
+ "function_name": "unknown",
+ "result": None,
+ "error": str(result),
+ "status": "exception",
+ }
+ )
+ else:
+ processed_results.append(result)
+
+ logger.info(
+ f"Completed execution of {len(processed_results)} tool calls"
+ )
+ return processed_results
+
+
+def execute_multiple_tools_on_multiple_mcp_servers_sync(
+ responses: List[Dict[str, Any]],
+ urls: List[str],
+    connections: Optional[List[MCPConnection]] = None,
+ output_type: Literal["json", "dict", "str", "formatted"] = "str",
+ max_concurrent: Optional[int] = None,
+ *args,
+ **kwargs,
+) -> List[Dict[str, Any]]:
+ """
+ Synchronous version of execute_multiple_tools_on_multiple_mcp_servers.
+
+ Args:
+ responses: List of responses containing tool calls (OpenAI format)
+ urls: List of MCP server URLs
+ connections: Optional list of MCPConnection objects corresponding to each URL
+ output_type: Output format type for results
+ max_concurrent: Maximum number of concurrent executions
+
+ Returns:
+ List of execution results with server metadata
+ """
+ with get_or_create_event_loop() as loop:
+ try:
+ return loop.run_until_complete(
+ execute_multiple_tools_on_multiple_mcp_servers(
+ responses=responses,
+ urls=urls,
+ connections=connections,
+ output_type=output_type,
+ max_concurrent=max_concurrent,
+ *args,
+ **kwargs,
+ )
+ )
+ except Exception as e:
+ logger.error(
+ f"Error in execute_multiple_tools_on_multiple_mcp_servers_sync: {str(e)}"
+ )
+ raise MCPExecutionError(
+ f"Failed to execute multiple tools sync: {str(e)}"
+ )
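
# Illustrative usage sketch (not part of the patch): driving the new sync
# multi-server helper end to end. The URLs and function names are placeholders;
# real servers must expose tools matching the calls in `responses`.
from swarms.tools.mcp_client_call import (
    execute_multiple_tools_on_multiple_mcp_servers_sync,
)

responses = [
    {"function": {"name": "search_web", "arguments": '{"query": "python"}'}},
    {"function": {"name": "search_database", "arguments": '{"table": "users", "id": 123}'}},
]

results = execute_multiple_tools_on_multiple_mcp_servers_sync(
    responses=responses,
    urls=["http://server1:8000", "http://server2:8000"],
    output_type="dict",
    max_concurrent=2,
)
for r in results:
    print(r["function_name"], r["status"], r.get("server_url"))
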
diff --git a/swarms/tools/py_func_to_openai_func_str.py b/swarms/tools/py_func_to_openai_func_str.py
index d7dc0530..26f64455 100644
--- a/swarms/tools/py_func_to_openai_func_str.py
+++ b/swarms/tools/py_func_to_openai_func_str.py
@@ -492,7 +492,6 @@ def convert_multiple_functions_to_openai_function_schema(
# ]
    # Use 80% of cpu cores
max_workers = int(os.cpu_count() * 0.8)
- print(f"max_workers: {max_workers}")
with concurrent.futures.ThreadPoolExecutor(
max_workers=max_workers
diff --git a/swarms/utils/__init__.py b/swarms/utils/__init__.py
index 53cbcea6..f331c6b9 100644
--- a/swarms/utils/__init__.py
+++ b/swarms/utils/__init__.py
@@ -20,6 +20,9 @@ from swarms.utils.output_types import HistoryOutputType
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
+from swarms.utils.check_all_model_max_tokens import (
+ check_all_model_max_tokens,
+)
__all__ = [
@@ -39,4 +42,5 @@ __all__ = [
"count_tokens",
"HistoryOutputType",
"history_output_formatter",
+ "check_all_model_max_tokens",
]
diff --git a/swarms/utils/auto_download_check_packages.py b/swarms/utils/auto_download_check_packages.py
index ea694a16..187e2b11 100644
--- a/swarms/utils/auto_download_check_packages.py
+++ b/swarms/utils/auto_download_check_packages.py
@@ -8,9 +8,10 @@ import subprocess
import sys
from typing import Literal, Optional, Union
from swarms.utils.loguru_logger import initialize_logger
-import pkg_resources
+from importlib.metadata import distribution, PackageNotFoundError
+
logger = initialize_logger("autocheckpackages")
@@ -39,13 +40,13 @@ def check_and_install_package(
# Check if package exists
if package_manager == "pip":
try:
- pkg_resources.get_distribution(package_name)
+ distribution(package_name)
if not upgrade:
logger.info(
f"Package {package_name} is already installed"
)
return True
- except pkg_resources.DistributionNotFound:
+ except PackageNotFoundError:
pass
# Construct installation command
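
# Illustrative sketch (not part of the patch): the importlib.metadata calls that
# replace pkg_resources above, shown standalone.
from importlib.metadata import PackageNotFoundError, distribution


def is_installed(package_name: str) -> bool:
    try:
        dist = distribution(package_name)
        print(f"{package_name} {dist.version} is already installed")
        return True
    except PackageNotFoundError:
        return False
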
diff --git a/swarms/utils/check_all_model_max_tokens.py b/swarms/utils/check_all_model_max_tokens.py
new file mode 100644
index 00000000..c641fcd4
--- /dev/null
+++ b/swarms/utils/check_all_model_max_tokens.py
@@ -0,0 +1,43 @@
+from litellm import model_list, get_max_tokens
+from swarms.utils.formatter import formatter
+
+# Add model overrides here
+MODEL_MAX_TOKEN_OVERRIDES = {
+ "llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf": 4096, # Example override
+}
+
+
+def check_all_model_max_tokens():
+ """
+ Check and display the maximum token limits for all available models.
+
+ This function iterates through all models in the litellm model list and attempts
+ to retrieve their maximum token limits. For models that are not properly mapped
+ in litellm, it checks for custom overrides in MODEL_MAX_TOKEN_OVERRIDES.
+
+    Returns:
+        str: The formatted report text, which is also printed to the console via formatter.print_panel()
+
+ Note:
+ Models that are not mapped in litellm and have no override set will be
+ marked with a [WARNING] in the output.
+ """
+ text = ""
+ for model in model_list:
+        # Look up the model's max tokens; fall back to an override if litellm has no mapping
+ try:
+ max_tokens = get_max_tokens(model)
+ except Exception:
+ max_tokens = MODEL_MAX_TOKEN_OVERRIDES.get(
+ model, "[NOT MAPPED]"
+ )
+ if max_tokens == "[NOT MAPPED]":
+ text += f"[WARNING] {model}: not mapped in litellm and no override set.\n"
+ text += f"{model}: {max_tokens}\n"
+ text += "─" * 80 + "\n" # Add borderline for each model
+ formatter.print_panel(text, "All Model Max Tokens")
+ return text
+
+
+# if __name__ == "__main__":
+# print(check_all_model_max_tokens())
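
# Illustrative sketch (not part of the patch): overrides only take effect for
# names that already appear in litellm's model_list but that get_max_tokens
# cannot resolve. The entry below is a made-up example.
from swarms.utils.check_all_model_max_tokens import (
    MODEL_MAX_TOKEN_OVERRIDES,
    check_all_model_max_tokens,
)

MODEL_MAX_TOKEN_OVERRIDES["some/unmapped-model-in-model-list"] = 8192
report = check_all_model_max_tokens()  # also returns the text it prints
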
diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py
index 3f418647..34aa5eb8 100644
--- a/swarms/utils/formatter.py
+++ b/swarms/utils/formatter.py
@@ -1,6 +1,6 @@
import threading
import time
-from typing import Any, Callable, Dict, List
+from typing import Any, Callable, Dict, List, Optional
from rich.console import Console
from rich.live import Live
@@ -10,6 +10,23 @@ from rich.table import Table
from rich.text import Text
+def choose_random_color():
+ import random
+
+ colors = [
+ "red",
+ "green",
+ "blue",
+ "yellow",
+ "magenta",
+ "cyan",
+ "white",
+ ]
+ random_color = random.choice(colors)
+
+ return random_color
+
+
class Formatter:
"""
A class for formatting and printing rich text to the console.
@@ -32,18 +49,8 @@ class Formatter:
title (str, optional): The title of the panel. Defaults to "".
style (str, optional): The style of the panel. Defaults to "bold blue".
"""
- import random
-
- colors = [
- "red",
- "green",
- "blue",
- "yellow",
- "magenta",
- "cyan",
- "white",
- ]
- random_color = random.choice(colors)
+ random_color = choose_random_color()
+
panel = Panel(
content, title=title, style=f"bold {random_color}"
)
@@ -145,5 +152,115 @@ class Formatter:
)
time.sleep(delay)
+ def print_streaming_panel(
+ self,
+ streaming_response,
+ title: str = "🤖 Agent Streaming Response",
+        style: Optional[str] = None,
+ collect_chunks: bool = False,
+ on_chunk_callback: Optional[Callable] = None,
+ ) -> str:
+ """
+ Display real-time streaming response using Rich Live and Panel.
+ Similar to the approach used in litellm_stream.py.
+
+ Args:
+ streaming_response: The streaming response generator from LiteLLM.
+ title (str): Title of the panel.
+ style (str): Style for the panel border (if None, will use random color).
+ collect_chunks (bool): Whether to collect individual chunks for conversation saving.
+ on_chunk_callback (Optional[Callable]): Callback function to call for each chunk.
+
+ Returns:
+ str: The complete accumulated response text.
+ """
+ # Get random color similar to non-streaming approach
+ random_color = choose_random_color()
+ panel_style = (
+ f"bold {random_color}" if style is None else style
+ )
+ text_style = (
+ "white" # Make text white instead of random color
+ )
+
+ def create_streaming_panel(text_obj, is_complete=False):
+ """Create panel with proper text wrapping using Rich's built-in capabilities"""
+ panel_title = f"[white]{title}[/white]"
+ if is_complete:
+ panel_title += " [bold green]✅[/bold green]"
+
+ # Add blinking cursor if still streaming
+ display_text = Text.from_markup("")
+ display_text.append_text(text_obj)
+ if not is_complete:
+ display_text.append("▊", style="bold green blink")
+
+ panel = Panel(
+ display_text,
+ title=panel_title,
+ border_style=panel_style,
+ padding=(1, 2),
+ width=self.console.size.width, # Rich handles wrapping automatically
+ )
+ return panel
+
+ # Create a Text object for streaming content
+ streaming_text = Text()
+ complete_response = ""
+ chunks_collected = []
+
+ # TRUE streaming with Rich's automatic text wrapping
+ with Live(
+ create_streaming_panel(streaming_text),
+ console=self.console,
+ refresh_per_second=20,
+ ) as live:
+ try:
+ for part in streaming_response:
+ if (
+ hasattr(part, "choices")
+ and part.choices
+ and part.choices[0].delta.content
+ ):
+ # Add ONLY the new chunk to the Text object with random color style
+ chunk = part.choices[0].delta.content
+ streaming_text.append(chunk, style=text_style)
+ complete_response += chunk
+
+ # Collect chunks if requested
+ if collect_chunks:
+ chunks_collected.append(chunk)
+
+ # Call chunk callback if provided
+ if on_chunk_callback:
+ on_chunk_callback(chunk)
+
+ # Update display with new text - Rich handles all wrapping automatically
+ live.update(
+ create_streaming_panel(
+ streaming_text, is_complete=False
+ )
+ )
+
+ # Final update to show completion
+ live.update(
+ create_streaming_panel(
+ streaming_text, is_complete=True
+ )
+ )
+
+ except Exception as e:
+ # Handle any streaming errors gracefully
+ streaming_text.append(
+ f"\n[Error: {str(e)}]", style="bold red"
+ )
+ live.update(
+ create_streaming_panel(
+ streaming_text, is_complete=True
+ )
+ )
+
+ return complete_response
+
formatter = Formatter()
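
# Illustrative usage sketch (not part of the patch): print_streaming_panel
# consumes any iterable of objects shaped like LiteLLM streaming chunks; the
# tiny fake stream below exists only for demonstration.
from types import SimpleNamespace


def fake_stream(text: str):
    for word in text.split():
        delta = SimpleNamespace(content=word + " ")
        yield SimpleNamespace(choices=[SimpleNamespace(delta=delta)])


full_text = formatter.print_streaming_panel(
    fake_stream("streamed tokens render inside a live panel"),
    collect_chunks=True,
    on_chunk_callback=lambda chunk: None,
)
print(repr(full_text))
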
diff --git a/swarms/utils/history_output_formatter.py b/swarms/utils/history_output_formatter.py
index e190dd8e..f7b86e29 100644
--- a/swarms/utils/history_output_formatter.py
+++ b/swarms/utils/history_output_formatter.py
@@ -23,6 +23,8 @@ def history_output_formatter(
return yaml.safe_dump(conversation.to_dict(), sort_keys=False)
elif type == "dict-all-except-first":
return conversation.return_all_except_first()
+ elif type == "list-final":
+ return conversation.return_list_final()
elif type == "str-all-except-first":
return conversation.return_all_except_first_string()
elif type == "dict-final":
diff --git a/swarms/utils/litellm_wrapper.py b/swarms/utils/litellm_wrapper.py
index 6aa5c7d3..063e6ce3 100644
--- a/swarms/utils/litellm_wrapper.py
+++ b/swarms/utils/litellm_wrapper.py
@@ -151,6 +151,8 @@ class LiteLLM:
retries # Add retries for better reliability
)
+ litellm.drop_params = True
+
def output_for_tools(self, response: any):
if self.mcp_call is True:
out = response.choices[0].message.tool_calls[0].function
@@ -449,8 +451,12 @@ class LiteLLM:
# Make the completion call
response = completion(**completion_params)
+ # Handle streaming response
+ if self.stream:
+ return response # Return the streaming generator directly
+
# Handle tool-based response
- if self.tools_list_dictionary is not None:
+ elif self.tools_list_dictionary is not None:
return self.output_for_tools(response)
elif self.return_all is True:
return response.model_dump()
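
# Illustrative usage sketch (not part of the patch): with stream=True the
# wrapper now hands back the raw generator, which pairs with the streaming
# panel added to the formatter above. Parameter names (model_name, stream) are
# assumed from the wrapper's constructor; the model is only an example.
from swarms.utils.formatter import formatter
from swarms.utils.litellm_wrapper import LiteLLM

llm = LiteLLM(model_name="gpt-4o-mini", stream=True)
chunks = llm.run("Give me three facts about honeybees.")
text = formatter.print_streaming_panel(chunks, collect_chunks=True)
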
diff --git a/swarms/utils/output_types.py b/swarms/utils/output_types.py
index 843d4608..62b63874 100644
--- a/swarms/utils/output_types.py
+++ b/swarms/utils/output_types.py
@@ -12,11 +12,11 @@ HistoryOutputType = Literal[
"all",
"yaml",
"xml",
- # "dict-final",
"dict-all-except-first",
"str-all-except-first",
"basemodel",
"dict-final",
+ "list-final",
]
OutputType = HistoryOutputType
diff --git a/swarms/utils/retry_func.py b/swarms/utils/retry_func.py
new file mode 100644
index 00000000..2a32903d
--- /dev/null
+++ b/swarms/utils/retry_func.py
@@ -0,0 +1,66 @@
+import time
+from typing import Any, Callable, Type, Union, Tuple
+from loguru import logger
+
+
+def retry_function(
+ func: Callable,
+ *args: Any,
+ max_retries: int = 3,
+ delay: float = 1.0,
+ backoff_factor: float = 2.0,
+ exceptions: Union[
+ Type[Exception], Tuple[Type[Exception], ...]
+ ] = Exception,
+ **kwargs: Any,
+) -> Any:
+ """
+ A function that retries another function if it raises specified exceptions.
+
+ Args:
+ func (Callable): The function to retry
+ *args: Positional arguments to pass to the function
+ max_retries (int): Maximum number of retries before giving up. Defaults to 3.
+ delay (float): Initial delay between retries in seconds. Defaults to 1.0.
+ backoff_factor (float): Multiplier applied to delay between retries. Defaults to 2.0.
+ exceptions (Exception or tuple): Exception(s) that trigger a retry. Defaults to Exception.
+ **kwargs: Keyword arguments to pass to the function
+
+ Returns:
+ Any: The return value of the function if successful
+
+ Example:
+ def fetch_data(url: str) -> dict:
+ return requests.get(url).json()
+
+ # Retry the fetch_data function
+ result = retry_function(
+ fetch_data,
+ "https://api.example.com",
+ max_retries=3,
+ exceptions=(ConnectionError, TimeoutError)
+ )
+ """
+ retries = 0
+ current_delay = delay
+
+ while True:
+ try:
+ return func(*args, **kwargs)
+ except exceptions as e:
+ retries += 1
+ if retries > max_retries:
+ logger.error(
+ f"Function {func.__name__} failed after {max_retries} retries. "
+ f"Final error: {str(e)}"
+ )
+ raise
+
+ logger.warning(
+ f"Retry {retries}/{max_retries} for function {func.__name__} "
+ f"after error: {str(e)}. "
+ f"Waiting {current_delay} seconds..."
+ )
+
+ time.sleep(current_delay)
+ current_delay *= backoff_factor
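
# Illustrative sketch (not part of the patch): wrapping a flaky callable.
# retry_function waits 1.0 s after the first failure, 2.0 s after the second,
# and re-raises once max_retries is exhausted.
import random

from swarms.utils.retry_func import retry_function


def flaky() -> str:
    if random.random() < 0.5:
        raise ConnectionError("transient failure")
    return "ok"


print(
    retry_function(
        flaky,
        max_retries=3,
        delay=1.0,
        backoff_factor=2.0,
        exceptions=(ConnectionError,),
    )
)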