Merge branch 'master' into Fix/stream-issues

pull/938/head
harshalmore31 4 months ago committed by GitHub
commit 45baaa732e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -17,7 +17,7 @@ jobs:
&& ${{ contains(github.event.pull_request.labels.*.name, 'release') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python 3.9

@ -21,7 +21,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
# Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis
- name: Run Codacy Analysis CLI
uses: codacy/codacy-analysis-cli-action@562ee3e92b8e92df8b67e0a5ff8aa8e261919c08

@ -16,7 +16,7 @@ jobs:
steps:
# Step 1: Check out the repository
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
# Step 2: Set up Python
- name: Set up Python ${{ matrix.python-version }}

@ -28,7 +28,7 @@ jobs:
language: ["python"]
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:

@ -28,7 +28,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout repository'
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4
# Commonly enabled options, see https://github.com/actions/dependency-review-action#configuration-options for all available options.

@ -9,7 +9,7 @@ jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: actions/setup-python@v5
with:
python-version: 3.11

@ -6,7 +6,7 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python
uses: actions/setup-python@v5

@ -33,7 +33,7 @@ jobs:
security-events: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
submodules: true

@ -35,7 +35,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
submodules: true

@ -21,7 +21,7 @@ jobs:
python-version: ["3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:

@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python 3.10
uses: actions/setup-python@v5

@ -27,7 +27,7 @@ jobs:
runs-on: "ubuntu-20.04"
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Build an image from Dockerfile
run: |

@ -1,8 +1,6 @@
# Smart Database Powered by Hierarchical Multi-Agent Workflow
This module implements a fully autonomous database management system using a hierarchical
multi-agent architecture. The system includes specialized agents for different database
operations coordinated by a Database Director agent.
This module implements a fully autonomous database management system using a hierarchical multi-agent architecture. The system includes specialized agents for different database operations coordinated by a Database Director agent.
## Features

@ -14643,7 +14643,7 @@ The following example showcases how to use the `AgentRearrange` class to manage
```python
from swarms.structs.agent import Agent
from swarms.structs.rearrange import AgentRearrange
from swarms.structs.agent_rearrange import AgentRearrange
# Initialize the Director agent using Anthropic model via model_name
director = Agent(
@ -44327,7 +44327,7 @@ The flow pattern uses arrow notation (`->`) to define execution order:
### Basic Sequential Flow
```python
from swarms.structs.swarm_arange import SwarmRearrange
from swarms.structs.swarm_rearrange import SwarmRearrange
import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat

@ -320,6 +320,9 @@ nav:
- Storage:
- AgentRegistry: "swarms/structs/agent_registry.md"
- Agent Loader:
- Agent Loader: "swarms/utils/agent_loader.md"
- Communication Structure: "swarms/structs/conversation.md"
- Tools:

@ -49,7 +49,7 @@ The following example showcases how to use the `AgentRearrange` class to manage
```python
from swarms.structs.agent import Agent
from swarms.structs.rearrange import AgentRearrange
from swarms.structs.agent_rearrange import AgentRearrange
# Initialize the Director agent using Anthropic model via model_name
director = Agent(

@ -1,149 +1,62 @@
# Swarms Ecosystem
# Swarms Infrastructure Stack
*The Complete Enterprise-Grade Multi-Agent AI Platform*
**We're Building the Operating System for the Agent Economy**
---
## **Join the Future of AI Development**
**We're Building the Operating System for the Agent Economy** - The Swarms ecosystem represents the most comprehensive, production-ready multi-agent AI platform available today. From our flagship Python framework to high-performance Rust implementations and client libraries spanning every major programming language, we provide enterprise-grade tools that power the next generation of intelligent applications.
---
## **Complete Product Portfolio**
| **Product** | **Technology** | **Status** | **Repository** | **Documentation** |
|-------------|---------------|------------|----------------|-------------------|
| **Swarms Python Framework** | Python | **Production** | [swarms](https://github.com/kyegomez/swarms) | [Docs](https://docs.swarms.world/en/latest/swarms/install/install/) |
| **Swarms Rust Framework** | Rust | **Production** | [swarms-rs](https://github.com/The-Swarm-Corporation/swarms-rs) | [Docs](https://docs.swarms.world/en/latest/swarms_rs/overview/) |
| **Python API Client** | Python | **Production** | [swarms-sdk](https://github.com/The-Swarm-Corporation/swarms-sdk) | [Docs](https://docs.swarms.world/en/latest/swarms_cloud/python_client/) |
| **TypeScript/Node.js Client** | TypeScript | **Production** | [swarms-ts](https://github.com/The-Swarm-Corporation/swarms-ts) | *Coming Soon* |
| **Go Client** | Go | **Production** | [swarms-client-go](https://github.com/The-Swarm-Corporation/swarms-client-go) | *Coming Soon* |
| **Java Client** | Java | **Production** | [swarms-java](https://github.com/The-Swarm-Corporation/swarms-java) | *Coming Soon* |
| **Kotlin Client** | Kotlin | **Q2 2025** | *In Development* | *Coming Soon* |
| **Ruby Client** | Ruby | **Q2 2025** | *In Development* | *Coming Soon* |
| **Rust Client** | Rust | **Q2 2025** | *In Development* | *Coming Soon* |
| **C#/.NET Client** | C# | **Q3 2025** | *In Development* | *Coming Soon* |
The Swarms ecosystem represents the most comprehensive, production-ready multi-agent AI platform available today. From our flagship Python framework to high-performance Rust implementations and client libraries spanning every major programming language, we provide enterprise-grade tools that power the next generation of agentic applications.
---
## **Why Choose the Swarms Ecosystem?**
### **Enterprise-Grade Architecture**
- **Production Ready**: Battle-tested in enterprise environments with 99.9%+ uptime
- **Scalable Infrastructure**: Handle millions of agent interactions with automatic scaling
- **Security First**: End-to-end encryption, API key management, and enterprise compliance
## **Product Portfolio by Language & API**
- **Observability**: Comprehensive logging, monitoring, and debugging capabilities
### 🐍 **Python**
### **Developer Experience**
- **Multiple Language Support**: Native clients for every major programming language
- **Unified API**: Consistent interface across all platforms and languages
- **Rich Documentation**: Comprehensive guides, tutorials, and API references
- **Active Community**: 24/7 support through Discord, GitHub, and direct channels
### **Performance & Reliability**
- **High Throughput**: Process thousands of concurrent agent requests
- **Low Latency**: Optimized for real-time applications and user experiences
- **Fault Tolerance**: Automatic retries, circuit breakers, and graceful degradation
- **Multi-Cloud**: Deploy on AWS, GCP, Azure, or on-premises infrastructure
| **Product** | **Description** | **Status** | **Repository** | **Documentation** |
|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------|-------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|
| **Swarms Python Framework** | The core multi-agent orchestration framework for Python. Enables building, managing, and scaling complex agentic systems with robust abstractions, workflows, and integrations. | **Production** | [swarms](https://github.com/kyegomez/swarms) | [Docs](https://docs.swarms.world/en/latest/swarms/install/install/) |
| **Python API Client** | Official Python SDK for interacting with Swarms Cloud and remote agent infrastructure. Simplifies API calls, authentication, and integration into Python applications. | **Production** | [swarms-sdk](https://github.com/The-Swarm-Corporation/swarms-sdk) | [Docs](https://docs.swarms.world/en/latest/swarms_cloud/python_client/) |
| **Swarms Tools** | A comprehensive library of prebuilt tools for various domains, including finance, social media, data processing, and more. Accelerates agent development by providing ready-to-use capabilities and integrations. | **Production** | [swarms-tools](https://github.com/The-Swarm-Corporation/swarms-tools) | *Coming Soon* |
| **Swarms Memory** | A robust library of memory structures and data loaders for Retrieval-Augmented Generation (RAG) processing. Provides advanced memory management, vector stores, and integration with agentic workflows. | **Production** | [swarms-memory](https://github.com/The-Swarm-Corporation/swarms-memory) | *Coming Soon* |
---
## **Join Our Growing Community**
### **Connect With Developers Worldwide**
### 🦀 **Rust**
| **Platform** | **Purpose** | **Join Link** | **Benefits** |
|--------------|-------------|---------------|--------------|
| **Discord Community** | Real-time support & discussions | [Join Discord](https://discord.gg/EamjgSaEQf) | • 24/7 developer support<br/>• Weekly community events<br/>• Direct access to core team<br/>• Beta feature previews |
| **Twitter/X** | Latest updates & announcements | [Follow @swarms_corp](https://x.com/swarms_corp) | • Breaking news & updates<br/>• Community highlights<br/>• Technical insights<br/>• Industry partnerships |
| **LinkedIn** | Professional network & updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | • Professional networking<br/>• Career opportunities<br/>• Enterprise partnerships<br/>• Industry insights |
| **YouTube** | Tutorials & technical content | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | • In-depth tutorials<br/>• Live coding sessions<br/>• Architecture deep dives<br/>• Community showcases |
| **Product** | **Description** | **Status** | **Repository** | **Documentation** |
|----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------|-------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|
| **Swarms Rust Framework** | High-performance, memory-safe multi-agent orchestration framework written in Rust. Designed for demanding production environments and seamless integration with Rust-based systems. | **Production** | [swarms-rs](https://github.com/The-Swarm-Corporation/swarms-rs) | [Docs](https://docs.swarms.world/en/latest/swarms_rs/overview/) |
| **Rust Client** | Official Rust client library for connecting to Swarms Cloud and orchestrating agents from Rust applications. Provides idiomatic Rust APIs for agent management and communication. | **Q2 2025** | *In Development* | *Coming Soon* |
---
## **Contribute to the Ecosystem**
### 🌐 **API Clients (Multi-Language)**
### **How You Can Make an Impact**
| **Contribution Area** | **Skills Needed** | **Impact Level** | **Getting Started** |
|-----------------------|-------------------|------------------|---------------------|
| **Core Framework Development** | Python, Rust, Systems Design | **High Impact** | [Contributing Guide](https://docs.swarms.world/en/latest/contributors/main/) |
| **Client Library Development** | Various Languages (Go, Java, TS, etc.) | **High Impact** | [Client Development](https://github.com/The-Swarm-Corporation) |
| **Documentation & Tutorials** | Technical Writing, Examples | **High Impact** | [Docs Contributing](https://docs.swarms.world/en/latest/contributors/docs/) |
| **Testing & Quality Assurance** | Testing Frameworks, QA | **Medium Impact** | [Testing Guide](https://docs.swarms.world/en/latest/swarms/framework/test/) |
| **UI/UX & Design** | Design, Frontend Development | **Medium Impact** | [Design Contributions](https://github.com/The-Swarm-Corporation/swarms/issues) |
| **Bug Reports & Feature Requests** | User Experience, Testing | **Easy Start** | [Report Issues](https://github.com/The-Swarm-Corporation/swarms/issues) |
---
## **We're Hiring Top Talent**
### **Join the Team Building the Future Of The World Economy**
**Ready to work on cutting-edge agent technology that's shaping the future?** We're actively recruiting exceptional engineers, researchers, and technical leaders to join our mission of building the operating system for the agent economy.
| **Why Join Swarms?** | **What We Offer** |
|-----------------------|-------------------|
| **Cutting-Edge Technology** | Work on the most powerful multi-agent systems, distributed computing, and enterprise-scale infrastructure |
| **Global Impact** | Your code will power agent applications used by Fortune 500 companies and millions of developers |
| **World-Class Team** | Collaborate with top engineers, researchers, and industry experts from Google, OpenAI, and more |
| **Fast Growth** | Join a rapidly scaling company with massive market opportunity and venture backing |
### **Open Positions**
| **Position** | **Role Description** |
|-------------------------------|----------------------------------------------------------|
| **Senior Rust Engineers** | Building high-performance agent infrastructure |
| **Python Framework Engineers**| Expanding our core multi-agent capabilities |
| **DevOps/Platform Engineers** | Scaling cloud infrastructure for millions of agents |
| **Technical Writers** | Creating world-class developer documentation |
| **Solutions Engineers** | Helping enterprises adopt multi-agent AI |
**Ready to Build the Future?** **[Apply Now at swarms.ai/hiring](https://swarms.ai/hiring)**
| **Language/Platform** | **Description** | **Status** | **Repository** | **Documentation** |
|----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------|-------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|
| **TypeScript/Node.js** | Official TypeScript/Node.js SDK for Swarms Cloud. Enables seamless integration of agentic workflows into JavaScript and TypeScript applications, both server-side and in the browser. | **Production** | [swarms-ts](https://github.com/The-Swarm-Corporation/swarms-ts) | *Coming Soon* |
| **Go** | Go client library for Swarms Cloud, providing Go developers with native APIs to manage, orchestrate, and interact with agents in distributed systems and microservices. | **Production** | [swarms-client-go](https://github.com/The-Swarm-Corporation/swarms-client-go) | *Coming Soon* |
| **Java** | Java SDK for Swarms Cloud, allowing enterprise Java applications to leverage multi-agent orchestration and integrate agentic capabilities into JVM-based systems. | **Production** | [swarms-java](https://github.com/The-Swarm-Corporation/swarms-java) | *Coming Soon* |
| **Kotlin** | Native Kotlin client for Swarms Cloud, designed for modern JVM and Android applications seeking to embed agentic intelligence and orchestration. | **Q2 2025** | *In Development* | *Coming Soon* |
| **Ruby** | Ruby SDK for Swarms Cloud, enabling Ruby and Rails developers to easily connect, manage, and orchestrate agents within their applications. | **Q2 2025** | *In Development* | *Coming Soon* |
| **C#/.NET** | Official C#/.NET client library for Swarms Cloud, providing .NET developers with tools to integrate agentic workflows into desktop, web, and cloud applications. | **Q3 2025** | *In Development* | *Coming Soon* |
---
---
## **Get Started Today**
### **Quick Start Guide**
| **Step** | **Action** | **Time Required** |
|----------|------------|-------------------|
| **1** | [Install Swarms Python Framework](https://docs.swarms.world/en/latest/swarms/install/install/) | 5 minutes |
| **2** | [Run Your First Agent](https://docs.swarms.world/en/latest/swarms/examples/basic_agent/) | 10 minutes |
| **3** | [Try Multi-Agent Workflows](https://docs.swarms.world/en/latest/swarms/examples/sequential_example/) | 15 minutes |
| **4** | [Join Our Discord Community](https://discord.gg/EamjgSaEQf) | 2 minutes |
| **5** | [Explore Enterprise Features](https://docs.swarms.world/en/latest/swarms_cloud/swarms_api/) | 20 minutes |
---
## **Enterprise Support & Partnerships**
### **Ready to Scale with Swarms?**
## **Why Choose the Swarms Ecosystem?**
| **Contact Type** | **Best For** | **Response Time** | **Contact Information** |
|------------------|--------------|-------------------|-------------------------|
| **Technical Support** | Development questions, troubleshooting | < 24 hours | [Book Support Call](https://cal.com/swarms/swarms-technical-support) |
| **Enterprise Sales** | Custom deployments, enterprise licensing | < 4 hours | [kye@swarms.world](mailto:kye@swarms.world) |
| **Partnerships** | Integration partnerships, technology alliances | < 48 hours | [kye@swarms.world](mailto:kye@swarms.world) |
| **Investor Relations** | Investment opportunities, funding updates | By appointment | [kye@swarms.world](mailto:kye@swarms.world) |
| **Feature** | **Description** |
|----------------------------|------------------------------------------------------------------------------------------------------|
| **Production Ready** | Battle-tested in enterprise environments with 99.9%+ uptime |
| **Scalable Infrastructure** | Handle millions of agent interactions with automatic scaling |
| **Security First** | End-to-end encryption, API key management, and enterprise compliance |
| **Observability** | Comprehensive logging, monitoring, and debugging capabilities |
| **Multiple Language Support** | Native clients for every major programming language |
| **Unified API** | Consistent interface across all platforms and languages |
| **Rich Documentation** | Comprehensive guides, tutorials, and API references |
| **Active Community** | 24/7 support through Discord, GitHub, and direct channels |
| **High Throughput** | Process thousands of concurrent agent requests |
| **Low Latency** | Optimized for real-time applications and user experiences |
| **Fault Tolerance** | Automatic retries, circuit breakers, and graceful degradation |
| **Multi-Cloud** | Deploy on AWS, GCP, Azure, or on-premises infrastructure |
---
**Ready to build the future of AI? Start with Swarms today and join thousands of developers creating the next generation of intelligent applications.**

@ -89,7 +89,6 @@ graph TD
| `callback` | Callable function to be called after each agent loop. |
| `metadata` | Dictionary containing metadata for the agent. |
| `callbacks` | List of callable functions to be called during execution. |
| `logger_handler` | Handler for logging messages. |
| `search_algorithm` | Callable function for long-term memory retrieval. |
| `logs_to_filename` | File path for logging agent activities. |
| `evaluator` | Callable function for evaluating the agent's responses. |
@ -121,14 +120,12 @@ graph TD
| `memory_chunk_size` | Integer representing the maximum size of memory chunks for long-term memory retrieval. |
| `agent_ops_on` | Boolean indicating whether agent operations should be enabled. |
| `return_step_meta` | Boolean indicating whether to return JSON of all steps and additional metadata. |
| `output_type` | Literal type indicating whether to output "string", "str", "list", "json", "dict", or "yaml". |
| `time_created` | Float representing the time the agent was created. |
| `tags` | Optional list of strings for tagging the agent. |
| `use_cases` | Optional list of dictionaries describing use cases for the agent. |
| `step_pool` | List of Step objects representing the agent's execution steps. |
| `print_every_step` | Boolean indicating whether to print every step of execution. |
| `agent_output` | ManySteps object containing the agent's output and metadata. |
| `executor_workers` | Integer representing the number of executor workers for concurrent operations. |
| `data_memory` | Optional callable for data memory operations. |
| `load_yaml_path` | String representing the path to a YAML file for loading configurations. |
| `auto_generate_prompt` | Boolean indicating whether to automatically generate prompts. |
@ -137,17 +134,44 @@ graph TD
| `artifacts_on` | Boolean indicating whether to save artifacts from agent execution |
| `artifacts_output_path` | File path where artifacts should be saved |
| `artifacts_file_extension` | File extension to use for saved artifacts |
| `device` | Device to run computations on ("cpu" or "gpu") |
| `all_cores` | Boolean indicating whether to use all CPU cores |
| `device_id` | ID of the GPU device to use if running on GPU |
| `scheduled_run_date` | Optional datetime for scheduling future agent runs |
| `do_not_use_cluster_ops` | Boolean indicating whether to avoid cluster operations |
| `all_gpus` | Boolean indicating whether to use all available GPUs |
| `model_name` | String representing the name of the model to use |
| `llm_args` | Dictionary containing additional arguments for the LLM |
| `load_state_path` | String representing the path to load state from |
| `role` | String representing the role of the agent (e.g., "worker") |
| `print_on` | Boolean indicating whether to print output |
| `tools_list_dictionary` | List of dictionaries representing tool schemas |
| `mcp_url` | String or MCPConnection representing the MCP server URL |
| `mcp_urls` | List of strings representing multiple MCP server URLs |
| `react_on` | Boolean indicating whether to enable ReAct reasoning |
| `safety_prompt_on` | Boolean indicating whether to enable safety prompts |
| `random_models_on` | Boolean indicating whether to randomly select models |
| `mcp_config` | MCPConnection object containing MCP configuration |
| `top_p` | Float representing the top-p sampling parameter |
| `conversation_schema` | ConversationSchema object for conversation formatting |
| `llm_base_url` | String representing the base URL for the LLM API |
| `llm_api_key` | String representing the API key for the LLM |
| `rag_config` | RAGConfig object containing RAG configuration |
| `tool_call_summary` | Boolean indicating whether to summarize tool calls |
| `output_raw_json_from_tool_call` | Boolean indicating whether to output raw JSON from tool calls |
| `summarize_multiple_images` | Boolean indicating whether to summarize multiple image outputs |
| `tool_retry_attempts` | Integer representing the number of retry attempts for tool execution |
| `reasoning_prompt_on` | Boolean indicating whether to enable reasoning prompts |
| `dynamic_context_window` | Boolean indicating whether to dynamically adjust context window |
| `created_at` | Float representing the timestamp when the agent was created |
| `workspace_dir` | String representing the workspace directory for the agent |
| `timeout` | Integer representing the timeout for operations in seconds |
## `Agent` Methods
| Method | Description | Inputs | Usage Example |
|--------|-------------|--------|----------------|
| `run(task, img=None, is_last=False, device="cpu", device_id=0, all_cores=True, *args, **kwargs)` | Runs the autonomous agent loop to complete the given task. | `task` (str): The task to be performed.<br>`img` (str, optional): Path to an image file.<br>`is_last` (bool): Whether this is the last task.<br>`device` (str): Device to run on ("cpu" or "gpu").<br>`device_id` (int): ID of the GPU to use.<br>`all_cores` (bool): Whether to use all CPU cores.<br>`*args`, `**kwargs`: Additional arguments. | `response = agent.run("Generate a report on financial performance.")` |
| `run(task, img=None, imgs=None, correct_answer=None, streaming_callback=None, *args, **kwargs)` | Runs the autonomous agent loop to complete the given task. | `task` (str): The task to be performed.<br>`img` (str, optional): Path to an image file.<br>`imgs` (List[str], optional): List of image paths.<br>`correct_answer` (str, optional): Expected correct answer for validation.<br>`streaming_callback` (Callable, optional): Callback for streaming tokens.<br>`*args`, `**kwargs`: Additional arguments. | `response = agent.run("Generate a report on financial performance.")` |
| `run_batched(tasks, imgs=None, *args, **kwargs)` | Runs multiple tasks concurrently in batch mode. | `tasks` (List[str]): List of tasks to run.<br>`imgs` (List[str], optional): List of images to process.<br>`*args`, `**kwargs`: Additional arguments. | `responses = agent.run_batched(["Task 1", "Task 2"])` |
| `__call__(task, img=None, *args, **kwargs)` | Alternative way to call the `run` method. | Same as `run`. | `response = agent("Generate a report on financial performance.")` |
| `parse_and_execute_tools(response, *args, **kwargs)` | Parses the agent's response and executes any tools mentioned in it. | `response` (str): The agent's response to be parsed.<br>`*args`, `**kwargs`: Additional arguments. | `agent.parse_and_execute_tools(response)` |
| `add_memory(message)` | Adds a message to the agent's memory. | `message` (str): The message to add. | `agent.add_memory("Important information")` |
@ -155,6 +179,8 @@ graph TD
| `run_concurrent(task, *args, **kwargs)` | Runs a task concurrently. | `task` (str): The task to run.<br>`*args`, `**kwargs`: Additional arguments. | `response = await agent.run_concurrent("Concurrent task")` |
| `run_concurrent_tasks(tasks, *args, **kwargs)` | Runs multiple tasks concurrently. | `tasks` (List[str]): List of tasks to run.<br>`*args`, `**kwargs`: Additional arguments. | `responses = agent.run_concurrent_tasks(["Task 1", "Task 2"])` |
| `bulk_run(inputs)` | Generates responses for multiple input sets. | `inputs` (List[Dict[str, Any]]): List of input dictionaries. | `responses = agent.bulk_run([{"task": "Task 1"}, {"task": "Task 2"}])` |
| `run_multiple_images(task, imgs, *args, **kwargs)` | Runs the agent with multiple images using concurrent processing. | `task` (str): The task to perform on each image.<br>`imgs` (List[str]): List of image paths or URLs.<br>`*args`, `**kwargs`: Additional arguments. | `outputs = agent.run_multiple_images("Describe image", ["img1.jpg", "img2.png"])` |
| `continuous_run_with_answer(task, img=None, correct_answer=None, max_attempts=10)` | Runs the agent until the correct answer is provided. | `task` (str): The task to perform.<br>`img` (str, optional): Image to process.<br>`correct_answer` (str): Expected answer.<br>`max_attempts` (int): Maximum attempts. | `response = agent.continuous_run_with_answer("Math problem", correct_answer="42")` |
| `save()` | Saves the agent's history to a file. | None | `agent.save()` |
| `load(file_path)` | Loads the agent's history from a file. | `file_path` (str): Path to the file. | `agent.load("agent_history.json")` |
| `graceful_shutdown()` | Gracefully shuts down the system, saving the state. | None | `agent.graceful_shutdown()` |
@ -178,8 +204,6 @@ graph TD
| `send_agent_message(agent_name, message, *args, **kwargs)` | Sends a message from the agent to a user. | `agent_name` (str): Name of the agent.<br>`message` (str): Message to send.<br>`*args`, `**kwargs`: Additional arguments. | `response = agent.send_agent_message("AgentX", "Task completed")` |
| `add_tool(tool)` | Adds a tool to the agent's toolset. | `tool` (Callable): Tool to add. | `agent.add_tool(my_custom_tool)` |
| `add_tools(tools)` | Adds multiple tools to the agent's toolset. | `tools` (List[Callable]): List of tools to add. | `agent.add_tools([tool1, tool2])` |
| `remove_tool(tool)` | Removes a tool from the agent's toolset. || Method | Description | Inputs | Usage Example |
|--------|-------------|--------|----------------|
| `remove_tool(tool)` | Removes a tool from the agent's toolset. | `tool` (Callable): Tool to remove. | `agent.remove_tool(my_custom_tool)` |
| `remove_tools(tools)` | Removes multiple tools from the agent's toolset. | `tools` (List[Callable]): List of tools to remove. | `agent.remove_tools([tool1, tool2])` |
| `get_docs_from_doc_folders()` | Retrieves and processes documents from the specified folder. | None | `agent.get_docs_from_doc_folders()` |
@ -208,18 +232,30 @@ graph TD
| `handle_sop_ops()` | Handles operations related to standard operating procedures. | None | `agent.handle_sop_ops()` |
| `agent_output_type(responses)` | Processes and returns the agent's output based on the specified output type. | `responses` (list): List of responses. | `formatted_output = agent.agent_output_type(responses)` |
| `check_if_no_prompt_then_autogenerate(task)` | Checks if `auto_generate_prompt` is enabled and, when no system prompt is set, generates one by combining the agent name, description, and system prompt. | `task` (str, optional): Task to use as a fallback when generating the prompt. | `agent.check_if_no_prompt_then_autogenerate("Analyze data")` |
| `handle_artifacts(response, output_path, extension)` | Handles saving artifacts from agent execution | `response` (str): Agent response<br>`output_path` (str): Output path<br>`extension` (str): File extension | `agent.handle_artifacts(response, "outputs/", ".txt")` |
| `showcase_config()` | Displays the agent's configuration in a formatted table. | None | `agent.showcase_config()` |
| `talk_to(agent, task, img=None, *args, **kwargs)` | Initiates a conversation with another agent. | `agent` (Any): Target agent.<br>`task` (str): Task to discuss.<br>`img` (str, optional): Image to share.<br>`*args`, `**kwargs`: Additional arguments. | `response = agent.talk_to(other_agent, "Let's collaborate")` |
| `talk_to_multiple_agents(agents, task, *args, **kwargs)` | Talks to multiple agents concurrently. | `agents` (List[Any]): List of target agents.<br>`task` (str): Task to discuss.<br>`*args`, `**kwargs`: Additional arguments. | `responses = agent.talk_to_multiple_agents([agent1, agent2], "Group discussion")` |
| `get_agent_role()` | Returns the role of the agent. | None | `role = agent.get_agent_role()` |
| `pretty_print(response, loop_count)` | Prints the response in a formatted panel. | `response` (str): Response to print.<br>`loop_count` (int): Current loop number. | `agent.pretty_print("Analysis complete", 1)` |
| `parse_llm_output(response)` | Parses and standardizes the output from the LLM. | `response` (Any): Response from the LLM. | `parsed_response = agent.parse_llm_output(llm_output)` |
| `sentiment_and_evaluator(response)` | Performs sentiment analysis and evaluation on the response. | `response` (str): Response to analyze. | `agent.sentiment_and_evaluator("Great response!")` |
| `output_cleaner_op(response)` | Applies output cleaning operations to the response. | `response` (str): Response to clean. | `cleaned_response = agent.output_cleaner_op(response)` |
| `mcp_tool_handling(response, current_loop)` | Handles MCP tool execution and responses. | `response` (Any): Response containing tool calls.<br>`current_loop` (int): Current loop number. | `agent.mcp_tool_handling(response, 1)` |
| `temp_llm_instance_for_tool_summary()` | Creates a temporary LLM instance for tool summaries. | None | `temp_llm = agent.temp_llm_instance_for_tool_summary()` |
| `execute_tools(response, loop_count)` | Executes tools based on the LLM response. | `response` (Any): Response containing tool calls.<br>`loop_count` (int): Current loop number. | `agent.execute_tools(response, 1)` |
| `list_output_types()` | Returns available output types. | None | `types = agent.list_output_types()` |
| `tool_execution_retry(response, loop_count)` | Executes tools with retry logic for handling failures. | `response` (Any): Response containing tool calls.<br>`loop_count` (int): Current loop number. | `agent.tool_execution_retry(response, 1)` |
## Updated Run Method
The `run` method has been updated with new parameters for enhanced functionality:
| Method | Description | Inputs | Usage Example |
|--------|-------------|--------|----------------|
| `run(task, img=None, imgs=None, correct_answer=None, streaming_callback=None, *args, **kwargs)` | Runs the agent with enhanced parameters | `task` (str): Task to run<br>`img` (str, optional): Single image path<br>`imgs` (List[str], optional): List of image paths<br>`correct_answer` (str, optional): Expected answer for validation<br>`streaming_callback` (Callable, optional): Callback for streaming tokens<br>`*args`, `**kwargs`: Additional arguments | `agent.run("Analyze data", imgs=["img1.jpg", "img2.png"])` |
@ -420,9 +456,35 @@ tasks = [
]
responses = agent.bulk_run(tasks)
print(responses)
# Run multiple tasks in batch mode (new method)
task_list = ["Analyze data", "Generate report", "Create summary"]
batch_responses = agent.run_batched(task_list)
print(f"Completed {len(batch_responses)} tasks in batch mode")
```
### Batch Processing with `run_batched`
The new `run_batched` method allows you to process multiple tasks efficiently:
```python
# Process multiple tasks in batch
tasks = [
"Analyze the financial data for Q1",
"Generate a summary report for stakeholders",
"Create recommendations for Q2 planning"
]
# Run all tasks concurrently
batch_results = agent.run_batched(tasks)
# Process results
for i, (task, result) in enumerate(zip(tasks, batch_results)):
print(f"Task {i+1}: {task}")
print(f"Result: {result}\n")
```
### Various other settings
```python
@ -611,6 +673,36 @@ print(type(str_to_dict(out)))
```
## New Features and Parameters
### Enhanced Run Method Parameters
The `run` method now supports several new parameters for advanced functionality:
- **`imgs`**: Process multiple images simultaneously instead of just one
- **`correct_answer`**: Validate responses against expected answers with automatic retries
- **`streaming_callback`**: Real-time token streaming for interactive applications
### MCP (Model Context Protocol) Integration
New parameters enable seamless MCP server integration:
- **`mcp_url`**: Connect to a single MCP server
- **`mcp_urls`**: Connect to multiple MCP servers
- **`mcp_config`**: Advanced MCP configuration options
### Advanced Reasoning and Safety
- **`react_on`**: Enable ReAct reasoning for complex problem-solving
- **`safety_prompt_on`**: Add safety constraints to agent responses
- **`reasoning_prompt_on`**: Enable multi-loop reasoning for complex tasks
### Performance and Resource Management
- **`dynamic_context_window`**: Automatically adjust context window based on available tokens
- **`tool_retry_attempts`**: Configure retry behavior for tool execution
- **`summarize_multiple_images`**: Automatically summarize results from multiple image processing
## Best Practices
1. Always provide a clear and concise `system_prompt` to guide the agent's behavior.
@ -627,5 +719,9 @@ print(type(str_to_dict(out)))
12. Configure `device` and `device_id` appropriately for optimal performance
13. Enable `rag_every_loop` when continuous context from long-term memory is needed
14. Use `scheduled_run_date` for automated task scheduling
15. Leverage `run_batched` for efficient processing of multiple related tasks
16. Use `mcp_url` or `mcp_urls` to extend agent capabilities with external tools
17. Enable `react_on` for complex reasoning tasks requiring step-by-step analysis
18. Configure `tool_retry_attempts` for robust tool execution in production environments
By following these guidelines and leveraging the Swarm Agent's extensive features, you can create powerful, flexible, and efficient autonomous agents for a wide range of applications.

@ -46,7 +46,7 @@ The flow pattern uses arrow notation (`->`) to define execution order:
### Basic Sequential Flow
```python
from swarms.structs.swarm_arange import SwarmRearrange
from swarms.structs.swarm_rearrange import SwarmRearrange
import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat

@ -0,0 +1,453 @@
# AgentLoader Documentation
The `AgentLoader` is a powerful utility for creating Swarms agents from markdown files using the Claude Code sub-agent format. It supports both single and multiple markdown file loading, providing a flexible way to define and deploy agents using YAML frontmatter configuration.
## Overview
The AgentLoader enables you to:
- Load single agents from markdown files with YAML frontmatter
- Load multiple agents from directories or file lists with concurrent processing
- Parse Claude Code sub-agent YAML frontmatter configurations
- Extract system prompts from markdown content
- Utilize 100% CPU cores for high-performance batch loading
- Provide comprehensive error handling and validation
## Installation
The AgentLoader is included with the Swarms framework:
```python
from swarms.utils import AgentLoader, load_agent_from_markdown, load_agents_from_markdown
```
## Markdown Format
The AgentLoader uses the Claude Code sub-agent YAML frontmatter format:
```markdown
---
name: your-sub-agent-name
description: Description of when this subagent should be invoked
model_name: gpt-4
temperature: 0.3
max_loops: 2
mcp_url: http://example.com/mcp # optional
---
Your subagent's system prompt goes here. This can be multiple paragraphs
and should clearly define the subagent's role, capabilities, and approach
to solving problems.
Include specific instructions, best practices, and any constraints
the subagent should follow.
```
**Schema Fields:**
- `name` (required): Your sub-agent name
- `description` (required): Description of when this subagent should be invoked
- `model_name` (optional): Name of model (defaults to random selection if not provided)
- `temperature` (optional): Float value for model temperature (0.0-2.0)
- `max_loops` (optional): Integer for maximum reasoning loops
- `mcp_url` (optional): MCP server URL if needed
## Quick Start
### Loading a Single Agent
```python
from swarms.utils import load_agent_from_markdown
# Load agent from markdown file
agent = load_agent_from_markdown("finance_advisor.md")
# Use the agent
response = agent.run(
"I have $10,000 to invest. What's a good strategy for a beginner?"
)
```
### Loading Multiple Agents (Concurrent)
```python
from swarms.utils import load_agents_from_markdown
# Load agents from list of files with concurrent processing
agents = load_agents_from_markdown([
"market_researcher.md",
"financial_analyst.md",
"risk_analyst.md"
], concurrent=True) # Uses all CPU cores for faster loading
# Use agents in a workflow
from swarms.structs import SequentialWorkflow
workflow = SequentialWorkflow(
agents=agents,
max_loops=1
)
task = "Analyze the AI healthcare market for a $50M investment."
result = workflow.run(task)
```
## Class-Based Usage
### AgentLoader Class
For more advanced usage, use the `AgentLoader` class directly:
```python
from swarms.utils import AgentLoader
# Initialize loader
loader = AgentLoader()
# Load single agent
agent = loader.load_single_agent("path/to/agent.md")
# Load multiple agents with concurrent processing
agents = loader.load_multiple_agents(
"./agents_directory/",
concurrent=True, # Enable concurrent processing
max_workers=8 # Optional: limit worker threads
)
# Parse markdown file without creating agent
config = loader.parse_markdown_file("path/to/agent.md")
print(config.name, config.description)
```
## Configuration Options
You can override default configuration when loading agents:
```python
agent = load_agent_from_markdown(
file_path="agent.md",
max_loops=5,
verbose=True,
dashboard=True,
autosave=False,
context_length=200000
)
```
### Available Configuration Parameters
- `max_loops` (int): Maximum number of reasoning loops (default: 1)
- `autosave` (bool): Enable automatic state saving (default: True)
- `dashboard` (bool): Enable dashboard monitoring (default: False)
- `verbose` (bool): Enable verbose logging (default: False)
- `dynamic_temperature_enabled` (bool): Enable dynamic temperature (default: False)
- `saved_state_path` (str): Path for saving agent state
- `user_name` (str): User identifier (default: "default_user")
- `retry_attempts` (int): Number of retry attempts (default: 3)
- `context_length` (int): Maximum context length (default: 100000)
- `return_step_meta` (bool): Return step metadata (default: False)
- `output_type` (str): Output format type (default: "str")
- `auto_generate_prompt` (bool): Auto-generate prompts (default: False)
- `artifacts_on` (bool): Enable artifacts (default: False)
## Complete Example
### Example: Finance Advisor Agent
Create a file `finance_advisor.md`:
```markdown
---
name: FinanceAdvisor
description: Expert financial advisor for investment and budgeting guidance
model_name: gpt-4
temperature: 0.7
max_loops: 1
---
You are an expert financial advisor with deep knowledge in:
- Investment strategies and portfolio management
- Personal budgeting and financial planning
- Risk assessment and diversification
- Tax optimization strategies
- Retirement planning
Your approach:
- Provide clear, actionable financial advice
- Consider individual risk tolerance and goals
- Explain complex concepts in simple terms
- Always emphasize the importance of diversification
- Include relevant disclaimers about financial advice
When analyzing financial situations:
1. Assess current financial position
2. Identify short-term and long-term goals
3. Evaluate risk tolerance
4. Recommend appropriate strategies
5. Suggest specific action steps
```
### Loading and Using the Agent
```python
from swarms.utils import load_agent_from_markdown
# Load the Finance Advisor agent
agent = load_agent_from_markdown("finance_advisor.md")
# Use the agent for financial advice
response = agent.run(
"I have $10,000 to invest. What's a good strategy for a beginner?"
)
```
## Error Handling
The AgentLoader provides comprehensive error handling:
```python
from swarms.utils import AgentLoader
loader = AgentLoader()
try:
# This will raise FileNotFoundError
agent = loader.load_single_agent("nonexistent.md")
except FileNotFoundError as e:
print(f"File not found: {e}")
try:
# This will handle parsing errors gracefully
agents = loader.load_multiple_agents("./invalid_directory/")
print(f"Successfully loaded {len(agents)} agents")
except Exception as e:
print(f"Error loading agents: {e}")
```
## Concurrent Processing Features
### Multi-Core Performance
The AgentLoader utilizes 100% of CPU cores for concurrent agent loading, providing significant performance improvements when processing multiple markdown files:
```python
from swarms.utils import load_agents_from_markdown
# Automatic concurrent processing for multiple files
agents = load_agents_from_markdown([
"agent1.md", "agent2.md", "agent3.md", "agent4.md"
]) # concurrent=True by default
# Manual control over concurrency
agents = load_agents_from_markdown(
"./agents_directory/",
concurrent=True, # Enable concurrent processing
max_workers=8 # Limit to 8 worker threads
)
# Disable concurrency for debugging or single files
agents = load_agents_from_markdown(
["single_agent.md"],
concurrent=False # Sequential processing
)
```
### Resource Management
```python
# Default: Uses all CPU cores
agents = load_agents_from_markdown(files, concurrent=True)
# Custom worker count for resource control
agents = load_agents_from_markdown(
files,
concurrent=True,
max_workers=4 # Limit to 4 threads
)
# ThreadPoolExecutor automatically manages:
# - Thread lifecycle
# - Resource cleanup
# - Exception handling
# - Result collection
```
## Advanced Features
### Custom System Prompt Building
The AgentLoader automatically builds comprehensive system prompts from the markdown structure:
```python
loader = AgentLoader()
config = loader.parse_markdown_file("agent.md")
# The system prompt is built from the markdown body that follows the
# YAML frontmatter (role description, focus areas, approach, and
# expected outputs, where present).
print("Generated System Prompt:")
print(config.system_prompt)
```
## Integration with Swarms
The loaded agents are fully compatible with Swarms orchestration systems:
```python
from swarms.utils import load_agents_from_markdown
from swarms.structs import SequentialWorkflow
# Load multiple specialized agents
agents = load_agents_from_markdown("./specialist_agents/")
# Create a sequential workflow
workflow = SequentialWorkflow(
agents=agents,
max_loops=1
)
# Execute complex task across multiple agents
result = workflow.run("Conduct a comprehensive system audit")
```
## Best Practices
1. **Consistent Naming**: Use clear, descriptive agent names
2. **Detailed Descriptions**: Provide comprehensive role descriptions
3. **Structured Sections**: Use the optional sections to define agent behavior
4. **Error Handling**: Always wrap agent loading in try-catch blocks
5. **Model Selection**: Choose appropriate models based on agent complexity
6. **Configuration**: Override defaults when specific behavior is needed
## API Reference
### AgentLoader Class
```python
class AgentLoader:
def __init__(self, model: Optional[LiteLLM] = None)
def parse_markdown_file(self, file_path: str) -> MarkdownAgentConfig
def load_single_agent(self, file_path: str, **kwargs) -> Agent
def load_multiple_agents(self, file_paths: Union[str, List[str]], **kwargs) -> List[Agent]
```
### Convenience Functions
```python
def load_agent_from_markdown(file_path: str, **kwargs) -> Agent
def load_agents_from_markdown(
file_paths: Union[str, List[str]],
concurrent: bool = True, # Enable concurrent processing
max_workers: Optional[int] = None, # Max worker threads (defaults to CPU count)
**kwargs
) -> List[Agent]
```
### Configuration Model
```python
class MarkdownAgentConfig(BaseModel):
name: str
description: str
model_name: Optional[str] = "gpt-4"
temperature: Optional[float] = 0.1 # Model temperature (0.0-2.0)
mcp_url: Optional[str] = None # Optional MCP server URL
system_prompt: str
max_loops: int = 1
autosave: bool = False
dashboard: bool = False
verbose: bool = False
# ... additional configuration fields
```
## Examples Repository
Find complete working examples in the `examples/agent_loader/` directory:
### Single Agent Example (`agent_loader_demo.py`)
```python
from swarms.utils import load_agent_from_markdown
agent = load_agent_from_markdown("finance_advisor.md")
agent.run(
task="Analyze the financial market trends for 2023."
)
```
### Multi-Agent Workflow Example (`multi_agents_loader_demo.py`)
```python
from swarms.utils import load_agents_from_markdown
agents = load_agents_from_markdown([
"market_researcher.md",
"financial_analyst.md",
"risk_analyst.md"
])
# Use agents in a workflow
from swarms.structs.sequential_workflow import SequentialWorkflow
workflow = SequentialWorkflow(
agents=agents,
max_loops=1
)
task = """
Analyze the AI healthcare market for a $50M investment opportunity.
Focus on market size, competition, financials, and risks.
"""
result = workflow.run(task)
```
### Sample Agent Definition (`finance_advisor.md`)
```markdown
---
name: FinanceAdvisor
description: Expert financial advisor for investment and budgeting guidance
model_name: gpt-4o
temperature: 0.7
max_loops: 1
---
You are an expert financial advisor with deep knowledge in:
- Investment strategies and portfolio management
- Personal budgeting and financial planning
- Risk assessment and diversification
- Tax optimization strategies
- Retirement planning
Your approach:
- Provide clear, actionable financial advice
- Consider individual risk tolerance and goals
- Explain complex concepts in simple terms
- Always emphasize the importance of diversification
- Include relevant disclaimers about financial advice
When analyzing financial situations:
1. Assess current financial position
2. Identify short-term and long-term goals
3. Evaluate risk tolerance
4. Recommend appropriate strategies
5. Suggest specific action steps
```
## Support
For questions and support:
- GitHub Issues: [https://github.com/kyegomez/swarms/issues](https://github.com/kyegomez/swarms/issues)
- Documentation: [https://docs.swarms.world](https://docs.swarms.world)
- Community: Join our Discord for real-time support

@ -36,10 +36,8 @@ agent = Agent(
model_name="claude-sonnet-4-20250514",
dynamic_temperature_enabled=True,
output_type="str-all-except-first",
max_loops="auto",
interactive=True,
no_reasoning_prompt=True,
streaming_on=True,
max_loops=1,
dynamic_context_window=True,
)
out = agent.run(

@ -0,0 +1,7 @@
from swarms.utils import load_agent_from_markdown

# Build the agent from its markdown (Claude Code sub-agent) definition.
financial_agent = load_agent_from_markdown("finance_advisor.md")

# Run a single analysis task against the loaded agent.
financial_agent.run(
    task="Analyze the financial market trends for 2023."
)

@ -0,0 +1,28 @@
---
name: FinanceAdvisor
description: Expert financial advisor for investment and budgeting guidance
model_name: gpt-4o
temperature: 0.7
max_loops: 1
---
You are an expert financial advisor with deep knowledge in:
- Investment strategies and portfolio management
- Personal budgeting and financial planning
- Risk assessment and diversification
- Tax optimization strategies
- Retirement planning
Your approach:
- Provide clear, actionable financial advice
- Consider individual risk tolerance and goals
- Explain complex concepts in simple terms
- Always emphasize the importance of diversification
- Include relevant disclaimers about financial advice
When analyzing financial situations:
1. Assess current financial position
2. Identify short-term and long-term goals
3. Evaluate risk tolerance
4. Recommend appropriate strategies
5. Suggest specific action steps

@ -0,0 +1,22 @@
from swarms.utils import load_agents_from_markdown

# Load each specialist agent from its markdown definition file.
specialist_files = [
    "market_researcher.md",
    "financial_analyst.md",
    "risk_analyst.md"
]
loaded_agents = load_agents_from_markdown(specialist_files)

# Example 3: Use agents in a workflow
from swarms.structs.sequential_workflow import SequentialWorkflow

# Chain the agents so each one builds on the previous output.
pipeline = SequentialWorkflow(
    agents=loaded_agents,
    max_loops=1
)

analysis_task = """
Analyze the AI healthcare market for a $50M investment opportunity.
Focus on market size, competition, financials, and risks.
"""
result = pipeline.run(analysis_task)

@ -23,6 +23,9 @@ from loguru import logger
from swarms import Agent, HierarchicalSwarm
from dotenv import load_dotenv
load_dotenv()
# =============================================================================
# DATABASE TOOLS - Core Functions for Database Operations
@ -901,6 +904,7 @@ smart_database_swarm = HierarchicalSwarm(
description="A comprehensive database management system with specialized agents for creation, schema management, data operations, and querying, coordinated by a database director",
director_model_name="gpt-4.1",
agents=database_specialists,
director_reasoning_enabled=False,
max_loops=1,
verbose=True,
)
@ -917,7 +921,8 @@ if __name__ == "__main__":
print("SMART DATABASE SWARM - E-COMMERCE SYSTEM EXAMPLE")
print("=" * 80)
task1 = """Create a comprehensive e-commerce database system with the following requirements:
task1 = """
Create a comprehensive e-commerce database system with the following requirements:
1. Create a database called 'ecommerce_db'
2. Create tables for:

@ -1,5 +1,5 @@
from swarms.structs.agent import Agent
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.council_as_judge import CouncilAsAJudge
# ========== USAGE EXAMPLE ==========

@ -46,7 +46,7 @@ technical_analyst = Agent(
)
# Create list of agents
agents = [market_researcher, financial_analyst, technical_analyst]
agents = [market_researcher, financial_analyst]
# Initialize the concurrent workflow
workflow = ConcurrentWorkflow(

@ -8,7 +8,7 @@ from loguru import logger
from tqdm import tqdm
from swarms.structs.agent import Agent
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.council_as_judge import CouncilAsAJudge
# Dataset configurations
DATASET_CONFIGS = {

@ -1,5 +1,5 @@
from swarms.structs.agent import Agent
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.council_as_judge import CouncilAsAJudge
if __name__ == "__main__":

@ -1,5 +1,5 @@
from swarms.structs.agent import Agent
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.council_as_judge import CouncilAsAJudge
if __name__ == "__main__":

@ -5,7 +5,7 @@ This example shows how to use the CouncilAsAJudge to evaluate various types
of responses including technical explanations, creative writing, and problem-solving.
"""
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.council_as_judge import CouncilAsAJudge
def evaluate_technical_response():

@ -5,7 +5,7 @@ This example shows how to use the CouncilAsAJudge with different output types,
custom worker configurations, and focused evaluation scenarios.
"""
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.council_as_judge import CouncilAsAJudge
def evaluate_with_final_output():

@ -6,7 +6,7 @@ across multiple dimensions including accuracy, helpfulness, harmlessness,
coherence, conciseness, and instruction adherence.
"""
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.council_as_judge import CouncilAsAJudge
def main():

@ -0,0 +1,51 @@
#!/usr/bin/env python3
"""
Basic Graph Workflow Example

A minimal example showing how to use GraphWorkflow with backend selection.
"""

from swarms.structs.graph_workflow import GraphWorkflow
from swarms.structs.agent import Agent

# Three agents that will be chained sequentially through the graph.
agent_one = Agent(agent_name="research_agent", model="gpt-4o-mini")
agent_two = Agent(
    agent_name="research_agent_two", model="gpt-4o-mini"
)
agent_three = Agent(
    agent_name="research_agent_three", model="gpt-4o-mini"
)


def main():
    """Build, compile, and execute a simple three-node graph workflow."""
    graph = GraphWorkflow(
        name="Basic Example",
        verbose=True,
    )

    # Register every agent as a node in the workflow graph.
    for node in (agent_one, agent_two, agent_three):
        graph.add_node(node)

    # Wire the nodes into a linear chain, referenced by agent name.
    graph.add_edge("research_agent", "research_agent_two")
    graph.add_edge("research_agent_two", "research_agent_three")

    # Compile once, then run the task through the chain.
    graph.compile()
    results = graph.run("Complete a simple task")
    return results


if __name__ == "__main__":
    main()

@ -1,19 +0,0 @@
from swarms.sims.senator_assembly import SenatorAssembly


def main():
    """
    Simulate a concurrent Senate vote on a bill to broadly deregulate
    the U.S. IPO (Initial Public Offering) market.

    Initializes the SenatorAssembly and runs the vote simulation with
    senators processed in batches of 10 at a time.
    """
    senator_simulation = SenatorAssembly()
    senator_simulation.simulate_vote_concurrent(
        "A bill proposing to deregulate the IPO (Initial Public Offering) market in the United States as extensively as possible. The bill seeks to remove or significantly reduce existing regulatory requirements and oversight for companies seeking to go public, with the aim of increasing market efficiency and access to capital. Senators must consider the potential economic, legal, and ethical consequences of such broad deregulation, and cast their votes accordingly.",
        batch_size=10,
    )


if __name__ == "__main__":
    main()

@ -30,6 +30,7 @@ try:
WikipediaPersonalityScraper,
MEPPersonalityProfile,
)
WIKIPEDIA_PERSONALITY_AVAILABLE = True
except ImportError:
WIKIPEDIA_PERSONALITY_AVAILABLE = False

@ -1,520 +1,59 @@
"""
EuroSwarm Parliament - Example Script
EuroSwarm Parliament - Simple Example
This script demonstrates the comprehensive democratic functionality of the EuroSwarm Parliament,
including bill introduction, committee work, parliamentary debates, and democratic voting.
A basic demonstration of the EuroSwarm Parliament functionality.
"""
import json
import time
from datetime import datetime
from euroswarm_parliament import EuroSwarmParliament, VoteType
# Import directly from the file
from euroswarm_parliament import (
EuroSwarmParliament,
VoteType,
ParliamentaryRole,
ParliamentaryMember
)
def main():
"""Simple demonstration of EuroSwarm Parliament."""
def demonstrate_parliament_initialization():
"""Demonstrate parliament initialization and basic functionality with cost optimization."""
print("\nEUROSWARM PARLIAMENT INITIALIZATION DEMONSTRATION (COST OPTIMIZED)")
print("=" * 60)
print("EUROSWARM PARLIAMENT - SIMPLE EXAMPLE")
print("=" * 50)
# Initialize the parliament with cost optimization
# Initialize the parliament
parliament = EuroSwarmParliament(
eu_data_file="EU.xml",
parliament_size=None, # Use all MEPs from EU.xml (717)
enable_democratic_discussion=True,
enable_committee_work=True,
enable_amendment_process=True,
enable_lazy_loading=True, # NEW: Lazy load MEP agents
enable_caching=True, # NEW: Enable response caching
batch_size=25, # NEW: Batch size for concurrent execution
budget_limit=100.0, # NEW: Budget limit in dollars
verbose=True
verbose=True,
)
print(f"Parliament initialized with {len(parliament.meps)} MEPs")
# Show parliament composition with cost stats
composition = parliament.get_parliament_composition()
print(f"\nPARLIAMENT COMPOSITION:")
print(f"Total MEPs: {composition['total_meps']}")
print(f"Loaded MEPs: {composition['loaded_meps']} (lazy loading active)")
print(f"\nCOST OPTIMIZATION:")
cost_stats = composition['cost_stats']
print(f"Budget Limit: ${cost_stats['budget_remaining'] + cost_stats['total_cost']:.2f}")
print(f"Budget Used: ${cost_stats['total_cost']:.2f}")
print(f"Budget Remaining: ${cost_stats['budget_remaining']:.2f}")
print(f"Cache Hit Rate: {cost_stats['cache_hit_rate']:.1%}")
print(f"\nPOLITICAL GROUP DISTRIBUTION:")
for group, data in composition['political_groups'].items():
count = data['count']
percentage = data['percentage']
print(f" {group}: {count} MEPs ({percentage:.1f}%)")
print(f"\nCOMMITTEE LEADERSHIP:")
for committee_name, committee_data in composition['committees'].items():
chair = committee_data['chair']
if chair:
print(f" {committee_name}: {chair}")
return parliament
def demonstrate_individual_mep_interaction(parliament):
"""Demonstrate individual MEP interaction and personality."""
print("\nINDIVIDUAL MEP INTERACTION DEMONSTRATION")
print("=" * 60)
# Get a sample MEP
sample_mep_name = list(parliament.meps.keys())[0]
sample_mep = parliament.meps[sample_mep_name]
print(f"Sample MEP: {sample_mep.full_name}")
print(f"\nSample MEP: {sample_mep.full_name}")
print(f"Country: {sample_mep.country}")
print(f"Political Group: {sample_mep.political_group}")
print(f"National Party: {sample_mep.national_party}")
print(f"Committees: {', '.join(sample_mep.committees)}")
print(f"Expertise Areas: {', '.join(sample_mep.expertise_areas)}")
# Test MEP agent interaction
if sample_mep.agent:
test_prompt = "What are your views on European integration and how do you approach cross-border cooperation?"
print(f"\nMEP Response to: '{test_prompt}'")
print("-" * 50)
try:
response = sample_mep.agent.run(test_prompt)
print(response[:500] + "..." if len(response) > 500 else response)
except Exception as e:
print(f"Error getting MEP response: {e}")
def demonstrate_committee_work(parliament):
"""Demonstrate committee work and hearings."""
print("\nCOMMITTEE WORK DEMONSTRATION")
print("=" * 60)
# Get a real MEP as sponsor
sponsor = list(parliament.meps.keys())[0]
# Create a test bill
# Create a simple bill
bill = parliament.introduce_bill(
title="European Digital Rights and Privacy Protection Act",
description="Comprehensive legislation to strengthen digital rights, enhance privacy protection, and establish clear guidelines for data handling across the European Union.",
title="European Digital Rights Act",
description="Basic legislation to protect digital rights across the EU.",
bill_type=VoteType.ORDINARY_LEGISLATIVE_PROCEDURE,
committee="Legal Affairs",
sponsor=sponsor
sponsor=sample_mep_name,
)
print(f"Bill: {bill.title}")
print(f"\nBill introduced: {bill.title}")
print(f"Committee: {bill.committee}")
print(f"Sponsor: {bill.sponsor}")
# Conduct committee hearing
print(f"\nCONDUCTING COMMITTEE HEARING...")
hearing_result = parliament.conduct_committee_hearing(bill.committee, bill)
print(f"Committee: {hearing_result['committee']}")
print(f"Participants: {len(hearing_result['participants'])} MEPs")
print(f"Recommendation: {hearing_result['recommendations']['recommendation']}")
print(f"Support: {hearing_result['recommendations']['support_percentage']:.1f}%")
print(f"Oppose: {hearing_result['recommendations']['oppose_percentage']:.1f}%")
print(f"Amend: {hearing_result['recommendations']['amend_percentage']:.1f}%")
def demonstrate_parliamentary_debate(parliament):
    """Demonstrate parliamentary debate functionality.

    Introduces a Green Deal bill sponsored by a real MEP, runs a
    parliamentary debate on it, and prints a breakdown of speaker
    positions (support / oppose / neutral).

    Args:
        parliament: The EuroSwarm parliament instance to run the demo on.
    """
    print("\nPARLIAMENTARY DEBATE DEMONSTRATION")
    print("=" * 60)
    # The second registered MEP sponsors this bill.
    bill_sponsor = list(parliament.meps.keys())[1]
    # Introduce the bill that will be debated.
    bill = parliament.introduce_bill(
        title="European Green Deal Implementation Act",
        description="Legislation to implement the European Green Deal, including carbon neutrality targets, renewable energy investments, and sustainable development measures.",
        bill_type=VoteType.ORDINARY_LEGISLATIVE_PROCEDURE,
        committee="Environment, Public Health and Food Safety",
        sponsor=bill_sponsor
    )
    print(f"Bill: {bill.title}")
    print(f"Description: {bill.description}")
    # Run the debate, capped at ten speakers.
    print("\nCONDUCTING PARLIAMENTARY DEBATE...")
    debate_result = parliament.conduct_parliamentary_debate(bill, max_speakers=10)
    speakers = debate_result['participants']
    stats = debate_result['analysis']
    print(f"Debate Participants: {len(speakers)} MEPs")
    print("Debate Analysis:")
    print(f"  Support: {stats['support_count']} speakers ({stats['support_percentage']:.1f}%)")
    print(f"  Oppose: {stats['oppose_count']} speakers ({stats['oppose_percentage']:.1f}%)")
    print(f"  Neutral: {stats['neutral_count']} speakers ({stats['neutral_percentage']:.1f}%)")
def demonstrate_democratic_voting(parliament):
    """Demonstrate democratic voting functionality.

    Introduces a social-rights bill, conducts a full democratic vote,
    and prints the tallies with percentages, the overall result, and a
    per-political-group breakdown when one is available.

    Args:
        parliament: The EuroSwarm parliament instance to run the demo on.
    """
    print("\nDEMOCRATIC VOTING DEMONSTRATION")
    print("=" * 60)
    # Use the third registered MEP as sponsor.
    sponsor = list(parliament.meps.keys())[2]
    # Create a test bill.
    bill = parliament.introduce_bill(
        title="European Social Rights and Labor Protection Act",
        description="Legislation to strengthen social rights, improve labor conditions, and ensure fair treatment of workers across the European Union.",
        bill_type=VoteType.ORDINARY_LEGISLATIVE_PROCEDURE,
        committee="Employment and Social Affairs",
        sponsor=sponsor
    )
    print(f"Bill: {bill.title}")
    print(f"Sponsor: {bill.sponsor}")
    # FIX(merge residue): the original text contained both the pre-merge
    # and post-merge print blocks, so the header and the vote results were
    # each printed twice; a single coherent block (the richer one, with
    # percentages) is kept here.
    print("\nCONDUCTING DEMOCRATIC VOTE...")
    vote_result = parliament.conduct_democratic_vote(bill)
    # Guard against a zero total to avoid division by zero.
    total_votes = vote_result.votes_for + vote_result.votes_against + vote_result.abstentions
    in_favor_percentage = (vote_result.votes_for / total_votes * 100) if total_votes > 0 else 0
    against_percentage = (vote_result.votes_against / total_votes * 100) if total_votes > 0 else 0
    abstentions_percentage = (vote_result.abstentions / total_votes * 100) if total_votes > 0 else 0
    print("Vote Results:")
    print(f"  Total Votes: {total_votes}")
    print(f"  In Favor: {vote_result.votes_for} ({in_favor_percentage:.1f}%)")
    print(f"  Against: {vote_result.votes_against} ({against_percentage:.1f}%)")
    print(f"  Abstentions: {vote_result.abstentions} ({abstentions_percentage:.1f}%)")
    print(f"  Result: {vote_result.result.value}")
    # Show the political-group breakdown if the result carries one.
    if hasattr(vote_result, 'group_votes') and vote_result.group_votes:
        print("\nPOLITICAL GROUP BREAKDOWN:")
        for group, votes in vote_result.group_votes.items():
            print(f"  {group}: {votes['in_favor']}/{votes['total']} in favor ({votes['percentage']:.1f}%)")
    else:
        print(f"\nIndividual votes recorded: {len(vote_result.individual_votes)} MEPs")
def demonstrate_complete_democratic_session(parliament):
    """Demonstrate a complete democratic parliamentary session.

    Drives the full introduce -> hearing -> debate -> vote pipeline in a
    single call and prints a one-line summary of each stage's outcome.

    Args:
        parliament: The EuroSwarm parliament instance to run the demo on.
    """
    print("\nCOMPLETE DEMOCRATIC SESSION DEMONSTRATION")
    print("=" * 60)
    # The fourth registered MEP sponsors this session's bill.
    session_sponsor = list(parliament.meps.keys())[3]
    # Run the whole legislative pipeline as one session.
    outcome = parliament.run_democratic_session(
        bill_title="European Innovation and Technology Advancement Act",
        bill_description="Comprehensive legislation to promote innovation, support technology startups, and establish Europe as a global leader in digital transformation and technological advancement.",
        bill_type=VoteType.ORDINARY_LEGISLATIVE_PROCEDURE,
        committee="Industry, Research and Energy",
        sponsor=session_sponsor
    )
    hearing = outcome['hearing']
    vote = outcome['vote']
    print("Session Results:")
    print(f"  Bill: {outcome['bill'].title}")
    print(f"  Committee Hearing: {hearing['recommendations']['recommendation']}")
    print(f"  Debate Participants: {len(outcome['debate']['participants'])} MEPs")
    print(f"  Final Vote: {vote['result']}")
    print(f"  Vote Margin: {vote['in_favor_percentage']:.1f}% in favor")
def demonstrate_political_analysis(parliament):
    """Demonstrate political analysis and voting prediction.

    Introduces a climate bill and prints the predicted support,
    opposition, and uncertainty — overall and per political group.

    Args:
        parliament: The EuroSwarm parliament instance to run the demo on.
    """
    print("\nPOLITICAL ANALYSIS DEMONSTRATION")
    print("=" * 60)
    # The fifth registered MEP sponsors the analysed bill.
    bill_sponsor = list(parliament.meps.keys())[4]
    climate_bill = parliament.introduce_bill(
        title="European Climate Action and Sustainability Act",
        description="Comprehensive climate action legislation including carbon pricing, renewable energy targets, and sustainable development measures.",
        bill_type=VoteType.ORDINARY_LEGISLATIVE_PROCEDURE,
        committee="Environment, Public Health and Food Safety",
        sponsor=bill_sponsor
    )
    print(f"Bill: {climate_bill.title}")
    print(f"Sponsor: {climate_bill.sponsor}")
    # Predict how the chamber would receive the bill.
    analysis = parliament.analyze_political_landscape(climate_bill)
    print("\nPOLITICAL LANDSCAPE ANALYSIS:")
    print(f"  Overall Support: {analysis['overall_support']:.1f}%")
    print(f"  Opposition: {analysis['opposition']:.1f}%")
    print(f"  Uncertainty: {analysis['uncertainty']:.1f}%")
    print("\nPOLITICAL GROUP ANALYSIS:")
    for group_name, group_stats in analysis['group_analysis'].items():
        print(f"  {group_name}: {group_stats['support']:.1f}% support, {group_stats['opposition']:.1f}% opposition")
def demonstrate_hierarchical_democratic_voting(parliament):
    """Demonstrate hierarchical democratic voting with political group boards.

    Introduces a climate bill, runs a two-tier vote in which each
    political group's board decides first, and prints the chamber tally
    plus every board's decision and confidence.

    Args:
        parliament: The EuroSwarm parliament instance to run the demo on.
    """
    print("\nHIERARCHICAL DEMOCRATIC VOTING DEMONSTRATION")
    print("=" * 60)
    # The sixth registered MEP sponsors this bill.
    bill_sponsor = list(parliament.meps.keys())[5]
    bill = parliament.introduce_bill(
        title="European Climate Action and Sustainability Act",
        description="Comprehensive climate action legislation including carbon pricing, renewable energy targets, and sustainable development measures.",
        bill_type=VoteType.ORDINARY_LEGISLATIVE_PROCEDURE,
        committee="Environment, Public Health and Food Safety",
        sponsor=bill_sponsor
    )
    print(f"Bill: {bill.title}")
    print(f"Sponsor: {bill.sponsor}")
    # Run the two-tier (group boards, then chamber) vote.
    print("\nCONDUCTING HIERARCHICAL DEMOCRATIC VOTE...")
    tally = parliament.conduct_hierarchical_democratic_vote(bill)
    print("Hierarchical Vote Results:")
    print(f"  Total Votes: {tally['total_votes']}")
    print(f"  In Favor: {tally['in_favor']} ({tally['in_favor_percentage']:.1f}%)")
    print(f"  Against: {tally['against']} ({tally['against_percentage']:.1f}%)")
    print(f"  Result: {tally['result']}")
    print("\nPOLITICAL GROUP BOARD DECISIONS:")
    for group_name, board_decision in tally['group_decisions'].items():
        print(f"  {group_name}: {board_decision['decision']} ({board_decision['confidence']:.1f}% confidence)")
def demonstrate_complete_hierarchical_session(parliament):
    """Demonstrate a complete hierarchical democratic session.

    Runs the full hearing -> debate -> hierarchical vote pipeline as one
    session and prints a summary line for every stage.

    Args:
        parliament: The EuroSwarm parliament instance to run the demo on.
    """
    print("\nCOMPLETE HIERARCHICAL DEMOCRATIC SESSION DEMONSTRATION")
    print("=" * 60)
    # The seventh registered MEP sponsors the session's bill.
    session_sponsor = list(parliament.meps.keys())[6]
    outcome = parliament.run_hierarchical_democratic_session(
        bill_title="European Climate Action and Sustainability Act",
        bill_description="Comprehensive climate action legislation including carbon pricing, renewable energy targets, and sustainable development measures.",
        bill_type=VoteType.ORDINARY_LEGISLATIVE_PROCEDURE,
        committee="Environment, Public Health and Food Safety",
        sponsor=session_sponsor
    )
    print("Hierarchical Session Results:")
    print(f"  Bill: {outcome['bill'].title}")
    print(f"  Committee Hearing: {outcome['hearing']['recommendations']['recommendation']}")
    print(f"  Debate Participants: {len(outcome['debate']['participants'])} MEPs")
    print(f"  Final Vote: {outcome['vote']['result']}")
    print(f"  Vote Margin: {outcome['vote']['in_favor_percentage']:.1f}% in favor")
def demonstrate_wikipedia_personalities(parliament):
    """Demonstrate the Wikipedia personality system for realistic MEP behavior.

    Shows sample scraped personality profiles, predicts how a few MEPs
    would vote on a climate bill based on their profiles, and summarizes
    the scraping capabilities.

    Args:
        parliament: The EuroSwarm parliament instance to run the demo on.
    """
    print("\nWIKIPEDIA PERSONALITY SYSTEM DEMONSTRATION")
    print("=" * 60)
    # Bail out early if the personality subsystem is disabled.
    if not parliament.enable_wikipedia_personalities:
        print("Wikipedia personality system not available")
        print("To enable: Install required dependencies and run Wikipedia scraper")
        return
    print("Wikipedia personality system enabled")
    print(f"Loaded {len(parliament.personality_profiles)} personality profiles")
    # Show sample personality profiles
    print("\nSAMPLE PERSONALITY PROFILES:")
    print("-" * 40)
    sample_count = 0
    for mep_name, profile in parliament.personality_profiles.items():
        if sample_count >= 3:  # Show only 3 samples
            break
        print(f"\n{mep_name}")
        print(f"  Wikipedia URL: {profile.wikipedia_url if profile.wikipedia_url else 'Not available'}")
        # FIX: the original wrote `print(f"  Label: ..." if field else "msg")`,
        # so a missing field printed an unlabelled, unindented bare message;
        # the label and indent are kept in both branches here.
        print(f"  Summary: {profile.summary[:200]}..." if profile.summary else "  Summary: No summary available")
        print(f"  Political Views: {profile.political_views[:150]}..." if profile.political_views else "  Political Views: Based on party alignment")
        print(f"  Policy Focus: {profile.policy_focus[:150]}..." if profile.policy_focus else "  Policy Focus: General parliamentary work")
        print(f"  Achievements: {profile.achievements[:150]}..." if profile.achievements else "  Achievements: Parliamentary service")
        print(f"  Last Updated: {profile.last_updated}")
        sample_count += 1
    # Demonstrate personality-driven voting
    print("\nPERSONALITY-DRIVEN VOTING DEMONSTRATION:")
    print("-" * 50)
    # Create a test bill that would trigger different personality responses
    bill = parliament.introduce_bill(
        title="European Climate Action and Green Technology Investment Act",
        description="Comprehensive legislation to accelerate Europe's transition to renewable energy, including massive investments in green technology, carbon pricing mechanisms, and support for affected industries and workers.",
        bill_type=VoteType.ORDINARY_LEGISLATIVE_PROCEDURE,
        committee="Environment",
        sponsor="Climate Action Leader"
    )
    print(f"Bill: {bill.title}")
    print(f"Description: {bill.description}")
    # Show how different MEPs with Wikipedia personalities would respond
    print("\nPERSONALITY-BASED RESPONSES:")
    print("-" * 40)
    sample_meps = list(parliament.personality_profiles.keys())[:3]
    for mep_name in sample_meps:
        mep = parliament.meps.get(mep_name)
        profile = parliament.personality_profiles.get(mep_name)
        if mep and profile:
            print(f"\n{mep_name} ({mep.political_group})")
            # Show personality influence
            if profile.political_views:
                print(f"  Political Views: {profile.political_views[:100]}...")
            if profile.policy_focus:
                print(f"  Policy Focus: {profile.policy_focus[:100]}...")
            # Predict voting behavior based on personality.
            # FIX: the original called .lower() directly on fields that the
            # truthiness checks above show can be empty/None, which would
            # raise AttributeError; default to "" first.
            focus_text = (profile.policy_focus or "").lower()
            views_text = (profile.political_views or "").lower()
            if "environment" in focus_text or "climate" in views_text:
                predicted_vote = "LIKELY SUPPORT"
                reasoning = "Environmental policy focus and climate advocacy"
            elif "economic" in focus_text or "business" in views_text:
                predicted_vote = "LIKELY OPPOSE"
                reasoning = "Economic concerns about investment costs"
            else:
                predicted_vote = "UNCERTAIN"
                reasoning = "Mixed considerations based on party alignment"
            print(f"  Predicted Vote: {predicted_vote}")
            print(f"  Reasoning: {reasoning}")
    # Demonstrate scraping functionality
    print("\nWIKIPEDIA SCRAPING CAPABILITIES:")
    print("-" * 50)
    print("Can scrape Wikipedia data for all 717 MEPs")
    print("Extracts political views, career history, and achievements")
    print("Creates detailed personality profiles in JSON format")
    print("Integrates real personality data into AI agent system prompts")
    print("Enables realistic, personality-driven voting behavior")
    print("Respectful API usage with configurable delays")
    print("\nTo scrape all MEP personalities:")
    print("  parliament.scrape_wikipedia_personalities(delay=1.0)")
    print("  # This will create personality profiles for all 717 MEPs")
    print("  # Profiles are saved in 'mep_personalities/' directory")
def demonstrate_optimized_parliamentary_session(parliament):
    """Demonstrate cost-optimized parliamentary session.

    Runs a budget-capped session (max $25) and prints the outcome plus
    detailed token/cache/cost statistics.

    Args:
        parliament: The EuroSwarm parliament instance to run the demo on.

    Returns:
        The session result dictionary produced by the parliament.
    """
    print("\nCOST-OPTIMIZED PARLIAMENTARY SESSION DEMONSTRATION")
    print("=" * 60)
    # Run the session under a hard $25 spending cap.
    result = parliament.run_optimized_parliamentary_session(
        bill_title="European Digital Rights and Privacy Protection Act",
        bill_description="Comprehensive legislation to strengthen digital rights, enhance privacy protection, and establish clear guidelines for data handling across the European Union.",
        bill_type=VoteType.ORDINARY_LEGISLATIVE_PROCEDURE,
        committee="Legal Affairs",
        max_cost=25.0  # Max $25 for this session
    )
    summary = result['session_summary']
    print("Session Results:")
    print(f"  Bill: {summary['bill_title']}")
    print(f"  Final Outcome: {summary['final_outcome']}")
    print(f"  Total Cost: ${summary['total_cost']:.2f}")
    print(f"  Budget Remaining: ${result['cost_stats']['budget_remaining']:.2f}")
    # Pull the tracker's detailed statistics for display.
    cost_stats = parliament.get_cost_statistics()
    print("\nDETAILED COST STATISTICS:")
    print(f"  Total Tokens Used: {cost_stats['total_tokens']:,}")
    print(f"  Requests Made: {cost_stats['requests_made']}")
    print(f"  Cache Hits: {cost_stats['cache_hits']}")
    print(f"  Cache Hit Rate: {cost_stats['cache_hit_rate']:.1%}")
    print(f"  Loading Efficiency: {cost_stats['loading_efficiency']:.1%}")
    print(f"  Cache Size: {cost_stats['cache_size']} entries")
    return result
def main():
    """Main demonstration function.

    Walks through every cost-optimization demo in sequence and prints
    the final spending / caching statistics.
    """
    print("EUROSWARM PARLIAMENT - COST OPTIMIZED DEMONSTRATION")
    print("=" * 60)
    print("This demonstration shows the EuroSwarm Parliament with cost optimization features:")
    # Feature overview, one bullet per line.
    for feature in (
        "• Lazy loading of MEP agents (only create when needed)",
        "• Response caching (avoid repeated API calls)",
        "• Batch processing (control memory and cost)",
        "• Budget controls (hard limits on spending)",
        "• Cost tracking (real-time monitoring)",
    ):
        print(feature)
    # Build the parliament, then run each demo against it in turn.
    parliament = demonstrate_parliament_initialization()
    demonstrate_individual_mep_interaction(parliament)  # triggers lazy loading
    demonstrate_committee_work(parliament)
    demonstrate_parliamentary_debate(parliament)
    demonstrate_democratic_voting(parliament)
    demonstrate_political_analysis(parliament)
    demonstrate_optimized_parliamentary_session(parliament)
    # Report what the whole run cost.
    stats = parliament.get_cost_statistics()
    print("\nFINAL COST STATISTICS:")
    print(f"Total Cost: ${stats['total_cost']:.2f}")
    print(f"Budget Remaining: ${stats['budget_remaining']:.2f}")
    print(f"Cache Hit Rate: {stats['cache_hit_rate']:.1%}")
    print(f"Loading Efficiency: {stats['loading_efficiency']:.1%}")
    print("\n✅ COST OPTIMIZATION DEMONSTRATION COMPLETED!")
    print("✅ EuroSwarm Parliament now supports cost-effective large-scale simulations")
    print(f"✅ Lazy loading: {stats['loaded_meps']}/{stats['total_meps']} MEPs loaded")
    print(f"✅ Caching: {stats['cache_hit_rate']:.1%} hit rate")
    print(f"✅ Budget control: ${stats['total_cost']:.2f} spent of ${stats['budget_remaining'] + stats['total_cost']:.2f} budget")
    print("\n✅ Simple example completed!")
if __name__ == "__main__":

@ -17,13 +17,10 @@ Key Features:
import os
import random
import json
import time
import hashlib
from typing import Dict, List, Optional, Union, Any, Set
from typing import Dict, List, Optional, Any
from dataclasses import dataclass, field
from enum import Enum
from datetime import datetime
from functools import lru_cache
from swarms import Agent
from swarms.structs.multi_agent_exec import run_agents_concurrently
@ -31,10 +28,6 @@ from swarms.structs.board_of_directors_swarm import (
BoardOfDirectorsSwarm,
BoardMember,
BoardMemberRole,
BoardDecisionType,
BoardSpec,
BoardOrder,
BoardDecision,
enable_board_feature,
)
from swarms.utils.loguru_logger import initialize_logger
@ -136,7 +129,9 @@ class CostTracker:
def add_tokens(self, tokens: int):
"""Add tokens used and calculate cost."""
self.total_tokens_used += tokens
self.total_cost_estimate = (self.total_tokens_used / 1_000_000) * self.token_cost_per_1m
self.total_cost_estimate = (
self.total_tokens_used / 1_000_000
) * self.token_cost_per_1m
self.requests_made += 1
def add_cache_hit(self):
@ -154,8 +149,11 @@ class CostTracker:
"total_cost": self.total_cost_estimate,
"requests_made": self.requests_made,
"cache_hits": self.cache_hits,
"cache_hit_rate": self.cache_hits / max(1, self.requests_made + self.cache_hits),
"budget_remaining": max(0, self.budget_limit - self.total_cost_estimate)
"cache_hit_rate": self.cache_hits
/ max(1, self.requests_made + self.cache_hits),
"budget_remaining": max(
0, self.budget_limit - self.total_cost_estimate
),
}
@ -195,7 +193,9 @@ class MassAgentTemplate:
"""
self.data_source = data_source
self.agent_count = agent_count
self.enable_hierarchical_organization = enable_hierarchical_organization
self.enable_hierarchical_organization = (
enable_hierarchical_organization
)
self.enable_group_swarms = enable_group_swarms
self.enable_lazy_loading = enable_lazy_loading
self.enable_caching = enable_caching
@ -220,9 +220,15 @@ class MassAgentTemplate:
self._organize_agents()
if self.verbose:
logger.info(f"Mass Agent Template initialized with {len(self.agents)} agent profiles")
logger.info(f"Lazy loading: {self.enable_lazy_loading}, Caching: {self.enable_caching}")
logger.info(f"Budget limit: ${budget_limit}, Batch size: {batch_size}")
logger.info(
f"Mass Agent Template initialized with {len(self.agents)} agent profiles"
)
logger.info(
f"Lazy loading: {self.enable_lazy_loading}, Caching: {self.enable_caching}"
)
logger.info(
f"Budget limit: ${budget_limit}, Batch size: {batch_size}"
)
def _load_agent_profiles(self) -> List[Dict[str, Any]]:
"""
@ -238,15 +244,20 @@ class MassAgentTemplate:
if self.data_source and os.path.exists(self.data_source):
# Load from file - customize based on your data format
try:
if self.data_source.endswith('.json'):
with open(self.data_source, 'r', encoding='utf-8') as f:
if self.data_source.endswith(".json"):
with open(
self.data_source, "r", encoding="utf-8"
) as f:
agent_data = json.load(f)
elif self.data_source.endswith('.csv'):
elif self.data_source.endswith(".csv"):
import pandas as pd
df = pd.read_csv(self.data_source)
agent_data = df.to_dict('records')
agent_data = df.to_dict("records")
else:
logger.warning(f"Unsupported data format: {self.data_source}")
logger.warning(
f"Unsupported data format: {self.data_source}"
)
except Exception as e:
logger.error(f"Error loading agent data: {e}")
@ -265,7 +276,7 @@ class MassAgentTemplate:
skills=data["skills"],
experience_level=data["experience_level"],
agent=None, # Will be created on demand
is_loaded=False
is_loaded=False,
)
self.agents[data["name"]] = agent_profile
@ -300,7 +311,9 @@ class MassAgentTemplate:
return profile.agent
def _load_agents_batch(self, agent_names: List[str]) -> List[Agent]:
def _load_agents_batch(
self, agent_names: List[str]
) -> List[Agent]:
"""
Load multiple agents in a batch.
@ -319,7 +332,9 @@ class MassAgentTemplate:
return loaded_agents
def _get_cache_key(self, task: str, agent_names: List[str]) -> str:
def _get_cache_key(
self, task: str, agent_names: List[str]
) -> str:
"""
Generate a cache key for a task and agent combination.
@ -367,7 +382,9 @@ class MassAgentTemplate:
if self.enable_caching:
self.response_cache[cache_key] = response
if self.verbose:
logger.info(f"Cached response for key: {cache_key[:20]}...")
logger.info(
f"Cached response for key: {cache_key[:20]}..."
)
def _generate_synthetic_data(self) -> List[Dict[str, Any]]:
"""
@ -384,47 +401,107 @@ class MassAgentTemplate:
"name": "Alex_Developer",
"role": AgentRole.SPECIALIST,
"category": AgentCategory.TECHNICAL,
"specialization": ["Python", "Machine Learning", "API Development"],
"personality_traits": ["analytical", "detail-oriented", "problem-solver"],
"skills": ["Python", "TensorFlow", "FastAPI", "Docker"],
"experience_level": "senior"
"specialization": [
"Python",
"Machine Learning",
"API Development",
],
"personality_traits": [
"analytical",
"detail-oriented",
"problem-solver",
],
"skills": [
"Python",
"TensorFlow",
"FastAPI",
"Docker",
],
"experience_level": "senior",
},
{
"name": "Sarah_Designer",
"role": AgentRole.CREATOR,
"category": AgentCategory.CREATIVE,
"specialization": ["UI/UX Design", "Visual Design", "Brand Identity"],
"personality_traits": ["creative", "user-focused", "aesthetic"],
"skills": ["Figma", "Adobe Creative Suite", "User Research", "Prototyping"],
"experience_level": "senior"
"specialization": [
"UI/UX Design",
"Visual Design",
"Brand Identity",
],
"personality_traits": [
"creative",
"user-focused",
"aesthetic",
],
"skills": [
"Figma",
"Adobe Creative Suite",
"User Research",
"Prototyping",
],
"experience_level": "senior",
},
{
"name": "Mike_Analyst",
"role": AgentRole.ANALYST,
"category": AgentCategory.ANALYTICAL,
"specialization": ["Data Analysis", "Business Intelligence", "Market Research"],
"personality_traits": ["data-driven", "curious", "insightful"],
"specialization": [
"Data Analysis",
"Business Intelligence",
"Market Research",
],
"personality_traits": [
"data-driven",
"curious",
"insightful",
],
"skills": ["SQL", "Python", "Tableau", "Statistics"],
"experience_level": "expert"
"experience_level": "expert",
},
{
"name": "Lisa_Manager",
"role": AgentRole.MANAGER,
"category": AgentCategory.STRATEGIC,
"specialization": ["Project Management", "Team Leadership", "Strategic Planning"],
"personality_traits": ["organized", "leadership", "strategic"],
"skills": ["Agile", "Scrum", "Risk Management", "Stakeholder Communication"],
"experience_level": "senior"
"specialization": [
"Project Management",
"Team Leadership",
"Strategic Planning",
],
"personality_traits": [
"organized",
"leadership",
"strategic",
],
"skills": [
"Agile",
"Scrum",
"Risk Management",
"Stakeholder Communication",
],
"experience_level": "senior",
},
{
"name": "Tom_Coordinator",
"role": AgentRole.COORDINATOR,
"category": AgentCategory.OPERATIONAL,
"specialization": ["Process Optimization", "Workflow Management", "Resource Allocation"],
"personality_traits": ["efficient", "coordinated", "systematic"],
"skills": ["Process Mapping", "Automation", "Resource Planning", "Quality Assurance"],
"experience_level": "senior"
}
"specialization": [
"Process Optimization",
"Workflow Management",
"Resource Allocation",
],
"personality_traits": [
"efficient",
"coordinated",
"systematic",
],
"skills": [
"Process Mapping",
"Automation",
"Resource Planning",
"Quality Assurance",
],
"experience_level": "senior",
},
]
# Generate the specified number of agents
@ -437,14 +514,18 @@ class MassAgentTemplate:
"role": template["role"],
"category": template["category"],
"specialization": template["specialization"].copy(),
"personality_traits": template["personality_traits"].copy(),
"personality_traits": template[
"personality_traits"
].copy(),
"skills": template["skills"].copy(),
"experience_level": template["experience_level"]
"experience_level": template["experience_level"],
}
# Add some randomization for variety
if random.random() < 0.3:
agent_data["experience_level"] = random.choice(["junior", "senior", "expert"])
agent_data["experience_level"] = random.choice(
["junior", "senior", "expert"]
)
synthetic_data.append(agent_data)
@ -470,7 +551,9 @@ class MassAgentTemplate:
verbose=self.verbose,
)
def _generate_agent_system_prompt(self, profile: AgentProfile) -> str:
def _generate_agent_system_prompt(
self, profile: AgentProfile
) -> str:
"""
Generate a comprehensive system prompt for an agent.
@ -526,58 +609,54 @@ Remember: You are part of a large multi-agent system. Your unique combination of
- Report progress and any issues encountered
- Maintain quality standards in all work
- Collaborate with team members as needed""",
AgentRole.MANAGER: """
- Oversee team activities and coordinate efforts
- Set priorities and allocate resources
- Monitor progress and ensure deadlines are met
- Provide guidance and support to team members
- Make strategic decisions for the team""",
AgentRole.SPECIALIST: """
- Provide expert knowledge in specific domains
- Solve complex technical problems
- Mentor other agents in your area of expertise
- Stay updated on latest developments in your field
- Contribute specialized insights to projects""",
AgentRole.COORDINATOR: """
- Facilitate communication between different groups
- Ensure smooth workflow and process optimization
- Manage dependencies and resource allocation
- Track project timelines and milestones
- Resolve conflicts and bottlenecks""",
AgentRole.ANALYST: """
- Analyze data and extract meaningful insights
- Identify patterns and trends
- Provide evidence-based recommendations
- Create reports and visualizations
- Support decision-making with data""",
AgentRole.CREATOR: """
- Generate innovative ideas and solutions
- Design and develop new content or products
- Think creatively and outside the box
- Prototype and iterate on concepts
- Inspire and motivate other team members""",
AgentRole.VALIDATOR: """
- Review and validate work quality
- Ensure compliance with standards and requirements
- Provide constructive feedback
- Identify potential issues and risks
- Maintain quality assurance processes""",
AgentRole.EXECUTOR: """
- Implement plans and strategies
- Execute tasks with precision and efficiency
- Adapt to changing circumstances
- Ensure successful completion of objectives
- Maintain focus on results and outcomes"""
- Maintain focus on results and outcomes""",
}
return responsibilities.get(role, "Execute tasks according to your role and expertise.")
return responsibilities.get(
role,
"Execute tasks according to your role and expertise.",
)
def _organize_agents(self):
"""Organize agents into groups and categories."""
@ -601,13 +680,15 @@ Remember: You are part of a large multi-agent system. Your unique combination of
category=category,
agents=agent_names,
leader=leader,
total_agents=len(agent_names)
total_agents=len(agent_names),
)
self.groups[group_name] = group
if self.verbose:
logger.info(f"Organized agents into {len(self.groups)} groups")
logger.info(
f"Organized agents into {len(self.groups)} groups"
)
def _create_group_swarms(self):
"""Create Board of Directors swarms for each group."""
@ -623,28 +704,41 @@ Remember: You are part of a large multi-agent system. Your unique combination of
if group.leader and group.leader in self.agents:
leader_profile = self.agents[group.leader]
if leader_profile.agent:
board_members.append(BoardMember(
agent=leader_profile.agent,
role=BoardMemberRole.CHAIRMAN,
voting_weight=1.0,
expertise_areas=leader_profile.specialization
))
board_members.append(
BoardMember(
agent=leader_profile.agent,
role=BoardMemberRole.CHAIRMAN,
voting_weight=1.0,
expertise_areas=leader_profile.specialization,
)
)
# Add other agents as board members
for agent_name in group.agents[:5]: # Limit to 5 board members
if agent_name != group.leader and agent_name in self.agents:
for agent_name in group.agents[
:5
]: # Limit to 5 board members
if (
agent_name != group.leader
and agent_name in self.agents
):
profile = self.agents[agent_name]
if profile.agent:
board_members.append(BoardMember(
agent=profile.agent,
role=BoardMemberRole.EXECUTIVE_DIRECTOR,
voting_weight=0.8,
expertise_areas=profile.specialization
))
board_members.append(
BoardMember(
agent=profile.agent,
role=BoardMemberRole.EXECUTIVE_DIRECTOR,
voting_weight=0.8,
expertise_areas=profile.specialization,
)
)
# Create Board of Directors swarm
if board_members:
agents = [member.agent for member in board_members if member.agent is not None]
agents = [
member.agent
for member in board_members
if member.agent is not None
]
group.group_swarm = BoardOfDirectorsSwarm(
name=group_name,
@ -655,11 +749,13 @@ Remember: You are part of a large multi-agent system. Your unique combination of
verbose=self.verbose,
decision_threshold=0.6,
enable_voting=True,
enable_consensus=True
enable_consensus=True,
)
if self.verbose:
logger.info(f"Created {len([g for g in self.groups.values() if g.group_swarm])} group swarms")
logger.info(
f"Created {len([g for g in self.groups.values() if g.group_swarm])} group swarms"
)
def get_agent(self, agent_name: str) -> Optional[AgentProfile]:
"""
@ -685,7 +781,9 @@ Remember: You are part of a large multi-agent system. Your unique combination of
"""
return self.groups.get(group_name)
def get_agents_by_category(self, category: AgentCategory) -> List[str]:
def get_agents_by_category(
self, category: AgentCategory
) -> List[str]:
"""
Get all agents in a specific category.
@ -707,9 +805,15 @@ Remember: You are part of a large multi-agent system. Your unique combination of
Returns:
List[str]: List of agent names with the role
"""
return [name for name, profile in self.agents.items() if profile.role == role]
return [
name
for name, profile in self.agents.items()
if profile.role == role
]
def run_mass_task(self, task: str, agent_count: int = 10) -> Dict[str, Any]:
def run_mass_task(
self, task: str, agent_count: int = 10
) -> Dict[str, Any]:
"""
Run a task with multiple agents working in parallel with cost optimization.
@ -722,10 +826,16 @@ Remember: You are part of a large multi-agent system. Your unique combination of
"""
# Check budget before starting
if not self.cost_tracker.check_budget():
return {"error": "Budget exceeded", "cost_stats": self.cost_tracker.get_stats()}
return {
"error": "Budget exceeded",
"cost_stats": self.cost_tracker.get_stats(),
}
# Select random agents
selected_agent_names = random.sample(list(self.agents.keys()), min(agent_count, len(self.agents)))
selected_agent_names = random.sample(
list(self.agents.keys()),
min(agent_count, len(self.agents)),
)
# Check cache first
cache_key = self._get_cache_key(task, selected_agent_names)
@ -737,7 +847,7 @@ Remember: You are part of a large multi-agent system. Your unique combination of
"results": cached_result,
"total_agents": len(selected_agent_names),
"cached": True,
"cost_stats": self.cost_tracker.get_stats()
"cost_stats": self.cost_tracker.get_stats(),
}
# Process in batches to control memory and cost
@ -745,12 +855,18 @@ Remember: You are part of a large multi-agent system. Your unique combination of
total_processed = 0
for i in range(0, len(selected_agent_names), self.batch_size):
batch_names = selected_agent_names[i:i + self.batch_size]
batch_names = selected_agent_names[
i : i + self.batch_size
]
# Check budget for this batch
if not self.cost_tracker.check_budget():
logger.warning(f"Budget exceeded after processing {total_processed} agents")
logger.warning(f"Current cost: ${self.cost_tracker.total_cost_estimate:.4f}, Budget: ${self.cost_tracker.budget_limit:.2f}")
logger.warning(
f"Budget exceeded after processing {total_processed} agents"
)
logger.warning(
f"Current cost: ${self.cost_tracker.total_cost_estimate:.4f}, Budget: ${self.cost_tracker.budget_limit:.2f}"
)
break
# Load agents for this batch
@ -761,20 +877,30 @@ Remember: You are part of a large multi-agent system. Your unique combination of
# Run batch
try:
batch_results = run_agents_concurrently(batch_agents, task)
batch_results = run_agents_concurrently(
batch_agents, task
)
all_results.extend(batch_results)
total_processed += len(batch_agents)
# Estimate tokens used (more realistic approximation)
# Include both input tokens (task) and output tokens (response)
task_tokens = len(task.split()) * 1.3 # ~1.3 tokens per word
response_tokens = len(batch_agents) * 200 # ~200 tokens per response
task_tokens = (
len(task.split()) * 1.3
) # ~1.3 tokens per word
response_tokens = (
len(batch_agents) * 200
) # ~200 tokens per response
total_tokens = int(task_tokens + response_tokens)
self.cost_tracker.add_tokens(total_tokens)
if self.verbose:
logger.info(f"Processed batch {i//self.batch_size + 1}: {len(batch_agents)} agents")
logger.info(f"Current cost: ${self.cost_tracker.total_cost_estimate:.4f}, Budget remaining: ${self.cost_tracker.budget_limit - self.cost_tracker.total_cost_estimate:.2f}")
logger.info(
f"Processed batch {i//self.batch_size + 1}: {len(batch_agents)} agents"
)
logger.info(
f"Current cost: ${self.cost_tracker.total_cost_estimate:.4f}, Budget remaining: ${self.cost_tracker.budget_limit - self.cost_tracker.total_cost_estimate:.2f}"
)
except Exception as e:
logger.error(f"Error processing batch: {e}")
@ -790,11 +916,15 @@ Remember: You are part of a large multi-agent system. Your unique combination of
"results": all_results,
"total_agents": total_processed,
"cached": False,
"cost_stats": self.cost_tracker.get_stats()
"cost_stats": self.cost_tracker.get_stats(),
}
def run_mass_task_optimized(self, task: str, agent_count: int = 1000,
max_cost: float = 10.0) -> Dict[str, Any]:
def run_mass_task_optimized(
self,
task: str,
agent_count: int = 1000,
max_cost: float = 10.0,
) -> Dict[str, Any]:
"""
Run a task with cost-optimized mass execution for large-scale operations.
@ -816,7 +946,9 @@ Remember: You are part of a large multi-agent system. Your unique combination of
self.cost_tracker.budget_limit = max_cost
# Use smaller batches for better cost control
self.batch_size = min(25, self.batch_size) # Smaller batches for cost control
self.batch_size = min(
25, self.batch_size
) # Smaller batches for cost control
result = self.run_mass_task(task, agent_count)
@ -827,7 +959,9 @@ Remember: You are part of a large multi-agent system. Your unique combination of
self.cost_tracker.budget_limit = original_budget
self.batch_size = original_batch_size
def run_group_task(self, group_name: str, task: str) -> Dict[str, Any]:
def run_group_task(
self, group_name: str, task: str
) -> Dict[str, Any]:
"""
Run a task with a specific group using their Board of Directors swarm.
@ -840,7 +974,9 @@ Remember: You are part of a large multi-agent system. Your unique combination of
"""
group = self.groups.get(group_name)
if not group or not group.group_swarm:
return {"error": f"Group {group_name} not found or no swarm available"}
return {
"error": f"Group {group_name} not found or no swarm available"
}
# Run task with group swarm
result = group.group_swarm.run(task)
@ -849,7 +985,7 @@ Remember: You are part of a large multi-agent system. Your unique combination of
"group": group_name,
"task": task,
"result": result,
"agents_involved": group.agents
"agents_involved": group.agents,
}
def get_system_stats(self) -> Dict[str, Any]:
@ -862,7 +998,9 @@ Remember: You are part of a large multi-agent system. Your unique combination of
stats = {
"total_agents": len(self.agents),
"total_groups": len(self.groups),
"loaded_agents": len([a for a in self.agents.values() if a.is_loaded]),
"loaded_agents": len(
[a for a in self.agents.values() if a.is_loaded]
),
"categories": {},
"roles": {},
"experience_levels": {},
@ -871,23 +1009,29 @@ Remember: You are part of a large multi-agent system. Your unique combination of
"lazy_loading": self.enable_lazy_loading,
"caching": self.enable_caching,
"batch_size": self.batch_size,
"budget_limit": self.cost_tracker.budget_limit
}
"budget_limit": self.cost_tracker.budget_limit,
},
}
# Category breakdown
for category in AgentCategory:
stats["categories"][category.value] = len(self.get_agents_by_category(category))
stats["categories"][category.value] = len(
self.get_agents_by_category(category)
)
# Role breakdown
for role in AgentRole:
stats["roles"][role.value] = len(self.get_agents_by_role(role))
stats["roles"][role.value] = len(
self.get_agents_by_role(role)
)
# Experience level breakdown
experience_counts = {}
for profile in self.agents.values():
level = profile.experience_level
experience_counts[level] = experience_counts.get(level, 0) + 1
experience_counts[level] = (
experience_counts.get(level, 0) + 1
)
stats["experience_levels"] = experience_counts
return stats
@ -909,89 +1053,113 @@ def demonstrate_mass_agent_template():
enable_caching=True,
batch_size=25,
budget_limit=50.0, # $50 budget limit
verbose=True
verbose=True,
)
# Show system statistics
stats = template.get_system_stats()
print(f"\nSYSTEM STATISTICS:")
print("\nSYSTEM STATISTICS:")
print(f"Total Agents: {stats['total_agents']}")
print(f"Loaded Agents: {stats['loaded_agents']} (lazy loading active)")
print(
f"Loaded Agents: {stats['loaded_agents']} (lazy loading active)"
)
print(f"Total Groups: {stats['total_groups']}")
print(f"\nCOST OPTIMIZATION:")
cost_stats = stats['cost_stats']
print(f"Budget Limit: ${cost_stats['budget_remaining'] + cost_stats['total_cost']:.2f}")
print("\nCOST OPTIMIZATION:")
cost_stats = stats["cost_stats"]
print(
f"Budget Limit: ${cost_stats['budget_remaining'] + cost_stats['total_cost']:.2f}"
)
print(f"Budget Used: ${cost_stats['total_cost']:.2f}")
print(f"Budget Remaining: ${cost_stats['budget_remaining']:.2f}")
print(f"Cache Hit Rate: {cost_stats['cache_hit_rate']:.1%}")
print(f"\nCATEGORY BREAKDOWN:")
for category, count in stats['categories'].items():
print("\nCATEGORY BREAKDOWN:")
for category, count in stats["categories"].items():
print(f" {category}: {count} agents")
print(f"\nROLE BREAKDOWN:")
for role, count in stats['roles'].items():
print("\nROLE BREAKDOWN:")
for role, count in stats["roles"].items():
print(f" {role}: {count} agents")
print(f"\nEXPERIENCE LEVEL BREAKDOWN:")
for level, count in stats['experience_levels'].items():
print("\nEXPERIENCE LEVEL BREAKDOWN:")
for level, count in stats["experience_levels"].items():
print(f" {level}: {count} agents")
# Demonstrate cost-optimized mass task execution
print(f"\nCOST-OPTIMIZED MASS TASK DEMONSTRATION:")
print("\nCOST-OPTIMIZED MASS TASK DEMONSTRATION:")
print("-" * 40)
# Small task first (low cost)
small_result = template.run_mass_task(
"What is the most important skill for a software developer?",
agent_count=5
agent_count=5,
)
print(f"Small Task Results:")
print("Small Task Results:")
print(f" Agents Used: {len(small_result['agents_used'])}")
print(f" Cached: {small_result.get('cached', False)}")
print(f" Cost: ${small_result['cost_stats']['total_cost']:.2f}")
# Large task to demonstrate full capability
print(f"\nLarge Task Demonstration (Full Capability):")
print("\nLarge Task Demonstration (Full Capability):")
large_result = template.run_mass_task(
"Analyze the benefits of cloud computing for small businesses",
agent_count=200 # Use more agents to show capability
agent_count=200, # Use more agents to show capability
)
print(f" Agents Used: {len(large_result['agents_used'])}")
print(f" Cached: {large_result.get('cached', False)}")
print(f" Cost: ${large_result['cost_stats']['total_cost']:.2f}")
print(f" Budget Remaining: ${large_result['cost_stats']['budget_remaining']:.2f}")
print(
f" Budget Remaining: ${large_result['cost_stats']['budget_remaining']:.2f}"
)
# Show what happens with cost limits
print(f"\nCost-Limited Task Demonstration:")
print("\nCost-Limited Task Demonstration:")
cost_limited_result = template.run_mass_task_optimized(
"What are the key principles of agile development?",
agent_count=100,
max_cost=2.0 # Show cost limiting in action
max_cost=2.0, # Show cost limiting in action
)
print(f" Agents Used: {len(cost_limited_result['agents_used'])}")
print(f" Cached: {cost_limited_result.get('cached', False)}")
print(f" Cost: ${cost_limited_result['cost_stats']['total_cost']:.2f}")
print(f" Budget Remaining: ${cost_limited_result['cost_stats']['budget_remaining']:.2f}")
print(
f" Cost: ${cost_limited_result['cost_stats']['total_cost']:.2f}"
)
print(
f" Budget Remaining: ${cost_limited_result['cost_stats']['budget_remaining']:.2f}"
)
# Show final cost statistics
final_stats = template.get_system_stats()
print(f"\nFINAL COST STATISTICS:")
print(f"Total Cost: ${final_stats['cost_stats']['total_cost']:.2f}")
print(f"Budget Remaining: ${final_stats['cost_stats']['budget_remaining']:.2f}")
print(f"Cache Hit Rate: {final_stats['cost_stats']['cache_hit_rate']:.1%}")
print(f"Total Requests: {final_stats['cost_stats']['requests_made']}")
print("\nFINAL COST STATISTICS:")
print(
f"Total Cost: ${final_stats['cost_stats']['total_cost']:.2f}"
)
print(
f"Budget Remaining: ${final_stats['cost_stats']['budget_remaining']:.2f}"
)
print(
f"Cache Hit Rate: {final_stats['cost_stats']['cache_hit_rate']:.1%}"
)
print(
f"Total Requests: {final_stats['cost_stats']['requests_made']}"
)
print(f"Cache Hits: {final_stats['cost_stats']['cache_hits']}")
print(f"\nDEMONSTRATION COMPLETED SUCCESSFULLY!")
print(f"✅ Cost optimization working: ${final_stats['cost_stats']['total_cost']:.2f} spent")
print(f"✅ Lazy loading working: {final_stats['loaded_agents']}/{final_stats['total_agents']} agents loaded")
print(f"✅ Caching working: {final_stats['cost_stats']['cache_hit_rate']:.1%} hit rate")
print("\nDEMONSTRATION COMPLETED SUCCESSFULLY!")
print(
f"✅ Cost optimization working: ${final_stats['cost_stats']['total_cost']:.2f} spent"
)
print(
f"✅ Lazy loading working: {final_stats['loaded_agents']}/{final_stats['total_agents']} agents loaded"
)
print(
f"✅ Caching working: {final_stats['cost_stats']['cache_hit_rate']:.1%} hit rate"
)
if __name__ == "__main__":

@ -5,8 +5,11 @@ Test script to verify mass agent template can process more than 500 agents.
from mass_agent_template import MassAgentTemplate
def test_mass_agents():
print("Testing Mass Agent Template - Processing More Than 50 Agents")
print(
"Testing Mass Agent Template - Processing More Than 50 Agents"
)
print("=" * 60)
# Initialize template with 200 agents
@ -14,48 +17,57 @@ def test_mass_agents():
agent_count=200,
budget_limit=50.0,
batch_size=25,
verbose=True
verbose=True,
)
print(f"Initialized with {len(template.agents)} agents")
print(f"Budget limit: ${template.cost_tracker.budget_limit}")
# Test processing 100 agents
print(f"\nTesting with 100 agents...")
print("\nTesting with 100 agents...")
result = template.run_mass_task(
"What is the most important skill for your role?",
agent_count=100
agent_count=100,
)
print(f"Results:")
print("Results:")
print(f" Agents processed: {len(result['agents_used'])}")
print(f" Cost: ${result['cost_stats']['total_cost']:.4f}")
print(f" Budget remaining: ${result['cost_stats']['budget_remaining']:.2f}")
print(
f" Budget remaining: ${result['cost_stats']['budget_remaining']:.2f}"
)
print(f" Cached: {result.get('cached', False)}")
# Test processing 150 agents
print(f"\nTesting with 150 agents...")
print("\nTesting with 150 agents...")
result2 = template.run_mass_task(
"Describe your approach to problem-solving",
agent_count=150
"Describe your approach to problem-solving", agent_count=150
)
print(f"Results:")
print("Results:")
print(f" Agents processed: {len(result2['agents_used'])}")
print(f" Cost: ${result2['cost_stats']['total_cost']:.4f}")
print(f" Budget remaining: ${result2['cost_stats']['budget_remaining']:.2f}")
print(
f" Budget remaining: ${result2['cost_stats']['budget_remaining']:.2f}"
)
print(f" Cached: {result2.get('cached', False)}")
# Show final stats
final_stats = template.get_system_stats()
print(f"\nFinal Statistics:")
print("\nFinal Statistics:")
print(f" Total agents: {final_stats['total_agents']}")
print(f" Loaded agents: {final_stats['loaded_agents']}")
print(f" Total cost: ${final_stats['cost_stats']['total_cost']:.4f}")
print(f" Budget remaining: ${final_stats['cost_stats']['budget_remaining']:.2f}")
print(
f" Total cost: ${final_stats['cost_stats']['total_cost']:.4f}"
)
print(
f" Budget remaining: ${final_stats['cost_stats']['budget_remaining']:.2f}"
)
# Success criteria
total_processed = len(result['agents_used']) + len(result2['agents_used'])
total_processed = len(result["agents_used"]) + len(
result2["agents_used"]
)
print(f"\nTotal agents processed: {total_processed}")
if total_processed > 50:
@ -63,5 +75,6 @@ def test_mass_agents():
else:
print("❌ FAILURE: Template still limited to 50 agents")
if __name__ == "__main__":
test_mass_agents()

@ -14,7 +14,6 @@ from typing import Dict, List, Optional, Any
from dataclasses import dataclass, asdict
import requests
from loguru import logger
import xml.etree.ElementTree as ET
@dataclass
@ -78,7 +77,11 @@ class WikipediaPersonalityScraper:
Scraper for gathering Wikipedia personality data for MEPs.
"""
def __init__(self, output_dir: str = "mep_personalities", verbose: bool = True):
def __init__(
self,
output_dir: str = "mep_personalities",
verbose: bool = True,
):
"""
Initialize the Wikipedia personality scraper.
@ -89,17 +92,23 @@ class WikipediaPersonalityScraper:
self.output_dir = output_dir
self.verbose = verbose
self.session = requests.Session()
self.session.headers.update({
'User-Agent': 'EuroSwarm Parliament Personality Scraper/1.0 (https://github.com/swarms-democracy)'
})
self.session.headers.update(
{
"User-Agent": "EuroSwarm Parliament Personality Scraper/1.0 (https://github.com/swarms-democracy)"
}
)
# Create output directory
os.makedirs(output_dir, exist_ok=True)
if verbose:
logger.info(f"Wikipedia Personality Scraper initialized. Output directory: {output_dir}")
logger.info(
f"Wikipedia Personality Scraper initialized. Output directory: {output_dir}"
)
def extract_mep_data_from_xml(self, xml_file: str = "EU.xml") -> List[Dict[str, str]]:
def extract_mep_data_from_xml(
self, xml_file: str = "EU.xml"
) -> List[Dict[str, str]]:
"""
Extract MEP data from EU.xml file.
@ -112,31 +121,45 @@ class WikipediaPersonalityScraper:
meps = []
try:
with open(xml_file, 'r', encoding='utf-8') as f:
with open(xml_file, "r", encoding="utf-8") as f:
content = f.read()
# Use regex to extract MEP data
mep_pattern = r'<mep>\s*<fullName>(.*?)</fullName>\s*<country>(.*?)</country>\s*<politicalGroup>(.*?)</politicalGroup>\s*<id>(.*?)</id>\s*<nationalPoliticalGroup>(.*?)</nationalPoliticalGroup>\s*</mep>'
mep_pattern = r"<mep>\s*<fullName>(.*?)</fullName>\s*<country>(.*?)</country>\s*<politicalGroup>(.*?)</politicalGroup>\s*<id>(.*?)</id>\s*<nationalPoliticalGroup>(.*?)</nationalPoliticalGroup>\s*</mep>"
mep_matches = re.findall(mep_pattern, content, re.DOTALL)
for full_name, country, political_group, mep_id, national_party in mep_matches:
meps.append({
'full_name': full_name.strip(),
'country': country.strip(),
'political_group': political_group.strip(),
'mep_id': mep_id.strip(),
'national_party': national_party.strip()
})
for (
full_name,
country,
political_group,
mep_id,
national_party,
) in mep_matches:
meps.append(
{
"full_name": full_name.strip(),
"country": country.strip(),
"political_group": political_group.strip(),
"mep_id": mep_id.strip(),
"national_party": national_party.strip(),
}
)
if self.verbose:
logger.info(f"Extracted {len(meps)} MEPs from {xml_file}")
logger.info(
f"Extracted {len(meps)} MEPs from {xml_file}"
)
except Exception as e:
logger.error(f"Error extracting MEP data from {xml_file}: {e}")
logger.error(
f"Error extracting MEP data from {xml_file}: {e}"
)
return meps
def search_wikipedia_page(self, mep_name: str, country: str) -> Optional[str]:
def search_wikipedia_page(
self, mep_name: str, country: str
) -> Optional[str]:
"""
Search for a Wikipedia page for an MEP.
@ -151,42 +174,50 @@ class WikipediaPersonalityScraper:
# Search for the MEP on Wikipedia
search_url = "https://en.wikipedia.org/w/api.php"
search_params = {
'action': 'query',
'format': 'json',
'list': 'search',
'srsearch': f'"{mep_name}" {country}',
'srlimit': 5,
'srnamespace': 0
"action": "query",
"format": "json",
"list": "search",
"srsearch": f'"{mep_name}" {country}',
"srlimit": 5,
"srnamespace": 0,
}
response = self.session.get(search_url, params=search_params)
response = self.session.get(
search_url, params=search_params
)
response.raise_for_status()
data = response.json()
search_results = data.get('query', {}).get('search', [])
search_results = data.get("query", {}).get("search", [])
if search_results:
# Return the first result
return search_results[0]['title']
return search_results[0]["title"]
# Try alternative search without quotes
search_params['srsearch'] = f'{mep_name} {country}'
response = self.session.get(search_url, params=search_params)
search_params["srsearch"] = f"{mep_name} {country}"
response = self.session.get(
search_url, params=search_params
)
response.raise_for_status()
data = response.json()
search_results = data.get('query', {}).get('search', [])
search_results = data.get("query", {}).get("search", [])
if search_results:
return search_results[0]['title']
return search_results[0]["title"]
except Exception as e:
if self.verbose:
logger.warning(f"Error searching Wikipedia for {mep_name}: {e}")
logger.warning(
f"Error searching Wikipedia for {mep_name}: {e}"
)
return None
def get_wikipedia_content(self, page_title: str) -> Optional[Dict[str, Any]]:
def get_wikipedia_content(
self, page_title: str
) -> Optional[Dict[str, Any]]:
"""
Get Wikipedia content for a specific page.
@ -200,42 +231,51 @@ class WikipediaPersonalityScraper:
# Get page content
content_url = "https://en.wikipedia.org/w/api.php"
content_params = {
'action': 'query',
'format': 'json',
'titles': page_title,
'prop': 'extracts|info|categories',
'exintro': True,
'explaintext': True,
'inprop': 'url',
'cllimit': 50
"action": "query",
"format": "json",
"titles": page_title,
"prop": "extracts|info|categories",
"exintro": True,
"explaintext": True,
"inprop": "url",
"cllimit": 50,
}
response = self.session.get(content_url, params=content_params)
response = self.session.get(
content_url, params=content_params
)
response.raise_for_status()
data = response.json()
pages = data.get('query', {}).get('pages', {})
pages = data.get("query", {}).get("pages", {})
if pages:
page_id = list(pages.keys())[0]
page_data = pages[page_id]
return {
'title': page_data.get('title', ''),
'extract': page_data.get('extract', ''),
'url': page_data.get('fullurl', ''),
'categories': [cat['title'] for cat in page_data.get('categories', [])],
'pageid': page_data.get('pageid', ''),
'length': page_data.get('length', 0)
"title": page_data.get("title", ""),
"extract": page_data.get("extract", ""),
"url": page_data.get("fullurl", ""),
"categories": [
cat["title"]
for cat in page_data.get("categories", [])
],
"pageid": page_data.get("pageid", ""),
"length": page_data.get("length", 0),
}
except Exception as e:
if self.verbose:
logger.warning(f"Error getting Wikipedia content for {page_title}: {e}")
logger.warning(
f"Error getting Wikipedia content for {page_title}: {e}"
)
return None
def parse_wikipedia_content(self, content: str, mep_name: str) -> Dict[str, str]:
def parse_wikipedia_content(
self, content: str, mep_name: str
) -> Dict[str, str]:
"""
Parse Wikipedia content to extract structured personality information.
@ -247,112 +287,136 @@ class WikipediaPersonalityScraper:
Dictionary of parsed personality information
"""
personality_data = {
'summary': '',
'early_life': '',
'political_career': '',
'political_views': '',
'policy_focus': '',
'achievements': '',
'controversies': '',
'personal_life': '',
'education': '',
'professional_background': '',
'party_affiliations': '',
'committee_experience': '',
'voting_record': '',
'public_statements': '',
'interests': '',
'languages': '',
'awards': '',
'publications': '',
'social_media': ''
"summary": "",
"early_life": "",
"political_career": "",
"political_views": "",
"policy_focus": "",
"achievements": "",
"controversies": "",
"personal_life": "",
"education": "",
"professional_background": "",
"party_affiliations": "",
"committee_experience": "",
"voting_record": "",
"public_statements": "",
"interests": "",
"languages": "",
"awards": "",
"publications": "",
"social_media": "",
}
# Extract summary (first paragraph)
paragraphs = content.split('\n\n')
paragraphs = content.split("\n\n")
if paragraphs:
personality_data['summary'] = paragraphs[0][:1000] # Limit summary length
personality_data["summary"] = paragraphs[0][
:1000
] # Limit summary length
# Look for specific sections
content_lower = content.lower()
# Early life and education
early_life_patterns = [
r'early life[^.]*\.',
r'born[^.]*\.',
r'childhood[^.]*\.',
r'grew up[^.]*\.',
r'education[^.]*\.'
r"early life[^.]*\.",
r"born[^.]*\.",
r"childhood[^.]*\.",
r"grew up[^.]*\.",
r"education[^.]*\.",
]
for pattern in early_life_patterns:
matches = re.findall(pattern, content_lower, re.IGNORECASE)
matches = re.findall(
pattern, content_lower, re.IGNORECASE
)
if matches:
personality_data['early_life'] = ' '.join(matches[:3]) # Take first 3 matches
personality_data["early_life"] = " ".join(
matches[:3]
) # Take first 3 matches
break
# Political career
political_patterns = [
r'political career[^.]*\.',
r'elected[^.]*\.',
r'parliament[^.]*\.',
r'minister[^.]*\.',
r'party[^.]*\.'
r"political career[^.]*\.",
r"elected[^.]*\.",
r"parliament[^.]*\.",
r"minister[^.]*\.",
r"party[^.]*\.",
]
for pattern in political_patterns:
matches = re.findall(pattern, content_lower, re.IGNORECASE)
matches = re.findall(
pattern, content_lower, re.IGNORECASE
)
if matches:
personality_data['political_career'] = ' '.join(matches[:5]) # Take first 5 matches
personality_data["political_career"] = " ".join(
matches[:5]
) # Take first 5 matches
break
# Political views
views_patterns = [
r'political views[^.]*\.',
r'positions[^.]*\.',
r'advocates[^.]*\.',
r'supports[^.]*\.',
r'opposes[^.]*\.'
r"political views[^.]*\.",
r"positions[^.]*\.",
r"advocates[^.]*\.",
r"supports[^.]*\.",
r"opposes[^.]*\.",
]
for pattern in views_patterns:
matches = re.findall(pattern, content_lower, re.IGNORECASE)
matches = re.findall(
pattern, content_lower, re.IGNORECASE
)
if matches:
personality_data['political_views'] = ' '.join(matches[:3])
personality_data["political_views"] = " ".join(
matches[:3]
)
break
# Policy focus
policy_patterns = [
r'policy[^.]*\.',
r'focus[^.]*\.',
r'issues[^.]*\.',
r'legislation[^.]*\.'
r"policy[^.]*\.",
r"focus[^.]*\.",
r"issues[^.]*\.",
r"legislation[^.]*\.",
]
for pattern in policy_patterns:
matches = re.findall(pattern, content_lower, re.IGNORECASE)
matches = re.findall(
pattern, content_lower, re.IGNORECASE
)
if matches:
personality_data['policy_focus'] = ' '.join(matches[:3])
personality_data["policy_focus"] = " ".join(
matches[:3]
)
break
# Achievements
achievement_patterns = [
r'achievements[^.]*\.',
r'accomplishments[^.]*\.',
r'success[^.]*\.',
r'won[^.]*\.',
r'received[^.]*\.'
r"achievements[^.]*\.",
r"accomplishments[^.]*\.",
r"success[^.]*\.",
r"won[^.]*\.",
r"received[^.]*\.",
]
for pattern in achievement_patterns:
matches = re.findall(pattern, content_lower, re.IGNORECASE)
matches = re.findall(
pattern, content_lower, re.IGNORECASE
)
if matches:
personality_data['achievements'] = ' '.join(matches[:3])
personality_data["achievements"] = " ".join(
matches[:3]
)
break
return personality_data
def create_personality_profile(self, mep_data: Dict[str, str]) -> MEPPersonalityProfile:
def create_personality_profile(
self, mep_data: Dict[str, str]
) -> MEPPersonalityProfile:
"""
Create a personality profile for an MEP.
@ -362,8 +426,8 @@ class WikipediaPersonalityScraper:
Returns:
MEPPersonalityProfile object
"""
mep_name = mep_data['full_name']
country = mep_data['country']
mep_name = mep_data["full_name"]
country = mep_data["country"]
# Search for Wikipedia page
page_title = self.search_wikipedia_page(mep_name, country)
@ -374,56 +438,76 @@ class WikipediaPersonalityScraper:
if wiki_content:
# Parse content
personality_data = self.parse_wikipedia_content(wiki_content['extract'], mep_name)
personality_data = self.parse_wikipedia_content(
wiki_content["extract"], mep_name
)
# Create profile
profile = MEPPersonalityProfile(
full_name=mep_name,
mep_id=mep_data['mep_id'],
wikipedia_url=wiki_content['url'],
summary=personality_data['summary'],
early_life=personality_data['early_life'],
political_career=personality_data['political_career'],
political_views=personality_data['political_views'],
policy_focus=personality_data['policy_focus'],
achievements=personality_data['achievements'],
controversies=personality_data['controversies'],
personal_life=personality_data['personal_life'],
education=personality_data['education'],
professional_background=personality_data['professional_background'],
party_affiliations=personality_data['party_affiliations'],
committee_experience=personality_data['committee_experience'],
voting_record=personality_data['voting_record'],
public_statements=personality_data['public_statements'],
interests=personality_data['interests'],
languages=personality_data['languages'],
awards=personality_data['awards'],
publications=personality_data['publications'],
social_media=personality_data['social_media'],
last_updated=time.strftime("%Y-%m-%d %H:%M:%S")
mep_id=mep_data["mep_id"],
wikipedia_url=wiki_content["url"],
summary=personality_data["summary"],
early_life=personality_data["early_life"],
political_career=personality_data[
"political_career"
],
political_views=personality_data[
"political_views"
],
policy_focus=personality_data["policy_focus"],
achievements=personality_data["achievements"],
controversies=personality_data["controversies"],
personal_life=personality_data["personal_life"],
education=personality_data["education"],
professional_background=personality_data[
"professional_background"
],
party_affiliations=personality_data[
"party_affiliations"
],
committee_experience=personality_data[
"committee_experience"
],
voting_record=personality_data["voting_record"],
public_statements=personality_data[
"public_statements"
],
interests=personality_data["interests"],
languages=personality_data["languages"],
awards=personality_data["awards"],
publications=personality_data["publications"],
social_media=personality_data["social_media"],
last_updated=time.strftime("%Y-%m-%d %H:%M:%S"),
)
if self.verbose:
logger.info(f"Created personality profile for {mep_name} from Wikipedia")
logger.info(
f"Created personality profile for {mep_name} from Wikipedia"
)
return profile
# Create minimal profile if no Wikipedia data found
profile = MEPPersonalityProfile(
full_name=mep_name,
mep_id=mep_data['mep_id'],
mep_id=mep_data["mep_id"],
summary=f"{mep_name} is a Member of the European Parliament representing {country}.",
political_career=f"Currently serving as MEP for {country}.",
political_views=f"Member of {mep_data['political_group']} and {mep_data['national_party']}.",
last_updated=time.strftime("%Y-%m-%d %H:%M:%S")
last_updated=time.strftime("%Y-%m-%d %H:%M:%S"),
)
if self.verbose:
logger.warning(f"No Wikipedia data found for {mep_name}, created minimal profile")
logger.warning(
f"No Wikipedia data found for {mep_name}, created minimal profile"
)
return profile
def save_personality_profile(self, profile: MEPPersonalityProfile) -> str:
def save_personality_profile(
self, profile: MEPPersonalityProfile
) -> str:
"""
Save personality profile to JSON file.
@ -434,15 +518,15 @@ class WikipediaPersonalityScraper:
Path to saved file
"""
# Create safe filename
safe_name = re.sub(r'[^\w\s-]', '', profile.full_name).strip()
safe_name = re.sub(r'[-\s]+', '_', safe_name)
safe_name = re.sub(r"[^\w\s-]", "", profile.full_name).strip()
safe_name = re.sub(r"[-\s]+", "_", safe_name)
filename = f"{safe_name}_{profile.mep_id}.json"
filepath = os.path.join(self.output_dir, filename)
# Convert to dictionary and save
profile_dict = asdict(profile)
with open(filepath, 'w', encoding='utf-8') as f:
with open(filepath, "w", encoding="utf-8") as f:
json.dump(profile_dict, f, indent=2, ensure_ascii=False)
if self.verbose:
@ -450,7 +534,9 @@ class WikipediaPersonalityScraper:
return filepath
def scrape_all_mep_personalities(self, xml_file: str = "EU.xml", delay: float = 1.0) -> Dict[str, str]:
def scrape_all_mep_personalities(
self, xml_file: str = "EU.xml", delay: float = 1.0
) -> Dict[str, str]:
"""
Scrape personality data for all MEPs.
@ -465,10 +551,12 @@ class WikipediaPersonalityScraper:
profile_files = {}
if self.verbose:
logger.info(f"Starting personality scraping for {len(meps)} MEPs")
logger.info(
f"Starting personality scraping for {len(meps)} MEPs"
)
for i, mep_data in enumerate(meps, 1):
mep_name = mep_data['full_name']
mep_name = mep_data["full_name"]
if self.verbose:
logger.info(f"Processing {i}/{len(meps)}: {mep_name}")
@ -489,11 +577,15 @@ class WikipediaPersonalityScraper:
continue
if self.verbose:
logger.info(f"Completed personality scraping. {len(profile_files)} profiles created.")
logger.info(
f"Completed personality scraping. {len(profile_files)} profiles created."
)
return profile_files
def load_personality_profile(self, filepath: str) -> MEPPersonalityProfile:
def load_personality_profile(
self, filepath: str
) -> MEPPersonalityProfile:
"""
Load personality profile from JSON file.
@ -503,12 +595,14 @@ class WikipediaPersonalityScraper:
Returns:
MEPPersonalityProfile object
"""
with open(filepath, 'r', encoding='utf-8') as f:
with open(filepath, "r", encoding="utf-8") as f:
data = json.load(f)
return MEPPersonalityProfile(**data)
def get_personality_summary(self, profile: MEPPersonalityProfile) -> str:
def get_personality_summary(
self, profile: MEPPersonalityProfile
) -> str:
"""
Generate a personality summary for use in AI agent system prompts.
@ -524,22 +618,32 @@ class WikipediaPersonalityScraper:
summary_parts.append(f"Background: {profile.summary}")
if profile.political_career:
summary_parts.append(f"Political Career: {profile.political_career}")
summary_parts.append(
f"Political Career: {profile.political_career}"
)
if profile.political_views:
summary_parts.append(f"Political Views: {profile.political_views}")
summary_parts.append(
f"Political Views: {profile.political_views}"
)
if profile.policy_focus:
summary_parts.append(f"Policy Focus: {profile.policy_focus}")
summary_parts.append(
f"Policy Focus: {profile.policy_focus}"
)
if profile.achievements:
summary_parts.append(f"Notable Achievements: {profile.achievements}")
summary_parts.append(
f"Notable Achievements: {profile.achievements}"
)
if profile.education:
summary_parts.append(f"Education: {profile.education}")
if profile.professional_background:
summary_parts.append(f"Professional Background: {profile.professional_background}")
summary_parts.append(
f"Professional Background: {profile.professional_background}"
)
return "\n".join(summary_parts)
@ -551,12 +655,14 @@ def main():
print("=" * 70)
# Initialize scraper
scraper = WikipediaPersonalityScraper(output_dir="mep_personalities", verbose=True)
scraper = WikipediaPersonalityScraper(
output_dir="mep_personalities", verbose=True
)
# Scrape all MEP personalities
profile_files = scraper.scrape_all_mep_personalities(delay=1.0)
print(f"\n✅ Scraping completed!")
print("\n✅ Scraping completed!")
print(f"📁 Profiles saved to: {scraper.output_dir}")
print(f"📊 Total profiles created: {len(profile_files)}")

@ -0,0 +1,39 @@
"""
Bell Labs Research Simulation Example
This example demonstrates how to use the BellLabsSwarm to simulate
collaborative research among famous physicists.
"""
from swarms.sims.bell_labs import (
run_bell_labs_research,
)
def main():
    """
    Entry point: run the Bell Labs research simulation.

    Poses the research question "Why doesn't physics take a vacation?
    Why are the laws of physics consistent?" to the simulated research
    team and prints whatever the simulation returns.
    """
    question = """
    Why doesn't physics take a vacation? Why are the laws of physics consistent across time and space?
    Explore the philosophical and scientific foundations for the uniformity and invariance of physical laws.
    Consider both theoretical explanations and any empirical evidence or challenges to this consistency.
    """

    # Kick off the simulation and show its output.
    print(
        run_bell_labs_research(
            research_question=question,
            max_loops=1,
            model_name="claude-3-5-sonnet-20240620",
            verbose=True,
        )
    )


if __name__ == "__main__":
    main()

@ -0,0 +1,29 @@
from swarms import Agent
def main():
    """
    Build a quantitative-trading agent and ask it for the top 3 gold ETFs.

    Prints the agent's answer to stdout.
    """
    prompt = (
        "You are an expert quantitative trading agent. "
        "Recommend the best gold ETFs using your expertise in trading strategies, "
        "risk management, and financial analysis. Be concise and precise."
    )

    trading_agent = Agent(
        agent_name="Quantitative-Trading-Agent",
        agent_description="Advanced quantitative trading and algorithmic analysis agent",
        system_prompt=prompt,
        model_name="claude-sonnet-4-20250514",
        dynamic_temperature_enabled=True,
        max_loops=1,
        dynamic_context_window=True,
    )

    answer = trading_agent.run(
        task="What are the best top 3 etfs for gold coverage?"
    )
    print(answer)


if __name__ == "__main__":
    main()

@ -0,0 +1,206 @@
"""
Claude Code Agent Tool - Setup Guide
This tool provides a Claude Code Agent that can:
- Generate code and applications from natural language descriptions
- Write files, execute shell commands, and manage Git repositories
- Perform web searches and file operations
- Handle complex development tasks with retry logic
SETUP GUIDE:
1. Install dependencies:
pip install claude-code-sdk
npm install -g @anthropic-ai/claude-code
2. Set environment variable:
export ANTHROPIC_API_KEY="your-api-key-here"
3. Use the tool:
from claude_as_a_tool import developer_worker_agent
result = developer_worker_agent(
task="Create a Python web scraper",
system_prompt="You are a helpful coding assistant"
)
REQUIRED: ANTHROPIC_API_KEY environment variable must be set
"""
import asyncio
from typing import Any, Dict
from claude_code_sdk import ClaudeCodeOptions, ClaudeSDKClient
from dotenv import load_dotenv
from tenacity import retry, stop_after_attempt, wait_exponential
from loguru import logger
load_dotenv()
class ClaudeAppGenerator:
    """
    Generates applications using Claude Code SDK based on specifications.

    Wraps ``ClaudeSDKClient`` with tenacity-based retry logic and exposes a
    synchronous ``run`` entry point so the agent can be driven from
    non-async callers (e.g. a ThreadPoolExecutor).
    """

    def __init__(
        self,
        name: str = "Developer Worker Agent",
        description: str = "A developer worker agent that can generate code and write it to a file.",
        retries: int = 3,
        retry_delay: float = 2.0,
        system_prompt: str = None,
        debug_mode: bool = False,
        max_steps: int = 40,
        model: str = "claude-sonnet-4-20250514",
        max_thinking_tokens: int = 1000,
    ):
        """
        Initialize the app generator.

        Args:
            name: Name of the app
            description: Description of the app
            retries: Number of retries (NOTE(review): stored but unused —
                the ``@retry`` decorator below hard-codes 3 attempts; confirm intent)
            retry_delay: Delay between retries (NOTE(review): stored but unused —
                the decorator uses exponential backoff instead)
            system_prompt: System prompt
            debug_mode: Enable extra verbose logging for Claude outputs
            max_steps: Maximum number of steps (passed as ``max_turns``)
            model: Model to use
            max_thinking_tokens: Token budget for the model's thinking blocks
        """
        self.name = name
        self.description = description
        self.retries = retries
        self.retry_delay = retry_delay
        self.system_prompt = system_prompt
        self.model = model
        self.debug_mode = debug_mode
        self.max_steps = max_steps
        self.max_thinking_tokens = max_thinking_tokens

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=15),
    )
    async def generate_app_with_claude(
        self, task: str
    ) -> list[str]:
        """
        Generate app using Claude Code SDK with robust error handling and retry logic.

        Args:
            task: Task to be completed

        Returns:
            List of text segments collected from the Claude response stream
            (text blocks plus the final result message, in arrival order).
        """
        # Configure the Claude Code SDK session for this task.
        claude_options = ClaudeCodeOptions(
            system_prompt=self.system_prompt,
            max_turns=self.max_steps,  # Sufficient for local app development and GitHub setup
            allowed_tools=[
                "Read",
                "Write",
                "Bash",
                "GitHub",
                "Git",
                "Grep",
                "WebSearch",
            ],
            continue_conversation=True,  # Keep conversation context between queries (NOTE(review): original comment said "start fresh each time", which contradicts True — confirm intent)
            model=self.model,
            max_thinking_tokens=self.max_thinking_tokens,
        )

        async with ClaudeSDKClient(options=claude_options) as client:
            # Send the task, then stream the response message by message.
            await client.query(task)

            response_text = []
            message_count = 0  # NOTE(review): counted but never used

            async for message in client.receive_response():
                message_count += 1
                if hasattr(message, "content"):
                    # Content messages: collect text blocks; optionally echo
                    # tool inputs when debug_mode is on.
                    for block in message.content:
                        if hasattr(block, "text"):
                            text_content = block.text
                            response_text.append(text_content)
                            logger.info(text_content)
                        elif hasattr(block, "type"):
                            if self.debug_mode and hasattr(
                                block, "input"
                            ):
                                # Truncate long tool inputs to keep logs readable.
                                input_str = str(block.input)
                                if len(input_str) > 200:
                                    input_str = (
                                        input_str[:200]
                                        + "... (truncated)"
                                    )
                                print(f"Tool Input: {input_str}")
                elif type(message).__name__ == "ResultMessage":
                    # Final result message: append its payload as text.
                    result_text = str(message.result)
                    response_text.append(result_text)

            return response_text

    def run(self, task: str) -> list[str]:
        """
        Synchronous wrapper for app generation to work with ThreadPoolExecutor.

        Args:
            task: Task to be completed

        Returns:
            List of response text segments from ``generate_app_with_claude``.
        """
        return asyncio.run(self.generate_app_with_claude(task))
def developer_worker_agent(task: str, system_prompt: str) -> str:
    """
    Developer Worker Agent.

    Builds a :class:`ClaudeAppGenerator` — a developer assistant backed by
    the Claude Code SDK — configured with the given system prompt, and runs
    the task synchronously. The underlying agent can generate code, write
    files, execute shell commands, use Git/GitHub, and perform web searches,
    with built-in retry logic and optional debug output.

    Capabilities:
        - Generate code based on detailed task descriptions.
        - Write generated code to files.
        - Execute shell commands and scripts.
        - Interact with Git and GitHub for version control operations.
        - Perform web searches to gather information or code snippets.
        - Provide detailed logs and debugging information if enabled.
        - Handle errors gracefully with configurable retry logic.

    Args:
        task (str): The development task or instruction for the agent to complete.
        system_prompt (str): The system prompt to guide the agent's behavior and context.

    Returns:
        str: The result of the agent's execution for the given task.
    """
    worker = ClaudeAppGenerator(system_prompt=system_prompt)
    return worker.run(task)
# agent = Agent(
# agent_name="Developer Worker Agent",
# agent_description="A developer worker agent that can generate code and write it to a file.",
# tools=[developer_worker_agent],
# system_prompt="You are a developer worker agent. You are given a task and you need to complete it.",
# )
# agent.run(
# task="Write a simple python script that prints 'Hello, World!'"
# )
# if __name__ == "__main__":
# task = "Write a simple python script that prints 'Hello, World!'"
# system_prompt = "You are a developer worker agent. You are given a task and you need to complete it."
# print(developer_worker_agent(task, system_prompt))

@ -1,170 +0,0 @@
from loguru import logger
from swarms.structs.swarm_eval import (
SwarmEvaluator,
PRESET_DATASETS,
)
import os
from swarms import Agent
from dotenv import load_dotenv
from swarm_models import OpenAIChat
load_dotenv()
model = OpenAIChat(
model_name="deepseek-ai/DeepSeek-R1-Distill-Llama-70B-free",
openai_api_key=os.getenv("TOGETHER_API_KEY"),
base_url="https://api.together.xyz/v1",
)
# Define system prompts for reasoning agents
THINKING_AGENT_PROMPT = """You are a sophisticated analytical and strategic thinking agent focused on deep problem analysis and solution design.
Your core capabilities include:
1. Comprehensive Problem Analysis
- Break down complex problems into constituent elements
- Map relationships and dependencies between components
- Identify root causes and underlying patterns
- Consider historical context and precedents
2. Multi-Perspective Evaluation
- Examine issues from multiple stakeholder viewpoints
- Consider short-term and long-term implications
- Evaluate social, economic, technical, and ethical dimensions
- Challenge assumptions and identify potential biases
3. Risk Assessment and Mitigation
- Conduct thorough risk analysis across scenarios
- Identify potential failure modes and edge cases
- Develop contingency plans and mitigation strategies
- Assess probability and impact of various outcomes
4. Strategic Solution Development
- Generate multiple solution approaches
- Evaluate trade-offs between different strategies
- Consider resource constraints and limitations
- Design scalable and sustainable solutions
5. Decision Framework Creation
- Establish clear evaluation criteria
- Weight competing priorities appropriately
- Create structured decision matrices
- Document reasoning and key decision factors
6. Systems Thinking
- Map interconnections between system elements
- Identify feedback loops and cascade effects
- Consider emergent properties and behaviors
- Account for dynamic system evolution
Your output should always include:
- Clear articulation of your analytical process
- Key assumptions and their justification
- Potential risks and mitigation strategies
- Multiple solution options with pros/cons
- Specific recommendations with supporting rationale
- Areas of uncertainty requiring further investigation
Focus on developing robust, well-reasoned strategies that account for complexity while remaining practical and actionable."""
ACTION_AGENT_PROMPT = """You are an advanced implementation and execution agent focused on turning strategic plans into concrete results.
Your core capabilities include:
1. Strategic Implementation Planning
- Break down high-level strategies into specific actions
- Create detailed project roadmaps and timelines
- Identify critical path dependencies
- Establish clear milestones and success metrics
- Design feedback and monitoring mechanisms
2. Resource Optimization
- Assess resource requirements and constraints
- Optimize resource allocation and scheduling
- Identify efficiency opportunities
- Plan for scalability and flexibility
- Manage competing priorities effectively
3. Execution Management
- Develop detailed implementation procedures
- Create clear operational guidelines
- Establish quality control measures
- Design progress tracking systems
- Build in review and adjustment points
4. Risk Management
- Implement specific risk mitigation measures
- Create early warning systems
- Develop contingency procedures
- Establish fallback positions
- Monitor risk indicators
5. Stakeholder Management
- Identify key stakeholders and their needs
- Create communication plans
- Establish feedback mechanisms
- Manage expectations effectively
- Build support and buy-in
6. Continuous Improvement
- Monitor implementation effectiveness
- Gather and analyze performance data
- Identify improvement opportunities
- Implement iterative enhancements
- Document lessons learned
Your output should always include:
- Detailed action plans with specific steps
- Resource requirements and allocation plans
- Timeline with key milestones
- Success metrics and monitoring approach
- Risk mitigation procedures
- Communication and stakeholder management plans
- Quality control measures
- Feedback and adjustment mechanisms
Focus on practical, efficient, and effective implementation while maintaining high quality standards and achieving desired outcomes."""
# Initialize the thinking agent
thinking_agent = Agent(
agent_name="Strategic-Thinker",
agent_description="Deep analysis and strategic planning agent",
system_prompt=THINKING_AGENT_PROMPT,
max_loops=1,
llm=model,
dynamic_temperature_enabled=True,
)
class DeepSeekSwarm:
def __init__(self):
self.thinking_agent = thinking_agent
def run(self, task: str):
first_one = self.thinking_agent.run(task)
return self.thinking_agent.run(first_one)
if __name__ == "__main__":
# Initialize the swarm (replace with your actual multi-agent system)
swarm = DeepSeekSwarm()
# Initialize the evaluator with the swarm instance
evaluator = SwarmEvaluator(swarm)
logger.info("Starting evaluation for dataset: gsm8k")
# For demonstration, we use 4 concurrent workers, show progress, and save results.
results = evaluator.evaluate(
"gsm8k",
split="train",
config=PRESET_DATASETS["gsm8k"],
max_workers=os.cpu_count(),
max_retries=3,
show_progress=True,
output_file="gsm8k_results.txt",
)
logger.info(f"Results for gsm8k: {results}")

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "8.0.5"
version = "8.1.1"
description = "Swarms - TGSC"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@ -1,6 +1,6 @@
torch>=2.1.1,<3.0
transformers>=4.39.0,<4.51.0
asyncio>=3.4.3,<4.0
asyncio>=3.4.3,<5.0
toml
pypdf==5.1.0
ratelimit==2.2.1

@ -1,4 +1,3 @@
#!/usr/bin/env python3
"""
Test script to verify Swarms installation in Docker container.
@ -7,6 +6,7 @@ Test script to verify Swarms installation in Docker container.
import sys
from typing import Dict, Any
def test_swarms_import() -> Dict[str, Any]:
"""
Test that swarms can be imported and basic functionality works.
@ -16,16 +16,20 @@ def test_swarms_import() -> Dict[str, Any]:
"""
try:
import swarms
print(f" Swarms imported successfully. Version: {swarms.__version__}")
print(
f" Swarms imported successfully. Version: {swarms.__version__}"
)
# Test basic functionality
from swarms import Agent
print(" Agent class imported successfully")
return {
"status": "success",
"version": swarms.__version__,
"message": "Swarms package is working correctly"
"message": "Swarms package is working correctly",
}
except ImportError as e:
@ -33,16 +37,17 @@ def test_swarms_import() -> Dict[str, Any]:
return {
"status": "error",
"error": str(e),
"message": "Swarms package import failed"
"message": "Swarms package import failed",
}
except Exception as e:
print(f" Unexpected error: {e}")
return {
"status": "error",
"error": str(e),
"message": "Unexpected error occurred"
"message": "Unexpected error occurred",
}
def main() -> None:
"""Main function to run tests."""
print(" Testing Swarms Docker Image...")
@ -62,5 +67,6 @@ def main() -> None:
print(" Tests failed! Please check the Docker image.")
sys.exit(1)
if __name__ == "__main__":
main()

@ -0,0 +1,23 @@
from swarms.sims.senator_assembly import SenatorAssembly
def main():
    """
    Simulate a concurrent Senate vote on a federal income tax reduction bill.

    Builds a SenatorAssembly and runs the vote in batches of 10 senators.
    """
    bill_description = (
        "A bill proposing a significant reduction in federal income tax rates for all American citizens. "
        "The legislation aims to lower tax brackets across the board, increase the standard deduction, "
        "and provide additional tax relief for middle- and lower-income families. Proponents argue that "
        "the bill will stimulate economic growth, increase disposable income, and enhance consumer spending. "
        "Opponents raise concerns about the potential impact on the federal deficit, funding for public services, "
        "and long-term fiscal responsibility. Senators must weigh the economic, social, and budgetary implications "
        "before casting their votes."
    )

    assembly = SenatorAssembly(model_name="claude-sonnet-4-20250514")
    assembly.simulate_vote_concurrent(bill_description, batch_size=10)


if __name__ == "__main__":
    main()

@ -0,0 +1,816 @@
"""
Bell Labs Research Simulation with Physicist Agents
This simulation creates specialized AI agents representing famous physicists
from the Bell Labs era, including Oppenheimer, von Neumann, Feynman, Einstein,
and others. The agents work together in a collaborative research environment
following a structured workflow: task -> Oppenheimer (planning) -> physicist discussion
-> code implementation -> results analysis -> repeat for n loops.
"""
from functools import lru_cache
from typing import Any, Dict, List, Optional
from loguru import logger
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
# from examples.tools.claude_as_a_tool import developer_worker_agent
@lru_cache(maxsize=1)
def _create_physicist_agents(
    model_name: str, random_model_name: bool = False
) -> List[Agent]:
    """
    Create specialized agents for each physicist.

    Memoized via ``lru_cache(maxsize=1)``: repeated calls with the same
    arguments return the *same* list of Agent instances, so separate swarms
    built with identical settings share agents — callers must not mutate
    the returned list.

    Args:
        model_name: Model to use for all agents
        random_model_name: If True, each Agent may pick a random model
            (forwarded to ``Agent``)

    Returns:
        List of configured physicist agents
    """
    # Static roster of the simulated team. Only ``system_prompt`` is used
    # when constructing each Agent below; ``role``, ``expertise`` and
    # ``background`` are maintainer-facing metadata.
    physicists_data = {
        "J. Robert Oppenheimer": {
            "role": "Research Director & Theoretical Physicist",
            "expertise": [
                "Nuclear physics",
                "Quantum mechanics",
                "Research coordination",
                "Strategic planning",
                "Team leadership",
            ],
            "background": "Director of the Manhattan Project, expert in quantum mechanics and nuclear physics",
            "system_prompt": """You are J. Robert Oppenheimer, the brilliant theoretical physicist and research director.

            Your role is to:
            1. Analyze complex research questions and break them down into manageable components
            2. Create comprehensive research plans with clear objectives and methodologies
            3. Coordinate the research team and ensure effective collaboration
            4. Synthesize findings from different physicists into coherent conclusions
            5. Guide the research process with strategic insights and theoretical frameworks

            You excel at:
            - Identifying the core theoretical challenges in any research question
            - Designing experimental approaches that test fundamental principles
            - Balancing theoretical rigor with practical implementation
            - Fostering interdisciplinary collaboration between specialists
            - Maintaining focus on the most promising research directions

            When creating research plans, be thorough, systematic, and consider multiple approaches.
            Always emphasize the theoretical foundations and experimental validation of any proposed solution.""",
        },
        "John von Neumann": {
            "role": "Mathematical Physicist & Computer Scientist",
            "expertise": [
                "Mathematical physics",
                "Computer architecture",
                "Game theory",
                "Quantum mechanics",
                "Numerical methods",
            ],
            "background": "Pioneer of computer science, game theory, and mathematical physics",
            "system_prompt": """You are John von Neumann, the brilliant mathematical physicist and computer scientist.

            Your approach to research questions involves:
            1. Mathematical rigor and formal mathematical frameworks
            2. Computational and algorithmic solutions to complex problems
            3. Game theory and strategic analysis of research approaches
            4. Numerical methods and computational physics
            5. Bridging abstract theory with practical implementation

            You excel at:
            - Formulating problems in precise mathematical terms
            - Developing computational algorithms and numerical methods
            - Applying game theory to optimize research strategies
            - Creating mathematical models that capture complex phenomena
            - Designing efficient computational approaches to physical problems

            When analyzing research questions, focus on mathematical foundations, computational feasibility,
            and the development of rigorous theoretical frameworks that can be implemented and tested.""",
        },
        "Richard Feynman": {
            "role": "Theoretical Physicist & Problem Solver",
            "expertise": [
                "Quantum electrodynamics",
                "Particle physics",
                "Problem-solving methodology",
                "Intuitive physics",
                "Experimental design",
            ],
            "background": "Nobel laureate in physics, known for intuitive problem-solving and quantum electrodynamics",
            "system_prompt": """You are Richard Feynman, the brilliant theoretical physicist and master problem solver.

            Your research methodology involves:
            1. Intuitive understanding of complex physical phenomena
            2. Creative problem-solving approaches that cut through complexity
            3. Experimental design that tests fundamental principles
            4. Clear communication of complex ideas through analogies and examples
            5. Focus on the most essential aspects of any research question

            You excel at:
            - Finding elegant solutions to seemingly intractable problems
            - Designing experiments that reveal fundamental truths
            - Communicating complex physics in accessible terms
            - Identifying the core physics behind any phenomenon
            - Developing intuitive models that capture essential behavior

            When approaching research questions, look for the simplest, most elegant solutions.
            Focus on the fundamental physics and design experiments that test your understanding directly.""",
        },
        "Albert Einstein": {
            "role": "Theoretical Physicist & Conceptual Innovator",
            "expertise": [
                "Relativity theory",
                "Quantum mechanics",
                "Conceptual physics",
                "Thought experiments",
                "Fundamental principles",
            ],
            "background": "Revolutionary physicist who developed relativity theory and influenced quantum mechanics",
            "system_prompt": """You are Albert Einstein, the revolutionary theoretical physicist and conceptual innovator.

            Your research approach involves:
            1. Deep conceptual thinking about fundamental physical principles
            2. Thought experiments that reveal the essence of physical phenomena
            3. Questioning established assumptions and exploring new paradigms
            4. Focus on the most fundamental and universal aspects of physics
            5. Intuitive understanding of space, time, and the nature of reality

            You excel at:
            - Identifying the conceptual foundations of any physical theory
            - Developing thought experiments that challenge conventional wisdom
            - Finding elegant mathematical descriptions of physical reality
            - Questioning fundamental assumptions and exploring alternatives
            - Developing unified theories that explain diverse phenomena

            When analyzing research questions, focus on the conceptual foundations and fundamental principles.
            Look for elegant, unified explanations and be willing to challenge established paradigms.""",
        },
        "Enrico Fermi": {
            "role": "Experimental Physicist & Nuclear Scientist",
            "expertise": [
                "Nuclear physics",
                "Experimental physics",
                "Neutron physics",
                "Statistical physics",
                "Practical applications",
            ],
            "background": "Nobel laureate known for nuclear physics, experimental work, and the first nuclear reactor",
            "system_prompt": """You are Enrico Fermi, the brilliant experimental physicist and nuclear scientist.

            Your research methodology involves:
            1. Rigorous experimental design and execution
            2. Practical application of theoretical principles
            3. Statistical analysis and probability in physics
            4. Nuclear physics and particle interactions
            5. Bridging theory with experimental validation

            You excel at:
            - Designing experiments that test theoretical predictions
            - Applying statistical methods to physical problems
            - Developing practical applications of fundamental physics
            - Nuclear physics and particle physics experiments
            - Creating experimental setups that reveal new phenomena

            When approaching research questions, focus on experimental design and practical implementation.
            Emphasize the importance of experimental validation and statistical analysis in physics research.""",
        },
        "Code-Implementer": {
            "role": "Computational Physicist & Code Developer",
            "expertise": [
                "Scientific computing",
                "Physics simulations",
                "Data analysis",
                "Algorithm implementation",
                "Numerical methods",
            ],
            "background": "Specialized in implementing computational solutions to physics problems",
            "system_prompt": """You are a specialized computational physicist and code developer.

            Your responsibilities include:
            1. Implementing computational solutions to physics problems
            2. Developing simulations and numerical methods
            3. Analyzing data and presenting results clearly
            4. Testing theoretical predictions through computation
            5. Providing quantitative analysis of research findings

            You excel at:
            - Writing clear, efficient scientific code
            - Implementing numerical algorithms for physics problems
            - Data analysis and visualization
            - Computational optimization and performance
            - Bridging theoretical physics with computational implementation

            When implementing solutions, focus on:
            - Clear, well-documented code
            - Efficient numerical algorithms
            - Comprehensive testing and validation
            - Clear presentation of results and analysis
            - Quantitative assessment of theoretical predictions""",
        },
    }

    # Build one Agent per physicist; note that the metadata's ``expertise``
    # list is NOT attached to the Agent instance.
    agents = []
    for name, data in physicists_data.items():
        agent = Agent(
            agent_name=name,
            system_prompt=data["system_prompt"],
            model_name=model_name,
            random_model_name=random_model_name,
            max_loops=1,
            dynamic_temperature_enabled=True,
            dynamic_context_window=True,
        )
        agents.append(agent)

    return agents
class BellLabsSwarm:
"""
Bell Labs Research Simulation Swarm
Simulates the collaborative research environment of Bell Labs with famous physicists
working together on complex research questions. The workflow follows:
1. Task is presented to the team
2. Oppenheimer creates a research plan
3. Physicists discuss and vote on approaches using majority voting
4. Code implementation agent tests the theory
5. Results are analyzed and fed back to the team
6. Process repeats for n loops with iterative refinement
"""
def __init__(
self,
name: str = "Bell Labs Research Team",
description: str = "A collaborative research environment simulating Bell Labs physicists",
max_loops: int = 1,
verbose: bool = True,
model_name: str = "gpt-4o-mini",
random_model_name: bool = False,
output_type: str = "str-all-except-first",
dynamic_context_window: bool = True,
**kwargs,
):
"""
Initialize the Bell Labs Research Swarm.
Args:
name: Name of the swarm
description: Description of the swarm's purpose
max_loops: Number of research iteration loops
verbose: Whether to enable verbose logging
model_name: Model to use for all agents
**kwargs: Additional arguments passed to BaseSwarm
"""
self.name = name
self.description = description
self.max_loops = max_loops
self.verbose = verbose
self.model_name = model_name
self.kwargs = kwargs
self.random_model_name = random_model_name
self.output_type = output_type
self.dynamic_context_window = dynamic_context_window
self.conversation = Conversation(
dynamic_context_window=dynamic_context_window
)
# Create the physicist agents
self.agents = _create_physicist_agents(
model_name=model_name, random_model_name=random_model_name
)
# Set up specialized agents
self.oppenheimer = self._get_agent_by_name(
"J. Robert Oppenheimer"
)
self.code_implementer = self._get_agent_by_name(
"Code-Implementer"
)
self.physicists = [
agent
for agent in self.agents
if agent.agent_name != "J. Robert Oppenheimer"
and agent.agent_name != "Code-Implementer"
]
# # Find the code implementer agent
# code_implementer = self._get_agent_by_name("Code-Implementer")
# code_implementer.tools = [developer_worker_agent]
logger.info(
f"Bell Labs Research Team initialized with {len(self.agents)} agents"
)
def _get_agent_by_name(self, name: str) -> Optional[Agent]:
"""Get an agent by name."""
for agent in self.agents:
if agent.agent_name == name:
return agent
return None
def run(
self, task: str, img: Optional[str] = None
) -> Dict[str, Any]:
"""
Run the Bell Labs research simulation.
Args:
task: The research question or task to investigate
Returns:
Dictionary containing the research results, process history, and full conversation
"""
logger.info(f"Starting Bell Labs research on: {task}")
# Add initial task to conversation history
self.conversation.add(
"Research Coordinator", f"Initial Research Task: {task}"
)
# Oppenheimer
oppenheimer_plan = self.oppenheimer.run(
task=self.conversation.get_str(), img=img
)
self.conversation.add(
self.oppenheimer.agent_name,
f"Research Plan: {oppenheimer_plan}",
)
# Discussion
# Physicists
physicist_discussion = self._conduct_physicist_discussion(
task, self.conversation.get_str()
)
# Add to conversation history
self.conversation.add(
"Group Discussion", physicist_discussion
)
# Now implement the solution
implementation_results = self._implement_and_test_solution(
history=self.conversation.get_str()
)
# Add to conversation history
self.conversation.add(
self.code_implementer.agent_name, implementation_results
)
return history_output_formatter(
conversation=self.conversation, type="str"
)
    def _create_research_plan(
        self, task: str, loop_number: int
    ) -> str:
        """
        Have Oppenheimer create a research plan.

        NOTE(review): this helper is not called from ``run`` in the visible
        code path (``run`` queries Oppenheimer directly) — confirm whether it
        is still needed.

        Args:
            task: Research task
            loop_number: Current loop number (zero-based; rendered as +1 in
                the prompt)

        Returns:
            Research plan from Oppenheimer
        """
        prompt = f"""
        Research Task: {task}
        Loop Number: {loop_number + 1}

        As J. Robert Oppenheimer, create a comprehensive research plan for this task.

        Your plan should include:
        1. Clear research objectives and hypotheses
        2. Theoretical framework and approach
        3. Specific research questions to investigate
        4. Methodology for testing and validation
        5. Expected outcomes and success criteria
        6. Timeline and milestones
        7. Resource requirements and team coordination

        Provide a detailed, actionable plan that the research team can follow.
        """

        plan = self.oppenheimer.run(prompt)
        return plan
def _conduct_physicist_discussion(
self, task: str, history: str
) -> str:
"""
Conduct a natural discussion among physicists where they build on each other's ideas.
Args:
task: Research task
history: Conversation history including Oppenheimer's plan
Returns:
Results of the physicist discussion as a conversation transcript
"""
import random
# Shuffle the physicists to create random discussion order
discussion_order = self.physicists.copy()
random.shuffle(discussion_order)
discussion_transcript = []
current_context = (
f"{history}\n\nCurrent Research Task: {task}\n\n"
)
# Each physicist contributes to the discussion, building on previous contributions
for i, physicist in enumerate(discussion_order):
if i == 0:
# First physicist starts the discussion
discussion_prompt = f"""
{current_context}
As {physicist.agent_name}, you are starting the group discussion about this research plan.
Based on your expertise, provide your initial thoughts on:
1. What aspects of Oppenheimer's research plan do you find most promising?
2. What theoretical challenges or concerns do you see?
3. What specific approaches would you recommend based on your expertise?
4. What questions or clarifications do you have for the team?
Be specific and draw from your unique perspective and expertise. This will set the tone for the group discussion.
"""
else:
# Subsequent physicists build on the discussion
previous_contributions = "\n\n".join(
discussion_transcript
)
discussion_prompt = f"""
{current_context}
Previous Discussion:
{previous_contributions}
As {physicist.agent_name}, continue the group discussion by building on your colleagues' ideas.
Consider:
1. How do your colleagues' perspectives relate to your expertise in {', '.join(physicist.expertise)}?
2. What additional insights can you add to the discussion?
3. How can you address any concerns or questions raised by others?
4. What specific next steps would you recommend based on the discussion so far?
Engage directly with your colleagues' ideas and contribute your unique perspective to move the research forward.
"""
# Get the physicist's contribution
contribution = physicist.run(discussion_prompt)
# Add to transcript with clear attribution
discussion_transcript.append(
f"{physicist.agent_name}: {contribution}"
)
# Update context for next iteration
current_context = (
f"{history}\n\nCurrent Research Task: {task}\n\nGroup Discussion:\n"
+ "\n\n".join(discussion_transcript)
)
# Create a summary of the discussion
summary_prompt = f"""
Research Task: {task}
Complete Discussion Transcript:
{chr(10).join(discussion_transcript)}
As a research coordinator, provide a concise summary of the key points from this group discussion:
1. Main areas of agreement among the physicists
2. Key concerns or challenges identified
3. Specific recommendations made by the team
4. Next steps for moving forward with the research
Focus on actionable insights and clear next steps that the team can implement.
"""
# Use Oppenheimer to summarize the discussion
discussion_summary = self.oppenheimer.run(summary_prompt)
# Return the full discussion transcript with summary
full_discussion = f"Group Discussion Transcript:\n\n{chr(10).join(discussion_transcript)}\n\n---\nDiscussion Summary:\n{discussion_summary}"
return full_discussion
def _implement_and_test_solution(
self,
history: str,
) -> Dict[str, Any]:
"""
Implement and test the proposed solution.
Args:
task: Research task
plan: Research plan
discussion_results: Results from physicist discussion
loop_number: Current loop number
Returns:
Implementation and testing results
"""
implementation_prompt = f"""
{history}
As the Code Implementer, your task is to:
1. Implement a computational solution based on the research plan
2. Test the theoretical predictions through simulation or calculation
3. Analyze the results and provide quantitative assessment
4. Identify any discrepancies between theory and implementation
5. Suggest improvements or next steps
Provide:
- Clear description of your implementation approach
- Code or algorithm description
- Test results and analysis
- Comparison with theoretical predictions
- Recommendations for further investigation
Focus on practical implementation and quantitative results.
"""
implementation_results = self.code_implementer.run(
implementation_prompt
)
return implementation_results
def _analyze_results(
self, implementation_results: Dict[str, Any], loop_number: int
) -> str:
"""
Analyze the results and provide team review.
Args:
implementation_results: Results from implementation phase
loop_number: Current loop number
Returns:
Analysis and recommendations
"""
analysis_prompt = f"""
Implementation Results: {implementation_results}
Loop Number: {loop_number + 1}
As the research team, analyze these results and provide:
1. Assessment of whether the implementation supports the theoretical predictions
2. Identification of any unexpected findings or discrepancies
3. Evaluation of the methodology and approach
4. Recommendations for the next research iteration
5. Insights gained from this round of investigation
Consider:
- What worked well in this approach?
- What challenges or limitations were encountered?
- How can the research be improved in the next iteration?
- What new questions or directions have emerged?
Provide a comprehensive analysis that will guide the next research phase.
"""
# Use team discussion for results analysis
analysis_results = self._conduct_team_analysis(
analysis_prompt
)
return analysis_results
def _conduct_team_analysis(self, analysis_prompt: str) -> str:
"""
Conduct a team analysis discussion using the same approach as physicist discussion.
Args:
analysis_prompt: The prompt for the analysis
Returns:
Results of the team analysis discussion
"""
import random
# Shuffle the agents to create random discussion order
discussion_order = self.agents.copy()
random.shuffle(discussion_order)
discussion_transcript = []
current_context = analysis_prompt
# Each agent contributes to the analysis, building on previous contributions
for i, agent in enumerate(discussion_order):
if i == 0:
# First agent starts the analysis
agent_prompt = f"""
{current_context}
As {agent.agent_name}, you are starting the team analysis discussion.
Based on your expertise and role, provide your initial analysis of the implementation results.
Focus on what you can contribute from your unique perspective.
"""
else:
# Subsequent agents build on the analysis
previous_contributions = "\n\n".join(
discussion_transcript
)
agent_prompt = f"""
{current_context}
Previous Analysis:
{previous_contributions}
As {agent.agent_name}, continue the team analysis by building on your colleagues' insights.
Consider:
1. How do your colleagues' perspectives relate to your expertise?
2. What additional insights can you add to the analysis?
3. How can you address any concerns or questions raised by others?
4. What specific recommendations would you make based on the analysis so far?
Engage directly with your colleagues' ideas and contribute your unique perspective.
"""
# Get the agent's contribution
contribution = agent.run(agent_prompt)
# Add to transcript with clear attribution
discussion_transcript.append(
f"{agent.agent_name}: {contribution}"
)
# Update context for next iteration
current_context = (
f"{analysis_prompt}\n\nTeam Analysis:\n"
+ "\n\n".join(discussion_transcript)
)
# Create a summary of the analysis
summary_prompt = f"""
Analysis Prompt: {analysis_prompt}
Complete Analysis Transcript:
{chr(10).join(discussion_transcript)}
As a research coordinator, provide a concise summary of the key points from this team analysis:
1. Main findings and insights from the team
2. Key recommendations made
3. Areas of agreement and disagreement
4. Next steps for the research
Focus on actionable insights and clear next steps.
"""
# Use Oppenheimer to summarize the analysis
analysis_summary = self.oppenheimer.run(summary_prompt)
# Return the full analysis transcript with summary
full_analysis = f"Team Analysis Transcript:\n\n{chr(10).join(discussion_transcript)}\n\n---\nAnalysis Summary:\n{analysis_summary}"
return full_analysis
def _refine_task_for_next_iteration(
self, current_task: str, loop_results: Dict[str, Any]
) -> str:
"""
Refine the task for the next research iteration.
Args:
current_task: Current research task
loop_results: Results from the current loop
Returns:
Refined task for next iteration
"""
refinement_prompt = f"""
Current Research Task: {current_task}
Results from Current Loop: {loop_results}
Based on the findings and analysis from this research loop, refine the research task for the next iteration.
Consider:
- What new questions have emerged?
- What aspects need deeper investigation?
- What alternative approaches should be explored?
- What specific hypotheses should be tested?
Provide a refined, focused research question that builds upon the current findings
and addresses the most important next steps identified by the team.
"""
# Use Oppenheimer to refine the task
refined_task = self.oppenheimer.run(refinement_prompt)
# Add task refinement to conversation history
self.conversation.add(
"J. Robert Oppenheimer",
f"Task Refined for Next Iteration: {refined_task}",
)
return refined_task
def _generate_final_conclusion(
self, research_results: Dict[str, Any]
) -> str:
"""
Generate a final conclusion summarizing all research findings.
Args:
research_results: Complete research results from all loops
Returns:
Final research conclusion
"""
conclusion_prompt = f"""
Complete Research Results: {research_results}
As J. Robert Oppenheimer, provide a comprehensive final conclusion for this research project.
Your conclusion should:
1. Summarize the key findings from all research loops
2. Identify the most significant discoveries or insights
3. Evaluate the success of the research approach
4. Highlight any limitations or areas for future investigation
5. Provide a clear statement of what was accomplished
6. Suggest next steps for continued research
Synthesize the work of the entire team and provide a coherent narrative
of the research journey and its outcomes.
"""
final_conclusion = self.oppenheimer.run(conclusion_prompt)
return final_conclusion
# Example usage function
def run_bell_labs_research(
    research_question: str,
    max_loops: int = 3,
    model_name: str = "gpt-4o-mini",
    verbose: bool = True,
) -> Dict[str, Any]:
    """
    Run a Bell Labs research simulation end to end.

    Args:
        research_question: The research question to investigate.
        max_loops: Number of research iteration loops.
        model_name: Model to use for all agents.
        verbose: Whether to enable verbose logging.

    Returns:
        Complete research results and findings from the swarm run.
    """
    swarm = BellLabsSwarm(
        max_loops=max_loops,
        verbose=verbose,
        model_name=model_name,
    )
    return swarm.run(research_question)
# if __name__ == "__main__":
# # Example research question
# research_question = """
# Investigate the feasibility of quantum computing for solving complex optimization problems.
# Consider both theoretical foundations and practical implementation challenges.
# """
# print("Starting Bell Labs Research Simulation...")
# print(f"Research Question: {research_question}")
# print("-" * 80)
# results = run_bell_labs_research(
# research_question=research_question,
# max_loops=2,
# verbose=True
# )
# print("\n" + "=" * 80)
# print("RESEARCH SIMULATION COMPLETED")
# print("=" * 80)
# print(f"\nFinal Conclusion:\n{results['final_conclusion']}")
# print(f"\nResearch completed in {len(results['research_history'])} loops.")
# print("Check the results dictionary for complete research details.")

@ -1,5 +1,6 @@
from swarms.structs.agent import Agent
from swarms.structs.agent_builder import AgentsBuilder
from swarms.structs.agent_rearrange import AgentRearrange, rearrange
from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
from swarms.structs.base_structure import BaseStructure
from swarms.structs.base_swarm import BaseSwarm
@ -9,7 +10,7 @@ from swarms.structs.board_of_directors_swarm import (
)
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.conversation import Conversation
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.council_as_judge import CouncilAsAJudge
from swarms.structs.cron_job import CronJob
from swarms.structs.de_hallucination_swarm import DeHallucinationSwarm
from swarms.structs.deep_research_swarm import DeepResearchSwarm
@ -66,11 +67,10 @@ from swarms.structs.multi_agent_exec import (
run_single_agent,
)
from swarms.structs.multi_agent_router import MultiAgentRouter
from swarms.structs.rearrange import AgentRearrange, rearrange
from swarms.structs.round_robin import RoundRobinSwarm
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
from swarms.structs.swarm_arange import SwarmRearrange
from swarms.structs.swarm_rearrange import SwarmRearrange
from swarms.structs.swarm_router import (
SwarmRouter,
SwarmType,

@ -21,6 +21,13 @@ from typing import (
import toml
import yaml
from litellm import model_list
from litellm.utils import (
get_max_tokens,
supports_function_calling,
supports_parallel_function_calling,
supports_vision,
)
from loguru import logger
from pydantic import BaseModel
@ -45,7 +52,6 @@ from swarms.schemas.base_schemas import (
ChatMessageResponse,
)
from swarms.schemas.conversation_schema import ConversationSchema
from swarms.schemas.llm_agent_schema import ModelConfigOrigin
from swarms.schemas.mcp_schemas import (
MCPConnection,
)
@ -422,7 +428,6 @@ class Agent:
mcp_config: Optional[MCPConnection] = None,
top_p: Optional[float] = 0.90,
conversation_schema: Optional[ConversationSchema] = None,
aditional_llm_config: Optional[ModelConfigOrigin] = None,
llm_base_url: Optional[str] = None,
llm_api_key: Optional[str] = None,
rag_config: Optional[RAGConfig] = None,
@ -430,8 +435,9 @@ class Agent:
output_raw_json_from_tool_call: bool = False,
summarize_multiple_images: bool = False,
tool_retry_attempts: int = 3,
speed_mode: str = None,
reasoning_prompt_on: bool = True,
dynamic_context_window: bool = True,
show_tool_execution_output: bool = True,
*args,
**kwargs,
):
@ -562,7 +568,6 @@ class Agent:
self.mcp_config = mcp_config
self.top_p = top_p
self.conversation_schema = conversation_schema
self.aditional_llm_config = aditional_llm_config
self.llm_base_url = llm_base_url
self.llm_api_key = llm_api_key
self.rag_config = rag_config
@ -572,17 +577,19 @@ class Agent:
)
self.summarize_multiple_images = summarize_multiple_images
self.tool_retry_attempts = tool_retry_attempts
self.speed_mode = speed_mode
self.reasoning_prompt_on = reasoning_prompt_on
# Initialize the feedback
self.feedback = []
self.dynamic_context_window = dynamic_context_window
self.show_tool_execution_output = show_tool_execution_output
# self.init_handling()
self.setup_config()
# Initialize the short memory
self.short_memory = self.short_memory_init()
# Initialize the tools
self.tool_struct = self.setup_tools()
if exists(self.docs_folder):
self.get_docs_from_doc_folders()
@ -606,8 +613,6 @@ class Agent:
if self.react_on is True:
self.system_prompt += REACT_SYS_PROMPT
# Run sequential operations after all concurrent tasks are done
# self.agent_output = self.agent_output_model()
if self.autosave is True:
log_agent_data(self.to_dict())
@ -636,13 +641,14 @@ class Agent:
verbose=self.verbose,
)
def tool_handling(self):
self.tool_struct = BaseTool(
def setup_tools(self):
return BaseTool(
tools=self.tools,
verbose=self.verbose,
)
def tool_handling(self):
# Convert all the tools into a list of dictionaries
self.tools_list_dictionary = (
convert_multiple_functions_to_openai_function_schema(
@ -660,11 +666,13 @@ class Agent:
# Add agent name, description, and instructions to the prompt
if self.agent_name is not None:
prompt += f"\n Name: {self.agent_name}"
prompt += f"\n Your Name: {self.agent_name} \n"
elif self.agent_description is not None:
prompt += f"\n Description: {self.agent_description}"
prompt += (
f"\n Your Description: {self.agent_description} \n"
)
elif self.system_prompt is not None:
prompt += f"\n Instructions: {self.system_prompt}"
prompt += f"\n Your Instructions: {self.system_prompt} \n"
else:
prompt = self.system_prompt
@ -674,53 +682,19 @@ class Agent:
# Initialize the short term memory
memory = Conversation(
name=f"{self.agent_name}_conversation",
system_prompt=prompt,
user=self.user_name,
rules=self.rules,
token_count=(
self.conversation_schema.count_tokens
if self.conversation_schema
else False
),
message_id_on=(
self.conversation_schema.message_id_on
if self.conversation_schema
else False
),
time_enabled=(
self.conversation_schema.time_enabled
if self.conversation_schema
else False
),
)
# Add the system prompt to the conversation
memory.add(
role="System",
content=prompt,
token_count=False,
message_id_on=False,
time_enabled=True,
dynamic_context_window=self.dynamic_context_window,
tokenizer_model_name=self.model_name,
context_length=self.context_length,
)
return memory
def agent_output_model(self):
# Many steps
id = agent_id()
return ManySteps(
agent_id=id,
agent_name=self.agent_name,
# run_id=run_id,
task="",
max_loops=self.max_loops,
steps=self.short_memory.to_dict(),
full_history=self.short_memory.get_str(),
total_tokens=count_tokens(
text=self.short_memory.get_str()
),
stopping_token=self.stopping_token,
interactive=self.interactive,
dynamic_temperature_enabled=self.dynamic_temperature_enabled,
)
def llm_handling(self, *args, **kwargs):
"""Initialize the LiteLLM instance with combined configuration from all sources.
@ -737,9 +711,6 @@ class Agent:
Returns:
LiteLLM: The initialized LiteLLM instance
"""
# Use cached instance if available
if self.llm is not None:
return self.llm
if self.model_name is None:
self.model_name = "gpt-4o-mini"
@ -762,6 +733,7 @@ class Agent:
"max_tokens": self.max_tokens,
"system_prompt": self.system_prompt,
"stream": self.streaming_on,
"top_p": self.top_p,
}
# Initialize tools_list_dictionary, if applicable
@ -823,7 +795,7 @@ class Agent:
return self.llm
except AgentLLMInitializationError as e:
logger.error(
f"AgentLLMInitializationError: Agent Name: {self.agent_name} Error in llm_handling: {e} Your current configuration is not supported. Please check the configuration and parameters."
f"AgentLLMInitializationError: Agent Name: {self.agent_name} Error in llm_handling: {e} Your current configuration is not supported. Please check the configuration and parameters. Traceback: {traceback.format_exc()}"
)
return None
@ -886,6 +858,9 @@ class Agent:
if self.preset_stopping_token is not None:
self.stopping_token = "<DONE>"
# Initialize the feedback
self.feedback = []
def check_model_supports_utilities(
self, img: Optional[str] = None
) -> bool:
@ -898,11 +873,6 @@ class Agent:
Returns:
bool: True if model supports vision and image is provided, False otherwise.
"""
from litellm.utils import (
supports_function_calling,
supports_parallel_function_calling,
supports_vision,
)
# Only check vision support if an image is provided
if img is not None:
@ -1234,7 +1204,7 @@ class Agent:
self.save()
logger.error(
f"Attempt {attempt+1}/{self.retry_attempts}: Error generating response in loop {loop_count} for agent '{self.agent_name}': {str(e)} | "
f"Attempt {attempt+1}/{self.retry_attempts}: Error generating response in loop {loop_count} for agent '{self.agent_name}': {str(e)} | Traceback: {traceback.format_exc()}"
)
attempt += 1
@ -1312,9 +1282,7 @@ class Agent:
except KeyboardInterrupt as error:
self._handle_run_error(error)
def __handle_run_error(self, error: any):
import traceback
def _handle_run_error(self, error: any):
if self.autosave is True:
self.save()
log_agent_data(self.to_dict())
@ -1336,11 +1304,6 @@ class Agent:
raise error
def _handle_run_error(self, error: any):
# Handle error directly instead of using daemon thread
# to ensure proper exception propagation
self.__handle_run_error(error)
async def arun(
self,
task: Optional[str] = None,
@ -1537,32 +1500,7 @@ class Agent:
except Exception as error:
logger.info(f"Error running bulk run: {error}", "red")
async def arun_batched(
self,
tasks: List[str],
*args,
**kwargs,
):
"""Asynchronously runs a batch of tasks."""
try:
# Create a list of coroutines for each task
coroutines = [
self.arun(task=task, *args, **kwargs)
for task in tasks
]
# Use asyncio.gather to run them concurrently
results = await asyncio.gather(*coroutines)
return results
except Exception as error:
logger.error(f"Error running batched tasks: {error}")
raise
def reliability_check(self):
from litellm import model_list
from litellm.utils import (
get_max_tokens,
supports_function_calling,
)
if self.system_prompt is None:
logger.warning(
@ -1596,7 +1534,7 @@ class Agent:
try:
if self.max_tokens > get_max_tokens(self.model_name):
logger.warning(
f"Max tokens is set to {self.max_tokens}, but the model '{self.model_name}' only supports {get_max_tokens(self.model_name)} tokens. Please set max tokens to {get_max_tokens(self.model_name)} or less."
f"Max tokens is set to {self.max_tokens}, but the model '{self.model_name}' may or may not support {get_max_tokens(self.model_name)} tokens. Please set max tokens to {get_max_tokens(self.model_name)} or less."
)
except Exception:
@ -1604,7 +1542,7 @@ class Agent:
if self.model_name not in model_list:
logger.warning(
f"The model '{self.model_name}' is not supported. Please use a supported model, or override the model name with the 'llm' parameter, which should be a class with a 'run(task: str)' method or a '__call__' method."
f"The model '{self.model_name}' may not be supported. Please use a supported model, or override the model name with the 'llm' parameter, which should be a class with a 'run(task: str)' method or a '__call__' method."
)
def save(self, file_path: str = None) -> None:
@ -1850,14 +1788,6 @@ class Agent:
) as executor:
self.executor = executor
# # Reinitialize tool structure if needed
# if hasattr(self, 'tools') and (self.tools or getattr(self, 'list_base_models', None)):
# self.tool_struct = BaseTool(
# tools=self.tools,
# base_models=getattr(self, 'list_base_models', None),
# tool_system_prompt=self.tool_system_prompt
# )
except Exception as e:
logger.error(f"Error reinitializing components: {e}")
raise
@ -2622,19 +2552,20 @@ class Agent:
**kwargs,
)
else:
# Non-streaming call
args = {
"task": task,
}
if img is not None:
out = self.llm.run(
task=task, img=img, *args, **kwargs
)
else:
out = self.llm.run(task=task, *args, **kwargs)
args["img"] = img
out = self.llm.run(**args, **kwargs)
return out
except AgentLLMError as e:
logger.error(
f"Error calling LLM: {e}. Task: {task}, Args: {args}, Kwargs: {kwargs}"
f"Error calling LLM: {e}. Task: {task}, Args: {args}, Kwargs: {kwargs} Traceback: {traceback.format_exc()}"
)
raise e
@ -2725,6 +2656,30 @@ class Agent:
)
raise KeyboardInterrupt
def run_batched(
self,
tasks: List[str],
imgs: List[str] = None,
*args,
**kwargs,
):
"""
Run a batch of tasks concurrently.
Args:
tasks (List[str]): List of tasks to run.
imgs (List[str], optional): List of images to run. Defaults to None.
*args: Additional positional arguments to be passed to the execution method.
**kwargs: Additional keyword arguments to be passed to the execution method.
Returns:
List[Any]: List of results from each task execution.
"""
return [
self.run(task=task, imgs=imgs, *args, **kwargs)
for task in tasks
]
def handle_artifacts(
self, text: str, file_output_path: str, file_extension: str
) -> None:
@ -3072,10 +3027,17 @@ class Agent:
)
if self.print_on is True:
self.pretty_print(
f"Tool Executed Successfully [{time.strftime('%H:%M:%S')}]",
loop_count,
)
if self.show_tool_execution_output is True:
self.pretty_print(
f"Tool Executed Successfully [{time.strftime('%H:%M:%S')}] \n\nTool Output: {format_data_structure(output)}",
loop_count,
)
else:
self.pretty_print(
f"Tool Executed Successfully [{time.strftime('%H:%M:%S')}]",
loop_count,
)
# Now run the LLM again without tools - create a temporary LLM instance
# instead of modifying the cached one

@ -3,19 +3,17 @@ import uuid
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, Dict, List, Optional, Union
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation
from swarms.structs.multi_agent_exec import get_agents_info
from swarms.telemetry.main import log_agent_data
from swarms.utils.any_to_str import any_to_str
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
from swarms.utils.loguru_logger import initialize_logger
from swarms.telemetry.main import log_agent_data
from swarms.structs.conversation import Conversation
from swarms.utils.output_types import OutputType
from swarms.structs.multi_agent_exec import get_agents_info
logger = initialize_logger(log_folder="rearrange")

@ -7,10 +7,15 @@ from loguru import logger
import traceback
class BatchAgentExecutionError(Exception):
pass
def batch_agent_execution(
agents: List[Union[Agent, Callable]],
tasks: List[str] = None,
imgs: List[str] = None,
max_workers: int = max(1, int(os.cpu_count() * 0.9)),
):
"""
Execute a batch of agents on a list of tasks concurrently.
@ -38,9 +43,6 @@ def batch_agent_execution(
results = []
# Calculate max workers as 90% of available CPU cores
max_workers = max(1, int(os.cpu_count() * 0.9))
formatter.print_panel(
f"Executing {len(agents)} agents on {len(tasks)} tasks using {max_workers} workers"
)
@ -78,5 +80,7 @@ def batch_agent_execution(
return results
except Exception as e:
log = f"Batch agent execution failed Error: {str(e)} Traceback: {traceback.format_exc()}"
logger.error(log)
raise e
raise BatchAgentExecutionError(log)

@ -295,7 +295,7 @@ class ConcurrentWorkflow(BaseSwarm):
def display_agent_dashboard(
self,
title: str = "🤖 Agent Dashboard",
title: str = "ConcurrentWorkflow Dashboard",
is_final: bool = False,
) -> None:
"""
@ -307,7 +307,7 @@ class ConcurrentWorkflow(BaseSwarm):
Args:
title (str, optional): The dashboard title to display at the top.
Defaults to "🤖 Agent Dashboard".
Defaults to "🤖 ConcurrentWorkflow Dashboard".
is_final (bool, optional): Whether this is the final dashboard display
after all agents have completed. Changes formatting and styling.
Defaults to False.
@ -543,7 +543,8 @@ class ConcurrentWorkflow(BaseSwarm):
# Display final dashboard if enabled
if self.show_dashboard:
self.display_agent_dashboard(
"🎉 Final Agent Dashboard", is_final=True
"Final ConcurrentWorkflow Dashboard",
is_final=True,
)
return history_output_formatter(

@ -1,21 +1,21 @@
import traceback
import concurrent.futures
import datetime
import inspect
import json
import os
import traceback
import uuid
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Literal,
Optional,
Union,
Literal,
Any,
)
import yaml
import inspect
from swarms.utils.any_to_str import any_to_str
from swarms.utils.litellm_tokenizer import count_tokens
@ -26,6 +26,18 @@ if TYPE_CHECKING:
from loguru import logger
# Define available providers
providers = Literal[
"mem0",
"in-memory",
"supabase",
"redis",
"sqlite",
"duckdb",
"pulsar",
]
def generate_conversation_id():
"""Generate a unique conversation ID."""
return str(uuid.uuid4())
@ -50,18 +62,6 @@ def get_conversation_dir():
return conversation_dir
# Define available providers
providers = Literal[
"mem0",
"in-memory",
"supabase",
"redis",
"sqlite",
"duckdb",
"pulsar",
]
def _create_backend_conversation(backend: str, **kwargs):
"""
Create a backend conversation instance based on the specified backend type.
@ -183,9 +183,9 @@ class Conversation:
name: str = "conversation-test",
system_prompt: Optional[str] = None,
time_enabled: bool = False,
autosave: bool = False, # Changed default to False
autosave: bool = False,
save_filepath: str = None,
load_filepath: str = None, # New parameter to specify which file to load from
load_filepath: str = None,
context_length: int = 8192,
rules: str = None,
custom_rules_prompt: str = None,
@ -211,6 +211,8 @@ class Conversation:
redis_data_dir: Optional[str] = None,
conversations_dir: Optional[str] = None,
export_method: str = "json",
dynamic_context_window: bool = True,
caching: bool = True,
*args,
**kwargs,
):
@ -249,6 +251,8 @@ class Conversation:
self.auto_persist = auto_persist
self.redis_data_dir = redis_data_dir
self.export_method = export_method
self.dynamic_context_window = dynamic_context_window
self.caching = caching
if self.name is None:
self.name = id
@ -933,7 +937,15 @@ class Conversation:
# Fallback to in-memory implementation
pass
elif self.dynamic_context_window is True:
return self.dynamic_auto_chunking()
else:
return self._return_history_as_string_worker()
def _return_history_as_string_worker(self):
formatted_messages = []
for message in self.conversation_history:
formatted_messages.append(
f"{message['role']}: {message['content']}"
@ -1778,20 +1790,80 @@ class Conversation:
pass
self.conversation_history = []
def _dynamic_auto_chunking_worker(self):
"""
Dynamically chunk the conversation history to fit within the context length.
Returns:
str: The chunked conversation history as a string that fits within context_length tokens.
"""
all_tokens = self._return_history_as_string_worker()
total_tokens = count_tokens(
all_tokens, self.tokenizer_model_name
)
if total_tokens <= self.context_length:
return all_tokens
# We need to remove characters from the beginning until we're under the limit
# Start by removing a percentage of characters and adjust iteratively
target_tokens = self.context_length
current_string = all_tokens
# Binary search approach to find the right cutoff point
left, right = 0, len(all_tokens)
while left < right:
mid = (left + right) // 2
test_string = all_tokens[mid:]
if not test_string:
break
test_tokens = count_tokens(
test_string, self.tokenizer_model_name
)
if test_tokens <= target_tokens:
# We can remove more from the beginning
right = mid
current_string = test_string
else:
# We need to keep more from the beginning
left = mid + 1
return current_string
def dynamic_auto_chunking(self):
"""
Dynamically chunk the conversation history to fit within the context length.
Returns:
str: The chunked conversation history as a string that fits within context_length tokens.
"""
try:
return self._dynamic_auto_chunking_worker()
except Exception as e:
logger.error(f"Dynamic auto chunking failed: {e}")
return self._return_history_as_string_worker()
# # Example usage
# # conversation = Conversation()
# conversation = Conversation(token_count=True)
# Example usage
# conversation = Conversation()
# conversation = Conversation(token_count=True, context_length=14)
# conversation.add("user", "Hello, how are you?")
# conversation.add("assistant", "I am doing well, thanks.")
# conversation.add("user", "What is the weather in Tokyo?")
# print(conversation.dynamic_auto_chunking())
# # conversation.add(
# # "assistant", {"name": "tool_1", "output": "Hello, how are you?"}
# # )
# # print(conversation.return_json())
# )
# print(conversation.return_json())
# # # print(conversation.get_last_message_as_string())
# # print(conversation.get_last_message_as_string())
# print(conversation.return_json())
# # # conversation.add("assistant", "I am doing well, thanks.")
# # # # print(conversation.to_json())
# # print(type(conversation.to_dict()))
# # print(conversation.to_yaml())
# # conversation.add("assistant", "I am doing well, thanks.")
# # # print(conversation.to_json())
# print(type(conversation.to_dict()))
# print(conversation.to_yaml())

@ -1,306 +0,0 @@
import json
from typing import Any, List
from loguru import logger
from pydantic import BaseModel, Field
from swarms import Agent
class AgentOutput(BaseModel):
"""
Schema for capturing metadata and results of an agent run.
"""
agent_name: str = Field(..., description="Name of the agent.")
input_query: str = Field(
..., description="Input query provided to the agent."
)
output_result: Any = Field(
..., description="Result produced by the agent."
)
metadata: dict = Field(
..., description="Additional metadata about the agent run."
)
class MatrixSwarm:
"""
A class to manage a matrix of agents and perform matrix operations similar to linear algebra.
"""
def __init__(self, agents: List[List[Agent]]):
"""
Initializes the MatrixSwarm with a 2D list of agents.
Args:
agents (List[List[Agent]]): 2D list of agents representing the matrix.
"""
if not agents or not all(
isinstance(row, list) for row in agents
):
raise ValueError("Agents must be provided as a 2D list.")
if not all(
isinstance(agent, Agent)
for row in agents
for agent in row
):
raise ValueError(
"All elements of the matrix must be instances of `Agent`."
)
self.agents = agents
self.outputs = [] # List to store outputs as AgentOutput
def validate_dimensions(self, other: "MatrixSwarm") -> None:
"""
Validates that two matrices have compatible dimensions for operations.
Args:
other (MatrixSwarm): Another MatrixSwarm.
Raises:
ValueError: If dimensions are incompatible.
"""
if len(self.agents) != len(other.agents) or len(
self.agents[0]
) != len(other.agents[0]):
raise ValueError(
"Matrix dimensions are incompatible for this operation."
)
def transpose(self) -> "MatrixSwarm":
"""
Transposes the matrix of agents (swap rows and columns).
Returns:
MatrixSwarm: A new transposed MatrixSwarm.
"""
transposed_agents = [
[self.agents[j][i] for j in range(len(self.agents))]
for i in range(len(self.agents[0]))
]
return MatrixSwarm(transposed_agents)
def add(self, other: "MatrixSwarm") -> "MatrixSwarm":
"""
Adds two matrices element-wise.
Args:
other (MatrixSwarm): Another MatrixSwarm to add.
Returns:
MatrixSwarm: A new MatrixSwarm resulting from the addition.
"""
self.validate_dimensions(other)
added_agents = [
[self.agents[i][j] for j in range(len(self.agents[i]))]
for i in range(len(self.agents))
]
return MatrixSwarm(added_agents)
def scalar_multiply(self, scalar: int) -> "MatrixSwarm":
"""
Scales the agents by duplicating them scalar times along the row.
Args:
scalar (int): The scalar multiplier.
Returns:
MatrixSwarm: A new MatrixSwarm where each agent is repeated scalar times along the row.
"""
scaled_agents = [
[agent for _ in range(scalar) for agent in row]
for row in self.agents
]
return MatrixSwarm(scaled_agents)
def multiply(
self, other: "MatrixSwarm", inputs: List[str]
) -> List[List[AgentOutput]]:
"""
Multiplies two matrices (dot product between rows and columns).
Args:
other (MatrixSwarm): Another MatrixSwarm for multiplication.
inputs (List[str]): A list of input queries for the agents.
Returns:
List[List[AgentOutput]]: A resulting matrix of outputs after multiplication.
"""
if len(self.agents[0]) != len(other.agents):
raise ValueError(
"Matrix dimensions are incompatible for multiplication."
)
results = []
for i, row in enumerate(self.agents):
row_results = []
for col_idx in range(len(other.agents[0])):
col = [
other.agents[row_idx][col_idx]
for row_idx in range(len(other.agents))
]
query = inputs[
i
] # Input query for the corresponding row
intermediate_result = []
for agent_r, agent_c in zip(row, col):
try:
result = agent_r.run(query)
intermediate_result.append(result)
except Exception as e:
intermediate_result.append(f"Error: {e}")
# Aggregate outputs from dot product
combined_result = " ".join(
intermediate_result
) # Example aggregation
row_results.append(
AgentOutput(
agent_name=f"DotProduct-{i}-{col_idx}",
input_query=query,
output_result=combined_result,
metadata={"row": i, "col": col_idx},
)
)
results.append(row_results)
return results
def subtract(self, other: "MatrixSwarm") -> "MatrixSwarm":
    """
    Element-wise "subtraction" of two agent matrices.

    NOTE(review): like ``add``, this does not combine ``self`` and
    ``other`` — after dimension validation it returns a shallow copy
    of ``self``'s grid. Presumably a placeholder; confirm intended
    semantics of "subtracting" agents.

    Args:
        other (MatrixSwarm): Another MatrixSwarm to subtract. Used only
            for dimension validation.

    Returns:
        MatrixSwarm: A new MatrixSwarm whose grid references the same
        Agent objects as ``self``.
    """
    self.validate_dimensions(other)
    # Shallow copy of the grid: new row lists, same Agent instances.
    subtracted_agents = [
        [self.agents[i][j] for j in range(len(self.agents[i]))]
        for i in range(len(self.agents))
    ]
    return MatrixSwarm(subtracted_agents)
def identity(self, size: int) -> "MatrixSwarm":
    """
    Build an identity-like matrix of agents of shape (size, size).

    Diagonal positions reuse this matrix's own agents at (i, i);
    off-diagonal positions are filled with fresh placeholder
    "Zero-Agent" instances carrying an empty system prompt.

    Args:
        size (int): Size of the identity matrix (NxN). Must not exceed
            this matrix's own dimensions, since diagonal agents are
            taken from ``self.agents``.

    Returns:
        MatrixSwarm: An identity MatrixSwarm.
    """
    grid = []
    for i in range(size):
        current_row = []
        for j in range(size):
            if i == j:
                # Keep the existing agent on the diagonal.
                current_row.append(self.agents[i][j])
            else:
                # Placeholder agent standing in for a "zero" entry.
                current_row.append(
                    Agent(
                        agent_name=f"Zero-Agent-{i}-{j}",
                        system_prompt="",
                    )
                )
        grid.append(current_row)
    return MatrixSwarm(grid)
def determinant(self) -> Any:
    """
    Compute a "determinant" of a square MatrixSwarm via cofactor
    expansion along the first row.

    NOTE(review): the arithmetic (``cofactor * ...`` and
    ``det_result +=``) assumes ``Agent.run`` returns numeric values;
    if it returns strings (the usual case for LLM agents) the
    multiplication/addition will raise TypeError. The in-code comment
    below calls this placeholder logic — confirm intended use.

    Returns:
        Any: Determinant of the matrix (as agent outputs).

    Raises:
        ValueError: If the matrix is not square.
    """
    if len(self.agents) != len(self.agents[0]):
        raise ValueError(
            "Determinant can only be computed for square matrices."
        )
    # Recursive determinant calculation (example using placeholder logic)
    if len(self.agents) == 1:
        # 1x1 base case: the single agent's output is the determinant.
        return self.agents[0][0].run("Compute determinant")
    det_result = 0
    for i in range(len(self.agents)):
        # Minor: drop row 0 and column i.
        submatrix = MatrixSwarm(
            [row[:i] + row[i + 1 :] for row in self.agents[1:]]
        )
        # Alternating-sign cofactor from the first row.
        cofactor = ((-1) ** i) * self.agents[0][i].run(
            "Compute determinant"
        )
        det_result += cofactor * submatrix.determinant()
    return det_result
def save_to_file(self, path: str) -> None:
    """
    Persist the matrix structure (agent names) and collected outputs
    to a JSON file.

    Errors are logged rather than raised, so a failed save never
    interrupts the caller (best-effort semantics, preserved from the
    original implementation).

    Args:
        path (str): File path to save the matrix to.
    """
    try:
        matrix_data = {
            "agents": [
                [agent.agent_name for agent in row]
                for row in self.agents
            ],
            "outputs": [
                # Pydantic v2 renamed ``.dict()`` to ``.model_dump()``
                # (``.dict()`` is deprecated and will be removed);
                # prefer the new API but keep a v1 fallback.
                output.model_dump()
                if hasattr(output, "model_dump")
                else output.dict()
                for output in self.outputs
            ],
        }
        with open(path, "w") as f:
            json.dump(matrix_data, f, indent=4)
        logger.info(f"MatrixSwarm saved to {path}")
    except Exception as e:
        logger.error(f"Error saving MatrixSwarm: {e}")
# # Example usage
# if __name__ == "__main__":
# from swarms.prompts.finance_agent_sys_prompt import (
# FINANCIAL_AGENT_SYS_PROMPT,
# )
# # Create a 3x3 matrix of agents
# agents = [
# [
# Agent(
# agent_name=f"Agent-{i}-{j}",
# system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
# model_name="gpt-4o-mini",
# max_loops=1,
# autosave=True,
# dashboard=False,
# verbose=True,
# dynamic_temperature_enabled=True,
# saved_state_path=f"agent_{i}_{j}.json",
# user_name="swarms_corp",
# retry_attempts=1,
# context_length=200000,
# return_step_meta=False,
# output_type="string",
# streaming_on=False,
# )
# for j in range(3)
# ]
# for i in range(3)
# ]
# # Initialize the matrix
# agent_matrix = MatrixSwarm(agents)
# # Example queries
# inputs = [
# "Explain Roth IRA benefits",
# "Differences between ETFs and mutual funds",
# "How to create a diversified portfolio",
# ]
# # Run agents
# outputs = agent_matrix.multiply(agent_matrix.transpose(), inputs)
# # Save results
# agent_matrix.save_to_file("agent_matrix_results.json")

@ -2,7 +2,7 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Callable, List, Optional, Union
from swarms.structs.agent import Agent
from swarms.structs.rearrange import AgentRearrange
from swarms.structs.agent_rearrange import AgentRearrange
from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.output_types import OutputType

@ -1,326 +0,0 @@
import math
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Callable, Dict, Optional, Tuple
from datasets import Dataset, load_dataset
from loguru import logger
from tqdm import tqdm
# -----------------------------------------------------------------------------
# Logging configuration: log to console and file (rotating by size)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Swarm interface example
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Benchmark configuration
# -----------------------------------------------------------------------------
class BenchmarkConfig:
    """
    Describes how to evaluate one benchmark dataset.

    Attributes:
        input_column (str): Dataset column holding the task prompt.
        answer_column (str): Dataset column holding the expected answer.
        answer_extractor (Optional[Callable[[Any], str]]): Optional hook
            turning the dataset's raw answer format into a plain string.
        answer_matcher (Optional[Callable[[str, str], bool]]): Optional
            predicate comparing expected answer and swarm output; when
            None, a simple substring containment check is used instead.
    """

    def __init__(
        self,
        input_column: str,
        answer_column: str,
        answer_extractor: Optional[Callable[[Any], str]] = None,
        answer_matcher: Optional[Callable[[str, str], bool]] = None,
    ):
        # Column names identifying the prompt and the gold answer.
        self.input_column = input_column
        self.answer_column = answer_column
        # Optional customization hooks; None selects default behavior.
        self.answer_extractor = answer_extractor
        self.answer_matcher = answer_matcher
# -----------------------------------------------------------------------------
# Preset dataset configurations for popular benchmarks
# -----------------------------------------------------------------------------
PRESET_DATASETS: Dict[str, BenchmarkConfig] = {
    # Grade-school math word problems; answer column is a plain string.
    "gsm8k": BenchmarkConfig(
        input_column="question",
        answer_column="answer",
    ),
    # SQuAD stores answers as a dict like {"text": [...], ...};
    # extract the first candidate text, falling back to str() otherwise.
    "squad": BenchmarkConfig(
        input_column="question",
        answer_column="answers",
        answer_extractor=lambda ans: (
            ans["text"][0]
            if isinstance(ans, dict)
            and "text" in ans
            and isinstance(ans["text"], list)
            and ans["text"]
            else str(ans)
        ),
    ),
    # Commonsense pronoun-resolution; answer is a label key ("1"/"2").
    "winogrande": BenchmarkConfig(
        input_column="sentence",
        answer_column="answer",
    ),
    # Multiple-choice commonsense QA; answer is a choice key (e.g. "A").
    "commonsense_qa": BenchmarkConfig(
        input_column="question",
        answer_column="answerKey",
    ),
    # Add additional presets here.
}
# -----------------------------------------------------------------------------
# SwarmEvaluator with extended features
# -----------------------------------------------------------------------------
class SwarmEvaluator:
    """
    Evaluator that runs a swarm of agents over Hugging Face benchmark
    datasets, with concurrency, per-task retries, progress display,
    performance timing, and customizable answer matching.

    Example:
        swarm = Swarm()
        evaluator = SwarmEvaluator(swarm)
        results = evaluator.evaluate("gsm8k", split="test", max_workers=4)
        print(results)
    """

    def __init__(self, swarm: callable) -> None:
        """
        Initialize the evaluator with a given swarm.

        Args:
            swarm (Swarm): A swarm instance with a callable
                ``run(task: str)`` method.
        """
        self.swarm = swarm

    def evaluate(
        self,
        dataset_name: str,
        split: str = "test",
        config: Optional[BenchmarkConfig] = None,
        max_workers: int = 1,
        max_retries: int = 3,
        show_progress: bool = True,
        output_file: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Evaluate the specified benchmark dataset using the swarm.

        Args:
            dataset_name (str): The dataset name (from Hugging Face).
            split (str): The dataset split (e.g., "test", "validation").
            config (Optional[BenchmarkConfig]): Benchmark configuration.
                If None, a preset config is looked up by dataset name.
            max_workers (int): Number of concurrent workers.
            max_retries (int): Number of retries for swarm tasks on failure.
            show_progress (bool): If True, display a progress bar.
            output_file (Optional[str]): Path to a file to write the results.

        Returns:
            Dict[str, Any]: Evaluation metrics: total examples, correct
            answers, accuracy, overall time, average per-example time.

        Raises:
            ValueError: If no config is supplied and no preset exists
                for ``dataset_name``.
        """
        if config is None:
            config = PRESET_DATASETS.get(dataset_name)
            if config is None:
                raise ValueError(
                    f"No preset config for dataset '{dataset_name}'. Provide a BenchmarkConfig."
                )

        logger.info(
            f"Loading dataset '{dataset_name}' (split: {split})..."
        )
        dataset: Dataset = load_dataset(dataset_name, split=split)
        total_examples = len(dataset)
        logger.info(f"Total examples to evaluate: {total_examples}")

        start_time = time.time()
        correct = 0

        def _process_example(
            example: Dict[str, Any], idx: int
        ) -> Tuple[bool, float]:
            """Run one example through the swarm; return (is_correct, seconds)."""
            task_start = time.time()
            task_text = example.get(config.input_column)
            expected_answer = example.get(config.answer_column)
            if task_text is None or expected_answer is None:
                logger.warning(
                    f"Example {idx}: Missing '{config.input_column}' or '{config.answer_column}', skipping."
                )
                return (False, 0.0)

            # Normalize the raw answer format when an extractor is supplied.
            if config.answer_extractor:
                try:
                    expected_answer = config.answer_extractor(
                        expected_answer
                    )
                except Exception as e:
                    logger.error(
                        f"Example {idx}: Error extracting answer: {e}"
                    )
                    return (False, 0.0)

            logger.debug(f"Example {idx} - Task: {task_text}")
            logger.debug(
                f"Example {idx} - Expected Answer: {expected_answer}"
            )

            try:
                swarm_output = self._run_with_retry(
                    task_text, max_retries
                )
            except Exception as e:
                logger.error(
                    f"Example {idx}: Failed after retries. Error: {e}"
                )
                return (False, time.time() - task_start)

            logger.debug(
                f"Example {idx} - Swarm Output: {swarm_output}"
            )

            # A custom matcher wins; otherwise use the default
            # whitespace-normalized substring check.
            if config.answer_matcher:
                is_correct = config.answer_matcher(
                    expected_answer, swarm_output
                )
            else:
                is_correct = self._default_matcher(
                    expected_answer, swarm_output
                )

            task_time = time.time() - task_start
            logger.info(
                f"Example {idx}: {'Correct' if is_correct else 'Incorrect'} in {task_time:.2f}s"
            )
            return (is_correct, task_time)

        futures = []
        total_time = 0.0
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            for idx, example in enumerate(dataset, start=1):
                futures.append(
                    executor.submit(_process_example, example, idx)
                )

            # Track *completions*, not submissions: the previous version
            # wrapped the submission loop in tqdm, so the bar hit 100%
            # before any evaluation finished.
            completed = as_completed(futures)
            if show_progress:
                completed = tqdm(
                    completed,
                    total=total_examples,
                    desc="Evaluating",
                )
            for future in completed:
                try:
                    is_correct, elapsed = future.result()
                    total_time += elapsed
                    if is_correct:
                        correct += 1
                except Exception as e:
                    logger.error(f"Error processing an example: {e}")

        overall_time = time.time() - start_time
        accuracy = (
            correct / total_examples if total_examples > 0 else 0.0
        )

        logger.info(
            f"Evaluation complete. Total examples: {total_examples}, Correct: {correct}, "
            f"Accuracy: {accuracy:.2%}, Overall Time: {overall_time:.2f}s, "
            f"Average per-example time: {total_time/total_examples if total_examples else 0:.2f}s"
        )

        results = {
            "total": total_examples,
            "correct": correct,
            "accuracy": accuracy,
            "overall_time": overall_time,
            "average_example_time": (
                total_time / total_examples
                if total_examples
                else math.nan
            ),
        }

        # Optionally persist results as simple "key: value" lines.
        if output_file:
            try:
                with open(output_file, "w") as f:
                    for key, value in results.items():
                        f.write(f"{key}: {value}\n")
                logger.info(f"Results saved to {output_file}")
            except Exception as e:
                logger.error(
                    f"Error saving results to {output_file}: {e}"
                )

        return results

    def _run_with_retry(self, task: str, max_retries: int) -> str:
        """
        Run the swarm task, retrying on failure with exponential backoff.

        Args:
            task (str): The task string.
            max_retries (int): Maximum number of retries (at most
                ``max_retries + 1`` total attempts are made).

        Returns:
            str: Swarm output.

        Raises:
            Exception: If all attempts fail.
        """
        attempt = 0
        while attempt <= max_retries:
            try:
                start = time.time()
                result = self.swarm.run(task)
                elapsed = time.time() - start
                logger.debug(
                    f"Task succeeded in {elapsed:.2f}s on attempt {attempt + 1}"
                )
                return result
            except Exception as e:
                logger.warning(
                    f"Task failed on attempt {attempt + 1}: {e}"
                )
                attempt += 1
                # True exponential backoff: 0.5s, 1s, 2s, ... (the
                # previous 0.5 * attempt schedule was linear despite
                # its "Exponential backoff" comment).
                time.sleep(0.5 * (2 ** (attempt - 1)))
        raise Exception("Max retries exceeded for task.")

    @staticmethod
    def _default_matcher(expected: str, output: str) -> bool:
        """
        Default answer matching: whitespace-normalized substring check.

        Both values are coerced to ``str`` first, because several
        preset datasets store answers as non-string types (ints, label
        keys), which previously crashed on ``.strip()``.

        Args:
            expected (str): The expected answer.
            output (str): The swarm output.

        Returns:
            bool: True if expected is found in output; otherwise, False.
        """
        expected_norm = " ".join(str(expected).strip().split())
        output_norm = " ".join(str(output).strip().split())
        return expected_norm in output_norm
# -----------------------------------------------------------------------------
# Example usage
# -----------------------------------------------------------------------------

@ -2,4 +2,4 @@ import uuid
def generate_swarm_id() -> str:
    """
    Return a unique swarm identifier of the form ``swarm-<32 hex chars>``.

    The id is backed by a random UUID4, rendered as dash-free hex and
    prefixed with ``swarm-`` so ids are recognizable at a glance.
    """
    # Merge residue left two return statements here (the plain
    # str(uuid.uuid4()) form shadowed the new prefixed one, making it
    # unreachable); keep only the intended prefixed form.
    return f"swarm-{uuid.uuid4().hex}"

@ -11,33 +11,31 @@ from swarms.prompts.multi_agent_collab_prompt import (
)
from swarms.structs.agent import Agent
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.council_as_judge import CouncilAsAJudge
from swarms.structs.csv_to_agent import AgentLoader
from swarms.structs.deep_research_swarm import DeepResearchSwarm
from swarms.structs.groupchat import GroupChat
from swarms.structs.heavy_swarm import HeavySwarm
from swarms.structs.hiearchical_swarm import HierarchicalSwarm
from swarms.structs.interactive_groupchat import InteractiveGroupChat
from swarms.structs.ma_utils import list_all_agents
from swarms.structs.majority_voting import MajorityVoting
from swarms.structs.malt import MALT
from swarms.structs.mixture_of_agents import MixtureOfAgents
from swarms.structs.multi_agent_router import MultiAgentRouter
from swarms.structs.rearrange import AgentRearrange
from swarms.structs.agent_rearrange import AgentRearrange
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
from swarms.structs.swarm_matcher import swarm_matcher
from swarms.telemetry.log_executions import log_execution
from swarms.utils.output_types import OutputType
from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.malt import MALT
from swarms.structs.deep_research_swarm import DeepResearchSwarm
from swarms.structs.council_judge import CouncilAsAJudge
from swarms.structs.interactive_groupchat import InteractiveGroupChat
from swarms.structs.heavy_swarm import HeavySwarm
from swarms.structs.ma_utils import list_all_agents
from swarms.utils.generate_keys import generate_api_key
from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.output_types import OutputType
logger = initialize_logger(log_folder="swarm_router")
SwarmType = Literal[
"AgentRearrange",
"MixtureOfAgents",
"SpreadSheetSwarm",
"SequentialWorkflow",
"ConcurrentWorkflow",
"GroupChat",
@ -146,7 +144,6 @@ class SwarmRouter:
Available Swarm Types:
- AgentRearrange: Optimizes agent arrangement for task execution
- MixtureOfAgents: Combines multiple agent types for diverse tasks
- SpreadSheetSwarm: Uses spreadsheet-like operations for task management
- SequentialWorkflow: Executes tasks sequentially
- ConcurrentWorkflow: Executes tasks in parallel
- "auto": Automatically selects best swarm type via embedding search
@ -179,7 +176,7 @@ class SwarmRouter:
description: str = "Routes your task to the desired swarm",
max_loops: int = 1,
agents: List[Union[Agent, Callable]] = [],
swarm_type: SwarmType = "SequentialWorkflow", # "SpreadSheetSwarm" # "auto"
swarm_type: SwarmType = "SequentialWorkflow", # "ConcurrentWorkflow" # "auto"
autosave: bool = False,
rearrange_flow: str = None,
return_json: bool = False,
@ -396,7 +393,6 @@ class SwarmRouter:
"MajorityVoting": self._create_majority_voting,
"GroupChat": self._create_group_chat,
"MultiAgentRouter": self._create_multi_agent_router,
"SpreadSheetSwarm": self._create_spreadsheet_swarm,
"SequentialWorkflow": self._create_sequential_workflow,
"ConcurrentWorkflow": self._create_concurrent_workflow,
}
@ -528,18 +524,6 @@ class SwarmRouter:
output_type=self.output_type,
)
def _create_spreadsheet_swarm(self, *args, **kwargs):
"""Factory function for SpreadSheetSwarm."""
return SpreadSheetSwarm(
name=self.name,
description=self.description,
agents=self.agents,
max_loops=self.max_loops,
autosave_on=self.autosave,
*args,
**kwargs,
)
def _create_sequential_workflow(self, *args, **kwargs):
"""Factory function for SequentialWorkflow."""
return SequentialWorkflow(
@ -580,7 +564,7 @@ class SwarmRouter:
**kwargs: Arbitrary keyword arguments.
Returns:
Union[AgentRearrange, MixtureOfAgents, SpreadSheetSwarm, SequentialWorkflow, ConcurrentWorkflow]:
Union[AgentRearrange, MixtureOfAgents, SequentialWorkflow, ConcurrentWorkflow]:
The instantiated swarm object.
Raises:

@ -1,28 +1,38 @@
from swarms.utils.check_all_model_max_tokens import (
check_all_model_max_tokens,
)
from swarms.utils.data_to_text import (
csv_to_text,
data_to_text,
json_to_text,
txt_to_text,
)
from swarms.utils.dynamic_context_window import (
dynamic_auto_chunking,
)
from swarms.utils.file_processing import (
create_file_in_folder,
load_json,
sanitize_file_path,
zip_workspace,
create_file_in_folder,
zip_folders,
zip_workspace,
)
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.utils.try_except_wrapper import try_except_wrapper
from swarms.utils.litellm_tokenizer import count_tokens
from swarms.utils.output_types import HistoryOutputType
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
from swarms.utils.check_all_model_max_tokens import (
check_all_model_max_tokens,
from swarms.utils.agent_loader import (
AgentLoader,
MarkdownAgentConfig,
load_agent_from_markdown,
load_agents_from_markdown,
)
from swarms.utils.litellm_tokenizer import count_tokens
from swarms.utils.output_types import HistoryOutputType
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.utils.try_except_wrapper import try_except_wrapper
__all__ = [
"csv_to_text",
@ -41,4 +51,9 @@ __all__ = [
"HistoryOutputType",
"history_output_formatter",
"check_all_model_max_tokens",
"AgentLoader",
"MarkdownAgentConfig",
"load_agent_from_markdown",
"load_agents_from_markdown",
"dynamic_auto_chunking",
]

@ -0,0 +1,447 @@
import os
import yaml
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from concurrent.futures import (
ThreadPoolExecutor,
as_completed,
TimeoutError,
)
from pydantic import BaseModel, Field, field_validator
from loguru import logger
# Lazy import to avoid circular dependency
# Default model configuration
DEFAULT_MODEL = "gpt-4o"
class MarkdownAgentConfig(BaseModel):
    """
    Configuration model for agents loaded from Claude Code markdown files.

    Fields are populated from the YAML frontmatter of a markdown file;
    anything absent falls back to the defaults below. ``system_prompt``
    is required and validated to be a non-empty string.
    """

    name: str
    description: str
    model_name: Optional[str] = "gpt-4o"
    temperature: Optional[float] = Field(default=0.1, ge=0.0, le=2.0)
    # Fixed: an MCP endpoint is a URL *string*, not an integer. The
    # parser assigns frontmatter.get("mcp_url") (a string) to this
    # field, so the previous Optional[int] rejected every real value.
    mcp_url: Optional[str] = None
    system_prompt: str
    max_loops: int = Field(default=1, ge=1)
    autosave: bool = False
    dashboard: bool = False
    verbose: bool = False
    dynamic_temperature_enabled: bool = False
    saved_state_path: Optional[str] = None
    user_name: str = "default_user"
    retry_attempts: int = Field(default=3, ge=1)
    context_length: int = Field(default=100000, ge=1000)
    return_step_meta: bool = False
    output_type: str = "str"
    auto_generate_prompt: bool = False
    artifacts_on: bool = False
    artifacts_file_extension: str = ".md"
    artifacts_output_path: str = ""
    streaming_on: bool = False

    @field_validator("system_prompt")
    @classmethod
    def validate_system_prompt(cls, v):
        """Reject non-string, empty, or whitespace-only system prompts."""
        if not v or not isinstance(v, str) or len(v.strip()) == 0:
            raise ValueError(
                "System prompt must be a non-empty string"
            )
        return v
class AgentLoader:
    """
    Loader for creating agents from markdown files using the Claude Code
    sub-agent format (YAML frontmatter + markdown body as system prompt).

    Features:
    - Single markdown file loading
    - Multiple markdown files loading (batch, optionally concurrent)
    - YAML frontmatter parsing
    - Agent configuration extraction from YAML metadata
    - Error handling and validation
    """

    def __init__(self):
        """Initialize the AgentLoader (stateless; kept for API symmetry)."""
        pass

    def parse_yaml_frontmatter(self, content: str) -> Dict[str, Any]:
        """
        Parse YAML frontmatter from markdown content.

        Args:
            content: Markdown content with potential YAML frontmatter.

        Returns:
            Dict with keys ``frontmatter`` (parsed YAML mapping; empty
            when absent, unterminated, or unparseable) and ``content``
            (the markdown body after the frontmatter, or the original
            content unchanged).
        """
        lines = content.split("\n")

        # Frontmatter must open on the very first line with "---".
        if not lines[0].strip() == "---":
            return {"frontmatter": {}, "content": content}

        # Locate the closing "---" marker.
        end_marker = -1
        for i, line in enumerate(lines[1:], 1):
            if line.strip() == "---":
                end_marker = i
                break

        if end_marker == -1:
            # Unterminated frontmatter: treat the whole file as content.
            return {"frontmatter": {}, "content": content}

        # Split into the YAML section and the remaining markdown body.
        frontmatter_text = "\n".join(lines[1:end_marker])
        remaining_content = "\n".join(lines[end_marker + 1 :]).strip()

        try:
            frontmatter_data = yaml.safe_load(frontmatter_text) or {}
        except yaml.YAMLError as e:
            logger.warning(f"Failed to parse YAML frontmatter: {e}")
            return {"frontmatter": {}, "content": content}

        return {
            "frontmatter": frontmatter_data,
            "content": remaining_content,
        }

    def parse_markdown_file(
        self, file_path: str
    ) -> MarkdownAgentConfig:
        """
        Parse a single markdown file into an agent configuration.

        Uses the Claude Code sub-agent YAML frontmatter format; the
        markdown body becomes the agent's system prompt.

        Args:
            file_path: Path to the markdown file.

        Returns:
            MarkdownAgentConfig with the parsed configuration.

        Raises:
            FileNotFoundError: If the file doesn't exist.
            ValueError: If parsing fails or no YAML frontmatter is found.
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(
                f"Markdown file {file_path} not found."
            )

        try:
            with open(file_path, "r", encoding="utf-8") as file:
                content = file.read()

            # Parse YAML frontmatter (Claude Code sub-agent format).
            yaml_result = self.parse_yaml_frontmatter(content)
            frontmatter = yaml_result["frontmatter"]
            remaining_content = yaml_result["content"]

            if not frontmatter:
                raise ValueError(
                    f"No YAML frontmatter found in {file_path}. File must use Claude Code sub-agent format with YAML frontmatter."
                )

            # Map frontmatter keys onto config fields, falling back to
            # sensible defaults ("model" is accepted as an alias of
            # "model_name"; the filename stem as agent name).
            config_data = {
                "name": frontmatter.get("name", Path(file_path).stem),
                "description": frontmatter.get(
                    "description", "Agent loaded from markdown"
                ),
                "model_name": frontmatter.get("model_name")
                or frontmatter.get("model", DEFAULT_MODEL),
                "temperature": frontmatter.get("temperature", 0.1),
                "max_loops": frontmatter.get("max_loops", 1),
                "mcp_url": frontmatter.get("mcp_url"),
                "system_prompt": remaining_content.strip(),
                "streaming_on": frontmatter.get(
                    "streaming_on", False
                ),
            }

            # Use default model if not specified.
            if not config_data["model_name"]:
                config_data["model_name"] = DEFAULT_MODEL

            logger.info(
                f"Successfully parsed markdown file: {file_path}"
            )
            return MarkdownAgentConfig(**config_data)

        except Exception as e:
            logger.error(
                f"Error parsing markdown file {file_path}: {str(e)}"
            )
            raise ValueError(
                f"Error parsing markdown file {file_path}: {str(e)}"
            )

    def load_agent_from_markdown(
        self, file_path: str, **kwargs
    ) -> "Agent":
        """
        Load a single agent from a markdown file.

        Args:
            file_path: Path to the markdown file.
            **kwargs: Overrides applied on top of the parsed config.

        Returns:
            Configured Agent instance.

        Raises:
            ValueError: If parsing or agent construction fails.
        """
        config = self.parse_markdown_file(file_path)

        # Apply caller overrides on top of the parsed configuration.
        config_dict = config.model_dump()
        config_dict.update(kwargs)

        # Map config fields to Agent parameters; None means "drop"
        # (the field is not an Agent constructor argument).
        field_mapping = {
            "name": "agent_name",  # name -> agent_name
            "description": None,  # not used by Agent
            "mcp_url": None,  # not used by Agent
        }

        agent_fields = {}
        for config_key, config_value in config_dict.items():
            if config_key in field_mapping:
                agent_key = field_mapping[config_key]
                if agent_key:  # Only include if mapped to something
                    agent_fields[agent_key] = config_value
            else:
                # Direct mapping for all remaining fields.
                agent_fields[config_key] = config_value

        try:
            # Lazy import to avoid circular dependency.
            from swarms.structs.agent import Agent

            logger.info(
                f"Creating agent '{config.name}' from {file_path}"
            )

            agent = Agent(**agent_fields)

            logger.info(
                f"Successfully created agent '{config.name}' from {file_path}"
            )
            return agent

        except Exception as e:
            import traceback

            logger.error(
                f"Error creating agent from {file_path}: {str(e)}"
            )
            logger.error(f"Traceback: {traceback.format_exc()}")
            raise ValueError(
                f"Error creating agent from {file_path}: {str(e)}"
            )

    def load_agents_from_markdown(
        self,
        file_paths: Union[str, List[str]],
        concurrent: bool = True,
        max_workers: Optional[int] = None,
        max_file_size_mb: float = 10.0,
        **kwargs,
    ) -> List["Agent"]:
        """
        Load multiple agents from markdown files, optionally in parallel.

        Args:
            file_paths: Single file path, directory path, or list of file paths.
            concurrent: Whether to process multiple files concurrently.
            max_workers: Maximum worker threads (auto-sized when None).
            max_file_size_mb: Per-file size limit to prevent memory issues.
            **kwargs: Overrides applied to every agent's configuration.

        Returns:
            List of configured Agent instances (files that fail to load
            are logged and skipped).

        Raises:
            FileNotFoundError: If a string path is neither file nor directory.
            ValueError: If ``file_paths`` is neither str nor list.
        """
        agents = []
        paths_to_process = []

        # Normalize the input into a flat list of candidate paths.
        if isinstance(file_paths, str):
            if os.path.isdir(file_paths):
                # Directory - find all .md files.
                md_files = list(Path(file_paths).glob("*.md"))
                paths_to_process = [str(f) for f in md_files]
            elif os.path.isfile(file_paths):
                # Single file.
                paths_to_process = [file_paths]
            else:
                raise FileNotFoundError(
                    f"Path {file_paths} not found."
                )
        elif isinstance(file_paths, list):
            paths_to_process = file_paths
        else:
            raise ValueError(
                "file_paths must be a string or list of strings"
            )

        # Size-filter into a new list. The previous implementation
        # called paths_to_process.remove() while iterating the same
        # list, which silently skips the element following each
        # removal — a classic mutate-while-iterating bug.
        valid_paths = []
        for file_path in paths_to_process:
            try:
                file_size_mb = os.path.getsize(file_path) / (
                    1024 * 1024
                )
            except OSError:
                logger.warning(
                    f"Could not check size of {file_path}, skipping validation"
                )
                valid_paths.append(file_path)
                continue
            if file_size_mb > max_file_size_mb:
                logger.warning(
                    f"Skipping {file_path}: size {file_size_mb:.2f}MB exceeds limit {max_file_size_mb}MB"
                )
            else:
                valid_paths.append(file_path)
        paths_to_process = valid_paths

        # Auto-size the thread pool for I/O-bound work: more threads
        # than cores, capped. os.cpu_count() may return None, which
        # previously raised TypeError on the multiplication.
        if max_workers is None and concurrent:
            max_workers = min(
                20,
                len(paths_to_process),
                (os.cpu_count() or 1) * 2,
            )

        if concurrent and len(paths_to_process) > 1:
            logger.info(
                f"Loading {len(paths_to_process)} agents concurrently with {max_workers} workers..."
            )

            with ThreadPoolExecutor(
                max_workers=max_workers
            ) as executor:
                # Submit all tasks.
                future_to_path = {
                    executor.submit(
                        self.load_agent_from_markdown,
                        file_path,
                        **kwargs,
                    ): file_path
                    for file_path in paths_to_process
                }

                # Collect results as they complete, bounding both the
                # whole batch and each individual agent.
                for future in as_completed(
                    future_to_path, timeout=300
                ):  # 5 minute timeout
                    file_path = future_to_path[future]
                    try:
                        agent = future.result(
                            timeout=60
                        )  # 1 minute per agent
                        agents.append(agent)
                        logger.info(
                            f"Successfully loaded agent from {file_path}"
                        )
                    except TimeoutError:
                        logger.error(f"Timeout loading {file_path}")
                        continue
                    except Exception as e:
                        logger.error(
                            f"Failed to load {file_path}: {str(e)}"
                        )
                        continue
        else:
            # Sequential processing for a single file or when
            # concurrency is disabled.
            logger.info(
                f"Loading {len(paths_to_process)} agents sequentially..."
            )
            for file_path in paths_to_process:
                try:
                    agent = self.load_agent_from_markdown(
                        file_path, **kwargs
                    )
                    agents.append(agent)
                except Exception as e:
                    logger.warning(
                        f"Skipping {file_path} due to error: {str(e)}"
                    )
                    continue

        logger.info(
            f"Successfully loaded {len(agents)} agents from markdown files"
        )
        return agents

    def load_single_agent(self, file_path: str, **kwargs) -> "Agent":
        """
        Convenience method for loading a single agent.

        Uses the Claude Code sub-agent YAML frontmatter format.

        Args:
            file_path: Path to markdown file with YAML frontmatter.
            **kwargs: Additional configuration overrides.

        Returns:
            Configured Agent instance.
        """
        return self.load_agent_from_markdown(file_path, **kwargs)

    def load_multiple_agents(
        self, file_paths: Union[str, List[str]], **kwargs
    ) -> List["Agent"]:
        """
        Convenience method for loading multiple agents.

        Uses the Claude Code sub-agent YAML frontmatter format.

        Args:
            file_paths: Directory path or list of file paths with YAML frontmatter.
            **kwargs: Additional configuration overrides.

        Returns:
            List of configured Agent instances.
        """
        return self.load_agents_from_markdown(file_paths, **kwargs)
# Convenience functions
def load_agent_from_markdown(file_path: str, **kwargs) -> "Agent":
    """
    Module-level convenience wrapper: load one agent from a markdown
    file written in the Claude Code YAML-frontmatter format.

    Args:
        file_path: Path to a markdown file with YAML frontmatter.
        **kwargs: Additional configuration overrides.

    Returns:
        Configured Agent instance.
    """
    # Delegate to a throwaway loader instance; AgentLoader is stateless.
    return AgentLoader().load_single_agent(file_path, **kwargs)
def load_agents_from_markdown(
    file_paths: Union[str, List[str]],
    concurrent: bool = True,
    max_file_size_mb: float = 10.0,
    **kwargs,
) -> List["Agent"]:
    """
    Module-level convenience wrapper: load several agents from markdown
    files written in the Claude Code YAML-frontmatter format.

    Args:
        file_paths: Directory path or list of file paths with YAML frontmatter.
        concurrent: Whether to process multiple files concurrently.
        max_file_size_mb: Per-file size limit to prevent memory issues.
        **kwargs: Additional configuration overrides.

    Returns:
        List of configured Agent instances.
    """
    # Delegate to a throwaway loader instance; AgentLoader is stateless.
    return AgentLoader().load_agents_from_markdown(
        file_paths,
        concurrent=concurrent,
        max_file_size_mb=max_file_size_mb,
        **kwargs,
    )

@ -0,0 +1,85 @@
import traceback
from loguru import logger
from swarms.utils.litellm_tokenizer import count_tokens
from typing import Optional
def dynamic_auto_chunking_(
    content: str,
    context_length: Optional[int] = 8192,
    tokenizer_model_name: Optional[str] = "gpt-4.1",
):
    """
    Trim the *beginning* of ``content`` so the remainder fits within
    ``context_length`` tokens, keeping the most recent conversation tail.

    Args:
        content (str): The conversation history as a string.
        context_length (int): The maximum number of tokens allowed.
        tokenizer_model_name (str): The name of the tokenizer model to use.

    Returns:
        str: The suffix of ``content`` that fits within
        ``context_length`` tokens.
    """
    total_tokens = count_tokens(
        text=content, model=tokenizer_model_name
    )
    if total_tokens <= context_length:
        # Already within budget — nothing to trim.
        return content
    # Over budget: remove characters from the beginning. Binary-search
    # the cut index rather than trimming iteratively, since each token
    # count is itself a relatively expensive call.
    target_tokens = context_length
    current_string = content
    # Invariant: current_string is the longest suffix found so far that
    # fits; left/right bracket the candidate cut position.
    left, right = 0, len(content)
    while left < right:
        mid = (left + right) // 2
        test_string = content[mid:]
        if not test_string:
            break
        test_tokens = count_tokens(
            text=test_string, model=tokenizer_model_name
        )
        if test_tokens <= target_tokens:
            # Suffix fits — try keeping more by cutting earlier.
            right = mid
            current_string = test_string
        else:
            # Still too long — cut further into the string.
            left = mid + 1
    # NOTE(review): if no non-empty suffix fits (very small
    # context_length), current_string is still the full content and may
    # exceed the limit — confirm whether "" would be preferable.
    return current_string
def dynamic_auto_chunking(
    content: str,
    context_length: Optional[int] = 8192,
    tokenizer_model_name: Optional[str] = "gpt-4.1",
):
    """
    Safe wrapper around ``dynamic_auto_chunking_``: trim the
    conversation history to fit within ``context_length`` tokens,
    falling back to the untouched content if chunking fails.

    Args:
        content (str): The conversation history as a string.
        context_length (int): The maximum number of tokens allowed.
        tokenizer_model_name (str): The name of the tokenizer model to use.

    Returns:
        str: The (possibly trimmed) conversation history.
    """
    try:
        chunked = dynamic_auto_chunking_(
            content=content,
            context_length=context_length,
            tokenizer_model_name=tokenizer_model_name,
        )
    except Exception as e:
        # Best-effort: log the failure and hand back the original text.
        logger.error(
            f"Dynamic auto chunking failed: {e} Traceback: {traceback.format_exc()}"
        )
        return content
    return chunked

@ -397,7 +397,7 @@ class Formatter:
def print_agent_dashboard(
self,
agents_data: List[Dict[str, Any]],
title: str = "🤖 Agent Dashboard",
title: str = "ConcurrentWorkflow Dashboard",
is_final: bool = False,
) -> None:
"""

@ -176,6 +176,12 @@ class LiteLLM:
litellm.drop_params = True
# Add system prompt if present
if self.system_prompt is not None:
self.messages.append(
{"role": "system", "content": self.system_prompt}
)
# Store additional args and kwargs for use in run method
self.init_args = args
self.init_kwargs = kwargs
@ -232,8 +238,8 @@ class LiteLLM:
def _prepare_messages(
self,
task: str,
img: str = None,
task: Optional[str] = None,
img: Optional[str] = None,
):
"""
Prepare the messages for the given task.
@ -246,24 +252,14 @@ class LiteLLM:
"""
self.check_if_model_supports_vision(img=img)
# Initialize messages
messages = []
# Add system prompt if present
if self.system_prompt is not None:
messages.append(
{"role": "system", "content": self.system_prompt}
)
# Handle vision case
if img is not None:
messages = self.vision_processing(
task=task, image=img, messages=messages
)
else:
messages.append({"role": "user", "content": task})
self.vision_processing(task=task, image=img)
return messages
if task is not None:
self.messages.append({"role": "user", "content": task})
return self.messages
def anthropic_vision_processing(
self, task: str, image: str, messages: list
@ -879,12 +875,18 @@ class LiteLLM:
5. Default parameters
"""
try:
messages = self._prepare_messages(task=task, img=img)
self.messages.append({"role": "user", "content": task})
if img is not None:
self.messages = self.vision_processing(
task=task, image=img
)
# Base completion parameters
completion_params = {
"model": self.model_name,
"messages": messages,
"messages": self.messages,
"stream": self.stream,
"max_tokens": self.max_tokens,
"caching": self.caching,

@ -7,7 +7,7 @@ from loguru import logger
from swarm_models import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.rearrange import AgentRearrange
from swarms.structs.agent_rearrange import AgentRearrange
class TestResult:

@ -1,216 +0,0 @@
from swarms.structs.matrix_swarm import AgentMatrix, AgentOutput
from swarms import Agent
def create_test_matrix(rows: int, cols: int) -> AgentMatrix:
    """Build a ``rows`` x ``cols`` AgentMatrix of uniquely named test agents."""
    grid = []
    for i in range(rows):
        row = []
        for j in range(cols):
            # Name encodes the (row, col) position for later identity checks.
            row.append(
                Agent(
                    agent_name=f"TestAgent-{i}-{j}",
                    system_prompt="Test prompt",
                )
            )
        grid.append(row)
    return AgentMatrix(grid)
def test_init():
    """Test AgentMatrix initialization"""
    # A well-formed 2x2 grid of agents must construct cleanly.
    matrix = create_test_matrix(2, 2)
    assert isinstance(matrix, AgentMatrix)
    assert len(matrix.agents) == 2
    assert len(matrix.agents[0]) == 2
    # Both malformed inputs (non-agent elements, empty grid) are rejected.
    for bad_grid in ([[1, 2], [3, 4]], []):
        try:
            AgentMatrix(bad_grid)
            assert False, "Should raise ValueError"
        except ValueError:
            pass
def test_transpose():
    """Test matrix transpose operation"""
    original = create_test_matrix(2, 3)
    flipped = original.transpose()
    # Dimensions swap: a 2x3 matrix becomes 3x2.
    assert len(flipped.agents) == 3
    assert len(flipped.agents[0]) == 2
    # The agent at (i, j) must now sit at (j, i).
    for i in range(2):
        for j in range(3):
            assert (
                original.agents[i][j].agent_name
                == flipped.agents[j][i].agent_name
            )
def test_add():
    """Test matrix addition"""
    first = create_test_matrix(2, 2)
    second = create_test_matrix(2, 2)
    combined = first.add(second)
    # Shape of the sum matches the operands.
    assert len(combined.agents) == 2
    assert len(combined.agents[0]) == 2
    # Adding a matrix of mismatched shape must be rejected.
    mismatched = create_test_matrix(2, 3)
    try:
        first.add(mismatched)
        assert False, "Should raise ValueError"
    except ValueError:
        pass
def test_scalar_multiply():
    """Test scalar multiplication"""
    factor = 3
    source = create_test_matrix(2, 2)
    product = source.scalar_multiply(factor)
    # Row count is unchanged; each row becomes `factor` times wider.
    assert len(product.agents) == 2
    assert len(product.agents[0]) == 2 * factor
    # Each source agent must appear `factor` consecutive times in its row.
    for i, row in enumerate(product.agents):
        for col, agent in enumerate(row):
            expected = source.agents[i][col // factor]
            assert agent.agent_name == expected.agent_name
def test_multiply():
    """Test matrix multiplication"""
    left = create_test_matrix(2, 3)
    right = create_test_matrix(3, 2)
    queries = ["test query 1", "test query 2"]
    product = left.multiply(right, queries)
    # Result shape: rows of `left` by columns of `right`.
    assert len(product) == 2
    assert len(product[0]) == 2
    # Every cell must be a well-formed AgentOutput record.
    for row in product:
        for cell in row:
            assert isinstance(cell, AgentOutput)
            assert isinstance(cell.input_query, str)
            assert isinstance(cell.metadata, dict)
def test_subtract():
    """Test matrix subtraction"""
    minuend = create_test_matrix(2, 2)
    subtrahend = create_test_matrix(2, 2)
    difference = minuend.subtract(subtrahend)
    # Shape of the difference matches the operands.
    assert len(difference.agents) == 2
    assert len(difference.agents[0]) == 2
def test_identity():
    """Test identity matrix creation"""
    base = create_test_matrix(3, 3)
    eye = base.identity(3)
    assert len(eye.agents) == 3
    assert len(eye.agents[0]) == 3
    for i in range(3):
        for j in range(3):
            if i == j:
                # Diagonal entries are carried over from the source matrix.
                assert (
                    eye.agents[i][j].agent_name
                    == base.agents[i][j].agent_name
                )
            else:
                # Off-diagonal entries are placeholder "zero" agents.
                assert eye.agents[i][j].agent_name.startswith(
                    "Zero-Agent"
                )
def test_determinant():
    """Test determinant calculation"""
    # Square matrices of size 1 and 2 must both yield a value.
    for size in (1, 2):
        det = create_test_matrix(size, size).determinant()
        assert det is not None
    # A non-square matrix has no determinant and must raise.
    rectangular = create_test_matrix(2, 3)
    try:
        rectangular.determinant()
        assert False, "Should raise ValueError"
    except ValueError:
        pass
def test_save_to_file(tmp_path):
    """Test saving matrix to file"""
    import json
    import os

    matrix = create_test_matrix(2, 2)
    file_path = os.path.join(tmp_path, "test_matrix.json")
    matrix.save_to_file(file_path)
    assert os.path.exists(file_path)
    # Round-trip: the JSON on disk must describe the same 2x2 layout.
    with open(file_path, "r") as f:
        data = json.load(f)
    for key in ("agents", "outputs"):
        assert key in data
    assert len(data["agents"]) == 2
    assert len(data["agents"][0]) == 2
def run_all_tests():
    """Run every fixture-free test, printing a pass/fail line for each."""
    # test_save_to_file is excluded: it needs the pytest tmp_path fixture.
    test_functions = [
        test_init,
        test_transpose,
        test_add,
        test_scalar_multiply,
        test_multiply,
        test_subtract,
        test_identity,
        test_determinant,
    ]
    for test_func in test_functions:
        try:
            test_func()
        except AssertionError as e:
            print(f"{test_func.__name__} failed: {str(e)}")
        except Exception as e:
            print(
                f"{test_func.__name__} failed with exception: {str(e)}"
            )
        else:
            print(f"{test_func.__name__} passed")
# Allow running this module directly as a lightweight, self-reporting
# test script (bypasses pytest; skips fixture-dependent tests).
if __name__ == "__main__":
    run_all_tests()
Loading…
Cancel
Save