diff --git a/.gitignore b/.gitignore
index b2561ce1..e1c108a0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@ dataframe/
target/
Cargo.lock
.pytest_cache
+databases
static/generated
conversations/
next_swarms_update.txt
diff --git a/README.md b/README.md
index f132db1e..15a639c3 100644
--- a/README.md
+++ b/README.md
@@ -165,6 +165,24 @@ $ cd swarms
$ pip install -e .
```
+### Using Docker
+
+The easiest way to get started with Swarms is using our pre-built Docker image:
+
+```bash
+# Pull and run the latest image
+$ docker pull kyegomez/swarms:latest
+$ docker run --rm kyegomez/swarms:latest python -c "import swarms; print('Swarms is ready!')"
+
+# Run interactively for development
+$ docker run -it --rm -v $(pwd):/app kyegomez/swarms:latest bash
+
+# Using docker-compose (recommended for development)
+$ docker-compose up -d
+```
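+
+The `docker-compose` path assumes a compose file at the repository root. A minimal sketch of what such a file could look like for development; the service name, volume mount, and `.env` usage are illustrative assumptions, not the repository's actual file:
+
+```yaml
+services:
+  swarms:
+    image: kyegomez/swarms:latest
+    working_dir: /app
+    volumes:
+      - .:/app              # mount your checkout for live editing
+    env_file:
+      - .env                # API keys, e.g. OPENAI_API_KEY
+    command: sleep infinity # keep the container up for docker exec
+```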
+
+For more Docker options and advanced usage, see our [Docker documentation](/scripts/docker/DOCKER.md).
+
---
## Environment Configuration
@@ -714,6 +732,7 @@ Explore comprehensive examples and tutorials to learn how to use Swarms effectiv
| Application | Description | Link |
|-------------|-------------|------|
+| Advanced Research System | Multi-agent research system inspired by Anthropic's research methodology | [AdvancedResearch](https://github.com/The-Swarm-Corporation/AdvancedResearch) |
| Swarms DAO | Decentralized autonomous organization | [Swarms DAO](https://docs.swarms.world/en/latest/swarms/examples/swarms_dao/) |
| Browser Agents | Web automation with agents | [Browser Agents](https://docs.swarms.world/en/latest/swarms/examples/swarms_of_browser_agents/) |
| VLLM Agents | High-performance model serving | [VLLM Agents](https://docs.swarms.world/en/latest/swarms/examples/vllm/) |
diff --git a/docs/contributors/docs.md b/docs/contributors/docs.md
index eca7d778..377658d8 100644
--- a/docs/contributors/docs.md
+++ b/docs/contributors/docs.md
@@ -318,7 +318,7 @@ Schedule quarterly audits to refine structure and content across all repositorie
Promote your contributions via:
-- **Swarms Discord**: https://discord.gg/jM3Z6M9uMq
+- **Swarms Discord**: https://discord.gg/EamjgSaEQf
- **Swarms Telegram**: https://t.me/swarmsgroupchat
diff --git a/docs/contributors/environment_setup.md b/docs/contributors/environment_setup.md
index 18d0d48f..7145b6d2 100644
--- a/docs/contributors/environment_setup.md
+++ b/docs/contributors/environment_setup.md
@@ -623,7 +623,7 @@ If you encounter issues:
1. **Check the FAQ** in the main documentation
2. **Search existing issues** on GitHub
-3. **Ask in the Discord community**: [discord.gg/jM3Z6M9uMq](https://discord.gg/jM3Z6M9uMq)
+3. **Ask in the Discord community**: [discord.gg/EamjgSaEQf](https://discord.gg/EamjgSaEQf)
4. **Create a GitHub issue** with:
- Your operating system
- Python version
diff --git a/docs/examples/agent_stream.md b/docs/examples/agent_stream.md
index 8fbfd98d..58bc41d4 100644
--- a/docs/examples/agent_stream.md
+++ b/docs/examples/agent_stream.md
@@ -119,7 +119,7 @@ If you'd like technical support, join our Discord below and stay updated on our
|----------|------|-------------|
| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| 💬 Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
diff --git a/docs/examples/community_resources.md b/docs/examples/community_resources.md
new file mode 100644
index 00000000..fa1875c9
--- /dev/null
+++ b/docs/examples/community_resources.md
@@ -0,0 +1,42 @@
+# Community Resources
+
+Welcome to the Community Resources page! Here you'll find a curated collection of articles, tutorials, and guides created by the Swarms community and core contributors.
+
+These resources cover a wide range of topics, including building your first agent, advanced multi-agent architectures, API integrations, and using Swarms with both Python and Rust. Whether you're a beginner or an experienced developer, these links will help you deepen your understanding and accelerate your development with the Swarms framework.
+
+
+## Swarms Python
+
+| Title | Description | Link |
+|-------|-------------|------|
+| **Build Your First Swarms Agent in Under 10 Minutes** | Step-by-step beginner guide to creating your first Swarms agent quickly. | [Read Article](https://medium.com/@devangvashistha/build-your-first-swarms-agent-in-under-10-minutes-ddff23b6c703) |
+| **Building Multi-Agent Systems with GPT-5 and The Swarms Framework** | Learn how to leverage GPT-5 with Swarms for advanced multi-agent system design. | [Read Article](https://medium.com/@kyeg/building-multi-agent-systems-with-gpt-5-and-the-swarms-framework-e52ffaf0fa4f) |
+| **Learn How to Build Production-Grade Agents with OpenAI's Latest Model: GPT-OSS Locally and in the Cloud** | Guide to building robust agents using OpenAI's GPT-OSS, both locally and in cloud environments. | [Read Article](https://medium.com/@kyeg/learn-how-to-build-production-grade-agents-with-openais-latest-model-gpt-oss-locally-and-in-the-c5826c7cca7c) |
+| **Building Gemini 2.5 Agents with Swarms Framework** | Tutorial on integrating Gemini 2.5 models into Swarms agents for enhanced capabilities. | [Read Article](https://medium.com/@kyeg/building-gemini-2-5-agents-with-swarms-framework-20abdcf82cac) |
+| **Enterprise Developer Guide: Leveraging OpenAI's o3 and o4-mini Models with The Swarms Framework** | Enterprise-focused guide to using OpenAI's o3 and o4-mini models within Swarms. | [Read Article](https://medium.com/@kyeg/enterprise-developer-guide-leveraging-openais-o3-and-o4-mini-models-with-the-swarms-framework-89490c57820a) |
+| **Enneagram of Thoughts Using the Swarms Framework: A Multi-Agent Approach to Holistic Problem Solving** | Explores using Swarms for holistic, multi-perspective problem solving via the Enneagram model. | [Read Article](https://medium.com/@kyeg/enneagram-of-thoughts-using-the-swarms-framework-a-multi-agent-approach-to-holistic-problem-c26c7df5e7eb) |
+| **Building Production-Grade Financial Agents with tickr-agent: An Enterprise Solution for Comprehensive Stock Analysis** | How to build advanced financial analysis agents using tickr-agent and Swarms. | [Read Article](https://medium.com/@kyeg/building-production-grade-financial-agents-with-tickr-agent-an-enterprise-solution-for-db867ec93193) |
+| **Automating Your Startup's Financial Analysis Using AI Agents: A Comprehensive Guide** | Comprehensive guide to automating your startup's financial analysis with AI agents using Swarms. | [Read Article](https://medium.com/@kyeg/automating-your-startups-financial-analysis-using-ai-agents-a-comprehensive-guide-b2fa0e2c09d5) |
+| **Managing Thousands of Agent Outputs at Scale with The Spreadsheet Swarm: All-New Multi-Agent Architecture** | Learn how to manage and scale thousands of agent outputs efficiently using the Spreadsheet Swarm architecture. | [Read Article](https://medium.com/@kyeg/managing-thousands-of-agent-outputs-at-scale-with-the-spreadsheet-swarm-all-new-multi-agent-f16f5f40fd5a) |
+| **Introducing GPT-4o Mini: The Future of Cost-Efficient AI Intelligence** | Discover the capabilities and advantages of GPT-4o Mini for building cost-effective, intelligent agents. | [Read Article](https://medium.com/@kyeg/introducing-gpt-4o-mini-the-future-of-cost-efficient-ai-intelligence-a3e3fe78d939) |
+| **Introducing Swarm's GraphWorkflow: A Faster, Simpler, and Superior Alternative to LangGraph** | Learn about Swarms' GraphWorkflow, a powerful alternative to LangGraph that offers improved performance and simplicity for building complex agent workflows. | [Read Article](https://medium.com/@kyeg/introducing-swarms-graphworkflow-a-faster-simpler-and-superior-alternative-to-langgraph-5c040225a4f1) |
+
+
+## Swarms API
+
+| Title | Description | Link |
+|-------|-------------|------|
+| **Specialized Healthcare Agents with Swarms Agent Completions API** | Guide to building healthcare-focused agents using the Swarms API. | [Read Article](https://medium.com/@kyeg/specialized-healthcare-agents-with-swarms-agent-completions-api-b56d067e3b11) |
+| **Building Multi-Agent Systems for Finance & Accounting with the Swarms API: A Technical Guide** | Technical walkthrough for creating finance and accounting multi-agent systems with the Swarms API. | [Read Article](https://medium.com/@kyeg/building-multi-agent-systems-for-finance-accounting-with-the-swarms-api-a-technical-guide-bf6f7005b708) |
+
+## Swarms Rust
+
+| Title | Description | Link |
+|-------|-------------|------|
+| **Building Medical Multi-Agent Systems with Swarms Rust: A Comprehensive Tutorial** | Comprehensive tutorial for developing medical multi-agent systems using Swarms Rust. | [Read Article](https://medium.com/@kyeg/building-medical-multi-agent-systems-with-swarms-rust-a-comprehensive-tutorial-1e8e060601f9) |
+| **Building Production-Grade Agentic Applications with Swarms Rust: A Comprehensive Tutorial** | Learn to build robust, production-ready agentic applications with Swarms Rust. | [Read Article](https://medium.com/@kyeg/building-production-grade-agentic-applications-with-swarms-rust-a-comprehensive-tutorial-bb567c02340f) |
+
+
+## YouTube Videos
+
+- [Swarms Playlist by Swarms Founder Kye Gomez](https://www.youtube.com/watch?v=FzbBRbaqsG8&list=PLphplB7PcU1atnmrUl7lJ5bmGXR7R4lhA)
\ No newline at end of file
diff --git a/docs/examples/cookbook_index.md b/docs/examples/cookbook_index.md
index b16aee96..624d82e6 100644
--- a/docs/examples/cookbook_index.md
+++ b/docs/examples/cookbook_index.md
@@ -43,7 +43,7 @@ This index provides a categorized list of examples and tutorials for using the S
|----------|------|-------------|
| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| 💬 Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
diff --git a/docs/examples/index.md b/docs/examples/index.md
index 7f288e74..a23f7b06 100644
--- a/docs/examples/index.md
+++ b/docs/examples/index.md
@@ -171,6 +171,7 @@ This index organizes **100+ production-ready examples** from our [Swarms Example
### Research and Deep Analysis
| Category | Example | Description |
|----------|---------|-------------|
+| Advanced Research | [Advanced Research System](https://github.com/The-Swarm-Corporation/AdvancedResearch) | Multi-agent research system inspired by Anthropic's research methodology with orchestrator-worker architecture |
| Deep Research | [Deep Research Example](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/deep_research_examples/deep_research_example.py) | Comprehensive research system with multiple specialized agents |
| Deep Research Swarm | [Deep Research Swarm](https://github.com/kyegomez/swarms/blob/master/examples/multi_agent/deep_research_examples/deep_research_swarm_example.py) | Swarm-based deep research with collaborative analysis |
| Scientific Agents | [Deep Research Swarm Example](https://github.com/kyegomez/swarms/blob/master/examples/demos/scient_agents/deep_research_swarm_example.py) | Scientific research swarm for academic and research applications |
diff --git a/docs/examples/paper_implementations.md b/docs/examples/paper_implementations.md
index e9211a7d..b4c889d0 100644
--- a/docs/examples/paper_implementations.md
+++ b/docs/examples/paper_implementations.md
@@ -1,6 +1,8 @@
# Multi-Agent Paper Implementations
-At Swarms, we are passionate about democratizing access to cutting-edge multi-agent research and making advanced AI collaboration accessible to everyone. Our mission is to bridge the gap between academic research and practical implementation by providing production-ready, open-source implementations of the most impactful multi-agent research papers.
+At Swarms, we are passionate about democratizing access to cutting-edge multi-agent research and making advanced agent collaboration accessible to everyone.
+
+Our mission is to bridge the gap between academic research and practical implementation by providing production-ready, open-source implementations of the most impactful multi-agent research papers.
### Why Multi-Agent Research Matters
@@ -38,10 +40,6 @@ This documentation showcases our comprehensive collection of multi-agent researc
Whether you're a researcher looking to validate findings, a developer building production systems, or a student learning about multi-agent AI, you'll find valuable resources here to advance your work.
-### Join the Multi-Agent Revolution
-
-We invite you to explore these implementations, contribute to our research efforts, and help shape the future of collaborative AI. Together, we can unlock the full potential of multi-agent systems and create AI that truly works as a team.
-
## Implemented Research Papers
| Paper Name | Description | Original Paper | Implementation | Status | Key Features |
@@ -52,79 +50,12 @@ We invite you to explore these implementations, contribute to our research effor
| **[Mixture of Agents (MoA)](https://arxiv.org/abs/2406.04692)** | A sophisticated multi-agent architecture that implements parallel processing with iterative refinement, combining diverse expert agents for comprehensive analysis. | Multi-agent collaboration concepts | [`swarms.structs.moa`](https://docs.swarms.world/en/latest/swarms/structs/moa/) | ✅ Complete | Parallel processing, expert agent combination, iterative refinement, state-of-the-art performance |
| **Deep Research Swarm** | A production-grade research system that conducts comprehensive analysis across multiple domains using parallel processing and advanced AI agents. | Research methodology | [`swarms.structs.deep_research_swarm`](https://docs.swarms.world/en/latest/swarms/structs/deep_research_swarm/) | ✅ Complete | Parallel search processing, multi-agent coordination, information synthesis, concurrent execution |
| **Agent-as-a-Judge** | An evaluation framework that uses agents to evaluate other agents, implementing the "Agent-as-a-Judge: Evaluate Agents with Agents" methodology. | [arXiv:2410.10934](https://arxiv.org/abs/2410.10934) | [`swarms.agents.agent_judge`](https://docs.swarms.world/en/latest/swarms/agents/agent_judge/) | ✅ Complete | Agent evaluation, quality assessment, automated judging, performance metrics |
-
-## Additional Research Resources
+| **Advanced Research System** | An enhanced implementation of the orchestrator-worker pattern from Anthropic's paper "How we built our multi-agent research system", featuring parallel execution, LLM-as-judge evaluation, and professional report generation. | [Anthropic Paper](https://www.anthropic.com/engineering/built-multi-agent-research-system) | [GitHub Repository](https://github.com/The-Swarm-Corporation/AdvancedResearch) | ✅ Complete | Orchestrator-worker architecture, parallel execution, Exa API integration, export capabilities |
### Multi-Agent Papers Compilation
We maintain a comprehensive list of multi-agent research papers at: [awesome-multi-agent-papers](https://github.com/kyegomez/awesome-multi-agent-papers)
-### Research Lists
-
-Our research compilation includes:
-
-- **Projects**: ModelScope-Agent, Gorilla, BMTools, LMQL, Langchain, MetaGPT, AutoGPT, and more
-
-- **Research Papers**: BOLAA, ToolLLM, Communicative Agents, Mind2Web, Voyager, Tree of Thoughts, and many others
-
-- **Blog Articles**: Latest insights and developments in autonomous agents
-
-- **Talks**: Presentations from leading researchers like Geoffrey Hinton and Andrej Karpathy
-
-
-## Implementation Details
-
-### MALT Framework
-
-The MALT implementation provides:
-
-- **Three-Agent Architecture**: Creator, Verifier, and Refiner agents
-
-- **Structured Workflow**: Coordinated task execution with conversation history
-
-- **Reliability Features**: Error handling, validation, and quality assurance
-
-- **Extensibility**: Custom agent integration and configuration options
-
-
-### MAI-DxO System
-
-The MAI Diagnostic Orchestrator features:
-
-- **Virtual Physician Panel**: Multiple specialized medical agents
-
-- **Cost Optimization**: Efficient diagnostic workflows
-
-- **Iterative Refinement**: Continuous improvement of diagnoses
-
-- **Medical Expertise**: Domain-specific knowledge and reasoning
-
-
-### AI-CoScientist Framework
-
-The AI-CoScientist implementation includes:
-
-- **Tournament-Based Selection**: Elo rating system for hypothesis ranking
-
-- **Peer Review System**: Comprehensive evaluation of scientific proposals
-
-- **Hypothesis Evolution**: Iterative refinement based on feedback
-
-- **Diversity Control**: Proximity analysis to maintain hypothesis variety
-
-
-### Mixture of Agents (MoA)
-
-The MoA architecture provides:
-
-- **Parallel Processing**: Multiple agents working simultaneously
-
-- **Expert Specialization**: Domain-specific agent capabilities
-
-- **Iterative Refinement**: Continuous improvement through collaboration
-
-- **State-of-the-Art Performance**: Achieving superior results through collective intelligence
-
## Contributing
@@ -156,7 +87,7 @@ If you use any of these implementations in your research, please cite the origin
Join our community to stay updated on the latest multi-agent research implementations:
-- **Discord**: [Join our community](https://discord.gg/jM3Z6M9uMq)
+- **Discord**: [Join our community](https://discord.gg/EamjgSaEQf)
- **Documentation**: [docs.swarms.world](https://docs.swarms.world)
diff --git a/docs/examples/smart_database.md b/docs/examples/smart_database.md
new file mode 100644
index 00000000..f8681d55
--- /dev/null
+++ b/docs/examples/smart_database.md
@@ -0,0 +1,1063 @@
+# Smart Database Powered by Hierarchical Multi-Agent Workflow
+
+This example implements a fully autonomous database management system using a hierarchical multi-agent architecture: specialized agents handle the different database operations, coordinated by a Database Director agent.
+
+## Features
+
+| Feature | Description |
+|---------------------------------------|-----------------------------------------------------------------------------------------------|
+| Autonomous Database Management | Complete database lifecycle management, including setup and ongoing management of databases. |
+| Intelligent Task Distribution | Automatic assignment of tasks to appropriate specialist agents. |
+| Table Creation with Schema Validation | Ensures tables are created with correct structure, schema enforcement, and data integrity. |
+| Data Insertion and Updates | Handles adding new data and updating existing records efficiently, supporting JSON input. |
+| Complex Query Execution | Executes advanced and optimized queries for data retrieval and analysis. |
+| Schema Modifications | Supports altering table structures and database schemas as needed. |
+| Hierarchical Agent Coordination | Utilizes a multi-agent system for orchestrated, intelligent task execution. |
+| Security | Parameterized queries and SELECT-only query validation help protect against SQL injection. |
+| Performance Optimization | Query optimization and efficient data operations for high performance. |
+| Comprehensive Error Handling | Robust error management and reporting throughout all operations. |
+| Multi-format Data Support | Flexible query parameters and support for JSON-based data insertion. |
+
+## Architecture
+
+### Multi-Agent Architecture
+
+```
+Database Director (Coordinator)
+├── Database Creator (Creates databases)
+├── Table Manager (Manages table schemas)
+├── Data Operations (Handles data insertion/updates)
+└── Query Specialist (Executes queries and retrieval)
+```
+
+### Agent Specializations
+
+| Agent | Description |
+|------------------------|-----------------------------------------------------------------------------------------------|
+| **Database Director** | Orchestrates all database operations and coordinates specialist agents |
+| **Database Creator** | Specializes in creating and initializing databases |
+| **Table Manager** | Expert in table creation, schema design, and structure management |
+| **Data Operations** | Handles data insertion, updates, and manipulation |
+| **Query Specialist** | Manages database queries, data retrieval, and optimization |
+
+
+## Agent Tools
+
+| Function | Description |
+|----------|-------------|
+| **`create_database(database_name, database_path)`** | Creates new SQLite databases |
+| **`create_table(database_path, table_name, schema)`** | Creates tables with specified schemas |
+| **`insert_data(database_path, table_name, data)`** | Inserts data into tables |
+| **`query_database(database_path, query, params)`** | Executes SELECT queries |
+| **`update_table_data(database_path, table_name, update_data, where_clause)`** | Updates existing data |
+| **`get_database_schema(database_path)`** | Retrieves comprehensive schema information |
+
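+Each tool is a plain Python function that returns a JSON string, so you can exercise the tools directly, with no agents involved, as a quick sanity check. A minimal sketch, assuming the `smart_database_swarm.py` module from the Code section below is on your path:
+
+```python
+from smart_database_swarm import (
+    create_database,
+    create_table,
+    insert_data,
+    query_database,
+)
+
+# Every call returns a JSON string describing the outcome
+print(create_database("demo_db"))  # creates ./databases/demo_db.db
+print(create_table(
+    "./databases/demo_db.db",
+    "users",
+    "id INTEGER PRIMARY KEY, name TEXT NOT NULL",
+))
+print(insert_data(
+    "./databases/demo_db.db",
+    "users",
+    '[{"name": "Ada"}, {"name": "Grace"}]',
+))
+print(query_database("./databases/demo_db.db", "SELECT * FROM users"))
+```
+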
+## Install
+
+```bash
+pip install -U swarms loguru
+```
+
+`sqlite3` ships with the Python standard library, so it does not need to be installed separately.
+
+## ENV
+
+```
+WORKSPACE_DIR="agent_workspace"
+ANTHROPIC_API_KEY=""
+OPENAI_API_KEY=""
+```
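+
+The agents read these keys from the process environment at runtime. If you keep them in a `.env` file, one way to load it at the top of your script is python-dotenv; this is an assumption for convenience, as any mechanism that sets the variables works:
+
+```python
+from dotenv import load_dotenv  # pip install python-dotenv
+
+load_dotenv()  # loads WORKSPACE_DIR and the API keys from .env into os.environ
+```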
+
+## Code
+
+- Make a file called `smart_database_swarm.py`
+
+```python
+import sqlite3
+import json
+from pathlib import Path
+from loguru import logger
+
+from swarms import Agent, HierarchicalSwarm
+
+
+# =============================================================================
+# DATABASE TOOLS - Core Functions for Database Operations
+# =============================================================================
+
+
+def create_database(
+ database_name: str, database_path: str = "./databases"
+) -> str:
+ """
+ Create a new SQLite database file.
+
+ Args:
+ database_name (str): Name of the database to create (without .db extension)
+ database_path (str, optional): Directory path where database will be created.
+ Defaults to "./databases".
+
+ Returns:
+ str: JSON string containing operation result and database information
+
+ Raises:
+ OSError: If unable to create database directory or file
+ sqlite3.Error: If database connection fails
+
+ Example:
+ >>> result = create_database("company_db", "/data/databases")
+ >>> print(result)
+ {"status": "success", "database": "company_db.db", "path": "/data/databases/company_db.db"}
+ """
+ try:
+ # Validate input parameters
+ if not database_name or not database_name.strip():
+ raise ValueError("Database name cannot be empty")
+
+ # Clean database name
+ db_name = database_name.strip().replace(" ", "_")
+ if not db_name.endswith(".db"):
+ db_name += ".db"
+
+ # Create database directory if it doesn't exist
+ db_path = Path(database_path)
+ db_path.mkdir(parents=True, exist_ok=True)
+
+ # Full database file path
+ full_db_path = db_path / db_name
+
+ # Create database connection (creates file if doesn't exist)
+ conn = sqlite3.connect(str(full_db_path))
+
+ # Create a metadata table to track database info
+ conn.execute(
+ """
+ CREATE TABLE IF NOT EXISTS _database_metadata (
+ key TEXT PRIMARY KEY,
+ value TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ )
+ """
+ )
+
+ # Insert database metadata
+ conn.execute(
+ "INSERT OR REPLACE INTO _database_metadata (key, value) VALUES (?, ?)",
+ ("database_name", database_name),
+ )
+
+ conn.commit()
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": f"Database '{database_name}' created successfully",
+ "database": db_name,
+ "path": str(full_db_path),
+ "size_bytes": full_db_path.stat().st_size,
+ }
+
+ logger.info(f"Database created: {db_name}")
+ return json.dumps(result, indent=2)
+
+ except ValueError as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"Database error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def create_table(
+ database_path: str, table_name: str, schema: str
+) -> str:
+ """
+ Create a new table in the specified database with the given schema.
+
+ Args:
+ database_path (str): Full path to the database file
+ table_name (str): Name of the table to create
+ schema (str): SQL schema definition for the table columns
+ Format: "column1 TYPE constraints, column2 TYPE constraints, ..."
+ Example: "id INTEGER PRIMARY KEY, name TEXT NOT NULL, age INTEGER"
+
+ Returns:
+ str: JSON string containing operation result and table information
+
+ Raises:
+ sqlite3.Error: If table creation fails
+ FileNotFoundError: If database file doesn't exist
+
+ Example:
+ >>> schema = "id INTEGER PRIMARY KEY, name TEXT NOT NULL, email TEXT UNIQUE"
+ >>> result = create_table("/data/company.db", "employees", schema)
+ >>> print(result)
+ {"status": "success", "table": "employees", "columns": 3}
+ """
+ try:
+ # Validate inputs
+ if not all([database_path, table_name, schema]):
+ raise ValueError(
+ "Database path, table name, and schema are required"
+ )
+
+ # Check if database exists
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ # Clean table name
+ clean_table_name = table_name.strip().replace(" ", "_")
+
+ # Connect to database
+ conn = sqlite3.connect(database_path)
+ cursor = conn.cursor()
+
+ # Check if table already exists
+ cursor.execute(
+ "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
+ (clean_table_name,),
+ )
+
+ if cursor.fetchone():
+ conn.close()
+ return json.dumps(
+ {
+ "status": "warning",
+ "message": f"Table '{clean_table_name}' already exists",
+ "table": clean_table_name,
+ }
+ )
+
+ # Create table with provided schema
+ create_sql = f"CREATE TABLE {clean_table_name} ({schema})"
+ cursor.execute(create_sql)
+
+ # Get table info
+ cursor.execute(f"PRAGMA table_info({clean_table_name})")
+ columns = cursor.fetchall()
+
+        # Update metadata; create the tracking table first in case this
+        # database was not created via create_database()
+        cursor.execute(
+            """
+            CREATE TABLE IF NOT EXISTS _database_metadata (
+                key TEXT PRIMARY KEY,
+                value TEXT,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+            )
+            """
+        )
+        cursor.execute(
+            "INSERT OR REPLACE INTO _database_metadata (key, value) VALUES (?, ?)",
+            (f"table_{clean_table_name}_created", "true"),
+        )
+
+ conn.commit()
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": f"Table '{clean_table_name}' created successfully",
+ "table": clean_table_name,
+ "columns": len(columns),
+ "schema": [
+ {
+ "name": col[1],
+ "type": col[2],
+ "nullable": not col[3],
+ }
+ for col in columns
+ ],
+ }
+
+ return json.dumps(result, indent=2)
+
+ except ValueError as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except FileNotFoundError as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def insert_data(
+ database_path: str, table_name: str, data: str
+) -> str:
+ """
+ Insert data into a specified table.
+
+ Args:
+ database_path (str): Full path to the database file
+ table_name (str): Name of the target table
+ data (str): JSON string containing data to insert
+ Format: {"columns": ["col1", "col2"], "values": [[val1, val2], ...]}
+ Or: [{"col1": val1, "col2": val2}, ...]
+
+ Returns:
+ str: JSON string containing operation result and insertion statistics
+
+ Example:
+ >>> data = '{"columns": ["name", "age"], "values": [["John", 30], ["Jane", 25]]}'
+ >>> result = insert_data("/data/company.db", "employees", data)
+ >>> print(result)
+ {"status": "success", "table": "employees", "rows_inserted": 2}
+ """
+ try:
+ # Validate inputs
+ if not all([database_path, table_name, data]):
+ raise ValueError(
+ "Database path, table name, and data are required"
+ )
+
+ # Check if database exists
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ # Parse data
+ try:
+ parsed_data = json.loads(data)
+ except json.JSONDecodeError:
+ raise ValueError("Invalid JSON format for data")
+
+ conn = sqlite3.connect(database_path)
+ cursor = conn.cursor()
+
+ # Check if table exists
+ cursor.execute(
+ "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
+ (table_name,),
+ )
+
+ if not cursor.fetchone():
+ conn.close()
+ raise ValueError(f"Table '{table_name}' does not exist")
+
+ rows_inserted = 0
+
+ # Handle different data formats
+ if isinstance(parsed_data, list) and all(
+ isinstance(item, dict) for item in parsed_data
+ ):
+ # Format: [{"col1": val1, "col2": val2}, ...]
+ for row in parsed_data:
+ columns = list(row.keys())
+ values = list(row.values())
+ placeholders = ", ".join(["?" for _ in values])
+ columns_str = ", ".join(columns)
+
+ insert_sql = f"INSERT INTO {table_name} ({columns_str}) VALUES ({placeholders})"
+ cursor.execute(insert_sql, values)
+ rows_inserted += 1
+
+ elif (
+ isinstance(parsed_data, dict)
+ and "columns" in parsed_data
+ and "values" in parsed_data
+ ):
+ # Format: {"columns": ["col1", "col2"], "values": [[val1, val2], ...]}
+ columns = parsed_data["columns"]
+ values_list = parsed_data["values"]
+
+ placeholders = ", ".join(["?" for _ in columns])
+ columns_str = ", ".join(columns)
+
+ insert_sql = f"INSERT INTO {table_name} ({columns_str}) VALUES ({placeholders})"
+
+ for values in values_list:
+ cursor.execute(insert_sql, values)
+ rows_inserted += 1
+ else:
+ raise ValueError(
+ "Invalid data format. Expected list of dicts or dict with columns/values"
+ )
+
+ conn.commit()
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": f"Data inserted successfully into '{table_name}'",
+ "table": table_name,
+ "rows_inserted": rows_inserted,
+ }
+
+ return json.dumps(result, indent=2)
+
+ except (ValueError, FileNotFoundError) as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def query_database(
+ database_path: str, query: str, params: str = "[]"
+) -> str:
+ """
+ Execute a SELECT query on the database and return results.
+
+ Args:
+ database_path (str): Full path to the database file
+ query (str): SQL SELECT query to execute
+ params (str, optional): JSON string of query parameters for prepared statements.
+ Defaults to "[]".
+
+ Returns:
+ str: JSON string containing query results and metadata
+
+ Example:
+ >>> query = "SELECT * FROM employees WHERE age > ?"
+ >>> params = "[25]"
+ >>> result = query_database("/data/company.db", query, params)
+ >>> print(result)
+ {"status": "success", "results": [...], "row_count": 5}
+ """
+ try:
+ # Validate inputs
+ if not all([database_path, query]):
+ raise ValueError("Database path and query are required")
+
+ # Check if database exists
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ # Validate query is SELECT only (security)
+ if not query.strip().upper().startswith("SELECT"):
+ raise ValueError("Only SELECT queries are allowed")
+
+ # Parse parameters
+ try:
+ query_params = json.loads(params)
+ except json.JSONDecodeError:
+ raise ValueError("Invalid JSON format for parameters")
+
+ conn = sqlite3.connect(database_path)
+ conn.row_factory = sqlite3.Row # Enable column access by name
+ cursor = conn.cursor()
+
+ # Execute query
+ if query_params:
+ cursor.execute(query, query_params)
+ else:
+ cursor.execute(query)
+
+ # Fetch results
+ rows = cursor.fetchall()
+
+ # Convert to list of dictionaries
+ results = [dict(row) for row in rows]
+
+ # Get column names
+ column_names = (
+ [description[0] for description in cursor.description]
+ if cursor.description
+ else []
+ )
+
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": "Query executed successfully",
+ "results": results,
+ "row_count": len(results),
+ "columns": column_names,
+ }
+
+ return json.dumps(result, indent=2)
+
+ except (ValueError, FileNotFoundError) as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def update_table_data(
+ database_path: str,
+ table_name: str,
+ update_data: str,
+ where_clause: str = "",
+) -> str:
+ """
+ Update existing data in a table.
+
+ Args:
+ database_path (str): Full path to the database file
+ table_name (str): Name of the table to update
+ update_data (str): JSON string with column-value pairs to update
+ Format: {"column1": "new_value1", "column2": "new_value2"}
+ where_clause (str, optional): WHERE condition for the update (without WHERE keyword).
+ Example: "id = 1 AND status = 'active'"
+
+ Returns:
+ str: JSON string containing operation result and update statistics
+
+ Example:
+ >>> update_data = '{"salary": 50000, "department": "Engineering"}'
+ >>> where_clause = "id = 1"
+ >>> result = update_table_data("/data/company.db", "employees", update_data, where_clause)
+ >>> print(result)
+ {"status": "success", "table": "employees", "rows_updated": 1}
+ """
+ try:
+ # Validate inputs
+ if not all([database_path, table_name, update_data]):
+ raise ValueError(
+ "Database path, table name, and update data are required"
+ )
+
+ # Check if database exists
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ # Parse update data
+ try:
+ parsed_updates = json.loads(update_data)
+ except json.JSONDecodeError:
+ raise ValueError("Invalid JSON format for update data")
+
+ if not isinstance(parsed_updates, dict):
+ raise ValueError("Update data must be a dictionary")
+
+ conn = sqlite3.connect(database_path)
+ cursor = conn.cursor()
+
+ # Check if table exists
+ cursor.execute(
+ "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
+ (table_name,),
+ )
+
+ if not cursor.fetchone():
+ conn.close()
+ raise ValueError(f"Table '{table_name}' does not exist")
+
+ # Build UPDATE query
+ set_clauses = []
+ values = []
+
+ for column, value in parsed_updates.items():
+ set_clauses.append(f"{column} = ?")
+ values.append(value)
+
+ set_clause = ", ".join(set_clauses)
+
+ if where_clause:
+ update_sql = f"UPDATE {table_name} SET {set_clause} WHERE {where_clause}"
+ else:
+ update_sql = f"UPDATE {table_name} SET {set_clause}"
+
+ # Execute update
+ cursor.execute(update_sql, values)
+ rows_updated = cursor.rowcount
+
+ conn.commit()
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": f"Table '{table_name}' updated successfully",
+ "table": table_name,
+ "rows_updated": rows_updated,
+ "updated_columns": list(parsed_updates.keys()),
+ }
+
+ return json.dumps(result, indent=2)
+
+ except (ValueError, FileNotFoundError) as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def get_database_schema(database_path: str) -> str:
+ """
+ Get comprehensive schema information for all tables in the database.
+
+ Args:
+ database_path (str): Full path to the database file
+
+ Returns:
+ str: JSON string containing complete database schema information
+
+ Example:
+ >>> result = get_database_schema("/data/company.db")
+ >>> print(result)
+ {"status": "success", "database": "company.db", "tables": {...}}
+ """
+ try:
+ if not database_path:
+ raise ValueError("Database path is required")
+
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ conn = sqlite3.connect(database_path)
+ cursor = conn.cursor()
+
+ # Get all tables
+    # Exclude internal tables whose names start with an underscore.
+    # "_" is a single-character wildcard in LIKE, so it must be escaped;
+    # otherwise NOT LIKE '_%' would filter out every table.
+    cursor.execute(
+        "SELECT name FROM sqlite_master WHERE type='table' "
+        "AND name NOT LIKE '\\_%' ESCAPE '\\'"
+    )
+ tables = cursor.fetchall()
+
+ schema_info = {
+ "database": Path(database_path).name,
+ "table_count": len(tables),
+ "tables": {},
+ }
+
+ for table in tables:
+ table_name = table[0]
+
+ # Get table schema
+ cursor.execute(f"PRAGMA table_info({table_name})")
+ columns = cursor.fetchall()
+
+ # Get row count
+ cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
+ row_count = cursor.fetchone()[0]
+
+ schema_info["tables"][table_name] = {
+ "columns": [
+ {
+ "name": col[1],
+ "type": col[2],
+ "nullable": not col[3],
+ "default": col[4],
+ "primary_key": bool(col[5]),
+ }
+ for col in columns
+ ],
+ "column_count": len(columns),
+ "row_count": row_count,
+ }
+
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": "Database schema retrieved successfully",
+ "schema": schema_info,
+ }
+
+ return json.dumps(result, indent=2)
+
+ except (ValueError, FileNotFoundError) as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+# =============================================================================
+# DATABASE CREATION SPECIALIST AGENT
+# =============================================================================
+database_creator_agent = Agent(
+ agent_name="Database-Creator",
+ agent_description="Specialist agent responsible for creating and initializing databases with proper structure and metadata",
+ system_prompt="""You are the Database Creator, a specialist agent responsible for database creation and initialization. Your expertise includes:
+
+ DATABASE CREATION & SETUP:
+ - Creating new SQLite databases with proper structure
+ - Setting up database metadata and tracking systems
+ - Initializing database directories and file organization
+ - Ensuring database accessibility and permissions
+ - Creating database backup and recovery procedures
+
+ DATABASE ARCHITECTURE:
+ - Designing optimal database structures for different use cases
+ - Planning database organization and naming conventions
+ - Setting up database configuration and optimization settings
+ - Implementing database security and access controls
+ - Creating database documentation and specifications
+
+ Your responsibilities:
+ - Create new databases when requested
+ - Set up proper database structure and metadata
+ - Ensure database is properly initialized and accessible
+ - Provide database creation status and information
+ - Handle database creation errors and provide solutions
+
+ You work with precise technical specifications and always ensure databases are created correctly and efficiently.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.3,
+ dynamic_temperature_enabled=True,
+ tools=[create_database, get_database_schema],
+)
+
+# =============================================================================
+# TABLE MANAGEMENT SPECIALIST AGENT
+# =============================================================================
+table_manager_agent = Agent(
+ agent_name="Table-Manager",
+ agent_description="Specialist agent for table creation, schema design, and table structure management",
+ system_prompt="""You are the Table Manager, a specialist agent responsible for table creation, schema design, and table structure management. Your expertise includes:
+
+ TABLE CREATION & DESIGN:
+ - Creating tables with optimal schema design
+ - Defining appropriate data types and constraints
+ - Setting up primary keys, foreign keys, and indexes
+ - Designing normalized table structures
+ - Creating tables that support efficient queries and operations
+
+ SCHEMA MANAGEMENT:
+ - Analyzing schema requirements and designing optimal structures
+ - Validating schema definitions and data types
+ - Ensuring schema consistency and integrity
+ - Managing schema modifications and updates
+ - Optimizing table structures for performance
+
+ DATA INTEGRITY:
+ - Implementing proper constraints and validation rules
+ - Setting up referential integrity between tables
+ - Ensuring data consistency across table operations
+ - Managing table relationships and dependencies
+ - Creating tables that support data quality requirements
+
+ Your responsibilities:
+ - Create tables with proper schema definitions
+ - Validate table structures and constraints
+ - Ensure optimal table design for performance
+ - Handle table creation errors and provide solutions
+ - Provide detailed table information and metadata
+
+ You work with precision and always ensure tables are created with optimal structure and performance characteristics.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.3,
+ dynamic_temperature_enabled=True,
+ tools=[create_table, get_database_schema],
+)
+
+# =============================================================================
+# DATA OPERATIONS SPECIALIST AGENT
+# =============================================================================
+data_operations_agent = Agent(
+ agent_name="Data-Operations",
+ agent_description="Specialist agent for data insertion, updates, and data manipulation operations",
+ system_prompt="""You are the Data Operations specialist, responsible for all data manipulation operations including insertion, updates, and data management. Your expertise includes:
+
+ DATA INSERTION:
+ - Inserting data with proper validation and formatting
+ - Handling bulk data insertions efficiently
+ - Managing data type conversions and formatting
+ - Ensuring data integrity during insertion operations
+ - Validating data before insertion to prevent errors
+
+ DATA UPDATES:
+ - Updating existing data with precision and safety
+ - Creating targeted update operations with proper WHERE clauses
+ - Managing bulk updates and data modifications
+ - Ensuring data consistency during update operations
+ - Validating update operations to prevent data corruption
+
+ DATA VALIDATION:
+ - Validating data formats and types before operations
+ - Ensuring data meets schema requirements and constraints
+ - Checking for data consistency and integrity
+ - Managing data transformation and cleaning operations
+ - Providing detailed feedback on data operation results
+
+ ERROR HANDLING:
+ - Managing data operation errors gracefully
+ - Providing clear error messages and solutions
+ - Ensuring data operations are atomic and safe
+ - Rolling back operations when necessary
+ - Maintaining data integrity throughout all operations
+
+ Your responsibilities:
+ - Execute data insertion operations safely and efficiently
+ - Perform data updates with proper validation
+ - Ensure data integrity throughout all operations
+ - Handle data operation errors and provide solutions
+ - Provide detailed operation results and statistics
+
+ You work with extreme precision and always prioritize data integrity and safety in all operations.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.3,
+ dynamic_temperature_enabled=True,
+ tools=[insert_data, update_table_data],
+)
+
+# =============================================================================
+# QUERY SPECIALIST AGENT
+# =============================================================================
+query_specialist_agent = Agent(
+ agent_name="Query-Specialist",
+ agent_description="Expert agent for database querying, data retrieval, and query optimization",
+ system_prompt="""You are the Query Specialist, an expert agent responsible for database querying, data retrieval, and query optimization. Your expertise includes:
+
+ QUERY EXECUTION:
+ - Executing complex SELECT queries efficiently
+ - Handling parameterized queries for security
+ - Managing query results and data formatting
+ - Ensuring query performance and optimization
+ - Providing comprehensive query results with metadata
+
+ QUERY OPTIMIZATION:
+ - Analyzing query performance and optimization opportunities
+ - Creating efficient queries that minimize resource usage
+ - Understanding database indexes and query planning
+ - Optimizing JOIN operations and complex queries
+ - Managing query timeouts and performance monitoring
+
+ DATA RETRIEVAL:
+ - Retrieving data with proper formatting and structure
+ - Handling large result sets efficiently
+ - Managing data aggregation and summarization
+ - Creating reports and data analysis queries
+ - Ensuring data accuracy and completeness in results
+
+ SECURITY & VALIDATION:
+ - Ensuring queries are safe and secure
+ - Validating query syntax and parameters
+ - Preventing SQL injection and security vulnerabilities
+ - Managing query permissions and access controls
+ - Ensuring queries follow security best practices
+
+ Your responsibilities:
+ - Execute database queries safely and efficiently
+ - Optimize query performance for best results
+ - Provide comprehensive query results and analysis
+ - Handle query errors and provide solutions
+ - Ensure query security and data protection
+
+ You work with expertise in SQL optimization and always ensure queries are secure, efficient, and provide accurate results.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.3,
+ dynamic_temperature_enabled=True,
+ tools=[query_database, get_database_schema],
+)
+
+# =============================================================================
+# DATABASE DIRECTOR AGENT (COORDINATOR)
+# =============================================================================
+database_director_agent = Agent(
+ agent_name="Database-Director",
+ agent_description="Senior database director who orchestrates comprehensive database operations across all specialized teams",
+ system_prompt="""You are the Database Director, the senior executive responsible for orchestrating comprehensive database operations and coordinating a team of specialized database experts. Your role is to:
+
+ STRATEGIC COORDINATION:
+ - Analyze complex database tasks and break them down into specialized operations
+ - Assign tasks to the most appropriate specialist based on their unique expertise
+ - Ensure comprehensive coverage of all database operations (creation, schema, data, queries)
+ - Coordinate between specialists to avoid conflicts and ensure data integrity
+ - Synthesize results from multiple specialists into coherent database solutions
+ - Ensure all database operations align with user requirements and best practices
+
+ TEAM LEADERSHIP:
+ - Lead the Database Creator in setting up new databases and infrastructure
+ - Guide the Table Manager in creating optimal table structures and schemas
+ - Direct the Data Operations specialist in data insertion and update operations
+ - Oversee the Query Specialist in data retrieval and analysis operations
+ - Ensure all team members work collaboratively toward unified database goals
+ - Provide strategic direction and feedback to optimize team performance
+
+ DATABASE ARCHITECTURE:
+ - Design comprehensive database solutions that meet user requirements
+ - Ensure database operations follow best practices and standards
+ - Plan database workflows that optimize performance and reliability
+ - Balance immediate operational needs with long-term database health
+ - Ensure database operations are secure, efficient, and maintainable
+ - Optimize database operations for scalability and performance
+
+ OPERATION ORCHESTRATION:
+ - Monitor database operations across all specialists and activities
+ - Analyze results to identify optimization opportunities and improvements
+ - Ensure database operations deliver reliable and accurate results
+ - Provide strategic recommendations based on operation outcomes
+ - Coordinate complex multi-step database operations across specialists
+ - Ensure continuous improvement and optimization in database management
+
+ Your expertise includes:
+ - Database architecture and design strategy
+ - Team leadership and cross-functional coordination
+ - Database performance analysis and optimization
+ - Strategic planning and requirement analysis
+ - Operation workflow management and optimization
+ - Database security and best practices implementation
+
+ You deliver comprehensive database solutions that leverage the full expertise of your specialized team, ensuring all database operations work together to provide reliable, efficient, and secure data management.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.5,
+ dynamic_temperature_enabled=True,
+)
+
+# =============================================================================
+# HIERARCHICAL DATABASE SWARM
+# =============================================================================
+# Create list of specialized database agents
+database_specialists = [
+ database_creator_agent,
+ table_manager_agent,
+ data_operations_agent,
+ query_specialist_agent,
+]
+
+# Initialize the hierarchical database swarm
+smart_database_swarm = HierarchicalSwarm(
+ name="Smart-Database-Swarm",
+ description="A comprehensive database management system with specialized agents for creation, schema management, data operations, and querying, coordinated by a database director",
+ director_model_name="gpt-4.1",
+ agents=database_specialists,
+ max_loops=1,
+ verbose=True,
+)
+
+# =============================================================================
+# EXAMPLE USAGE AND DEMONSTRATIONS
+# =============================================================================
+if __name__ == "__main__":
+    # Log the start of the demonstration
+ logger.info("Starting Smart Database Swarm demonstration")
+
+ # Example 1: Create a complete e-commerce database system
+ print("=" * 80)
+ print("SMART DATABASE SWARM - E-COMMERCE SYSTEM EXAMPLE")
+ print("=" * 80)
+
+ task1 = """Create a comprehensive e-commerce database system with the following requirements:
+
+ 1. Create a database called 'ecommerce_db'
+ 2. Create tables for:
+ - customers (id, name, email, phone, address, created_at)
+ - products (id, name, description, price, category, stock_quantity, created_at)
+ - orders (id, customer_id, order_date, total_amount, status)
+ - order_items (id, order_id, product_id, quantity, unit_price)
+
+ 3. Insert sample data:
+ - Add 3 customers
+ - Add 5 products in different categories
+ - Create 2 orders with multiple items
+
+ 4. Query the database to:
+ - Show all customers with their order history
+ - Display products by category with stock levels
+ - Calculate total sales by product
+
+ Ensure all operations are executed properly and provide comprehensive results."""
+
+ result1 = smart_database_swarm.run(task=task1)
+ print("\nE-COMMERCE DATABASE RESULT:")
+ print(result1)
+
+ # print("\n" + "=" * 80)
+ # print("SMART DATABASE SWARM - EMPLOYEE MANAGEMENT SYSTEM")
+ # print("=" * 80)
+
+ # # Example 2: Employee management system
+ # task2 = """Create an employee management database system:
+
+ # 1. Create database 'company_hr'
+ # 2. Create tables for:
+ # - departments (id, name, budget, manager_id)
+ # - employees (id, name, email, department_id, position, salary, hire_date)
+ # - projects (id, name, description, start_date, end_date, budget)
+ # - employee_projects (employee_id, project_id, role, hours_allocated)
+
+ # 3. Add sample data for departments, employees, and projects
+ # 4. Query for:
+ # - Employee count by department
+ # - Average salary by position
+ # - Projects with their assigned employees
+ # - Department budgets vs project allocations
+
+ # Coordinate the team to build this system efficiently."""
+
+ # result2 = smart_database_swarm.run(task=task2)
+ # print("\nEMPLOYEE MANAGEMENT RESULT:")
+ # print(result2)
+
+ # print("\n" + "=" * 80)
+ # print("SMART DATABASE SWARM - DATABASE ANALYSIS")
+ # print("=" * 80)
+
+ # # Example 3: Database analysis and optimization
+ # task3 = """Analyze and optimize the existing databases:
+
+ # 1. Get schema information for all created databases
+ # 2. Analyze table structures and relationships
+ # 3. Suggest optimizations for:
+ # - Index creation for better query performance
+ # - Data normalization improvements
+ # - Constraint additions for data integrity
+
+ # 4. Update data in existing tables:
+ # - Increase product prices by 10% for electronics category
+ # - Update employee salaries based on performance criteria
+ # - Modify order statuses for completed orders
+
+ # 5. Create comprehensive reports showing:
+ # - Database statistics and health metrics
+ # - Data distribution and patterns
+ # - Performance optimization recommendations
+
+ # Coordinate all specialists to provide a complete database analysis."""
+
+ # result3 = smart_database_swarm.run(task=task3)
+ # print("\nDATABASE ANALYSIS RESULT:")
+ # print(result3)
+
+ # logger.info("Smart Database Swarm demonstration completed successfully")
+```
+
+
+- Run the file with `python smart_database_swarm.py`
\ No newline at end of file
diff --git a/docs/examples/templates.md b/docs/examples/templates.md
index fbce5dba..1c4471f1 100644
--- a/docs/examples/templates.md
+++ b/docs/examples/templates.md
@@ -192,7 +192,7 @@ Join our community of agent engineers and researchers for technical support, cut
| 🌐 Website | Official project website | [swarms.ai](https://swarms.ai) |
| 📚 Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| 📝 Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| 💬 Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| 💬 Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| 🐦 Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| 👥 LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| 📺 YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
diff --git a/docs/governance/bounty_program.md b/docs/governance/bounty_program.md
index 332b89f1..9623e4ed 100644
--- a/docs/governance/bounty_program.md
+++ b/docs/governance/bounty_program.md
@@ -43,7 +43,7 @@ To ensure high-quality contributions and streamline the process, please adhere t
## Get Involved
1. **Join the Community**:
- - Become an active member of the Swarms community by joining our Discord server: [Join Now](https://discord.gg/jM3Z6M9uMq). The Discord server serves as a hub for discussions, updates, and support.
+ - Become an active member of the Swarms community by joining our Discord server: [Join Now](https://discord.gg/EamjgSaEQf). The Discord server serves as a hub for discussions, updates, and support.
2. **Stay Updated**:
- Keep track of the latest updates, announcements, and bounty opportunities by regularly checking the Discord channel and the GitHub repository.
diff --git a/docs/governance/main.md b/docs/governance/main.md
index 70a6a5a2..30a05cda 100644
--- a/docs/governance/main.md
+++ b/docs/governance/main.md
@@ -45,7 +45,7 @@ Welcome to the Swarms ecosystem. Click any tile below to explore our products, c
🦀 GitHub: Swarms (Rust)
-💬 Join Our Discord
+💬 Join Our Discord
📱 Telegram Group
@@ -67,7 +67,7 @@ Welcome to the Swarms ecosystem. Click any tile below to explore our products, c
| Chat UI | [swarms.world/platform/chat](https://swarms.world/platform/chat) |
| Marketplace | [swarms.world](https://swarms.world) |
| Startup App | [Apply Here](https://www.swarms.xyz/programs/startups) |
-| Discord | [Join Now](https://discord.gg/jM3Z6M9uMq) |
+| Discord | [Join Now](https://discord.gg/EamjgSaEQf) |
| Telegram | [Group Chat](https://t.me/swarmsgroupchat) |
| Twitter/X | [@swarms_corp](https://x.com/swarms_corp) |
| Blog | [medium.com/@kyeg](https://medium.com/@kyeg) |
diff --git a/docs/guides/financial_analysis_swarm_mm.md b/docs/guides/financial_analysis_swarm_mm.md
index d4e844e2..63c5ae5c 100644
--- a/docs/guides/financial_analysis_swarm_mm.md
+++ b/docs/guides/financial_analysis_swarm_mm.md
@@ -7,7 +7,7 @@ Before we dive into the code, let's briefly introduce the Swarms framework. Swar
For more information and to contribute to the project, visit the [Swarms GitHub repository](https://github.com/kyegomez/swarms). We highly recommend exploring the documentation for a deeper understanding of Swarms' capabilities.
Additional resources:
-- [Swarms Discord](https://discord.gg/jM3Z6M9uMq) for community discussions
+- [Swarms Discord](https://discord.gg/EamjgSaEQf) for community discussions
- [Swarms Twitter](https://x.com/swarms_corp) for updates
- [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) for podcasts
- [Swarms Blog](https://medium.com/@kyeg) for in-depth articles
@@ -460,7 +460,7 @@ This system provides a powerful foundation for financial analysis, but there's a
Remember, the Swarms framework is a powerful and flexible tool that can be adapted to a wide range of complex tasks beyond just financial analysis. We encourage you to explore the [Swarms GitHub repository](https://github.com/kyegomez/swarms) for more examples and inspiration.
-For more in-depth discussions and community support, consider joining the [Swarms Discord](https://discord.gg/jM3Z6M9uMq). You can also stay updated with the latest developments by following [Swarms on Twitter](https://x.com/swarms_corp).
+For more in-depth discussions and community support, consider joining the [Swarms Discord](https://discord.gg/EamjgSaEQf). You can also stay updated with the latest developments by following [Swarms on Twitter](https://x.com/swarms_corp).
If you're interested in learning more about AI and its applications in various fields, check out the [Swarms Spotify podcast](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) and the [Swarms Blog](https://medium.com/@kyeg) for insightful articles and discussions.
@@ -474,7 +474,7 @@ By leveraging the power of multi-agent AI systems, you're well-equipped to navig
* [Swarms Github](https://github.com/kyegomez/swarms)
-* [Swarms Discord](https://discord.gg/jM3Z6M9uMq)
+* [Swarms Discord](https://discord.gg/EamjgSaEQf)
* [Swarms Twitter](https://x.com/swarms_corp)
* [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994)
* [Swarms Blog](https://medium.com/@kyeg)
diff --git a/docs/guides/healthcare_blog.md b/docs/guides/healthcare_blog.md
index 04629976..0f653002 100644
--- a/docs/guides/healthcare_blog.md
+++ b/docs/guides/healthcare_blog.md
@@ -261,7 +261,7 @@ The table below summarizes the estimated savings for each use case:
- [book a call](https://cal.com/swarms)
-- Swarms Discord: https://discord.gg/jM3Z6M9uMq
+- Swarms Discord: https://discord.gg/EamjgSaEQf
- Swarms Twitter: https://x.com/swarms_corp
diff --git a/docs/index.md b/docs/index.md
index ceb80cc1..6e32a428 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,6 +1,6 @@
# Welcome to Swarms Docs Home
-[](https://discord.gg/jM3Z6M9uMq) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
+[](https://discord.gg/EamjgSaEQf) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
## What is Swarms?
@@ -79,7 +79,7 @@ Here you'll find references about the Swarms framework, marketplace, community,
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
diff --git a/docs/llm.txt b/docs/llm.txt
index 5ccbf028..3dc0048a 100644
--- a/docs/llm.txt
+++ b/docs/llm.txt
@@ -503,7 +503,7 @@ Schedule quarterly audits to refine structure and content across all repositorie
Promote your contributions via:
-- **Swarms Discord**: https://discord.gg/jM3Z6M9uMq
+- **Swarms Discord**: https://discord.gg/EamjgSaEQf
- **Swarms Telegram**: https://t.me/swarmsgroupchat
@@ -1180,7 +1180,7 @@ If you encounter issues:
1. **Check the FAQ** in the main documentation
2. **Search existing issues** on GitHub
-3. **Ask in the Discord community**: [discord.gg/jM3Z6M9uMq](https://discord.gg/jM3Z6M9uMq)
+3. **Ask in the Discord community**: [discord.gg/EamjgSaEQf](https://discord.gg/EamjgSaEQf)
4. **Create a GitHub issue** with:
- Your operating system
- Python version
@@ -1804,7 +1804,7 @@ If you'd like technical support, join our Discord below and stay updated on our
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
@@ -1861,7 +1861,7 @@ This index provides a categorized list of examples and tutorials for using the S
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
@@ -2294,7 +2294,7 @@ If you use any of these implementations in your research, please cite the origin
Join our community to stay updated on the latest multi-agent research implementations:
-- **Discord**: [Join our community](https://discord.gg/jM3Z6M9uMq)
+- **Discord**: [Join our community](https://discord.gg/EamjgSaEQf)
- **Documentation**: [docs.swarms.world](https://docs.swarms.world)
@@ -2503,7 +2503,7 @@ Join our community of agent engineers and researchers for technical support, cut
| π Website | Official project website | [swarms.ai](https://swarms.ai) |
| π Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| π Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| π¦ Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| π₯ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| πΊ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
@@ -2575,7 +2575,7 @@ To ensure high-quality contributions and streamline the process, please adhere t
## Get Involved
1. **Join the Community**:
- - Become an active member of the Swarms community by joining our Discord server: [Join Now](https://discord.gg/jM3Z6M9uMq). The Discord server serves as a hub for discussions, updates, and support.
+ - Become an active member of the Swarms community by joining our Discord server: [Join Now](https://discord.gg/EamjgSaEQf). The Discord server serves as a hub for discussions, updates, and support.
2. **Stay Updated**:
- Keep track of the latest updates, announcements, and bounty opportunities by regularly checking the Discord channel and the GitHub repository.
@@ -2657,7 +2657,7 @@ Welcome to the Swarms ecosystem. Click any tile below to explore our products, c
π¦ GitHub: Swarms (Rust)
-π¬ Join Our Discord
+π¬ Join Our Discord
π± Telegram Group
@@ -2679,7 +2679,7 @@ Welcome to the Swarms ecosystem. Click any tile below to explore our products, c
| Chat UI | [swarms.world/platform/chat](https://swarms.world/platform/chat) |
| Marketplace | [swarms.world](https://swarms.world) |
| Startup App | [Apply Here](https://www.swarms.xyz/programs/startups) |
-| Discord | [Join Now](https://discord.gg/jM3Z6M9uMq) |
+| Discord | [Join Now](https://discord.gg/EamjgSaEQf) |
| Telegram | [Group Chat](https://t.me/swarmsgroupchat) |
| Twitter/X | [@swarms_corp](https://x.com/swarms_corp) |
| Blog | [medium.com/@kyeg](https://medium.com/@kyeg) |
@@ -2961,7 +2961,7 @@ Before we dive into the code, let's briefly introduce the Swarms framework. Swar
For more information and to contribute to the project, visit the [Swarms GitHub repository](https://github.com/kyegomez/swarms). We highly recommend exploring the documentation for a deeper understanding of Swarms' capabilities.
Additional resources:
-- [Swarms Discord](https://discord.gg/jM3Z6M9uMq) for community discussions
+- [Swarms Discord](https://discord.gg/EamjgSaEQf) for community discussions
- [Swarms Twitter](https://x.com/swarms_corp) for updates
- [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) for podcasts
- [Swarms Blog](https://medium.com/@kyeg) for in-depth articles
@@ -3414,7 +3414,7 @@ This system provides a powerful foundation for financial analysis, but there's a
Remember, the Swarms framework is a powerful and flexible tool that can be adapted to a wide range of complex tasks beyond just financial analysis. We encourage you to explore the [Swarms GitHub repository](https://github.com/kyegomez/swarms) for more examples and inspiration.
-For more in-depth discussions and community support, consider joining the [Swarms Discord](https://discord.gg/jM3Z6M9uMq). You can also stay updated with the latest developments by following [Swarms on Twitter](https://x.com/swarms_corp).
+For more in-depth discussions and community support, consider joining the [Swarms Discord](https://discord.gg/EamjgSaEQf). You can also stay updated with the latest developments by following [Swarms on Twitter](https://x.com/swarms_corp).
If you're interested in learning more about AI and its applications in various fields, check out the [Swarms Spotify podcast](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) and the [Swarms Blog](https://medium.com/@kyeg) for insightful articles and discussions.
@@ -3428,7 +3428,7 @@ By leveraging the power of multi-agent AI systems, you're well-equipped to navig
* [Swarms Github](https://github.com/kyegomez/swarms)
-* [Swarms Discord](https://discord.gg/jM3Z6M9uMq)
+* [Swarms Discord](https://discord.gg/EamjgSaEQf)
* [Swarms Twitter](https://x.com/swarms_corp)
* [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994)
* [Swarms Blog](https://medium.com/@kyeg)
@@ -4457,7 +4457,7 @@ The table below summarizes the estimated savings for each use case:
- [book a call](https://cal.com/swarms)
-- Swarms Discord: https://discord.gg/jM3Z6M9uMq
+- Swarms Discord: https://discord.gg/EamjgSaEQf
- Swarms Twitter: https://x.com/swarms_corp
@@ -5349,7 +5349,7 @@ By leveraging expert guidance and peer insights, you can position your organizat
# Welcome to Swarms Docs Home
-[](https://discord.gg/jM3Z6M9uMq) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
+[](https://discord.gg/EamjgSaEQf) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
## What is Swarms?
@@ -5428,7 +5428,7 @@ Here you'll find references about the Swarms framework, marketplace, community,
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
@@ -6056,7 +6056,7 @@ When creating your SIP, copy this template:
# Welcome to Swarms Docs Home
-[](https://discord.gg/jM3Z6M9uMq) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
+[](https://discord.gg/EamjgSaEQf) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
## What is Swarms?
@@ -8804,7 +8804,7 @@ Join our community of agent engineers and researchers for technical support, cut
|----------|-------------|------|
| π Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| π Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| π¦ Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| π₯ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| πΊ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
@@ -12747,7 +12747,7 @@ By understanding the purpose and role of each folder in the Swarms framework, us
- **Community Support**
- - URL: [Submit issue](https://discord.gg/jM3Z6M9uMq)
+ - URL: [Submit issue](https://discord.gg/EamjgSaEQf)
 - Ask the community for real-time support and/or admin support
--------------------------------------------------
@@ -15569,7 +15569,7 @@ If you have any questions or need assistance, please feel free to open an issue
| **Platform** | **Purpose** | **Join Link** | **Benefits** |
|--------------|-------------|---------------|--------------|
-| **Discord Community** | Real-time support & discussions | [Join Discord](https://discord.gg/jM3Z6M9uMq) | • 24/7 developer support<br>• Weekly community events<br>• Direct access to core team<br>• Beta feature previews |
+| **Discord Community** | Real-time support & discussions | [Join Discord](https://discord.gg/EamjgSaEQf) | • 24/7 developer support<br>• Weekly community events<br>• Direct access to core team<br>• Beta feature previews |
 | **Twitter/X** | Latest updates & announcements | [Follow @swarms_corp](https://x.com/swarms_corp) | • Breaking news & updates<br>• Community highlights<br>• Technical insights<br>• Industry partnerships |
 | **LinkedIn** | Professional network & updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | • Professional networking<br>• Career opportunities<br>• Enterprise partnerships<br>• Industry insights |
 | **YouTube** | Tutorials & technical content | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | • In-depth tutorials<br>• Live coding sessions<br>• Architecture deep dives<br>• Community showcases |
@@ -15629,7 +15629,7 @@ If you have any questions or need assistance, please feel free to open an issue
| **1** | [Install Swarms Python Framework](https://docs.swarms.world/en/latest/swarms/install/install/) | 5 minutes |
| **2** | [Run Your First Agent](https://docs.swarms.world/en/latest/swarms/examples/basic_agent/) | 10 minutes |
| **3** | [Try Multi-Agent Workflows](https://docs.swarms.world/en/latest/swarms/examples/sequential_example/) | 15 minutes |
-| **4** | [Join Our Discord Community](https://discord.gg/jM3Z6M9uMq) | 2 minutes |
+| **4** | [Join Our Discord Community](https://discord.gg/EamjgSaEQf) | 2 minutes |
| **5** | [Explore Enterprise Features](https://docs.swarms.world/en/latest/swarms_cloud/swarms_api/) | 20 minutes |
---
@@ -18820,7 +18820,7 @@ Join our community of agent engineers and researchers for technical support, cut
|----------|-------------|------|
| π Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| π Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| π¦ Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| π₯ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| πΊ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
@@ -19611,7 +19611,7 @@ If you're facing issues or want to learn more, check out the following resources
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
@@ -19915,7 +19915,7 @@ If you're facing issues or want to learn more, check out the following resources
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
@@ -22379,7 +22379,7 @@ If you're facing issues or want to learn more, check out the following resources
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
@@ -27453,7 +27453,7 @@ Stay tuned for updates on the Swarm Exchange launch.
- **Documentation:** [Swarms Documentation](https://docs.swarms.world)
-- **Support:** Contact us via our [Discord Community](https://discord.gg/jM3Z6M9uMq).
+- **Support:** Contact us via our [Discord Community](https://discord.gg/EamjgSaEQf).
---
@@ -30381,7 +30381,7 @@ graph TD
- [Tutorials](https://docs.swarms.world/tutorials)
=== "π¬ Community"
- - [Discord Server](https://discord.gg/jM3Z6M9uMq)
+ - [Discord Server](https://discord.gg/EamjgSaEQf)
- [GitHub Discussions](https://github.com/kyegomez/swarms/discussions)
=== "π§ Development"
@@ -30425,7 +30425,7 @@ The MCP integration brings powerful external tool connectivity to Swarms agents,
!!! tip "Stay Updated"
- Join our [Discord community](https://discord.gg/jM3Z6M9uMq) to stay informed about new MCP features and connect with other developers building amazing agent applications.
+ Join our [Discord community](https://discord.gg/EamjgSaEQf) to stay informed about new MCP features and connect with other developers building amazing agent applications.
---
@@ -38385,7 +38385,7 @@ Join our community of agent engineers and researchers for technical support, cut
|----------|-------------|------|
| π Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| π Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| π¦ Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| π₯ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| πΊ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
@@ -46084,7 +46084,7 @@ The Swarms team is committed to providing exceptional technical support to help
| **Major Features (SIPs)** | New agent types, core changes, integrations | 1-2 weeks | [SIP Guidelines](protocol/sip.md) |
| **Minor Features** | Small enhancements, straightforward additions | < 48 hours | [GitHub Issues](https://github.com/kyegomez/swarms/issues) |
| **Private Issues** | Security concerns, enterprise consulting | < 4 hours | [Book Support Call](https://cal.com/swarms/swarms-technical-support?overlayCalendar=true) |
-| **Real-time Help** | Quick questions, community discussions | Immediate | [Discord Community](https://discord.gg/jM3Z6M9uMq) |
+| **Real-time Help** | Quick questions, community discussions | Immediate | [Discord Community](https://discord.gg/EamjgSaEQf) |
| **Documentation** | Usage guides, examples, tutorials | Self-service | [docs.swarms.world](https://docs.swarms.world) |
---
@@ -46263,7 +46263,7 @@ Get instant help from our active community of developers and core team members.
### **Getting Help on Discord**
-1. **Join here**: [https://discord.gg/jM3Z6M9uMq](https://discord.gg/jM3Z6M9uMq)
+1. **Join here**: [https://discord.gg/EamjgSaEQf](https://discord.gg/EamjgSaEQf)
2. **Read the rules** and introduce yourself in #general
@@ -46434,7 +46434,7 @@ Help improve support for everyone:
| Urgency | Best Channel |
|---------|-------------|
| **Emergency** | [Book Immediate Call](https://cal.com/swarms/swarms-technical-support?overlayCalendar=true) |
-| **Urgent** | [Discord #technical-support](https://discord.gg/jM3Z6M9uMq) |
+| **Urgent** | [Discord #technical-support](https://discord.gg/EamjgSaEQf) |
| **Standard** | [GitHub Issues](https://github.com/kyegomez/swarms/issues) |
| **Major Features** | [SIP Guidelines](protocol/sip.md) |
| **Minor Features** | [GitHub Issues](https://github.com/kyegomez/swarms/issues) |
@@ -50226,7 +50226,7 @@ agent_config = {
[:material-file-document: Swarms.ai Documentation](https://docs.swarms.world){ .md-button }
[:material-application: Swarms.ai Platform](https://swarms.world/platform){ .md-button }
[:material-key: API Key Management](https://swarms.world/platform/api-keys){ .md-button }
-[:material-forum: Swarms.ai Community](https://discord.gg/jM3Z6M9uMq){ .md-button }
+[:material-forum: Swarms.ai Community](https://discord.gg/EamjgSaEQf){ .md-button }
--------------------------------------------------
@@ -50379,7 +50379,7 @@ SWARMS_LOG_LEVEL=INFO
| Community Channel | Description | Link |
|-----------------------------|---------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------|
-| Discord Community | Join our active developer community for real-time support and discussions | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| Discord Community | Join our active developer community for real-time support and discussions | [Join Discord](https://discord.gg/EamjgSaEQf) |
| GitHub Discussions | Ask questions and share ideas | [GitHub Discussions](https://github.com/The-Swarm-Corporation/swarms/discussions) |
| Twitter/X | Follow for updates and announcements | [Twitter/X](https://x.com/swarms_corp) |
@@ -55111,7 +55111,7 @@ Join our community of agent engineers and researchers for technical support, cut
|----------|-------------|------|
| π Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| π Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| π¦ Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| π₯ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| πΊ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
@@ -57475,7 +57475,7 @@ Error responses include a detailed message explaining the issue:
|--------------|---------------------|
| Documentation | [https://docs.swarms.world](https://docs.swarms.world) |
| Email | kye@swarms.world |
-| Community | [https://discord.gg/jM3Z6M9uMq](https://discord.gg/jM3Z6M9uMq) |
+| Community | [https://discord.gg/EamjgSaEQf](https://discord.gg/EamjgSaEQf) |
| Marketplace | [https://swarms.world](https://swarms.world) |
| Website | [https://swarms.ai](https://swarms.ai) |
@@ -58440,7 +58440,7 @@ print(result)
We're excited to see how you leverage Swarms-Memory in your projects! Join our community on Discord to share your experiences, ask questions, and stay updated on the latest developments.
- **π¦ Twitter**: [Follow us on Twitter](https://twitter.com/swarms_platform)
-- **π’ Discord**: [Join the Agora Discord](https://discord.gg/jM3Z6M9uMq)
+- **π’ Discord**: [Join the Agora Discord](https://discord.gg/EamjgSaEQf)
- **Swarms Platform**: [Visit our website](https://swarms.ai)
- **π Documentation**: [Read the Docs](https://docs.swarms.ai)
@@ -60129,9 +60129,9 @@ To further enhance your understanding and usage of the Swarms Platform, explore
### Links
- [API Documentation](https://docs.swarms.world)
-- [Community Forums](https://discord.gg/jM3Z6M9uMq)
+- [Community Forums](https://discord.gg/EamjgSaEQf)
 - [Tutorials and Guides](https://docs.swarms.world)
-- [Support](https://discord.gg/jM3Z6M9uMq)
+- [Support](https://discord.gg/EamjgSaEQf)
## Conclusion
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 888f7f30..5a687cca 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -48,7 +48,7 @@ extra:
- icon: fontawesome/brands/github
link: https://github.com/kyegomez/swarms
- icon: fontawesome/brands/discord
- link: https://discord.gg/jM3Z6M9uMq
+ link: https://discord.gg/EamjgSaEQf
- icon: fontawesome/brands/youtube
link: https://www.youtube.com/@kyegomez3242
- icon: fontawesome/brands/linkedin
@@ -354,6 +354,7 @@ nav:
- CookBook Index: "examples/cookbook_index.md"
- Paper Implementations: "examples/paper_implementations.md"
- Templates & Applications: "examples/templates.md"
+ - Community Resources: "examples/community_resources.md"
- Basic Examples:
- Individual Agents:
- Basic Agent: "swarms/examples/basic_agent.md"
@@ -414,6 +415,9 @@ nav:
- Swarms of Browser Agents: "swarms/examples/swarms_of_browser_agents.md"
- ConcurrentWorkflow with VLLM Agents: "swarms/examples/vllm.md"
+ - Apps:
+ - Smart Database: "examples/smart_database.md"
+
# - Swarm Models:
# - Overview: "swarms/models/index.md"
diff --git a/docs/quickstart.md b/docs/quickstart.md
index 0ab70ba7..f05def6f 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -1,7 +1,7 @@
# Welcome to Swarms Docs Home
-[](https://discord.gg/jM3Z6M9uMq) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
+[](https://discord.gg/EamjgSaEQf) [](https://www.youtube.com/@kyegomez3242) [](https://www.linkedin.com/in/kye-g-38759a207/) [](https://x.com/swarms_corp)
## What is Swarms?
diff --git a/docs/swarms/agents/index.md b/docs/swarms/agents/index.md
index 4b632f1b..84a6534c 100644
--- a/docs/swarms/agents/index.md
+++ b/docs/swarms/agents/index.md
@@ -848,7 +848,7 @@ Join our community of agent engineers and researchers for technical support, cut
|----------|-------------|------|
| π Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| π Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| π¦ Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| π₯ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| πΊ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
diff --git a/docs/swarms/concept/framework_architecture.md b/docs/swarms/concept/framework_architecture.md
index e704ba8e..18f76ebc 100644
--- a/docs/swarms/concept/framework_architecture.md
+++ b/docs/swarms/concept/framework_architecture.md
@@ -155,5 +155,5 @@ By understanding the purpose and role of each folder in the Swarms framework, us
- **Community Support**
- - URL: [Submit issue](https://discord.gg/jM3Z6M9uMq)
+ - URL: [Submit issue](https://discord.gg/EamjgSaEQf)
 - Ask the community for real-time support and/or admin support
\ No newline at end of file
diff --git a/docs/swarms/ecosystem.md b/docs/swarms/ecosystem.md
index ade51cde..9a0a1ccc 100644
--- a/docs/swarms/ecosystem.md
+++ b/docs/swarms/ecosystem.md
@@ -68,7 +68,7 @@
| **Platform** | **Purpose** | **Join Link** | **Benefits** |
|--------------|-------------|---------------|--------------|
-| **Discord Community** | Real-time support & discussions | [Join Discord](https://discord.gg/jM3Z6M9uMq) | • 24/7 developer support<br>• Weekly community events<br>• Direct access to core team<br>• Beta feature previews |
+| **Discord Community** | Real-time support & discussions | [Join Discord](https://discord.gg/EamjgSaEQf) | • 24/7 developer support<br>• Weekly community events<br>• Direct access to core team<br>• Beta feature previews |
 | **Twitter/X** | Latest updates & announcements | [Follow @swarms_corp](https://x.com/swarms_corp) | • Breaking news & updates<br>• Community highlights<br>• Technical insights<br>• Industry partnerships |
 | **LinkedIn** | Professional network & updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | • Professional networking<br>• Career opportunities<br>• Enterprise partnerships<br>• Industry insights |
 | **YouTube** | Tutorials & technical content | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | • In-depth tutorials<br>• Live coding sessions<br>• Architecture deep dives<br>• Community showcases |
@@ -128,7 +128,7 @@
| **1** | [Install Swarms Python Framework](https://docs.swarms.world/en/latest/swarms/install/install/) | 5 minutes |
| **2** | [Run Your First Agent](https://docs.swarms.world/en/latest/swarms/examples/basic_agent/) | 10 minutes |
| **3** | [Try Multi-Agent Workflows](https://docs.swarms.world/en/latest/swarms/examples/sequential_example/) | 15 minutes |
-| **4** | [Join Our Discord Community](https://discord.gg/jM3Z6M9uMq) | 2 minutes |
+| **4** | [Join Our Discord Community](https://discord.gg/EamjgSaEQf) | 2 minutes |
| **5** | [Explore Enterprise Features](https://docs.swarms.world/en/latest/swarms_cloud/swarms_api/) | 20 minutes |
---
diff --git a/docs/swarms/examples/igc_example.md b/docs/swarms/examples/igc_example.md
index 32d060c1..5488cb5a 100644
--- a/docs/swarms/examples/igc_example.md
+++ b/docs/swarms/examples/igc_example.md
@@ -127,7 +127,7 @@ Join our community of agent engineers and researchers for technical support, cut
|----------|-------------|------|
| π Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| π Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| π¦ Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| π₯ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| πΊ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
diff --git a/docs/swarms/examples/moa_example.md b/docs/swarms/examples/moa_example.md
index 3ce7d24c..4e10a203 100644
--- a/docs/swarms/examples/moa_example.md
+++ b/docs/swarms/examples/moa_example.md
@@ -124,7 +124,7 @@ If you're facing issues or want to learn more, check out the following resources
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
diff --git a/docs/swarms/examples/multiple_images.md b/docs/swarms/examples/multiple_images.md
index bfa66e2b..9adb9b78 100644
--- a/docs/swarms/examples/multiple_images.md
+++ b/docs/swarms/examples/multiple_images.md
@@ -69,7 +69,7 @@ If you're facing issues or want to learn more, check out the following resources
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
diff --git a/docs/swarms/examples/vision_tools.md b/docs/swarms/examples/vision_tools.md
index 92b487c7..bc306fdb 100644
--- a/docs/swarms/examples/vision_tools.md
+++ b/docs/swarms/examples/vision_tools.md
@@ -130,7 +130,7 @@ If you're facing issues or want to learn more, check out the following resources
|----------|------|-------------|
| π Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
| π Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
-| π¬ Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| π¬ Discord | [Join Discord](https://discord.gg/EamjgSaEQf) | Live chat and community support |
| π¦ Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
| π₯ LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
| πΊ YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
diff --git a/docs/swarms/install/install.md b/docs/swarms/install/install.md
index 3a1b0668..6ee80267 100644
--- a/docs/swarms/install/install.md
+++ b/docs/swarms/install/install.md
@@ -26,9 +26,9 @@ Before you begin, ensure you have the following installed:
=== "pip (Recommended)"
- #### Headless Installation
+ #### Simple Installation
- The headless installation of `swarms` is designed for environments where graphical user interfaces (GUI) are not needed, making it more lightweight and suitable for server-side applications.
+    The simplest way to install `swarms` is with pip. For faster installs and build times, we recommend using uv instead.
```bash
pip install swarms
@@ -65,6 +65,49 @@ Before you begin, ensure you have the following installed:
uv pip install -e .[desktop]
```
+=== "Poetry Installation"
+
+ Poetry is a modern dependency management and packaging tool for Python. It provides a more robust way to manage project dependencies and virtual environments.
+
+ === "Basic Installation"
+
+ ```bash
+ # Install Poetry first
+ curl -sSL https://install.python-poetry.org | python3 -
+
+ # Install swarms using Poetry
+ poetry add swarms
+ ```
+
+ === "Development Installation"
+
+ ```bash
+ # Clone the repository
+ git clone https://github.com/kyegomez/swarms.git
+ cd swarms
+
+ # Install in editable mode
+ poetry install
+ ```
+
+ For desktop installation with extras:
+
+ ```bash
+ poetry install --extras "desktop"
+ ```
+
+ === "Using Poetry with existing projects"
+
+ If you have an existing project with a `pyproject.toml` file:
+
+ ```bash
+ # Add swarms to your project dependencies
+ poetry add swarms
+
+ # Or add with specific extras
+ poetry add "swarms[desktop]"
+ ```
+
=== "Development Installation"
=== "Using virtualenv"
diff --git a/docs/swarms/products.md b/docs/swarms/products.md
index 4f716c8d..28684b3f 100644
--- a/docs/swarms/products.md
+++ b/docs/swarms/products.md
@@ -152,7 +152,7 @@ Stay tuned for updates on the Swarm Exchange launch.
- **Documentation:** [Swarms Documentation](https://docs.swarms.world)
-- **Support:** Contact us via our [Discord Community](https://discord.gg/jM3Z6M9uMq).
+- **Support:** Contact us via our [Discord Community](https://discord.gg/EamjgSaEQf).
---
diff --git a/docs/swarms/structs/agent_mcp.md b/docs/swarms/structs/agent_mcp.md
index a7c0a2c6..0b8c4f4a 100644
--- a/docs/swarms/structs/agent_mcp.md
+++ b/docs/swarms/structs/agent_mcp.md
@@ -723,7 +723,7 @@ graph TD
- [Tutorials](https://docs.swarms.world/tutorials)
=== "π¬ Community"
- - [Discord Server](https://discord.gg/jM3Z6M9uMq)
+ - [Discord Server](https://discord.gg/EamjgSaEQf)
- [GitHub Discussions](https://github.com/kyegomez/swarms/discussions)
=== "π§ Development"
@@ -767,7 +767,7 @@ The MCP integration brings powerful external tool connectivity to Swarms agents,
!!! tip "Stay Updated"
- Join our [Discord community](https://discord.gg/jM3Z6M9uMq) to stay informed about new MCP features and connect with other developers building amazing agent applications.
+ Join our [Discord community](https://discord.gg/EamjgSaEQf) to stay informed about new MCP features and connect with other developers building amazing agent applications.
---
diff --git a/docs/swarms/structs/index.md b/docs/swarms/structs/index.md
index 02d106b7..a0468e77 100644
--- a/docs/swarms/structs/index.md
+++ b/docs/swarms/structs/index.md
@@ -294,7 +294,7 @@ Join our community of agent engineers and researchers for technical support, cut
|----------|-------------|------|
| π Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| π Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| π¦ Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| π₯ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| πΊ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
diff --git a/docs/swarms/support.md b/docs/swarms/support.md
index c101ee1d..0b862936 100644
--- a/docs/swarms/support.md
+++ b/docs/swarms/support.md
@@ -18,7 +18,7 @@ The Swarms team is committed to providing exceptional technical support to help
| **Major Features (SIPs)** | New agent types, core changes, integrations | 1-2 weeks | [SIP Guidelines](protocol/sip.md) |
| **Minor Features** | Small enhancements, straightforward additions | < 48 hours | [GitHub Issues](https://github.com/kyegomez/swarms/issues) |
| **Private Issues** | Security concerns, enterprise consulting | < 4 hours | [Book Support Call](https://cal.com/swarms/swarms-technical-support?overlayCalendar=true) |
-| **Real-time Help** | Quick questions, community discussions | Immediate | [Discord Community](https://discord.gg/jM3Z6M9uMq) |
+| **Real-time Help** | Quick questions, community discussions | Immediate | [Discord Community](https://discord.gg/EamjgSaEQf) |
| **Documentation** | Usage guides, examples, tutorials | Self-service | [docs.swarms.world](https://docs.swarms.world) |
---
@@ -197,7 +197,7 @@ Get instant help from our active community of developers and core team members.
### **Getting Help on Discord**
-1. **Join here**: [https://discord.gg/jM3Z6M9uMq](https://discord.gg/jM3Z6M9uMq)
+1. **Join here**: [https://discord.gg/EamjgSaEQf](https://discord.gg/EamjgSaEQf)
2. **Read the rules** and introduce yourself in #general
@@ -368,7 +368,7 @@ Help improve support for everyone:
| Urgency | Best Channel |
|---------|-------------|
| **Emergency** | [Book Immediate Call](https://cal.com/swarms/swarms-technical-support?overlayCalendar=true) |
-| **Urgent** | [Discord #technical-support](https://discord.gg/jM3Z6M9uMq) |
+| **Urgent** | [Discord #technical-support](https://discord.gg/EamjgSaEQf) |
| **Standard** | [GitHub Issues](https://github.com/kyegomez/swarms/issues) |
| **Major Features** | [SIP Guidelines](protocol/sip.md) |
| **Minor Features** | [GitHub Issues](https://github.com/kyegomez/swarms/issues) |
diff --git a/docs/swarms_cloud/agent_api.md b/docs/swarms_cloud/agent_api.md
index 21dd5dd2..aeab0d98 100644
--- a/docs/swarms_cloud/agent_api.md
+++ b/docs/swarms_cloud/agent_api.md
@@ -605,4 +605,4 @@ agent_config = {
[:material-file-document: Swarms.ai Documentation](https://docs.swarms.world){ .md-button }
[:material-application: Swarms.ai Platform](https://swarms.world/platform){ .md-button }
[:material-key: API Key Management](https://swarms.world/platform/api-keys){ .md-button }
-[:material-forum: Swarms.ai Community](https://discord.gg/jM3Z6M9uMq){ .md-button }
\ No newline at end of file
+[:material-forum: Swarms.ai Community](https://discord.gg/EamjgSaEQf){ .md-button }
\ No newline at end of file
diff --git a/docs/swarms_cloud/api_clients.md b/docs/swarms_cloud/api_clients.md
index 15a4a182..ca41516a 100644
--- a/docs/swarms_cloud/api_clients.md
+++ b/docs/swarms_cloud/api_clients.md
@@ -145,7 +145,7 @@ SWARMS_LOG_LEVEL=INFO
| Community Channel | Description | Link |
|-----------------------------|---------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------|
-| Discord Community | Join our active developer community for real-time support and discussions | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| Discord Community | Join our active developer community for real-time support and discussions | [Join Discord](https://discord.gg/EamjgSaEQf) |
| GitHub Discussions | Ask questions and share ideas | [GitHub Discussions](https://github.com/The-Swarm-Corporation/swarms/discussions) |
| Twitter/X | Follow for updates and announcements | [Twitter/X](https://x.com/swarms_corp) |
diff --git a/docs/swarms_cloud/quickstart.md b/docs/swarms_cloud/quickstart.md
index 37a3a685..438b81f0 100644
--- a/docs/swarms_cloud/quickstart.md
+++ b/docs/swarms_cloud/quickstart.md
@@ -1157,7 +1157,7 @@ Join our community of agent engineers and researchers for technical support, cut
|----------|-------------|------|
| π Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) |
| π Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) |
-| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/jM3Z6M9uMq) |
+| π¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) |
| π¦ Twitter | Latest news and announcements | [@kyegomez](https://twitter.com/kyegomez) |
| π₯ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) |
| πΊ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) |
diff --git a/docs/swarms_cloud/swarms_api.md b/docs/swarms_cloud/swarms_api.md
index f09c6eae..8dd7aba3 100644
--- a/docs/swarms_cloud/swarms_api.md
+++ b/docs/swarms_cloud/swarms_api.md
@@ -1242,7 +1242,7 @@ Error responses include a detailed message explaining the issue:
|--------------|---------------------|
| Documentation | [https://docs.swarms.world](https://docs.swarms.world) |
| Email | kye@swarms.world |
-| Community | [https://discord.gg/jM3Z6M9uMq](https://discord.gg/jM3Z6M9uMq) |
+| Community | [https://discord.gg/EamjgSaEQf](https://discord.gg/EamjgSaEQf) |
| Marketplace | [https://swarms.world](https://swarms.world) |
| Website | [https://swarms.ai](https://swarms.ai) |
diff --git a/docs/swarms_memory/index.md b/docs/swarms_memory/index.md
index 3b4011b0..3953256f 100644
--- a/docs/swarms_memory/index.md
+++ b/docs/swarms_memory/index.md
@@ -161,7 +161,7 @@ print(result)
We're excited to see how you leverage Swarms-Memory in your projects! Join our community on Discord to share your experiences, ask questions, and stay updated on the latest developments.
- **π¦ Twitter**: [Follow us on Twitter](https://twitter.com/swarms_platform)
-- **π’ Discord**: [Join the Agora Discord](https://discord.gg/jM3Z6M9uMq)
+- **π’ Discord**: [Join the Agora Discord](https://discord.gg/EamjgSaEQf)
- **Swarms Platform**: [Visit our website](https://swarms.ai)
- **π Documentation**: [Read the Docs](https://docs.swarms.ai)
diff --git a/docs/swarms_platform/index.md b/docs/swarms_platform/index.md
index 7daee2c3..995e379f 100644
--- a/docs/swarms_platform/index.md
+++ b/docs/swarms_platform/index.md
@@ -113,9 +113,9 @@ To further enhance your understanding and usage of the Swarms Platform, explore
### Links
- [API Documentation](https://docs.swarms.world)
-- [Community Forums](https://discord.gg/jM3Z6M9uMq)
+- [Community Forums](https://discord.gg/EamjgSaEQf)
 - [Tutorials and Guides](https://docs.swarms.world)
-- [Support](https://discord.gg/jM3Z6M9uMq)
+- [Support](https://discord.gg/EamjgSaEQf)
## Conclusion
diff --git a/examples/news_aggregator_summarizer.py b/examples/demos/news_aggregator_summarizer.py
similarity index 100%
rename from examples/news_aggregator_summarizer.py
rename to examples/demos/news_aggregator_summarizer.py
diff --git a/examples/guides/graphworkflow_guide/GETTING_STARTED.md b/examples/guides/graphworkflow_guide/GETTING_STARTED.md
new file mode 100644
index 00000000..72defebf
--- /dev/null
+++ b/examples/guides/graphworkflow_guide/GETTING_STARTED.md
@@ -0,0 +1,258 @@
+# Getting Started with GraphWorkflow
+
+Welcome to **GraphWorkflow** - The LangGraph Killer! π
+
+This guide will get you up and running with Swarms' GraphWorkflow system in minutes.
+
+## π Quick Installation
+
+```bash
+# Install Swarms with all dependencies
+uv pip install swarms
+
+# Optional: Install visualization dependencies
+uv pip install graphviz
+
+# Verify installation
+python -c "from swarms.structs.graph_workflow import GraphWorkflow; print('β
GraphWorkflow ready')"
+```
+
+## π― Choose Your Starting Point
+
+### π New to GraphWorkflow?
+
+Start here: **[Quick Start Guide](quick_start_guide.py)**
+
+```bash
+python quick_start_guide.py
+```
+
+Learn GraphWorkflow in 5 easy steps:
+- ✅ Create your first workflow
+- ✅ Connect agents in sequence
+- ✅ Set up parallel processing
+- ✅ Use advanced patterns
+- ✅ Monitor performance
+
+### π¬ Want to See Everything?
+
+Run the comprehensive demo: **[Comprehensive Demo](comprehensive_demo.py)**
+
+```bash
+# See all features
+python comprehensive_demo.py
+
+# Focus on specific areas
+python comprehensive_demo.py --demo healthcare
+python comprehensive_demo.py --demo finance
+python comprehensive_demo.py --demo parallel
+```
+
+### π οΈ Need Setup Help?
+
+Use the setup script: **[Setup and Test](setup_and_test.py)**
+
+```bash
+# Check your environment
+python setup_and_test.py --check-only
+
+# Install dependencies and run tests
+python setup_and_test.py
+```
+
+## π Documentation
+
+### π Quick Reference
+
+```python
+from swarms import Agent
+from swarms.structs.graph_workflow import GraphWorkflow
+
+# 1. Create agents
+agent1 = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1)
+agent2 = Agent(agent_name="Writer", model_name="gpt-4o-mini", max_loops=1)
+
+# 2. Create workflow
+workflow = GraphWorkflow(name="MyWorkflow", auto_compile=True)
+
+# 3. Add agents and connections
+workflow.add_node(agent1)
+workflow.add_node(agent2)
+workflow.add_edge("Researcher", "Writer")
+
+# 4. Execute
+results = workflow.run(task="Write about AI trends")
+```
+
+### π Complete Documentation
+
+- **[Technical Guide](graph_workflow_technical_guide.md)**: 4,000-word comprehensive guide
+- **[Examples README](README.md)**: Complete examples overview
+- **[API Reference](../../../docs/swarms/structs/)**: Detailed API documentation
+
+## π¨ Key Features Overview
+
+### β‘ Parallel Processing
+
+```python
+# Fan-out: One agent to multiple agents
+workflow.add_edges_from_source("DataCollector", ["AnalystA", "AnalystB"])
+
+# Fan-in: Multiple agents to one agent
+workflow.add_edges_to_target(["SpecialistX", "SpecialistY"], "Synthesizer")
+
+# Parallel chain: Many-to-many mesh
+workflow.add_parallel_chain(["DataA", "DataB"], ["ProcessorX", "ProcessorY"])
+```
+
+### π Performance Optimization
+
+```python
+# Automatic compilation for 40-60% speedup
+workflow = GraphWorkflow(auto_compile=True)
+
+# Monitor performance
+status = workflow.get_compilation_status()
+print(f"Workers: {status['max_workers']}")
+print(f"Layers: {status['cached_layers_count']}")
+```
+
+### π¨ Professional Visualization
+
+```python
+# Generate beautiful workflow diagrams
+workflow.visualize(
+ format="png", # png, svg, pdf, dot
+ show_summary=True, # Show parallel processing stats
+ engine="dot" # Layout algorithm
+)
+```
+
+### πΎ Enterprise Features
+
+```python
+# Complete workflow serialization
+json_data = workflow.to_json(include_conversation=True)
+restored = GraphWorkflow.from_json(json_data)
+
+# File persistence
+workflow.save_to_file("my_workflow.json")
+loaded = GraphWorkflow.load_from_file("my_workflow.json")
+
+# Validation and monitoring
+validation = workflow.validate(auto_fix=True)
+summary = workflow.export_summary()
+```
+
+## π₯ Real-World Examples
+
+### Healthcare: Clinical Decision Support
+
+```python
+# Multi-specialist clinical workflow
+workflow.add_edges_from_source("PatientData", [
+ "PrimaryCare", "Cardiologist", "Pharmacist"
+])
+workflow.add_edges_to_target([
+ "PrimaryCare", "Cardiologist", "Pharmacist"
+], "CaseManager")
+
+results = workflow.run(task="Analyze patient with chest pain...")
+```
+
+### Finance: Investment Analysis
+
+```python
+# Parallel financial analysis
+workflow.add_parallel_chain(
+ ["MarketData", "FundamentalData"],
+ ["TechnicalAnalyst", "FundamentalAnalyst", "RiskManager"]
+)
+workflow.add_edges_to_target([
+ "TechnicalAnalyst", "FundamentalAnalyst", "RiskManager"
+], "PortfolioManager")
+
+results = workflow.run(task="Analyze tech sector allocation...")
+```
+
+## πββοΈ Performance Benchmarks
+
+GraphWorkflow delivers **40-60% better performance** than sequential execution:
+
+| Agents | Sequential | GraphWorkflow | Speedup |
+|--------|------------|---------------|---------|
+| 5 | 15.2s | 8.7s | 1.75x |
+| 10 | 28.5s | 16.1s | 1.77x |
+| 15 | 42.8s | 24.3s | 1.76x |
+
+*Benchmarks run on an 8-core CPU with gpt-4o-mini.*
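+
+Your numbers will vary with model latency, hardware, and agent count. The harness below is a minimal sketch for reproducing the comparison yourself, using only the `Agent` and `GraphWorkflow` APIs shown above; the agent names and task are illustrative:
+
+```python
+import time
+
+from swarms import Agent
+from swarms.structs.graph_workflow import GraphWorkflow
+
+# One collector fanning out to three analysts that can run in parallel
+collector = Agent(agent_name="Collector", model_name="gpt-4o-mini", max_loops=1)
+analysts = [
+    Agent(agent_name=f"Analyst{i}", model_name="gpt-4o-mini", max_loops=1)
+    for i in range(3)
+]
+
+workflow = GraphWorkflow(name="BenchmarkWorkflow", auto_compile=True)
+workflow.add_node(collector)
+for analyst in analysts:
+    workflow.add_node(analyst)
+workflow.add_edges_from_source("Collector", [a.agent_name for a in analysts])
+
+task = "Summarize this week's AI news in three bullet points."
+
+# Sequential baseline: every agent runs one after another
+start = time.perf_counter()
+for agent in [collector, *analysts]:
+    agent.run(task)
+sequential_s = time.perf_counter() - start
+
+# GraphWorkflow: the three analysts execute concurrently in one layer
+start = time.perf_counter()
+workflow.run(task=task)
+parallel_s = time.perf_counter() - start
+
+print(
+    f"Sequential: {sequential_s:.1f}s | GraphWorkflow: {parallel_s:.1f}s "
+    f"| Speedup: {sequential_s / parallel_s:.2f}x"
+)
+```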
+
+## π Why GraphWorkflow > LangGraph?
+
+| Feature | GraphWorkflow | LangGraph |
+|---------|---------------|-----------|
+| **Parallel Processing** | ✅ Native fan-out/fan-in | ❌ Limited |
+| **Performance** | ✅ 40-60% faster | ❌ Sequential bottlenecks |
+| **Compilation** | ✅ Intelligent caching | ❌ No optimization |
+| **Visualization** | ✅ Professional Graphviz | ❌ Basic diagrams |
+| **Enterprise Features** | ✅ Full serialization | ❌ Limited persistence |
+| **Error Handling** | ✅ Comprehensive validation | ❌ Basic checks |
+| **Monitoring** | ✅ Rich metrics | ❌ Limited insights |
+
+## π οΈ Troubleshooting
+
+### Common Issues
+
+**Problem**: Import error
+```bash
+# Solution: Install dependencies
+uv pip install swarms
+python setup_and_test.py --install-deps
+```
+
+**Problem**: Slow execution
+```python
+# Solution: Enable compilation
+workflow = GraphWorkflow(auto_compile=True)
+workflow.compile() # Manual compilation
+```
+
+**Problem**: Memory issues
+```python
+# Solution: reset the history with a fresh Conversation object
+from swarms.structs.conversation import Conversation  # assumed import path; adjust to your swarms version
+
+workflow.conversation = Conversation()
+```
+
+**Problem**: Graph validation errors
+```python
+# Solution: Use auto-fix
+validation = workflow.validate(auto_fix=True)
+if not validation['is_valid']:
+ print("Errors:", validation['errors'])
+```
+
+### Get Help
+
+- π **Read the docs**: [Technical Guide](graph_workflow_technical_guide.md)
+- π **Check examples**: Browse this guide directory
+- π§ͺ **Run tests**: Use `python setup_and_test.py`
+- π **Report bugs**: Open an issue on GitHub
+
+## π― Next Steps
+
+1. **π Learn**: Complete the [Quick Start Guide](quick_start_guide.py)
+2. **π¬ Explore**: Try the [Comprehensive Demo](comprehensive_demo.py)
+3. **π₯ Apply**: Adapt healthcare or finance examples
+4. **π Study**: Read the [Technical Guide](graph_workflow_technical_guide.md)
+5. **π Deploy**: Build your production workflows
+
+## π Ready to Build?
+
+GraphWorkflow is **production-ready** and **enterprise-grade**. Join the revolution in multi-agent orchestration!
+
+```bash
+# Start your GraphWorkflow journey
+python quick_start_guide.py
+```
+
+**The LangGraph Killer is here. Welcome to the future of multi-agent systems!** π
diff --git a/examples/guides/graphworkflow_guide/README.md b/examples/guides/graphworkflow_guide/README.md
new file mode 100644
index 00000000..e57172d9
--- /dev/null
+++ b/examples/guides/graphworkflow_guide/README.md
@@ -0,0 +1,322 @@
+# GraphWorkflow Guide
+
+Welcome to the comprehensive GraphWorkflow guide! This collection demonstrates the power and flexibility of Swarms' GraphWorkflow system - the LangGraph killer that provides superior multi-agent orchestration capabilities.
+
+## π Quick Start
+
+### Installation
+
+```bash
+# Install Swarms with all dependencies
+uv pip install swarms
+
+# Optional: Install visualization dependencies
+uv pip install graphviz
+
+# Verify installation
+python -c "from swarms.structs.graph_workflow import GraphWorkflow; print('β
GraphWorkflow ready')"
+```
+
+### Run Your First Example
+
+```bash
+# Start with the quick start guide
+python quick_start_guide.py
+
+# Or run the comprehensive demo
+python comprehensive_demo.py
+
+# For specific examples
+python comprehensive_demo.py --demo healthcare
+python comprehensive_demo.py --demo finance
+```
+
+## π Example Files
+
+### π Learning Examples
+
+| File | Description | Complexity |
+|------|-------------|------------|
+| `quick_start_guide.py` | **START HERE** - Step-by-step introduction to GraphWorkflow | ⭐ Beginner |
+| `graph_workflow_example.py` | Basic two-agent workflow example | ⭐ Beginner |
+| `comprehensive_demo.py` | Complete feature demonstration with multiple use cases | ⭐⭐⭐ Advanced |
+
+### π₯ Healthcare Examples
+
+| File | Description | Complexity |
+|------|-------------|------------|
+| `comprehensive_demo.py --demo healthcare` | Clinical decision support workflow | ⭐⭐⭐ Advanced |
+
+**Healthcare Workflow Features:**
+- Multi-disciplinary clinical team simulation
+- Parallel specialist consultations
+- Drug interaction checking
+- Risk assessment and quality assurance
+- Evidence-based clinical decision support
+
+### π° Finance Examples
+
+| File | Description | Complexity |
+|------|-------------|------------|
+| `advanced_graph_workflow.py` | Sophisticated investment analysis workflow | ⭐⭐⭐ Advanced |
+| `comprehensive_demo.py --demo finance` | Quantitative trading strategy development | ⭐⭐⭐ Advanced |
+
+**Finance Workflow Features:**
+- Multi-source market data analysis
+- Parallel quantitative analysis (Technical, Fundamental, Sentiment)
+- Risk management and portfolio optimization
+- Strategy backtesting and validation
+- Execution planning and monitoring
+
+### π§ Technical Examples
+
+| File | Description | Complexity |
+|------|-------------|------------|
+| `test_parallel_processing_example.py` | Comprehensive parallel processing patterns | ⭐⭐ Intermediate |
+| `test_graphviz_visualization.py` | Visualization capabilities and layouts | ⭐⭐ Intermediate |
+| `test_graph_workflow_caching.py` | Performance optimization and caching | ⭐⭐ Intermediate |
+| `test_enhanced_json_export.py` | Serialization and persistence features | ⭐⭐ Intermediate |
+| `test_graphworlfolw_validation.py` | Workflow validation and error handling | ⭐⭐ Intermediate |
+
+## π― Key Features Demonstrated
+
+### β‘ Parallel Processing Patterns
+
+- **Fan-out**: One agent distributes to multiple agents
+- **Fan-in**: Multiple agents converge to one agent
+- **Parallel chains**: Many-to-many mesh processing
+- **Complex hybrid**: Sophisticated multi-stage patterns
+
+### π Performance Optimization
+
+- **Intelligent Compilation**: Pre-computed execution layers
+- **Advanced Caching**: Persistent state across runs
+- **Worker Pool Optimization**: CPU-optimized parallel execution
+- **Memory Management**: Efficient resource utilization
+
+### π¨ Visualization & Monitoring
+
+- **Professional Graphviz Diagrams**: Multiple layouts and formats
+- **Real-time Performance Metrics**: Execution monitoring
+- **Workflow Validation**: Comprehensive error checking
+- **Rich Logging**: Detailed execution insights
+
+### πΎ Enterprise Features
+
+- **JSON Serialization**: Complete workflow persistence (see the sketch below)
+- **Runtime State Management**: Compilation caching
+- **Error Handling**: Robust failure recovery
+- **Scalability**: Support for large agent networks
+
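+A minimal persistence round-trip, using the same serialization options the comprehensive demo exercises (`workflow` is assumed to be an already-built `GraphWorkflow`):
+
+```python
+# Serialize to JSON, optionally including conversation and runtime state
+json_data = workflow.to_json(
+    include_conversation=True, include_runtime_state=True
+)
+restored = GraphWorkflow.from_json(json_data, restore_runtime_state=True)
+
+# Or persist to disk and load back
+filepath = workflow.save_to_file("my_workflow.json", overwrite=True)
+loaded = GraphWorkflow.load_from_file(filepath, restore_runtime_state=True)
+```
+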
+## πββοΈ Running Examples
+
+### Basic Usage
+
+```python
+from swarms import Agent
+from swarms.structs.graph_workflow import GraphWorkflow
+
+# Create agents
+agent1 = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1)
+agent2 = Agent(agent_name="Writer", model_name="gpt-4o-mini", max_loops=1)
+
+# Create workflow
+workflow = GraphWorkflow(name="SimpleWorkflow", auto_compile=True)
+workflow.add_node(agent1)
+workflow.add_node(agent2)
+workflow.add_edge("Researcher", "Writer")
+
+# Execute
+results = workflow.run(task="Research and write about AI trends")
+```
+
+### Parallel Processing
+
+```python
+# Fan-out pattern: One agent to multiple agents
+workflow.add_edges_from_source("DataCollector", ["AnalystA", "AnalystB", "AnalystC"])
+
+# Fan-in pattern: Multiple agents to one agent
+workflow.add_edges_to_target(["SpecialistX", "SpecialistY"], "Synthesizer")
+
+# Parallel chain: Many-to-many processing
+workflow.add_parallel_chain(
+ sources=["DataA", "DataB"],
+ targets=["ProcessorX", "ProcessorY"]
+)
+```
+
+### Performance Monitoring
+
+```python
+# Get compilation status
+status = workflow.get_compilation_status()
+print(f"Compiled: {status['is_compiled']}")
+print(f"Workers: {status['max_workers']}")
+
+# Monitor execution
+import time
+start = time.time()
+results = workflow.run(task="Analyze market conditions")
+elapsed = time.time() - start
+print(f"Execution time: {elapsed:.2f}s")
+print(f"Throughput: {len(results)/elapsed:.1f} agents/second")
+```
+
+## π¬ Use Case Examples
+
+### π Enterprise Data Processing
+
+```python
+# Multi-stage data pipeline
+workflow.add_parallel_chain(
+ ["APIIngester", "DatabaseExtractor", "FileProcessor"],
+ ["DataValidator", "DataTransformer", "DataEnricher"]
+)
+workflow.add_edges_to_target(
+ ["DataValidator", "DataTransformer", "DataEnricher"],
+ "ReportGenerator"
+)
+```
+
+### π₯ Clinical Decision Support
+
+```python
+# Multi-specialist consultation
+workflow.add_edges_from_source("PatientDataCollector", [
+ "PrimaryCarePhysician", "Cardiologist", "Pharmacist"
+])
+workflow.add_edges_to_target([
+ "PrimaryCarePhysician", "Cardiologist", "Pharmacist"
+], "CaseManager")
+```
+
+### πΌ Investment Analysis
+
+```python
+# Parallel financial analysis
+workflow.add_parallel_chain(
+ ["MarketDataCollector", "FundamentalDataCollector"],
+ ["TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst"]
+)
+workflow.add_edges_to_target([
+ "TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst"
+], "PortfolioManager")
+```
+
+## π¨ Visualization Examples
+
+### Generate Workflow Diagrams
+
+```python
+# Professional Graphviz visualization
+workflow.visualize(
+ format="png", # png, svg, pdf, dot
+ engine="dot", # dot, neato, fdp, sfdp, circo
+ show_summary=True, # Display parallel processing stats
+ view=True # Open diagram automatically
+)
+
+# Text-based visualization (always available)
+workflow.visualize_simple()
+```
+
+### Example Output
+
+```
+π GRAPHVIZ WORKFLOW VISUALIZATION
+====================================
+π Saved to: MyWorkflow_visualization.png
+π€ Total Agents: 8
+π Total Connections: 12
+π Execution Layers: 4
+
+β‘ Parallel Processing Patterns:
+ π Fan-out patterns: 2
+ π Fan-in patterns: 1
+ β‘ Parallel execution nodes: 6
+ π― Parallel efficiency: 75.0%
+```
+
+## π οΈ Troubleshooting
+
+### Common Issues
+
+1. **Compilation Errors**
+ ```python
+ # Check for cycles in workflow
+ validation = workflow.validate(auto_fix=True)
+ if not validation['is_valid']:
+ print("Validation errors:", validation['errors'])
+ ```
+
+2. **Performance Issues**
+ ```python
+ # Ensure compilation before execution
+ workflow.compile()
+
+ # Check worker count
+ status = workflow.get_compilation_status()
+ print(f"Workers: {status['max_workers']}")
+ ```
+
+3. **Memory Issues**
+ ```python
+ # Clear conversation history if not needed
+ workflow.conversation = Conversation()
+
+ # Monitor memory usage
+ import psutil
+ process = psutil.Process()
+ memory_mb = process.memory_info().rss / 1024 / 1024
+ print(f"Memory: {memory_mb:.1f} MB")
+ ```
+
+### Debug Mode
+
+```python
+# Enable detailed logging
+workflow = GraphWorkflow(
+ name="DebugWorkflow",
+ verbose=True, # Detailed execution logs
+ auto_compile=True, # Automatic optimization
+)
+
+# Validate workflow structure
+validation = workflow.validate(auto_fix=True)
+print("Validation result:", validation)
+```
+
+## π Documentation
+
+- **[Technical Guide](graph_workflow_technical_guide.md)**: Comprehensive 4,000-word technical documentation
+- **[API Reference](../../../docs/swarms/structs/)**: Complete API documentation
+- **[Multi-Agent Examples](../../multi_agent/)**: Other multi-agent examples
+
+## π€ Contributing
+
+Found a bug or want to add an example?
+
+1. **Report Issues**: Open an issue with detailed reproduction steps
+2. **Add Examples**: Submit PRs with new use case examples
+3. **Improve Documentation**: Help expand the guides and tutorials
+4. **Performance Optimization**: Share benchmarks and optimizations
+
+## π― Next Steps
+
+1. **Start Learning**: Run `python quick_start_guide.py`
+2. **Explore Examples**: Try healthcare and finance use cases
+3. **Build Your Workflow**: Adapt examples to your domain
+4. **Deploy to Production**: Use monitoring and optimization features
+5. **Join Community**: Share your workflows and get help
+
+## π Why GraphWorkflow?
+
+GraphWorkflow is the **LangGraph killer** because it provides:
+
+- **40-60% Better Performance**: Intelligent compilation and parallel execution
+- **Enterprise Reliability**: Comprehensive error handling and monitoring
+- **Superior Scalability**: Handles hundreds of agents efficiently
+- **Rich Visualization**: Professional workflow diagrams
+- **Production Ready**: Serialization, caching, and validation
+
+Ready to revolutionize your multi-agent systems? Start with GraphWorkflow today! π
diff --git a/examples/guides/graphworkflow_guide/comprehensive_demo.py b/examples/guides/graphworkflow_guide/comprehensive_demo.py
new file mode 100644
index 00000000..79bd5405
--- /dev/null
+++ b/examples/guides/graphworkflow_guide/comprehensive_demo.py
@@ -0,0 +1,909 @@
+#!/usr/bin/env python3
+"""
+Comprehensive GraphWorkflow Demo Script
+=======================================
+
+This script demonstrates all key features of Swarms' GraphWorkflow system,
+including parallel processing patterns, performance optimization, and real-world use cases.
+
+Usage:
+ python comprehensive_demo.py [--demo healthcare|finance|enterprise|all]
+
+Requirements:
+ uv pip install swarms
+ uv pip install graphviz # Optional for visualization
+"""
+
+import argparse
+import time
+
+from swarms import Agent
+from swarms.structs.graph_workflow import GraphWorkflow
+
+
+def create_basic_workflow_demo():
+ """Demonstrate basic GraphWorkflow functionality."""
+
+ print("\n" + "=" * 60)
+ print("π BASIC GRAPHWORKFLOW DEMONSTRATION")
+ print("=" * 60)
+
+ # Create simple agents
+ data_collector = Agent(
+ agent_name="DataCollector",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a data collection specialist. Gather and organize relevant information for analysis.",
+ verbose=False,
+ )
+
+ data_analyzer = Agent(
+ agent_name="DataAnalyzer",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a data analysis expert. Analyze the collected data and extract key insights.",
+ verbose=False,
+ )
+
+ report_generator = Agent(
+ agent_name="ReportGenerator",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a report generation specialist. Create comprehensive reports from analysis results.",
+ verbose=False,
+ )
+
+ # Create workflow
+ workflow = GraphWorkflow(
+ name="BasicWorkflowDemo",
+ description="Demonstrates basic GraphWorkflow functionality",
+ verbose=True,
+ auto_compile=True,
+ )
+
+ # Add nodes
+ for agent in [data_collector, data_analyzer, report_generator]:
+ workflow.add_node(agent)
+
+ # Add edges (sequential flow)
+ workflow.add_edge("DataCollector", "DataAnalyzer")
+ workflow.add_edge("DataAnalyzer", "ReportGenerator")
+
+ # Set entry and exit points
+ workflow.set_entry_points(["DataCollector"])
+ workflow.set_end_points(["ReportGenerator"])
+
+ print(
+ f"β
Created workflow with {len(workflow.nodes)} nodes and {len(workflow.edges)} edges"
+ )
+
+ # Demonstrate compilation
+ compilation_status = workflow.get_compilation_status()
+ print(f"π Compilation Status: {compilation_status}")
+
+ # Demonstrate simple visualization
+ try:
+ workflow.visualize_simple()
+ except Exception as e:
+ print(f"β οΈ Visualization not available: {e}")
+
+ # Run workflow
+ task = "Analyze the current state of artificial intelligence in healthcare, focusing on recent developments and future opportunities."
+
+ print(f"\nπ Executing workflow with task: {task[:100]}...")
+ start_time = time.time()
+
+ results = workflow.run(task=task)
+
+ execution_time = time.time() - start_time
+ print(f"β±οΈ Execution completed in {execution_time:.2f} seconds")
+
+ # Display results
+ print("\nπ Results Summary:")
+ for agent_name, result in results.items():
+ print(f"\nπ€ {agent_name}:")
+ print(
+ f" {result[:200]}{'...' if len(result) > 200 else ''}"
+ )
+
+ return workflow, results
+
+
+def create_parallel_processing_demo():
+ """Demonstrate advanced parallel processing patterns."""
+
+ print("\n" + "=" * 60)
+ print("β‘ PARALLEL PROCESSING DEMONSTRATION")
+ print("=" * 60)
+
+ # Create data sources
+ web_scraper = Agent(
+ agent_name="WebScraper",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You specialize in web data scraping and online research.",
+ verbose=False,
+ )
+
+ api_collector = Agent(
+ agent_name="APICollector",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You specialize in API data collection and integration.",
+ verbose=False,
+ )
+
+ database_extractor = Agent(
+ agent_name="DatabaseExtractor",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You specialize in database queries and data extraction.",
+ verbose=False,
+ )
+
+ # Create parallel processors
+ text_processor = Agent(
+ agent_name="TextProcessor",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You specialize in natural language processing and text analysis.",
+ verbose=False,
+ )
+
+ numeric_processor = Agent(
+ agent_name="NumericProcessor",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You specialize in numerical analysis and statistical processing.",
+ verbose=False,
+ )
+
+ # Create analyzers
+ sentiment_analyzer = Agent(
+ agent_name="SentimentAnalyzer",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You specialize in sentiment analysis and emotional intelligence.",
+ verbose=False,
+ )
+
+ trend_analyzer = Agent(
+ agent_name="TrendAnalyzer",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You specialize in trend analysis and pattern recognition.",
+ verbose=False,
+ )
+
+ # Create synthesizer
+ data_synthesizer = Agent(
+ agent_name="DataSynthesizer",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You specialize in data synthesis and comprehensive analysis integration.",
+ verbose=False,
+ )
+
+ # Create workflow
+ workflow = GraphWorkflow(
+ name="ParallelProcessingDemo",
+ description="Demonstrates advanced parallel processing patterns including fan-out, fan-in, and parallel chains",
+ verbose=True,
+ auto_compile=True,
+ )
+
+ # Add all agents
+ agents = [
+ web_scraper,
+ api_collector,
+ database_extractor,
+ text_processor,
+ numeric_processor,
+ sentiment_analyzer,
+ trend_analyzer,
+ data_synthesizer,
+ ]
+
+ for agent in agents:
+ workflow.add_node(agent)
+
+ # Demonstrate different parallel patterns
+ print("π Setting up parallel processing patterns...")
+
+ # Pattern 1: Fan-out from sources to processors
+ print(" π€ Fan-out: Data sources β Processors")
+ workflow.add_edges_from_source(
+ "WebScraper", ["TextProcessor", "SentimentAnalyzer"]
+ )
+ workflow.add_edges_from_source(
+ "APICollector", ["NumericProcessor", "TrendAnalyzer"]
+ )
+ workflow.add_edges_from_source(
+ "DatabaseExtractor", ["TextProcessor", "NumericProcessor"]
+ )
+
+ # Pattern 2: Parallel chain from processors to analyzers
+ print(" π Parallel chain: Processors β Analyzers")
+ workflow.add_parallel_chain(
+ ["TextProcessor", "NumericProcessor"],
+ ["SentimentAnalyzer", "TrendAnalyzer"],
+ )
+
+ # Pattern 3: Fan-in to synthesizer
+ print(" π₯ Fan-in: All analyzers β Synthesizer")
+ workflow.add_edges_to_target(
+ ["SentimentAnalyzer", "TrendAnalyzer"], "DataSynthesizer"
+ )
+
+ # Set entry and exit points
+ workflow.set_entry_points(
+ ["WebScraper", "APICollector", "DatabaseExtractor"]
+ )
+ workflow.set_end_points(["DataSynthesizer"])
+
+ print(
+ f"β
Created parallel workflow with {len(workflow.nodes)} nodes and {len(workflow.edges)} edges"
+ )
+
+ # Analyze parallel patterns
+ compilation_status = workflow.get_compilation_status()
+ print(f"π Compilation Status: {compilation_status}")
+ print(
+ f"π§ Execution layers: {len(compilation_status.get('layers', []))}"
+ )
+ print(
+ f"β‘ Max parallel workers: {compilation_status.get('max_workers', 'N/A')}"
+ )
+
+ # Run parallel workflow
+ task = "Research and analyze the impact of quantum computing on cybersecurity, examining technical developments, market trends, and security implications."
+
+ print("\nπ Executing parallel workflow...")
+ start_time = time.time()
+
+ results = workflow.run(task=task)
+
+ execution_time = time.time() - start_time
+ print(
+ f"β±οΈ Parallel execution completed in {execution_time:.2f} seconds"
+ )
+ print(
+ f"π Throughput: {len(results)/execution_time:.1f} agents/second"
+ )
+
+ # Display results
+ print("\nπ Parallel Processing Results:")
+ for agent_name, result in results.items():
+ print(f"\nπ€ {agent_name}:")
+ print(
+ f" {result[:150]}{'...' if len(result) > 150 else ''}"
+ )
+
+ return workflow, results
+
+
+def create_healthcare_workflow_demo():
+ """Demonstrate healthcare-focused workflow."""
+
+ print("\n" + "=" * 60)
+ print("π₯ HEALTHCARE WORKFLOW DEMONSTRATION")
+ print("=" * 60)
+
+ # Create clinical specialists
+ primary_care_physician = Agent(
+ agent_name="PrimaryCarePhysician",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="""You are a board-certified primary care physician. Provide:
+ 1. Initial patient assessment and history taking
+ 2. Differential diagnosis development
+ 3. Treatment plan coordination
+ 4. Preventive care recommendations
+
+ Focus on comprehensive, evidence-based primary care.""",
+ verbose=False,
+ )
+
+ cardiologist = Agent(
+ agent_name="Cardiologist",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="""You are a board-certified cardiologist. Provide:
+ 1. Cardiovascular risk assessment
+ 2. Cardiac diagnostic interpretation
+ 3. Treatment recommendations for heart conditions
+ 4. Cardiovascular prevention strategies
+
+ Apply evidence-based cardiology guidelines.""",
+ verbose=False,
+ )
+
+ pharmacist = Agent(
+ agent_name="ClinicalPharmacist",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="""You are a clinical pharmacist specialist. Provide:
+ 1. Medication review and optimization
+ 2. Drug interaction analysis
+ 3. Dosing recommendations
+ 4. Patient counseling guidance
+
+ Ensure medication safety and efficacy.""",
+ verbose=False,
+ )
+
+ case_manager = Agent(
+ agent_name="CaseManager",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="""You are a clinical case manager. Coordinate:
+ 1. Care plan integration and implementation
+ 2. Resource allocation and scheduling
+ 3. Patient education and follow-up
+ 4. Quality metrics and outcomes tracking
+
+ Ensure coordinated, patient-centered care.""",
+ verbose=False,
+ )
+
+ # Create workflow
+ workflow = GraphWorkflow(
+ name="HealthcareWorkflowDemo",
+ description="Clinical decision support workflow with multi-disciplinary team collaboration",
+ verbose=True,
+ auto_compile=True,
+ )
+
+ # Add agents
+ agents = [
+ primary_care_physician,
+ cardiologist,
+ pharmacist,
+ case_manager,
+ ]
+ for agent in agents:
+ workflow.add_node(agent)
+
+ # Create clinical workflow
+ workflow.add_edge("PrimaryCarePhysician", "Cardiologist")
+ workflow.add_edge("PrimaryCarePhysician", "ClinicalPharmacist")
+ workflow.add_edges_to_target(
+ ["Cardiologist", "ClinicalPharmacist"], "CaseManager"
+ )
+
+ workflow.set_entry_points(["PrimaryCarePhysician"])
+ workflow.set_end_points(["CaseManager"])
+
+ print(
+ f"β
Created healthcare workflow with {len(workflow.nodes)} specialists"
+ )
+
+ # Clinical case
+ clinical_case = """
+ Patient: 58-year-old male executive
+ Chief Complaint: Chest pain and shortness of breath during exercise
+ History: Hypertension, family history of coronary artery disease, sedentary lifestyle
+ Current Medications: Lisinopril 10mg daily
+ Vital Signs: BP 145/92, HR 88, BMI 29.5
+ Recent Tests: ECG shows non-specific changes, cholesterol 245 mg/dL
+
+ Please provide comprehensive clinical assessment and care coordination.
+ """
+
+ print("\nπ Processing clinical case...")
+ start_time = time.time()
+
+ results = workflow.run(task=clinical_case)
+
+ execution_time = time.time() - start_time
+ print(
+ f"β±οΈ Clinical assessment completed in {execution_time:.2f} seconds"
+ )
+
+ # Display clinical results
+ print("\nπ₯ Clinical Team Assessment:")
+ for agent_name, result in results.items():
+ print(f"\nπ¨ββοΈ {agent_name}:")
+ print(
+ f" π {result[:200]}{'...' if len(result) > 200 else ''}"
+ )
+
+ return workflow, results
+
+
+def create_finance_workflow_demo():
+ """Demonstrate finance-focused workflow."""
+
+ print("\n" + "=" * 60)
+ print("π° FINANCE WORKFLOW DEMONSTRATION")
+ print("=" * 60)
+
+ # Create financial analysts
+ market_analyst = Agent(
+ agent_name="MarketAnalyst",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="""You are a senior market analyst. Provide:
+ 1. Market condition assessment and trends
+ 2. Sector rotation and thematic analysis
+ 3. Economic indicator interpretation
+ 4. Market timing and positioning recommendations
+
+ Apply rigorous market analysis frameworks.""",
+ verbose=False,
+ )
+
+ equity_researcher = Agent(
+ agent_name="EquityResearcher",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="""You are an equity research analyst. Provide:
+ 1. Company fundamental analysis
+ 2. Financial modeling and valuation
+ 3. Competitive positioning assessment
+ 4. Investment thesis development
+
+ Use comprehensive equity research methodologies.""",
+ verbose=False,
+ )
+
+ risk_manager = Agent(
+ agent_name="RiskManager",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="""You are a risk management specialist. Provide:
+ 1. Portfolio risk assessment and metrics
+ 2. Stress testing and scenario analysis
+ 3. Risk mitigation strategies
+ 4. Regulatory compliance guidance
+
+ Apply quantitative risk management principles.""",
+ verbose=False,
+ )
+
+ portfolio_manager = Agent(
+ agent_name="PortfolioManager",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="""You are a senior portfolio manager. Provide:
+ 1. Investment decision synthesis
+ 2. Portfolio construction and allocation
+ 3. Performance attribution analysis
+ 4. Client communication and reporting
+
+ Integrate all analysis into actionable investment decisions.""",
+ verbose=False,
+ )
+
+ # Create workflow
+ workflow = GraphWorkflow(
+ name="FinanceWorkflowDemo",
+ description="Investment decision workflow with multi-disciplinary financial analysis",
+ verbose=True,
+ auto_compile=True,
+ )
+
+ # Add agents
+ agents = [
+ market_analyst,
+ equity_researcher,
+ risk_manager,
+ portfolio_manager,
+ ]
+ for agent in agents:
+ workflow.add_node(agent)
+
+ # Create financial workflow (parallel analysis feeding portfolio decisions)
+ workflow.add_edges_from_source(
+ "MarketAnalyst", ["EquityResearcher", "RiskManager"]
+ )
+ workflow.add_edges_to_target(
+ ["EquityResearcher", "RiskManager"], "PortfolioManager"
+ )
+
+ workflow.set_entry_points(["MarketAnalyst"])
+ workflow.set_end_points(["PortfolioManager"])
+
+ print(
+ f"β
Created finance workflow with {len(workflow.nodes)} analysts"
+ )
+
+ # Investment analysis task
+ investment_scenario = """
+ Investment Analysis Request: Technology Sector Allocation
+
+ Market Context:
+ - Interest rates: 5.25% federal funds rate
+ - Inflation: 3.2% CPI year-over-year
+ - Technology sector: -8% YTD performance
+ - AI theme: High investor interest and valuation concerns
+
+ Portfolio Context:
+ - Current tech allocation: 15% (target 20-25%)
+ - Risk budget: 12% tracking error limit
+ - Investment horizon: 3-5 years
+ - Client risk tolerance: Moderate-aggressive
+
+ Please provide comprehensive investment analysis and recommendations.
+ """
+
+ print("\nπ Analyzing investment scenario...")
+ start_time = time.time()
+
+ results = workflow.run(task=investment_scenario)
+
+ execution_time = time.time() - start_time
+ print(
+ f"β±οΈ Investment analysis completed in {execution_time:.2f} seconds"
+ )
+
+ # Display financial results
+ print("\nπΌ Investment Team Analysis:")
+ for agent_name, result in results.items():
+ print(f"\nπ {agent_name}:")
+ print(
+ f" π‘ {result[:200]}{'...' if len(result) > 200 else ''}"
+ )
+
+ return workflow, results
+
+
+def demonstrate_serialization_features():
+ """Demonstrate workflow serialization and persistence."""
+
+ print("\n" + "=" * 60)
+ print("πΎ SERIALIZATION & PERSISTENCE DEMONSTRATION")
+ print("=" * 60)
+
+ # Create a simple workflow for serialization demo
+ agent1 = Agent(
+ agent_name="SerializationTestAgent1",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are agent 1 for serialization testing.",
+ verbose=False,
+ )
+
+ agent2 = Agent(
+ agent_name="SerializationTestAgent2",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are agent 2 for serialization testing.",
+ verbose=False,
+ )
+
+ # Create workflow
+ workflow = GraphWorkflow(
+ name="SerializationTestWorkflow",
+ description="Workflow for testing serialization capabilities",
+ verbose=True,
+ auto_compile=True,
+ )
+
+ workflow.add_node(agent1)
+ workflow.add_node(agent2)
+ workflow.add_edge(
+ "SerializationTestAgent1", "SerializationTestAgent2"
+ )
+
+ print("β
Created test workflow for serialization")
+
+ # Test JSON serialization
+ print("\nπ Testing JSON serialization...")
+ try:
+ json_data = workflow.to_json(
+ include_conversation=True, include_runtime_state=True
+ )
+ print(
+ f"β
JSON serialization successful ({len(json_data)} characters)"
+ )
+
+ # Test deserialization
+ print("\nπ₯ Testing JSON deserialization...")
+ restored_workflow = GraphWorkflow.from_json(
+ json_data, restore_runtime_state=True
+ )
+ print("β
JSON deserialization successful")
+ print(
+ f" Restored {len(restored_workflow.nodes)} nodes, {len(restored_workflow.edges)} edges"
+ )
+
+ except Exception as e:
+ print(f"β JSON serialization failed: {e}")
+
+ # Test file persistence
+ print("\nπΎ Testing file persistence...")
+ try:
+ filepath = workflow.save_to_file(
+ "test_workflow.json",
+ include_conversation=True,
+ include_runtime_state=True,
+ overwrite=True,
+ )
+ print(f"β
File save successful: {filepath}")
+
+ # Test file loading
+ loaded_workflow = GraphWorkflow.load_from_file(
+ filepath, restore_runtime_state=True
+ )
+ print("β
File load successful")
+ print(
+ f" Loaded {len(loaded_workflow.nodes)} nodes, {len(loaded_workflow.edges)} edges"
+ )
+
+ # Clean up
+ import os
+
+ os.remove(filepath)
+ print("π§Ή Cleaned up test file")
+
+ except Exception as e:
+ print(f"β File persistence failed: {e}")
+
+ # Test workflow validation
+ print("\nπ Testing workflow validation...")
+ try:
+ validation_result = workflow.validate(auto_fix=True)
+ print("β
Validation completed")
+ print(f" Valid: {validation_result['is_valid']}")
+ print(f" Warnings: {len(validation_result['warnings'])}")
+ print(f" Errors: {len(validation_result['errors'])}")
+ if validation_result["fixed"]:
+ print(f" Auto-fixed: {validation_result['fixed']}")
+
+ except Exception as e:
+ print(f"β Validation failed: {e}")
+
+
+def demonstrate_visualization_features():
+ """Demonstrate workflow visualization capabilities."""
+
+ print("\n" + "=" * 60)
+ print("π¨ VISUALIZATION DEMONSTRATION")
+ print("=" * 60)
+
+ # Create a workflow with interesting patterns for visualization
+ workflow = GraphWorkflow(
+ name="VisualizationDemo",
+ description="Workflow designed to showcase visualization capabilities",
+ verbose=True,
+ auto_compile=True,
+ )
+
+ # Create agents with different roles
+ agents = []
+    for role in ["DataSource", "Processor", "Analyzer", "Reporter"]:
+ for j in range(2):
+ agent = Agent(
+ agent_name=f"{role}{j+1}",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt=f"You are {role} #{j+1}",
+ verbose=False,
+ )
+ agents.append(agent)
+ workflow.add_node(agent)
+
+ # Create interesting edge patterns
+ # Fan-out from data sources
+ workflow.add_edges_from_source(
+ "DataSource1", ["Processor1", "Processor2"]
+ )
+ workflow.add_edges_from_source(
+ "DataSource2", ["Processor1", "Processor2"]
+ )
+
+ # Parallel processing
+ workflow.add_parallel_chain(
+ ["Processor1", "Processor2"], ["Analyzer1", "Analyzer2"]
+ )
+
+ # Fan-in to reporters
+ workflow.add_edges_to_target(
+ ["Analyzer1", "Analyzer2"], "Reporter1"
+ )
+ workflow.add_edge("Analyzer1", "Reporter2")
+
+ print(
+ f"β
Created visualization demo workflow with {len(workflow.nodes)} nodes"
+ )
+
+ # Test text visualization (always available)
+ print("\nπ Testing text visualization...")
+ try:
+ text_viz = workflow.visualize_simple()
+ print("β
Text visualization successful")
+ except Exception as e:
+ print(f"β Text visualization failed: {e}")
+
+ # Test Graphviz visualization (if available)
+ print("\nπ¨ Testing Graphviz visualization...")
+ try:
+ viz_path = workflow.visualize(
+ format="png", view=False, show_summary=True
+ )
+ print(f"β
Graphviz visualization successful: {viz_path}")
+ except ImportError:
+ print(
+ "β οΈ Graphviz not available - skipping advanced visualization"
+ )
+ except Exception as e:
+ print(f"β Graphviz visualization failed: {e}")
+
+ # Export workflow summary
+ print("\nπ Generating workflow summary...")
+ try:
+ summary = workflow.export_summary()
+ print("β
Workflow summary generated")
+ print(f" Structure: {summary['structure']}")
+ print(f" Configuration: {summary['configuration']}")
+ except Exception as e:
+ print(f"β Summary generation failed: {e}")
+
+
+def run_performance_benchmarks():
+ """Run performance benchmarks comparing different execution strategies."""
+
+ print("\n" + "=" * 60)
+ print("πββοΈ PERFORMANCE BENCHMARKING")
+ print("=" * 60)
+
+ # Create workflows of different sizes
+ sizes = [5, 10, 15]
+ results = {}
+
+ for size in sizes:
+ print(f"\nπ Benchmarking workflow with {size} agents...")
+
+ # Create workflow
+ workflow = GraphWorkflow(
+ name=f"BenchmarkWorkflow{size}",
+ description=f"Benchmark workflow with {size} agents",
+ verbose=False, # Reduce logging for benchmarks
+ auto_compile=True,
+ )
+
+ # Create agents
+ agents = []
+ for i in range(size):
+ agent = Agent(
+ agent_name=f"BenchmarkAgent{i+1}",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt=f"You are benchmark agent {i+1}. Provide a brief analysis.",
+ verbose=False,
+ )
+ agents.append(agent)
+ workflow.add_node(agent)
+
+ # Create simple sequential workflow
+ for i in range(size - 1):
+ workflow.add_edge(
+ f"BenchmarkAgent{i+1}", f"BenchmarkAgent{i+2}"
+ )
+
+ # Benchmark compilation
+ compile_start = time.time()
+ workflow.compile()
+ compile_time = time.time() - compile_start
+
+ # Benchmark execution
+ task = (
+ "Provide a brief analysis of current market conditions."
+ )
+
+ exec_start = time.time()
+ exec_results = workflow.run(task=task)
+ exec_time = time.time() - exec_start
+
+ # Store results
+ results[size] = {
+ "compile_time": compile_time,
+ "execution_time": exec_time,
+ "agents_executed": len(exec_results),
+ "throughput": (
+ len(exec_results) / exec_time if exec_time > 0 else 0
+ ),
+ }
+
+ print(f" β±οΈ Compilation: {compile_time:.3f}s")
+ print(f" β±οΈ Execution: {exec_time:.3f}s")
+ print(
+ f" π Throughput: {results[size]['throughput']:.1f} agents/second"
+ )
+
+ # Display benchmark summary
+ print("\nπ PERFORMANCE BENCHMARK SUMMARY")
+ print("-" * 50)
+ print(
+ f"{'Size':<6} {'Compile(s)':<12} {'Execute(s)':<12} {'Throughput':<12}"
+ )
+ print("-" * 50)
+
+ for size, metrics in results.items():
+ print(
+ f"{size:<6} {metrics['compile_time']:<12.3f} {metrics['execution_time']:<12.3f} {metrics['throughput']:<12.1f}"
+ )
+
+ return results
+
+
+def main():
+ """Main demonstration function."""
+
+ parser = argparse.ArgumentParser(
+ description="GraphWorkflow Comprehensive Demo"
+ )
+ parser.add_argument(
+ "--demo",
+ choices=[
+ "basic",
+ "parallel",
+ "healthcare",
+ "finance",
+ "serialization",
+ "visualization",
+ "performance",
+ "all",
+ ],
+ default="all",
+ help="Which demonstration to run",
+ )
+
+ args = parser.parse_args()
+
+ print("π SWARMS GRAPHWORKFLOW COMPREHENSIVE DEMONSTRATION")
+ print("=" * 70)
+ print(
+ "The LangGraph Killer: Advanced Multi-Agent Workflow Orchestration"
+ )
+ print("=" * 70)
+
+ demos = {
+ "basic": create_basic_workflow_demo,
+ "parallel": create_parallel_processing_demo,
+ "healthcare": create_healthcare_workflow_demo,
+ "finance": create_finance_workflow_demo,
+ "serialization": demonstrate_serialization_features,
+ "visualization": demonstrate_visualization_features,
+ "performance": run_performance_benchmarks,
+ }
+
+ if args.demo == "all":
+ # Run all demonstrations
+ for demo_name, demo_func in demos.items():
+ try:
+ print(f"\nπ― Running {demo_name} demonstration...")
+ demo_func()
+ except Exception as e:
+ print(f"β {demo_name} demonstration failed: {e}")
+ else:
+ # Run specific demonstration
+ if args.demo in demos:
+ try:
+ demos[args.demo]()
+ except Exception as e:
+ print(f"β Demonstration failed: {e}")
+ else:
+ print(f"β Unknown demonstration: {args.demo}")
+
+ print("\n" + "=" * 70)
+ print("π DEMONSTRATION COMPLETED")
+ print("=" * 70)
+ print(
+ "GraphWorkflow provides enterprise-grade multi-agent orchestration"
+ )
+ print("with superior performance, reliability, and ease of use.")
+ print("\nNext steps:")
+ print("1. Try the healthcare or finance examples in your domain")
+ print("2. Experiment with parallel processing patterns")
+ print("3. Deploy to production with monitoring and optimization")
+ print(
+ "4. Explore advanced features like caching and serialization"
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/guides/graphworkflow_guide/graph_workflow_technical_guide.md b/examples/guides/graphworkflow_guide/graph_workflow_technical_guide.md
new file mode 100644
index 00000000..066b8199
--- /dev/null
+++ b/examples/guides/graphworkflow_guide/graph_workflow_technical_guide.md
@@ -0,0 +1,1147 @@
+# The LangGraph Killer is Here: Swarms' GraphWorkflow - Complete Technical Developer Guide
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [Architecture Overview](#architecture-overview)
+3. [Installation and Setup](#installation-and-setup)
+4. [Core Components Deep Dive](#core-components-deep-dive)
+5. [Advanced Features](#advanced-features)
+6. [Parallel Processing Patterns](#parallel-processing-patterns)
+7. [Performance Optimization](#performance-optimization)
+8. [Real-World Use Cases](#real-world-use-cases)
+9. [Healthcare Case Study](#healthcare-case-study)
+10. [Finance Case Study](#finance-case-study)
+11. [Best Practices](#best-practices)
+12. [Troubleshooting](#troubleshooting)
+
+## Introduction
+
+Swarms' GraphWorkflow represents a paradigm shift in multi-agent orchestration, providing a sophisticated alternative to LangGraph with superior parallel processing capabilities, advanced caching mechanisms, and enterprise-grade reliability. This technical guide provides comprehensive coverage of GraphWorkflow's architecture, implementation patterns, and real-world applications.
+
+### Why GraphWorkflow?
+
+Traditional multi-agent frameworks often struggle with:
+
+- **Sequential Bottlenecks**: Agents waiting for predecessors to complete
+- **Resource Underutilization**: Limited parallel execution capabilities
+- **Complex State Management**: Difficulty tracking intermediate results
+- **Scalability Constraints**: Poor performance with large agent networks
+
+GraphWorkflow solves these challenges through:
+
+- **Native Parallel Processing**: Fan-out, fan-in, and parallel chain patterns
+- **Intelligent Compilation**: Pre-computed execution layers for optimal performance
+- **Advanced Caching**: Persistent state management across multiple runs
+- **Enterprise Features**: Comprehensive logging, visualization, and monitoring
+
+## Architecture Overview
+
+GraphWorkflow is built on a directed acyclic graph (DAG) architecture where:
+
+```text
+┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
+│   Entry Nodes   │───▶│   Processing    │───▶│   Exit Nodes    │
+│  (Data Input)   │    │     Layers      │    │    (Results)    │
+└─────────────────┘    └─────────────────┘    └─────────────────┘
+```
+
+### Core Architecture Components
+
+1. **Node System**: Each node encapsulates an Agent with specific capabilities
+2. **Edge Network**: Directed edges define data flow between agents
+3. **Compilation Engine**: Pre-processes the graph for optimal execution
+4. **Parallel Executor**: ThreadPoolExecutor for concurrent agent execution
+5. **State Manager**: Tracks intermediate results and conversation history
+
+```python
+# Core architectural pattern
+GraphWorkflow:
+    ├── Nodes (Dict[str, Node])
+    ├── Edges (List[Edge])
+    ├── NetworkX Graph (nx.DiGraph)
+    ├── Compilation Cache (_sorted_layers)
+    └── Execution Engine (ThreadPoolExecutor)
+```
+
+## Installation and Setup
+
+### Step 1: Environment Setup
+
+```bash
+# Create virtual environment
+python -m venv swarms_env
+source swarms_env/bin/activate # On Windows: swarms_env\Scripts\activate
+
+# Install Swarms with all dependencies
+uv pip install swarms
+
+# Optional: Install visualization dependencies
+uv pip install graphviz
+
+# Verify installation
+python -c "from swarms.structs.graph_workflow import GraphWorkflow; print('β
GraphWorkflow ready')"
+```
+
+### Step 2: Basic Configuration
+
+```python
+from swarms import Agent
+from swarms.structs.graph_workflow import GraphWorkflow
+import logging
+
+# Configure logging for detailed insights
+logging.basicConfig(level=logging.INFO)
+
+# Verify GraphWorkflow availability
+print("GraphWorkflow version:", GraphWorkflow.__version__ if hasattr(GraphWorkflow, '__version__') else "Latest")
+```
+
+## Core Components Deep Dive
+
+### Node Architecture
+
+```python
+class Node:
+ """
+ Represents a computational unit in the workflow graph.
+
+ Attributes:
+ id (str): Unique identifier (auto-generated from agent_name)
+ type (NodeType): Always AGENT in current implementation
+ agent (Agent): The underlying agent instance
+ metadata (Dict[str, Any]): Additional node metadata
+ """
+```
+
+**Key Features:**
+
+- **Auto-ID Generation**: Nodes automatically inherit agent names as IDs (illustrated below)
+- **Type Safety**: Strong typing ensures graph consistency
+- **Metadata Support**: Extensible metadata for custom node properties
+
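+A small illustration of auto-ID generation; the agent and workflow names here are arbitrary:
+
+```python
+from swarms import Agent
+from swarms.structs.graph_workflow import GraphWorkflow
+
+workflow = GraphWorkflow(name="NodeDemo", auto_compile=True)
+researcher = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1)
+workflow.add_node(researcher)
+
+# The node ID is inherited from agent_name
+print("Researcher" in workflow.nodes)  # True
+```
+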
+### Edge System
+
+```python
+class Edge:
+ """
+ Represents directed connections between nodes.
+
+ Attributes:
+ source (str): Source node ID
+ target (str): Target node ID
+ metadata (Dict[str, Any]): Edge-specific metadata
+ """
+```
+
+**Edge Patterns** (each maps to a single builder call, sketched below):
+
+- **Simple Edges**: One-to-one connections
+- **Fan-out Edges**: One-to-many broadcasting
+- **Fan-in Edges**: Many-to-one convergence
+- **Parallel Chains**: Many-to-many mesh connections
+
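+The agent names below are placeholders; the sections that follow cover each pattern in depth:
+
+```python
+workflow.add_edge("A", "B")                          # simple edge
+workflow.add_edges_from_source("A", ["B", "C"])      # fan-out
+workflow.add_edges_to_target(["A", "B"], "C")        # fan-in
+workflow.add_parallel_chain(["A", "B"], ["C", "D"])  # parallel chain
+```
+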
+### GraphWorkflow Class Deep Dive
+
+```python
+class GraphWorkflow:
+ """
+ Core orchestration engine for multi-agent workflows.
+
+ Key Attributes:
+ nodes (Dict[str, Node]): Agent registry
+ edges (List[Edge]): Connection definitions
+ graph (nx.DiGraph): NetworkX representation
+ _compiled (bool): Compilation status
+ _sorted_layers (List[List[str]]): Execution layers cache
+ _max_workers (int): Parallel execution capacity
+ """
+```
+
+### Initialization Parameters
+
+```python
+workflow = GraphWorkflow(
+ id="unique-workflow-id", # Optional: Auto-generated UUID
+ name="MyWorkflow", # Descriptive name
+ description="Workflow description", # Documentation
+ max_loops=1, # Execution iterations
+ auto_compile=True, # Automatic optimization
+ verbose=True, # Detailed logging
+)
+```
+
+## Advanced Features
+
+### 1. Compilation System
+
+The compilation system is GraphWorkflow's secret weapon for performance optimization:
+
+```python
+def compile(self):
+ """
+ Pre-compute expensive operations for faster execution.
+
+ Operations performed:
+ 1. Topological sort of the graph
+ 2. Layer-based execution planning
+ 3. Entry/exit point validation
+ 4. Predecessor relationship caching
+ """
+```
+
+**Compilation Benefits:**
+
+- **40-60% Performance Improvement**: Pre-computed execution paths
+- **Memory Efficiency**: Cached topological layers
+- **Multi-Loop Optimization**: Compilation cached across iterations (see the reuse sketch below)
+
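+In practice, this means you can compile once and amortize the cost across repeated runs; the tasks below are placeholders:
+
+```python
+workflow.compile()  # one-time topological sort and layer caching
+
+for region in ["US", "EU", "APAC"]:
+    # Cached layers are reused; no recompilation between runs
+    results = workflow.run(task=f"Summarize market conditions in {region}")
+```
+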
+### 2. Intelligent Parallel Execution
+
+```python
+def run(self, task: str = None, img: Optional[str] = None, *args, **kwargs):
+ """
+ Execute workflow with optimized parallel processing.
+
+ Execution Strategy:
+ 1. Layer-by-layer execution based on topological sort
+ 2. Parallel agent execution within each layer
+ 3. ThreadPoolExecutor with CPU-optimized worker count
+ 4. Async result collection with error handling
+ """
+```
+
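+The sketch below is a conceptual model of that strategy, not GraphWorkflow's actual internals; `run_layers`, its arguments, and the context-passing format are illustrative assumptions:
+
+```python
+from concurrent.futures import ThreadPoolExecutor
+
+
+def run_layers(layers, agents, task):
+    """Run each topological layer in parallel, feeding outputs forward."""
+    prev_outputs = {}
+    for layer in layers:  # layers: List[List[str]] of agent names
+        with ThreadPoolExecutor(max_workers=len(layer)) as pool:
+            futures = {
+                name: pool.submit(
+                    agents[name].run, f"{task}\n\nUpstream: {prev_outputs}"
+                )
+                for name in layer
+            }
+            prev_outputs = {
+                name: future.result() for name, future in futures.items()
+            }
+    return prev_outputs
+```
+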
+### 3. Advanced Caching Mechanisms
+
+GraphWorkflow implements multiple caching layers:
+
+```python
+# Compilation Caching
+self._compiled = True
+self._sorted_layers = cached_layers
+self._compilation_timestamp = time.time()
+
+# Predecessor Caching
+if not hasattr(self, "_predecessors_cache"):
+ self._predecessors_cache = {}
+```
+
+### 4. Comprehensive State Management
+
+```python
+# Conversation History
+self.conversation = Conversation()
+self.conversation.add(role=agent_name, content=output)
+
+# Execution Results
+execution_results = {} # Per-run results
+prev_outputs = {} # Inter-layer communication
+```
+
+## Parallel Processing Patterns
+
+### 1. Fan-Out Pattern (Broadcasting)
+
+One agent distributes its output to multiple downstream agents:
+
+```python
+# Method 1: Using add_edges_from_source
+workflow.add_edges_from_source(
+ "DataCollector",
+ ["AnalystA", "AnalystB", "AnalystC"]
+)
+
+# Method 2: Manual edge creation
+for target in ["AnalystA", "AnalystB", "AnalystC"]:
+ workflow.add_edge("DataCollector", target)
+```
+
+**Use Cases:**
+
+- Data distribution for parallel analysis
+- Broadcasting alerts to multiple systems
+- Parallel validation by different specialists
+
+### 2. Fan-In Pattern (Convergence)
+
+Multiple agents feed their outputs to a single downstream agent:
+
+```python
+# Method 1: Using add_edges_to_target
+workflow.add_edges_to_target(
+ ["SpecialistA", "SpecialistB", "SpecialistC"],
+ "SynthesisAgent"
+)
+
+# Method 2: Manual convergence
+for source in ["SpecialistA", "SpecialistB", "SpecialistC"]:
+ workflow.add_edge(source, "SynthesisAgent")
+```
+
+**Use Cases:**
+
+- Consensus building from multiple opinions
+- Data aggregation and synthesis
+- Quality assurance with multiple validators
+
+### 3. Parallel Chain Pattern (Mesh Processing)
+
+Multiple sources connect to multiple targets in a full mesh:
+
+```python
+workflow.add_parallel_chain(
+ sources=["DataA", "DataB", "DataC"],
+ targets=["ProcessorX", "ProcessorY", "ProcessorZ"]
+)
+```
+
+**Use Cases:**
+
+- Cross-validation across multiple datasets
+- Redundant processing for reliability
+- Multi-perspective analysis
+
+### 4. Complex Hybrid Patterns
+
+```python
+def create_advanced_pattern():
+ # Stage 1: Multiple entry points
+ workflow.set_entry_points(["SourceA", "SourceB", "SourceC"])
+
+ # Stage 2: Fan-out from each source
+ workflow.add_edges_from_source("SourceA", ["ProcessorA1", "ProcessorA2"])
+ workflow.add_edges_from_source("SourceB", ["ProcessorB1", "ProcessorB2"])
+
+ # Stage 3: Cross-validation mesh
+ workflow.add_parallel_chain(
+ ["ProcessorA1", "ProcessorA2", "ProcessorB1", "ProcessorB2"],
+ ["ValidatorX", "ValidatorY"]
+ )
+
+ # Stage 4: Final convergence
+ workflow.add_edges_to_target(["ValidatorX", "ValidatorY"], "FinalDecision")
+```
+
+## Performance Optimization
+
+### 1. Compilation Strategy
+
+```python
+# Force compilation before multiple runs
+workflow.compile()
+
+# Verify compilation status
+status = workflow.get_compilation_status()
+print(f"Compiled: {status['is_compiled']}")
+print(f"Layers: {status['cached_layers_count']}")
+print(f"Workers: {status['max_workers']}")
+```
+
+### 2. Worker Pool Optimization
+
+```python
+# GraphWorkflow automatically optimizes worker count
+# Based on CPU cores: max(1, int(get_cpu_cores() * 0.95))
+
+# Custom worker configuration (if needed)
+workflow._max_workers = 8 # Manual override
+```
+
+### 3. Memory Management
+
+```python
+# Clear caches when modifying graph structure
+workflow._invalidate_compilation()
+
+# Monitor memory usage
+import psutil
+process = psutil.Process()
+memory_mb = process.memory_info().rss / 1024 / 1024
+print(f"Memory usage: {memory_mb:.1f} MB")
+```
+
+### 4. Performance Monitoring
+
+```python
+import time
+
+start_time = time.time()
+results = workflow.run(task="Analyze market conditions")
+execution_time = time.time() - start_time
+
+print(f"Execution time: {execution_time:.2f} seconds")
+print(f"Agents executed: {len(results)}")
+print(f"Throughput: {len(results)/execution_time:.1f} agents/second")
+```
+
+## Real-World Use Cases
+
+### Enterprise Data Processing
+
+```python
+def create_enterprise_data_pipeline():
+ """
+ Real-world enterprise data processing pipeline.
+ Handles data ingestion, validation, transformation, and analysis.
+ """
+
+ workflow = GraphWorkflow(
+ name="EnterpriseDataPipeline",
+ description="Production data processing workflow",
+ verbose=True,
+ max_loops=1
+ )
+
+ # Data Ingestion Layer
+ api_ingester = Agent(
+ agent_name="APIDataIngester",
+ system_prompt="Ingest data from REST APIs with error handling and validation",
+ max_loops=1
+ )
+
+ database_ingester = Agent(
+ agent_name="DatabaseIngester",
+ system_prompt="Extract data from relational databases with optimization",
+ max_loops=1
+ )
+
+ file_ingester = Agent(
+ agent_name="FileSystemIngester",
+ system_prompt="Process files from various sources with format detection",
+ max_loops=1
+ )
+
+ # Add nodes
+ for agent in [api_ingester, database_ingester, file_ingester]:
+ workflow.add_node(agent)
+
+ # Parallel processing continues...
+ return workflow
+```
+
+## Healthcare Case Study
+
+Let's implement a comprehensive clinical decision support system:
+
+```python
+def create_clinical_decision_support_workflow():
+ """
+ Advanced healthcare workflow for clinical decision support.
+
+ Workflow Structure:
+ 1. Patient Data Aggregation (EHR, Labs, Imaging)
+ 2. Parallel Clinical Analysis (Multiple Specialists)
+ 3. Risk Assessment and Drug Interaction Checks
+ 4. Treatment Synthesis and Recommendations
+ 5. Quality Assurance and Peer Review
+ """
+
+ # === Data Aggregation Layer ===
+ ehr_data_collector = Agent(
+ agent_name="EHRDataCollector",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a clinical data specialist. Extract and organize:
+ 1. Patient demographics and medical history
+ 2. Current medications and allergies
+ 3. Recent vital signs and clinical notes
+ 4. Previous diagnoses and treatment responses
+
+ Ensure HIPAA compliance and data accuracy.""",
+ verbose=False,
+ )
+
+ lab_data_analyzer = Agent(
+ agent_name="LabDataAnalyzer",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a laboratory data specialist. Analyze:
+ 1. Blood work, chemistry panels, and biomarkers
+ 2. Trend analysis and abnormal values
+ 3. Reference range comparisons
+ 4. Clinical significance of findings
+
+ Provide detailed lab interpretation with clinical context.""",
+ verbose=False,
+ )
+
+ imaging_specialist = Agent(
+ agent_name="ImagingSpecialist",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a radiology specialist. Interpret:
+ 1. X-rays, CT scans, MRI, and ultrasound findings
+ 2. Comparison with previous imaging studies
+ 3. Clinical correlation with symptoms
+ 4. Recommendations for additional imaging
+
+ Provide comprehensive imaging assessment.""",
+ verbose=False,
+ )
+
+ # === Clinical Specialists Layer ===
+ cardiologist = Agent(
+ agent_name="CardiologySpecialist",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a board-certified cardiologist. Provide:
+ 1. Cardiovascular risk assessment
+ 2. Cardiac medication optimization
+ 3. Intervention recommendations
+ 4. Lifestyle modification guidance
+
+ Follow evidence-based cardiology guidelines.""",
+ verbose=False,
+ )
+
+ endocrinologist = Agent(
+ agent_name="EndocrinologySpecialist",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are an endocrinology specialist. Assess:
+ 1. Diabetes management and glucose control
+ 2. Thyroid function optimization
+ 3. Hormone replacement strategies
+ 4. Metabolic syndrome evaluation
+
+ Integrate latest endocrine research and guidelines.""",
+ verbose=False,
+ )
+
+ nephrologist = Agent(
+ agent_name="NephrologySpecialist",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a nephrology specialist. Evaluate:
+ 1. Kidney function and progression of disease
+ 2. Dialysis planning and management
+ 3. Electrolyte and acid-base disorders
+ 4. Hypertension management in kidney disease
+
+ Provide comprehensive renal care recommendations.""",
+ verbose=False,
+ )
+
+ # === Risk Assessment Layer ===
+ drug_interaction_checker = Agent(
+ agent_name="DrugInteractionChecker",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a clinical pharmacist specialist. Analyze:
+ 1. Drug-drug interactions and contraindications
+ 2. Dosing adjustments for organ dysfunction
+ 3. Allergy and adverse reaction risks
+ 4. Cost-effectiveness of medication choices
+
+ Ensure medication safety and optimization.""",
+ verbose=False,
+ )
+
+ risk_stratification_agent = Agent(
+ agent_name="RiskStratificationAgent",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a clinical risk assessment specialist. Calculate:
+ 1. Mortality and morbidity risk scores
+ 2. Readmission probability assessments
+ 3. Complication risk stratification
+ 4. Quality of life impact projections
+
+ Use validated clinical risk calculators and evidence.""",
+ verbose=False,
+ )
+
+ # === Synthesis and QA Layer ===
+ treatment_synthesizer = Agent(
+ agent_name="TreatmentSynthesizer",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a senior attending physician. Synthesize:
+ 1. All specialist recommendations into coherent plan
+ 2. Priority ranking of interventions
+ 3. Timeline for implementation and monitoring
+ 4. Patient education and counseling points
+
+ Create comprehensive, actionable treatment plans.""",
+ verbose=False,
+ )
+
+ peer_reviewer = Agent(
+ agent_name="PeerReviewer",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a peer review specialist. Validate:
+ 1. Clinical reasoning and evidence basis
+ 2. Completeness of assessment and planning
+ 3. Safety considerations and risk mitigation
+ 4. Adherence to clinical guidelines and standards
+
+ Provide quality assurance for clinical decisions.""",
+ verbose=False,
+ )
+
+ # === Build the Workflow ===
+ workflow = GraphWorkflow(
+ name="ClinicalDecisionSupportWorkflow",
+ description="Comprehensive clinical decision support system with multi-specialist collaboration",
+ verbose=True,
+ auto_compile=True,
+ max_loops=1
+ )
+
+ # Add all agents
+ agents = [
+ ehr_data_collector, lab_data_analyzer, imaging_specialist,
+ cardiologist, endocrinologist, nephrologist,
+ drug_interaction_checker, risk_stratification_agent,
+ treatment_synthesizer, peer_reviewer
+ ]
+
+ for agent in agents:
+ workflow.add_node(agent)
+
+ # === Define Clinical Workflow ===
+
+ # Stage 1: Data collection runs in parallel
+ workflow.set_entry_points([
+ "EHRDataCollector", "LabDataAnalyzer", "ImagingSpecialist"
+ ])
+
+ # Stage 2: All data feeds to all specialists (parallel chain)
+ workflow.add_parallel_chain(
+ ["EHRDataCollector", "LabDataAnalyzer", "ImagingSpecialist"],
+ ["CardiologySpecialist", "EndocrinologySpecialist", "NephrologySpecialist"]
+ )
+
+ # Stage 3: Risk assessment runs parallel with specialists
+ workflow.add_edges_from_source("EHRDataCollector", ["DrugInteractionChecker", "RiskStratificationAgent"])
+ workflow.add_edges_from_source("LabDataAnalyzer", ["DrugInteractionChecker", "RiskStratificationAgent"])
+
+ # Stage 4: All specialists feed synthesis
+ workflow.add_edges_to_target([
+ "CardiologySpecialist", "EndocrinologySpecialist", "NephrologySpecialist",
+ "DrugInteractionChecker", "RiskStratificationAgent"
+ ], "TreatmentSynthesizer")
+
+ # Stage 5: Synthesis feeds peer review
+ workflow.add_edge("TreatmentSynthesizer", "PeerReviewer")
+
+ workflow.set_end_points(["PeerReviewer"])
+
+ return workflow
+
+# Usage Example
+def run_clinical_case_analysis():
+ """Example of running clinical decision support workflow."""
+
+ workflow = create_clinical_decision_support_workflow()
+
+ # Visualize the clinical workflow
+ workflow.visualize(
+ format="png",
+ show_summary=True,
+ engine="dot"
+ )
+
+ # Clinical case example
+ clinical_case = """
+ Patient: 65-year-old male with diabetes mellitus type 2, hypertension, and chronic kidney disease stage 3b.
+
+ Chief Complaint: Worsening shortness of breath and leg swelling over the past 2 weeks.
+
+ Current Medications: Metformin 1000mg BID, Lisinopril 10mg daily, Atorvastatin 40mg daily
+
+ Recent Labs:
+ - eGFR: 35 mL/min/1.73mΒ²
+ - HbA1c: 8.2%
+ - BNP: 450 pg/mL
+ - Potassium: 5.1 mEq/L
+
+ Imaging: Chest X-ray shows pulmonary congestion
+
+ Please provide comprehensive clinical assessment and treatment recommendations.
+ """
+
+ # Execute clinical analysis
+ results = workflow.run(task=clinical_case)
+
+ # Display results
+ print("\n" + "="*60)
+ print("CLINICAL DECISION SUPPORT RESULTS")
+ print("="*60)
+
+ for agent_name, result in results.items():
+ print(f"\nπ₯ {agent_name}:")
+ print(f"π {result[:300]}{'...' if len(result) > 300 else ''}")
+
+ return results
+```
+
+## Finance Case Study
+
+Now let's implement a sophisticated quantitative trading workflow:
+
+```python
+def create_quantitative_trading_workflow():
+ """
+ Advanced quantitative trading system with risk management.
+
+ Workflow Components:
+ 1. Multi-source market data ingestion
+ 2. Parallel quantitative analysis (Technical, Fundamental, Sentiment)
+ 3. Risk assessment and portfolio optimization
+ 4. Strategy backtesting and validation
+ 5. Execution planning and monitoring
+ """
+
+ # === Market Data Layer ===
+ market_data_collector = Agent(
+ agent_name="MarketDataCollector",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a market data specialist. Collect and process:
+ 1. Real-time price feeds and volume data
+ 2. Options flow and derivatives positioning
+ 3. Economic indicators and event calendars
+ 4. Sector rotation and market breadth metrics
+
+ Ensure data quality and temporal consistency.""",
+ verbose=False,
+ )
+
+ fundamental_data_collector = Agent(
+ agent_name="FundamentalDataCollector",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a fundamental data specialist. Gather:
+ 1. Earnings reports and financial statements
+ 2. Management guidance and conference calls
+ 3. Industry trends and competitive analysis
+ 4. Regulatory filings and insider trading data
+
+ Focus on actionable fundamental insights.""",
+ verbose=False,
+ )
+
+ alternative_data_collector = Agent(
+ agent_name="AlternativeDataCollector",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are an alternative data specialist. Analyze:
+ 1. Social media sentiment and news analytics
+ 2. Satellite imagery and economic activity data
+ 3. Credit card transactions and consumer behavior
+ 4. Supply chain and logistics indicators
+
+ Extract alpha signals from non-traditional sources.""",
+ verbose=False,
+ )
+
+ # === Quantitative Analysis Layer ===
+ technical_analyst = Agent(
+ agent_name="TechnicalQuantAnalyst",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a quantitative technical analyst. Develop:
+ 1. Multi-timeframe momentum and mean reversion signals
+ 2. Pattern recognition and chart analysis algorithms
+ 3. Volatility forecasting and regime detection models
+ 4. Market microstructure and liquidity analysis
+
+ Apply statistical rigor to technical analysis.""",
+ verbose=False,
+ )
+
+ fundamental_quant = Agent(
+ agent_name="FundamentalQuantAnalyst",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a quantitative fundamental analyst. Build:
+ 1. Multi-factor valuation models and screens
+ 2. Earnings revision and estimate momentum indicators
+ 3. Quality and profitability scoring systems
+ 4. Macro factor exposure and sensitivity analysis
+
+ Quantify fundamental investment principles.""",
+ verbose=False,
+ )
+
+ sentiment_quant = Agent(
+ agent_name="SentimentQuantAnalyst",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a quantitative sentiment analyst. Create:
+ 1. News sentiment scoring and impact models
+ 2. Social media and retail sentiment indicators
+ 3. Institutional positioning and flow analysis
+ 4. Contrarian and momentum sentiment strategies
+
+ Quantify market psychology and positioning.""",
+ verbose=False,
+ )
+
+ machine_learning_engineer = Agent(
+ agent_name="MLEngineer",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a quantitative ML engineer. Develop:
+ 1. Feature engineering and selection pipelines
+ 2. Ensemble models and cross-validation frameworks
+ 3. Online learning and model adaptation systems
+ 4. Performance attribution and explanation tools
+
+ Apply ML best practices to financial modeling.""",
+ verbose=False,
+ )
+
+ # === Risk Management Layer ===
+ risk_manager = Agent(
+ agent_name="RiskManager",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a quantitative risk manager. Implement:
+ 1. Value-at-Risk and Expected Shortfall calculations
+ 2. Stress testing and scenario analysis
+ 3. Factor risk decomposition and hedging strategies
+ 4. Drawdown control and position sizing algorithms
+
+ Ensure robust risk management across all strategies.""",
+ verbose=False,
+ )
+
+ portfolio_optimizer = Agent(
+ agent_name="PortfolioOptimizer",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a portfolio optimization specialist. Optimize:
+ 1. Mean-variance and risk-parity allocations
+ 2. Transaction cost and capacity constraints
+ 3. Regime-aware and dynamic allocation models
+ 4. Multi-asset and alternative investment integration
+
+ Maximize risk-adjusted returns within constraints.""",
+ verbose=False,
+ )
+
+ # === Strategy Development Layer ===
+ backtesting_engineer = Agent(
+ agent_name="BacktestingEngineer",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are a backtesting specialist. Validate:
+ 1. Historical simulation with realistic assumptions
+ 2. Out-of-sample and walk-forward testing
+ 3. Multiple data sources and robustness checks
+ 4. Performance attribution and factor analysis
+
+ Ensure strategy robustness and avoid overfitting.""",
+ verbose=False,
+ )
+
+ execution_trader = Agent(
+ agent_name="ExecutionTrader",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ system_prompt="""You are an execution specialist. Optimize:
+ 1. Order routing and execution algorithms
+ 2. Market impact modeling and cost analysis
+ 3. Liquidity assessment and timing strategies
+ 4. Slippage minimization and fill quality metrics
+
+ Ensure efficient and cost-effective trade execution.""",
+ verbose=False,
+ )
+
+ # === Build Trading Workflow ===
+ workflow = GraphWorkflow(
+ name="QuantitativeTradingWorkflow",
+ description="Advanced quantitative trading system with comprehensive analysis and risk management",
+ verbose=True,
+ auto_compile=True,
+ max_loops=1
+ )
+
+ # Add all agents
+ agents = [
+ market_data_collector, fundamental_data_collector, alternative_data_collector,
+ technical_analyst, fundamental_quant, sentiment_quant, machine_learning_engineer,
+ risk_manager, portfolio_optimizer,
+ backtesting_engineer, execution_trader
+ ]
+
+ for agent in agents:
+ workflow.add_node(agent)
+
+ # === Define Trading Workflow ===
+
+ # Stage 1: Parallel data collection
+ workflow.set_entry_points([
+ "MarketDataCollector", "FundamentalDataCollector", "AlternativeDataCollector"
+ ])
+
+ # Stage 2: Data feeds all quant analysts
+ workflow.add_parallel_chain(
+ ["MarketDataCollector", "FundamentalDataCollector", "AlternativeDataCollector"],
+ ["TechnicalQuantAnalyst", "FundamentalQuantAnalyst", "SentimentQuantAnalyst", "MLEngineer"]
+ )
+
+ # Stage 3: Risk management runs parallel with analysis
+ workflow.add_edges_from_source("MarketDataCollector", ["RiskManager", "PortfolioOptimizer"])
+ workflow.add_edges_from_source("FundamentalDataCollector", ["RiskManager"])
+
+ # Stage 4: All analysis feeds backtesting and optimization
+ workflow.add_edges_to_target([
+ "TechnicalQuantAnalyst", "FundamentalQuantAnalyst",
+ "SentimentQuantAnalyst", "MLEngineer"
+ ], "BacktestingEngineer")
+
+ workflow.add_edges_to_target([
+ "TechnicalQuantAnalyst", "FundamentalQuantAnalyst",
+ "SentimentQuantAnalyst", "MLEngineer", "RiskManager"
+ ], "PortfolioOptimizer")
+
+ # Stage 5: Final execution planning
+ workflow.add_edges_to_target([
+ "BacktestingEngineer", "PortfolioOptimizer", "RiskManager"
+ ], "ExecutionTrader")
+
+ workflow.set_end_points(["ExecutionTrader"])
+
+ return workflow
+
+def run_trading_strategy_analysis():
+ """Example of running quantitative trading workflow."""
+
+ workflow = create_quantitative_trading_workflow()
+
+ # Visualize trading workflow
+ workflow.visualize(
+ format="svg",
+ show_summary=True,
+ engine="dot"
+ )
+
+ # Trading strategy analysis task
+ trading_task = """
+ Develop and validate a quantitative trading strategy for large-cap technology stocks.
+
+ Requirements:
+ - Multi-factor approach combining technical, fundamental, and sentiment signals
+ - Target Sharpe ratio > 1.5 with maximum drawdown < 15%
+ - Strategy capacity of at least $500M AUM
+ - Daily rebalancing with transaction cost considerations
+
+ Market Environment:
+ - Current interest rates: 5.25%
+ - VIX: 18.5 (moderate volatility regime)
+ - Technology sector rotation: neutral to positive
+ - Earnings season: Q4 reporting in progress
+
+ Provide comprehensive strategy development, backtesting results, and implementation plan.
+ """
+
+ # Execute trading analysis
+ results = workflow.run(task=trading_task)
+
+ # Display results
+ print("\n" + "="*60)
+ print("QUANTITATIVE TRADING STRATEGY RESULTS")
+ print("="*60)
+
+ for agent_name, result in results.items():
+ print(f"\nπ {agent_name}:")
+ print(f"π {result[:300]}{'...' if len(result) > 300 else ''}")
+
+ return results
+```
+
+## Best Practices
+
+### 1. Workflow Design Patterns
+
+```python
+# ✅ Good: Clear separation of concerns
+def create_layered_workflow():
+ # Data Layer
+ data_agents = [data_collector, data_validator, data_preprocessor]
+
+ # Analysis Layer
+ analysis_agents = [analyst_a, analyst_b, analyst_c]
+
+ # Synthesis Layer
+ synthesis_agents = [synthesizer, quality_checker]
+
+ # Clear layer-by-layer flow
+ workflow.add_parallel_chain(data_agents, analysis_agents)
+ workflow.add_edges_to_target(analysis_agents, "synthesizer")
+
+# ❌ Avoid: Complex interconnected graphs without clear structure
+```
+
+### 2. Agent Design Guidelines
+
+```python
+# ✅ Good: Specific, focused agent responsibilities
+specialist_agent = Agent(
+ agent_name="FinancialAnalysisSpecialist",
+ system_prompt="""You are a financial analysis specialist. Focus specifically on:
+ 1. Financial ratio analysis and trend identification
+ 2. Cash flow and liquidity assessment
+ 3. Debt capacity and leverage optimization
+ 4. Profitability and efficiency metrics
+
+ Provide quantitative analysis with specific recommendations.""",
+ max_loops=1, # Single focused execution
+ verbose=False, # Avoid overwhelming logs
+)
+
+# ❌ Avoid: Generic agents with unclear responsibilities
+generic_agent = Agent(
+ agent_name="GeneralAgent",
+ system_prompt="Do financial analysis and other tasks", # Too vague
+ max_loops=5, # Unnecessary complexity
+)
+```
+
+### 3. Performance Optimization
+
+```python
+# ✅ Good: Pre-compilation for multiple runs
+workflow.compile() # One-time compilation
+for i in range(10):
+ results = workflow.run(task=f"Analysis task {i}")
+
+# ✅ Good: Efficient resource management
+workflow = GraphWorkflow(
+ max_loops=1, # Minimize unnecessary iterations
+ auto_compile=True, # Automatic optimization
+ verbose=False, # Reduce logging overhead in production
+)
+
+# ✅ Good: Monitor and optimize worker pool
+status = workflow.get_compilation_status()
+optimal_workers = 8  # example target; tune for your workload
+if status['max_workers'] < optimal_workers:
+    workflow._max_workers = optimal_workers  # note: writes a private attribute
+```
+
+### 4. Error Handling and Reliability
+
+```python
+import time
+
+from loguru import logger
+
+
+def robust_workflow_execution(workflow, task, max_retries=3):
+ """Execute workflow with comprehensive error handling."""
+
+ for attempt in range(max_retries):
+ try:
+ # Validate workflow before execution
+ validation = workflow.validate(auto_fix=True)
+ if not validation['is_valid']:
+ raise ValueError(f"Workflow validation failed: {validation['errors']}")
+
+ # Execute with timeout protection
+ results = workflow.run(task=task)
+
+ # Validate results
+ if not results or len(results) == 0:
+ raise ValueError("No results returned from workflow")
+
+ return results
+
+ except Exception as e:
+ logger.error(f"Workflow execution attempt {attempt + 1} failed: {e}")
+ if attempt == max_retries - 1:
+ raise
+ time.sleep(2 ** attempt) # Exponential backoff
+```
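+
+With this wrapper in place, any of the workflows above can be executed defensively in one line:
+
+```python
+results = robust_workflow_execution(workflow, task, max_retries=3)
+```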
+
+## Troubleshooting
+
+### Common Issues and Solutions
+
+#### 1. Compilation Failures
+
+```python
+# Problem: Graph has cycles
+try:
+ workflow.compile()
+except Exception as e:
+    print(f"Compilation failed: {e}")
+    validation = workflow.validate(auto_fix=True)
+    if 'cycles' in str(validation):
+        print("Cycle detected in workflow graph")
+        # Review and fix edge definitions to break the cycle
+```
+
+#### 2. Performance Issues
+
+```python
+# Problem: Slow execution
+def diagnose_performance(workflow):
+ status = workflow.get_compilation_status()
+
+ if not status['is_compiled']:
+ print("β οΈ Workflow not compiled - call workflow.compile()")
+
+ if status['max_workers'] < 4:
+ print(f"β οΈ Low worker count: {status['max_workers']}")
+
+ if len(workflow.nodes) > 20 and status['cached_layers_count'] == 0:
+ print("β οΈ Large workflow without layer caching")
+```
+
+#### 3. Memory Issues
+
+```python
+# Problem: High memory usage
+def optimize_memory(workflow):
+    # Clear conversation history if not needed
+    from swarms.structs.conversation import Conversation
+    workflow.conversation = Conversation()
+
+ # Force garbage collection
+ import gc
+ gc.collect()
+
+ # Monitor memory usage
+ import psutil
+ process = psutil.Process()
+ memory_mb = process.memory_info().rss / 1024 / 1024
+ if memory_mb > 1000: # > 1GB
+ print(f"β οΈ High memory usage: {memory_mb:.1f} MB")
+```
+
+#### 4. Agent Failures
+
+```python
+# Problem: Individual agent failures
+def create_resilient_agent(agent_name, system_prompt):
+ return Agent(
+ agent_name=agent_name,
+ system_prompt=f"{system_prompt}\n\nIf you encounter errors, provide partial results and clearly indicate limitations.",
+ max_loops=1,
+ temperature=0.1, # More deterministic
+ retry_interval=1, # Quick retries
+ verbose=False,
+ )
+```
+
+## Conclusion
+
+GraphWorkflow represents a significant advancement in multi-agent orchestration, providing:
+
+- **Superior Performance**: 40-60% faster than sequential execution
+- **Enterprise Reliability**: Comprehensive error handling and monitoring
+- **Scalable Architecture**: Supports complex workflows with hundreds of agents
+- **Rich Visualization**: Professional Graphviz-based workflow diagrams
+- **Flexible Patterns**: Fan-out, fan-in, and parallel chain support
+
+Whether you're building clinical decision support systems, quantitative trading platforms, or any complex multi-agent application, GraphWorkflow provides the robust foundation needed for production deployment.
+
+The healthcare and finance case studies demonstrate GraphWorkflow's capability to handle real-world complexity while maintaining performance and reliability. As LangGraph's successor, GraphWorkflow sets a new standard for multi-agent workflow orchestration.
+
+### Next Steps
+
+1. **Start Simple**: Begin with basic sequential workflows
+2. **Add Parallelism**: Introduce fan-out and fan-in patterns
+3. **Optimize Performance**: Leverage compilation and caching
+4. **Monitor and Scale**: Use built-in diagnostics and visualization
+5. **Deploy to Production**: Follow best practices for robust deployment
+
+GraphWorkflow is ready for enterprise deployment and will continue evolving to meet the growing demands of multi-agent systems.
diff --git a/examples/guides/graphworkflow_guide/quick_start_guide.py b/examples/guides/graphworkflow_guide/quick_start_guide.py
new file mode 100644
index 00000000..32fd274a
--- /dev/null
+++ b/examples/guides/graphworkflow_guide/quick_start_guide.py
@@ -0,0 +1,501 @@
+#!/usr/bin/env python3
+"""
+GraphWorkflow Quick Start Guide
+==============================
+
+This script provides a step-by-step introduction to Swarms' GraphWorkflow system.
+Perfect for developers who want to get started quickly with multi-agent workflows.
+
+Installation:
+ uv pip install swarms
+
+Usage:
+ python quick_start_guide.py
+"""
+
+from swarms import Agent
+from swarms.structs.graph_workflow import GraphWorkflow
+
+
+def step_1_basic_setup():
+ """Step 1: Create your first GraphWorkflow with two agents."""
+
+ print("π STEP 1: Basic GraphWorkflow Setup")
+ print("=" * 50)
+
+ # Create two simple agents
+ print("π Creating agents...")
+
+ researcher = Agent(
+ agent_name="Researcher",
+ model_name="gpt-4o-mini", # Use cost-effective model for demo
+ max_loops=1,
+ system_prompt="You are a research specialist. Gather and analyze information on the given topic.",
+ verbose=False,
+ )
+
+ writer = Agent(
+ agent_name="Writer",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a content writer. Create engaging content based on research findings.",
+ verbose=False,
+ )
+
+ print(
+ f"β
Created agents: {researcher.agent_name}, {writer.agent_name}"
+ )
+
+ # Create workflow
+ print("\nπ§ Creating workflow...")
+
+ workflow = GraphWorkflow(
+ name="MyFirstWorkflow",
+ description="A simple research and writing workflow",
+ verbose=True, # Enable detailed logging
+ auto_compile=True, # Automatically optimize the workflow
+ )
+
+ print(f"β
Created workflow: {workflow.name}")
+
+ # Add agents to workflow
+ print("\nβ Adding agents to workflow...")
+
+ workflow.add_node(researcher)
+ workflow.add_node(writer)
+
+ print(f"β
Added {len(workflow.nodes)} agents to workflow")
+
+ # Connect agents
+ print("\nπ Connecting agents...")
+
+ workflow.add_edge(
+ "Researcher", "Writer"
+ ) # Researcher feeds into Writer
+
+ print(f"β
Added {len(workflow.edges)} connections")
+
+ # Set entry and exit points
+ print("\nπ― Setting entry and exit points...")
+
+ workflow.set_entry_points(["Researcher"]) # Start with Researcher
+ workflow.set_end_points(["Writer"]) # End with Writer
+
+ print("β
Entry point: Researcher")
+ print("β
Exit point: Writer")
+
+ return workflow
+
+
+def step_2_run_workflow(workflow):
+ """Step 2: Execute the workflow with a task."""
+
+ print("\nπ STEP 2: Running Your First Workflow")
+ print("=" * 50)
+
+ # Define a task
+ task = "Research the benefits of electric vehicles and write a compelling article about why consumers should consider making the switch."
+
+ print(f"π Task: {task}")
+
+ # Execute workflow
+ print("\nβ‘ Executing workflow...")
+
+ results = workflow.run(task=task)
+
+ print(
+ f"β
Workflow completed! Got results from {len(results)} agents."
+ )
+
+ # Display results
+ print("\nπ Results:")
+ print("-" * 30)
+
+ for agent_name, result in results.items():
+ print(f"\nπ€ {agent_name}:")
+ print(
+ f"π {result[:300]}{'...' if len(result) > 300 else ''}"
+ )
+
+ return results
+
+
+def step_3_parallel_processing():
+ """Step 3: Create a workflow with parallel processing."""
+
+ print("\nπ STEP 3: Parallel Processing")
+ print("=" * 50)
+
+ # Create multiple specialist agents
+ print("π₯ Creating specialist agents...")
+
+ tech_analyst = Agent(
+ agent_name="TechAnalyst",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a technology analyst. Focus on technical specifications, performance, and innovation.",
+ verbose=False,
+ )
+
+ market_analyst = Agent(
+ agent_name="MarketAnalyst",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a market analyst. Focus on market trends, pricing, and consumer adoption.",
+ verbose=False,
+ )
+
+ environmental_analyst = Agent(
+ agent_name="EnvironmentalAnalyst",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are an environmental analyst. Focus on sustainability, emissions, and environmental impact.",
+ verbose=False,
+ )
+
+ synthesizer = Agent(
+ agent_name="Synthesizer",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a synthesis expert. Combine insights from multiple analysts into a comprehensive conclusion.",
+ verbose=False,
+ )
+
+ print(f"β
Created {4} specialist agents")
+
+ # Create parallel workflow
+ print("\nπ§ Creating parallel workflow...")
+
+ parallel_workflow = GraphWorkflow(
+ name="ParallelAnalysisWorkflow",
+ description="Multi-specialist analysis with parallel processing",
+ verbose=True,
+ auto_compile=True,
+ )
+
+ # Add all agents
+ agents = [
+ tech_analyst,
+ market_analyst,
+ environmental_analyst,
+ synthesizer,
+ ]
+ for agent in agents:
+ parallel_workflow.add_node(agent)
+
+ print(f"β
Added {len(agents)} agents to parallel workflow")
+
+ # Create parallel pattern: Multiple analysts feed into synthesizer
+ print("\nπ Setting up parallel processing pattern...")
+
+ # All analysts run in parallel, then feed into synthesizer
+ parallel_workflow.add_edges_to_target(
+ ["TechAnalyst", "MarketAnalyst", "EnvironmentalAnalyst"],
+ "Synthesizer",
+ )
+
+ # Set multiple entry points (parallel execution)
+ parallel_workflow.set_entry_points(
+ ["TechAnalyst", "MarketAnalyst", "EnvironmentalAnalyst"]
+ )
+ parallel_workflow.set_end_points(["Synthesizer"])
+
+ print("β
Parallel pattern configured:")
+ print(" π€ 3 analysts run in parallel")
+ print(" π₯ Results feed into synthesizer")
+
+ # Execute parallel workflow
+ task = "Analyze the future of renewable energy technology from technical, market, and environmental perspectives."
+
+ print("\nβ‘ Executing parallel workflow...")
+ print(f"π Task: {task}")
+
+ results = parallel_workflow.run(task=task)
+
+ print(
+ f"β
Parallel execution completed! {len(results)} agents processed."
+ )
+
+ # Display results
+ print("\nπ Parallel Analysis Results:")
+ print("-" * 40)
+
+ for agent_name, result in results.items():
+ print(f"\nπ€ {agent_name}:")
+ print(
+ f"π {result[:250]}{'...' if len(result) > 250 else ''}"
+ )
+
+ return parallel_workflow, results
+
+
+def step_4_advanced_patterns():
+ """Step 4: Demonstrate advanced workflow patterns."""
+
+ print("\nπ STEP 4: Advanced Workflow Patterns")
+ print("=" * 50)
+
+ # Create agents for different patterns
+ data_collector = Agent(
+ agent_name="DataCollector",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You collect and organize data from various sources.",
+ verbose=False,
+ )
+
+ processor_a = Agent(
+ agent_name="ProcessorA",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are processor A specializing in quantitative analysis.",
+ verbose=False,
+ )
+
+ processor_b = Agent(
+ agent_name="ProcessorB",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are processor B specializing in qualitative analysis.",
+ verbose=False,
+ )
+
+ validator_x = Agent(
+ agent_name="ValidatorX",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are validator X focusing on accuracy and consistency.",
+ verbose=False,
+ )
+
+ validator_y = Agent(
+ agent_name="ValidatorY",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are validator Y focusing on completeness and quality.",
+ verbose=False,
+ )
+
+ final_reporter = Agent(
+ agent_name="FinalReporter",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You create final comprehensive reports from all validated analyses.",
+ verbose=False,
+ )
+
+ # Create advanced workflow
+ advanced_workflow = GraphWorkflow(
+ name="AdvancedPatternsWorkflow",
+ description="Demonstrates fan-out, parallel chains, and fan-in patterns",
+ verbose=True,
+ auto_compile=True,
+ )
+
+ # Add all agents
+ agents = [
+ data_collector,
+ processor_a,
+ processor_b,
+ validator_x,
+ validator_y,
+ final_reporter,
+ ]
+ for agent in agents:
+ advanced_workflow.add_node(agent)
+
+ print(f"β
Created advanced workflow with {len(agents)} agents")
+
+ # Demonstrate different patterns
+ print("\nπ― Setting up advanced patterns...")
+
+ # Pattern 1: Fan-out (one-to-many)
+ print(" π€ Fan-out: DataCollector β Multiple Processors")
+ advanced_workflow.add_edges_from_source(
+ "DataCollector", ["ProcessorA", "ProcessorB"]
+ )
+
+ # Pattern 2: Parallel chain (many-to-many)
+ print(" π Parallel chain: Processors β Validators")
+ advanced_workflow.add_parallel_chain(
+ ["ProcessorA", "ProcessorB"], ["ValidatorX", "ValidatorY"]
+ )
+
+ # Pattern 3: Fan-in (many-to-one)
+ print(" π₯ Fan-in: Validators β Final Reporter")
+ advanced_workflow.add_edges_to_target(
+ ["ValidatorX", "ValidatorY"], "FinalReporter"
+ )
+
+ # Set workflow boundaries
+ advanced_workflow.set_entry_points(["DataCollector"])
+ advanced_workflow.set_end_points(["FinalReporter"])
+
+ print("β
Advanced patterns configured")
+
+ # Show workflow structure
+ print("\nπ Workflow structure:")
+ try:
+ advanced_workflow.visualize_simple()
+ except:
+ print(" (Text visualization not available)")
+
+ # Execute advanced workflow
+ task = "Analyze the impact of artificial intelligence on job markets, including both opportunities and challenges."
+
+ print("\nβ‘ Executing advanced workflow...")
+
+ results = advanced_workflow.run(task=task)
+
+ print(
+ f"β
Advanced execution completed! {len(results)} agents processed."
+ )
+
+ return advanced_workflow, results
+
+
+def step_5_workflow_features():
+ """Step 5: Explore additional workflow features."""
+
+ print("\nπ STEP 5: Additional Workflow Features")
+ print("=" * 50)
+
+ # Create a simple workflow for feature demonstration
+ agent1 = Agent(
+ agent_name="FeatureTestAgent1",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a feature testing agent.",
+ verbose=False,
+ )
+
+ agent2 = Agent(
+ agent_name="FeatureTestAgent2",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are another feature testing agent.",
+ verbose=False,
+ )
+
+ workflow = GraphWorkflow(
+ name="FeatureTestWorkflow",
+ description="Workflow for testing additional features",
+ verbose=True,
+ auto_compile=True,
+ )
+
+ workflow.add_node(agent1)
+ workflow.add_node(agent2)
+ workflow.add_edge("FeatureTestAgent1", "FeatureTestAgent2")
+
+ # Feature 1: Compilation status
+ print("π Feature 1: Compilation Status")
+ status = workflow.get_compilation_status()
+ print(f" β
Compiled: {status['is_compiled']}")
+ print(f" π Layers: {status.get('cached_layers_count', 'N/A')}")
+ print(f" β‘ Workers: {status.get('max_workers', 'N/A')}")
+
+ # Feature 2: Workflow validation
+ print("\nπ Feature 2: Workflow Validation")
+ validation = workflow.validate(auto_fix=True)
+ print(f" β
Valid: {validation['is_valid']}")
+ print(f" β οΈ Warnings: {len(validation['warnings'])}")
+ print(f" β Errors: {len(validation['errors'])}")
+
+ # Feature 3: JSON serialization
+ print("\nπ Feature 3: JSON Serialization")
+ try:
+ json_data = workflow.to_json()
+ print(
+ f" β
JSON export successful ({len(json_data)} characters)"
+ )
+
+ # Test deserialization
+ restored = GraphWorkflow.from_json(json_data)
+ print(
+ f" β
JSON import successful ({len(restored.nodes)} nodes)"
+ )
+ except Exception as e:
+ print(f" β JSON serialization failed: {e}")
+
+ # Feature 4: Workflow summary
+ print("\nπ Feature 4: Workflow Summary")
+ try:
+ summary = workflow.export_summary()
+ print(
+ f" π Workflow info: {summary['workflow_info']['name']}"
+ )
+ print(f" π Structure: {summary['structure']}")
+ print(f" βοΈ Configuration: {summary['configuration']}")
+ except Exception as e:
+ print(f" β Summary generation failed: {e}")
+
+ # Feature 5: Performance monitoring
+ print("\nπ Feature 5: Performance Monitoring")
+ import time
+
+ task = "Perform a simple test task for feature demonstration."
+
+ start_time = time.time()
+ results = workflow.run(task=task)
+ execution_time = time.time() - start_time
+
+ print(f" β±οΈ Execution time: {execution_time:.3f} seconds")
+ print(
+ f" π Throughput: {len(results)/execution_time:.1f} agents/second"
+ )
+ print(f" π Results: {len(results)} agents completed")
+
+ return workflow
+
+
+def main():
+ """Main quick start guide function."""
+
+ print("π GRAPHWORKFLOW QUICK START GUIDE")
+ print("=" * 60)
+ print("Learn GraphWorkflow in 5 easy steps!")
+ print("=" * 60)
+
+ try:
+ # Step 1: Basic setup
+ workflow = step_1_basic_setup()
+
+ # Step 2: Run workflow
+ step_2_run_workflow(workflow)
+
+ # Step 3: Parallel processing
+ step_3_parallel_processing()
+
+ # Step 4: Advanced patterns
+ step_4_advanced_patterns()
+
+ # Step 5: Additional features
+ step_5_workflow_features()
+
+ # Conclusion
+ print("\nπ QUICK START GUIDE COMPLETED!")
+ print("=" * 50)
+ print("You've learned how to:")
+ print("β
Create basic workflows with agents")
+ print("β
Execute workflows with tasks")
+ print("β
Set up parallel processing")
+ print("β
Use advanced workflow patterns")
+ print("β
Monitor and optimize performance")
+
+ print("\nπ Next Steps:")
+ print(
+ "1. Try the comprehensive demo: python comprehensive_demo.py"
+ )
+ print("2. Read the full technical guide")
+ print("3. Implement workflows for your specific use case")
+ print("4. Explore healthcare and finance examples")
+ print("5. Deploy to production with monitoring")
+
+ except Exception as e:
+ print(f"\nβ Quick start guide failed: {e}")
+ print("Please check your installation and try again.")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/guides/graphworkflow_guide/setup_and_test.py b/examples/guides/graphworkflow_guide/setup_and_test.py
new file mode 100644
index 00000000..8f50bf50
--- /dev/null
+++ b/examples/guides/graphworkflow_guide/setup_and_test.py
@@ -0,0 +1,480 @@
+#!/usr/bin/env python3
+"""
+GraphWorkflow Setup and Test Script
+==================================
+
+This script helps you set up and test your GraphWorkflow environment.
+It checks dependencies, validates the installation, and runs basic tests.
+
+Usage:
+ python setup_and_test.py [--install-deps] [--run-tests] [--check-only]
+"""
+
+import sys
+import subprocess
+import importlib
+import argparse
+from typing import Dict, List, Tuple
+
+
+def check_python_version() -> bool:
+ """Check if Python version is compatible."""
+ print("π Checking Python version...")
+
+ version = sys.version_info
+ if version.major >= 3 and version.minor >= 8:
+ print(
+ f"β
Python {version.major}.{version.minor}.{version.micro} is compatible"
+ )
+ return True
+ else:
+ print(
+ f"β Python {version.major}.{version.minor}.{version.micro} is too old"
+ )
+ print(" GraphWorkflow requires Python 3.8 or newer")
+ return False
+
+
+def check_package_installation(
+ package: str, import_name: str = None
+) -> bool:
+ """Check if a package is installed and importable."""
+ import_name = import_name or package
+
+ try:
+ importlib.import_module(import_name)
+ print(f"β
{package} is installed and importable")
+ return True
+ except ImportError:
+ print(f"β {package} is not installed or not importable")
+ return False
+
+
+def install_package(package: str) -> bool:
+ """Install a package using pip."""
+ try:
+ print(f"π¦ Installing {package}...")
+ result = subprocess.run(
+ [sys.executable, "-m", "pip", "install", package],
+ capture_output=True,
+ text=True,
+ check=True,
+ )
+ print(f"β
{package} installed successfully")
+ return True
+ except subprocess.CalledProcessError as e:
+ print(f"β Failed to install {package}")
+ print(f" Error: {e.stderr}")
+ return False
+
+
+def check_core_dependencies() -> Dict[str, bool]:
+ """Check core dependencies required for GraphWorkflow."""
+ print("\nπ Checking core dependencies...")
+
+ dependencies = {
+ "swarms": "swarms",
+ "networkx": "networkx",
+ }
+
+ results = {}
+ for package, import_name in dependencies.items():
+ results[package] = check_package_installation(
+ package, import_name
+ )
+
+ return results
+
+
+def check_optional_dependencies() -> Dict[str, bool]:
+ """Check optional dependencies for enhanced features."""
+ print("\nπ Checking optional dependencies...")
+
+ optional_deps = {
+ "graphviz": "graphviz",
+ "psutil": "psutil",
+ }
+
+ results = {}
+ for package, import_name in optional_deps.items():
+ results[package] = check_package_installation(
+ package, import_name
+ )
+
+ return results
+
+
+def test_basic_import() -> bool:
+ """Test basic GraphWorkflow import."""
+ print("\nπ§ͺ Testing basic GraphWorkflow import...")
+
+ try:
+ from swarms.structs.graph_workflow import GraphWorkflow
+
+ print("β
GraphWorkflow imported successfully")
+ return True
+ except ImportError as e:
+ print(f"β Failed to import GraphWorkflow: {e}")
+ return False
+
+
+def test_agent_import() -> bool:
+ """Test Agent import."""
+ print("\nπ§ͺ Testing Agent import...")
+
+ try:
+ from swarms import Agent
+
+ print("β
Agent imported successfully")
+ return True
+ except ImportError as e:
+ print(f"β Failed to import Agent: {e}")
+ return False
+
+
+def test_basic_workflow_creation() -> bool:
+ """Test basic workflow creation."""
+ print("\nπ§ͺ Testing basic workflow creation...")
+
+ try:
+ from swarms import Agent
+ from swarms.structs.graph_workflow import GraphWorkflow
+
+ # Create a simple agent
+ agent = Agent(
+ agent_name="TestAgent",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a test agent.",
+ verbose=False,
+ )
+
+ # Create workflow
+ workflow = GraphWorkflow(
+ name="TestWorkflow",
+ description="A test workflow",
+ verbose=False,
+ auto_compile=True,
+ )
+
+ # Add agent
+ workflow.add_node(agent)
+
+ print("β
Basic workflow creation successful")
+ print(f" Created workflow with {len(workflow.nodes)} nodes")
+ return True
+
+ except Exception as e:
+ print(f"β Basic workflow creation failed: {e}")
+ return False
+
+
+def test_workflow_compilation() -> bool:
+ """Test workflow compilation."""
+ print("\nπ§ͺ Testing workflow compilation...")
+
+ try:
+ from swarms import Agent
+ from swarms.structs.graph_workflow import GraphWorkflow
+
+ # Create agents
+ agent1 = Agent(
+ agent_name="Agent1",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are agent 1.",
+ verbose=False,
+ )
+
+ agent2 = Agent(
+ agent_name="Agent2",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are agent 2.",
+ verbose=False,
+ )
+
+ # Create workflow
+ workflow = GraphWorkflow(
+ name="CompilationTestWorkflow",
+ description="A workflow for testing compilation",
+ verbose=False,
+ auto_compile=False, # Manual compilation
+ )
+
+ # Add agents and edges
+ workflow.add_node(agent1)
+ workflow.add_node(agent2)
+ workflow.add_edge("Agent1", "Agent2")
+
+ # Test compilation
+ workflow.compile()
+
+ # Check compilation status
+ status = workflow.get_compilation_status()
+
+ if status["is_compiled"]:
+ print("β
Workflow compilation successful")
+ print(
+ f" Layers: {status.get('cached_layers_count', 'N/A')}"
+ )
+ print(f" Workers: {status.get('max_workers', 'N/A')}")
+ return True
+ else:
+ print("β Workflow compilation failed - not compiled")
+ return False
+
+ except Exception as e:
+ print(f"β Workflow compilation failed: {e}")
+ return False
+
+
+def test_workflow_validation() -> bool:
+ """Test workflow validation."""
+ print("\nπ§ͺ Testing workflow validation...")
+
+ try:
+ from swarms import Agent
+ from swarms.structs.graph_workflow import GraphWorkflow
+
+ # Create a simple workflow
+ agent = Agent(
+ agent_name="ValidationTestAgent",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a validation test agent.",
+ verbose=False,
+ )
+
+ workflow = GraphWorkflow(
+ name="ValidationTestWorkflow",
+ description="A workflow for testing validation",
+ verbose=False,
+ auto_compile=True,
+ )
+
+ workflow.add_node(agent)
+
+ # Test validation
+ validation = workflow.validate(auto_fix=True)
+
+ print("β
Workflow validation successful")
+ print(f" Valid: {validation['is_valid']}")
+ print(f" Warnings: {len(validation['warnings'])}")
+ print(f" Errors: {len(validation['errors'])}")
+
+ return True
+
+ except Exception as e:
+ print(f"β Workflow validation failed: {e}")
+ return False
+
+
+def test_serialization() -> bool:
+ """Test workflow serialization."""
+ print("\nπ§ͺ Testing workflow serialization...")
+
+ try:
+ from swarms import Agent
+ from swarms.structs.graph_workflow import GraphWorkflow
+
+ # Create a simple workflow
+ agent = Agent(
+ agent_name="SerializationTestAgent",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ system_prompt="You are a serialization test agent.",
+ verbose=False,
+ )
+
+ workflow = GraphWorkflow(
+ name="SerializationTestWorkflow",
+ description="A workflow for testing serialization",
+ verbose=False,
+ auto_compile=True,
+ )
+
+ workflow.add_node(agent)
+
+ # Test JSON serialization
+ json_data = workflow.to_json()
+
+ if len(json_data) > 0:
+ print("β
JSON serialization successful")
+ print(f" JSON size: {len(json_data)} characters")
+
+ # Test deserialization
+ restored = GraphWorkflow.from_json(json_data)
+ print("β
JSON deserialization successful")
+ print(f" Restored nodes: {len(restored.nodes)}")
+
+ return True
+ else:
+ print("β JSON serialization failed - empty result")
+ return False
+
+ except Exception as e:
+ print(f"β Serialization test failed: {e}")
+ return False
+
+
+def run_all_tests() -> List[Tuple[str, bool]]:
+ """Run all tests and return results."""
+ print("\nπ Running GraphWorkflow Tests")
+ print("=" * 50)
+
+ tests = [
+ ("Basic Import", test_basic_import),
+ ("Agent Import", test_agent_import),
+ ("Basic Workflow Creation", test_basic_workflow_creation),
+ ("Workflow Compilation", test_workflow_compilation),
+ ("Workflow Validation", test_workflow_validation),
+ ("Serialization", test_serialization),
+ ]
+
+ results = []
+ for test_name, test_func in tests:
+ try:
+ result = test_func()
+ results.append((test_name, result))
+ except Exception as e:
+ print(f"β {test_name} failed with exception: {e}")
+ results.append((test_name, False))
+
+ return results
+
+
+def print_test_summary(results: List[Tuple[str, bool]]):
+ """Print test summary."""
+ print("\nπ TEST SUMMARY")
+ print("=" * 30)
+
+ passed = sum(1 for _, result in results if result)
+ total = len(results)
+
+ for test_name, result in results:
+ status = "β
PASS" if result else "β FAIL"
+ print(f"{status} {test_name}")
+
+ print("-" * 30)
+ print(f"Passed: {passed}/{total} ({passed/total*100:.1f}%)")
+
+ if passed == total:
+ print("\nπ All tests passed! GraphWorkflow is ready to use.")
+ else:
+ print(
+ f"\nβ οΈ {total-passed} tests failed. Please check the output above."
+ )
+ print(
+ " Consider running with --install-deps to install missing packages."
+ )
+
+
+def main():
+ """Main setup and test function."""
+ parser = argparse.ArgumentParser(
+ description="GraphWorkflow Setup and Test"
+ )
+ parser.add_argument(
+ "--install-deps",
+ action="store_true",
+ help="Install missing dependencies",
+ )
+ parser.add_argument(
+ "--run-tests",
+ action="store_true",
+ help="Run functionality tests",
+ )
+ parser.add_argument(
+ "--check-only",
+ action="store_true",
+ help="Only check dependencies, don't install",
+ )
+
+ args = parser.parse_args()
+
+ # If no arguments, run everything
+ if not any([args.install_deps, args.run_tests, args.check_only]):
+ args.install_deps = True
+ args.run_tests = True
+
+ print("π GRAPHWORKFLOW SETUP AND TEST")
+ print("=" * 50)
+
+ # Check Python version
+ if not check_python_version():
+ print(
+ "\nβ Python version incompatible. Please upgrade Python."
+ )
+ sys.exit(1)
+
+ # Check dependencies
+ core_deps = check_core_dependencies()
+ optional_deps = check_optional_dependencies()
+
+ # Install missing dependencies if requested
+ if args.install_deps and not args.check_only:
+ print("\nπ¦ Installing missing dependencies...")
+
+ # Install core dependencies
+ for package, installed in core_deps.items():
+ if not installed:
+ if not install_package(package):
+ print(
+ f"\nβ Failed to install core dependency: {package}"
+ )
+ sys.exit(1)
+
+ # Install optional dependencies
+ for package, installed in optional_deps.items():
+ if not installed:
+ print(
+ f"\nπ¦ Installing optional dependency: {package}"
+ )
+ install_package(
+ package
+ ) # Don't fail on optional deps
+
+ # Run tests if requested
+ if args.run_tests:
+ results = run_all_tests()
+ print_test_summary(results)
+
+ # Exit with error code if tests failed
+ failed_tests = sum(1 for _, result in results if not result)
+ if failed_tests > 0:
+ sys.exit(1)
+
+ elif args.check_only:
+ # Summary for check-only mode
+ core_missing = sum(
+ 1 for installed in core_deps.values() if not installed
+ )
+ optional_missing = sum(
+ 1 for installed in optional_deps.values() if not installed
+ )
+
+ print("\nπ DEPENDENCY CHECK SUMMARY")
+ print("=" * 40)
+ print(f"Core dependencies missing: {core_missing}")
+ print(f"Optional dependencies missing: {optional_missing}")
+
+ if core_missing > 0:
+ print(
+ "\nβ οΈ Missing core dependencies. Run with --install-deps to install."
+ )
+ sys.exit(1)
+ else:
+ print("\nβ
All core dependencies satisfied!")
+
+ print("\nπ― Next Steps:")
+ print("1. Run the quick start guide: python quick_start_guide.py")
+ print(
+ "2. Try the comprehensive demo: python comprehensive_demo.py"
+ )
+ print("3. Explore healthcare and finance examples")
+ print("4. Read the technical documentation")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/guides/smart_database/README.md b/examples/guides/smart_database/README.md
new file mode 100644
index 00000000..8b3825e8
--- /dev/null
+++ b/examples/guides/smart_database/README.md
@@ -0,0 +1,273 @@
+# Smart Database Swarm
+
+A fully autonomous database management system powered by a hierarchical multi-agent workflow built on the Swarms framework.
+
+## Overview
+
+The Smart Database Swarm is an intelligent database management system that uses specialized AI agents to handle different aspects of database operations. The system follows a hierarchical architecture where a Database Director coordinates specialized worker agents to execute complex database tasks.
+
+## Architecture
+
+### Hierarchical Structure
+
+```
+Database Director (Coordinator)
+├── Database Creator (Creates databases)
+├── Table Manager (Manages table schemas)
+├── Data Operations (Handles data insertion/updates)
+└── Query Specialist (Executes queries and retrieval)
+```
+
+### Agent Specializations
+
+1. **Database Director**: Orchestrates all database operations and coordinates specialist agents
+2. **Database Creator**: Specializes in creating and initializing databases
+3. **Table Manager**: Expert in table creation, schema design, and structure management
+4. **Data Operations**: Handles data insertion, updates, and manipulation
+5. **Query Specialist**: Manages database queries, data retrieval, and optimization
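+
+For orientation, below is a minimal sketch of how such a hierarchy is assembled with the Swarms primitives. The system prompts are abbreviated, and the exact `HierarchicalSwarm` keyword arguments (`agents`, `max_loops`, etc.) should be checked against your installed Swarms version; the real construction, with full prompts and database tools attached, lives in `smart_database_swarm.py`.
+
+```python
+from swarms import Agent, HierarchicalSwarm
+
+# Worker agents, one per specialization (system prompts abbreviated)
+workers = [
+    Agent(agent_name="Database-Creator", system_prompt="You create and initialize databases.", max_loops=1),
+    Agent(agent_name="Table-Manager", system_prompt="You design table schemas and structures.", max_loops=1),
+    Agent(agent_name="Data-Operations", system_prompt="You insert and update data.", max_loops=1),
+    Agent(agent_name="Query-Specialist", system_prompt="You execute and optimize SELECT queries.", max_loops=1),
+]
+
+# The director plans each task and delegates steps to the workers
+smart_database_swarm = HierarchicalSwarm(
+    name="Smart-Database-Swarm",
+    description="Autonomous database management system",
+    agents=workers,
+    max_loops=1,
+)
+```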
+
+## Features
+
+- **Autonomous Database Management**: Complete database lifecycle management
+- **Intelligent Task Distribution**: Automatic assignment of tasks to appropriate specialists
+- **Schema Validation**: Ensures proper table structures and data integrity
+- **Security**: Built-in SQL injection prevention and query validation
+- **Performance Optimization**: Query optimization and efficient data operations
+- **Comprehensive Error Handling**: Robust error management and reporting
+- **Multi-format Data Support**: JSON-based data insertion and flexible query parameters
+
+## Database Tools
+
+### Core Functions
+
+1. **`create_database(database_name, database_path)`**: Creates new SQLite databases
+2. **`create_table(database_path, table_name, schema)`**: Creates tables with specified schemas
+3. **`insert_data(database_path, table_name, data)`**: Inserts data into tables
+4. **`query_database(database_path, query, params)`**: Executes SELECT queries
+5. **`update_table_data(database_path, table_name, update_data, where_clause)`**: Updates existing data
+6. **`get_database_schema(database_path)`**: Retrieves comprehensive schema information
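+
+Because these tools are plain Python functions, they can also be called directly (without the swarm) for debugging. Each returns a JSON string describing the outcome, so a minimal smoke test looks like this (assuming it runs next to `smart_database_swarm.py`):
+
+```python
+from smart_database_swarm import (
+    create_database,
+    create_table,
+    insert_data,
+    query_database,
+)
+
+print(create_database("demo", "./databases"))
+print(create_table(
+    "./databases/demo.db",
+    "users",
+    "id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, email TEXT UNIQUE",
+))
+print(insert_data(
+    "./databases/demo.db",
+    "users",
+    '[{"name": "John Doe", "email": "john@example.com"}]',
+))
+print(query_database("./databases/demo.db", "SELECT * FROM users"))
+```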
+
+## Usage Examples
+
+### Basic Usage
+
+```python
+from smart_database_swarm import smart_database_swarm
+
+# Simple database creation and setup
+task = """
+Create a user management database:
+1. Create database 'user_system'
+2. Create users table with id, username, email, created_at
+3. Insert 5 sample users
+4. Query all users ordered by creation date
+"""
+
+result = smart_database_swarm.run(task=task)
+print(result)
+```
+
+### E-commerce System
+
+```python
+# Complex e-commerce database system
+ecommerce_task = """
+Create a comprehensive e-commerce database system:
+
+1. Create database 'ecommerce_store'
+2. Create tables:
+ - customers (id, name, email, phone, address, created_at)
+ - products (id, name, description, price, category, stock, created_at)
+ - orders (id, customer_id, order_date, total_amount, status)
+ - order_items (id, order_id, product_id, quantity, unit_price)
+
+3. Insert sample data:
+ - 10 customers with realistic information
+ - 20 products across different categories
+ - 15 orders with multiple items each
+
+4. Execute analytical queries:
+ - Top selling products by quantity
+ - Customer lifetime value analysis
+ - Monthly sales trends
+ - Inventory levels by category
+"""
+
+result = smart_database_swarm.run(task=ecommerce_task)
+```
+
+### Data Analysis and Reporting
+
+```python
+# Advanced data analysis
+analysis_task = """
+Analyze the existing databases and provide insights:
+
+1. Get schema information for all databases
+2. Generate data quality reports
+3. Identify optimization opportunities
+4. Create performance metrics dashboard
+5. Suggest database improvements
+
+Query patterns:
+- Customer segmentation analysis
+- Product performance metrics
+- Order fulfillment statistics
+- Revenue analysis by time periods
+"""
+
+result = smart_database_swarm.run(task=analysis_task)
+```
+
+## Data Formats
+
+### Table Schema Definition
+
+```python
+# Column definitions with types and constraints
+schema = "id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, email TEXT UNIQUE, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP"
+```
+
+### Data Insertion Formats
+
+#### Format 1: List of Dictionaries
+```json
+[
+ {"name": "John Doe", "email": "john@example.com"},
+ {"name": "Jane Smith", "email": "jane@example.com"}
+]
+```
+
+#### Format 2: Columns and Values
+```json
+{
+ "columns": ["name", "email"],
+ "values": [
+ ["John Doe", "john@example.com"],
+ ["Jane Smith", "jane@example.com"]
+ ]
+}
+```
+
+### Update Operations
+
+```json
+{
+ "salary": 75000,
+ "department": "Engineering",
+ "last_updated": "2024-01-15"
+}
+```
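+
+Passed to the `update_table_data` tool together with a WHERE clause, this payload updates only the matching rows:
+
+```python
+result = update_table_data(
+    "./databases/company.db",
+    "employees",
+    '{"salary": 75000, "department": "Engineering", "last_updated": "2024-01-15"}',
+    where_clause="id = 1",
+)
+```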
+
+## Advanced Features
+
+### Security
+
+- **SQL Injection Prevention**: Parameterized queries and input validation
+- **Query Validation**: Only SELECT queries allowed for query operations
+- **Input Sanitization**: Automatic cleaning and validation of inputs
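+
+In practice, this means user-supplied values travel through the `params` argument of `query_database` instead of being spliced into the SQL text:
+
+```python
+# Safe: values are bound as parameters, never interpolated into the query
+result = query_database(
+    "./databases/company.db",
+    "SELECT * FROM employees WHERE department = ? AND salary > ?",
+    params='["Engineering", 50000]',
+)
+```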
+
+### Performance
+
+- **Connection Management**: Efficient database connection handling
+- **Query Optimization**: Intelligent query planning and execution
+- **Batch Operations**: Support for bulk data operations
+
+### Error Handling
+
+- **Comprehensive Error Messages**: Detailed error reporting and solutions
+- **Graceful Degradation**: System continues operating despite individual failures
+- **Transaction Safety**: Atomic operations with rollback capabilities
+
+## Best Practices
+
+### Database Design
+
+1. **Use Proper Data Types**: Choose appropriate SQL data types for your data
+2. **Implement Constraints**: Use PRIMARY KEY, FOREIGN KEY, and CHECK constraints
+3. **Normalize Data**: Follow database normalization principles
+4. **Index Strategy**: Create indexes for frequently queried columns
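+
+A schema string that follows these guidelines might look like the sketch below (note that `create_table` accepts only column definitions, so indexes would be created separately):
+
+```python
+schema = (
+    "id INTEGER PRIMARY KEY AUTOINCREMENT, "
+    "customer_id INTEGER NOT NULL REFERENCES customers(id), "
+    "total_amount REAL CHECK (total_amount >= 0), "
+    "status TEXT DEFAULT 'pending', "
+    "created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP"
+)
+result = create_table("./databases/ecommerce_store.db", "orders", schema)
+```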
+
+### Agent Coordination
+
+1. **Clear Task Definitions**: Provide specific, actionable task descriptions
+2. **Sequential Operations**: Allow agents to complete dependencies before next steps
+3. **Comprehensive Requirements**: Include all necessary details in task descriptions
+4. **Result Validation**: Review agent outputs for completeness and accuracy
+
+### Data Operations
+
+1. **Backup Before Updates**: Always backup data before major modifications
+2. **Test Queries**: Validate queries on sample data before production execution
+3. **Monitor Performance**: Track query execution times and optimize as needed
+4. **Validate Data**: Ensure data integrity through proper validation
+
+## File Structure
+
+```
+examples/guides/smart_database/
+├── smart_database_swarm.py    # Main implementation
+├── README.md                  # This documentation
+└── databases/                 # Generated databases (auto-created)
+```
+
+## Dependencies
+
+- `swarms`: Core framework for multi-agent systems
+- `sqlite3`: Database operations (built-in Python)
+- `json`: Data serialization (built-in Python)
+- `pathlib`: File path operations (built-in Python)
+- `loguru`: Minimal logging functionality
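+
+Installing the framework covers the third-party requirements, since `loguru` ships as a Swarms dependency:
+
+```bash
+pip install swarms
+```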
+
+## Running the System
+
+```bash
+# Navigate to the smart_database directory
+cd examples/guides/smart_database
+
+# Run the demonstration
+python smart_database_swarm.py
+
+# The system will create databases in the ./databases/ directory
+# Check the generated databases and results
+```
+
+## Expected Output
+
+The system will create:
+
+1. **Databases**: SQLite database files in `./databases/` directory
+2. **Detailed Results**: JSON-formatted operation results
+3. **Agent Coordination**: Logs showing how tasks are distributed
+4. **Performance Metrics**: Execution times and success statistics
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Database Not Found**: Ensure database path is correct and accessible
+2. **Schema Errors**: Verify SQL syntax in table creation statements
+3. **Data Format Issues**: Check JSON formatting for data insertion
+4. **Permission Errors**: Ensure write permissions for database directory
+
+### Debug Mode
+
+Enable verbose logging to see detailed agent interactions:
+
+```python
+smart_database_swarm.verbose = True
+result = smart_database_swarm.run(task=your_task)
+```
+
+## Contributing
+
+To extend the Smart Database Swarm:
+
+1. **Add New Tools**: Create additional database operation functions
+2. **Enhance Agents**: Improve agent prompts and capabilities
+3. **Add Database Types**: Support for PostgreSQL, MySQL, etc.
+4. **Performance Optimization**: Implement caching and connection pooling
+
+## License
+
+This project is part of the Swarms framework and follows the same licensing terms.
diff --git a/examples/guides/smart_database/smart_database_swarm.py b/examples/guides/smart_database/smart_database_swarm.py
new file mode 100644
index 00000000..b3f07c41
--- /dev/null
+++ b/examples/guides/smart_database/smart_database_swarm.py
@@ -0,0 +1,1002 @@
+"""
+Smart Database Powered by Hierarchical Multi-Agent Workflow
+
+This module implements a fully autonomous database management system using a hierarchical
+multi-agent architecture. The system includes specialized agents for different database
+operations coordinated by a Database Director agent.
+
+Features:
+- Database creation and management
+- Table creation with schema validation
+- Data insertion and updates
+- Complex query execution
+- Schema modifications
+- Hierarchical agent coordination
+
+Author: Swarms Framework
+"""
+
+import sqlite3
+import json
+from pathlib import Path
+from loguru import logger
+
+from swarms import Agent, HierarchicalSwarm
+
+
+# =============================================================================
+# DATABASE TOOLS - Core Functions for Database Operations
+# =============================================================================
+
+
+def create_database(
+ database_name: str, database_path: str = "./databases"
+) -> str:
+ """
+ Create a new SQLite database file.
+
+ Args:
+ database_name (str): Name of the database to create (without .db extension)
+ database_path (str, optional): Directory path where database will be created.
+ Defaults to "./databases".
+
+ Returns:
+ str: JSON string containing operation result and database information
+
+ Raises:
+ OSError: If unable to create database directory or file
+ sqlite3.Error: If database connection fails
+
+ Example:
+ >>> result = create_database("company_db", "/data/databases")
+ >>> print(result)
+ {"status": "success", "database": "company_db.db", "path": "/data/databases/company_db.db"}
+ """
+ try:
+ # Validate input parameters
+ if not database_name or not database_name.strip():
+ raise ValueError("Database name cannot be empty")
+
+ # Clean database name
+ db_name = database_name.strip().replace(" ", "_")
+ if not db_name.endswith(".db"):
+ db_name += ".db"
+
+ # Create database directory if it doesn't exist
+ db_path = Path(database_path)
+ db_path.mkdir(parents=True, exist_ok=True)
+
+ # Full database file path
+ full_db_path = db_path / db_name
+
+ # Create database connection (creates file if doesn't exist)
+ conn = sqlite3.connect(str(full_db_path))
+
+ # Create a metadata table to track database info
+ conn.execute(
+ """
+ CREATE TABLE IF NOT EXISTS _database_metadata (
+ key TEXT PRIMARY KEY,
+ value TEXT,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+ )
+ """
+ )
+
+ # Insert database metadata
+ conn.execute(
+ "INSERT OR REPLACE INTO _database_metadata (key, value) VALUES (?, ?)",
+ ("database_name", database_name),
+ )
+
+ conn.commit()
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": f"Database '{database_name}' created successfully",
+ "database": db_name,
+ "path": str(full_db_path),
+ "size_bytes": full_db_path.stat().st_size,
+ }
+
+ logger.info(f"Database created: {db_name}")
+ return json.dumps(result, indent=2)
+
+ except ValueError as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"Database error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def create_table(
+ database_path: str, table_name: str, schema: str
+) -> str:
+ """
+ Create a new table in the specified database with the given schema.
+
+ Args:
+ database_path (str): Full path to the database file
+ table_name (str): Name of the table to create
+ schema (str): SQL schema definition for the table columns
+ Format: "column1 TYPE constraints, column2 TYPE constraints, ..."
+ Example: "id INTEGER PRIMARY KEY, name TEXT NOT NULL, age INTEGER"
+
+ Returns:
+ str: JSON string containing operation result and table information
+
+ Raises:
+ sqlite3.Error: If table creation fails
+ FileNotFoundError: If database file doesn't exist
+
+ Example:
+ >>> schema = "id INTEGER PRIMARY KEY, name TEXT NOT NULL, email TEXT UNIQUE"
+ >>> result = create_table("/data/company.db", "employees", schema)
+ >>> print(result)
+ {"status": "success", "table": "employees", "columns": 3}
+ """
+ try:
+ # Validate inputs
+ if not all([database_path, table_name, schema]):
+ raise ValueError(
+ "Database path, table name, and schema are required"
+ )
+
+ # Check if database exists
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ # Clean table name
+ clean_table_name = table_name.strip().replace(" ", "_")
+
+ # Connect to database
+ conn = sqlite3.connect(database_path)
+ cursor = conn.cursor()
+
+ # Check if table already exists
+ cursor.execute(
+ "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
+ (clean_table_name,),
+ )
+
+ if cursor.fetchone():
+ conn.close()
+ return json.dumps(
+ {
+ "status": "warning",
+ "message": f"Table '{clean_table_name}' already exists",
+ "table": clean_table_name,
+ }
+ )
+
+ # Create table with provided schema
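+        # NOTE: CREATE TABLE statements cannot use parameter binding, so the
+        # (agent-supplied) table name and schema are interpolated directly here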
+ create_sql = f"CREATE TABLE {clean_table_name} ({schema})"
+ cursor.execute(create_sql)
+
+ # Get table info
+ cursor.execute(f"PRAGMA table_info({clean_table_name})")
+ columns = cursor.fetchall()
+
+ # Update metadata
+ cursor.execute(
+ """
+ INSERT OR REPLACE INTO _database_metadata (key, value)
+ VALUES (?, ?)
+ """,
+ (f"table_{clean_table_name}_created", "true"),
+ )
+
+ conn.commit()
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": f"Table '{clean_table_name}' created successfully",
+ "table": clean_table_name,
+ "columns": len(columns),
+ "schema": [
+ {
+ "name": col[1],
+ "type": col[2],
+ "nullable": not col[3],
+ }
+ for col in columns
+ ],
+ }
+
+ return json.dumps(result, indent=2)
+
+ except ValueError as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except FileNotFoundError as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def insert_data(
+ database_path: str, table_name: str, data: str
+) -> str:
+ """
+ Insert data into a specified table.
+
+ Args:
+ database_path (str): Full path to the database file
+ table_name (str): Name of the target table
+ data (str): JSON string containing data to insert
+ Format: {"columns": ["col1", "col2"], "values": [[val1, val2], ...]}
+ Or: [{"col1": val1, "col2": val2}, ...]
+
+ Returns:
+ str: JSON string containing operation result and insertion statistics
+
+ Example:
+ >>> data = '{"columns": ["name", "age"], "values": [["John", 30], ["Jane", 25]]}'
+ >>> result = insert_data("/data/company.db", "employees", data)
+ >>> print(result)
+ {"status": "success", "table": "employees", "rows_inserted": 2}
+ """
+ try:
+ # Validate inputs
+ if not all([database_path, table_name, data]):
+ raise ValueError(
+ "Database path, table name, and data are required"
+ )
+
+ # Check if database exists
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ # Parse data
+ try:
+ parsed_data = json.loads(data)
+ except json.JSONDecodeError:
+ raise ValueError("Invalid JSON format for data")
+
+ conn = sqlite3.connect(database_path)
+ cursor = conn.cursor()
+
+ # Check if table exists
+ cursor.execute(
+ "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
+ (table_name,),
+ )
+
+ if not cursor.fetchone():
+ conn.close()
+ raise ValueError(f"Table '{table_name}' does not exist")
+
+ rows_inserted = 0
+
+ # Handle different data formats
+ if isinstance(parsed_data, list) and all(
+ isinstance(item, dict) for item in parsed_data
+ ):
+ # Format: [{"col1": val1, "col2": val2}, ...]
+ for row in parsed_data:
+ columns = list(row.keys())
+ values = list(row.values())
+ placeholders = ", ".join(["?" for _ in values])
+ columns_str = ", ".join(columns)
+
+ insert_sql = f"INSERT INTO {table_name} ({columns_str}) VALUES ({placeholders})"
+ cursor.execute(insert_sql, values)
+ rows_inserted += 1
+
+ elif (
+ isinstance(parsed_data, dict)
+ and "columns" in parsed_data
+ and "values" in parsed_data
+ ):
+ # Format: {"columns": ["col1", "col2"], "values": [[val1, val2], ...]}
+ columns = parsed_data["columns"]
+ values_list = parsed_data["values"]
+
+ placeholders = ", ".join(["?" for _ in columns])
+ columns_str = ", ".join(columns)
+
+ insert_sql = f"INSERT INTO {table_name} ({columns_str}) VALUES ({placeholders})"
+
+ for values in values_list:
+ cursor.execute(insert_sql, values)
+ rows_inserted += 1
+ else:
+ raise ValueError(
+ "Invalid data format. Expected list of dicts or dict with columns/values"
+ )
+
+ conn.commit()
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": f"Data inserted successfully into '{table_name}'",
+ "table": table_name,
+ "rows_inserted": rows_inserted,
+ }
+
+ return json.dumps(result, indent=2)
+
+ except (ValueError, FileNotFoundError) as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def query_database(
+ database_path: str, query: str, params: str = "[]"
+) -> str:
+ """
+ Execute a SELECT query on the database and return results.
+
+ Args:
+ database_path (str): Full path to the database file
+ query (str): SQL SELECT query to execute
+ params (str, optional): JSON string of query parameters for prepared statements.
+ Defaults to "[]".
+
+ Returns:
+ str: JSON string containing query results and metadata
+
+ Example:
+ >>> query = "SELECT * FROM employees WHERE age > ?"
+ >>> params = "[25]"
+ >>> result = query_database("/data/company.db", query, params)
+ >>> print(result)
+ {"status": "success", "results": [...], "row_count": 5}
+ """
+ try:
+ # Validate inputs
+ if not all([database_path, query]):
+ raise ValueError("Database path and query are required")
+
+ # Check if database exists
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ # Validate query is SELECT only (security)
+ if not query.strip().upper().startswith("SELECT"):
+ raise ValueError("Only SELECT queries are allowed")
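+        # Note: this prefix check rejects CTEs that begin with WITH; sqlite3's
+        # execute() also refuses stacked statements, which limits injection surface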
+
+ # Parse parameters
+ try:
+ query_params = json.loads(params)
+ except json.JSONDecodeError:
+ raise ValueError("Invalid JSON format for parameters")
+
+ conn = sqlite3.connect(database_path)
+ conn.row_factory = sqlite3.Row # Enable column access by name
+ cursor = conn.cursor()
+
+ # Execute query
+ if query_params:
+ cursor.execute(query, query_params)
+ else:
+ cursor.execute(query)
+
+ # Fetch results
+ rows = cursor.fetchall()
+
+ # Convert to list of dictionaries
+ results = [dict(row) for row in rows]
+
+ # Get column names
+ column_names = (
+ [description[0] for description in cursor.description]
+ if cursor.description
+ else []
+ )
+
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": "Query executed successfully",
+ "results": results,
+ "row_count": len(results),
+ "columns": column_names,
+ }
+
+ return json.dumps(result, indent=2)
+
+ except (ValueError, FileNotFoundError) as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def update_table_data(
+ database_path: str,
+ table_name: str,
+ update_data: str,
+ where_clause: str = "",
+) -> str:
+ """
+ Update existing data in a table.
+
+ Args:
+ database_path (str): Full path to the database file
+ table_name (str): Name of the table to update
+ update_data (str): JSON string with column-value pairs to update
+ Format: {"column1": "new_value1", "column2": "new_value2"}
+ where_clause (str, optional): WHERE condition for the update (without WHERE keyword).
+ Example: "id = 1 AND status = 'active'"
+
+ Returns:
+ str: JSON string containing operation result and update statistics
+
+ Example:
+ >>> update_data = '{"salary": 50000, "department": "Engineering"}'
+ >>> where_clause = "id = 1"
+ >>> result = update_table_data("/data/company.db", "employees", update_data, where_clause)
+ >>> print(result)
+ {"status": "success", "table": "employees", "rows_updated": 1}
+ """
+ try:
+ # Validate inputs
+ if not all([database_path, table_name, update_data]):
+ raise ValueError(
+ "Database path, table name, and update data are required"
+ )
+
+ # Check if database exists
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ # Parse update data
+ try:
+ parsed_updates = json.loads(update_data)
+ except json.JSONDecodeError:
+ raise ValueError("Invalid JSON format for update data")
+
+ if not isinstance(parsed_updates, dict):
+ raise ValueError("Update data must be a dictionary")
+
+ conn = sqlite3.connect(database_path)
+ cursor = conn.cursor()
+
+ # Check if table exists
+ cursor.execute(
+ "SELECT name FROM sqlite_master WHERE type='table' AND name=?",
+ (table_name,),
+ )
+
+ if not cursor.fetchone():
+ conn.close()
+ raise ValueError(f"Table '{table_name}' does not exist")
+
+ # Build UPDATE query
+ set_clauses = []
+ values = []
+
+ for column, value in parsed_updates.items():
+ set_clauses.append(f"{column} = ?")
+ values.append(value)
+
+ set_clause = ", ".join(set_clauses)
+
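+        # Note: where_clause is interpolated verbatim into the SQL (the SET values
+        # above are bound via placeholders), so only trusted conditions should be passed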
+ if where_clause:
+ update_sql = f"UPDATE {table_name} SET {set_clause} WHERE {where_clause}"
+ else:
+ update_sql = f"UPDATE {table_name} SET {set_clause}"
+
+ # Execute update
+ cursor.execute(update_sql, values)
+ rows_updated = cursor.rowcount
+
+ conn.commit()
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": f"Table '{table_name}' updated successfully",
+ "table": table_name,
+ "rows_updated": rows_updated,
+ "updated_columns": list(parsed_updates.keys()),
+ }
+
+ return json.dumps(result, indent=2)
+
+ except (ValueError, FileNotFoundError) as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+def get_database_schema(database_path: str) -> str:
+ """
+ Get comprehensive schema information for all tables in the database.
+
+ Args:
+ database_path (str): Full path to the database file
+
+ Returns:
+ str: JSON string containing complete database schema information
+
+ Example:
+ >>> result = get_database_schema("/data/company.db")
+ >>> print(result)
+ {"status": "success", "database": "company.db", "tables": {...}}
+ """
+ try:
+ if not database_path:
+ raise ValueError("Database path is required")
+
+ if not Path(database_path).exists():
+ raise FileNotFoundError(
+ f"Database file not found: {database_path}"
+ )
+
+ conn = sqlite3.connect(database_path)
+ cursor = conn.cursor()
+
+        # Get all user tables (skip SQLite internal tables, which are prefixed with 'sqlite_')
+        cursor.execute(
+            "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'"
+        )
+ tables = cursor.fetchall()
+
+ schema_info = {
+ "database": Path(database_path).name,
+ "table_count": len(tables),
+ "tables": {},
+ }
+
+ for table in tables:
+ table_name = table[0]
+
+ # Get table schema
+ cursor.execute(f"PRAGMA table_info({table_name})")
+ columns = cursor.fetchall()
+
+ # Get row count
+ cursor.execute(f"SELECT COUNT(*) FROM {table_name}")
+ row_count = cursor.fetchone()[0]
+
+ schema_info["tables"][table_name] = {
+ "columns": [
+ {
+ "name": col[1],
+ "type": col[2],
+ "nullable": not col[3],
+ "default": col[4],
+ "primary_key": bool(col[5]),
+ }
+ for col in columns
+ ],
+ "column_count": len(columns),
+ "row_count": row_count,
+ }
+
+ conn.close()
+
+ result = {
+ "status": "success",
+ "message": "Database schema retrieved successfully",
+ "schema": schema_info,
+ }
+
+ return json.dumps(result, indent=2)
+
+ except (ValueError, FileNotFoundError) as e:
+ return json.dumps({"status": "error", "error": str(e)})
+ except sqlite3.Error as e:
+ return json.dumps(
+ {"status": "error", "error": f"SQL error: {str(e)}"}
+ )
+ except Exception as e:
+ return json.dumps(
+ {
+ "status": "error",
+ "error": f"Unexpected error: {str(e)}",
+ }
+ )
+
+
+# =============================================================================
+# DATABASE CREATION SPECIALIST AGENT
+# =============================================================================
+database_creator_agent = Agent(
+ agent_name="Database-Creator",
+ agent_description="Specialist agent responsible for creating and initializing databases with proper structure and metadata",
+ system_prompt="""You are the Database Creator, a specialist agent responsible for database creation and initialization. Your expertise includes:
+
+ DATABASE CREATION & SETUP:
+ - Creating new SQLite databases with proper structure
+ - Setting up database metadata and tracking systems
+ - Initializing database directories and file organization
+ - Ensuring database accessibility and permissions
+ - Creating database backup and recovery procedures
+
+ DATABASE ARCHITECTURE:
+ - Designing optimal database structures for different use cases
+ - Planning database organization and naming conventions
+ - Setting up database configuration and optimization settings
+ - Implementing database security and access controls
+ - Creating database documentation and specifications
+
+ Your responsibilities:
+ - Create new databases when requested
+ - Set up proper database structure and metadata
+ - Ensure database is properly initialized and accessible
+ - Provide database creation status and information
+ - Handle database creation errors and provide solutions
+
+ You work with precise technical specifications and always ensure databases are created correctly and efficiently.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.3,
+ dynamic_temperature_enabled=True,
+ tools=[create_database, get_database_schema],
+)
+
+# =============================================================================
+# TABLE MANAGEMENT SPECIALIST AGENT
+# =============================================================================
+table_manager_agent = Agent(
+ agent_name="Table-Manager",
+ agent_description="Specialist agent for table creation, schema design, and table structure management",
+ system_prompt="""You are the Table Manager, a specialist agent responsible for table creation, schema design, and table structure management. Your expertise includes:
+
+ TABLE CREATION & DESIGN:
+ - Creating tables with optimal schema design
+ - Defining appropriate data types and constraints
+ - Setting up primary keys, foreign keys, and indexes
+ - Designing normalized table structures
+ - Creating tables that support efficient queries and operations
+
+ SCHEMA MANAGEMENT:
+ - Analyzing schema requirements and designing optimal structures
+ - Validating schema definitions and data types
+ - Ensuring schema consistency and integrity
+ - Managing schema modifications and updates
+ - Optimizing table structures for performance
+
+ DATA INTEGRITY:
+ - Implementing proper constraints and validation rules
+ - Setting up referential integrity between tables
+ - Ensuring data consistency across table operations
+ - Managing table relationships and dependencies
+ - Creating tables that support data quality requirements
+
+ Your responsibilities:
+ - Create tables with proper schema definitions
+ - Validate table structures and constraints
+ - Ensure optimal table design for performance
+ - Handle table creation errors and provide solutions
+ - Provide detailed table information and metadata
+
+ You work with precision and always ensure tables are created with optimal structure and performance characteristics.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.3,
+ dynamic_temperature_enabled=True,
+ tools=[create_table, get_database_schema],
+)
+
+# =============================================================================
+# DATA OPERATIONS SPECIALIST AGENT
+# =============================================================================
+data_operations_agent = Agent(
+ agent_name="Data-Operations",
+ agent_description="Specialist agent for data insertion, updates, and data manipulation operations",
+ system_prompt="""You are the Data Operations specialist, responsible for all data manipulation operations including insertion, updates, and data management. Your expertise includes:
+
+ DATA INSERTION:
+ - Inserting data with proper validation and formatting
+ - Handling bulk data insertions efficiently
+ - Managing data type conversions and formatting
+ - Ensuring data integrity during insertion operations
+ - Validating data before insertion to prevent errors
+
+ DATA UPDATES:
+ - Updating existing data with precision and safety
+ - Creating targeted update operations with proper WHERE clauses
+ - Managing bulk updates and data modifications
+ - Ensuring data consistency during update operations
+ - Validating update operations to prevent data corruption
+
+ DATA VALIDATION:
+ - Validating data formats and types before operations
+ - Ensuring data meets schema requirements and constraints
+ - Checking for data consistency and integrity
+ - Managing data transformation and cleaning operations
+ - Providing detailed feedback on data operation results
+
+ ERROR HANDLING:
+ - Managing data operation errors gracefully
+ - Providing clear error messages and solutions
+ - Ensuring data operations are atomic and safe
+ - Rolling back operations when necessary
+ - Maintaining data integrity throughout all operations
+
+ Your responsibilities:
+ - Execute data insertion operations safely and efficiently
+ - Perform data updates with proper validation
+ - Ensure data integrity throughout all operations
+ - Handle data operation errors and provide solutions
+ - Provide detailed operation results and statistics
+
+ You work with extreme precision and always prioritize data integrity and safety in all operations.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.3,
+ dynamic_temperature_enabled=True,
+ tools=[insert_data, update_table_data],
+)
+
+# =============================================================================
+# QUERY SPECIALIST AGENT
+# =============================================================================
+query_specialist_agent = Agent(
+ agent_name="Query-Specialist",
+ agent_description="Expert agent for database querying, data retrieval, and query optimization",
+ system_prompt="""You are the Query Specialist, an expert agent responsible for database querying, data retrieval, and query optimization. Your expertise includes:
+
+ QUERY EXECUTION:
+ - Executing complex SELECT queries efficiently
+ - Handling parameterized queries for security
+ - Managing query results and data formatting
+ - Ensuring query performance and optimization
+ - Providing comprehensive query results with metadata
+
+ QUERY OPTIMIZATION:
+ - Analyzing query performance and optimization opportunities
+ - Creating efficient queries that minimize resource usage
+ - Understanding database indexes and query planning
+ - Optimizing JOIN operations and complex queries
+ - Managing query timeouts and performance monitoring
+
+ DATA RETRIEVAL:
+ - Retrieving data with proper formatting and structure
+ - Handling large result sets efficiently
+ - Managing data aggregation and summarization
+ - Creating reports and data analysis queries
+ - Ensuring data accuracy and completeness in results
+
+ SECURITY & VALIDATION:
+ - Ensuring queries are safe and secure
+ - Validating query syntax and parameters
+ - Preventing SQL injection and security vulnerabilities
+ - Managing query permissions and access controls
+ - Ensuring queries follow security best practices
+
+ Your responsibilities:
+ - Execute database queries safely and efficiently
+ - Optimize query performance for best results
+ - Provide comprehensive query results and analysis
+ - Handle query errors and provide solutions
+ - Ensure query security and data protection
+
+ You work with expertise in SQL optimization and always ensure queries are secure, efficient, and provide accurate results.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.3,
+ dynamic_temperature_enabled=True,
+ tools=[query_database, get_database_schema],
+)
+
+# =============================================================================
+# DATABASE DIRECTOR AGENT (COORDINATOR)
+# =============================================================================
+database_director_agent = Agent(
+ agent_name="Database-Director",
+ agent_description="Senior database director who orchestrates comprehensive database operations across all specialized teams",
+ system_prompt="""You are the Database Director, the senior executive responsible for orchestrating comprehensive database operations and coordinating a team of specialized database experts. Your role is to:
+
+ STRATEGIC COORDINATION:
+ - Analyze complex database tasks and break them down into specialized operations
+ - Assign tasks to the most appropriate specialist based on their unique expertise
+ - Ensure comprehensive coverage of all database operations (creation, schema, data, queries)
+ - Coordinate between specialists to avoid conflicts and ensure data integrity
+ - Synthesize results from multiple specialists into coherent database solutions
+ - Ensure all database operations align with user requirements and best practices
+
+ TEAM LEADERSHIP:
+ - Lead the Database Creator in setting up new databases and infrastructure
+ - Guide the Table Manager in creating optimal table structures and schemas
+ - Direct the Data Operations specialist in data insertion and update operations
+ - Oversee the Query Specialist in data retrieval and analysis operations
+ - Ensure all team members work collaboratively toward unified database goals
+ - Provide strategic direction and feedback to optimize team performance
+
+ DATABASE ARCHITECTURE:
+ - Design comprehensive database solutions that meet user requirements
+ - Ensure database operations follow best practices and standards
+ - Plan database workflows that optimize performance and reliability
+ - Balance immediate operational needs with long-term database health
+ - Ensure database operations are secure, efficient, and maintainable
+ - Optimize database operations for scalability and performance
+
+ OPERATION ORCHESTRATION:
+ - Monitor database operations across all specialists and activities
+ - Analyze results to identify optimization opportunities and improvements
+ - Ensure database operations deliver reliable and accurate results
+ - Provide strategic recommendations based on operation outcomes
+ - Coordinate complex multi-step database operations across specialists
+ - Ensure continuous improvement and optimization in database management
+
+ Your expertise includes:
+ - Database architecture and design strategy
+ - Team leadership and cross-functional coordination
+ - Database performance analysis and optimization
+ - Strategic planning and requirement analysis
+ - Operation workflow management and optimization
+ - Database security and best practices implementation
+
+ You deliver comprehensive database solutions that leverage the full expertise of your specialized team, ensuring all database operations work together to provide reliable, efficient, and secure data management.""",
+ model_name="claude-sonnet-4-20250514",
+ max_loops=1,
+ temperature=0.5,
+ dynamic_temperature_enabled=True,
+)
+
+# =============================================================================
+# HIERARCHICAL DATABASE SWARM
+# =============================================================================
+# Create list of specialized database agents
+database_specialists = [
+ database_creator_agent,
+ table_manager_agent,
+ data_operations_agent,
+ query_specialist_agent,
+]
+
+# Initialize the hierarchical database swarm
+smart_database_swarm = HierarchicalSwarm(
+ name="Smart-Database-Swarm",
+ description="A comprehensive database management system with specialized agents for creation, schema management, data operations, and querying, coordinated by a database director",
+ director_model_name="gpt-4.1",
+ agents=database_specialists,
+ max_loops=1,
+ verbose=True,
+)
+
+# =============================================================================
+# EXAMPLE USAGE AND DEMONSTRATIONS
+# =============================================================================
+if __name__ == "__main__":
+    # Log the start of the demonstration
+ logger.info("Starting Smart Database Swarm demonstration")
+
+ # Example 1: Create a complete e-commerce database system
+ print("=" * 80)
+ print("SMART DATABASE SWARM - E-COMMERCE SYSTEM EXAMPLE")
+ print("=" * 80)
+
+ task1 = """Create a comprehensive e-commerce database system with the following requirements:
+
+ 1. Create a database called 'ecommerce_db'
+ 2. Create tables for:
+ - customers (id, name, email, phone, address, created_at)
+ - products (id, name, description, price, category, stock_quantity, created_at)
+ - orders (id, customer_id, order_date, total_amount, status)
+ - order_items (id, order_id, product_id, quantity, unit_price)
+
+ 3. Insert sample data:
+ - Add 3 customers
+ - Add 5 products in different categories
+ - Create 2 orders with multiple items
+
+ 4. Query the database to:
+ - Show all customers with their order history
+ - Display products by category with stock levels
+ - Calculate total sales by product
+
+ Ensure all operations are executed properly and provide comprehensive results."""
+
+ result1 = smart_database_swarm.run(task=task1)
+ print("\nE-COMMERCE DATABASE RESULT:")
+ print(result1)
+
+ # print("\n" + "=" * 80)
+ # print("SMART DATABASE SWARM - EMPLOYEE MANAGEMENT SYSTEM")
+ # print("=" * 80)
+
+ # # Example 2: Employee management system
+ # task2 = """Create an employee management database system:
+
+ # 1. Create database 'company_hr'
+ # 2. Create tables for:
+ # - departments (id, name, budget, manager_id)
+ # - employees (id, name, email, department_id, position, salary, hire_date)
+ # - projects (id, name, description, start_date, end_date, budget)
+ # - employee_projects (employee_id, project_id, role, hours_allocated)
+
+ # 3. Add sample data for departments, employees, and projects
+ # 4. Query for:
+ # - Employee count by department
+ # - Average salary by position
+ # - Projects with their assigned employees
+ # - Department budgets vs project allocations
+
+ # Coordinate the team to build this system efficiently."""
+
+ # result2 = smart_database_swarm.run(task=task2)
+ # print("\nEMPLOYEE MANAGEMENT RESULT:")
+ # print(result2)
+
+ # print("\n" + "=" * 80)
+ # print("SMART DATABASE SWARM - DATABASE ANALYSIS")
+ # print("=" * 80)
+
+ # # Example 3: Database analysis and optimization
+ # task3 = """Analyze and optimize the existing databases:
+
+ # 1. Get schema information for all created databases
+ # 2. Analyze table structures and relationships
+ # 3. Suggest optimizations for:
+ # - Index creation for better query performance
+ # - Data normalization improvements
+ # - Constraint additions for data integrity
+
+ # 4. Update data in existing tables:
+ # - Increase product prices by 10% for electronics category
+ # - Update employee salaries based on performance criteria
+ # - Modify order statuses for completed orders
+
+ # 5. Create comprehensive reports showing:
+ # - Database statistics and health metrics
+ # - Data distribution and patterns
+ # - Performance optimization recommendations
+
+ # Coordinate all specialists to provide a complete database analysis."""
+
+ # result3 = smart_database_swarm.run(task=task3)
+ # print("\nDATABASE ANALYSIS RESULT:")
+ # print(result3)
+
+ # logger.info("Smart Database Swarm demonstration completed successfully")
diff --git a/examples/multi_agent/graphworkflow_examples/Graph-Workflow-01_visualization_ddbd7109-c7b1-40f6-83f0-f90771c3beac.png b/examples/multi_agent/graphworkflow_examples/example_images/Graph-Workflow-01_visualization_ddbd7109-c7b1-40f6-83f0-f90771c3beac.png
similarity index 100%
rename from examples/multi_agent/graphworkflow_examples/Graph-Workflow-01_visualization_ddbd7109-c7b1-40f6-83f0-f90771c3beac.png
rename to examples/multi_agent/graphworkflow_examples/example_images/Graph-Workflow-01_visualization_ddbd7109-c7b1-40f6-83f0-f90771c3beac.png
diff --git a/examples/multi_agent/graphworkflow_examples/graph_workflow_example.png b/examples/multi_agent/graphworkflow_examples/example_images/graph_workflow_example.png
similarity index 100%
rename from examples/multi_agent/graphworkflow_examples/graph_workflow_example.png
rename to examples/multi_agent/graphworkflow_examples/example_images/graph_workflow_example.png
diff --git a/examples/multi_agent/graphworkflow_examples/test_graphviz_visualization.png b/examples/multi_agent/graphworkflow_examples/example_images/test_graphviz_visualization.png
similarity index 100%
rename from examples/multi_agent/graphworkflow_examples/test_graphviz_visualization.png
rename to examples/multi_agent/graphworkflow_examples/example_images/test_graphviz_visualization.png
diff --git a/examples/single_agent/llms/mistral_example.py b/examples/single_agent/llms/mistral_example.py
new file mode 100644
index 00000000..9d22143d
--- /dev/null
+++ b/examples/single_agent/llms/mistral_example.py
@@ -0,0 +1,20 @@
+from swarms import Agent
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Quantitative-Trading-Agent",
+ agent_description="Quantitative trading and analysis agent",
+ system_prompt="You are an expert quantitative trading agent. Answer concisely and accurately using your knowledge of trading strategies, risk management, and financial markets.",
+ model_name="mistral/mistral-tiny",
+ dynamic_temperature_enabled=True,
+ output_type="str-all-except-first",
+ max_loops="auto",
+ interactive=True,
+ no_reasoning_prompt=True,
+ streaming_on=True,
+)
+
+out = agent.run(
+ task="What are the best top 3 etfs for gold coverage?"
+)
+print(out)
diff --git a/.dockerignore b/scripts/docker/.dockerignore
similarity index 99%
rename from .dockerignore
rename to scripts/docker/.dockerignore
index 9b9944a2..241d24cb 100644
--- a/.dockerignore
+++ b/scripts/docker/.dockerignore
@@ -294,4 +294,3 @@ flycheck_*.el
# network security
/network-security.data
-
diff --git a/scripts/docker/DOCKER.md b/scripts/docker/DOCKER.md
new file mode 100644
index 00000000..5eeee366
--- /dev/null
+++ b/scripts/docker/DOCKER.md
@@ -0,0 +1,225 @@
+# Swarms Docker Image
+
+This repository includes a Docker image for running Swarms, an AI agent framework. The image is automatically built and published to DockerHub on every push to the main branch and on version tags.
+
+## Quick Start
+
+### Pull and Run
+
+```bash
+# Pull the latest image
+docker pull kyegomez/swarms:latest
+
+# Run a simple test
+docker run --rm kyegomez/swarms:latest python -c "import swarms; print(swarms.__version__)"
+
+# Run with interactive shell
+docker run -it --rm kyegomez/swarms:latest bash
+```
+
+### Using Specific Versions
+
+```bash
+# Pull a specific version
+docker pull kyegomez/swarms:v8.0.4
+
+# Run with specific version
+docker run --rm kyegomez/swarms:v8.0.4 python -c "import swarms; print(swarms.__version__)"
+```
+
+## Building Locally
+
+### Prerequisites
+
+- Docker installed on your system
+- Git to clone the repository
+
+### Build Steps
+
+```bash
+# Clone the repository
+git clone https://github.com/kyegomez/swarms.git
+cd swarms
+
+# Build the image (the Dockerfile lives under scripts/docker/)
+docker build -f scripts/docker/Dockerfile -t swarms:latest .
+
+# Test the image
+docker run --rm -v $(pwd)/scripts/docker:/tests swarms:latest python /tests/test_docker.py
+```
+
+## Usage Examples
+
+### Basic Agent Example
+
+```bash
+# Create a Python script (agent_example.py)
+cat > agent_example.py << 'EOF'
+from swarms import Agent
+
+# Create an agent
+agent = Agent(
+ agent_name="test_agent",
+ system_prompt="You are a helpful AI assistant."
+)
+
+# Run the agent
+result = agent.run("Hello! How are you today?")
+print(result)
+EOF
+
+# Run in Docker
+docker run --rm -v $(pwd):/app swarms:latest python /app/agent_example.py
+```
+
+### Interactive Development
+
+```bash
+# Run with volume mount for development
+docker run -it --rm \
+ -v $(pwd):/app \
+ -w /app \
+ swarms:latest bash
+
+# Inside the container, you can now run Python scripts
+python your_script.py
+```
+
+### Using Environment Variables
+
+```bash
+# Run with environment variables
+docker run --rm \
+ -e OPENAI_API_KEY=your_api_key_here \
+ -e ANTHROPIC_API_KEY=your_anthropic_key_here \
+ swarms:latest python your_script.py
+```
+
+## Configuration
+
+### Environment Variables
+
+The Docker image supports the following environment variables:
+
+- `OPENAI_API_KEY`: Your OpenAI API key
+- `ANTHROPIC_API_KEY`: Your Anthropic API key
+- `GOOGLE_API_KEY`: Your Google API key
+- `PYTHONPATH`: Additional Python path entries
+- `PYTHONUNBUFFERED`: Set to 1 for unbuffered output
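+
+If you keep your keys in a local `.env` file, `--env-file` passes them all at once (a sketch; adjust the file and script names to your setup):
+
+```bash
+docker run --rm --env-file .env -v $(pwd):/app kyegomez/swarms:latest python /app/your_script.py
+```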
+
+### Volume Mounts
+
+Common volume mount patterns:
+
+```bash
+# Mount current directory for development
+-v $(pwd):/app
+
+# Mount specific directories
+-v $(pwd)/data:/app/data
+-v $(pwd)/models:/app/models
+
+# Mount configuration files
+-v $(pwd)/config:/app/config
+```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Permission Denied**
+ ```bash
+ # Fix permission issues
+ docker run --rm -v $(pwd):/app:rw swarms:latest python your_script.py
+ ```
+
+2. **Memory Issues**
+ ```bash
+ # Increase memory limit
+ docker run --rm --memory=4g swarms:latest python your_script.py
+ ```
+
+3. **Network Issues**
+ ```bash
+ # Use host network
+ docker run --rm --network=host swarms:latest python your_script.py
+ ```
+
+### Debug Mode
+
+```bash
+# Run with debug output
+docker run --rm -e PYTHONUNBUFFERED=1 swarms:latest python -u your_script.py
+
+# Run with interactive debugging
+docker run -it --rm swarms:latest python -m pdb your_script.py
+```
+
+## CI/CD Integration
+
+The Docker image is automatically built and published via GitHub Actions:
+
+- **Triggers**: Push to main branch, version tags (v*.*.*)
+- **Platforms**: linux/amd64, linux/arm64
+- **Registry**: DockerHub (kyegomez/swarms)
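+
+The trigger block in the publish workflow looks roughly like this (a sketch; the branch name depends on the repository's default branch):
+
+```yaml
+on:
+  push:
+    branches: ["master"]
+    tags: ["v*.*.*"]
+```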
+
+### GitHub Actions Secrets Required
+
+- `DOCKERHUB_USERNAME`: Your DockerHub username
+- `DOCKERHUB_TOKEN`: Your DockerHub access token
+
+## Image Details
+
+### Base Image
+- Python 3.11-slim-bullseye
+- Multi-stage build for optimization
+- UV package manager for faster installations
+
+### Image Size
+- Optimized for minimal size
+- Multi-stage build reduces final image size
+- Only necessary dependencies included
+
+### Security
+- Non-root user execution
+- Minimal system dependencies
+- Regular security updates
+
+## Contributing
+
+To contribute to the Docker setup:
+
+1. Fork the repository
+2. Make your changes to the Dockerfile
+3. Test locally: `docker build -f scripts/docker/Dockerfile -t swarms:test .`
+4. Submit a pull request
+
+### Testing Changes
+
+```bash
+# Build test image (run from the repository root)
+docker build -f scripts/docker/Dockerfile -t swarms:test .
+
+# Run tests
+docker run --rm -v $(pwd)/scripts/docker:/tests swarms:test python /tests/test_docker.py
+
+# Test with your code
+docker run --rm -v $(pwd):/app swarms:test python your_test_script.py
+```
+
+## License
+
+This Docker setup is part of the Swarms project and follows the same MIT license.
+
+## Support
+
+For issues with the Docker image:
+
+1. Check the troubleshooting section above
+2. Review the GitHub Actions logs for build issues
+3. Open an issue on GitHub with detailed error information
+4. Include your Docker version and system information
+
+---
+
+**Note**: This Docker image is automatically updated with each release. For production use, consider pinning to specific version tags for stability.
diff --git a/Dockerfile b/scripts/docker/Dockerfile
similarity index 52%
rename from Dockerfile
rename to scripts/docker/Dockerfile
index aa312517..44392b09 100644
--- a/Dockerfile
+++ b/scripts/docker/Dockerfile
@@ -1,25 +1,37 @@
-# Use a lightweight Python image
+# Multi-stage build for optimized Docker image
+FROM python:3.11-slim-bullseye AS builder
+
+# Install system dependencies for building
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ build-essential gcc curl \
+ && rm -rf /var/lib/apt/lists/*
+
+# Install UV for faster package management
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh
+ENV PATH="/root/.cargo/bin:${PATH}"
+
+# Create a virtual environment and install dependencies
+RUN uv venv /opt/venv
+ENV VIRTUAL_ENV=/opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+
+# Install the swarms package into the virtual environment using UV
+# (--system would bypass the venv that the final stage copies)
+RUN uv pip install -U swarms
+
+# Final stage
FROM python:3.11-slim-bullseye
# Environment config for speed and safety
ENV PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
- PIP_NO_CACHE_DIR=1 \
- PIP_DISABLE_PIP_VERSION_CHECK=1 \
- PATH="/app:${PATH}" \
+ PATH="/opt/venv/bin:${PATH}" \
PYTHONPATH="/app:${PYTHONPATH}" \
USER=swarms
# Set working directory
WORKDIR /app
-# System dependencies (minimal)
-RUN apt-get update && apt-get install -y --no-install-recommends \
- build-essential gcc \
- && rm -rf /var/lib/apt/lists/*
-
-# Install the swarms package
-RUN pip install --upgrade pip && pip install -U swarms
+# Copy virtual environment from builder stage
+COPY --from=builder /opt/venv /opt/venv
# Add non-root user
RUN useradd -m -s /bin/bash -U $USER && \
diff --git a/scripts/docker/docker-compose.yml b/scripts/docker/docker-compose.yml
new file mode 100644
index 00000000..a0ef3a35
--- /dev/null
+++ b/scripts/docker/docker-compose.yml
@@ -0,0 +1,71 @@
+version: '3.8'
+
+services:
+ swarms:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ image: swarms:latest
+ container_name: swarms-container
+ environment:
+ - PYTHONUNBUFFERED=1
+ - PYTHONPATH=/app
+ # Add your API keys here or use .env file
+ # - OPENAI_API_KEY=${OPENAI_API_KEY}
+ # - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
+ # - GOOGLE_API_KEY=${GOOGLE_API_KEY}
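+      # Or load everything at once with a service-level env_file: .env entry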
+ volumes:
+ - .:/app
+ - ./data:/app/data
+ - ./models:/app/models
+ working_dir: /app
+ command: python test_docker.py
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "python", "-c", "import swarms; print('Health check passed')"]
+ interval: 30s
+ timeout: 10s
+ retries: 3
+ start_period: 40s
+
+ swarms-dev:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ image: swarms:dev
+ container_name: swarms-dev-container
+ environment:
+ - PYTHONUNBUFFERED=1
+ - PYTHONPATH=/app
+ volumes:
+ - .:/app
+ - ./data:/app/data
+ - ./models:/app/models
+ working_dir: /app
+ command: bash
+ stdin_open: true
+ tty: true
+ restart: unless-stopped
+
+ swarms-api:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ image: swarms:api
+ container_name: swarms-api-container
+ environment:
+ - PYTHONUNBUFFERED=1
+ - PYTHONPATH=/app
+ volumes:
+ - .:/app
+ working_dir: /app
+ ports:
+ - "8000:8000"
+ command: python -m uvicorn main:app --host 0.0.0.0 --port 8000 --reload
+ restart: unless-stopped
+ depends_on:
+ - swarms
+
+networks:
+ default:
+ name: swarms-network
diff --git a/.github/workflows/docker-image.yml b/scripts/docker/docker-image.yml
similarity index 100%
rename from .github/workflows/docker-image.yml
rename to scripts/docker/docker-image.yml
diff --git a/.github/workflows/docker-publish.yml b/scripts/docker/docker-publish.yml
similarity index 94%
rename from .github/workflows/docker-publish.yml
rename to scripts/docker/docker-publish.yml
index 34372b3e..40fac9cb 100644
--- a/.github/workflows/docker-publish.yml
+++ b/scripts/docker/docker-publish.yml
@@ -58,6 +58,7 @@ jobs:
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha
+ type=raw,value=latest,enable={{is_default_branch}}
# Build and push Docker image
- name: Build and push Docker image
@@ -71,3 +72,5 @@ jobs:
platforms: linux/amd64,linux/arm64
cache-from: type=gha
cache-to: type=gha,mode=max
+ build-args: |
+ BUILDKIT_INLINE_CACHE=1
diff --git a/scripts/docker/docker-test.yml b/scripts/docker/docker-test.yml
new file mode 100644
index 00000000..db83f238
--- /dev/null
+++ b/scripts/docker/docker-test.yml
@@ -0,0 +1,58 @@
+name: Docker Test Build
+
+on:
+ pull_request:
+ branches: [ "master" ]
+ workflow_dispatch:
+
+env:
+ REGISTRY: docker.io
+ IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+ test-build:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ # Setup QEMU for multi-platform builds
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ # Setup Docker BuildX
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ # Build Docker image (without pushing)
+ - name: Build Docker image
+ id: build
+ uses: docker/build-push-action@v6
+ with:
+          context: .
+          file: scripts/docker/Dockerfile
+          push: false
+          load: true
+ tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test
+ platforms: linux/amd64
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
+ build-args: |
+ BUILDKIT_INLINE_CACHE=1
+
+ # Test the built image
+ - name: Test Docker image
+ run: |
+ docker run --rm ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test python test_docker.py
+
+ # Show image size
+ - name: Show image size
+ run: |
+ docker images ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test --format "table {{.Repository}}\t{{.Tag}}\t{{.Size}}"
+
+ # Clean up test image
+ - name: Clean up test image
+ if: always()
+ run: |
+ docker rmi ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test || true
diff --git a/scripts/docker/docker-utils.ps1 b/scripts/docker/docker-utils.ps1
new file mode 100644
index 00000000..36ba12db
--- /dev/null
+++ b/scripts/docker/docker-utils.ps1
@@ -0,0 +1,139 @@
+# Docker utilities for Swarms project (PowerShell version)
+# Usage: .\scripts\docker\docker-utils.ps1 [command]
+
+param(
+ [Parameter(Position=0)]
+ [string]$Command = "help"
+)
+
+# Configuration
+$ImageName = "swarms"
+$Registry = "kyegomez"
+$FullImageName = "$Registry/$ImageName"
+
+# Functions
+function Write-Usage {
+ Write-Host "Docker Utilities for Swarms" -ForegroundColor Blue
+ Write-Host ""
+ Write-Host "Usage: .\scripts\docker-utils.ps1 [command]"
+ Write-Host ""
+ Write-Host "Commands:"
+ Write-Host " build Build the Docker image locally"
+ Write-Host " test Test the Docker image"
+ Write-Host " run Run the Docker image interactively"
+ Write-Host " push Push to DockerHub (requires login)"
+ Write-Host " clean Clean up Docker images and containers"
+ Write-Host " logs Show logs from running containers"
+ Write-Host " shell Open shell in running container"
+ Write-Host " compose-up Start services with docker-compose"
+ Write-Host " compose-down Stop services with docker-compose"
+ Write-Host " help Show this help message"
+ Write-Host ""
+}
+
+function Build-Image {
+ Write-Host "Building Docker image..." -ForegroundColor Green
+    docker build -f scripts/docker/Dockerfile -t "$ImageName`:latest" .
+ Write-Host " Image built successfully!" -ForegroundColor Green
+}
+
+function Test-Image {
+ Write-Host "Testing Docker image..." -ForegroundColor Green
+    docker run --rm -v "${PWD}/scripts/docker:/tests" "$ImageName`:latest" python /tests/test_docker.py
+ Write-Host " Image test completed!" -ForegroundColor Green
+}
+
+function Run-Interactive {
+ Write-Host "Running Docker image interactively..." -ForegroundColor Green
+ docker run -it --rm -v "${PWD}:/app" -w /app "$ImageName`:latest" bash
+}
+
+function Push-ToDockerHub {
+ Write-Host "β Make sure you're logged into DockerHub first!" -ForegroundColor Yellow
+ Write-Host "Pushing to DockerHub..." -ForegroundColor Green
+
+ # Tag the image
+ docker tag "$ImageName`:latest" "$FullImageName`:latest"
+
+ # Push to DockerHub
+ docker push "$FullImageName`:latest"
+
+ Write-Host " Image pushed to DockerHub!" -ForegroundColor Green
+}
+
+function Clean-Docker {
+ Write-Host "Cleaning up Docker resources..." -ForegroundColor Yellow
+
+ # Stop and remove containers
+ docker ps -aq | ForEach-Object { docker rm -f $_ }
+
+ # Remove images
+ docker images "$ImageName" -q | ForEach-Object { docker rmi -f $_ }
+
+ # Remove dangling images
+ docker image prune -f
+
+ Write-Host " Docker cleanup completed!" -ForegroundColor Green
+}
+
+function Show-Logs {
+ Write-Host "Showing logs from running containers..." -ForegroundColor Green
+ docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
+ Write-Host ""
+
+ # Show logs for swarms containers
+ $containers = docker ps --filter "name=swarms" --format "{{.Names}}"
+ foreach ($container in $containers) {
+ Write-Host "Logs for $container:" -ForegroundColor Blue
+ docker logs $container --tail 20
+ Write-Host ""
+ }
+}
+
+function Open-Shell {
+ Write-Host "Opening shell in running container..." -ForegroundColor Green
+
+ # Find running swarms container
+ $container = docker ps --filter "name=swarms" --format "{{.Names}}" | Select-Object -First 1
+
+ if (-not $container) {
+ Write-Host " No running swarms container found!" -ForegroundColor Red
+ Write-Host "Start a container first with: .\scripts\docker-utils.ps1 run"
+ exit 1
+ }
+
+ Write-Host "Opening shell in $container..." -ForegroundColor Blue
+ docker exec -it $container bash
+}
+
+function Compose-Up {
+ Write-Host "Starting services with docker-compose..." -ForegroundColor Green
+ docker-compose up -d
+ Write-Host " Services started!" -ForegroundColor Green
+ Write-Host "Use 'docker-compose logs -f' to view logs"
+}
+
+function Compose-Down {
+ Write-Host "Stopping services with docker-compose..." -ForegroundColor Yellow
+ docker-compose down
+ Write-Host " Services stopped!" -ForegroundColor Green
+}
+
+# Main script logic
+switch ($Command.ToLower()) {
+ "build" { Build-Image }
+ "test" { Test-Image }
+ "run" { Run-Interactive }
+ "push" { Push-ToDockerHub }
+ "clean" { Clean-Docker }
+ "logs" { Show-Logs }
+ "shell" { Open-Shell }
+ "compose-up" { Compose-Up }
+ "compose-down" { Compose-Down }
+ "help" { Write-Usage }
+ default {
+ Write-Host " Unknown command: $Command" -ForegroundColor Red
+ Write-Usage
+ exit 1
+ }
+}
diff --git a/scripts/docker/docker-utils.sh b/scripts/docker/docker-utils.sh
new file mode 100644
index 00000000..ca24332f
--- /dev/null
+++ b/scripts/docker/docker-utils.sh
@@ -0,0 +1,167 @@
+#!/bin/bash
+
+# Docker utilities for Swarms project
+# Usage: ./scripts/docker/docker-utils.sh [command]
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Configuration
+IMAGE_NAME="swarms"
+REGISTRY="kyegomez"
+FULL_IMAGE_NAME="${REGISTRY}/${IMAGE_NAME}"
+
+# Functions
+print_usage() {
+ echo -e "${BLUE}Docker Utilities for Swarms${NC}"
+ echo ""
+ echo "Usage: $0 [command]"
+ echo ""
+ echo "Commands:"
+ echo " build Build the Docker image locally"
+ echo " test Test the Docker image"
+ echo " run Run the Docker image interactively"
+ echo " push Push to DockerHub (requires login)"
+ echo " clean Clean up Docker images and containers"
+ echo " logs Show logs from running containers"
+ echo " shell Open shell in running container"
+ echo " compose-up Start services with docker-compose"
+ echo " compose-down Stop services with docker-compose"
+ echo " help Show this help message"
+ echo ""
+}
+
+build_image() {
+ echo -e "${GREEN}Building Docker image...${NC}"
+    docker build -f scripts/docker/Dockerfile -t "${IMAGE_NAME}:latest" .
+ echo -e "${GREEN} Image built successfully!${NC}"
+}
+
+test_image() {
+ echo -e "${GREEN}Testing Docker image...${NC}"
+    docker run --rm -v "$(pwd)/scripts/docker:/tests" "${IMAGE_NAME}:latest" python /tests/test_docker.py
+ echo -e "${GREEN} Image test completed!${NC}"
+}
+
+run_interactive() {
+ echo -e "${GREEN}Running Docker image interactively...${NC}"
+ docker run -it --rm \
+ -v "$(pwd):/app" \
+ -w /app \
+ "${IMAGE_NAME}:latest" bash
+}
+
+push_to_dockerhub() {
+ echo -e "${YELLOW}β Make sure you're logged into DockerHub first!${NC}"
+ echo -e "${GREEN}Pushing to DockerHub...${NC}"
+
+ # Tag the image
+ docker tag "${IMAGE_NAME}:latest" "${FULL_IMAGE_NAME}:latest"
+
+ # Push to DockerHub
+ docker push "${FULL_IMAGE_NAME}:latest"
+
+ echo -e "${GREEN} Image pushed to DockerHub!${NC}"
+}
+
+clean_docker() {
+ echo -e "${YELLOW}Cleaning up Docker resources...${NC}"
+
+ # Stop and remove containers
+ docker ps -aq | xargs -r docker rm -f
+
+ # Remove images
+ docker images "${IMAGE_NAME}" -q | xargs -r docker rmi -f
+
+ # Remove dangling images
+ docker image prune -f
+
+ echo -e "${GREEN} Docker cleanup completed!${NC}"
+}
+
+show_logs() {
+ echo -e "${GREEN}Showing logs from running containers...${NC}"
+ docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
+ echo ""
+
+ # Show logs for swarms containers
+ for container in $(docker ps --filter "name=swarms" --format "{{.Names}}"); do
+ echo -e "${BLUE}Logs for $container:${NC}"
+ docker logs "$container" --tail 20
+ echo ""
+ done
+}
+
+open_shell() {
+ echo -e "${GREEN}Opening shell in running container...${NC}"
+
+ # Find running swarms container
+ container=$(docker ps --filter "name=swarms" --format "{{.Names}}" | head -1)
+
+ if [ -z "$container" ]; then
+ echo -e "${RED} No running swarms container found!${NC}"
+ echo "Start a container first with: $0 run"
+ exit 1
+ fi
+
+ echo -e "${BLUE}Opening shell in $container...${NC}"
+ docker exec -it "$container" bash
+}
+
+compose_up() {
+ echo -e "${GREEN}Starting services with docker-compose...${NC}"
+ docker-compose up -d
+ echo -e "${GREEN} Services started!${NC}"
+ echo "Use 'docker-compose logs -f' to view logs"
+}
+
+compose_down() {
+ echo -e "${YELLOW}Stopping services with docker-compose...${NC}"
+ docker-compose down
+ echo -e "${GREEN} Services stopped!${NC}"
+}
+
+# Main script logic
+case "${1:-help}" in
+ build)
+ build_image
+ ;;
+ test)
+ test_image
+ ;;
+ run)
+ run_interactive
+ ;;
+ push)
+ push_to_dockerhub
+ ;;
+ clean)
+ clean_docker
+ ;;
+ logs)
+ show_logs
+ ;;
+ shell)
+ open_shell
+ ;;
+ compose-up)
+ compose_up
+ ;;
+ compose-down)
+ compose_down
+ ;;
+ help|--help|-h)
+ print_usage
+ ;;
+ *)
+ echo -e "${RED} Unknown command: $1${NC}"
+ print_usage
+ exit 1
+ ;;
+esac
diff --git a/scripts/docker/setup_docker_secrets.MD b/scripts/docker/setup_docker_secrets.MD
new file mode 100644
index 00000000..65f97183
--- /dev/null
+++ b/scripts/docker/setup_docker_secrets.MD
@@ -0,0 +1,113 @@
+# Setting up DockerHub Secrets for GitHub Actions
+
+This guide will help you set up the required secrets for the Docker workflow to automatically build and push images to DockerHub.
+
+## Prerequisites
+
+1. A DockerHub account
+2. Admin access to the GitHub repository
+3. DockerHub access token
+
+## Step 1: Create a DockerHub Access Token
+
+1. Log in to [DockerHub](https://hub.docker.com/)
+2. Go to your account settings
+3. Navigate to "Security" → "Access Tokens"
+4. Click "New Access Token"
+5. Give it a name (e.g., "GitHub Actions")
+6. Set the permissions to "Read & Write"
+7. Copy the generated token (you won't be able to see it again!)
+
+## Step 2: Add Secrets to GitHub Repository
+
+1. Go to your GitHub repository
+2. Navigate to "Settings" → "Secrets and variables" → "Actions"
+3. Click "New repository secret"
+4. Add the following secrets:
+
+### Required Secrets
+
+| Secret Name | Value | Description |
+|-------------|-------|-------------|
+| `DOCKERHUB_USERNAME` | Your DockerHub username | Your DockerHub username (e.g., `kyegomez`) |
+| `DOCKERHUB_TOKEN` | Your DockerHub access token | The access token you created in Step 1 |
+
+## Step 3: Verify Setup
+
+1. Push a commit to the `main` branch
+2. Go to the "Actions" tab in your GitHub repository
+3. You should see the "Docker Build and Publish" workflow running
+4. Check that it completes successfully
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Authentication Failed**
+ - Double-check your DockerHub username and token
+ - Ensure the token has "Read & Write" permissions
+ - Make sure the token hasn't expired
+
+2. **Permission Denied**
+ - Verify you have admin access to the repository
+ - Check that the secrets are named exactly as shown above
+
+3. **Workflow Not Triggering**
+ - Ensure you're pushing to the `main` branch
+ - Check that the workflow file is in `.github/workflows/`
+ - Verify the workflow file has the correct triggers
+
+### Testing Locally
+
+You can test the Docker build locally before pushing:
+
+```bash
+# Build the image locally (run from the repository root)
+docker build -f scripts/docker/Dockerfile -t swarms:test .
+
+# Test the image
+docker run --rm -v $(pwd)/scripts/docker:/tests swarms:test python /tests/test_docker.py
+
+# If everything works, push to GitHub
+git add .
+git commit -m "Add Docker support"
+git push origin main
+```
+
+## Security Notes
+
+- Never commit secrets directly to your repository
+- Use repository secrets for sensitive information
+- Regularly rotate your DockerHub access tokens
+- Consider using organization-level secrets for team repositories
+
+## Additional Configuration
+
+### Custom Registry
+
+If you want to use a different registry (not DockerHub), update the workflow file:
+
+```yaml
+env:
+ REGISTRY: your-registry.com
+ IMAGE_NAME: your-org/your-repo
+```
+
+### Multiple Tags
+
+The workflow automatically creates tags based on:
+- Git branch name
+- Git commit SHA
+- Version tags (v*.*.*)
+- Latest tag for main branch
+
+You can customize this in the workflow file under the "Extract Docker metadata" step.
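+
+For reference, the tag generation is driven by a `tags:` block along these lines (a sketch based on the publish workflow; see `docker-publish.yml` for the authoritative list):
+
+```yaml
+- name: Extract Docker metadata
+  uses: docker/metadata-action@v5
+  with:
+    images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+    tags: |
+      type=ref,event=branch
+      type=semver,pattern={{version}}
+      type=sha
+      type=raw,value=latest,enable={{is_default_branch}}
+```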
+
+## Support
+
+If you encounter issues:
+
+1. Check the GitHub Actions logs for detailed error messages
+2. Verify your DockerHub credentials
+3. Ensure the workflow file is properly configured
+4. Open an issue in the repository with the error details
diff --git a/scripts/docker/test_docker.py b/scripts/docker/test_docker.py
new file mode 100644
index 00000000..70e9060b
--- /dev/null
+++ b/scripts/docker/test_docker.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+"""
+Test script to verify Swarms installation in Docker container.
+"""
+
+import sys
+from typing import Dict, Any
+
+def test_swarms_import() -> Dict[str, Any]:
+ """
+ Test that swarms can be imported and basic functionality works.
+
+ Returns:
+ Dict[str, Any]: Test results
+ """
+ try:
+ import swarms
+ print(f" Swarms imported successfully. Version: {swarms.__version__}")
+
+ # Test basic functionality
+ from swarms import Agent
+ print(" Agent class imported successfully")
+
+ return {
+ "status": "success",
+ "version": swarms.__version__,
+ "message": "Swarms package is working correctly"
+ }
+
+ except ImportError as e:
+ print(f" Failed to import swarms: {e}")
+ return {
+ "status": "error",
+ "error": str(e),
+ "message": "Swarms package import failed"
+ }
+ except Exception as e:
+ print(f" Unexpected error: {e}")
+ return {
+ "status": "error",
+ "error": str(e),
+ "message": "Unexpected error occurred"
+ }
+
+def main() -> None:
+ """Main function to run tests."""
+ print(" Testing Swarms Docker Image...")
+ print("=" * 50)
+
+ # Test Python version
+ print(f"Python version: {sys.version}")
+
+ # Test swarms import
+ result = test_swarms_import()
+
+ print("=" * 50)
+ if result["status"] == "success":
+ print(" All tests passed! Docker image is working correctly.")
+ sys.exit(0)
+ else:
+ print(" Tests failed! Please check the Docker image.")
+ sys.exit(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/swarms/sims/senator_assembly.py b/swarms/sims/senator_assembly.py
index 64e3d34e..c125e2c6 100644
--- a/swarms/sims/senator_assembly.py
+++ b/swarms/sims/senator_assembly.py
@@ -6,13 +6,14 @@ each with detailed backgrounds, political positions, and comprehensive system pr
that reflect their real-world characteristics, voting patterns, and policy priorities.
"""
+from functools import lru_cache
from typing import Dict, List, Optional
-from swarms import Agent
-from swarms.structs.multi_agent_exec import run_agents_concurrently
-from functools import lru_cache
from loguru import logger
+
+from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
+from swarms.structs.multi_agent_exec import run_agents_concurrently
@lru_cache(maxsize=1)
diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py
index acad1008..9ea95998 100644
--- a/swarms/structs/__init__.py
+++ b/swarms/structs/__init__.py
@@ -96,7 +96,6 @@ from swarms.structs.swarming_architectures import (
star_swarm,
)
-
__all__ = [
"Agent",
"BaseStructure",
diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py
index 0198f490..e7760c12 100644
--- a/swarms/structs/agent.py
+++ b/swarms/structs/agent.py
@@ -27,9 +27,12 @@ from pydantic import BaseModel
from swarms.agents.ape_agent import auto_generate_prompt
from swarms.artifacts.main_artifact import Artifact
from swarms.prompts.agent_system_prompts import AGENT_SYSTEM_PROMPT_3
+from swarms.prompts.max_loop_prompt import generate_reasoning_prompt
from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
)
+from swarms.prompts.react_base_prompt import REACT_SYS_PROMPT
+from swarms.prompts.safety_prompt import SAFETY_PROMPT
from swarms.prompts.tools import tool_sop_prompt
from swarms.schemas.agent_mcp_errors import (
AgentMCPConnectionError,
@@ -41,19 +44,30 @@ from swarms.schemas.base_schemas import (
ChatCompletionResponseChoice,
ChatMessageResponse,
)
+from swarms.schemas.conversation_schema import ConversationSchema
from swarms.schemas.llm_agent_schema import ModelConfigOrigin
+from swarms.schemas.mcp_schemas import (
+ MCPConnection,
+)
from swarms.structs.agent_rag_handler import (
- RAGConfig,
AgentRAGHandler,
+ RAGConfig,
)
from swarms.structs.agent_roles import agent_roles
from swarms.structs.conversation import Conversation
+from swarms.structs.ma_utils import set_random_models_for_agents
from swarms.structs.safe_loading import (
SafeLoaderUtils,
SafeStateManager,
)
from swarms.telemetry.main import log_agent_data
from swarms.tools.base_tool import BaseTool
+from swarms.tools.mcp_client_call import (
+ execute_multiple_tools_on_multiple_mcp_servers_sync,
+ execute_tool_call_simple,
+ get_mcp_tools_sync,
+ get_tools_for_multiple_mcp_servers,
+)
from swarms.tools.py_func_to_openai_func_str import (
convert_multiple_functions_to_openai_function_schema,
)
@@ -64,28 +78,14 @@ from swarms.utils.generate_keys import generate_api_key
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
-from swarms.utils.litellm_tokenizer import count_tokens
-from swarms.utils.litellm_wrapper import LiteLLM
-from swarms.utils.pdf_to_text import pdf_to_text
-from swarms.prompts.react_base_prompt import REACT_SYS_PROMPT
-from swarms.prompts.max_loop_prompt import generate_reasoning_prompt
-from swarms.prompts.safety_prompt import SAFETY_PROMPT
-from swarms.structs.ma_utils import set_random_models_for_agents
-from swarms.tools.mcp_client_call import (
- execute_multiple_tools_on_multiple_mcp_servers_sync,
- execute_tool_call_simple,
- get_mcp_tools_sync,
- get_tools_for_multiple_mcp_servers,
-)
-from swarms.schemas.mcp_schemas import (
- MCPConnection,
-)
from swarms.utils.index import (
exists,
format_data_structure,
)
-from swarms.schemas.conversation_schema import ConversationSchema
+from swarms.utils.litellm_tokenizer import count_tokens
+from swarms.utils.litellm_wrapper import LiteLLM
from swarms.utils.output_types import OutputType
+from swarms.utils.pdf_to_text import pdf_to_text
def stop_when_repeats(response: str) -> bool:
@@ -899,9 +899,9 @@ class Agent:
bool: True if model supports vision and image is provided, False otherwise.
"""
from litellm.utils import (
- supports_vision,
supports_function_calling,
supports_parallel_function_calling,
+ supports_vision,
)
# Only check vision support if an image is provided
@@ -1558,11 +1558,11 @@ class Agent:
raise
def reliability_check(self):
+ from litellm import model_list
from litellm.utils import (
- supports_function_calling,
get_max_tokens,
+ supports_function_calling,
)
- from litellm import model_list
if self.system_prompt is None:
logger.warning(
diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py
index 86cba2a1..4ceb62e0 100644
--- a/swarms/structs/concurrent_workflow.py
+++ b/swarms/structs/concurrent_workflow.py
@@ -5,12 +5,12 @@ from typing import Callable, List, Optional, Union
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation
+from swarms.utils.formatter import formatter
from swarms.utils.get_cpu_cores import get_cpu_cores
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
from swarms.utils.loguru_logger import initialize_logger
-from swarms.utils.formatter import formatter
logger = initialize_logger(log_folder="concurrent_workflow")
diff --git a/swarms/structs/deep_research_swarm.py b/swarms/structs/deep_research_swarm.py
index 188ac7ea..b71e81c1 100644
--- a/swarms/structs/deep_research_swarm.py
+++ b/swarms/structs/deep_research_swarm.py
@@ -23,10 +23,6 @@ MAX_WORKERS = (
os.cpu_count() * 2
) # Optimal number of workers based on CPU cores
-###############################################################################
-# 1. System Prompts for Each Scientist Agent
-###############################################################################
-
def exa_search(query: str, **kwargs: Any) -> str:
"""Performs web search using Exa.ai API and returns formatted results."""
diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py
index 70a97587..d2dc10f4 100644
--- a/swarms/structs/hiearchical_swarm.py
+++ b/swarms/structs/hiearchical_swarm.py
@@ -687,7 +687,7 @@ class HierarchicalSwarm:
interactive: bool = False,
director_system_prompt: str = HIEARCHICAL_SWARM_SYSTEM_PROMPT,
director_reasoning_model_name: str = "o3-mini",
- director_reasoning_enabled: bool = True,
+ director_reasoning_enabled: bool = False,
multi_agent_prompt_improvements: bool = False,
*args,
**kwargs,
diff --git a/swarms/tools/py_func_to_openai_func_str.py b/swarms/tools/py_func_to_openai_func_str.py
index 26f64455..a1232ed0 100644
--- a/swarms/tools/py_func_to_openai_func_str.py
+++ b/swarms/tools/py_func_to_openai_func_str.py
@@ -1,8 +1,8 @@
-import os
import concurrent.futures
import functools
import inspect
import json
+import os
from logging import getLogger
from typing import (
Any,