diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml
index 21129735..be346103 100644
--- a/.github/workflows/autofix.yml
+++ b/.github/workflows/autofix.yml
@@ -22,4 +22,4 @@ jobs:
- run: ruff format .
- run: ruff check --fix .
- - uses: autofix-ci/action@dd55f44df8f7cdb7a6bf74c78677eb8acd40cd0a
+ - uses: autofix-ci/action@ff86a557419858bb967097bfc916833f5647fa8c
diff --git a/.github/workflows/bearer.yml b/.github/workflows/bearer.yml
new file mode 100644
index 00000000..be0fb591
--- /dev/null
+++ b/.github/workflows/bearer.yml
@@ -0,0 +1,43 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+#
+# This workflow file requires a free account on Bearer.com to manage findings, notifications and more.
+# See https://docs.bearer.com/guides/bearer-cloud/
+name: Bearer
+
+on:
+ push:
+ branches: ["master" ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: ["master"]
+ schedule:
+ - cron: '24 22 * * 6'
+
+permissions:
+ contents: read # for actions/checkout to fetch code
+ security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
+ actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
+
+jobs:
+ bearer:
+ runs-on: ubuntu-latest
+ steps:
+ # Checkout project source
+ - uses: actions/checkout@v4
+ # Scan code using Bearer CLI
+ - name: Run Report
+ id: report
+ uses: bearer/bearer-action@828eeb928ce2f4a7ca5ed57fb8b59508cb8c79bc
+ with:
+ api-key: ${{ secrets.BEARER_TOKEN }}
+ format: sarif
+ output: results.sarif
+ exit-code: 0
+ # Upload SARIF file generated in previous step
+ - name: Upload SARIF file
+ uses: github/codeql-action/upload-sarif@v3
+ with:
+ sarif_file: results.sarif
diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml
new file mode 100644
index 00000000..9bbf3ba2
--- /dev/null
+++ b/.github/workflows/dependency-review.yml
@@ -0,0 +1,39 @@
+# Dependency Review Action
+#
+# This Action will scan dependency manifest files that change as part of a Pull Request,
+# surfacing known-vulnerable versions of the packages declared or updated in the PR.
+# Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable
+# packages will be blocked from merging.
+#
+# Source repository: https://github.com/actions/dependency-review-action
+# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
+name: 'Dependency review'
+on:
+ pull_request:
+ branches: [ "master" ]
+
+# If using a dependency submission action in this workflow this permission will need to be set to:
+#
+# permissions:
+# contents: write
+#
+# https://docs.github.com/en/enterprise-cloud@latest/code-security/supply-chain-security/understanding-your-software-supply-chain/using-the-dependency-submission-api
+permissions:
+ contents: read
+ # Write permissions for pull-requests are required for using the `comment-summary-in-pr` option, comment out if you aren't using this option
+ pull-requests: write
+
+jobs:
+ dependency-review:
+ runs-on: ubuntu-latest
+ steps:
+ - name: 'Checkout repository'
+ uses: actions/checkout@v4
+ - name: 'Dependency Review'
+ uses: actions/dependency-review-action@v4
+ # Commonly enabled options, see https://github.com/actions/dependency-review-action#configuration-options for all available options.
+ with:
+ comment-summary-in-pr: always
+ # fail-on-severity: moderate
+ # deny-licenses: GPL-1.0-or-later, LGPL-2.0-or-later
+ # retry-on-snapshot-warnings: true
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
new file mode 100644
index 00000000..793d8e0e
--- /dev/null
+++ b/.github/workflows/docker-image.yml
@@ -0,0 +1,18 @@
+name: Docker Image CI
+
+on:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ branches: [ "master" ]
+
+jobs:
+
+ build:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: Build the Docker image
+ run: docker build . --file Dockerfile --tag my-image-name:$(date +%s)
diff --git a/.github/workflows/pyre.yml b/.github/workflows/pyre.yml
new file mode 100644
index 00000000..2e4713d3
--- /dev/null
+++ b/.github/workflows/pyre.yml
@@ -0,0 +1,46 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+# This workflow integrates Pyre with GitHub's
+# Code Scanning feature.
+#
+# Pyre is a performant type checker for Python compliant with
+# PEP 484. Pyre can analyze codebases with millions of lines
+# of code incrementally, providing instantaneous feedback
+# to developers as they write code.
+#
+# See https://pyre-check.org
+
+name: Pyre
+
+on:
+ workflow_dispatch:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ branches: [ "master" ]
+
+permissions:
+ contents: read
+
+jobs:
+ pyre:
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: true
+
+ - name: Run Pyre
+ uses: facebook/pyre-action@60697a7858f7cc8470d8cc494a3cf2ad6b06560d
+ with:
+ # To customize these inputs:
+ # See https://github.com/facebook/pyre-action#inputs
+ repo-directory: './'
+ requirements-path: 'requirements.txt'
diff --git a/.github/workflows/pysa.yml b/.github/workflows/pysa.yml
new file mode 100644
index 00000000..6c301e80
--- /dev/null
+++ b/.github/workflows/pysa.yml
@@ -0,0 +1,50 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+# This workflow integrates Python Static Analyzer (Pysa) with
+# GitHub's Code Scanning feature.
+#
+# Python Static Analyzer (Pysa) is a security-focused static
+# analysis tool that tracks flows of data from where they
+# originate to where they terminate in a dangerous location.
+#
+# See https://pyre-check.org/docs/pysa-basics/
+
+name: Pysa
+
+on:
+ workflow_dispatch:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ branches: [ "master" ]
+ schedule:
+ - cron: '43 5 * * 3'
+
+permissions:
+ contents: read
+
+jobs:
+ pysa:
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: true
+
+ - name: Run Pysa
+ uses: facebook/pysa-action@f46a63777e59268613bd6e2ff4e29f144ca9e88b
+ with:
+ # To customize these inputs:
+ # See https://github.com/facebook/pysa-action#inputs
+ repo-directory: './'
+ requirements-path: 'requirements.txt'
+ infer-types: true
+ include-default-sapp-filters: true
diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml
new file mode 100644
index 00000000..f3586044
--- /dev/null
+++ b/.github/workflows/python-package-conda.yml
@@ -0,0 +1,34 @@
+name: Python Package using Conda
+
+on: [push]
+
+jobs:
+ build-linux:
+ runs-on: ubuntu-latest
+ strategy:
+ max-parallel: 5
+
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v3
+ with:
+ python-version: '3.10'
+ - name: Add conda to system path
+ run: |
+ # $CONDA is an environment variable pointing to the root of the miniconda directory
+ echo $CONDA/bin >> $GITHUB_PATH
+ - name: Install dependencies
+ run: |
+ conda env update --file environment.yml --name base
+ - name: Lint with flake8
+ run: |
+ conda install flake8
+ # stop the build if there are Python syntax errors or undefined names
+ flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+ # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+ flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+ - name: Test with pytest
+ run: |
+ conda install pytest
+ pytest
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 6e563af2..c0ad132e 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -1,6 +1,8 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+# [ ] TODO: [PEP 458](https://blog.pypi.org/posts/2024-11-14-pypi-now-supports-digital-attestations/)
+
name: Python package
on:
@@ -16,7 +18,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: ["3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml
new file mode 100644
index 00000000..1e78a687
--- /dev/null
+++ b/.github/workflows/semgrep.yml
@@ -0,0 +1,49 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+# This workflow file requires a free account on Semgrep.dev to
+# manage rules, file ignores, notifications, and more.
+#
+# See https://semgrep.dev/docs
+
+name: Semgrep
+
+on:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ "master" ]
+ schedule:
+ - cron: '19 7 * * 3'
+
+permissions:
+ contents: read
+
+jobs:
+ semgrep:
+ permissions:
+ contents: read # for actions/checkout to fetch code
+ security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
+ actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
+ name: Scan
+ runs-on: ubuntu-latest
+ steps:
+ # Checkout project source
+ - uses: actions/checkout@v4
+
+ # Scan code using project's configuration on https://semgrep.dev/manage
+ - uses: returntocorp/semgrep-action@fcd5ab7459e8d91cb1777481980d1b18b4fc6735
+ with:
+ publishToken: ${{ secrets.SEMGREP_APP_TOKEN }}
+ publishDeployment: ${{ secrets.SEMGREP_DEPLOYMENT_ID }}
+ generateSarif: "1"
+
+ # Upload SARIF file generated in previous step
+ - name: Upload SARIF file
+ uses: github/codeql-action/upload-sarif@v3
+ with:
+ sarif_file: semgrep.sarif
+ if: always()
diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml
new file mode 100644
index 00000000..d9e6c82b
--- /dev/null
+++ b/.github/workflows/trivy.yml
@@ -0,0 +1,48 @@
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+name: trivy
+
+on:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ "master" ]
+ schedule:
+ - cron: '31 0 * * 5'
+
+permissions:
+ contents: read
+
+jobs:
+ build:
+ permissions:
+ contents: read # for actions/checkout to fetch code
+ security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
+ actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
+ name: Build
+ runs-on: "ubuntu-20.04"
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Build an image from Dockerfile
+ run: |
+ docker build -t docker.io/my-organization/my-app:${{ github.sha }} .
+
+ - name: Run Trivy vulnerability scanner
+ uses: aquasecurity/trivy-action@7b7aa264d83dc58691451798b4d117d53d21edfe
+ with:
+ image-ref: 'docker.io/my-organization/my-app:${{ github.sha }}'
+ format: 'template'
+ template: '@/contrib/sarif.tpl'
+ output: 'trivy-results.sarif'
+ severity: 'CRITICAL,HIGH'
+
+ - name: Upload Trivy scan results to GitHub Security tab
+ uses: github/codeql-action/upload-sarif@v3
+ with:
+ sarif_file: 'trivy-results.sarif'
diff --git a/.gitignore b/.gitignore
index 18b6849c..9f6e25b6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,10 +8,12 @@ audio/
video/
artifacts_three
dataframe/
-
+.ruff_cache
+.pytest_cache
static/generated
runs
Financial-Analysis-Agent_state.json
+experimental
artifacts_five
encryption
errors
diff --git a/README.md b/README.md
index 96be5b67..0180866c 100644
--- a/README.md
+++ b/README.md
@@ -37,8 +37,21 @@
[![Share on Reddit](https://img.shields.io/badge/-Share%20on%20Reddit-orange)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&title=Swarms%20-%20the%20future%20of%20AI) [![Share on Hacker News](https://img.shields.io/badge/-Share%20on%20Hacker%20News-orange)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&t=Swarms%20-%20the%20future%20of%20AI) [![Share on Pinterest](https://img.shields.io/badge/-Share%20on%20Pinterest-red)](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&media=https%3A%2F%2Fexample.com%2Fimage.jpg&description=Swarms%20-%20the%20future%20of%20AI) [![Share on WhatsApp](https://img.shields.io/badge/-Share%20on%20WhatsApp-green)](https://api.whatsapp.com/send?text=Check%20out%20Swarms%20-%20the%20future%20of%20AI%20%23swarms%20%23AI%0A%0Ahttps%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms)
+## ✨ Features
+
+| Category | Features | Benefits |
+|----------|----------|-----------|
+| 🏢 Enterprise Architecture | • Production-Ready Infrastructure<br>• High Reliability Systems<br>• Modular Design<br>• Comprehensive Logging | • Reduced downtime<br>• Easier maintenance<br>• Better debugging<br>• Enhanced monitoring |
+| 🤖 Agent Orchestration | • Hierarchical Swarms<br>• Parallel Processing<br>• Sequential Workflows<br>• Graph-based Workflows<br>• Dynamic Agent Rearrangement | • Complex task handling<br>• Improved performance<br>• Flexible workflows<br>• Optimized execution |
+| 🔄 Integration Capabilities | • Multi-Model Support<br>• Custom Agent Creation<br>• Extensive Tool Library<br>• Multiple Memory Systems | • Provider flexibility<br>• Custom solutions<br>• Extended functionality<br>• Enhanced memory management |
+| 📈 Scalability | • Concurrent Processing<br>• Resource Management<br>• Load Balancing<br>• Horizontal Scaling | • Higher throughput<br>• Efficient resource use<br>• Better performance<br>• Easy scaling |
+| 🛠️ Developer Tools | • Simple API<br>• Extensive Documentation<br>• Active Community<br>• CLI Tools | • Faster development<br>• Easy learning curve<br>• Community support<br>• Quick deployment |
+| 🔐 Security Features | • Error Handling<br>• Rate Limiting<br>• Monitoring Integration<br>• Audit Logging | • Improved reliability<br>• API protection<br>• Better monitoring<br>• Enhanced tracking |
+| 📊 Advanced Features | • SpreadsheetSwarm<br>• Group Chat<br>• Agent Registry<br>• Mixture of Agents | • Mass agent management<br>• Collaborative AI<br>• Centralized control<br>• Complex solutions |
+| 🔌 Provider Support | • OpenAI<br>• Anthropic<br>• ChromaDB<br>• Custom Providers | • Provider flexibility<br>• Storage options<br>• Custom integration<br>• Vendor independence |
+| 💪 Production Features | • Automatic Retries<br>• Async Support<br>• Environment Management<br>• Type Safety | • Better reliability<br>• Improved performance<br>• Easy configuration<br>• Safer code |
+| 🎯 Use Case Support | • Task-Specific Agents<br>• Custom Workflows<br>• Industry Solutions<br>• Extensible Framework | • Quick deployment<br>• Flexible solutions<br>• Industry readiness<br>• Easy customization |
-Swarms is an enterprise grade and production ready multi-agent collaboration framework that enables you to orchestrate many agents to work collaboratively at scale to automate real-world activities.
----
@@ -49,7 +62,7 @@ Swarms is an enterprise grade and production ready multi-agent collaboration fra
- Set an `.env` Variable with your desired workspace dir: `WORKSPACE_DIR="agent_workspace"` or do it in your terminal with `export WORKSPACE_DIR="agent_workspace"`
- Finally, `swarms onboarding` to get you started.
-## Onboarding
+## Guides and Walkthroughs
Refer to our documentation for production grade implementation details.
@@ -68,9 +81,10 @@ Refer to our documentation for production grade implementation details.
## Install 💻
+Install the following packages with pip:
```bash
-$ pip3 install -U swarms
+$ pip3 install -U swarms swarm-models swarms-memory
```
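+
+If the install succeeded, the core imports below should run without errors. A quick sanity check (note that `swarm-models` and `swarms-memory` install under the module names `swarm_models` and `swarms_memory`, as used throughout the examples below):
+
+```python
+# Minimal sanity check that the three packages installed correctly.
+import swarms
+import swarm_models
+import swarms_memory
+
+print("swarms, swarm_models, and swarms_memory imported successfully")
+```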
@@ -100,14 +114,36 @@ Here are some example scripts to get you started. For more comprehensive documen
| Swarms Examples | A collection of simple examples to demonstrate Swarms capabilities. | Basic Usage | [https://github.com/The-Swarm-Corporation/swarms-examples?tab=readme-ov-file](https://github.com/The-Swarm-Corporation/swarms-examples?tab=readme-ov-file) |
| Cookbook | A comprehensive guide with recipes for various use cases and scenarios. | Advanced Usage | [https://github.com/The-Swarm-Corporation/Cookbook](https://github.com/The-Swarm-Corporation/Cookbook) |
+
+
+
---
## `Agent` Class
The `Agent` class is a fundamental component of the Swarms framework, designed to execute tasks autonomously. It fuses LLMs, tools, and long-term memory capabilities to create a full-stack agent. The `Agent` class is highly customizable, allowing for fine-grained control over its behavior and interactions.
+
### `run` Method
-The `run` method is the primary entry point for executing tasks with an `Agent` instance. It accepts a task string as the main input task and processes it according to the agent's configuration. And, it can also accept an `img` parameter such as `img="image_filepath.png` to process images if you have a VLM
+The `run` method is the primary entry point for executing tasks with an `Agent` instance. It accepts a task string as the main input and processes it according to the agent's configuration. It can also accept an `img` parameter, such as `img="image_filepath.png"`, to process images if a VLM such as `GPT4VisionAPI` is attached.
+
+
+
+## Simple Example
+
+```python
+from swarms import Agent
+
+agent = Agent(
+ agent_name="Stock-Analysis-Agent",
+ model_name="gpt-4o-mini",
+ max_loops="auto",
+ interactive=True,
+ streaming_on=True,
+)
+
+agent.run("What is the current market trend for tech stocks?")
+```
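+
+The `img` parameter mentioned above is passed directly to `run`. A minimal sketch, assuming a vision-capable model and a local file named `image_filepath.png`:
+
+```python
+from swarms import Agent
+
+# Sketch only: assumes a vision-capable (VLM) model and an existing image file.
+vision_agent = Agent(
+    agent_name="Chart-Reader-Agent",
+    model_name="gpt-4o",
+    max_loops=1,
+)
+
+# The image path goes in via the `img` parameter described above.
+vision_agent.run(
+    "Summarize the key trend shown in this chart.",
+    img="image_filepath.png",
+)
+```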
### Settings and Customization
The `Agent` class offers a range of settings to tailor its behavior to specific needs. Some key settings include:
@@ -133,28 +169,15 @@ The `Agent` class offers a range of settings to tailor its behavior to specific
```python
import os
from swarms import Agent
-from swarm_models import OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
-from dotenv import load_dotenv
-
-load_dotenv()
-
-# Get the OpenAI API key from the environment variable
-api_key = os.getenv("OPENAI_API_KEY")
-
-# Create an instance of the OpenAIChat class
-model = OpenAIChat(
- openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
-)
-
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
- llm=model,
+ model_name="gpt-4o-mini",
max_loops=1,
autosave=True,
dashboard=False,
@@ -176,11 +199,10 @@ agent.run(
```
-----
+
### Integrating RAG with Swarms for Enhanced Long-Term Memory
`Agent` can be equipped with quasi-infinite long-term memory using RAG (Retrieval-Augmented Generation) for advanced document understanding, analysis, and retrieval capabilities.
-
-
**Mermaid Diagram for RAG Integration**
```mermaid
graph TD
@@ -192,8 +214,11 @@ graph TD
F --> G[Return Output]
```
-**Step 1: Initialize the ChromaDB Client**
```python
+from swarms import Agent
+from swarms.prompts.finance_agent_sys_prompt import (
+ FINANCIAL_AGENT_SYS_PROMPT,
+)
import os
from swarms_memory import ChromaDB
@@ -204,29 +229,13 @@ chromadb = ChromaDB(
output_dir="finance_agent_rag", # Directory for storing RAG data
# docs_folder="artifacts", # Uncomment and specify the folder containing your documents
)
-```
-
-**Step 2: Define the Model**
-```python
-from swarm_models import Anthropic
-from swarms.prompts.finance_agent_sys_prompt import (
- FINANCIAL_AGENT_SYS_PROMPT,
-)
-
-# Define the Anthropic model for language processing
-model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
-```
-
-**Step 3: Initialize the Agent with RAG**
-```python
-from swarms import Agent
# Initialize the agent with RAG capabilities
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
agent_description="Agent creates a comprehensive financial analysis",
- llm=model,
+ model_name="gpt-4o-mini",
max_loops="auto", # Auto-adjusts loops based on task complexity
autosave=True, # Automatically saves agent state
dashboard=False, # Disables dashboard for this example
@@ -343,7 +352,6 @@ The following is an example of an agent that intakes a pydantic basemodel and ou
```python
from pydantic import BaseModel, Field
from swarms import Agent
-from swarm_models import Anthropic
# Initialize the schema for the person's information
@@ -375,7 +383,7 @@ agent = Agent(
),
# Set the tool schema to the JSON string -- this is the key difference
tool_schema=tool_schema,
- llm=Anthropic(),
+ model_name="gpt-4o",
max_loops=3,
autosave=True,
dashboard=False,
@@ -454,7 +462,7 @@ from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
-from swarms.utils.json_utils import base_model_to_json
+from swarms.tools.json_utils import base_model_to_json
# Load the pre-trained model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
@@ -582,8 +590,6 @@ You can now easily plug this custom Griptape agent into the **Swarms Framework**
## Understanding Swarms
-### What is a Swarm?
-
A swarm refers to a group of more than two agents working collaboratively to achieve a common goal. These agents can be software entities, such as LLMs, that interact with each other to perform complex tasks. The concept of a swarm is inspired by natural systems like ant colonies or bird flocks, where simple individual behaviors lead to complex group dynamics and problem-solving capabilities.
### How Swarm Architectures Facilitate Communication
@@ -596,9 +602,6 @@ Swarm architectures are designed to establish and manage communication between a
3. **Sequential Communication**: Sequential swarms process tasks in a linear order, where each agent's output becomes the input for the next agent. This ensures that tasks with dependencies are handled in the correct sequence, maintaining the integrity of the workflow.
-4. **Mesh Communication**: In mesh swarms, agents are fully connected, allowing any agent to communicate with any other agent. This setup provides high flexibility and redundancy, making it ideal for complex systems requiring dynamic interactions.
-
-5. **Federated Communication**: Federated swarms involve multiple independent swarms that collaborate by sharing information and results. Each swarm operates autonomously but can contribute to a larger task, enabling distributed problem-solving across different nodes.
Swarm architectures leverage these communication patterns to ensure that agents work together efficiently, adapting to the specific requirements of the task at hand. By defining clear communication protocols and interaction models, swarm architectures enable the seamless orchestration of multiple agents, leading to enhanced performance and problem-solving capabilities.
@@ -876,14 +879,12 @@ The `run` method returns the final output after all agents have processed the in
from swarms import Agent, AgentRearrange
-from swarm_models import Anthropic
-
# Initialize the director agent
director = Agent(
agent_name="Director",
system_prompt="Directs the tasks for the workers",
- llm=Anthropic(),
+ model_name="claude-2",
max_loops=1,
dashboard=False,
streaming_on=True,
@@ -899,7 +900,7 @@ director = Agent(
worker1 = Agent(
agent_name="Worker1",
system_prompt="Generates a transcript for a youtube video on what swarms are",
- llm=Anthropic(),
+ model_name="claude-2",
max_loops=1,
dashboard=False,
streaming_on=True,
@@ -914,7 +915,7 @@ worker1 = Agent(
worker2 = Agent(
agent_name="Worker2",
system_prompt="Summarizes the transcript generated by Worker1",
- llm=Anthropic(),
+ model_name="claude-2",
max_loops=1,
dashboard=False,
streaming_on=True,
@@ -1068,20 +1069,12 @@ The `run` method returns the final output after all agents have processed the in
```python
import os
-from swarm_models import OpenAIChat
from swarms import Agent, MixtureOfAgents
-api_key = os.getenv("OPENAI_API_KEY")
-
-# Create individual agents with the OpenAIChat model
-model = OpenAIChat(
- openai_api_key=api_key, model_name="gpt-4", temperature=0.1
-)
-
# Agent 1: Financial Statement Analyzer
agent1 = Agent(
agent_name="FinancialStatementAnalyzer",
- llm=model,
+ model_name="gpt-4o",
system_prompt="""You are a Financial Statement Analyzer specializing in 10-K SEC reports. Your primary focus is on analyzing the financial statements, including the balance sheet, income statement, and cash flow statement.
Key responsibilities:
@@ -1107,7 +1100,7 @@ When analyzing, consider industry standards and compare the company's performanc
# Agent 2: Risk Assessment Specialist
agent2 = Agent(
agent_name="RiskAssessmentSpecialist",
- llm=model,
+ model_name="gpt-4o",
system_prompt="""You are a Risk Assessment Specialist focusing on 10-K SEC reports. Your primary role is to identify, analyze, and evaluate potential risks disclosed in the report.
Key responsibilities:
@@ -1134,7 +1127,7 @@ Your analysis should provide a comprehensive overview of the company's risk land
# Agent 3: Business Strategy Evaluator
agent3 = Agent(
agent_name="BusinessStrategyEvaluator",
- llm=model,
+ model_name="gpt-4o",
system_prompt="""You are a Business Strategy Evaluator specializing in analyzing 10-K SEC reports. Your focus is on assessing the company's overall strategy, market position, and future outlook.
Key responsibilities:
@@ -1162,7 +1155,7 @@ Your analysis should provide insights into the company's strategic direction, it
# Aggregator Agent
aggregator_agent = Agent(
agent_name="10KReportAggregator",
- llm=model,
+ model_name="gpt-4o",
system_prompt="""You are the 10-K Report Aggregator, responsible for synthesizing and summarizing the analyses provided by the Financial Statement Analyzer, Risk Assessment Specialist, and Business Strategy Evaluator. Your goal is to create a comprehensive, coherent, and insightful summary of the 10-K SEC report.
Key responsibilities:
@@ -1188,7 +1181,7 @@ Your final report should be well-structured, easy to understand, and provide a h
# Create the Mixture of Agents class
moa = MixtureOfAgents(
- reference_agents=[agent1, agent2, agent3],
+ agents=[agent1, agent2, agent3],
aggregator_agent=aggregator_agent,
aggregator_system_prompt="""As the 10-K Report Aggregator, your task is to synthesize the analyses provided by the Financial Statement Analyzer, Risk Assessment Specialist, and Business Strategy Evaluator into a comprehensive and coherent report.
@@ -1252,9 +1245,8 @@ The `run` method returns a dictionary containing the outputs of each agent that
```python
import os
-from swarms import Agent
+from swarms import Agent, SpreadSheetSwarm
from swarm_models import OpenAIChat
-from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
# Define custom system prompts for each social media platform
TWITTER_AGENT_SYS_PROMPT = """
@@ -1277,20 +1269,12 @@ EMAIL_AGENT_SYS_PROMPT = """
You are an Email marketing expert specializing in real estate. Your task is to write compelling email campaigns to promote properties, focusing on personalization, subject lines, and effective call-to-action strategies to drive conversions.
"""
-# Example usage:
-api_key = os.getenv("OPENAI_API_KEY")
-
-# Model
-model = OpenAIChat(
- openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
-)
-
# Initialize your agents for different social media platforms
agents = [
Agent(
agent_name="Twitter-RealEstate-Agent",
system_prompt=TWITTER_AGENT_SYS_PROMPT,
- llm=model,
+ model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="twitter_realestate_agent.json",
@@ -1300,7 +1284,7 @@ agents = [
Agent(
agent_name="Instagram-RealEstate-Agent",
system_prompt=INSTAGRAM_AGENT_SYS_PROMPT,
- llm=model,
+ model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="instagram_realestate_agent.json",
@@ -1310,7 +1294,7 @@ agents = [
Agent(
agent_name="Facebook-RealEstate-Agent",
system_prompt=FACEBOOK_AGENT_SYS_PROMPT,
- llm=model,
+ model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="facebook_realestate_agent.json",
@@ -1320,7 +1304,7 @@ agents = [
Agent(
agent_name="LinkedIn-RealEstate-Agent",
system_prompt=LINKEDIN_AGENT_SYS_PROMPT,
- llm=model,
+ model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="linkedin_realestate_agent.json",
@@ -1330,7 +1314,7 @@ agents = [
Agent(
agent_name="Email-RealEstate-Agent",
system_prompt=EMAIL_AGENT_SYS_PROMPT,
- llm=model,
+ model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="email_realestate_agent.json",
@@ -1439,7 +1423,7 @@ The `run` method returns the output from the most relevant agent selected based
```python
-from swarms.structs.tree_swarm import TreeAgent, Tree, ForestSwarm
+from swarms import TreeAgent, Tree, ForestSwarm
# Create agents with varying system prompts and dynamically generated distances/keywords
agents_tree1 = [
diff --git a/agent_with_rag.py b/agent_with_rag.py
deleted file mode 100644
index 153c207d..00000000
--- a/agent_with_rag.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-
-from swarms_memory import ChromaDB
-
-from swarms import Agent
-from swarm_models import Anthropic
-from swarms.prompts.finance_agent_sys_prompt import (
- FINANCIAL_AGENT_SYS_PROMPT,
-)
-
-# Initilaize the chromadb client
-chromadb = ChromaDB(
- metric="cosine",
- output_dir="fiance_agent_rag",
- # docs_folder="artifacts", # Folder of your documents
-)
-
-# Model
-model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
-
-
-# Initialize the agent
-agent = Agent(
- agent_name="Financial-Analysis-Agent",
- system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
- agent_description="Agent creates ",
- llm=model,
- max_loops="auto",
- autosave=True,
- dashboard=False,
- verbose=True,
- streaming_on=True,
- dynamic_temperature_enabled=True,
- saved_state_path="finance_agent.json",
- user_name="swarms_corp",
- retry_attempts=3,
- context_length=200000,
- long_term_memory=chromadb,
-)
-
-
-agent.run(
- "What are the components of a startups stock incentive equity plan"
-)
diff --git a/agent_with_rag_and_tools.py b/agent_with_rag_and_tools.py
deleted file mode 100644
index f278c173..00000000
--- a/agent_with_rag_and_tools.py
+++ /dev/null
@@ -1,117 +0,0 @@
-from swarms import Agent
-from swarm_models import OpenAIChat
-from swarms_memory import ChromaDB
-import subprocess
-import os
-
-# Making an instance of the ChromaDB class
-memory = ChromaDB(
- metric="cosine",
- n_results=3,
- output_dir="results",
- docs_folder="docs",
-)
-
-# Model
-model = OpenAIChat(
- api_key=os.getenv("OPENAI_API_KEY"),
- model_name="gpt-4o-mini",
- temperature=0.1,
-)
-
-
-# Tools in swarms are simple python functions and docstrings
-def terminal(
- code: str,
-):
- """
- Run code in the terminal.
-
- Args:
- code (str): The code to run in the terminal.
-
- Returns:
- str: The output of the code.
- """
- out = subprocess.run(
- code, shell=True, capture_output=True, text=True
- ).stdout
- return str(out)
-
-
-def browser(query: str):
- """
- Search the query in the browser with the `browser` tool.
-
- Args:
- query (str): The query to search in the browser.
-
- Returns:
- str: The search results.
- """
- import webbrowser
-
- url = f"https://www.google.com/search?q={query}"
- webbrowser.open(url)
- return f"Searching for {query} in the browser."
-
-
-def create_file(file_path: str, content: str):
- """
- Create a file using the file editor tool.
-
- Args:
- file_path (str): The path to the file.
- content (str): The content to write to the file.
-
- Returns:
- str: The result of the file creation operation.
- """
- with open(file_path, "w") as file:
- file.write(content)
- return f"File {file_path} created successfully."
-
-
-def file_editor(file_path: str, mode: str, content: str):
- """
- Edit a file using the file editor tool.
-
- Args:
- file_path (str): The path to the file.
- mode (str): The mode to open the file in.
- content (str): The content to write to the file.
-
- Returns:
- str: The result of the file editing operation.
- """
- with open(file_path, mode) as file:
- file.write(content)
- return f"File {file_path} edited successfully."
-
-
-# Agent
-agent = Agent(
- agent_name="Devin",
- system_prompt=(
- "Autonomous agent that can interact with humans and other"
- " agents. Be Helpful and Kind. Use the tools provided to"
- " assist the user. Return all code in markdown format."
- ),
- llm=model,
- max_loops="auto",
- autosave=True,
- dashboard=False,
- streaming_on=True,
- verbose=True,
- stopping_token="",
- interactive=True,
- tools=[terminal, browser, file_editor, create_file],
- streaming=True,
- long_term_memory=memory,
-)
-
-# Run the agent
-out = agent(
- "Create a CSV file with the latest tax rates for C corporations in the following ten states and the District of Columbia: Alabama, California, Florida, Georgia, Illinois, New York, North Carolina, Ohio, Texas, and Washington."
-)
-print(out)
diff --git a/api/agent_api.py b/api/agent_api.py
new file mode 100644
index 00000000..d1968d9d
--- /dev/null
+++ b/api/agent_api.py
@@ -0,0 +1,629 @@
+import os
+from fastapi import (
+ FastAPI,
+ HTTPException,
+ status,
+ Query,
+ BackgroundTasks,
+)
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel, Field
+from typing import Optional, Dict, Any, List
+from loguru import logger
+import uvicorn
+from datetime import datetime, timedelta
+from uuid import UUID, uuid4
+from enum import Enum
+from pathlib import Path
+from concurrent.futures import ThreadPoolExecutor
+import traceback
+
+from swarms import Agent
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+
+# Configure Loguru
+logger.add(
+ "logs/api_{time}.log",
+ rotation="500 MB",
+ retention="10 days",
+ level="INFO",
+ format="{time} {level} {message}",
+ backtrace=True,
+ diagnose=True,
+)
+
+
+class AgentStatus(str, Enum):
+ """Enum for agent status."""
+
+ IDLE = "idle"
+ PROCESSING = "processing"
+ ERROR = "error"
+ MAINTENANCE = "maintenance"
+
+
+class AgentConfig(BaseModel):
+ """Configuration model for creating a new agent."""
+
+ agent_name: str = Field(..., description="Name of the agent")
+ model_name: str = Field(
+ ...,
+ description="Name of the llm you want to use provided by litellm",
+ )
+ description: str = Field(
+ default="", description="Description of the agent's purpose"
+ )
+ system_prompt: str = Field(
+ ..., description="System prompt for the agent"
+ )
+ temperature: float = Field(
+ default=0.1,
+ ge=0.0,
+ le=2.0,
+ description="Temperature for the model",
+ )
+ max_loops: int = Field(
+ default=1, ge=1, description="Maximum number of loops"
+ )
+ autosave: bool = Field(
+ default=True, description="Enable autosave"
+ )
+ dashboard: bool = Field(
+ default=False, description="Enable dashboard"
+ )
+ verbose: bool = Field(
+ default=True, description="Enable verbose output"
+ )
+ dynamic_temperature_enabled: bool = Field(
+ default=True, description="Enable dynamic temperature"
+ )
+ user_name: str = Field(
+ default="default_user", description="Username for the agent"
+ )
+ retry_attempts: int = Field(
+ default=1, ge=1, description="Number of retry attempts"
+ )
+ context_length: int = Field(
+ default=200000, ge=1000, description="Context length"
+ )
+ output_type: str = Field(
+ default="string", description="Output type (string or json)"
+ )
+ streaming_on: bool = Field(
+ default=False, description="Enable streaming"
+ )
+ tags: List[str] = Field(
+ default_factory=list,
+ description="Tags for categorizing the agent",
+ )
+
+
+class AgentUpdate(BaseModel):
+ """Model for updating agent configuration."""
+
+ description: Optional[str] = None
+ system_prompt: Optional[str] = None
+ temperature: Optional[float] = None
+ max_loops: Optional[int] = None
+ tags: Optional[List[str]] = None
+ status: Optional[AgentStatus] = None
+
+
+class AgentSummary(BaseModel):
+ """Summary model for agent listing."""
+
+ agent_id: UUID
+ agent_name: str
+ description: str
+ created_at: datetime
+ last_used: datetime
+ total_completions: int
+ tags: List[str]
+ status: AgentStatus
+
+
+class AgentMetrics(BaseModel):
+ """Model for agent performance metrics."""
+
+ total_completions: int
+ average_response_time: float
+ error_rate: float
+ last_24h_completions: int
+ total_tokens_used: int
+ uptime_percentage: float
+ success_rate: float
+ peak_tokens_per_minute: int
+
+
+class CompletionRequest(BaseModel):
+ """Model for completion requests."""
+
+ prompt: str = Field(..., description="The prompt to process")
+ agent_id: UUID = Field(..., description="ID of the agent to use")
+ max_tokens: Optional[int] = Field(
+ None, description="Maximum tokens to generate"
+ )
+ temperature_override: Optional[float] = None
+ stream: bool = Field(
+ default=False, description="Enable streaming response"
+ )
+
+
+class CompletionResponse(BaseModel):
+ """Model for completion responses."""
+
+ agent_id: UUID
+ response: str
+ metadata: Dict[str, Any]
+ timestamp: datetime
+ processing_time: float
+ token_usage: Dict[str, int]
+
+
+class AgentStore:
+ """Enhanced store for managing agents."""
+
+ def __init__(self):
+ self.agents: Dict[UUID, Agent] = {}
+ self.agent_metadata: Dict[UUID, Dict[str, Any]] = {}
+ self.executor = ThreadPoolExecutor(max_workers=4)
+ self._ensure_directories()
+
+ def _ensure_directories(self):
+ """Ensure required directories exist."""
+ Path("logs").mkdir(exist_ok=True)
+ Path("states").mkdir(exist_ok=True)
+
+ async def create_agent(self, config: AgentConfig) -> UUID:
+ """Create a new agent with the given configuration."""
+ try:
+
+ agent = Agent(
+ agent_name=config.agent_name,
+ system_prompt=config.system_prompt,
+ model_name=config.model_name,
+ max_loops=config.max_loops,
+ autosave=config.autosave,
+ dashboard=config.dashboard,
+ verbose=config.verbose,
+ dynamic_temperature_enabled=config.dynamic_temperature_enabled,
+ saved_state_path=f"states/{config.agent_name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
+ user_name=config.user_name,
+ retry_attempts=config.retry_attempts,
+ context_length=config.context_length,
+ return_step_meta=True,
+ output_type="str",
+ streaming_on=config.streaming_on,
+ )
+
+ agent_id = uuid4()
+ self.agents[agent_id] = agent
+ self.agent_metadata[agent_id] = {
+ "description": config.description,
+ "created_at": datetime.utcnow(),
+ "last_used": datetime.utcnow(),
+ "total_completions": 0,
+ "tags": config.tags,
+ "total_tokens": 0,
+ "error_count": 0,
+ "response_times": [],
+ "status": AgentStatus.IDLE,
+ "start_time": datetime.utcnow(),
+ "downtime": timedelta(),
+ "successful_completions": 0,
+ }
+
+ logger.info(f"Created agent with ID: {agent_id}")
+ return agent_id
+
+ except Exception as e:
+ logger.error(f"Error creating agent: {str(e)}")
+ raise HTTPException(
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+ detail=f"Failed to create agent: {str(e)}",
+ )
+
+ async def get_agent(self, agent_id: UUID) -> Agent:
+ """Retrieve an agent by ID."""
+ agent = self.agents.get(agent_id)
+ if not agent:
+ logger.error(f"Agent not found: {agent_id}")
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail=f"Agent {agent_id} not found",
+ )
+ return agent
+
+ async def update_agent(
+ self, agent_id: UUID, update: AgentUpdate
+ ) -> None:
+ """Update agent configuration."""
+ agent = await self.get_agent(agent_id)
+ metadata = self.agent_metadata[agent_id]
+
+ if update.system_prompt:
+ agent.system_prompt = update.system_prompt
+ if update.temperature is not None:
+ agent.llm.temperature = update.temperature
+ if update.max_loops is not None:
+ agent.max_loops = update.max_loops
+ if update.tags is not None:
+ metadata["tags"] = update.tags
+ if update.description is not None:
+ metadata["description"] = update.description
+ if update.status is not None:
+ metadata["status"] = update.status
+ if update.status == AgentStatus.MAINTENANCE:
+ metadata["downtime"] += (
+ datetime.utcnow() - metadata["last_used"]
+ )
+
+ logger.info(f"Updated agent {agent_id}")
+
+ async def list_agents(
+ self,
+ tags: Optional[List[str]] = None,
+ status: Optional[AgentStatus] = None,
+ ) -> List[AgentSummary]:
+ """List all agents, optionally filtered by tags and status."""
+ summaries = []
+ for agent_id, agent in self.agents.items():
+ metadata = self.agent_metadata[agent_id]
+
+ # Apply filters
+ if tags and not any(
+ tag in metadata["tags"] for tag in tags
+ ):
+ continue
+ if status and metadata["status"] != status:
+ continue
+
+ summaries.append(
+ AgentSummary(
+ agent_id=agent_id,
+ agent_name=agent.agent_name,
+ description=metadata["description"],
+ created_at=metadata["created_at"],
+ last_used=metadata["last_used"],
+ total_completions=metadata["total_completions"],
+ tags=metadata["tags"],
+ status=metadata["status"],
+ )
+ )
+ return summaries
+
+ async def get_agent_metrics(self, agent_id: UUID) -> AgentMetrics:
+ """Get performance metrics for an agent."""
+ metadata = self.agent_metadata[agent_id]
+ response_times = metadata["response_times"]
+
+ # Calculate metrics
+ total_time = datetime.utcnow() - metadata["start_time"]
+ uptime = total_time - metadata["downtime"]
+ uptime_percentage = (
+ uptime.total_seconds() / total_time.total_seconds()
+ ) * 100
+
+ success_rate = (
+ metadata["successful_completions"]
+ / metadata["total_completions"]
+ * 100
+ if metadata["total_completions"] > 0
+ else 0
+ )
+
+ return AgentMetrics(
+ total_completions=metadata["total_completions"],
+            average_response_time=(
+                sum(d for _, d in response_times) / len(response_times)
+                if response_times
+                else 0
+            ),
+ error_rate=(
+ metadata["error_count"]
+ / metadata["total_completions"]
+ if metadata["total_completions"] > 0
+ else 0
+ ),
+            last_24h_completions=sum(
+                1
+                for ts, _ in response_times
+                if (datetime.utcnow() - ts).days < 1
+            ),
+ total_tokens_used=metadata["total_tokens"],
+ uptime_percentage=uptime_percentage,
+ success_rate=success_rate,
+            peak_tokens_per_minute=max(
+                metadata.get("tokens_per_minute", {}).values(),
+                default=0,
+            ),
+ )
+
+ async def clone_agent(
+ self, agent_id: UUID, new_name: str
+ ) -> UUID:
+ """Clone an existing agent with a new name."""
+ original_agent = await self.get_agent(agent_id)
+ original_metadata = self.agent_metadata[agent_id]
+
+ config = AgentConfig(
+ agent_name=new_name,
+ description=f"Clone of {original_agent.agent_name}",
+ system_prompt=original_agent.system_prompt,
+ model_name=original_agent.llm.model_name,
+ temperature=original_agent.llm.temperature,
+ max_loops=original_agent.max_loops,
+ tags=original_metadata["tags"],
+ )
+
+ return await self.create_agent(config)
+
+ async def delete_agent(self, agent_id: UUID) -> None:
+ """Delete an agent."""
+ if agent_id not in self.agents:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail=f"Agent {agent_id} not found",
+ )
+
+ # Clean up any resources
+ agent = self.agents[agent_id]
+ if agent.autosave and os.path.exists(agent.saved_state_path):
+ os.remove(agent.saved_state_path)
+
+ del self.agents[agent_id]
+ del self.agent_metadata[agent_id]
+ logger.info(f"Deleted agent {agent_id}")
+
+ async def process_completion(
+ self,
+ agent: Agent,
+ prompt: str,
+ agent_id: UUID,
+ max_tokens: Optional[int] = None,
+ temperature_override: Optional[float] = None,
+ ) -> CompletionResponse:
+ """Process a completion request using the specified agent."""
+ start_time = datetime.utcnow()
+ metadata = self.agent_metadata[agent_id]
+
+ try:
+ # Update agent status
+ metadata["status"] = AgentStatus.PROCESSING
+ metadata["last_used"] = start_time
+
+ # Apply temporary overrides if specified
+ original_temp = agent.llm.temperature
+ if temperature_override is not None:
+ agent.llm.temperature = temperature_override
+
+ # Process the completion
+ response = agent.run(prompt)
+
+ # Reset overrides
+ if temperature_override is not None:
+ agent.llm.temperature = original_temp
+
+ # Update metrics
+ processing_time = (
+ datetime.utcnow() - start_time
+ ).total_seconds()
+ metadata["response_times"].append(processing_time)
+ metadata["total_completions"] += 1
+ metadata["successful_completions"] += 1
+
+ # Estimate token usage (this is a rough estimate)
+ prompt_tokens = len(prompt.split()) * 1.3
+ completion_tokens = len(response.split()) * 1.3
+ total_tokens = int(prompt_tokens + completion_tokens)
+ metadata["total_tokens"] += total_tokens
+
+ # Update tokens per minute tracking
+ current_minute = datetime.utcnow().replace(
+ second=0, microsecond=0
+ )
+ if "tokens_per_minute" not in metadata:
+ metadata["tokens_per_minute"] = {}
+ metadata["tokens_per_minute"][current_minute] = (
+ metadata["tokens_per_minute"].get(current_minute, 0)
+ + total_tokens
+ )
+
+ return CompletionResponse(
+ agent_id=agent_id,
+ response=response,
+ metadata={
+ "agent_name": agent.agent_name,
+ "model_name": agent.llm.model_name,
+ "temperature": agent.llm.temperature,
+ },
+ timestamp=datetime.utcnow(),
+ processing_time=processing_time,
+ token_usage={
+ "prompt_tokens": int(prompt_tokens),
+ "completion_tokens": int(completion_tokens),
+ "total_tokens": total_tokens,
+ },
+ )
+
+ except Exception as e:
+ metadata["error_count"] += 1
+ metadata["status"] = AgentStatus.ERROR
+ logger.error(
+ f"Error in completion processing: {str(e)}\n{traceback.format_exc()}"
+ )
+ raise HTTPException(
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+ detail=f"Error processing completion: {str(e)}",
+ )
+ finally:
+ metadata["status"] = AgentStatus.IDLE
+
+
+class SwarmsAPI:
+ """Enhanced API class for Swarms agent integration."""
+
+ def __init__(self):
+ self.app = FastAPI(
+ title="Swarms Agent API",
+ description="Production-grade API for Swarms agent interaction",
+ version="1.0.0",
+ docs_url="/v1/docs",
+ redoc_url="/v1/redoc",
+ )
+ self.store = AgentStore()
+ # Configure CORS
+ self.app.add_middleware(
+ CORSMiddleware,
+ allow_origins=[
+ "*"
+ ], # Configure appropriately for production
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+ )
+
+ self._setup_routes()
+
+ def _setup_routes(self):
+ """Set up API routes."""
+
+ @self.app.post("/v1/agent", response_model=Dict[str, UUID])
+ async def create_agent(config: AgentConfig):
+ """Create a new agent with the specified configuration."""
+ agent_id = await self.store.create_agent(config)
+ return {"agent_id": agent_id}
+
+ @self.app.get("/v1/agents", response_model=List[AgentSummary])
+ async def list_agents(
+ tags: Optional[List[str]] = Query(None),
+ status: Optional[AgentStatus] = None,
+ ):
+ """List all agents, optionally filtered by tags and status."""
+ return await self.store.list_agents(tags, status)
+
+ @self.app.patch(
+ "/v1/agent/{agent_id}", response_model=Dict[str, str]
+ )
+ async def update_agent(agent_id: UUID, update: AgentUpdate):
+ """Update an existing agent's configuration."""
+ await self.store.update_agent(agent_id, update)
+ return {"status": "updated"}
+
+ @self.app.get(
+ "/v1/agent/{agent_id}/metrics",
+ response_model=AgentMetrics,
+ )
+ async def get_agent_metrics(agent_id: UUID):
+ """Get performance metrics for a specific agent."""
+ return await self.store.get_agent_metrics(agent_id)
+
+ @self.app.post(
+ "/v1/agent/{agent_id}/clone",
+ response_model=Dict[str, UUID],
+ )
+ async def clone_agent(agent_id: UUID, new_name: str):
+ """Clone an existing agent with a new name."""
+ new_id = await self.store.clone_agent(agent_id, new_name)
+ return {"agent_id": new_id}
+
+ @self.app.delete("/v1/agent/{agent_id}")
+ async def delete_agent(agent_id: UUID):
+ """Delete an agent."""
+ await self.store.delete_agent(agent_id)
+ return {"status": "deleted"}
+
+ @self.app.post(
+ "/v1/agent/completions", response_model=CompletionResponse
+ )
+ async def create_completion(
+ request: CompletionRequest,
+ background_tasks: BackgroundTasks,
+ ):
+ """Process a completion request with the specified agent."""
+ try:
+ agent = await self.store.get_agent(request.agent_id)
+
+ # Process completion
+ response = await self.store.process_completion(
+ agent,
+ request.prompt,
+ request.agent_id,
+ request.max_tokens,
+ request.temperature_override,
+ )
+
+ # Schedule background cleanup
+ background_tasks.add_task(
+ self._cleanup_old_metrics, request.agent_id
+ )
+
+ return response
+
+ except Exception as e:
+ logger.error(f"Error processing completion: {str(e)}")
+ raise HTTPException(
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+ detail=f"Error processing completion: {str(e)}",
+ )
+
+ @self.app.get("/v1/agent/{agent_id}/status")
+ async def get_agent_status(agent_id: UUID):
+ """Get the current status of an agent."""
+ metadata = self.store.agent_metadata.get(agent_id)
+ if not metadata:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail=f"Agent {agent_id} not found",
+ )
+ return {
+ "agent_id": agent_id,
+ "status": metadata["status"],
+ "last_used": metadata["last_used"],
+ "total_completions": metadata["total_completions"],
+ "error_count": metadata["error_count"],
+ }
+
+ async def _cleanup_old_metrics(self, agent_id: UUID):
+ """Clean up old metrics data to prevent memory bloat."""
+ metadata = self.store.agent_metadata.get(agent_id)
+ if metadata:
+ # Keep only last 24 hours of response times
+ cutoff = datetime.utcnow() - timedelta(days=1)
+ metadata["response_times"] = [
+ t
+ for t in metadata["response_times"]
+ if isinstance(t, (int, float))
+ and t > cutoff.timestamp()
+ ]
+
+ # Clean up old tokens per minute data
+ if "tokens_per_minute" in metadata:
+ metadata["tokens_per_minute"] = {
+ k: v
+ for k, v in metadata["tokens_per_minute"].items()
+ if k > cutoff
+ }
+
+
+def create_app() -> FastAPI:
+ """Create and configure the FastAPI application."""
+ api = SwarmsAPI()
+ return api.app
+
+
+if __name__ == "__main__":
+ # Configure uvicorn logging
+ logger.info("API Starting")
+    uvicorn.run(
+        "agent_api:create_app",  # import string for this module; run from the api/ directory
+        host="0.0.0.0",
+        port=8000,
+        reload=True,  # development reload; uvicorn ignores the workers option when reload is enabled
+        factory=True,  # create_app is an application factory rather than an app instance
+    )
diff --git a/api/agent_api_test.py b/api/agent_api_test.py
new file mode 100644
index 00000000..066efc4f
--- /dev/null
+++ b/api/agent_api_test.py
@@ -0,0 +1,107 @@
+import requests
+from loguru import logger
+import time
+
+# Configure loguru
+logger.add(
+ "api_tests_{time}.log",
+ rotation="100 MB",
+ level="DEBUG",
+ format="{time} {level} {message}",
+)
+
+BASE_URL = "http://localhost:8000/v1"
+
+
+def test_create_agent():
+ """Test creating a new agent."""
+ logger.info("Testing agent creation")
+
+ payload = {
+ "agent_name": "Test Agent",
+ "system_prompt": "You are a helpful assistant",
+ "model_name": "gpt-4",
+ "description": "Test agent",
+ "tags": ["test"],
+ }
+
+ response = requests.post(f"{BASE_URL}/agent", json=payload)
+ logger.debug(f"Create response: {response.json()}")
+
+ if response.status_code == 200:
+ logger.success("Successfully created agent")
+ return response.json()["agent_id"]
+ else:
+ logger.error(f"Failed to create agent: {response.text}")
+ return None
+
+
+def test_list_agents():
+ """Test listing all agents."""
+ logger.info("Testing agent listing")
+
+ response = requests.get(f"{BASE_URL}/agents")
+ logger.debug(f"List response: {response.json()}")
+
+ if response.status_code == 200:
+ logger.success(f"Found {len(response.json())} agents")
+ else:
+ logger.error(f"Failed to list agents: {response.text}")
+
+
+def test_completion(agent_id):
+ """Test running a completion."""
+ logger.info("Testing completion")
+
+ payload = {
+ "prompt": "What is the weather like today?",
+ "agent_id": agent_id,
+ }
+
+ response = requests.post(
+ f"{BASE_URL}/agent/completions", json=payload
+ )
+ logger.debug(f"Completion response: {response.json()}")
+
+ if response.status_code == 200:
+ logger.success("Successfully got completion")
+ else:
+ logger.error(f"Failed to get completion: {response.text}")
+
+
+def test_delete_agent(agent_id):
+ """Test deleting an agent."""
+ logger.info("Testing agent deletion")
+
+ response = requests.delete(f"{BASE_URL}/agent/{agent_id}")
+ logger.debug(f"Delete response: {response.json()}")
+
+ if response.status_code == 200:
+ logger.success("Successfully deleted agent")
+ else:
+ logger.error(f"Failed to delete agent: {response.text}")
+
+
+def run_tests():
+ """Run all tests in sequence."""
+ logger.info("Starting API tests")
+
+ # Create agent and get ID
+ agent_id = test_create_agent()
+ if not agent_id:
+ logger.error("Cannot continue tests without agent ID")
+ return
+
+ # Wait a bit for agent to be ready
+ time.sleep(1)
+
+ # Run other tests
+ test_list_agents()
+ test_completion(agent_id)
+ test_delete_agent(agent_id)
+
+ logger.info("Tests completed")
+
+
+if __name__ == "__main__":
+ run_tests()
diff --git a/auto_flow.py b/auto_flow.py
deleted file mode 100644
index 4a2f84c2..00000000
--- a/auto_flow.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-import json
-from pydantic import BaseModel, Field
-from swarm_models import OpenAIFunctionCaller
-from dotenv import load_dotenv
-from typing import Any, List
-
-load_dotenv()
-
-
-class Flow(BaseModel):
- id: str = Field(
- description="A unique identifier for the flow. This should be a short, descriptive name that captures the main purpose of the flow. Use - to separate words and make it lowercase."
- )
- plan: str = Field(
- description="The comprehensive plan detailing how the flow will accomplish the given task. This should include the high-level strategy, key milestones, and expected outcomes. The plan should clearly articulate what the overall goal is, what success looks like, and how progress will be measured throughout execution."
- )
- failures_prediction: str = Field(
- description="A thorough analysis of potential failure modes and mitigation strategies. This should identify technical risks, edge cases, error conditions, and possible points of failure in the flow. For each identified risk, include specific preventive measures, fallback approaches, and recovery procedures to ensure robustness and reliability."
- )
- rationale: str = Field(
- description="The detailed reasoning and justification for why this specific flow design is optimal for the given task. This should explain the key architectural decisions, tradeoffs considered, alternatives evaluated, and why this approach best satisfies the requirements. Include both technical and business factors that influenced the design."
- )
- flow: str = Field(
- description="The precise execution flow defining how agents interact and coordinate. Use -> to indicate sequential processing where one agent must complete before the next begins (e.g. agent1 -> agent2 -> agent3). Use , to indicate parallel execution where multiple agents can run simultaneously (e.g. agent1 -> agent2, agent3, agent4). The flow should clearly show the dependencies and parallelization opportunities between agents. You must only use the agent names provided in the task description do not make up new agent names and do not use any other formatting."
- )
-
-
-class AgentRearrangeBuilder(BaseModel):
- name: str = Field(
- description="The name of the swarm. This should be a short, descriptive name that captures the main purpose of the flow."
- )
- description: str = Field(
- description="A brief description of the swarm. This should be a concise summary of the main purpose of the swarm."
- )
- flows: List[Flow] = Field(
- description="A list of flows that are optimal for the given task. Each flow should be a detailed plan, failure prediction, rationale, and execution flow."
- )
- swarm_flow: str = Field(
- description="The flow defining how each team should communicate and coordinate with eachother.Use -> to indicate sequential processing where one id must complete before the next begins (e.g. team1 -> team2 -> team3). Use , to indicate parallel execution where multiple teams can run simultaneously (e.g. team1 -> team2, team3, team4). The flow should clearly show the dependencies and parallelization opportunities between teams. You must only use the team names provided in the id do not make up new team names and do not use any other formatting."
- )
-
-
-# def flow_generator(task: str) -> Flow:
-
-
-def setup_model(base_model: BaseModel = Flow):
- model = OpenAIFunctionCaller(
- system_prompt="""You are an expert flow architect specializing in designing multi-agent workflows. Your role is to analyze tasks and create optimal execution flows that coordinate multiple AI agents effectively.
-
- When given a task, you will:
- 1. Develop a comprehensive plan breaking down the task into logical steps
- 2. Carefully consider potential failure modes and build in robust error handling
- 3. Provide clear rationale for your architectural decisions and agent coordination strategy
- 4. Design a precise flow showing both sequential dependencies and parallel execution opportunities
-
- Your flows should maximize:
- - Efficiency through smart parallelization
- - Reliability through thorough error handling
- - Clarity through well-structured agent interactions
- - Effectiveness through strategic task decomposition
-
- Format your flow using -> for sequential steps and , for parallel execution. Be specific about agent roles and interactions.
- """,
- base_model=base_model,
- openai_api_key=os.getenv("OPENAI_API_KEY"),
- temperature=0.5,
- )
- return model
-
-
-def generate_flow(task: str) -> Any:
- model = setup_model()
- flow = model.run(task)
- print(json.dumps(flow, indent=4))
- return flow
-
-
-def generate_agent_rearrange(task: str) -> Any:
- model = setup_model(base_model=AgentRearrangeBuilder)
- flow = model.run(task)
- print(json.dumps(flow, indent=4))
- return flow
-
-
-if __name__ == "__main__":
- # Basic patient diagnosis flow
- # generate_flow("Diagnose a patient's symptoms and create a treatment plan. You have 3 agents to use: Diagnostician, Specialist, CareCoordinator")
-
- # # Complex multi-condition case
- # generate_flow("""Handle a complex patient case with multiple chronic conditions requiring ongoing care coordination.
- # The patient has diabetes, heart disease, and chronic pain.
- # Create a comprehensive diagnosis and treatment plan.
- # You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
-
- # # Emergency trauma case
- # generate_flow("""Process an emergency trauma case requiring rapid diagnosis and immediate intervention.
- # Patient presents with multiple injuries from a car accident.
- # Develop immediate and long-term treatment plans.
- # You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
-
- # # Long-term care planning
- # generate_flow("""Design a 6-month care plan for an elderly patient with declining cognitive function.
- # Include regular assessments, specialist consultations, and family coordination.
- # You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
-
- # # Mental health assessment
- # generate_flow("""Conduct a comprehensive mental health assessment and develop treatment strategy.
- # Patient shows signs of depression and anxiety with possible underlying conditions.
- # Create both immediate intervention and long-term support plans.
- # You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
-
- generate_agent_rearrange(
- """Build a complete automated hedge fund system.
- Design and implement a sophisticated trading strategy incorporating multiple asset classes,
- risk management protocols, and automated execution systems.
- The system should include:
- - Market analysis and research capabilities
- - Portfolio optimization and risk management
- - Automated trade execution and settlement
- - Compliance and regulatory monitoring
- - Performance tracking and reporting
- - Fund operations and administration
- Create a comprehensive architecture that integrates all these components into a fully automated system."""
- )
diff --git a/auto_swarm_router.py b/auto_swarm_router.py
deleted file mode 100644
index 8a692454..00000000
--- a/auto_swarm_router.py
+++ /dev/null
@@ -1,162 +0,0 @@
-import os
-from dotenv import load_dotenv
-from swarms import Agent
-from swarm_models import OpenAIChat
-from swarms.structs.swarm_router import SwarmRouter
-
-load_dotenv()
-
-# Get the OpenAI API key from the environment variable
-api_key = os.getenv("GROQ_API_KEY")
-
-# Model
-model = OpenAIChat(
- openai_api_base="https://api.groq.com/openai/v1",
- openai_api_key=api_key,
- model_name="llama-3.1-70b-versatile",
- temperature=0.1,
-)
-# Define specialized system prompts for each agent
-DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. Your expertise includes:
-1. Extracting key financial metrics (revenue, EBITDA, growth rates, etc.) from financial statements and reports
-2. Identifying and extracting important contract terms from legal documents
-3. Pulling out relevant market data from industry reports and analyses
-4. Extracting operational KPIs from management presentations and internal reports
-5. Identifying and extracting key personnel information from organizational charts and bios
-Provide accurate, structured data extracted from various document types to support investment analysis."""
-
-SUMMARIZER_PROMPT = """You are an expert private equity agent specializing in summarizing complex documents. Your core competencies include:
-1. Distilling lengthy financial reports into concise executive summaries
-2. Summarizing legal documents, highlighting key terms and potential risks
-3. Condensing industry reports to capture essential market trends and competitive dynamics
-4. Summarizing management presentations to highlight key strategic initiatives and projections
-5. Creating brief overviews of technical documents, emphasizing critical points for non-technical stakeholders
-Deliver clear, concise summaries that capture the essence of various documents while highlighting information crucial for investment decisions."""
-
-FINANCIAL_ANALYST_PROMPT = """You are a specialized private equity agent focused on financial analysis. Your key responsibilities include:
-1. Analyzing historical financial statements to identify trends and potential issues
-2. Evaluating the quality of earnings and potential adjustments to EBITDA
-3. Assessing working capital requirements and cash flow dynamics
-4. Analyzing capital structure and debt capacity
-5. Evaluating financial projections and underlying assumptions
-Provide thorough, insightful financial analysis to inform investment decisions and valuation."""
-
-MARKET_ANALYST_PROMPT = """You are a highly skilled private equity agent specializing in market analysis. Your expertise covers:
-1. Analyzing industry trends, growth drivers, and potential disruptors
-2. Evaluating competitive landscape and market positioning
-3. Assessing market size, segmentation, and growth potential
-4. Analyzing customer dynamics, including concentration and loyalty
-5. Identifying potential regulatory or macroeconomic impacts on the market
-Deliver comprehensive market analysis to assess the attractiveness and risks of potential investments."""
-
-OPERATIONAL_ANALYST_PROMPT = """You are an expert private equity agent focused on operational analysis. Your core competencies include:
-1. Evaluating operational efficiency and identifying improvement opportunities
-2. Analyzing supply chain and procurement processes
-3. Assessing sales and marketing effectiveness
-4. Evaluating IT systems and digital capabilities
-5. Identifying potential synergies in merger or add-on acquisition scenarios
-Provide detailed operational analysis to uncover value creation opportunities and potential risks."""
-
-# Initialize specialized agents
-data_extractor_agent = Agent(
- agent_name="Data-Extractor",
- system_prompt=DATA_EXTRACTOR_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="data_extractor_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-summarizer_agent = Agent(
- agent_name="Document-Summarizer",
- system_prompt=SUMMARIZER_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="summarizer_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-financial_analyst_agent = Agent(
- agent_name="Financial-Analyst",
- system_prompt=FINANCIAL_ANALYST_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="financial_analyst_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-market_analyst_agent = Agent(
- agent_name="Market-Analyst",
- system_prompt=MARKET_ANALYST_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="market_analyst_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-operational_analyst_agent = Agent(
- agent_name="Operational-Analyst",
- system_prompt=OPERATIONAL_ANALYST_PROMPT,
- llm=model,
- max_loops=1,
- autosave=True,
- verbose=True,
- dynamic_temperature_enabled=True,
- saved_state_path="operational_analyst_agent.json",
- user_name="pe_firm",
- retry_attempts=1,
- context_length=200000,
- output_type="string",
-)
-
-# Initialize the SwarmRouter
-router = SwarmRouter(
- name="pe-document-analysis-swarm",
- description="Analyze documents for private equity due diligence and investment decision-making",
- max_loops=1,
- agents=[
- data_extractor_agent,
- summarizer_agent,
- # financial_analyst_agent,
- # market_analyst_agent,
- # operational_analyst_agent,
- ],
- swarm_type="auto", # or "SequentialWorkflow" or "ConcurrentWorkflow" or
- # auto_generate_prompts=True,
-)
-
-# Example usage
-if __name__ == "__main__":
- # Run a comprehensive private equity document analysis task
- result = router.run(
- "Where is the best place to find template term sheets for series A startups. Provide links and references"
- )
- print(result)
-
- # Retrieve and print logs
- for log in router.get_logs():
- print(f"{log.timestamp} - {log.level}: {log.message}")
diff --git a/byte.py b/byte.py
new file mode 100644
index 00000000..d0a5a92f
--- /dev/null
+++ b/byte.py
@@ -0,0 +1,898 @@
+from enum import Enum
+from typing import Union, Optional
+import io
+from PIL import Image
+import numpy as np
+import torch
+import struct
+import magic  # python-magic; used by ModalityDetector.__init__ below for MIME sniffing
+
+
+from enum import auto
+from typing import Any, Dict, List, Tuple
+import wave
+from dataclasses import dataclass
+import torch.nn as nn
+import torch.nn.functional as F
+from loguru import logger
+from einops import rearrange
+from torch import Tensor
+
+
+@dataclass
+class ModelConfig:
+ """Configuration for the enhanced BytePredictor model."""
+
+ vocab_size: int = 256 # Standard byte range
+ hidden_size: int = 1024
+ num_layers: int = 12
+ num_key_value_heads: int = 8 # For multi-query attention
+ num_query_heads: int = 32 # More query heads than kv heads
+ dropout: float = 0.1
+ max_sequence_length: int = 8192
+ rope_theta: float = 10000.0
+ layer_norm_eps: float = 1e-5
+ vocab_parallel: bool = False
+ qk_norm: bool = True
+    qk_norm_scale: Optional[float] = None
+ attention_bias: bool = False
+
+
+class MultiQueryAttention(nn.Module):
+ """Fixed Multi-Query Attention implementation."""
+
+ def __init__(self, config: ModelConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.num_query_heads = config.num_query_heads
+ self.num_key_value_heads = config.num_key_value_heads
+ self.head_dim = config.hidden_size // config.num_query_heads
+ self.qk_scale = config.qk_norm_scale or (self.head_dim**-0.5)
+
+ self.q_proj = nn.Linear(
+ config.hidden_size, config.num_query_heads * self.head_dim
+ )
+ self.k_proj = nn.Linear(
+ config.hidden_size,
+ config.num_key_value_heads * self.head_dim,
+ )
+ self.v_proj = nn.Linear(
+ config.hidden_size,
+ config.num_key_value_heads * self.head_dim,
+ )
+ self.o_proj = nn.Linear(
+ config.num_query_heads * self.head_dim, config.hidden_size
+ )
+
+ self.qk_norm = config.qk_norm
+ if self.qk_norm:
+ self.q_norm = nn.LayerNorm(self.head_dim)
+ self.k_norm = nn.LayerNorm(self.head_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ batch_size, seq_length, _ = hidden_states.shape
+
+ # Project and reshape
+ q = self.q_proj(hidden_states)
+ k = self.k_proj(hidden_states)
+ v = self.v_proj(hidden_states)
+
+ # Reshape to [seq_len, batch, heads, head_dim]
+ q = q.view(
+ batch_size,
+ seq_length,
+ self.num_query_heads,
+ self.head_dim,
+ ).permute(1, 0, 2, 3)
+ k = k.view(
+ batch_size,
+ seq_length,
+ self.num_key_value_heads,
+ self.head_dim,
+ ).permute(1, 0, 2, 3)
+ v = v.view(
+ batch_size,
+ seq_length,
+ self.num_key_value_heads,
+ self.head_dim,
+ ).permute(1, 0, 2, 3)
+
+ # Apply rotary embeddings
+ # q, k = self.rotary(q, k, seq_length)
+
+ # Apply QK normalization if enabled
+ if self.qk_norm:
+ q = self.q_norm(q)
+ k = self.k_norm(k)
+
+ # Handle MQA head expansion
+ if self.num_key_value_heads != self.num_query_heads:
+ k = k.repeat_interleave(
+ self.num_query_heads // self.num_key_value_heads,
+ dim=2,
+ )
+ v = v.repeat_interleave(
+ self.num_query_heads // self.num_key_value_heads,
+ dim=2,
+ )
+
+ # Compute attention
+ # Reshape for matmul: [batch, heads, seq_length, head_dim]
+ q = q.permute(1, 2, 0, 3)
+ k = k.permute(1, 2, 0, 3)
+ v = v.permute(1, 2, 0, 3)
+
+ attn_weights = (
+ torch.matmul(q, k.transpose(-2, -1)) * self.qk_scale
+ )
+
+ if attention_mask is not None:
+ attn_weights = attn_weights + attention_mask
+
+ attn_weights = F.softmax(attn_weights, dim=-1)
+
+ output = torch.matmul(attn_weights, v)
+
+ # Reshape back to [batch, seq_length, hidden_size]
+ output = (
+ output.transpose(1, 2)
+ .contiguous()
+ .view(batch_size, seq_length, -1)
+ )
+ output = self.o_proj(output)
+
+ return output
+
+
+class EnhancedBytePredictor(nn.Module):
+ """Enhanced byte prediction model with state-of-the-art features."""
+
+ def __init__(self, config: ModelConfig):
+ super().__init__()
+ self.config = config
+
+ # Token embeddings
+ self.tok_embeddings = nn.Embedding(
+ config.vocab_size, config.hidden_size
+ )
+
+ # Transformer layers
+ self.layers = nn.ModuleList(
+ [
+ nn.ModuleDict(
+ {
+ "attention": MultiQueryAttention(config),
+ "attention_norm": nn.LayerNorm(
+ config.hidden_size,
+ eps=config.layer_norm_eps,
+ ),
+ "feed_forward": nn.Sequential(
+ nn.Linear(
+ config.hidden_size,
+ 4 * config.hidden_size,
+ ),
+ nn.GELU(),
+ nn.Linear(
+ 4 * config.hidden_size,
+ config.hidden_size,
+ ),
+ ),
+ "feed_forward_norm": nn.LayerNorm(
+ config.hidden_size,
+ eps=config.layer_norm_eps,
+ ),
+ }
+ )
+ for _ in range(config.num_layers)
+ ]
+ )
+
+ self.norm = nn.LayerNorm(
+ config.hidden_size, eps=config.layer_norm_eps
+ )
+ self.output = nn.Linear(
+ config.hidden_size, config.vocab_size, bias=False
+ )
+
+ # Initialize weights
+ self.apply(self._init_weights)
+
+ def _init_weights(self, module: nn.Module) -> None:
+ """Initialize weights with scaled normal distribution."""
+ if isinstance(module, nn.Linear):
+ torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+ if module.bias is not None:
+ torch.nn.init.zeros_(module.bias)
+ elif isinstance(module, nn.Embedding):
+ torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ """
+ Forward pass of the model.
+
+ Args:
+ input_ids: Tensor of shape (batch_size, sequence_length)
+ attention_mask: Optional attention mask
+
+ Returns:
+ Tensor of logits with shape (batch_size, sequence_length, vocab_size)
+ """
+ hidden_states = self.tok_embeddings(input_ids)
+
+        # Create an additive causal mask if none was provided
+        if attention_mask is None:
+            causal_mask = torch.triu(
+                torch.ones(
+                    (input_ids.size(1), input_ids.size(1)),
+                    device=input_ids.device,
+                    dtype=torch.bool,
+                ),
+                diagonal=1,
+            )
+            # Use a float mask (0 for visible positions, -inf for future
+            # positions) so it can be added directly to the attention scores
+            attention_mask = torch.zeros(
+                causal_mask.shape,
+                device=input_ids.device,
+                dtype=hidden_states.dtype,
+            ).masked_fill(causal_mask, float("-inf"))
+
+ # Apply transformer layers
+ for layer in self.layers:
+ # Attention block
+ hidden_states = hidden_states + layer["attention"](
+ layer["attention_norm"](hidden_states), attention_mask
+ )
+
+ # Feed-forward block
+ hidden_states = hidden_states + layer["feed_forward"](
+ layer["feed_forward_norm"](hidden_states)
+ )
+
+ hidden_states = self.norm(hidden_states)
+ logits = self.output(hidden_states)
+
+ return logits
+
+ def compute_loss(
+ self,
+ input_ids: torch.Tensor,
+ target_ids: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ """
+ Compute cross entropy loss.
+
+ Args:
+ input_ids: Input token ids
+ target_ids: Target token ids
+ attention_mask: Optional attention mask
+
+ Returns:
+ Loss value
+ """
+ logits = self(input_ids, attention_mask)
+ loss = F.cross_entropy(
+ rearrange(logits, "b s v -> (b s) v"),
+ rearrange(target_ids, "b s -> (b s)"),
+ )
+ return loss
+
+ @torch.no_grad()
+ def _generate(
+ self,
+ input_ids: torch.Tensor,
+ max_new_tokens: int = 100,
+ temperature: float = 1.0,
+ top_k: Optional[int] = None,
+ top_p: Optional[float] = None,
+ repetition_penalty: float = 1.0,
+ ) -> torch.Tensor:
+ """
+ Generate new tokens autoregressively.
+
+ Args:
+ input_ids: Starting sequence
+ max_new_tokens: Number of tokens to generate
+ temperature: Sampling temperature
+ top_k: K for top-k sampling
+ top_p: P for nucleus sampling
+ repetition_penalty: Penalty for repeating tokens
+
+ Returns:
+ Generated sequence
+ """
+ batch_size, seq_length = input_ids.shape
+ generated = input_ids.clone()
+
+ for _ in range(max_new_tokens):
+ if generated.size(1) >= self.config.max_sequence_length:
+ break
+
+ # Forward pass
+ logits = self(generated)[:, -1, :]
+
+ # Apply temperature
+ logits = logits / temperature
+
+ # Apply repetition penalty
+ if repetition_penalty != 1.0:
+ for i in range(batch_size):
+ for token_id in set(generated[i].tolist()):
+ logits[i, token_id] /= repetition_penalty
+
+ # Apply top-k sampling
+ if top_k is not None:
+ indices_to_remove = (
+ logits
+ < torch.topk(logits, top_k)[0][..., -1, None]
+ )
+ logits[indices_to_remove] = float("-inf")
+
+ # Apply nucleus (top-p) sampling
+ if top_p is not None:
+ sorted_logits, sorted_indices = torch.sort(
+ logits, descending=True
+ )
+ cumulative_probs = torch.cumsum(
+ F.softmax(sorted_logits, dim=-1), dim=-1
+ )
+
+ # Remove tokens with cumulative probability above the threshold
+ sorted_indices_to_remove = cumulative_probs > top_p
+ sorted_indices_to_remove[..., 1:] = (
+ sorted_indices_to_remove[..., :-1].clone()
+ )
+ sorted_indices_to_remove[..., 0] = 0
+
+ indices_to_remove = torch.zeros_like(
+ logits, dtype=torch.bool
+ )
+ indices_to_remove.scatter_(
+ 1, sorted_indices, sorted_indices_to_remove
+ )
+ logits[indices_to_remove] = float("-inf")
+
+ # Sample next token
+ probs = F.softmax(logits, dim=-1)
+ next_token = torch.multinomial(probs, num_samples=1)
+
+ # Append to sequence
+ generated = torch.cat([generated, next_token], dim=1)
+
+ return generated
+
+ def generate(
+ self,
+ input_ids: torch.Tensor,
+ max_new_tokens: int = 100,
+ temperature: float = 1.0,
+ top_k: Optional[int] = None,
+ top_p: Optional[float] = None,
+ repetition_penalty: float = 1.0,
+ ):
+ tensor_data = self._generate(
+ input_ids=input_ids,
+ max_new_tokens=max_new_tokens,
+ temperature=temperature,
+ top_k=top_k,
+ top_p=top_p,
+ repetition_penalty=repetition_penalty,
+ )
+
+ return tensor_to_data(tensor_data)
+
+
+# import torch
+# from typing import Optional
+
+
+class DataType(Enum):
+ TEXT = "text"
+ IMAGE = "image"
+ AUDIO = "audio"
+ VIDEO = "video"
+ BINARY = "binary"
+
+
+class ByteDetokenizer:
+ """Utility class for converting model output bytes back to original data formats."""
+
+ @staticmethod
+ def tensor_to_bytes(tensor: torch.Tensor) -> bytes:
+ """Convert model output tensor to bytes."""
+ # Convert logits/probabilities to byte values
+ if tensor.dim() > 1:
+ # If we have logits, convert to byte indices
+ byte_indices = tensor.argmax(dim=-1)
+ else:
+ byte_indices = tensor
+
+ # Convert to Python bytes
+ return bytes(
+ byte_indices.cpu().numpy().astype(np.uint8).tolist()
+ )
+
+ @staticmethod
+ def decode_text(byte_sequence: bytes) -> str:
+ """Convert bytes to text."""
+ try:
+ return byte_sequence.decode("utf-8")
+ except UnicodeDecodeError:
+ # Try with error handling
+ return byte_sequence.decode("utf-8", errors="replace")
+
+ @staticmethod
+ def decode_image(
+ byte_sequence: bytes,
+ mode: str = "RGB",
+ size: Optional[tuple] = None,
+ ) -> Image.Image:
+ """Convert bytes to image.
+
+ Args:
+ byte_sequence: Raw image bytes
+ mode: Image mode (RGB, RGBA, L, etc.)
+ size: Optional tuple of (width, height)
+ """
+ try:
+ # Try to load as-is first (for standard image formats)
+ img = Image.open(io.BytesIO(byte_sequence))
+ if size:
+ img = img.resize(size)
+ return img
+        except Exception:
+ # If failed, assume raw pixel data
+ if not size:
+ # Try to determine size from byte count
+ pixel_count = len(byte_sequence) // len(mode)
+ size = (
+ int(np.sqrt(pixel_count)),
+ int(np.sqrt(pixel_count)),
+ )
+
+ # Convert raw bytes to pixel array
+ pixels = np.frombuffer(byte_sequence, dtype=np.uint8)
+ pixels = pixels.reshape((*size, len(mode)))
+
+ return Image.fromarray(pixels, mode=mode)
+
+ @staticmethod
+ def decode_audio(
+ byte_sequence: bytes,
+ sample_rate: int = 44100,
+ channels: int = 2,
+ sample_width: int = 2,
+ ) -> np.ndarray:
+ """Convert bytes to audio samples.
+
+ Args:
+ byte_sequence: Raw audio bytes
+ sample_rate: Audio sample rate in Hz
+ channels: Number of audio channels
+ sample_width: Bytes per sample (1, 2, or 4)
+ """
+ # Determine format string based on sample width
+ format_str = {
+ 1: "b", # signed char
+ 2: "h", # short
+ 4: "i", # int
+ }[sample_width]
+
+ # Unpack bytes to samples
+ sample_count = len(byte_sequence) // (channels * sample_width)
+ samples = struct.unpack(
+ f"<{sample_count * channels}{format_str}", byte_sequence
+ )
+
+ # Reshape to [samples, channels]
+ return np.array(samples).reshape(-1, channels)
+
+ def decode_data(
+ self,
+ model_output: Union[torch.Tensor, bytes],
+ data_type: DataType,
+ **kwargs,
+ ) -> Union[str, Image.Image, np.ndarray, bytes]:
+ """Main method to decode model output to desired format.
+
+ Args:
+ model_output: Either tensor from model or raw bytes
+ data_type: Type of data to decode to
+ **kwargs: Additional parameters for specific decoders
+
+ Returns:
+ Decoded data in specified format
+ """
+ # Convert tensor to bytes if needed
+ if isinstance(model_output, torch.Tensor):
+ byte_sequence = self.tensor_to_bytes(model_output)
+ else:
+ byte_sequence = model_output
+
+ # Decode based on type
+ if data_type == DataType.TEXT:
+ return self.decode_text(byte_sequence)
+ elif data_type == DataType.IMAGE:
+ return self.decode_image(byte_sequence, **kwargs)
+ elif data_type == DataType.AUDIO:
+ return self.decode_audio(byte_sequence, **kwargs)
+ elif data_type == DataType.VIDEO:
+ raise NotImplementedError(
+ "Video decoding not yet implemented"
+ )
+ else: # BINARY
+ return byte_sequence
+
+
+# Usage example
+
+
+class Modality(Enum):
+ TEXT = auto()
+ IMAGE = auto()
+ AUDIO = auto()
+ VIDEO = auto()
+ BINARY = auto()
+ MULTIMODAL = auto()
+
+
+@dataclass
+class ModalityInfo:
+ """Information about detected modality."""
+
+ modality: Modality
+ confidence: float
+    metadata: Dict[str, Any]
+ sub_modalities: Optional[List["ModalityInfo"]] = None
+
+
+class ModalityDetector:
+ """Detects data modalities from byte sequences."""
+
+ # Common file signatures (magic numbers)
+ SIGNATURES = {
+ # Images
+ b"\xFF\xD8\xFF": "JPEG",
+ b"\x89PNG\r\n\x1a\n": "PNG",
+ b"GIF87a": "GIF",
+ b"GIF89a": "GIF",
+ b"RIFF": "WEBP",
+ # Audio
+ b"RIFF....WAVE": "WAV",
+ b"ID3": "MP3",
+ b"\xFF\xFB": "MP3",
+ b"OggS": "OGG",
+ # Video
+ b"\x00\x00\x00\x18ftypmp42": "MP4",
+ b"\x00\x00\x00\x1Cftypav01": "MP4",
+ b"\x1A\x45\xDF\xA3": "WEBM",
+ }
+
+ def __init__(self):
+ self.magic = magic.Magic(mime=True)
+
+ def _check_text_probability(self, data: bytes) -> float:
+ """Estimate probability that data is text."""
+ # Check if data is valid UTF-8
+ try:
+ data.decode("utf-8")
+ # Count printable ASCII characters
+ printable = sum(1 for b in data if 32 <= b <= 126)
+ return printable / len(data)
+ except UnicodeDecodeError:
+ return 0.0
+
+ def _check_image_validity(self, data: bytes) -> Tuple[bool, Dict]:
+ """Check if data is a valid image and extract metadata."""
+ try:
+ with io.BytesIO(data) as bio:
+ img = Image.open(bio)
+ return True, {
+ "format": img.format,
+ "size": img.size,
+ "mode": img.mode,
+ }
+        except Exception:
+ return False, {}
+
+ def _check_audio_validity(self, data: bytes) -> Tuple[bool, Dict]:
+ """Check if data is valid audio and extract metadata."""
+ try:
+ with io.BytesIO(data) as bio:
+ # Try to parse as WAV
+ with wave.open(bio) as wav:
+ return True, {
+ "channels": wav.getnchannels(),
+ "sample_width": wav.getsampwidth(),
+ "framerate": wav.getframerate(),
+ "frames": wav.getnframes(),
+ }
+        except Exception:
+ # Check for other audio signatures
+ for sig in [b"ID3", b"\xFF\xFB", b"OggS"]:
+ if data.startswith(sig):
+ return True, {"format": "compressed_audio"}
+ return False, {}
+
+ def _detect_boundaries(
+ self, data: bytes
+ ) -> List[Tuple[int, int, Modality]]:
+ """Detect boundaries between different modalities in the data."""
+ boundaries = []
+ current_pos = 0
+
+ while current_pos < len(data):
+ # Look for known signatures
+ for sig, format_type in self.SIGNATURES.items():
+ if data[current_pos:].startswith(sig):
+ # Found a signature, determine its length
+ if format_type in ["JPEG", "PNG", "GIF"]:
+ # Find image end
+ try:
+ with io.BytesIO(
+ data[current_pos:]
+ ) as bio:
+ img = Image.open(bio)
+ img.verify()
+ size = bio.tell()
+ boundaries.append(
+ (
+ current_pos,
+ current_pos + size,
+ Modality.IMAGE,
+ )
+ )
+ current_pos += size
+ continue
+                        except Exception:
+ pass
+
+ # Check for text sections
+ text_prob = self._check_text_probability(
+ data[current_pos : current_pos + 1024]
+ )
+ if text_prob > 0.8:
+ # Look for end of text section
+ end_pos = current_pos + 1
+ while end_pos < len(data):
+ if (
+ self._check_text_probability(
+ data[end_pos : end_pos + 32]
+ )
+ < 0.5
+ ):
+ break
+ end_pos += 1
+ boundaries.append(
+ (current_pos, end_pos, Modality.TEXT)
+ )
+ current_pos = end_pos
+ continue
+
+ current_pos += 1
+
+ return boundaries
+
+ def detect_modality(self, data: bytes) -> ModalityInfo:
+ """Detect modality of byte sequence."""
+ # First check for single modality
+ mime_type = self.magic.from_buffer(data)
+
+ # Check text
+ text_prob = self._check_text_probability(data)
+ if text_prob > 0.9:
+ return ModalityInfo(
+ modality=Modality.TEXT,
+ confidence=text_prob,
+ metadata={"mime_type": mime_type},
+ )
+
+ # Check image
+ is_image, image_meta = self._check_image_validity(data)
+ if is_image:
+ return ModalityInfo(
+ modality=Modality.IMAGE,
+ confidence=1.0,
+ metadata={**image_meta, "mime_type": mime_type},
+ )
+
+ # Check audio
+ is_audio, audio_meta = self._check_audio_validity(data)
+ if is_audio:
+ return ModalityInfo(
+ modality=Modality.AUDIO,
+ confidence=1.0,
+ metadata={**audio_meta, "mime_type": mime_type},
+ )
+
+ # Check for multimodal content
+ boundaries = self._detect_boundaries(data)
+ if len(boundaries) > 1:
+ sub_modalities = []
+ for start, end, modality in boundaries:
+ chunk_data = data[start:end]
+ sub_info = self.detect_modality(chunk_data)
+ if sub_info.modality != Modality.BINARY:
+ sub_modalities.append(sub_info)
+
+ if sub_modalities:
+ return ModalityInfo(
+ modality=Modality.MULTIMODAL,
+ confidence=0.8,
+ metadata={"mime_type": "multipart/mixed"},
+ sub_modalities=sub_modalities,
+ )
+
+ # Default to binary
+ return ModalityInfo(
+ modality=Modality.BINARY,
+ confidence=0.5,
+ metadata={"mime_type": mime_type},
+ )
+
+ def split_modalities(
+ self, data: bytes
+ ) -> List[Tuple[Modality, bytes, Dict]]:
+ """Split multimodal data into separate modalities."""
+ boundaries = self._detect_boundaries(data)
+ result = []
+
+ for start, end, modality in boundaries:
+ chunk = data[start:end]
+ info = self.detect_modality(chunk)
+ result.append((modality, chunk, info.metadata))
+
+ return result
+
+
+class AutoDetectBytesDecoder:
+ """Decoder that automatically detects and decodes different modalities."""
+
+ def __init__(self):
+ self.detector = ModalityDetector()
+ self.text_decoder = ByteDetokenizer() # From previous example
+
+ def decode(
+ self, data: bytes
+    ) -> Union[str, Image.Image, np.ndarray, List[Any]]:
+ """Automatically detect and decode byte sequence."""
+ info = self.detector.detect_modality(data)
+
+ if info.modality == Modality.MULTIMODAL:
+ # Handle multimodal content
+ parts = self.detector.split_modalities(data)
+ return [
+ self.decode(chunk) for modality, chunk, _ in parts
+ ]
+
+ if info.modality == Modality.TEXT:
+ return self.text_decoder.decode_text(data)
+ elif info.modality == Modality.IMAGE:
+ return self.text_decoder.decode_image(data)
+ elif info.modality == Modality.AUDIO:
+ return self.text_decoder.decode_audio(data)
+ else:
+ return data
+
+
+# # Example usage
+# def demo_auto_detection():
+# """Demonstrate auto modality detection."""
+# # Create mixed content
+# text = "Hello, World!".encode('utf-8')
+
+# # Create a small test image
+# img = Image.new('RGB', (100, 100), color='red')
+# img_bytes = io.BytesIO()
+# img.save(img_bytes, format='PNG')
+
+# # Combine into multimodal content
+# mixed_content = text + img_bytes.getvalue()
+
+# # Initialize decoder
+# decoder = AutoDetectBytesDecoder()
+
+# # Decode
+# result = decoder.decode(mixed_content)
+
+# if isinstance(result, list):
+# print("Detected multimodal content:")
+# for i, part in enumerate(result):
+# print(f"Part {i+1}: {type(part)}")
+
+# if __name__ == "__main__":
+# demo_auto_detection()
+
+
+def tensor_to_data(tensor: Tensor):
+ byte_sequence = ByteDetokenizer.tensor_to_bytes(tensor)
+
+ # Initialize auto-detector
+ decoder = AutoDetectBytesDecoder()
+
+ # Decode with automatic detection
+ result = decoder.decode(byte_sequence)
+
+ return result
+
+
+def demo_byte_predictor():
+ """Demo with smaller dimensions to test."""
+ # Initialize model configuration with adjusted dimensions
+ config = ModelConfig(
+ vocab_size=256,
+ hidden_size=128, # Smaller for testing
+ num_layers=2, # Fewer layers for testing
+ num_key_value_heads=2,
+ num_query_heads=4,
+ dropout=0.1,
+ max_sequence_length=1024,
+ )
+
+ # Initialize model
+ model = EnhancedBytePredictor(config)
+ logger.info("Model initialized")
+
+ # Move to GPU if available
+ device = torch.device(
+ "cuda" if torch.cuda.is_available() else "cpu"
+ )
+ model = model.to(device)
+ logger.info(f"Using device: {device}")
+
+ # Create sample input data
+ batch_size = 2
+ seq_length = 16 # Shorter sequence for testing
+ input_ids = torch.randint(
+ 0, config.vocab_size, (batch_size, seq_length), device=device
+ )
+ logger.info(f"Created input tensor of shape: {input_ids.shape}")
+
+ # Test forward pass
+ try:
+ logits = model(input_ids)
+ logger.info(
+ f"Forward pass successful! Output shape: {logits.shape}"
+ )
+
+ # Test loss computation
+ target_ids = torch.randint(
+ 0,
+ config.vocab_size,
+ (batch_size, seq_length),
+ device=device,
+ )
+ loss = model.compute_loss(input_ids, target_ids)
+ logger.info(
+ f"Loss computation successful! Loss value: {loss.item():.4f}"
+ )
+
+ # Test generation
+ prompt = torch.randint(
+ 0,
+ config.vocab_size,
+ (1, 4), # Very short prompt for testing
+ device=device,
+ )
+ generated = model.generate(
+ prompt, max_new_tokens=8, temperature=0.8, top_k=50
+ )
+ logger.info(
+ f"Generation successful! Generated shape: {generated.shape}"
+ )
+
+ except Exception as e:
+ logger.error(f"Error during execution: {str(e)}")
+ raise
+
+
+if __name__ == "__main__":
+ # Set up logging
+ # logger.remove() # Remove default handler
+ # logger.add(sys.stderr, format="{time:HH:mm:ss} | {level} | {message}")
+
+ demo_byte_predictor()
diff --git a/docs/assets/css/extra.css b/docs/assets/css/extra.css
index b639e2f7..a9967e01 100644
--- a/docs/assets/css/extra.css
+++ b/docs/assets/css/extra.css
@@ -1,18 +1,27 @@
-
-/* Further customization as needed */
-
+/* Further customization as needed */
.md-typeset__table {
- min-width: 100%;
+ min-width: 100%;
}
.md-typeset table:not([class]) {
display: table;
}
-/*
-:root {
- --md-primary-fg-color: #EE0F0F;
- --md-primary-fg-color--light: #ECB7B7;
- --md-primary-fg-color--dark: #90030C;
- } */
\ No newline at end of file
+/* Dark mode
+[data-md-color-scheme="slate"] {
+ --md-default-bg-color: black;
+}
+
+.header__ellipsis {
+ color: black;
+}
+
+.md-copyright__highlight {
+ color: black;
+}
+
+
+.md-header.md-header--shadow {
+ color: black;
+} */
\ No newline at end of file
diff --git a/docs/corporate/culture.md b/docs/corporate/culture.md
new file mode 100644
index 00000000..4c34527d
--- /dev/null
+++ b/docs/corporate/culture.md
@@ -0,0 +1,56 @@
+# Swarms Corp Culture Document
+
+## **Our Mission and Purpose**
+At Swarms Corp, we believe in more than just building technology. We are advancing humanity by pioneering systems that allow agents, both AI and human, to collaborate seamlessly, working toward the betterment of society and unlocking a future of abundance. Our mission is everything, and each of us is here because we understand the transformative potential of our work. We are not just a company; we are a movement aimed at reshaping the future. We strive to create systems that can tackle the most complex challenges facing humanity, from climate change to inequality, with solutions that are powered by collective intelligence.
+
+Our purpose goes beyond just technological advancement. We are here to create tools that empower people, uplift communities, and set a new standard for what technology can achieve when the mission is clear and the commitment is unwavering. We see every project as a step toward something greater: an abundant future where human potential is limitless and artificial intelligence serves as a powerful ally to mankind.
+
+## **Values We Live By**
+
+### 1. **Hard Work: No Stone Unturned**
+We believe that hard work is the foundation of all great achievements. At Swarms Corp, each member of the team is dedicated to putting in the effort required to solve complex problems. This isn't just about long hours; it's about focused, intentional work that leads to breakthroughs. We hold each other to high standards, and we don't shy away from the hard paths when the mission calls for it. Every challenge we face is an opportunity to demonstrate our resilience and our commitment to excellence. We understand that the pursuit of groundbreaking innovation demands not just effort, but a relentless curiosity and the courage to face the unknown.
+
+At Swarms Corp, we respect the grind because we know that transformative change doesn't happen overnight. It requires continuous effort, sacrifice, and an unwavering focus on the task at hand. We celebrate hard work, not because it's difficult, but because we understand its potential to transform ambitious ideas into tangible solutions. We honor the sweat equity that goes into building something that can truly make a difference.
+
+### 2. **Mission Above Everything**
+Our mission is our guiding star. Every decision, every task, and every project must align with our overarching purpose: advancing humanity and creating a post-scarcity world. This means sometimes putting the collective goal ahead of individual preferences or comfort. We're here to do something much larger than ourselves, and we prioritize the mission with relentless commitment. We know that personal sacrifices will often be necessary, and we embrace that reality because the rewards of our mission are far greater than any individual gain.
+
+When we say "mission above everything," we mean that our focus is not just on immediate success, but on creating a lasting impact that will benefit future generations. Our mission provides meaning and direction to our daily efforts, and we see every task as a small yet crucial part of our broader vision. We remind ourselves constantly of why we are here and who we are working for: not just our customers or stakeholders, but humanity as a whole.
+
+### 3. **Finding the Shortest Path**
+Innovation thrives on efficiency. At Swarms Corp, we value finding the shortest, most effective paths to reach our goals. We encourage everyone to question the status quo, challenge existing processes, and ask, "Is there a better way to do this?" Creativity means finding new routes, whether by leveraging automation, questioning outdated steps, or collaborating to uncover insights faster. We honor those who seek smarter paths over conventional ones. Efficiency is not just about saving time; it's about maximizing impact and ensuring that every ounce of effort drives meaningful progress.
+
+Finding the shortest path is about eliminating unnecessary complexity and focusing our energy on what truly matters. We encourage a culture of continuous improvement, where each team member is empowered to innovate on processes, tools, and methodologies. The shortest path does not mean cutting corners; it means removing obstacles, optimizing workflows, and focusing on high-leverage activities that bring us closer to our mission. We celebrate those who find elegant, effective solutions that others might overlook.
+
+### 4. **Advancing Humanity**
+The ultimate goal of everything we do is to elevate humanity. We envision a world where intelligence, both human and artificial, works in harmony to improve lives, solve global challenges, and expand possibilities. This ethos drives our work, whether it's developing advanced AI systems, collaborating with others to push technological boundaries, or thinking deeply about how our creations can impact society in positive ways. Every line of code, every idea, and every strategy should move us closer to this vision.
+
+Advancing humanity means we always think about the ethical implications of our work. We are deeply aware that the technology we create has the power to transform lives, and with that power comes the responsibility to ensure our contributions are always positive. We seek not only to push the boundaries of what technology can do but also to ensure that these advancements are inclusive and equitable. Our focus is on building a future where every person has access to the tools and opportunities they need to thrive.
+
+Our vision is to bridge the gap between technology and humanity's most pressing needs. We aim to democratize intelligence, making it available for everyone, regardless of their background or resources. This is how we advance humanity: not just through technological feats, but by ensuring that our innovations serve the greater good and uplift everyone.
+
+## **Our Way of Working**
+
+- **Radical Ownership**: Each team member is not just a contributor but an owner of their domain. We take full responsibility for outcomes, follow through on our promises, and ensure that nothing falls through the cracks. We don't wait for permission; we act, innovate, and lead. Radical ownership means understanding that our actions have a direct impact on the success of our mission. It's about proactive problem-solving and always stepping up when we see an opportunity to make a difference.
+
+- **Honesty and Respect**: We communicate openly and respect each other's opinions. Tough conversations are a natural part of building something impactful. We face challenges head-on with honesty and directness while maintaining a respectful and supportive atmosphere. Honesty fosters trust, and trust is the foundation of any high-performing team. We value feedback and see it as an essential tool for growth, both for individuals and for the organization as a whole.
+
+- **One Team, One Mission**: Collaboration isn't just encouraged; it's essential. We operate as a swarm, where each agent contributes to a greater goal, learning from each other, sharing knowledge, and constantly iterating together. We celebrate wins collectively and approach obstacles with a unified spirit. No one succeeds alone; every achievement is the result of collective effort. We lift each other up, and we know that our strength lies in our unity and shared purpose.
+
+- **The Future is Ours to Shape**: Our work is inherently future-focused. We're not satisfied with simply keeping up; we want to set the pace. Every day, we take one step closer to a future where humanity's potential is limitless, where scarcity is eliminated, and where intelligence, human and machine, advances society. We are not passive participants in the future; we are active shapers of it. We imagine a better tomorrow, and then we take deliberate steps to create it. Our work today will define what the world looks like tomorrow.
+
+## **Expectations**
+
+- **Be Bold**: Don't be afraid to take risks. Innovation requires experimentation, and sometimes that means making mistakes. We support each other in learning from failures and taking smart, calculated risks. Boldness is at the heart of progress. We want every member of Swarms Corp to feel empowered to think outside the box, propose unconventional ideas, and drive innovation. Mistakes are seen not as setbacks, but as opportunities for learning and growth.
+
+- **Keep the Mission First**: Every decision we make should be with our mission in mind. Ask yourself how your work advances the cause of creating an abundant future. The mission is the yardstick against which we measure our efforts, ensuring that everything we do pushes us closer to our ultimate goals. We understand that the mission is bigger than any one of us, and we strive to contribute meaningfully every day.
+
+- **Find Solutions, Not Problems**: While identifying issues is important, we value those who come with solutions. Embrace challenges as opportunities to innovate and find ways to make an impact. We foster a culture of proactive problem-solving where obstacles are seen as opportunities to exercise creativity. If something's broken, we fix it. If there's a better way, we find it. We expect our team members to be solution-oriented, always seeking ways to turn challenges into stepping stones for progress.
+
+- **Think Big, Act Fast**: We're not here to make small changes; we're here to revolutionize how we think about intelligence, automation, and society. Dream big, but work with urgency. We are tackling problems of immense scale, and we must move with intention and speed. Thinking big means envisioning a world that is radically different and better, and acting fast means executing the steps to get us there without hesitation. We value ambition and the courage to move swiftly when the time is right.
+
+## **Our Commitment to You**
+Swarms Corp is a place for dreamers and doers, for those who are driven by purpose and are unafraid of the work required to achieve it. We commit to providing you with the tools, support, and environment you need to contribute meaningfully to our mission. We are here to advance humanity together, one agent, one solution, one breakthrough at a time. We pledge to nurture an environment that encourages creativity, collaboration, and bold thinking. Here, you will find a community that celebrates your wins, supports you through challenges, and pushes you to be your best self.
+
+Our commitment also includes ensuring that your voice is heard. We are building the future together, and every perspective matters. We strive to create an inclusive space where diversity of thought is welcomed, and where each team member feels valued for their unique contributions. At Swarms Corp, you are not just part of a team; you are part of a mission that aims to change the course of humanity for the better. Together, we'll make the impossible possible, one breakthrough at a time.
+
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 73ec0533..f702f1c5 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -57,29 +57,35 @@ extra:
property: G-MPE9C65596
theme:
- name: material
- custom_dir: overrides
- logo: assets/img/swarms-logo.png
- palette:
+ name: material
+ custom_dir: overrides
+ logo: assets/img/swarms-logo.png
+ palette:
- scheme: default
- primary: black
+ primary: white # White background
+      accent: white # White accents for interactive elements
toggle:
- icon: material/brightness-7
+ icon: material/brightness-7
name: Switch to dark mode
- # Palette toggle for dark mode
- - scheme: slate
+ - scheme: slate # Optional: lighter shades for accessibility
primary: black
+ accent: black
toggle:
icon: material/brightness-4
name: Switch to light mode
- features:
- - content.code.copy
- - content.code.annotate
- - navigation.tabs
- - navigation.sections
- - navigation.expand
- - navigation.top
- - announce.dismiss
+ features:
+ - content.code.copy
+ - content.code.annotate
+ - navigation.tabs
+ - navigation.sections
+ - navigation.expand
+ - navigation.top
+ - announce.dismiss
+ font:
+ text: "Fira Sans" # Clean and readable text
+ code: "Fira Code" # Modern look for code snippets
+
+
# Extensions
markdown_extensions:
- abbr
@@ -138,6 +144,7 @@ nav:
- Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md"
- Onboarding:
- Installation: "swarms/install/install.md"
+ - Environment Configuration: "swarms/install/workspace_manager.md"
- Quickstart: "swarms/install/quickstart.md"
- Swarms CLI: "swarms/cli/main.md"
# - Swarms + Docker:
@@ -257,6 +264,7 @@ nav:
- An Analysis on Prompting Strategies: "swarms/prompts/overview.md"
- Managing Prompts in Production: "swarms/prompts/main.md"
- Corporate:
+ - Culture: "corporate/culture.md"
- Hiring: "corporate/hiring.md"
- Swarms Goals & Milestone Tracking; A Vision for 2024 and Beyond: "corporate/2024_2025_goals.md"
- Clusterops:
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 008bedf1..121e0475 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -26,7 +26,7 @@ jinja2~=3.1
markdown~=3.7
mkdocs-material-extensions~=1.3
pygments~=2.18
-pymdown-extensions~=10.11
+pymdown-extensions~=10.12
# Requirements for plugins
babel~=2.16
diff --git a/6_0_0.md b/docs/swarms/changelog/6_0_0 2.md
similarity index 100%
rename from 6_0_0.md
rename to docs/swarms/changelog/6_0_0 2.md
diff --git a/docs/swarms/changelog/6_0_0.md b/docs/swarms/changelog/6_0_0.md
new file mode 100644
index 00000000..aae2e8ef
--- /dev/null
+++ b/docs/swarms/changelog/6_0_0.md
@@ -0,0 +1,59 @@
+# Swarms 6.0.0 - Performance & Reliability Update
+
+We're excited to announce the release of Swarms 6.0.0, bringing significant improvements to performance, reliability, and developer experience. This release focuses on streamlining core functionalities while enhancing the overall stability of the framework.
+
+## Installation
+
+```bash
+pip3 install -U swarms
+```
+
+## Highlights
+
+### Agent Enhancements
+- **Improved RAG Performance**: Significant improvements to Retrieval-Augmented Generation capabilities
+- **Enhanced Prompt Generation**: Auto-generate prompt now incorporates name, description, and system prompt for more contextual interactions
+- **Streamlined Architecture**: Cleaned up unused code for better performance and maintainability
+- **Simplified State Management**: Consolidated state management methods into a single `load()` function
+
+### Tools & Execution
+- **Optimized Environment Management**: Fixed multiple environment instantiation issue
+ - Environments now initialize once during `__init__`
+- **New SwarmRouter Function**: Simplified routing mechanism (see the usage sketch below)
+ - Returns consolidated string output from all agents
+ - Improved coordination between swarm components
+
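+A minimal usage sketch of the routing pattern described above. The constructor and `run()` arguments mirror the `SwarmRouter` examples elsewhere in this repository; the agent prompts and model name below are illustrative assumptions, not required values:
+
+```python
+import os
+
+from swarms import Agent
+from swarm_models import OpenAIChat
+from swarms.structs.swarm_router import SwarmRouter
+
+# One shared chat model for both agents (any configured model works)
+model = OpenAIChat(
+    openai_api_key=os.getenv("OPENAI_API_KEY"),
+    model_name="gpt-4o-mini",
+    temperature=0.1,
+)
+
+researcher = Agent(
+    agent_name="Researcher",
+    system_prompt="Research the given topic and report key findings.",
+    llm=model,
+    max_loops=1,
+)
+writer = Agent(
+    agent_name="Writer",
+    system_prompt="Turn the findings into a short, readable summary.",
+    llm=model,
+    max_loops=1,
+)
+
+router = SwarmRouter(
+    name="example-swarm",
+    description="Route a task across the research and writing agents",
+    agents=[researcher, writer],
+    swarm_type="auto",  # let the router pick the workflow
+    max_loops=1,
+)
+
+# run() returns the consolidated string output from all agents
+print(router.run("Summarize recent developments in swarm intelligence"))
+```
+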
+## Performance Improvements
+- Faster execution times
+- Reduced memory footprint
+- More reliable logging system
+- Lightweight and efficient codebase
+
+## Join Our Community
+
+### We're Hiring!
+Join our growing team! We're currently looking for:
+- Agent Engineers
+- Developer Relations
+- Infrastructure Engineers
+- And more!
+
+### Get Involved
+- Star our repository
+- Fork the project
+- Submit pull requests
+- Report issues
+- Share your ideas
+
+### Contact & Support
+- Email: kye@swarms.world
+- Issues: [GitHub Issues](https://github.com/kyegomez/swarms/issues)
+
+## What's Next?
+Have ideas for features, bug fixes, or improvements? We'd love to hear from you! Reach out through our GitHub issues or email us directly.
+
+---
+
+*Thank you to all our contributors and users who make Swarms better every day. Together, we're building the future of swarm intelligence.*
+
+#SwarmAI #OpenSource #AI #MachineLearning
\ No newline at end of file
diff --git a/docs/swarms/install/install.md b/docs/swarms/install/install.md
index f69a09bd..9d52d84e 100644
--- a/docs/swarms/install/install.md
+++ b/docs/swarms/install/install.md
@@ -127,7 +127,7 @@ Before you begin, ensure you have the following installed:
poetry install --extras "desktop"
```
-=== "Using Docker"
+=== "Using Docker COMING SOON [DOES NOT WORK YET]"
Docker is an excellent option for creating isolated and reproducible environments, suitable for both development and production.
diff --git a/docs/swarms/install/workspace_manager.md b/docs/swarms/install/workspace_manager.md
new file mode 100644
index 00000000..d2cb4ca3
--- /dev/null
+++ b/docs/swarms/install/workspace_manager.md
@@ -0,0 +1,186 @@
+# Swarms Framework Environment Configuration
+
+This guide details the environment variables used in the Swarms framework for configuration and customization of your agent-based applications.
+
+## Configuration Setup
+
+Create a `.env` file in your project's root directory to configure the Swarms framework. This file will contain all necessary environment variables for customizing your agent's behavior, logging, and analytics.
+
+## Environment Variables
+
+### Core Variables
+
+#### `WORKSPACE_DIR`
+- **Purpose**: Defines the directory where all agent states and execution logs are stored
+- **Type**: String (path)
+- **Default**: `./workspace`
+- **Example**:
+```bash
+WORKSPACE_DIR=/path/to/your/workspace
+```
+- **Usage**:
+ - Stores JSON files containing agent states
+ - Maintains execution history
+ - Keeps track of agent interactions
+ - Preserves conversation logs
+
+#### `SWARMS_AUTOUPDATE_ON`
+- **Purpose**: Controls automatic updates of the Swarms framework
+- **Type**: Boolean
+- **Default**: `false`
+- **Example**:
+```bash
+SWARMS_AUTOUPDATE_ON=true
+```
+- **Features**:
+ - Automatically updates to the latest stable version
+ - Ensures you have the newest features
+ - Maintains compatibility with the latest improvements
+ - Handles dependency updates
+- **Considerations**:
+ - Set to `false` if you need version stability
+ - Recommended `true` for development environments
+ - Consider system requirements for auto-updates
+ - May require restart after updates
+
+### Telemetry Configuration
+
+#### `USE_TELEMETRY`
+- **Purpose**: Controls whether telemetry data is collected
+- **Type**: Boolean
+- **Default**: `false`
+- **Example**:
+```bash
+USE_TELEMETRY=true
+```
+- **Data Collected**:
+ - Agent performance metrics
+ - Execution time statistics
+ - Memory usage
+ - Error rates
+ - System health indicators
+
+### Analytics Integration
+
+#### `SWARMS_API_KEY`
+- **Purpose**: Authentication key for the Swarms Analytics Suite
+- **Type**: String
+- **Required**: Yes, for analytics features
+- **Example**:
+```bash
+SWARMS_API_KEY=your_api_key_here
+```
+- **Features**:
+ - Real-time agent execution tracking
+ - Usage analytics
+ - Performance monitoring
+ - Cost tracking
+ - Custom metrics
+
+## Getting Started
+
+1. Create a new `.env` file:
+```bash
+touch .env
+```
+
+2. Add your configuration:
+```bash
+# Basic configuration
+WORKSPACE_DIR=./my_workspace
+
+# Enable auto-updates
+SWARMS_AUTOUPDATE_ON=true
+
+# Enable telemetry
+USE_TELEMETRY=true
+
+# Add your Swarms API key
+SWARMS_API_KEY=your_api_key_here
+```
+
+3. Obtain your API key:
+ - Visit [swarms.ai](https://swarms.ai)
+ - Create an account or log in
+ - Navigate to the API section
+ - Generate your unique API key
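+
+Once the `.env` file is in place, the variables are read from the process environment at runtime. A minimal sketch of loading and inspecting them (assuming the `python-dotenv` package; the variable names match this guide, though how the framework consumes them internally may differ):
+
+```python
+import os
+
+from dotenv import load_dotenv
+
+# Read key-value pairs from .env into the process environment
+load_dotenv()
+
+workspace_dir = os.getenv("WORKSPACE_DIR", "./workspace")  # default from this guide
+autoupdate = os.getenv("SWARMS_AUTOUPDATE_ON", "false").lower() == "true"
+telemetry = os.getenv("USE_TELEMETRY", "false").lower() == "true"
+api_key = os.getenv("SWARMS_API_KEY")  # None when analytics is not configured
+
+print(f"Workspace: {workspace_dir}, auto-update: {autoupdate}, telemetry: {telemetry}")
+```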
+
+## Best Practices
+
+1. **Security**:
+ - Never commit your `.env` file to version control
+ - Add `.env` to your `.gitignore` file
+ - Keep your API keys secure and rotate them periodically
+
+2. **Workspace Organization**:
+ - Use descriptive workspace directory names
+ - Implement regular cleanup of old logs
+ - Monitor workspace size to prevent disk space issues
+
+3. **Telemetry Management**:
+ - Enable telemetry in development for debugging
+ - Consider privacy implications in production
+ - Review collected data periodically
+
+4. **Auto-Update Management**:
+ - Test updates in development before enabling in production
+ - Keep backups before enabling auto-updates
+ - Monitor system resources during updates
+ - Schedule updates during low-traffic periods
+
+## Examples
+
+### Basic Development Setup
+```bash
+WORKSPACE_DIR=./dev_workspace
+SWARMS_AUTOUPDATE_ON=true
+USE_TELEMETRY=true
+SWARMS_API_KEY=sk_test_xxxxxxxxxxxx
+```
+
+### Production Setup
+```bash
+WORKSPACE_DIR=/var/log/swarms/prod_workspace
+SWARMS_AUTOUPDATE_ON=false
+USE_TELEMETRY=true
+SWARMS_API_KEY=sk_prod_xxxxxxxxxxxx
+```
+
+### Testing Environment
+```bash
+WORKSPACE_DIR=./test_workspace
+SWARMS_AUTOUPDATE_ON=true
+USE_TELEMETRY=false
+SWARMS_API_KEY=sk_test_xxxxxxxxxxxx
+```
+
+## Troubleshooting
+
+Common issues and solutions:
+
+1. **Workspace Access Issues**:
+ - Ensure proper file permissions
+ - Verify the directory exists
+ - Check disk space availability
+
+2. **API Key Problems**:
+ - Confirm key is properly formatted
+ - Verify key hasn't expired
+ - Check for proper environment variable loading
+
+3. **Telemetry Issues**:
+ - Confirm network connectivity
+ - Verify firewall settings
+ - Check for proper boolean values
+
+4. **Auto-Update Issues**:
+ - Check internet connectivity
+ - Verify sufficient disk space
+ - Ensure proper permissions for updates
+ - Check system compatibility requirements
+
+## Additional Resources
+
+- [Swarms Framework Documentation](https://github.com/kyegomez/swarms)
+- [Swarms Analytics Dashboard](https://swarms.ai)
+- [API Reference](https://swarms.ai/docs/api)
\ No newline at end of file
diff --git a/docs/swarms/structs/agent.md b/docs/swarms/structs/agent.md
index 97ab465b..6413dd2c 100644
--- a/docs/swarms/structs/agent.md
+++ b/docs/swarms/structs/agent.md
@@ -132,6 +132,16 @@ graph TD
| `data_memory` | Optional callable for data memory operations. |
| `load_yaml_path` | String representing the path to a YAML file for loading configurations. |
| `auto_generate_prompt` | Boolean indicating whether to automatically generate prompts. |
+| `rag_every_loop` | Boolean indicating whether to query RAG database for context on every loop. |
+| `plan_enabled` | Boolean indicating whether planning functionality is enabled. |
+| `artifacts_on` | Boolean indicating whether to save artifacts from agent execution. |
+| `artifacts_output_path` | File path where artifacts should be saved. |
+| `artifacts_file_extension` | File extension to use for saved artifacts. |
+| `device` | Device to run computations on ("cpu" or "gpu"). |
+| `all_cores` | Boolean indicating whether to use all CPU cores. |
+| `device_id` | ID of the GPU device to use if running on GPU. |
+| `scheduled_run_date` | Optional datetime for scheduling future agent runs. |
+
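+A minimal sketch of constructing an `Agent` with several of the new attributes (keyword names follow the table above; the model setup mirrors other repository examples and is an assumption rather than a requirement):
+
+```python
+import os
+
+from swarms import Agent
+from swarm_models import OpenAIChat
+
+model = OpenAIChat(
+    openai_api_key=os.getenv("OPENAI_API_KEY"),
+    model_name="gpt-4o-mini",
+    temperature=0.1,
+)
+
+agent = Agent(
+    agent_name="Artifact-Demo-Agent",
+    system_prompt="Summarize the provided data and cite key figures.",
+    llm=model,
+    max_loops=1,
+    # New attributes documented above
+    rag_every_loop=False,
+    artifacts_on=True,
+    artifacts_output_path="artifacts/",
+    artifacts_file_extension=".md",
+    device="cpu",
+    all_cores=True,
+)
+```
+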
## `Agent` Methods
@@ -200,6 +210,20 @@ graph TD
| `handle_sop_ops()` | Handles operations related to standard operating procedures. | None | `agent.handle_sop_ops()` |
| `agent_output_type(responses)` | Processes and returns the agent's output based on the specified output type. | `responses` (list): List of responses. | `formatted_output = agent.agent_output_type(responses)` |
| `check_if_no_prompt_then_autogenerate(task)` | Checks if a system prompt is not set and auto-generates one if needed. | `task` (str): The task to use for generating a prompt. | `agent.check_if_no_prompt_then_autogenerate("Analyze data")` |
+| `check_if_no_prompt_then_autogenerate(task)` | Checks if auto_generate_prompt is enabled and generates a prompt by combining agent name, description and system prompt | `task` (str, optional): Task to use as fallback | `agent.check_if_no_prompt_then_autogenerate("Analyze data")` |
+| `handle_artifacts(response, output_path, extension)` | Handles saving artifacts from agent execution | `response` (str): Agent response<br>`output_path` (str): Output path<br>`extension` (str): File extension | `agent.handle_artifacts(response, "outputs/", ".txt")` |
+
+
+
+## Updated Run Method
+
+The `run` method now accepts the following additional parameters:
+
+| Method | Description | Inputs | Usage Example |
+|--------|-------------|--------|----------------|
+| `run(task, img=None, is_last=False, device="cpu", device_id=0, all_cores=True, scheduled_run_date=None)` | Runs the agent with specified parameters | `task` (str): Task to run<br>`img` (str, optional): Image path<br>`is_last` (bool): Whether this is the last task<br>`device` (str): Device to use<br>`device_id` (int): GPU ID<br>`all_cores` (bool): Use all CPU cores<br>`scheduled_run_date` (datetime, optional): Future run date | `agent.run("Analyze data", device="gpu", device_id=0)` |
+
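+For example, scheduling a later CPU run after an immediate GPU run might look like this (a sketch; parameter names follow the table above and the agent setup is assumed):
+
+```python
+from datetime import datetime, timedelta
+
+# Run immediately on GPU 0
+output = agent.run("Analyze data", device="gpu", device_id=0)
+
+# Schedule a CPU run roughly ten minutes from now, using all cores
+scheduled_output = agent.run(
+    "Generate the weekly report",
+    device="cpu",
+    all_cores=True,
+    scheduled_run_date=datetime.now() + timedelta(minutes=10),
+)
+```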
+
## Getting Started
@@ -538,5 +562,9 @@ print(agent.system_prompt)
8. Optimize token usage with `dynamic_context_window` and `tokens_checks` methods.
9. Use `concurrent` and `async` methods for performance-critical applications.
10. Regularly review and analyze feedback using the `analyze_feedback` method.
+11. Use `artifacts_on` to save important outputs from agent execution.
+12. Configure `device` and `device_id` appropriately for optimal performance.
+13. Enable `rag_every_loop` when continuous context from long-term memory is needed.
+14. Use `scheduled_run_date` for automated task scheduling.
+
By following these guidelines and leveraging the Swarm Agent's extensive features, you can create powerful, flexible, and efficient autonomous agents for a wide range of applications.
\ No newline at end of file
diff --git a/docs/swarms/structs/agent_rearrange.md b/docs/swarms/structs/agent_rearrange.md
index 2cfe5703..d7a8bb98 100644
--- a/docs/swarms/structs/agent_rearrange.md
+++ b/docs/swarms/structs/agent_rearrange.md
@@ -7,10 +7,22 @@ The `AgentRearrange` class represents a swarm of agents for rearranging tasks. I
| Attribute | Type | Description |
| --- | --- | --- |
-| `agents` | `dict` | A dictionary of agents, where the key is the agent's name and the value is the agent object. |
-| `flow` | `str` | The flow pattern of the tasks. |
-| `max_loops` | `int` | The maximum number of loops for the agents to run. |
-| `verbose` | `bool` | Whether to enable verbose logging or not. |
+| `id` | `str` | Unique identifier for the swarm |
+| `name` | `str` | Name of the swarm |
+| `description` | `str` | Description of the swarm's purpose |
+| `agents` | `dict` | Dictionary mapping agent names to Agent objects |
+| `flow` | `str` | Flow pattern defining task execution order |
+| `max_loops` | `int` | Maximum number of execution loops |
+| `verbose` | `bool` | Whether to enable verbose logging |
+| `memory_system` | `BaseVectorDatabase` | Memory system for storing agent interactions |
+| `human_in_the_loop` | `bool` | Whether human intervention is enabled |
+| `custom_human_in_the_loop` | `Callable` | Custom function for human intervention |
+| `return_json` | `bool` | Whether to return output in JSON format |
+| `output_type` | `OutputType` | Format of output ("all", "final", "list", or "dict") |
+| `docs` | `List[str]` | List of document paths to add to agent prompts |
+| `doc_folder` | `str` | Folder path containing documents to add to agent prompts |
+| `swarm_history` | `dict` | History of agent interactions |
+
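+A rough sketch of configuring a two-agent rearrangement (the `researcher` and `writer` Agent instances are assumed to be defined elsewhere, and the constructor arguments shown are illustrative):
+
+```python
+from swarms import AgentRearrange
+
+rearrange_system = AgentRearrange(
+    name="research-then-write",
+    description="Research a topic, then draft a summary",
+    agents=[researcher, writer],   # Agent instances defined elsewhere
+    flow="Researcher -> Writer",   # execution order, referencing agents by name
+    max_loops=1,
+    output_type="final",
+)
+
+result = rearrange_system.run("Summarize recent developments in agent frameworks")
+```
+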
## Methods
-------
@@ -62,20 +74,55 @@ Validates the flow pattern.
- `bool`: `True` if the flow pattern is valid.
-### `run(self, task: str, *args, **kwargs)`
+### `run(self, task: str = None, img: str = None, device: str = "cpu", device_id: int = 1, all_cores: bool = True, all_gpus: bool = False, *args, **kwargs)`
-Runs the swarm to rearrange the tasks.
+Executes the agent rearrangement task with specified compute resources.
| Parameter | Type | Description |
| --- | --- | --- |
-| `task` | `str` | The initial task to be processed. |
-| `*args` | - | Additional positional arguments. |
-| `**kwargs` | - | Additional keyword arguments. |
+| `task` | `str` | The task to execute |
+| `img` | `str` | Path to input image if required |
+| `device` | `str` | Computing device to use ('cpu' or 'gpu') |
+| `device_id` | `int` | ID of specific device to use |
+| `all_cores` | `bool` | Whether to use all CPU cores |
+| `all_gpus` | `bool` | Whether to use all available GPUs |
**Returns:**
- `str`: The final processed task.
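+
+A sketch of invoking `run` with explicit compute settings (reusing the `rearrange_system` instance from the attributes example above):
+
+```python
+# CPU execution across all cores
+final_output = rearrange_system.run(
+    task="Draft a competitive analysis",
+    device="cpu",
+    all_cores=True,
+)
+
+# Single-GPU execution
+final_output = rearrange_system.run(
+    task="Draft a competitive analysis",
+    device="gpu",
+    device_id=0,
+)
+```
+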
+### `batch_run(self, tasks: List[str], img: Optional[List[str]] = None, batch_size: int = 10, device: str = "cpu", device_id: int = None, all_cores: bool = True, all_gpus: bool = False, *args, **kwargs)`
+
+Process multiple tasks in batches.
+
+| Parameter | Type | Description |
+| --- | --- | --- |
+| `tasks` | `List[str]` | List of tasks to process |
+| `img` | `List[str]` | Optional list of images corresponding to tasks |
+| `batch_size` | `int` | Number of tasks to process simultaneously |
+| `device` | `str` | Computing device to use |
+| `device_id` | `int` | Specific device ID if applicable |
+| `all_cores` | `bool` | Whether to use all CPU cores |
+| `all_gpus` | `bool` | Whether to use all available GPUs |
+
+
+
+### `concurrent_run(self, tasks: List[str], img: Optional[List[str]] = None, max_workers: Optional[int] = None, device: str = "cpu", device_id: int = None, all_cores: bool = True, all_gpus: bool = False, *args, **kwargs)`
+
+Process multiple tasks concurrently using ThreadPoolExecutor.
+
+| Parameter | Type | Description |
+| --- | --- | --- |
+| `tasks` | `List[str]` | List of tasks to process |
+| `img` | `List[str]` | Optional list of images corresponding to tasks |
+| `max_workers` | `int` | Maximum number of worker threads |
+| `device` | `str` | Computing device to use |
+| `device_id` | `int` | Specific device ID if applicable |
+| `all_cores` | `bool` | Whether to use all CPU cores |
+| `all_gpus` | `bool` | Whether to use all available GPUs |
+
+
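+A sketch of processing several tasks with `batch_run` and `concurrent_run` (same assumptions as the examples above):
+
+```python
+tasks = [
+    "Summarize the Q1 report",
+    "Summarize the Q2 report",
+    "Summarize the Q3 report",
+]
+
+# Sequential batches of up to 10 tasks at a time
+batch_results = rearrange_system.batch_run(tasks, batch_size=10, device="cpu")
+
+# Thread-pool based concurrency with a bounded number of workers
+concurrent_results = rearrange_system.concurrent_run(tasks, max_workers=3)
+```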
+
## Documentation for `rearrange` Function
======================================
@@ -247,18 +294,6 @@ Additionally, you can modify the `run` method of the `AgentRearrange` class to i
It's important to note that the `AgentRearrange` class and the `rearrange` function rely on the individual agents to process tasks correctly. The quality of the output will depend on the capabilities and configurations of the agents used in the swarm. Additionally, the `AgentRearrange` class does not provide any mechanisms for task prioritization or load balancing among the agents.
-## Future Improvements
--------------------
-
-Here are some potential future improvements for the `AgentRearrange` class and the `rearrange` function:
-
-- **Task Prioritization**: Implement a mechanism to prioritize tasks based on factors such as urgency, importance, or resource availability.
-- **Load Balancing**: Incorporate load balancing algorithms to distribute tasks among agents more efficiently, taking into account factors such as agent availability, performance, and resource utilization.
-- **Dynamic Flow Reconfiguration**: Allow for dynamic reconfiguration of the flow pattern during runtime, enabling the addition, removal, or reordering of agents based on specific conditions or events.
-- **Error Handling and Fault Tolerance**: Enhance error handling and fault tolerance mechanisms to gracefully handle agent failures, task timeouts, or other exceptional situations.
-- **Monitoring and Metrics**: Implement monitoring and metrics collection to track the performance and efficiency of the swarm, as well as individual agent performance.
-- **Scalability**: Enhance the scalability of the system to handle larger numbers of agents and tasks efficiently.
-
## Conclusion
----------
diff --git a/docs/swarms/structs/group_chat.md b/docs/swarms/structs/group_chat.md
index b4d805a1..71254953 100644
--- a/docs/swarms/structs/group_chat.md
+++ b/docs/swarms/structs/group_chat.md
@@ -1,238 +1,231 @@
-# GroupChat
+# GroupChat Class Documentation
-The `GroupChat` class is designed to manage a group chat session involving multiple agents. This class handles initializing the conversation, selecting the next speaker, resetting the chat, and executing the chat rounds, providing a structured approach to managing a dynamic and interactive conversation.
-### Key Concepts
+The GroupChat class manages multi-agent conversations with state persistence, comprehensive logging, and flexible agent configurations. It supports both Agent class instances and callable functions, making it versatile for different use cases.
+
+## Installation
+```bash
+pip install swarms python-dotenv pydantic
+```
-- **Agents**: Entities participating in the group chat.
-- **Conversation Management**: Handling the flow of conversation, selecting speakers, and maintaining chat history.
-- **Round-based Execution**: Managing the chat in predefined rounds.
## Attributes
-### Arguments
-
-| Argument | Type | Default | Description |
-|---------------------|----------------------|-------------|-------------|
-| `agents` | `List[Agent]` | `None` | List of agents participating in the group chat. |
-| `max_rounds` | `int` | `10` | Maximum number of chat rounds. |
-| `admin_name` | `str` | `"Admin"` | Name of the admin user. |
-| `group_objective` | `str` | `None` | Objective of the group chat. |
-| `selector_agent` | `Agent` | `None` | Agent responsible for selecting the next speaker. |
-| `rules` | `str` | `None` | Rules for the group chat. |
-| `*args` | | | Variable length argument list. |
-| `**kwargs` | | | Arbitrary keyword arguments. |
-
-### Attributes
-
-| Attribute | Type | Description |
-|---------------------|----------------------|-------------|
-| `agents` | `List[Agent]` | List of agents participating in the group chat. |
-| `max_rounds` | `int` | Maximum number of chat rounds. |
-| `admin_name` | `str` | Name of the admin user. |
-| `group_objective` | `str` | Objective of the group chat. |
-| `selector_agent` | `Agent` | Agent responsible for selecting the next speaker. |
-| `messages` | `Conversation` | Conversation object for storing the chat messages. |
+| Attribute | Type | Description |
+|-----------|------|-------------|
+| `state_path` | `str` | Path for saving/loading chat state |
+| `wrapped_agents` | `List[AgentWrapper]` | List of wrapped agent instances |
+| `selector_agent` | `AgentWrapper` | Agent responsible for speaker selection |
+| `state` | `GroupChatState` | Current state of the group chat |
## Methods
-### __init__
-
-Initializes the group chat with the given parameters.
-
-**Examples:**
+### Core Methods
```python
-agents = [Agent(name="Agent 1"), Agent(name="Agent 2")]
-group_chat = GroupChat(agents=agents, max_rounds=5, admin_name="GroupAdmin")
-```
-
-### agent_names
-
-Returns the names of the agents in the group chat.
+def run(self, task: str) -> str:
+ """Execute the group chat conversation"""
-**Returns:**
+def save_state(self) -> None:
+ """Save current state to disk"""
-| Return Type | Description |
-|-------------|-------------|
-| `List[str]` | List of agent names. |
+@classmethod
+def load_state(cls, state_path: str) -> 'GroupChat':
+ """Load GroupChat from saved state"""
-**Examples:**
+def get_conversation_summary(self) -> Dict[str, Any]:
+ """Return a summary of the conversation"""
-```python
-names = group_chat.agent_names
-print(names) # Output: ['Agent 1', 'Agent 2']
+def export_conversation(self, format: str = "json") -> Union[str, Dict]:
+ """Export the conversation in specified format"""
```
-### reset
-
-Resets the group chat by clearing the message history.
-
-**Examples:**
+### Internal Methods
```python
-group_chat.reset()
-```
-
-### agent_by_name
-
-Finds an agent whose name is contained within the given name string.
-
-**Arguments:**
+def _log_interaction(self, agent_name: str, position: int, input_text: str, output_text: str) -> None:
+ """Log a single interaction"""
-| Parameter | Type | Description |
-|-----------|--------|-------------|
-| `name` | `str` | Name string to search for. |
+def _add_message(self, role: str, content: str) -> None:
+ """Add a message to the conversation history"""
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `Agent` | Agent object with a name contained in the given name string. |
-
-**Raises:**
-
-- `ValueError`: If no agent is found with a name contained in the given name string.
+def select_next_speaker(self, last_speaker: AgentWrapper) -> AgentWrapper:
+ """Select the next speaker using the selector agent"""
+```
-**Examples:**
+## Usage Examples
+### 1. Basic Setup with Two Agents
```python
-agent = group_chat.agent_by_name("Agent 1")
-print(agent.agent_name) # Output: 'Agent 1'
+import os
+from swarms import Agent, GroupChat
+from swarm_models import OpenAIChat
+
+# Initialize OpenAI
+api_key = os.getenv("OPENAI_API_KEY")
+model = OpenAIChat(openai_api_key=api_key, model_name="gpt-4o-mini")
+
+# Create agents
+analyst = Agent(
+ agent_name="Financial-Analyst",
+ system_prompt="You are a financial analyst...",
+ llm=model
+)
+
+advisor = Agent(
+ agent_name="Investment-Advisor",
+ system_prompt="You are an investment advisor...",
+ llm=model
+)
+
+# Create group chat
+chat = GroupChat(
+ name="Investment Team",
+ agents=[analyst, advisor],
+ max_rounds=5,
+ group_objective="Provide investment advice"
+)
+
+response = chat.run("What's the best investment strategy for retirement?")
```
-### next_agent
-
-Returns the next agent in the list.
-
-**Arguments:**
-
-| Parameter | Type | Description |
-|-----------|--------|-------------|
-| `agent` | `Agent`| Current agent. |
-
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `Agent` | Next agent in the list. |
-
-**Examples:**
-
+### 2. Advanced Setup with State Management
```python
-current_agent = group_chat.agents[0]
-next_agent = group_chat.next_agent(current_agent)
-print(next_agent.agent_name) # Output: Name of the next agent
+# Create group chat with state persistence
+chat = GroupChat(
+ name="Investment Advisory Team",
+ description="Expert team for financial planning",
+ agents=[analyst, advisor, tax_specialist],
+ max_rounds=10,
+ admin_name="Senior Advisor",
+ group_objective="Provide comprehensive financial planning",
+ state_path="investment_chat_state.json",
+ rules="1. Always provide sources\n2. Be concise\n3. Focus on practical advice"
+)
+
+# Run chat and save state
+response = chat.run("Create a retirement plan for a 35-year old")
+chat.save_state()
+
+# Load existing chat state
+loaded_chat = GroupChat.load_state("investment_chat_state.json")
```
-### select_speaker_msg
-
-Returns the message for selecting the next speaker.
-
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `str` | Prompt message for selecting the next speaker. |
-
-**Examples:**
-
+### 3. Using Custom Callable Agents
```python
-message = group_chat.select_speaker_msg()
-print(message)
+def custom_agent(input_text: str) -> str:
+ # Custom logic here
+ return f"Processed: {input_text}"
+
+# Mix of regular agents and callable functions
+chat = GroupChat(
+ name="Hybrid Team",
+ agents=[analyst, custom_agent],
+ max_rounds=3
+)
```
-### select_speaker
-
-Selects the next speaker.
-
-**Arguments:**
-
-| Parameter | Type | Description |
-|----------------------|--------|-------------|
-| `last_speaker_agent` | `Agent`| Last speaker in the conversation. |
-| `selector_agent` | `Agent`| Agent responsible for selecting the next speaker. |
-
-**Returns:**
+### 4. Export and Analysis
+```python
+# Run chat
+chat.run("Analyze market conditions")
-| Return Type | Description |
-|-------------|-------------|
-| `Agent` | Next speaker. |
+# Get summary
+summary = chat.get_conversation_summary()
+print(summary)
-**Examples:**
+# Export in different formats
+json_conv = chat.export_conversation(format="json")
+text_conv = chat.export_conversation(format="text")
+```
+### 5. Advanced Configuration with Custom Selector
```python
-next_speaker = group_chat.select_speaker(last_speaker_agent, selector_agent)
-print(next_speaker.agent_name)
+class CustomSelector(Agent):
+ def run(self, input_text: str) -> str:
+ # Custom selection logic
+ return "Financial-Analyst"
+
+chat = GroupChat(
+ name="Custom Selection Team",
+ agents=[analyst, advisor],
+ selector_agent=CustomSelector(
+ agent_name="Custom-Selector",
+ system_prompt="Select the next speaker based on expertise",
+ llm=model
+ ),
+ max_rounds=5
+)
```
-### _participant_roles
-
-Returns the roles of the participants.
-
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `str` | Participant roles. |
-
-**Examples:**
-
+### 6. Debugging Setup
```python
-roles = group_chat._participant_roles()
-print(roles)
+import logging
+
+# Configure logging
+logging.basicConfig(level=logging.DEBUG)
+
+chat = GroupChat(
+ name="Debug Team",
+ agents=[analyst, advisor],
+ max_rounds=3,
+ state_path="debug_chat.json"
+)
+
+# Run with detailed logging
+try:
+ response = chat.run("Complex query")
+except Exception as e:
+    logging.error(f"Chat failed: {str(e)}")
+ # Access last successful state
+ state = chat.state
```
-### __call__
+## Error Handling
-Executes the group chat as a function.
-
-**Arguments:**
-
-| Parameter | Type | Description |
-|-----------|--------|-------------|
-| `task` | `str` | Task to be performed. |
-
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `str` | Reply from the last speaker. |
-
-**Examples:**
+The GroupChat class includes comprehensive error handling:
```python
-response = group_chat(task="Discuss the project plan")
-print(response)
+try:
+ chat = GroupChat(agents=[analyst]) # Will raise ValueError
+except ValueError as e:
+ print("Configuration error:", str(e))
+
+try:
+ response = chat.run("Query")
+except Exception as e:
+ # Access error state
+ error_summary = chat.get_conversation_summary()
+ print("Execution error:", str(e))
+ print("State at error:", error_summary)
```
-### Additional Examples
+## Best Practices
-#### Example 1: Initializing and Running a Group Chat
+1. **State Management**:
+ - Always specify a `state_path` for important conversations
+ - Use `save_state()` after critical operations
+ - Implement regular state backups for long conversations
-```python
-agents = [Agent(name="Agent 1"), Agent(name="Agent 2"), Agent(name="Agent 3")]
-selector_agent = Agent(name="Selector")
-group_chat = GroupChat(agents=agents, selector_agent=selector_agent, max_rounds=3, group_objective="Discuss the quarterly goals.")
+2. **Agent Configuration**:
+ - Provide clear system prompts for each agent
+ - Use descriptive agent names
+ - Consider agent expertise when setting the group objective
-response = group_chat(task="Let's start the discussion on quarterly goals.")
-print(response)
-```
+3. **Performance**:
+ - Keep `max_rounds` reasonable (5-10 for most cases)
+ - Use early stopping conditions when possible
+ - Monitor conversation length and complexity
-#### Example 2: Resetting the Group Chat
+4. **Error Handling**:
+ - Always wrap chat execution in try-except blocks
+ - Implement proper logging
+ - Save states before potentially risky operations
-```python
-group_chat.reset()
-```
-
-#### Example 3: Selecting the Next Speaker
-
-```python
-last_speaker = group_chat.agents[0]
-next_speaker = group_chat.select_speaker(last_speaker_agent=last_speaker, selector_agent=selector_agent)
-print(next_speaker.agent_name)
-```
+## Limitations
-## Summary
+- Agents must either have a `run` method or be callable
+- State files can grow large with many interactions
+- Selector agent may need optimization for large agent groups
+- Real-time streaming not supported in basic configuration
-The `GroupChat` class offers a structured approach to managing a group chat involving multiple agents. With functionalities for initializing conversations, selecting speakers, and handling chat rounds, it provides a robust framework for dynamic and interactive discussions. This makes it an essential tool for applications requiring coordinated communication among multiple agents.
\ No newline at end of file
diff --git a/example.py b/example.py
index 8f6f22da..7647d1cd 100644
--- a/example.py
+++ b/example.py
@@ -31,20 +31,20 @@ agent = Agent(
saved_state_path="finance_agent.json",
user_name="swarms_corp",
retry_attempts=1,
+ streaming_on=True,
context_length=200000,
return_step_meta=True,
output_type="json", # "json", "dict", "csv" OR "string" soon "yaml" and
- streaming_on=False,
auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
artifacts_on=True,
artifacts_output_path="roth_ira_report",
artifacts_file_extension=".txt",
max_tokens=8000,
+ return_history=True,
)
-print(
- agent.run(
- "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria. Create a report on this question."
- )
+agent.run(
+ "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria. Create a report on this question.",
+ all_cores=True,
)
diff --git a/new_features_examples/agent_showcase_example.py b/new_features_examples/agent_showcase_example.py
new file mode 100644
index 00000000..b78abf81
--- /dev/null
+++ b/new_features_examples/agent_showcase_example.py
@@ -0,0 +1,68 @@
+import os
+
+from swarms import Agent
+
+from swarm_models import OpenAIChat
+from swarms.structs.agents_available import showcase_available_agents
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+# Initialize the Claims Director agent
+director_agent = Agent(
+ agent_name="ClaimsDirector",
+ agent_description="Oversees and coordinates the medical insurance claims processing workflow",
+ system_prompt="""You are the Claims Director responsible for managing the medical insurance claims process.
+ Assign and prioritize tasks between claims processors and auditors. Ensure claims are handled efficiently
+ and accurately while maintaining compliance with insurance policies and regulations.""",
+ llm=model,
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="director_agent.json",
+)
+
+# Initialize Claims Processor agent
+processor_agent = Agent(
+ agent_name="ClaimsProcessor",
+ agent_description="Reviews and processes medical insurance claims, verifying coverage and eligibility",
+ system_prompt="""Review medical insurance claims for completeness and accuracy. Verify patient eligibility,
+ coverage details, and process claims according to policy guidelines. Flag any claims requiring special review.""",
+ llm=model,
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="processor_agent.json",
+)
+
+# Initialize Claims Auditor agent
+auditor_agent = Agent(
+ agent_name="ClaimsAuditor",
+ agent_description="Audits processed claims for accuracy and compliance with policies and regulations",
+ system_prompt="""Audit processed insurance claims for accuracy and compliance. Review claim decisions,
+ identify potential fraud or errors, and ensure all processing follows established guidelines and regulations.""",
+ llm=model,
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="auditor_agent.json",
+)
+
+# Create a list of agents
+agents = [director_agent, processor_agent, auditor_agent]
+
+print(showcase_available_agents(agents=agents))
diff --git a/new_features_examples/async_agents.py b/new_features_examples/async_agents.py
new file mode 100644
index 00000000..8734cd8a
--- /dev/null
+++ b/new_features_examples/async_agents.py
@@ -0,0 +1,56 @@
+import os
+
+from dotenv import load_dotenv
+from swarm_models import OpenAIChat
+
+from swarms import Agent
+from swarms.prompts.finance_agent_sys_prompt import (
+ FINANCIAL_AGENT_SYS_PROMPT,
+)
+from new_features_examples.async_executor import HighSpeedExecutor
+
+load_dotenv()
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops=1,
+ # autosave=True,
+ # dashboard=False,
+ # verbose=True,
+ # dynamic_temperature_enabled=True,
+ # saved_state_path="finance_agent.json",
+ # user_name="swarms_corp",
+ # retry_attempts=1,
+ # context_length=200000,
+ # return_step_meta=True,
+ # output_type="json", # "json", "dict", "csv" OR "string" soon "yaml" and
+ # auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
+ # # artifacts_on=True,
+ # artifacts_output_path="roth_ira_report",
+ # artifacts_file_extension=".txt",
+ # max_tokens=8000,
+ # return_history=True,
+)
+
+
+def execute_agent(
+ task: str = "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria. Create a report on this question.",
+):
+ return agent.run(task)
+
+
+executor = HighSpeedExecutor()
+results = executor.run(execute_agent, 2)
+
+print(results)
diff --git a/new_features_examples/async_executor.py b/new_features_examples/async_executor.py
new file mode 100644
index 00000000..e9fcfa4e
--- /dev/null
+++ b/new_features_examples/async_executor.py
@@ -0,0 +1,131 @@
+import asyncio
+import multiprocessing as mp
+import time
+from functools import partial
+from typing import Any, Dict, Union
+
+
+class HighSpeedExecutor:
+ def __init__(self, num_processes: int = None):
+ """
+ Initialize the executor with configurable number of processes.
+ If num_processes is None, it uses CPU count.
+ """
+ self.num_processes = num_processes or mp.cpu_count()
+
+ async def _worker(
+ self,
+ queue: asyncio.Queue,
+ func: Any,
+ *args: Any,
+ **kwargs: Any,
+ ):
+ """Async worker that processes tasks from the queue"""
+ while True:
+ try:
+                # Wait for the next task from the queue
+ await queue.get()
+ await asyncio.get_event_loop().run_in_executor(
+ None, partial(func, *args, **kwargs)
+ )
+ queue.task_done()
+ except asyncio.CancelledError:
+ break
+
+ async def _distribute_tasks(
+ self, num_tasks: int, queue: asyncio.Queue
+ ):
+ """Distribute tasks across the queue"""
+ for i in range(num_tasks):
+ await queue.put(i)
+
+ async def execute_batch(
+ self,
+ func: Any,
+ num_executions: int,
+ *args: Any,
+ **kwargs: Any,
+ ) -> Dict[str, Union[int, float]]:
+ """
+ Execute the given function multiple times concurrently.
+
+ Args:
+ func: The function to execute
+ num_executions: Number of times to execute the function
+ *args, **kwargs: Arguments to pass to the function
+
+ Returns:
+ A dictionary containing the number of executions, duration, and executions per second.
+ """
+ queue = asyncio.Queue()
+
+ # Create worker tasks
+ workers = [
+ asyncio.create_task(
+ self._worker(queue, func, *args, **kwargs)
+ )
+ for _ in range(self.num_processes)
+ ]
+
+ # Start timing
+ start_time = time.perf_counter()
+
+ # Distribute tasks
+ await self._distribute_tasks(num_executions, queue)
+
+ # Wait for all tasks to complete
+ await queue.join()
+
+ # Cancel workers
+ for worker in workers:
+ worker.cancel()
+
+ # Wait for all workers to finish
+ await asyncio.gather(*workers, return_exceptions=True)
+
+ end_time = time.perf_counter()
+ duration = end_time - start_time
+
+ return {
+ "executions": num_executions,
+ "duration": duration,
+ "executions_per_second": num_executions / duration,
+ }
+
+ def run(
+ self,
+ func: Any,
+ num_executions: int,
+ *args: Any,
+ **kwargs: Any,
+ ):
+ return asyncio.run(
+ self.execute_batch(func, num_executions, *args, **kwargs)
+ )
+
+
+# def example_function(x: int = 0) -> int:
+# """Example function to execute"""
+# return x * x
+
+
+# async def main():
+# # Create executor with number of CPU cores
+# executor = HighSpeedExecutor()
+
+# # Execute the function 1000 times
+# result = await executor.execute_batch(
+# example_function, num_executions=1000, x=42
+# )
+
+# print(
+# f"Completed {result['executions']} executions in {result['duration']:.2f} seconds"
+# )
+# print(
+# f"Rate: {result['executions_per_second']:.2f} executions/second"
+# )
+
+
+# if __name__ == "__main__":
+# # Run the async main function
+# asyncio.run(main())
diff --git a/new_features_examples/auto_agent.py b/new_features_examples/auto_agent.py
new file mode 100644
index 00000000..712be089
--- /dev/null
+++ b/new_features_examples/auto_agent.py
@@ -0,0 +1,188 @@
+import json
+import os
+from contextlib import suppress
+from typing import Any, Callable, Dict, Optional, Type, Union
+
+from dotenv import load_dotenv
+from pydantic import BaseModel, Field, ValidationError, create_model
+from swarm_models.openai_function_caller import OpenAIFunctionCaller
+
+
+class DynamicParser:
+ @staticmethod
+ def extract_fields(model: Type[BaseModel]) -> Dict[str, Any]:
+ return {
+ field_name: (field.annotation, ... if field.is_required() else None)
+ for field_name, field in model.model_fields.items()
+ }
+
+ @staticmethod
+ def create_partial_model(model: Type[BaseModel], data: Dict[str, Any]) -> Type[BaseModel]:
+ fields = {
+ field_name: (field.annotation, ... if field.is_required() else None)
+ for field_name, field in model.model_fields.items()
+ if field_name in data
+ }
+ return create_model(f"Partial{model.__name__}", **fields)
+
+ @classmethod
+ def parse(cls, data: Union[str, Dict[str, Any]], model: Type[BaseModel]) -> Optional[BaseModel]:
+ if isinstance(data, str):
+ try:
+ data = json.loads(data)
+ except json.JSONDecodeError:
+ return None
+
+ # Try full model first
+ with suppress(ValidationError):
+ return model.model_validate(data)
+
+ # Create and try partial model
+ partial_model = cls.create_partial_model(model, data)
+ with suppress(ValidationError):
+ return partial_model.model_validate(data)
+
+ return None
+
+
+load_dotenv()
+
+# Define the Thoughts schema
+class Thoughts(BaseModel):
+ text: str = Field(..., description="Current thoughts or observations regarding the task.")
+ reasoning: str = Field(..., description="Logical reasoning behind the thought process.")
+ plan: str = Field(..., description="A short bulleted list that conveys the immediate and long-term plan.")
+ criticism: str = Field(..., description="Constructive self-criticism to improve future responses.")
+ speak: str = Field(..., description="A concise summary of thoughts intended for the user.")
+
+# Define the Command schema
+class Command(BaseModel):
+ name: str = Field(..., description="Command name to execute from the provided list of commands.")
+ args: Dict[str, Any] = Field(..., description="Arguments required to execute the command.")
+
+# Define the AgentResponse schema
+class AgentResponse(BaseModel):
+ thoughts: Thoughts = Field(..., description="The agent's current thoughts and reasoning.")
+ command: Command = Field(..., description="The command to execute along with its arguments.")
+
+
+
+# Define tool functions
+def fluid_api_command(task: str):
+ """Execute a fluid API request."""
+    # fluid_api_request is not imported in this example, so return a stubbed
+    # response instead of calling the real API.
+    # response = fluid_api_request(task)
+    response = {"status": "success", "message": f"Fluid API request executed for task: {task}"}
+    print(json.dumps(response, indent=4))
+    return response
+
+
+def send_tweet_command(text: str):
+ """Simulate sending a tweet."""
+ print(f"Tweet sent: {text}")
+ return {"status": "success", "message": f"Tweet sent: {text}"}
+
+
+def do_nothing_command():
+ """Do nothing."""
+ print("Doing nothing...")
+ return {"status": "success", "message": "No action taken."}
+
+
+def task_complete_command(reason: str):
+ """Mark the task as complete and provide a reason."""
+ print(f"Task completed: {reason}")
+ return {"status": "success", "message": f"Task completed: {reason}"}
+
+
+# Dynamic command execution
+def execute_command(name: str, args: Dict[str, Any]):
+ """Dynamically execute a command based on its name and arguments."""
+ command_map: Dict[str, Callable] = {
+ "fluid_api": lambda **kwargs: fluid_api_command(task=kwargs.get("task")),
+ "send_tweet": lambda **kwargs: send_tweet_command(text=kwargs.get("text")),
+ "do_nothing": lambda **kwargs: do_nothing_command(),
+ "task_complete": lambda **kwargs: task_complete_command(reason=kwargs.get("reason")),
+ }
+
+ if name not in command_map:
+ raise ValueError(f"Unknown command: {name}")
+
+ # Execute the command with the provided arguments
+ return command_map[name](**args)
+
+
+def parse_and_execute_command(response: Union[str, Dict[str, Any]], base_model: Type[BaseModel] = AgentResponse) -> Any:
+ """Enhanced command parser with flexible input handling"""
+ parsed = DynamicParser.parse(response, base_model)
+ if not parsed:
+ raise ValueError("Failed to parse response")
+
+ if hasattr(parsed, 'command'):
+ command_name = parsed.command.name
+ command_args = parsed.command.args
+ return execute_command(command_name, command_args)
+
+ return parsed
+
+
+ainame = "AutoAgent"
+userprovided = "assistant"
+
+SYSTEM_PROMPT = f"""
+You are {ainame}, an advanced and autonomous {userprovided}.
+Your role is to make decisions and complete tasks independently without seeking user assistance. Leverage your strengths as an LLM to solve tasks efficiently, adhering strictly to the commands and resources provided.
+
+### GOALS:
+1. {userprovided}
+2. Execute tasks with precision and efficiency.
+3. Ensure outputs are actionable and aligned with the user's objectives.
+4. Continuously optimize task strategies for maximum effectiveness.
+5. Maintain reliability and consistency in all responses.
+
+### CONSTRAINTS:
+1. Memory limit: ~4000 words for short-term memory. Save essential information to files immediately to avoid loss.
+2. Independent decision-making: Do not rely on user assistance.
+3. Exclusively use commands in double quotes (e.g., "command name").
+4. Use subprocesses for commands that may take longer than a few minutes.
+5. Ensure all outputs strictly adhere to the specified JSON response format.
+
+### COMMANDS:
+1. Fluid API: "fluid_api", args: "method": "", "url": "", "headers": "", "body": ""
+2. Send Tweet: "send_tweet", args: "text": ""
+3. Do Nothing: "do_nothing", args:
+4. Task Complete (Shutdown): "task_complete", args: "reason": ""
+
+### RESOURCES:
+1. Internet access for real-time information and data gathering.
+2. Long-term memory management for storing critical information.
+3. Access to GPT-3.5-powered Agents for delegating tasks.
+4. File handling capabilities for output storage and retrieval.
+
+### PERFORMANCE EVALUATION:
+1. Continuously analyze and reflect on actions to ensure optimal task completion.
+2. Self-critique decisions and strategies constructively to identify areas for improvement.
+3. Ensure every command serves a clear purpose and minimizes resource usage.
+4. Complete tasks in the least number of steps, balancing speed and accuracy.
+
+### RESPONSE FORMAT:
+Always respond in a strict JSON format as described below. Ensure your responses can be parsed with Python's `json.loads`:
+"""
+
+# Initialize the OpenAIFunctionCaller
+model = OpenAIFunctionCaller(
+ system_prompt=SYSTEM_PROMPT,
+ max_tokens=4000,
+ temperature=0.9,
+ base_model=AgentResponse, # Pass the Pydantic schema as the base model
+ parallel_tool_calls=False,
+ openai_api_key=os.getenv("OPENAI_API_KEY")
+)
+
+# Example usage
+user_input = (
+ "Analyze the provided Python code for inefficiencies, generate suggestions for improvements, "
+ "and provide optimized code."
+)
+
+response = model.run(user_input)
+response = parse_and_execute_command(response)
+print(response)
diff --git a/new_features_examples/auto_swarm_router.py b/new_features_examples/auto_swarm_router.py
new file mode 100644
index 00000000..4ca3714f
--- /dev/null
+++ b/new_features_examples/auto_swarm_router.py
@@ -0,0 +1,120 @@
+import os
+from dotenv import load_dotenv
+from swarms import Agent
+from swarm_models import OpenAIChat
+from swarms.structs.swarm_router import SwarmRouter
+
+load_dotenv()
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("GROQ_API_KEY")
+
+# Model
+model = OpenAIChat(
+ openai_api_base="https://api.groq.com/openai/v1",
+ openai_api_key=api_key,
+ model_name="llama-3.1-70b-versatile",
+ temperature=0.1,
+)
+
+
+# Initialize specialized agents
+data_extractor_agent = Agent(
+ agent_name="Data-Extractor",
+ system_prompt="You are a data extraction specialist. Extract relevant information from provided content.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="data_extractor_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+summarizer_agent = Agent(
+ agent_name="Document-Summarizer",
+ system_prompt="You are a document summarization specialist. Provide clear and concise summaries.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="summarizer_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+financial_analyst_agent = Agent(
+ agent_name="Financial-Analyst",
+ system_prompt="You are a financial analysis specialist. Analyze financial aspects of content.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="financial_analyst_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+market_analyst_agent = Agent(
+ agent_name="Market-Analyst",
+ system_prompt="You are a market analysis specialist. Analyze market-related aspects.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="market_analyst_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+operational_analyst_agent = Agent(
+ agent_name="Operational-Analyst",
+ system_prompt="You are an operational analysis specialist. Analyze operational aspects.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="operational_analyst_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+# Initialize the SwarmRouter
+router = SwarmRouter(
+ name="pe-document-analysis-swarm",
+ description="Analyze documents for private equity due diligence and investment decision-making",
+ max_loops=1,
+ agents=[
+ data_extractor_agent,
+ summarizer_agent,
+ financial_analyst_agent,
+ market_analyst_agent,
+ operational_analyst_agent,
+ ],
+    swarm_type="SequentialWorkflow",  # or "ConcurrentWorkflow"
+ auto_generate_prompts=True,
+ output_type="all",
+)
+
+# Example usage
+if __name__ == "__main__":
+ # Run a comprehensive private equity document analysis task
+ result = router.run(
+ "Where is the best place to find template term sheets for series A startups. Provide links and references"
+ )
+ print(result)
diff --git a/new_features_examples/concurrent_mix.py b/new_features_examples/concurrent_mix.py
new file mode 100644
index 00000000..e072eccb
--- /dev/null
+++ b/new_features_examples/concurrent_mix.py
@@ -0,0 +1,96 @@
+import os
+
+from swarm_models import OpenAIChat
+
+from swarms import Agent, run_agents_with_tasks_concurrently
+
+# Fetch the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+# Initialize agents for different roles
+delaware_ccorp_agent = Agent(
+ agent_name="Delaware-CCorp-Hiring-Agent",
+ system_prompt="""
+ Create a comprehensive hiring description for a Delaware C Corporation,
+ including all relevant laws and regulations, such as the Delaware General
+ Corporation Law (DGCL) and the Delaware Corporate Law. Ensure the description
+ covers the requirements for hiring employees, contractors, and officers,
+ including the necessary paperwork, tax obligations, and benefits. Also,
+ outline the procedures for compliance with Delaware's employment laws,
+ including anti-discrimination laws, workers' compensation, and unemployment
+ insurance. Provide guidance on how to navigate the complexities of Delaware's
+ corporate law and ensure that all hiring practices are in compliance with
+ state and federal regulations.
+ """,
+ llm=model,
+ max_loops=1,
+ autosave=False,
+ dashboard=False,
+ verbose=True,
+ output_type="str",
+ artifacts_on=True,
+ artifacts_output_path="delaware_ccorp_hiring_description.md",
+ artifacts_file_extension=".md",
+)
+
+indian_foreign_agent = Agent(
+ agent_name="Indian-Foreign-Hiring-Agent",
+ system_prompt="""
+ Create a comprehensive hiring description for an Indian or foreign country,
+ including all relevant laws and regulations, such as the Indian Contract Act,
+ the Indian Labour Laws, and the Foreign Exchange Management Act (FEMA).
+ Ensure the description covers the requirements for hiring employees,
+ contractors, and officers, including the necessary paperwork, tax obligations,
+ and benefits. Also, outline the procedures for compliance with Indian and
+ foreign employment laws, including anti-discrimination laws, workers'
+ compensation, and unemployment insurance. Provide guidance on how to navigate
+ the complexities of Indian and foreign corporate law and ensure that all hiring
+ practices are in compliance with state and federal regulations. Consider the
+ implications of hiring foreign nationals and the requirements for obtaining
+ necessary visas and work permits.
+ """,
+ llm=model,
+ max_loops=1,
+ autosave=False,
+ dashboard=False,
+ verbose=True,
+ output_type="str",
+ artifacts_on=True,
+ artifacts_output_path="indian_foreign_hiring_description.md",
+ artifacts_file_extension=".md",
+)
+
+# List of agents and corresponding tasks
+agents = [delaware_ccorp_agent, indian_foreign_agent]
+tasks = [
+ """
+ Create a comprehensive hiring description for an Agent Engineer, including
+ required skills and responsibilities. Ensure the description covers the
+ necessary technical expertise, such as proficiency in AI/ML frameworks,
+ programming languages, and data structures. Outline the key responsibilities,
+ including designing and developing AI agents, integrating with existing systems,
+ and ensuring scalability and performance.
+ """,
+ """
+ Generate a detailed job description for a Prompt Engineer, including
+ required skills and responsibilities. Ensure the description covers the
+ necessary technical expertise, such as proficiency in natural language processing,
+ machine learning, and software development. Outline the key responsibilities,
+ including designing and optimizing prompts for AI systems, ensuring prompt
+ quality and consistency, and collaborating with cross-functional teams.
+ """,
+]
+
+# Run agents with tasks concurrently
+results = run_agents_with_tasks_concurrently(
+ agents, tasks, all_cores=True, device="cpu", no_clusterops=True
+)
+
+# Print the results
+# for result in results:
+# print(result)
diff --git a/new_features_examples/dict_to_table.py b/new_features_examples/dict_to_table.py
new file mode 100644
index 00000000..5089516f
--- /dev/null
+++ b/new_features_examples/dict_to_table.py
@@ -0,0 +1,54 @@
+import pandas as pd
+import json
+from loguru import logger
+
+
+def dict_to_dataframe(data: dict) -> pd.DataFrame:
+ """
+ Converts a dictionary into a Pandas DataFrame with formatted values.
+ Handles non-serializable values gracefully by skipping them.
+
+ Args:
+ data (dict): The dictionary to convert.
+
+ Returns:
+ pd.DataFrame: A DataFrame representation of the dictionary.
+ """
+ formatted_data = {}
+
+ for key, value in data.items():
+ try:
+ # Attempt to serialize the value
+ if isinstance(value, list):
+ # Format list as comma-separated string
+ formatted_value = ", ".join(
+ str(item) for item in value
+ )
+ elif isinstance(value, dict):
+ # Format dict as key-value pairs
+ formatted_value = ", ".join(
+ f"{k}: {v}" for k, v in value.items()
+ )
+ else:
+ # Convert other serializable types to string
+ formatted_value = json.dumps(
+ value
+ ) # Serialize value to string
+
+ formatted_data[key] = formatted_value
+ except (TypeError, ValueError) as e:
+ # Log and skip non-serializable items
+ logger.warning(
+ f"Skipping non-serializable key '{key}': {e}"
+ )
+ continue
+
+ # Convert the formatted dictionary into a DataFrame
+ return pd.DataFrame(
+ list(formatted_data.items()), columns=["Key", "Value"]
+ )
+
+
+example = dict_to_dataframe(data={"chicken": "noodle_soup"})
+# formatter.print_panel(example)
+print(example)
diff --git a/new_features_examples/ethchain_agent.py b/new_features_examples/ethchain_agent.py
new file mode 100644
index 00000000..cc06aeb5
--- /dev/null
+++ b/new_features_examples/ethchain_agent.py
@@ -0,0 +1,308 @@
+import os
+from swarms import Agent
+from swarm_models import OpenAIChat
+from web3 import Web3
+from typing import Dict, Optional, Any
+from datetime import datetime
+import asyncio
+from loguru import logger
+from dotenv import load_dotenv
+import csv
+import requests
+import time
+
+BLOCKCHAIN_AGENT_PROMPT = """
+You are an expert blockchain and cryptocurrency analyst with deep knowledge of Ethereum markets and DeFi ecosystems.
+You have access to real-time ETH price data and transaction information.
+
+For each transaction, analyze:
+
+1. MARKET CONTEXT
+- Current ETH price and what this transaction means in USD terms
+- How this movement compares to typical market volumes
+- Whether this could impact ETH price
+
+2. BEHAVIORAL ANALYSIS
+- Whether this appears to be institutional, whale, or protocol movement
+- If this fits any known wallet patterns or behaviors
+- Signs of smart contract interaction or DeFi activity
+
+3. RISK & IMPLICATIONS
+- Potential market impact or price influence
+- Signs of potential market manipulation or unusual activity
+- Protocol or DeFi risks if applicable
+
+4. STRATEGIC INSIGHTS
+- What traders should know about this movement
+- Potential chain reactions or follow-up effects
+- Market opportunities or risks created
+
+Write naturally but precisely. Focus on actionable insights and important patterns.
+Your analysis helps traders and researchers understand significant market movements in real-time."""
+
+
+class EthereumAnalyzer:
+ def __init__(self, min_value_eth: float = 100.0):
+ load_dotenv()
+
+ logger.add(
+ "eth_analysis.log",
+ rotation="500 MB",
+ retention="10 days",
+ level="INFO",
+ format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
+ )
+
+ self.w3 = Web3(
+ Web3.HTTPProvider(
+ "https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161"
+ )
+ )
+ if not self.w3.is_connected():
+ raise ConnectionError(
+ "Failed to connect to Ethereum network"
+ )
+
+ self.min_value_eth = min_value_eth
+ self.last_processed_block = self.w3.eth.block_number
+ self.eth_price = self.get_eth_price()
+ self.last_price_update = time.time()
+
+ # Initialize AI agent
+ api_key = os.getenv("OPENAI_API_KEY")
+ if not api_key:
+ raise ValueError(
+ "OpenAI API key not found in environment variables"
+ )
+
+ model = OpenAIChat(
+ openai_api_key=api_key,
+ model_name="gpt-4",
+ temperature=0.1,
+ )
+
+ self.agent = Agent(
+ agent_name="Ethereum-Analysis-Agent",
+ system_prompt=BLOCKCHAIN_AGENT_PROMPT,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="eth_agent.json",
+ user_name="eth_analyzer",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+ streaming_on=False,
+ )
+
+ self.csv_filename = "ethereum_analysis.csv"
+ self.initialize_csv()
+
+ def get_eth_price(self) -> float:
+ """Get current ETH price from CoinGecko API."""
+ try:
+ response = requests.get(
+ "https://api.coingecko.com/api/v3/simple/price",
+ params={"ids": "ethereum", "vs_currencies": "usd"},
+ )
+ return float(response.json()["ethereum"]["usd"])
+ except Exception as e:
+ logger.error(f"Error fetching ETH price: {str(e)}")
+ return 0.0
+
+ def update_eth_price(self):
+ """Update ETH price if more than 5 minutes have passed."""
+ if time.time() - self.last_price_update > 300: # 5 minutes
+ self.eth_price = self.get_eth_price()
+ self.last_price_update = time.time()
+ logger.info(f"Updated ETH price: ${self.eth_price:,.2f}")
+
+ def initialize_csv(self):
+ """Initialize CSV file with headers."""
+ headers = [
+ "timestamp",
+ "transaction_hash",
+ "from_address",
+ "to_address",
+ "value_eth",
+ "value_usd",
+ "eth_price",
+ "gas_used",
+ "gas_price_gwei",
+ "block_number",
+ "analysis",
+ ]
+
+ if not os.path.exists(self.csv_filename):
+ with open(self.csv_filename, "w", newline="") as f:
+ writer = csv.writer(f)
+ writer.writerow(headers)
+
+ async def analyze_transaction(
+ self, tx_hash: str
+ ) -> Optional[Dict[str, Any]]:
+ """Analyze a single transaction."""
+ try:
+ tx = self.w3.eth.get_transaction(tx_hash)
+ receipt = self.w3.eth.get_transaction_receipt(tx_hash)
+
+ value_eth = float(self.w3.from_wei(tx.value, "ether"))
+
+ if value_eth < self.min_value_eth:
+ return None
+
+ block = self.w3.eth.get_block(tx.blockNumber)
+
+ # Update ETH price if needed
+ self.update_eth_price()
+
+ value_usd = value_eth * self.eth_price
+
+ analysis = {
+ "timestamp": datetime.fromtimestamp(
+ block.timestamp
+ ).isoformat(),
+ "transaction_hash": tx_hash.hex(),
+ "from_address": tx["from"],
+ "to_address": tx.to if tx.to else "Contract Creation",
+ "value_eth": value_eth,
+ "value_usd": value_usd,
+ "eth_price": self.eth_price,
+ "gas_used": receipt.gasUsed,
+ "gas_price_gwei": float(
+ self.w3.from_wei(tx.gasPrice, "gwei")
+ ),
+ "block_number": tx.blockNumber,
+ }
+
+ # Check if it's a contract
+ if tx.to:
+ code = self.w3.eth.get_code(tx.to)
+ analysis["is_contract"] = len(code) > 0
+
+ # Get contract events
+ if analysis["is_contract"]:
+ analysis["events"] = receipt.logs
+
+ return analysis
+
+ except Exception as e:
+ logger.error(
+ f"Error analyzing transaction {tx_hash}: {str(e)}"
+ )
+ return None
+
+ def prepare_analysis_prompt(self, tx_data: Dict[str, Any]) -> str:
+ """Prepare detailed analysis prompt including price context."""
+ value_usd = tx_data["value_usd"]
+ eth_price = tx_data["eth_price"]
+
+ prompt = f"""Analyze this Ethereum transaction in current market context:
+
+Transaction Details:
+- Value: {tx_data['value_eth']:.2f} ETH (${value_usd:,.2f} at current price)
+- Current ETH Price: ${eth_price:,.2f}
+- From: {tx_data['from_address']}
+- To: {tx_data['to_address']}
+- Contract Interaction: {tx_data.get('is_contract', False)}
+- Gas Used: {tx_data['gas_used']:,} units
+- Gas Price: {tx_data['gas_price_gwei']:.2f} Gwei
+- Block: {tx_data['block_number']}
+- Timestamp: {tx_data['timestamp']}
+
+{f"Event Count: {len(tx_data['events'])} events" if tx_data.get('events') else "No contract events"}
+
+Consider the transaction's significance given the current ETH price of ${eth_price:,.2f} and total USD value of ${value_usd:,.2f}.
+Analyze market impact, patterns, risks, and strategic implications."""
+
+ return prompt
+
+ def save_to_csv(self, tx_data: Dict[str, Any], ai_analysis: str):
+ """Save transaction data and analysis to CSV."""
+ row = [
+ tx_data["timestamp"],
+ tx_data["transaction_hash"],
+ tx_data["from_address"],
+ tx_data["to_address"],
+ tx_data["value_eth"],
+ tx_data["value_usd"],
+ tx_data["eth_price"],
+ tx_data["gas_used"],
+ tx_data["gas_price_gwei"],
+ tx_data["block_number"],
+ ai_analysis.replace("\n", " "),
+ ]
+
+ with open(self.csv_filename, "a", newline="") as f:
+ writer = csv.writer(f)
+ writer.writerow(row)
+
+ async def monitor_transactions(self):
+ """Monitor and analyze transactions one at a time."""
+ logger.info(
+ f"Starting transaction monitor (minimum value: {self.min_value_eth} ETH)"
+ )
+
+ while True:
+ try:
+ current_block = self.w3.eth.block_number
+ block = self.w3.eth.get_block(
+ current_block, full_transactions=True
+ )
+
+ for tx in block.transactions:
+ tx_analysis = await self.analyze_transaction(
+ tx.hash
+ )
+
+ if tx_analysis:
+ # Get AI analysis
+ analysis_prompt = (
+ self.prepare_analysis_prompt(tx_analysis)
+ )
+ ai_analysis = self.agent.run(analysis_prompt)
+ print(ai_analysis)
+
+ # Save to CSV
+ self.save_to_csv(tx_analysis, ai_analysis)
+
+ # Print analysis
+ print("\n" + "=" * 50)
+ print("New Transaction Analysis")
+ print(
+ f"Hash: {tx_analysis['transaction_hash']}"
+ )
+ print(
+ f"Value: {tx_analysis['value_eth']:.2f} ETH (${tx_analysis['value_usd']:,.2f})"
+ )
+ print(
+ f"Current ETH Price: ${self.eth_price:,.2f}"
+ )
+ print("=" * 50)
+ print(ai_analysis)
+ print("=" * 50 + "\n")
+
+ await asyncio.sleep(1) # Wait for next block
+
+ except Exception as e:
+ logger.error(f"Error in monitoring loop: {str(e)}")
+ await asyncio.sleep(1)
+
+
+async def main():
+ """Entry point for the analysis system."""
+ analyzer = EthereumAnalyzer(min_value_eth=100.0)
+ await analyzer.monitor_transactions()
+
+
+if __name__ == "__main__":
+ print("Starting Ethereum Transaction Analyzer...")
+ print("Saving results to ethereum_analysis.csv")
+ print("Press Ctrl+C to stop")
+ try:
+ asyncio.run(main())
+ except KeyboardInterrupt:
+ print("\nStopping analyzer...")
diff --git a/new_features_examples/example_async_vs_multithread.py b/new_features_examples/example_async_vs_multithread.py
new file mode 100644
index 00000000..25d514aa
--- /dev/null
+++ b/new_features_examples/example_async_vs_multithread.py
@@ -0,0 +1,75 @@
+import os
+import asyncio
+from swarms import Agent
+from swarm_models import OpenAIChat
+import time
+import psutil
+
+from swarms.prompts.finance_agent_sys_prompt import (
+ FINANCIAL_AGENT_SYS_PROMPT,
+)
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="finance_agent.json",
+ user_name="swarms_corp",
+ retry_attempts=1,
+ context_length=200000,
+ return_step_meta=False,
+ output_type="string",
+ streaming_on=False,
+)
+
+
+# Function to measure time and memory usage
+def measure_time_and_memory(func):
+ def wrapper(*args, **kwargs):
+ start_time = time.time()
+ result = func(*args, **kwargs)
+ end_time = time.time()
+ memory_usage = psutil.Process().memory_info().rss / 1024**2
+ print(f"Time taken: {end_time - start_time} seconds")
+ print(f"Memory used: {memory_usage} MB")
+ return result
+
+ return wrapper
+
+
+# Function to run the agent asynchronously
+@measure_time_and_memory
+async def run_agent_async():
+ await asyncio.gather(
+ agent.run(
+ "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
+ )
+ )
+
+
+# Function to run the agent on another thread
+@measure_time_and_memory
+def run_agent_thread():
+ asyncio.run(run_agent_async())
+
+
+# Run the agent asynchronously and on another thread to test the speed
+asyncio.run(run_agent_async())
+run_agent_thread()
diff --git a/new_features_examples/full_agent_rag_example.py b/new_features_examples/full_agent_rag_example.py
new file mode 100644
index 00000000..75aee45b
--- /dev/null
+++ b/new_features_examples/full_agent_rag_example.py
@@ -0,0 +1,228 @@
+import os
+from pathlib import Path
+from typing import Optional
+
+from dotenv import load_dotenv
+from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
+from loguru import logger
+from swarm_models import OpenAIChat
+
+from swarms import Agent, AgentRearrange
+
+load_dotenv()
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("GROQ_API_KEY")
+
+# Model
+model = OpenAIChat(
+ openai_api_base="https://api.groq.com/openai/v1",
+ openai_api_key=api_key,
+ model_name="llama-3.1-70b-versatile",
+ temperature=0.1,
+)
+
+
+class LlamaIndexDB:
+ """A class to manage document indexing and querying using LlamaIndex.
+
+ This class provides functionality to add documents from a directory and query the indexed documents.
+
+ Args:
+ data_dir (str): Directory containing documents to index. Defaults to "docs".
+ **kwargs: Additional arguments passed to SimpleDirectoryReader and VectorStoreIndex.
+ SimpleDirectoryReader kwargs:
+ - filename_as_id (bool): Use filenames as document IDs
+ - recursive (bool): Recursively read subdirectories
+ - required_exts (List[str]): Only read files with these extensions
+ - exclude_hidden (bool): Skip hidden files
+
+ VectorStoreIndex kwargs:
+ - service_context: Custom service context
+ - embed_model: Custom embedding model
+ - similarity_top_k (int): Number of similar docs to retrieve
+ - store_nodes_override (bool): Override node storage
+ """
+
+ def __init__(self, data_dir: str = "docs", **kwargs) -> None:
+ """Initialize the LlamaIndexDB with an empty index.
+
+ Args:
+ data_dir (str): Directory containing documents to index
+ **kwargs: Additional arguments for SimpleDirectoryReader and VectorStoreIndex
+ """
+ self.data_dir = data_dir
+ self.index: Optional[VectorStoreIndex] = None
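+        # Split incoming kwargs: names that appear in SimpleDirectoryReader.__init__
+        # are routed to the reader; everything else is passed to VectorStoreIndex.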
+ self.reader_kwargs = {
+ k: v
+ for k, v in kwargs.items()
+ if k
+ in SimpleDirectoryReader.__init__.__code__.co_varnames
+ }
+ self.index_kwargs = {
+ k: v
+ for k, v in kwargs.items()
+ if k not in self.reader_kwargs
+ }
+
+ logger.info("Initialized LlamaIndexDB")
+ data_path = Path(self.data_dir)
+ if not data_path.exists():
+ logger.error(f"Directory not found: {self.data_dir}")
+ raise FileNotFoundError(
+ f"Directory {self.data_dir} does not exist"
+ )
+
+ try:
+ documents = SimpleDirectoryReader(
+ self.data_dir, **self.reader_kwargs
+ ).load_data()
+ self.index = VectorStoreIndex.from_documents(
+ documents, **self.index_kwargs
+ )
+ logger.success(
+ f"Successfully indexed documents from {self.data_dir}"
+ )
+ except Exception as e:
+ logger.error(f"Error indexing documents: {str(e)}")
+ raise
+
+ def query(self, query: str, **kwargs) -> str:
+ """Query the indexed documents.
+
+ Args:
+ query (str): The query string to search for
+ **kwargs: Additional arguments passed to the query engine
+ - similarity_top_k (int): Number of similar documents to retrieve
+ - streaming (bool): Enable streaming response
+ - response_mode (str): Response synthesis mode
+ - max_tokens (int): Maximum tokens in response
+
+ Returns:
+ str: The response from the query engine
+
+ Raises:
+ ValueError: If no documents have been indexed yet
+ """
+ if self.index is None:
+ logger.error("No documents have been indexed yet")
+ raise ValueError("Must add documents before querying")
+
+ try:
+ query_engine = self.index.as_query_engine(**kwargs)
+ response = query_engine.query(query)
+ print(response)
+ logger.info(f"Successfully queried: {query}")
+ return str(response)
+ except Exception as e:
+ logger.error(f"Error during query: {str(e)}")
+ raise
+
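+# Illustrative standalone usage of LlamaIndexDB (assumes a local "docs" folder
+# containing at least one readable document), mirroring the constructor
+# arguments used for the router below:
+#
+#   db = LlamaIndexDB(data_dir="docs", recursive=True, similarity_top_k=10)
+#   answer = db.query("Summarize the patient's latest lab results")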
+
+# Initialize specialized medical agents
+medical_data_extractor = Agent(
+ agent_name="Medical-Data-Extractor",
+ system_prompt="You are a specialized medical data extraction expert, trained in processing and analyzing clinical data, lab results, medical imaging reports, and patient records. Your role is to carefully extract relevant medical information while maintaining strict HIPAA compliance and patient confidentiality. Focus on identifying key clinical indicators, test results, vital signs, medication histories, and relevant patient history. Pay special attention to temporal relationships between symptoms, treatments, and outcomes. Ensure all extracted data maintains proper medical context and terminology.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="medical_data_extractor.json",
+ user_name="medical_team",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+diagnostic_specialist = Agent(
+ agent_name="Diagnostic-Specialist",
+ system_prompt="You are a senior diagnostic physician with extensive experience in differential diagnosis. Your role is to analyze patient symptoms, lab results, and clinical findings to develop comprehensive diagnostic assessments. Consider all presenting symptoms, patient history, risk factors, and test results to formulate possible diagnoses. Prioritize diagnoses based on clinical probability and severity. Always consider both common and rare conditions that match the symptom pattern. Recommend additional tests or imaging when needed for diagnostic clarity. Follow evidence-based diagnostic criteria and current medical guidelines.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="diagnostic_specialist.json",
+ user_name="medical_team",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+treatment_planner = Agent(
+ agent_name="Treatment-Planner",
+ system_prompt="You are an experienced clinical treatment specialist focused on developing comprehensive treatment plans. Your expertise covers both acute and chronic condition management, medication selection, and therapeutic interventions. Consider patient-specific factors including age, comorbidities, allergies, and contraindications when recommending treatments. Incorporate both pharmacological and non-pharmacological interventions. Emphasize evidence-based treatment protocols while considering patient preferences and quality of life. Address potential drug interactions and side effects. Include monitoring parameters and treatment milestones.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="treatment_planner.json",
+ user_name="medical_team",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+specialist_consultant = Agent(
+ agent_name="Specialist-Consultant",
+ system_prompt="You are a medical specialist consultant with expertise across multiple disciplines including cardiology, neurology, endocrinology, and internal medicine. Your role is to provide specialized insight for complex cases requiring deep domain knowledge. Analyze cases from your specialist perspective, considering rare conditions and complex interactions between multiple systems. Provide detailed recommendations for specialized testing, imaging, or interventions within your domain. Highlight potential complications or considerations that may not be immediately apparent to general practitioners.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="specialist_consultant.json",
+ user_name="medical_team",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+patient_care_coordinator = Agent(
+ agent_name="Patient-Care-Coordinator",
+ system_prompt="You are a patient care coordinator specializing in comprehensive healthcare management. Your role is to ensure holistic patient care by coordinating between different medical specialists, considering patient needs, and managing care transitions. Focus on patient education, medication adherence, lifestyle modifications, and follow-up care planning. Consider social determinants of health, patient resources, and access to care. Develop actionable care plans that patients can realistically follow. Coordinate with other healthcare providers to ensure continuity of care and proper implementation of treatment plans.",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="patient_care_coordinator.json",
+ user_name="medical_team",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+
+# Initialize the AgentRearrange workflow to coordinate the medical agents
+router = AgentRearrange(
+ name="medical-diagnosis-treatment-swarm",
+ description="Collaborative medical team for comprehensive patient diagnosis and treatment planning",
+ max_loops=1, # Limit to one iteration through the agent flow
+ agents=[
+ medical_data_extractor, # First agent to extract medical data
+ diagnostic_specialist, # Second agent to analyze and diagnose
+ treatment_planner, # Third agent to plan treatment
+ specialist_consultant, # Fourth agent to provide specialist input
+ patient_care_coordinator, # Final agent to coordinate care plan
+ ],
+ # Configure the document storage and retrieval system
+ memory_system=LlamaIndexDB(
+ data_dir="docs", # Directory containing medical documents
+ filename_as_id=True, # Use filenames as document identifiers
+ recursive=True, # Search subdirectories
+ # required_exts=[".txt", ".pdf", ".docx"], # Supported file types
+ similarity_top_k=10, # Return top 10 most relevant documents
+ ),
+ # Define the sequential flow of information between agents
+ flow=f"{medical_data_extractor.agent_name} -> {diagnostic_specialist.agent_name} -> {treatment_planner.agent_name} -> {specialist_consultant.agent_name} -> {patient_care_coordinator.agent_name}",
+)
+
+# Example usage
+if __name__ == "__main__":
+ # Run a comprehensive medical analysis task for patient Lucas Brown
+    router.run(
+        "Analyze Lucas Brown's medical data to provide a diagnosis and treatment plan"
+ )
diff --git a/new_features_examples/gemini_model.py b/new_features_examples/gemini_model.py
new file mode 100644
index 00000000..f38fa1da
--- /dev/null
+++ b/new_features_examples/gemini_model.py
@@ -0,0 +1,63 @@
+import os
+import google.generativeai as genai
+from loguru import logger
+
+
+class GeminiModel:
+ """
+ Represents a GeminiModel instance for generating text based on user input.
+ """
+
+ def __init__(
+ self,
+        temperature: float = 1.0,
+        top_p: float = 0.95,
+        top_k: int = 40,
+ ):
+ """
+ Initializes the GeminiModel by setting up the API key, generation configuration, and starting a chat session.
+ Raises a KeyError if the GEMINI_API_KEY environment variable is not found.
+ """
+ try:
+ api_key = os.environ["GEMINI_API_KEY"]
+ genai.configure(api_key=api_key)
+            self.generation_config = {
+                "temperature": temperature,
+                "top_p": top_p,
+                "top_k": top_k,
+                "max_output_tokens": 8192,
+                "response_mime_type": "text/plain",
+            }
+ self.model = genai.GenerativeModel(
+ model_name="gemini-1.5-pro",
+ generation_config=self.generation_config,
+ )
+ self.chat_session = self.model.start_chat(history=[])
+ except KeyError as e:
+ logger.error(f"Environment variable not found: {e}")
+ raise
+
+ def run(self, task: str) -> str:
+ """
+ Sends a message to the chat session and returns the response text.
+ Raises an Exception if there's an error running the GeminiModel.
+
+ Args:
+ task (str): The input task or message to send to the chat session.
+
+ Returns:
+ str: The response text from the chat session.
+ """
+ try:
+ response = self.chat_session.send_message(task)
+ return response.text
+ except Exception as e:
+ logger.error(f"Error running GeminiModel: {e}")
+ raise
+
+
+# Example usage
+if __name__ == "__main__":
+ gemini_model = GeminiModel()
+ output = gemini_model.run("INSERT_INPUT_HERE")
+ print(output)
diff --git a/new_features_examples/microstructure.py b/new_features_examples/microstructure.py
new file mode 100644
index 00000000..c13d2e3f
--- /dev/null
+++ b/new_features_examples/microstructure.py
@@ -0,0 +1,1074 @@
+import os
+import threading
+import time
+from collections import deque
+from dataclasses import dataclass
+from datetime import datetime
+from queue import Queue
+from typing import Any, Dict, List, Optional, Tuple
+
+import ccxt
+import numpy as np
+import pandas as pd
+from dotenv import load_dotenv
+from loguru import logger
+from scipy import stats
+from swarm_models import OpenAIChat
+
+from swarms import Agent
+
+logger.enable("")
+
+
+@dataclass
+class MarketSignal:
+ timestamp: datetime
+ signal_type: str
+ source: str
+ data: Dict[str, Any]
+ confidence: float
+ metadata: Dict[str, Any]
+
+
+class MarketDataBuffer:
+ def __init__(self, max_size: int = 10000):
+ self.max_size = max_size
+ self.data = deque(maxlen=max_size)
+ self.lock = threading.Lock()
+
+ def add(self, item: Any) -> None:
+ with self.lock:
+ self.data.append(item)
+
+ def get_latest(self, n: int = None) -> List[Any]:
+ with self.lock:
+ if n is None:
+ return list(self.data)
+ return list(self.data)[-n:]
+
+
+class SignalCSVWriter:
+ def __init__(self, output_dir: str = "market_data"):
+ self.output_dir = output_dir
+ self.ensure_output_dir()
+ self.files = {}
+
+ def ensure_output_dir(self):
+ if not os.path.exists(self.output_dir):
+ os.makedirs(self.output_dir)
+
+ def get_filename(self, signal_type: str, symbol: str) -> str:
+ date_str = datetime.now().strftime("%Y%m%d")
+ return (
+ f"{self.output_dir}/{signal_type}_{symbol}_{date_str}.csv"
+ )
+
+ def write_order_book_signal(self, signal: MarketSignal):
+ symbol = signal.data["symbol"]
+ metrics = signal.data["metrics"]
+ filename = self.get_filename("order_book", symbol)
+
+ # Create header if file doesn't exist
+ if not os.path.exists(filename):
+ header = [
+ "timestamp",
+ "symbol",
+ "bid_volume",
+ "ask_volume",
+ "mid_price",
+ "bid_vwap",
+ "ask_vwap",
+ "spread",
+ "depth_imbalance",
+ "confidence",
+ ]
+ with open(filename, "w") as f:
+ f.write(",".join(header) + "\n")
+
+ # Write data
+ data = [
+ str(signal.timestamp),
+ symbol,
+ str(metrics["bid_volume"]),
+ str(metrics["ask_volume"]),
+ str(metrics["mid_price"]),
+ str(metrics["bid_vwap"]),
+ str(metrics["ask_vwap"]),
+ str(metrics["spread"]),
+ str(metrics["depth_imbalance"]),
+ str(signal.confidence),
+ ]
+
+ with open(filename, "a") as f:
+ f.write(",".join(data) + "\n")
+
+ def write_tick_signal(self, signal: MarketSignal):
+ symbol = signal.data["symbol"]
+ metrics = signal.data["metrics"]
+ filename = self.get_filename("tick_data", symbol)
+
+ if not os.path.exists(filename):
+ header = [
+ "timestamp",
+ "symbol",
+ "vwap",
+ "price_momentum",
+ "volume_mean",
+ "trade_intensity",
+ "kyle_lambda",
+ "roll_spread",
+ "confidence",
+ ]
+ with open(filename, "w") as f:
+ f.write(",".join(header) + "\n")
+
+ data = [
+ str(signal.timestamp),
+ symbol,
+ str(metrics["vwap"]),
+ str(metrics["price_momentum"]),
+ str(metrics["volume_mean"]),
+ str(metrics["trade_intensity"]),
+ str(metrics["kyle_lambda"]),
+ str(metrics["roll_spread"]),
+ str(signal.confidence),
+ ]
+
+ with open(filename, "a") as f:
+ f.write(",".join(data) + "\n")
+
+ def write_arbitrage_signal(self, signal: MarketSignal):
+ if (
+ "best_opportunity" not in signal.data
+ or not signal.data["best_opportunity"]
+ ):
+ return
+
+ symbol = signal.data["symbol"]
+ opp = signal.data["best_opportunity"]
+ filename = self.get_filename("arbitrage", symbol)
+
+ if not os.path.exists(filename):
+ header = [
+ "timestamp",
+ "symbol",
+ "buy_venue",
+ "sell_venue",
+ "spread",
+ "return",
+ "buy_price",
+ "sell_price",
+ "confidence",
+ ]
+ with open(filename, "w") as f:
+ f.write(",".join(header) + "\n")
+
+ data = [
+ str(signal.timestamp),
+ symbol,
+ opp["buy_venue"],
+ opp["sell_venue"],
+ str(opp["spread"]),
+ str(opp["return"]),
+ str(opp["buy_price"]),
+ str(opp["sell_price"]),
+ str(signal.confidence),
+ ]
+
+ with open(filename, "a") as f:
+ f.write(",".join(data) + "\n")
+
+
+class ExchangeManager:
+ def __init__(self):
+ self.available_exchanges = {
+ "kraken": ccxt.kraken,
+ "coinbase": ccxt.coinbase,
+ "kucoin": ccxt.kucoin,
+ "bitfinex": ccxt.bitfinex,
+ "gemini": ccxt.gemini,
+ }
+ self.active_exchanges = {}
+ self.test_exchanges()
+
+ def test_exchanges(self):
+ """Test each exchange and keep only the accessible ones"""
+ for name, exchange_class in self.available_exchanges.items():
+ try:
+ exchange = exchange_class()
+ exchange.load_markets()
+ self.active_exchanges[name] = exchange
+ logger.info(f"Successfully connected to {name}")
+ except Exception as e:
+ logger.warning(f"Could not connect to {name}: {e}")
+
+ def get_primary_exchange(self) -> Optional[ccxt.Exchange]:
+ """Get the first available exchange"""
+ if not self.active_exchanges:
+ raise RuntimeError("No exchanges available")
+ return next(iter(self.active_exchanges.values()))
+
+ def get_all_active_exchanges(self) -> Dict[str, ccxt.Exchange]:
+ """Get all active exchanges"""
+ return self.active_exchanges
+
+
+class BaseMarketAgent(Agent):
+ def __init__(
+ self,
+ agent_name: str,
+ system_prompt: str,
+ api_key: str,
+ model_name: str = "gpt-4-0125-preview",
+ temperature: float = 0.1,
+ ):
+ model = OpenAIChat(
+ openai_api_key=api_key,
+ model_name=model_name,
+ temperature=temperature,
+ )
+ super().__init__(
+ agent_name=agent_name,
+ system_prompt=system_prompt,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ context_length=200000,
+ streaming_on=True,
+ output_type="str",
+ )
+ self.signal_queue = Queue()
+ self.is_running = False
+ self.last_update = datetime.now()
+ self.update_interval = 1.0 # seconds
+
+ def rate_limit_check(self) -> bool:
+ current_time = datetime.now()
+ if (
+ current_time - self.last_update
+ ).total_seconds() < self.update_interval:
+ return False
+ self.last_update = current_time
+ return True
+
+
+class OrderBookAgent(BaseMarketAgent):
+ def __init__(self, api_key: str):
+ system_prompt = """
+ You are an Order Book Analysis Agent specialized in detecting institutional flows.
+ Monitor order book depth and changes to identify potential large trades and institutional activity.
+ Analyze patterns in order placement and cancellation rates.
+ """
+ super().__init__("OrderBookAgent", system_prompt, api_key)
+ exchange_manager = ExchangeManager()
+ self.exchange = exchange_manager.get_primary_exchange()
+ self.order_book_buffer = MarketDataBuffer(max_size=100)
+ self.vwap_window = 20
+
+ def calculate_order_book_metrics(
+ self, order_book: Dict
+ ) -> Dict[str, float]:
+ bids = np.array(order_book["bids"])
+ asks = np.array(order_book["asks"])
+
+ # Calculate key metrics
+ bid_volume = np.sum(bids[:, 1])
+ ask_volume = np.sum(asks[:, 1])
+ mid_price = (bids[0][0] + asks[0][0]) / 2
+
+ # Calculate VWAP
+ bid_vwap = (
+ np.sum(
+ bids[: self.vwap_window, 0]
+ * bids[: self.vwap_window, 1]
+ )
+ / bid_volume
+ if bid_volume > 0
+ else 0
+ )
+ ask_vwap = (
+ np.sum(
+ asks[: self.vwap_window, 0]
+ * asks[: self.vwap_window, 1]
+ )
+ / ask_volume
+ if ask_volume > 0
+ else 0
+ )
+
+ # Calculate order book slope
+ bid_slope = np.polyfit(
+ range(len(bids[:10])), bids[:10, 0], 1
+ )[0]
+ ask_slope = np.polyfit(
+ range(len(asks[:10])), asks[:10, 0], 1
+ )[0]
+
+ return {
+ "bid_volume": bid_volume,
+ "ask_volume": ask_volume,
+ "mid_price": mid_price,
+ "bid_vwap": bid_vwap,
+ "ask_vwap": ask_vwap,
+ "bid_slope": bid_slope,
+ "ask_slope": ask_slope,
+ "spread": asks[0][0] - bids[0][0],
+ "depth_imbalance": (bid_volume - ask_volume)
+ / (bid_volume + ask_volume),
+ }
+
+ def detect_large_orders(
+ self, metrics: Dict[str, float], threshold: float = 2.0
+ ) -> bool:
+ historical_books = self.order_book_buffer.get_latest(20)
+ if not historical_books:
+ return False
+
+ # Calculate historical volume statistics
+ hist_volumes = [
+ book["bid_volume"] + book["ask_volume"]
+ for book in historical_books
+ ]
+ volume_mean = np.mean(hist_volumes)
+ volume_std = np.std(hist_volumes)
+
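+        # Flag a "large order" when the current total book volume deviates from
+        # its recent mean by more than `threshold` standard deviations (z-score test).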
+ current_volume = metrics["bid_volume"] + metrics["ask_volume"]
+ z_score = (current_volume - volume_mean) / (
+ volume_std if volume_std > 0 else 1
+ )
+
+ return abs(z_score) > threshold
+
+ def analyze_order_book(self, symbol: str) -> MarketSignal:
+ if not self.rate_limit_check():
+ return None
+
+ try:
+ order_book = self.exchange.fetch_order_book(
+ symbol, limit=100
+ )
+ metrics = self.calculate_order_book_metrics(order_book)
+ self.order_book_buffer.add(metrics)
+
+ # Format data for LLM analysis
+ analysis_prompt = f"""
+ Analyze this order book for {symbol}:
+ Bid Volume: {metrics['bid_volume']}
+ Ask Volume: {metrics['ask_volume']}
+ Mid Price: {metrics['mid_price']}
+ Spread: {metrics['spread']}
+ Depth Imbalance: {metrics['depth_imbalance']}
+
+ What patterns do you see? Is there evidence of institutional activity?
+ Are there any significant imbalances that could lead to price movement?
+ """
+
+ # Get LLM analysis
+ llm_analysis = self.run(analysis_prompt)
+
+ # Original signal creation with added LLM analysis
+ return MarketSignal(
+ timestamp=datetime.now(),
+ signal_type="order_book_analysis",
+ source="OrderBookAgent",
+ data={
+ "metrics": metrics,
+ "large_order_detected": self.detect_large_orders(
+ metrics
+ ),
+ "symbol": symbol,
+ "llm_analysis": llm_analysis, # Add LLM insights
+ },
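+                # Confidence: 70% weight on |depth imbalance|, 30% on the
+                # large-order flag, capped at 1.0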
+ confidence=min(
+ abs(metrics["depth_imbalance"]) * 0.7
+ + (
+ 1.0
+ if self.detect_large_orders(metrics)
+ else 0.0
+ )
+ * 0.3,
+ 1.0,
+ ),
+ metadata={
+ "update_latency": (
+ datetime.now() - self.last_update
+ ).total_seconds(),
+ "buffer_size": len(
+ self.order_book_buffer.get_latest()
+ ),
+ },
+ )
+ except Exception as e:
+ logger.error(f"Error in order book analysis: {str(e)}")
+ return None
+
+
+class TickDataAgent(BaseMarketAgent):
+ def __init__(self, api_key: str):
+ system_prompt = """
+ You are a Tick Data Analysis Agent specialized in analyzing high-frequency price movements.
+ Monitor tick-by-tick data for patterns indicating short-term price direction.
+ Analyze trade size distribution and execution speed.
+ """
+ super().__init__("TickDataAgent", system_prompt, api_key)
+ self.tick_buffer = MarketDataBuffer(max_size=5000)
+ exchange_manager = ExchangeManager()
+ self.exchange = exchange_manager.get_primary_exchange()
+
+ def calculate_kyle_lambda(self, df: pd.DataFrame) -> float:
+ """Calculate Kyle's Lambda (price impact coefficient)"""
+ try:
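+            # Estimated here as the absolute slope of a least-squares regression of
+            # tick-to-tick price changes on volume changes, i.e. a rough proxy for
+            # price impact per unit of traded volume.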
+ price_changes = df["price"].diff().dropna()
+ volume_changes = df["volume"].diff().dropna()
+
+ if len(price_changes) > 1 and len(volume_changes) > 1:
+ slope, _, _, _, _ = stats.linregress(
+ volume_changes, price_changes
+ )
+ return abs(slope)
+ except Exception as e:
+ logger.warning(f"Error calculating Kyle's Lambda: {e}")
+ return 0.0
+
+ def calculate_roll_spread(self, df: pd.DataFrame) -> float:
+ """Calculate Roll's implied spread"""
+ try:
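+            # Roll (1984) estimator: spread is approximately 2 * sqrt(-Cov(dP_t, dP_{t-1})),
+            # which is only defined when consecutive price changes are negatively
+            # autocorrelated; otherwise the method falls back to 0.0.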
+ price_changes = df["price"].diff().dropna()
+ if len(price_changes) > 1:
+ autocov = np.cov(
+ price_changes[:-1], price_changes[1:]
+ )[0][1]
+ return 2 * np.sqrt(-autocov) if autocov < 0 else 0.0
+ except Exception as e:
+ logger.warning(f"Error calculating Roll spread: {e}")
+ return 0.0
+
+ def calculate_tick_metrics(
+ self, ticks: List[Dict]
+ ) -> Dict[str, float]:
+ try:
+ # Debug the incoming data structure
+ logger.info(
+ f"Raw tick data structure: {ticks[0] if ticks else 'No ticks'}"
+ )
+
+ # Convert trades to proper format
+ formatted_trades = []
+ for trade in ticks:
+ formatted_trade = {
+ "price": float(
+ trade.get("price", trade.get("last", 0))
+ ), # Handle different exchange formats
+ "amount": float(
+ trade.get(
+ "amount",
+ trade.get(
+ "size", trade.get("quantity", 0)
+ ),
+ )
+ ),
+ "timestamp": trade.get(
+ "timestamp", int(time.time() * 1000)
+ ),
+ }
+ formatted_trades.append(formatted_trade)
+
+ df = pd.DataFrame(formatted_trades)
+
+ if df.empty:
+ logger.warning("No valid trades to analyze")
+ return {
+ "vwap": 0.0,
+ "price_momentum": 0.0,
+ "volume_mean": 0.0,
+ "volume_std": 0.0,
+ "trade_intensity": 0.0,
+ "kyle_lambda": 0.0,
+ "roll_spread": 0.0,
+ }
+
+ # Calculate metrics with the properly formatted data
+ metrics = {}
+ metrics["vwap"] = (
+ (df["price"] * df["amount"]).sum()
+ / df["amount"].sum()
+ if not df.empty
+ else 0
+ )
+ metrics["price_momentum"] = (
+ df["price"].diff().mean() if len(df) > 1 else 0
+ )
+ metrics["volume_mean"] = df["amount"].mean()
+ metrics["volume_std"] = df["amount"].std()
+
+ time_diff = (
+ (df["timestamp"].max() - df["timestamp"].min()) / 1000
+ if len(df) > 1
+ else 1
+ )
+ metrics["trade_intensity"] = (
+ len(df) / time_diff if time_diff > 0 else 0
+ )
+
+ metrics["kyle_lambda"] = self.calculate_kyle_lambda(df)
+ metrics["roll_spread"] = self.calculate_roll_spread(df)
+
+ logger.info(f"Calculated metrics: {metrics}")
+ return metrics
+
+ except Exception as e:
+ logger.error(
+ f"Error in calculate_tick_metrics: {str(e)}",
+ exc_info=True,
+ )
+ # Return default metrics on error
+ return {
+ "vwap": 0.0,
+ "price_momentum": 0.0,
+ "volume_mean": 0.0,
+ "volume_std": 0.0,
+ "trade_intensity": 0.0,
+ "kyle_lambda": 0.0,
+ "roll_spread": 0.0,
+ }
+
+ def analyze_ticks(self, symbol: str) -> MarketSignal:
+ if not self.rate_limit_check():
+ return None
+
+ try:
+ # Fetch recent trades
+ trades = self.exchange.fetch_trades(symbol, limit=100)
+
+ # Debug the raw trades data
+ logger.info(f"Fetched {len(trades)} trades for {symbol}")
+ if trades:
+ logger.info(f"Sample trade: {trades[0]}")
+
+            # Buffer each trade individually so downstream metrics receive a flat
+            # list of trade dicts rather than nested lists
+            for trade in trades:
+                self.tick_buffer.add(trade)
+ recent_ticks = self.tick_buffer.get_latest(1000)
+ metrics = self.calculate_tick_metrics(recent_ticks)
+
+ # Only proceed with LLM analysis if we have valid metrics
+ if metrics["vwap"] > 0:
+ analysis_prompt = f"""
+ Analyze these trading patterns for {symbol}:
+ VWAP: {metrics['vwap']:.2f}
+ Price Momentum: {metrics['price_momentum']:.2f}
+ Trade Intensity: {metrics['trade_intensity']:.2f}
+ Kyle's Lambda: {metrics['kyle_lambda']:.2f}
+
+ What does this tell us about:
+ 1. Current market sentiment
+ 2. Potential price direction
+ 3. Trading activity patterns
+ """
+ llm_analysis = self.run(analysis_prompt)
+ else:
+ llm_analysis = "Insufficient data for analysis"
+
+ return MarketSignal(
+ timestamp=datetime.now(),
+ signal_type="tick_analysis",
+ source="TickDataAgent",
+ data={
+ "metrics": metrics,
+ "symbol": symbol,
+ "prediction": np.sign(metrics["price_momentum"]),
+ "llm_analysis": llm_analysis,
+ },
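+                # Confidence: 40% from trade intensity (scaled down and capped
+                # at 1.0) plus 60% from Kyle's lambda (capped at 1.0)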
+ confidence=min(metrics["trade_intensity"] / 100, 1.0)
+ * 0.4
+ + min(metrics["kyle_lambda"], 1.0) * 0.6,
+ metadata={
+ "update_latency": (
+ datetime.now() - self.last_update
+ ).total_seconds(),
+ "buffer_size": len(self.tick_buffer.get_latest()),
+ },
+ )
+
+ except Exception as e:
+ logger.error(
+ f"Error in tick analysis: {str(e)}", exc_info=True
+ )
+ return None
+
+
+class LatencyArbitrageAgent(BaseMarketAgent):
+ def __init__(self, api_key: str):
+ system_prompt = """
+ You are a Latency Arbitrage Agent specialized in detecting price discrepancies across venues.
+ Monitor multiple exchanges for price differences exceeding transaction costs.
+ Calculate optimal trade sizes and routes.
+ """
+ super().__init__(
+ "LatencyArbitrageAgent", system_prompt, api_key
+ )
+ exchange_manager = ExchangeManager()
+ self.exchanges = exchange_manager.get_all_active_exchanges()
+ self.fee_structure = {
+ "kraken": 0.0026, # 0.26% taker fee
+ "coinbase": 0.006, # 0.6% taker fee
+ "kucoin": 0.001, # 0.1% taker fee
+ "bitfinex": 0.002, # 0.2% taker fee
+ "gemini": 0.003, # 0.3% taker fee
+ }
+ self.price_buffer = {
+ ex: MarketDataBuffer(max_size=100)
+ for ex in self.exchanges
+ }
+
+ def calculate_effective_prices(
+ self, ticker: Dict, venue: str
+ ) -> Tuple[float, float]:
+ """Calculate effective prices including fees"""
+ fee = self.fee_structure[venue]
+ return (
+ ticker["bid"] * (1 - fee), # Effective sell price
+ ticker["ask"] * (1 + fee), # Effective buy price
+ )
+
+ def calculate_arbitrage_metrics(
+ self, prices: Dict[str, Dict]
+ ) -> Dict:
+ opportunities = []
+
+ for venue1 in prices:
+ for venue2 in prices:
+ if venue1 != venue2:
+ sell_price, _ = self.calculate_effective_prices(
+ prices[venue1], venue1
+ )
+ _, buy_price = self.calculate_effective_prices(
+ prices[venue2], venue2
+ )
+
+ spread = sell_price - buy_price
+ if spread > 0:
+ opportunities.append(
+ {
+ "sell_venue": venue1,
+ "buy_venue": venue2,
+ "spread": spread,
+ "return": spread / buy_price,
+ "buy_price": buy_price,
+ "sell_price": sell_price,
+ }
+ )
+
+ return {
+ "opportunities": opportunities,
+ "best_opportunity": (
+ max(opportunities, key=lambda x: x["return"])
+ if opportunities
+ else None
+ ),
+ }
+
+ def find_arbitrage(self, symbol: str) -> MarketSignal:
+ """
+ Find arbitrage opportunities across exchanges with LLM analysis
+ """
+ if not self.rate_limit_check():
+ return None
+
+ try:
+ prices = {}
+ timestamps = {}
+
+ for name, exchange in self.exchanges.items():
+ try:
+ ticker = exchange.fetch_ticker(symbol)
+ prices[name] = {
+ "bid": ticker["bid"],
+ "ask": ticker["ask"],
+ }
+ timestamps[name] = ticker["timestamp"]
+ self.price_buffer[name].add(prices[name])
+ except Exception as e:
+ logger.warning(
+ f"Error fetching {name} price: {e}"
+ )
+
+ if len(prices) < 2:
+ return None
+
+ metrics = self.calculate_arbitrage_metrics(prices)
+
+ if not metrics["best_opportunity"]:
+ return None
+
+ # Calculate confidence based on spread and timing
+ opp = metrics["best_opportunity"]
+ timing_factor = 1.0 - min(
+ abs(
+ timestamps[opp["sell_venue"]]
+ - timestamps[opp["buy_venue"]]
+ )
+ / 1000,
+ 1.0,
+ )
+ spread_factor = min(
+ opp["return"] * 5, 1.0
+ ) # Scale return to confidence
+
+ confidence = timing_factor * 0.4 + spread_factor * 0.6
+
+ # Format price data for LLM analysis
+ price_summary = "\n".join(
+ [
+ f"{venue}: Bid ${prices[venue]['bid']:.2f}, Ask ${prices[venue]['ask']:.2f}"
+ for venue in prices.keys()
+ ]
+ )
+
+ # Create detailed analysis prompt
+ analysis_prompt = f"""
+ Analyze this arbitrage opportunity for {symbol}:
+
+ Current Prices:
+ {price_summary}
+
+ Best Opportunity Found:
+ Buy Venue: {opp['buy_venue']} at ${opp['buy_price']:.2f}
+ Sell Venue: {opp['sell_venue']} at ${opp['sell_price']:.2f}
+ Spread: ${opp['spread']:.2f}
+ Expected Return: {opp['return']*100:.3f}%
+ Time Difference: {abs(timestamps[opp['sell_venue']] - timestamps[opp['buy_venue']])}ms
+
+ Consider:
+ 1. Is this opportunity likely to be profitable after execution costs?
+ 2. What risks might prevent successful execution?
+ 3. What market conditions might have created this opportunity?
+ 4. How does the timing difference affect execution probability?
+ """
+
+ # Get LLM analysis
+ llm_analysis = self.run(analysis_prompt)
+
+ # Create comprehensive signal
+ return MarketSignal(
+ timestamp=datetime.now(),
+ signal_type="arbitrage_opportunity",
+ source="LatencyArbitrageAgent",
+ data={
+ "metrics": metrics,
+ "symbol": symbol,
+ "best_opportunity": metrics["best_opportunity"],
+ "all_prices": prices,
+ "llm_analysis": llm_analysis,
+ "timing": {
+ "time_difference_ms": abs(
+ timestamps[opp["sell_venue"]]
+ - timestamps[opp["buy_venue"]]
+ ),
+ "timestamps": timestamps,
+ },
+ },
+ confidence=confidence,
+ metadata={
+ "update_latency": (
+ datetime.now() - self.last_update
+ ).total_seconds(),
+ "timestamp_deltas": timestamps,
+ "venue_count": len(prices),
+ "execution_risk": 1.0
+ - timing_factor, # Higher time difference = higher risk
+ },
+ )
+
+ except Exception as e:
+ logger.error(f"Error in arbitrage analysis: {str(e)}")
+ return None
+
+
+class SwarmCoordinator:
+ def __init__(self, api_key: str):
+ self.api_key = api_key
+ self.agents = {
+ "order_book": OrderBookAgent(api_key),
+ "tick_data": TickDataAgent(api_key),
+ "latency_arb": LatencyArbitrageAgent(api_key),
+ }
+ self.signal_processors = []
+ self.signal_history = MarketDataBuffer(max_size=1000)
+ self.running = False
+ self.lock = threading.Lock()
+ self.csv_writer = SignalCSVWriter()
+
+ def register_signal_processor(self, processor):
+ """Register a new signal processor function"""
+ with self.lock:
+ self.signal_processors.append(processor)
+
+ def process_signals(self, signals: List[MarketSignal]):
+ """Process signals through all registered processors"""
+ if not signals:
+ return
+
+ self.signal_history.add(signals)
+
+ try:
+ for processor in self.signal_processors:
+ processor(signals)
+ except Exception as e:
+ logger.error(f"Error in signal processing: {e}")
+
+ def aggregate_signals(
+ self, signals: List[MarketSignal]
+ ) -> Dict[str, Any]:
+ """Aggregate multiple signals into a combined market view"""
+ if not signals:
+ return {}
+
+ self.signal_history.add(signals)
+
+ aggregated = {
+ "timestamp": datetime.now(),
+ "symbols": set(),
+ "agent_signals": {},
+ "combined_confidence": 0,
+ "market_state": {},
+ }
+
+ for signal in signals:
+ symbol = signal.data.get("symbol")
+ if symbol:
+ aggregated["symbols"].add(symbol)
+
+ agent_type = signal.source
+ if agent_type not in aggregated["agent_signals"]:
+ aggregated["agent_signals"][agent_type] = []
+ aggregated["agent_signals"][agent_type].append(signal)
+
+ # Update market state based on signal type
+ if signal.signal_type == "order_book_analysis":
+ metrics = signal.data.get("metrics", {})
+ aggregated["market_state"].update(
+ {
+ "order_book_imbalance": metrics.get(
+ "depth_imbalance"
+ ),
+ "spread": metrics.get("spread"),
+ "large_orders_detected": signal.data.get(
+ "large_order_detected"
+ ),
+ }
+ )
+ elif signal.signal_type == "tick_analysis":
+ metrics = signal.data.get("metrics", {})
+ aggregated["market_state"].update(
+ {
+ "price_momentum": metrics.get(
+ "price_momentum"
+ ),
+ "trade_intensity": metrics.get(
+ "trade_intensity"
+ ),
+ "kyle_lambda": metrics.get("kyle_lambda"),
+ }
+ )
+ elif signal.signal_type == "arbitrage_opportunity":
+ opp = signal.data.get("best_opportunity")
+ if opp:
+ aggregated["market_state"].update(
+ {
+ "arbitrage_spread": opp.get("spread"),
+ "arbitrage_return": opp.get("return"),
+ }
+ )
+
+        # Calculate combined confidence as the mean of the individual signal confidences
+ confidences = [s.confidence for s in signals]
+ if confidences:
+ aggregated["combined_confidence"] = np.mean(confidences)
+
+ return aggregated
+
+ def start(self, symbols: List[str], interval: float = 1.0):
+ """Start the swarm monitoring system"""
+ if self.running:
+ logger.warning("Swarm is already running")
+ return
+
+ self.running = True
+
+ def agent_loop(agent, symbol):
+ while self.running:
+ try:
+ if isinstance(agent, OrderBookAgent):
+ signal = agent.analyze_order_book(symbol)
+ elif isinstance(agent, TickDataAgent):
+ signal = agent.analyze_ticks(symbol)
+ elif isinstance(agent, LatencyArbitrageAgent):
+ signal = agent.find_arbitrage(symbol)
+
+ if signal:
+ agent.signal_queue.put(signal)
+ except Exception as e:
+ logger.error(
+ f"Error in {agent.agent_name} loop: {e}"
+ )
+
+ time.sleep(interval)
+
+ def signal_collection_loop():
+ while self.running:
+ try:
+ current_signals = []
+
+ # Collect signals from all agents
+ for agent in self.agents.values():
+ while not agent.signal_queue.empty():
+ signal = agent.signal_queue.get_nowait()
+ if signal:
+ current_signals.append(signal)
+
+ if current_signals:
+ # Process current signals
+ self.process_signals(current_signals)
+
+ # Aggregate and analyze
+ aggregated = self.aggregate_signals(
+ current_signals
+ )
+ logger.info(
+ f"Aggregated market view: {aggregated}"
+ )
+
+ except Exception as e:
+ logger.error(
+ f"Error in signal collection loop: {e}"
+ )
+
+ time.sleep(interval)
+
+ # Start agent threads
+ self.threads = []
+ for symbol in symbols:
+ for agent in self.agents.values():
+ thread = threading.Thread(
+ target=agent_loop,
+ args=(agent, symbol),
+ daemon=True,
+ )
+ thread.start()
+ self.threads.append(thread)
+
+ # Start signal collection thread
+ collection_thread = threading.Thread(
+ target=signal_collection_loop, daemon=True
+ )
+ collection_thread.start()
+ self.threads.append(collection_thread)
+
+ def stop(self):
+ """Stop the swarm monitoring system"""
+ self.running = False
+ for thread in self.threads:
+ thread.join(timeout=5.0)
+ logger.info("Swarm stopped")
+
+
+def market_making_processor(signals: List[MarketSignal]):
+ """Enhanced signal processor with LLM analysis integration"""
+ for signal in signals:
+ if signal.confidence > 0.8:
+ if signal.signal_type == "arbitrage_opportunity":
+ opp = signal.data.get("best_opportunity")
+ if (
+ opp and opp["return"] > 0.001
+ ): # 0.1% return threshold
+ logger.info(
+ "\nSignificant arbitrage opportunity detected:"
+ )
+ logger.info(f"Return: {opp['return']*100:.3f}%")
+ logger.info(f"Spread: ${opp['spread']:.2f}")
+ if "llm_analysis" in signal.data:
+ logger.info("\nLLM Analysis:")
+ logger.info(signal.data["llm_analysis"])
+
+ elif signal.signal_type == "order_book_analysis":
+ imbalance = signal.data["metrics"]["depth_imbalance"]
+ if abs(imbalance) > 0.3:
+ logger.info(
+ f"\nSignificant order book imbalance detected: {imbalance:.3f}"
+ )
+ if "llm_analysis" in signal.data:
+ logger.info("\nLLM Analysis:")
+ logger.info(signal.data["llm_analysis"])
+
+ elif signal.signal_type == "tick_analysis":
+ momentum = signal.data["metrics"]["price_momentum"]
+ if abs(momentum) > 0:
+ logger.info(
+ f"\nSignificant price momentum detected: {momentum:.3f}"
+ )
+ if "llm_analysis" in signal.data:
+ logger.info("\nLLM Analysis:")
+ logger.info(signal.data["llm_analysis"])
+
+
+load_dotenv()
+api_key = os.getenv("OPENAI_API_KEY")
+
+coordinator = SwarmCoordinator(api_key)
+coordinator.register_signal_processor(market_making_processor)
+
+symbols = ["BTC/USDT", "ETH/USDT"]
+
+logger.info(
+ "Starting market microstructure analysis with LLM integration..."
+)
+logger.info(f"Monitoring symbols: {symbols}")
+logger.info(
+ f"CSV files will be written to: {os.path.abspath('market_data')}"
+)
+
+try:
+ coordinator.start(symbols)
+ while True:
+ time.sleep(1)
+except KeyboardInterrupt:
+ logger.info("Gracefully shutting down...")
+ coordinator.stop()
diff --git a/new_features_examples/multi_tool_usage_agent.py b/new_features_examples/multi_tool_usage_agent.py
new file mode 100644
index 00000000..1af421e2
--- /dev/null
+++ b/new_features_examples/multi_tool_usage_agent.py
@@ -0,0 +1,420 @@
+import os
+from typing import List, Dict, Any, Optional, Callable, get_type_hints
+from dataclasses import dataclass, field
+import json
+from datetime import datetime
+import inspect
+import typing
+from typing import Union
+from swarms import Agent
+from swarm_models import OpenAIChat
+
+
+@dataclass
+class ToolDefinition:
+ name: str
+ description: str
+ parameters: Dict[str, Any]
+ required_params: List[str]
+ callable: Optional[Callable] = None
+
+
+def extract_type_hints(func: Callable) -> Dict[str, Any]:
+ """Extract parameter types from function type hints."""
+ return typing.get_type_hints(func)
+
+
+def extract_tool_info(func: Callable) -> ToolDefinition:
+ """Extract tool information from a callable function."""
+ # Get function name
+ name = func.__name__
+
+ # Get docstring
+ description = inspect.getdoc(func) or "No description available"
+
+ # Get parameters and their types
+ signature = inspect.signature(func)
+ type_hints = extract_type_hints(func)
+
+ parameters = {}
+ required_params = []
+
+ for param_name, param in signature.parameters.items():
+ # Skip self parameter for methods
+ if param_name == "self":
+ continue
+
+ param_type = type_hints.get(param_name, Any)
+
+ # Handle optional parameters
+ is_optional = (
+ param.default != inspect.Parameter.empty
+ or getattr(param_type, "__origin__", None) is Union
+ and type(None) in param_type.__args__
+ )
+
+ if not is_optional:
+ required_params.append(param_name)
+
+ parameters[param_name] = {
+ "type": str(param_type),
+ "default": (
+ None
+ if param.default is inspect.Parameter.empty
+ else param.default
+ ),
+ "required": not is_optional,
+ }
+
+ return ToolDefinition(
+ name=name,
+ description=description,
+ parameters=parameters,
+ required_params=required_params,
+ callable=func,
+ )
+
+
+@dataclass
+class FunctionSpec:
+ """Specification for a callable tool function."""
+
+ name: str
+ description: str
+ parameters: Dict[
+ str, dict
+ ] # Contains type and description for each parameter
+ return_type: str
+ return_description: str
+
+
+@dataclass
+class ExecutionStep:
+ """Represents a single step in the execution plan."""
+
+ step_id: int
+ function_name: str
+ parameters: Dict[str, Any]
+ expected_output: str
+ completed: bool = False
+ result: Any = None
+
+
+@dataclass
+class ExecutionContext:
+ """Maintains state during execution."""
+
+ task: str
+ steps: List[ExecutionStep] = field(default_factory=list)
+ results: Dict[int, Any] = field(default_factory=dict)
+ current_step: int = 0
+ history: List[Dict[str, Any]] = field(default_factory=list)
+
+
+class ToolAgent:
+ def __init__(
+ self,
+ functions: List[Callable],
+ openai_api_key: str,
+ model_name: str = "gpt-4",
+ temperature: float = 0.1,
+ ):
+ self.functions = {func.__name__: func for func in functions}
+ self.function_specs = self._analyze_functions(functions)
+
+ self.model = OpenAIChat(
+ openai_api_key=openai_api_key,
+ model_name=model_name,
+ temperature=temperature,
+ )
+
+ self.system_prompt = self._create_system_prompt()
+ self.agent = Agent(
+ agent_name="Tool-Agent",
+ system_prompt=self.system_prompt,
+ llm=self.model,
+ max_loops=1,
+ verbose=True,
+ )
+
+ def _analyze_functions(
+ self, functions: List[Callable]
+ ) -> Dict[str, FunctionSpec]:
+ """Analyze functions to create detailed specifications."""
+ specs = {}
+ for func in functions:
+ hints = get_type_hints(func)
+ sig = inspect.signature(func)
+ doc = inspect.getdoc(func) or ""
+
+ # Parse docstring for parameter descriptions
+ param_descriptions = {}
+ current_param = None
+ for line in doc.split("\n"):
+ if ":param" in line:
+ param_name = (
+ line.split(":param")[1].split(":")[0].strip()
+ )
+ desc = line.split(":", 2)[-1].strip()
+ param_descriptions[param_name] = desc
+ elif ":return:" in line:
+ return_desc = line.split(":return:")[1].strip()
+
+ # Build parameter specifications
+ parameters = {}
+ for name, param in sig.parameters.items():
+ param_type = hints.get(name, Any)
+ parameters[name] = {
+ "type": str(param_type),
+ "type_class": param_type,
+ "description": param_descriptions.get(name, ""),
+ "required": param.default == param.empty,
+ }
+
+ specs[func.__name__] = FunctionSpec(
+ name=func.__name__,
+ description=doc.split("\n")[0],
+ parameters=parameters,
+ return_type=str(hints.get("return", Any)),
+ return_description=(
+ return_desc if "return_desc" in locals() else ""
+ ),
+ )
+
+ return specs
+
+ def _create_system_prompt(self) -> str:
+ """Create system prompt with detailed function specifications."""
+ functions_desc = []
+ for spec in self.function_specs.values():
+ params_desc = []
+ for name, details in spec.parameters.items():
+ params_desc.append(
+ f" - {name}: {details['type']} - {details['description']}"
+ )
+
+ functions_desc.append(
+ f"""
+Function: {spec.name}
+Description: {spec.description}
+Parameters:
+{chr(10).join(params_desc)}
+Returns: {spec.return_type} - {spec.return_description}
+ """
+ )
+
+ return f"""You are an AI agent that creates and executes plans using available functions.
+
+Available Functions:
+{chr(10).join(functions_desc)}
+
+You must respond in two formats depending on the phase:
+
+1. Planning Phase:
+{{
+ "phase": "planning",
+ "plan": {{
+ "description": "Overall plan description",
+ "steps": [
+ {{
+ "step_id": 1,
+ "function": "function_name",
+ "parameters": {{
+ "param1": "value1",
+ "param2": "value2"
+ }},
+ "purpose": "Why this step is needed"
+ }}
+ ]
+ }}
+}}
+
+2. Execution Phase:
+{{
+ "phase": "execution",
+ "analysis": "Analysis of current result",
+ "next_action": {{
+ "type": "continue|request_input|complete",
+ "reason": "Why this action was chosen",
+ "needed_input": {{}} # If requesting input
+ }}
+}}
+
+Always:
+- Use exact function names
+- Ensure parameter types match specifications
+- Provide clear reasoning for each decision
+"""
+
+ def _execute_function(
+ self, spec: FunctionSpec, parameters: Dict[str, Any]
+ ) -> Any:
+ """Execute a function with type checking."""
+ converted_params = {}
+ for name, value in parameters.items():
+ param_spec = spec.parameters[name]
+ try:
+ # Convert value to required type
+ param_type = param_spec["type_class"]
+ if param_type in (int, float, str, bool):
+ converted_params[name] = param_type(value)
+ else:
+ converted_params[name] = value
+ except (ValueError, TypeError) as e:
+ raise ValueError(
+ f"Parameter '{name}' conversion failed: {str(e)}"
+ )
+
+ return self.functions[spec.name](**converted_params)
+
+ def run(self, task: str) -> Dict[str, Any]:
+ """Execute task with planning and step-by-step execution."""
+ context = ExecutionContext(task=task)
+ execution_log = {
+ "task": task,
+ "start_time": datetime.utcnow().isoformat(),
+ "steps": [],
+ "final_result": None,
+ }
+
+ try:
+ # Planning phase
+ plan_prompt = f"Create a plan to: {task}"
+ plan_response = self.agent.run(plan_prompt)
+ plan_data = json.loads(
+ plan_response.replace("System:", "").strip()
+ )
+
+ # Convert plan to execution steps
+ for step in plan_data["plan"]["steps"]:
+ context.steps.append(
+ ExecutionStep(
+ step_id=step["step_id"],
+ function_name=step["function"],
+ parameters=step["parameters"],
+ expected_output=step["purpose"],
+ )
+ )
+
+ # Execution phase
+ while context.current_step < len(context.steps):
+ step = context.steps[context.current_step]
+ print(
+ f"\nExecuting step {step.step_id}: {step.function_name}"
+ )
+
+ try:
+ # Execute function
+ spec = self.function_specs[step.function_name]
+ result = self._execute_function(
+ spec, step.parameters
+ )
+ context.results[step.step_id] = result
+ step.completed = True
+ step.result = result
+
+ # Get agent's analysis
+ analysis_prompt = f"""
+ Step {step.step_id} completed:
+ Function: {step.function_name}
+ Result: {json.dumps(result)}
+ Remaining steps: {len(context.steps) - context.current_step - 1}
+
+ Analyze the result and decide next action.
+ """
+
+ analysis_response = self.agent.run(
+ analysis_prompt
+ )
+ analysis_data = json.loads(
+ analysis_response.replace(
+ "System:", ""
+ ).strip()
+ )
+
+ execution_log["steps"].append(
+ {
+ "step_id": step.step_id,
+ "function": step.function_name,
+ "parameters": step.parameters,
+ "result": result,
+ "analysis": analysis_data,
+ }
+ )
+
+ if (
+ analysis_data["next_action"]["type"]
+ == "complete"
+ ):
+ if (
+ context.current_step
+ < len(context.steps) - 1
+ ):
+ continue
+ break
+
+ context.current_step += 1
+
+ except Exception as e:
+ print(f"Error in step {step.step_id}: {str(e)}")
+ execution_log["steps"].append(
+ {
+ "step_id": step.step_id,
+ "function": step.function_name,
+ "parameters": step.parameters,
+ "error": str(e),
+ }
+ )
+ raise
+
+ # Final analysis
+ final_prompt = f"""
+ Task completed. Results:
+ {json.dumps(context.results, indent=2)}
+
+ Provide final analysis and recommendations.
+ """
+
+ final_analysis = self.agent.run(final_prompt)
+ execution_log["final_result"] = {
+ "success": True,
+ "results": context.results,
+ "analysis": json.loads(
+ final_analysis.replace("System:", "").strip()
+ ),
+ }
+
+ except Exception as e:
+ execution_log["final_result"] = {
+ "success": False,
+ "error": str(e),
+ }
+
+ execution_log["end_time"] = datetime.utcnow().isoformat()
+ return execution_log
+
+
+def calculate_investment_return(
+ principal: float, rate: float, years: int
+) -> float:
+ """Calculate investment return with compound interest.
+
+ :param principal: Initial investment amount in dollars
+ :param rate: Annual interest rate as decimal (e.g., 0.07 for 7%)
+ :param years: Number of years to invest
+ :return: Final investment value
+ """
+ return principal * (1 + rate) ** years
+
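+# Sanity check: 10_000 * (1 + 0.07) ** 10 is roughly 19_671.51, so the example
+# task below should report a final value of about $19,671.51.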
+
+agent = ToolAgent(
+ functions=[calculate_investment_return],
+ openai_api_key=os.getenv("OPENAI_API_KEY"),
+)
+
+result = agent.run(
+ "Calculate returns for $10000 invested at 7% for 10 years"
+)
diff --git a/new_features_examples/persistent_legal_agent.py b/new_features_examples/persistent_legal_agent.py
new file mode 100644
index 00000000..65e8d61a
--- /dev/null
+++ b/new_features_examples/persistent_legal_agent.py
@@ -0,0 +1,113 @@
+import os
+from swarms import Agent
+from swarm_models import OpenAIChat
+from dotenv import load_dotenv
+
+# Custom system prompt for VC legal document generation
+VC_LEGAL_AGENT_PROMPT = """You are a specialized legal document assistant focusing on venture capital documentation.
+Your role is to help draft preliminary versions of common VC legal documents while adhering to these guidelines:
+
+1. Always include standard legal disclaimers
+2. Follow standard VC document structures
+3. Flag areas that need attorney review
+4. Request necessary information for document completion
+5. Maintain consistency across related documents
+6. Output <DONE> only when the document is complete and verified
+
+Remember: All output should be marked as 'DRAFT' and require professional legal review."""
+
+
+def create_vc_legal_agent():
+ load_dotenv()
+    # Get the Groq API key from the environment variable
+    api_key = os.getenv("GROQ_API_KEY")
+
+    # Configure the model with appropriate parameters for legal work
+ model = OpenAIChat(
+ openai_api_base="https://api.groq.com/openai/v1",
+ openai_api_key=api_key,
+ model_name="llama-3.1-70b-versatile",
+ temperature=0.1,
+ )
+
+ # Initialize the persistent agent
+ agent = Agent(
+ agent_name="VC-Legal-Document-Agent",
+ system_prompt=VC_LEGAL_AGENT_PROMPT,
+ llm=model,
+ max_loops="auto", # Allows multiple iterations until completion
+        stopping_token="<DONE>",  # Agent continues until this token is output
+ autosave=True,
+ dashboard=True, # Enable dashboard for monitoring
+ verbose=True,
+ dynamic_temperature_enabled=False, # Disable for consistency in legal documents
+ saved_state_path="vc_legal_agent_state.json",
+ user_name="legal_corp",
+ retry_attempts=3,
+ context_length=200000,
+ return_step_meta=True,
+ output_type="string",
+ streaming_on=False,
+ )
+
+ return agent
+
+
+def generate_legal_document(agent, document_type, parameters):
+ """
+ Generate a legal document with multiple refinement iterations
+
+ Args:
+ agent: The initialized VC legal agent
+ document_type: Type of document to generate (e.g., "term_sheet", "investment_agreement")
+ parameters: Dict containing necessary parameters for the document
+
+ Returns:
+ str: The generated document content
+ """
+ prompt = f"""
+ Generate a {document_type} with the following parameters:
+ {parameters}
+
+ Please follow these steps:
+ 1. Create initial draft
+ 2. Review for completeness
+ 3. Add necessary legal disclaimers
+ 4. Verify all required sections
+    5. Output <DONE> when complete
+
+ Include [REQUIRES LEGAL REVIEW] tags for sections needing attorney attention.
+ """
+
+ return agent.run(prompt)
+
+
+# Example usage
+if __name__ == "__main__":
+ # Initialize the agent
+ legal_agent = create_vc_legal_agent()
+
+ # Example parameters for a term sheet
+ parameters = {
+ "company_name": "TechStartup Inc.",
+ "investment_amount": "$5,000,000",
+ "valuation": "$20,000,000",
+ "investor_rights": [
+ "Board seat",
+ "Pro-rata rights",
+ "Information rights",
+ ],
+ "type_of_security": "Series A Preferred Stock",
+ }
+
+ # Generate a term sheet
+ document = generate_legal_document(
+ legal_agent, "term_sheet", parameters
+ )
+
+ # Save the generated document
+ with open("generated_term_sheet_draft.md", "w") as f:
+ f.write(document)
diff --git a/new_features_examples/real_estate_agent.py b/new_features_examples/real_estate_agent.py
new file mode 100644
index 00000000..92864209
--- /dev/null
+++ b/new_features_examples/real_estate_agent.py
@@ -0,0 +1,319 @@
+"""
+Zoe - Real Estate Agent
+
+"""
+
+from typing import Optional, Dict, Any, List
+from dataclasses import dataclass
+from datetime import datetime
+import os
+import json
+import requests
+from loguru import logger
+from swarms import Agent
+from swarm_models import OpenAIChat
+from dotenv import load_dotenv
+from enum import Enum
+
+# Configure loguru logger
+logger.add(
+ "logs/real_estate_agent_{time}.log",
+ rotation="500 MB",
+ retention="10 days",
+ level="INFO",
+ format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
+)
+
+
+class PropertyType(str, Enum):
+ """Enum for property types"""
+
+ OFFICE = "office"
+ RETAIL = "retail"
+ INDUSTRIAL = "industrial"
+ MIXED_USE = "mixed-use"
+ LAND = "land"
+
+
+@dataclass
+class PropertyListing:
+ """Data class for commercial property listings"""
+
+ property_id: str
+ address: str
+ city: str
+ state: str
+ zip_code: str
+ price: float
+ square_footage: float
+ property_type: PropertyType
+ zoning: str
+ listing_date: datetime
+ lat: float
+ lng: float
+ description: Optional[str] = None
+ features: Optional[List[str]] = None
+ images: Optional[List[str]] = None
+
+
+class PropertyRadarAPI:
+ """Client for PropertyRadar API integration"""
+
+ def __init__(self, api_key: str):
+ """Initialize PropertyRadar API client
+
+ Args:
+ api_key (str): PropertyRadar API key
+ """
+ self.api_key = api_key
+ self.base_url = "https://api.propertyradar.com/v1"
+ self.session = requests.Session()
+ self.session.headers.update(
+ {
+ "Authorization": f"Bearer {api_key}",
+ "Content-Type": "application/json",
+ }
+ )
+
+ def search_properties(
+ self,
+ max_price: float = 10_000_000,
+ property_types: List[PropertyType] = None,
+ location: Dict[str, Any] = None,
+ min_sqft: Optional[float] = None,
+ max_sqft: Optional[float] = None,
+ page: int = 1,
+ limit: int = 20,
+ ) -> List[PropertyListing]:
+ """
+ Search for commercial properties using PropertyRadar API
+
+ Args:
+ max_price (float): Maximum property price
+ property_types (List[PropertyType]): Types of properties to search for
+ location (Dict[str, Any]): Location criteria (city, county, or coordinates)
+ min_sqft (Optional[float]): Minimum square footage
+ max_sqft (Optional[float]): Maximum square footage
+ page (int): Page number for pagination
+ limit (int): Number of results per page
+
+ Returns:
+ List[PropertyListing]: List of matching properties
+ """
+ try:
+ # Build the query parameters
+ params = {
+ "price_max": max_price,
+ "property_types": (
+ [pt.value for pt in property_types]
+ if property_types
+ else None
+ ),
+ "page": page,
+ "limit": limit,
+ "for_sale": True,
+ "state": "FL", # Florida only
+ "commercial_property": True,
+ }
+
+ # Add location parameters
+ if location:
+ params.update(location)
+
+ # Add square footage filters
+ if min_sqft:
+ params["square_feet_min"] = min_sqft
+ if max_sqft:
+ params["square_feet_max"] = max_sqft
+
+ # Make the API request
+ response = self.session.get(
+ f"{self.base_url}/properties",
+ params={
+ k: v for k, v in params.items() if v is not None
+ },
+ )
+ response.raise_for_status()
+
+ # Parse the response
+ properties_data = response.json()
+
+ # Convert to PropertyListing objects
+ return [
+ PropertyListing(
+ property_id=prop["id"],
+ address=prop["address"],
+ city=prop["city"],
+ state=prop["state"],
+ zip_code=prop["zip_code"],
+ price=float(prop["price"]),
+ square_footage=float(prop["square_feet"]),
+ property_type=PropertyType(prop["property_type"]),
+ zoning=prop["zoning"],
+ listing_date=datetime.fromisoformat(
+ prop["list_date"]
+ ),
+ lat=float(prop["latitude"]),
+ lng=float(prop["longitude"]),
+ description=prop.get("description"),
+ features=prop.get("features", []),
+ images=prop.get("images", []),
+ )
+ for prop in properties_data["results"]
+ ]
+
+ except requests.RequestException as e:
+ logger.error(f"Error fetching properties: {str(e)}")
+ raise
+
+
+class CommercialRealEstateAgent:
+ """Agent for searching and analyzing commercial real estate properties"""
+
+ def __init__(
+ self,
+ openai_api_key: str,
+ propertyradar_api_key: str,
+ model_name: str = "gpt-4",
+ temperature: float = 0.1,
+ saved_state_path: Optional[str] = None,
+ ):
+ """Initialize the real estate agent
+
+ Args:
+ openai_api_key (str): OpenAI API key
+ propertyradar_api_key (str): PropertyRadar API key
+ model_name (str): Name of the LLM model to use
+ temperature (float): Temperature setting for the LLM
+ saved_state_path (Optional[str]): Path to save agent state
+ """
+ self.property_api = PropertyRadarAPI(propertyradar_api_key)
+
+ # Initialize OpenAI model
+ self.model = OpenAIChat(
+ openai_api_key=openai_api_key,
+ model_name=model_name,
+ temperature=temperature,
+ )
+
+ # Initialize the agent
+ self.agent = Agent(
+ agent_name="Commercial-Real-Estate-Agent",
+ system_prompt=self._get_system_prompt(),
+ llm=self.model,
+ max_loops=1,
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ saved_state_path=saved_state_path,
+ context_length=200000,
+ streaming_on=False,
+ )
+
+ logger.info(
+ "Commercial Real Estate Agent initialized successfully"
+ )
+
+ def _get_system_prompt(self) -> str:
+ """Get the system prompt for the agent"""
+ return """You are a specialized commercial real estate agent assistant focused on Central Florida properties.
+ Your primary responsibilities are:
+ 1. Search for commercial properties under $10 million
+ 2. Focus on properties zoned for commercial use
+ 3. Provide detailed analysis of property features, location benefits, and potential ROI
+ 4. Consider local market conditions and growth potential
+ 5. Verify zoning compliance and restrictions
+
+ When analyzing properties, consider:
+ - Current market valuations
+ - Local business development plans
+ - Traffic patterns and accessibility
+ - Nearby amenities and businesses
+ - Future development potential"""
+
+ def search_properties(
+ self,
+ max_price: float = 10_000_000,
+        property_types: Optional[List[PropertyType]] = None,
+        location: Optional[Dict[str, Any]] = None,
+ min_sqft: Optional[float] = None,
+ max_sqft: Optional[float] = None,
+ ) -> List[Dict[str, Any]]:
+ """
+ Search for properties and provide analysis
+
+ Args:
+ max_price (float): Maximum property price
+ property_types (List[PropertyType]): Types of properties to search
+ location (Dict[str, Any]): Location criteria
+ min_sqft (Optional[float]): Minimum square footage
+ max_sqft (Optional[float]): Maximum square footage
+
+ Returns:
+ List[Dict[str, Any]]: List of properties with analysis
+ """
+ try:
+ # Search for properties
+ properties = self.property_api.search_properties(
+ max_price=max_price,
+ property_types=property_types,
+ location=location,
+ min_sqft=min_sqft,
+ max_sqft=max_sqft,
+ )
+
+ # Analyze each property
+ analyzed_properties = []
+ for prop in properties:
+ analysis = self.agent.run(
+ f"Analyze this commercial property:\n"
+ f"Address: {prop.address}, {prop.city}, FL {prop.zip_code}\n"
+ f"Price: ${prop.price:,.2f}\n"
+ f"Square Footage: {prop.square_footage:,.0f}\n"
+ f"Property Type: {prop.property_type.value}\n"
+ f"Zoning: {prop.zoning}\n"
+ f"Description: {prop.description or 'Not provided'}"
+ )
+
+ analyzed_properties.append(
+ {"property": prop.__dict__, "analysis": analysis}
+ )
+
+ logger.info(
+ f"Successfully analyzed {len(analyzed_properties)} properties"
+ )
+ return analyzed_properties
+
+ except Exception as e:
+ logger.error(
+ f"Error in property search and analysis: {str(e)}"
+ )
+ raise
+
+
+def main():
+ """Main function to demonstrate usage"""
+ load_dotenv()
+
+ # Initialize the agent
+ agent = CommercialRealEstateAgent(
+ openai_api_key=os.getenv("OPENAI_API_KEY"),
+ propertyradar_api_key=os.getenv("PROPERTYRADAR_API_KEY"),
+ saved_state_path="real_estate_agent_state.json",
+ )
+
+ # Example search
+ results = agent.search_properties(
+ max_price=5_000_000,
+ property_types=[PropertyType.RETAIL, PropertyType.OFFICE],
+ location={"city": "Orlando", "radius_miles": 25},
+ min_sqft=2000,
+ )
+
+ # Save results
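+    # default=str lets json.dump serialize the datetime and Enum values inside PropertyListing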
+ with open("search_results.json", "w") as f:
+ json.dump(results, f, default=str, indent=2)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/new_features_examples/rearrange_test.py b/new_features_examples/rearrange_test.py
new file mode 100644
index 00000000..d85e435a
--- /dev/null
+++ b/new_features_examples/rearrange_test.py
@@ -0,0 +1,121 @@
+import os
+
+from swarms import Agent, AgentRearrange
+
+from swarm_models import OpenAIChat
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+
+# Initialize the boss agent (Director)
+boss_agent = Agent(
+ agent_name="BossAgent",
+ system_prompt="""
+ You are the BossAgent responsible for managing and overseeing a swarm of agents analyzing company expenses.
+ Your job is to dynamically assign tasks, prioritize their execution, and ensure that all agents collaborate efficiently.
+ After receiving a report on the company's expenses, you will break down the work into smaller tasks,
+ assigning specific tasks to each agent, such as detecting recurring high costs, categorizing expenditures,
+ and identifying unnecessary transactions. Ensure the results are communicated back in a structured way
+ so the finance team can take actionable steps to cut off unproductive spending. You also monitor and
+ dynamically adapt the swarm to optimize their performance. Finally, you summarize their findings
+ into a coherent report.
+ """,
+ llm=model,
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="boss_agent.json",
+)
+
+# Initialize worker 1: Expense Analyzer
+worker1 = Agent(
+ agent_name="ExpenseAnalyzer",
+ system_prompt="""
+ Your task is to carefully analyze the company's expense data provided to you.
+ You will focus on identifying high-cost recurring transactions, categorizing expenditures
+ (e.g., marketing, operations, utilities, etc.), and flagging areas where there seems to be excessive spending.
+ You will provide a detailed breakdown of each category, along with specific recommendations for cost-cutting.
+ Pay close attention to monthly recurring subscriptions, office supplies, and non-essential expenditures.
+ """,
+ llm=model,
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="worker1.json",
+)
+
+# Initialize worker 2: Summary Generator
+worker2 = Agent(
+ agent_name="SummaryGenerator",
+ system_prompt="""
+ After receiving the detailed breakdown from the ExpenseAnalyzer,
+ your task is to create a concise summary of the findings. You will focus on the most actionable insights,
+ such as highlighting the specific transactions that can be immediately cut off and summarizing the areas
+ where the company is overspending. Your summary will be used by the BossAgent to generate the final report.
+ Be clear and to the point, emphasizing the urgency of cutting unnecessary expenses.
+ """,
+ llm=model,
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="worker2.json",
+)
+
+# Swarm-Level Prompt (Collaboration Prompt)
+swarm_prompt = """
+ As a swarm, your collective goal is to analyze the company's expenses and identify transactions that should be cut off.
+ You will work collaboratively to break down the entire process of expense analysis into manageable steps.
+ The BossAgent will direct the flow and assign tasks dynamically to the agents. The ExpenseAnalyzer will first
+ focus on breaking down the expense report, identifying high-cost recurring transactions, categorizing them,
+ and providing recommendations for potential cost reduction. After the analysis, the SummaryGenerator will then
+ consolidate all the findings into an actionable summary that the finance team can use to immediately cut off unnecessary expenses.
+    Together, your collaboration is essential to streamlining and improving the company's financial health.
+"""
+
+# Create a list of agents
+agents = [boss_agent, worker1, worker2]
+
+# Define the flow pattern for the swarm
+flow = "BossAgent -> ExpenseAnalyzer -> SummaryGenerator"
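+# Each "->" hands the previous agent's output to the next agent in sequence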
+
+# Using AgentRearrange class to manage the swarm
+agent_system = AgentRearrange(
+ name="pe-swarm",
+    description="Expense analysis swarm coordinating a boss agent and two worker agents",
+ agents=agents,
+ flow=flow,
+ return_json=False,
+ output_type="final",
+ max_loops=1,
+ # docs=["SECURITY.md"],
+)
+
+# Input task for the swarm
+task = f"""
+
+ {swarm_prompt}
+
+ The company has been facing a rising number of unnecessary expenses, and the finance team needs a detailed
+ analysis of recent transactions to identify which expenses can be cut off to improve profitability.
+ Analyze the provided transaction data and create a detailed report on cost-cutting opportunities,
+ focusing on recurring transactions and non-essential expenditures.
+"""
+
+# Run the swarm system with the task
+output = agent_system.run(task)
+print(output)
diff --git a/new_features_examples/sequential_worflow_test.py b/new_features_examples/sequential_worflow_test.py
new file mode 100644
index 00000000..8d204b39
--- /dev/null
+++ b/new_features_examples/sequential_worflow_test.py
@@ -0,0 +1,118 @@
+import os
+from dotenv import load_dotenv
+from swarms import Agent, SequentialWorkflow
+from swarm_models import OpenAIChat
+
+load_dotenv()
+
+# Get the Groq API key from the environment variable
+api_key = os.getenv("GROQ_API_KEY")
+
+# Model
+model = OpenAIChat(
+ openai_api_base="https://api.groq.com/openai/v1",
+ openai_api_key=api_key,
+ model_name="llama-3.1-70b-versatile",
+ temperature=0.1,
+)
+
+
+# Initialize specialized agents
+data_extractor_agent = Agent(
+ agent_name="Data-Extractor",
+ system_prompt=None,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="data_extractor_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+summarizer_agent = Agent(
+ agent_name="Document-Summarizer",
+ system_prompt=None,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="summarizer_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+financial_analyst_agent = Agent(
+ agent_name="Financial-Analyst",
+ system_prompt=None,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="financial_analyst_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+market_analyst_agent = Agent(
+ agent_name="Market-Analyst",
+ system_prompt=None,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="market_analyst_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+operational_analyst_agent = Agent(
+ agent_name="Operational-Analyst",
+ system_prompt=None,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="operational_analyst_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+# Initialize the SequentialWorkflow
+router = SequentialWorkflow(
+ name="pe-document-analysis-swarm",
+ description="Analyze documents for private equity due diligence and investment decision-making",
+ max_loops=1,
+ agents=[
+ data_extractor_agent,
+ summarizer_agent,
+ financial_analyst_agent,
+ market_analyst_agent,
+ operational_analyst_agent,
+ ],
+ output_type="all",
+)
+
+# Example usage
+if __name__ == "__main__":
+    # Example task: research where to find Series A term sheet templates
+    result = router.run(
+        "Where is the best place to find template term sheets for Series A startups? Provide links and references.",
+ img=None,
+ )
+ print(result)
diff --git a/new_features_examples/sequential_workflow.py b/new_features_examples/sequential_workflow.py
new file mode 100644
index 00000000..c688b088
--- /dev/null
+++ b/new_features_examples/sequential_workflow.py
@@ -0,0 +1,143 @@
+import os
+from dotenv import load_dotenv
+from swarms import Agent, SequentialWorkflow
+from swarm_models import OpenAIChat
+
+load_dotenv()
+
+# Get the Groq API key from the environment variable
+api_key = os.getenv("GROQ_API_KEY")
+
+# Model
+model = OpenAIChat(
+ openai_api_base="https://api.groq.com/openai/v1",
+ openai_api_key=api_key,
+ model_name="llama-3.1-70b-versatile",
+ temperature=0.1,
+)
+
+
+# Initialize specialized agents
+data_extractor_agent = Agent(
+ agent_name="Data-Extractor",
+ system_prompt="""You are a data extraction specialist. Your role is to:
+ 1. Extract key information, data points, and metrics from documents
+ 2. Identify and pull out important facts, figures, and statistics
+ 3. Structure extracted data in a clear, organized format
+ 4. Flag any inconsistencies or missing data
+ 5. Ensure accuracy in data extraction while maintaining context""",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="data_extractor_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+summarizer_agent = Agent(
+ agent_name="Document-Summarizer",
+ system_prompt="""You are a document summarization expert. Your role is to:
+ 1. Create concise, comprehensive summaries of documents
+ 2. Highlight key points and main takeaways
+ 3. Maintain the essential meaning while reducing length
+ 4. Structure summaries in a logical, readable format
+ 5. Identify and emphasize critical insights""",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="summarizer_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+financial_analyst_agent = Agent(
+ agent_name="Financial-Analyst",
+ system_prompt="""You are a financial analysis expert. Your role is to:
+ 1. Analyze financial statements and metrics
+ 2. Evaluate company valuations and financial projections
+ 3. Assess financial risks and opportunities
+ 4. Provide insights on financial performance and health
+ 5. Make recommendations based on financial analysis""",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="financial_analyst_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+market_analyst_agent = Agent(
+ agent_name="Market-Analyst",
+ system_prompt="""You are a market analysis expert. Your role is to:
+ 1. Analyze market trends and dynamics
+ 2. Evaluate competitive landscape and market positioning
+ 3. Identify market opportunities and threats
+ 4. Assess market size and growth potential
+ 5. Provide strategic market insights and recommendations""",
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="market_analyst_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+operational_analyst_agent = Agent(
+ agent_name="Operational-Analyst",
+ system_prompt="""You are an operational analysis expert. Your role is to:
+ 1. Analyze business operations and processes
+ 2. Evaluate operational efficiency and effectiveness
+ 3. Identify operational risks and opportunities
+ 4. Assess scalability and growth potential
+ 5. Provide recommendations for operational improvements""",
+ llm=model,
+ max_loops=2,
+ autosave=True,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="operational_analyst_agent.json",
+ user_name="pe_firm",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+)
+
+# Initialize the SequentialWorkflow
+router = SequentialWorkflow(
+ name="pe-document-analysis-swarm",
+ description="Analyze documents for private equity due diligence and investment decision-making",
+ max_loops=1,
+ agents=[
+ data_extractor_agent,
+ summarizer_agent,
+ financial_analyst_agent,
+ market_analyst_agent,
+ operational_analyst_agent,
+ ],
+ output_type="all",
+)
+
+# Example usage
+if __name__ == "__main__":
+    # Example task: research where to find Series A term sheet templates
+    result = router.run(
+        "Where is the best place to find template term sheets for Series A startups? Provide links and references.",
+ no_use_clusterops=True,
+ )
+ print(result)
diff --git a/new_features_examples/spike/agent_rearrange_test.py b/new_features_examples/spike/agent_rearrange_test.py
new file mode 100644
index 00000000..e6aa044d
--- /dev/null
+++ b/new_features_examples/spike/agent_rearrange_test.py
@@ -0,0 +1,238 @@
+"""
+Todo
+
+- You send structured data to the swarm through the user's form
+- then connect RAG for every agent using LlamaIndex to remember all the students' data
+- structured outputs
+"""
+
+import os
+from dotenv import load_dotenv
+from swarms import Agent, AgentRearrange
+from swarm_models import OpenAIChat, OpenAIFunctionCaller
+from pydantic import BaseModel
+from typing import List
+
+
+class CollegeLog(BaseModel):
+ college_name: str
+ college_description: str
+ college_admission_requirements: str
+
+
+class CollegesRecommendation(BaseModel):
+ colleges: List[CollegeLog]
+ reasoning: str
+
+
+load_dotenv()
+
+# Get the API key from environment variable
+api_key = os.getenv("GROQ_API_KEY")
+
+# Initialize the model
+model = OpenAIChat(
+ openai_api_base="https://api.groq.com/openai/v1",
+ openai_api_key=api_key,
+ model_name="llama-3.1-70b-versatile",
+ temperature=0.1,
+)
+
+FINAL_AGENT_PROMPT = """
+You are a college selection final decision maker. Your role is to:
+ 1. Synthesize all previous analyses and discussions
+ 2. Weigh competing factors and trade-offs
+ 3. Create a final ranked list of recommended colleges
+ 4. Provide clear rationale for each recommendation
+ 5. Include specific action items for each selected school
+ 6. Outline next steps in the application process
+
+ Focus on creating actionable, well-reasoned final recommendations that
+ balance all relevant factors and stakeholder input.
+
+"""
+
+function_caller = OpenAIFunctionCaller(
+ system_prompt=FINAL_AGENT_PROMPT,
+ openai_api_key=os.getenv("OPENAI_API_KEY"),
+ base_model=CollegesRecommendation,
+ parallel_tool_calls=True,
+)
+
+# Student Profile Analyzer Agent
+profile_analyzer_agent = Agent(
+ agent_name="Student-Profile-Analyzer",
+ system_prompt="""You are an expert student profile analyzer. Your role is to:
+ 1. Analyze academic performance, test scores, and extracurricular activities
+ 2. Identify student's strengths, weaknesses, and unique qualities
+ 3. Evaluate personal statements and essays
+ 4. Assess leadership experiences and community involvement
+ 5. Determine student's preferences for college environment, location, and programs
+ 6. Create a comprehensive student profile summary
+
+ Always consider both quantitative metrics (GPA, test scores) and qualitative aspects
+ (personal growth, challenges overcome, unique perspectives).""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="profile_analyzer_agent.json",
+ user_name="student",
+ context_length=200000,
+ output_type="string",
+)
+
+# College Research Agent
+college_research_agent = Agent(
+ agent_name="College-Research-Specialist",
+ system_prompt="""You are a college research specialist. Your role is to:
+ 1. Maintain updated knowledge of college admission requirements
+ 2. Research academic programs, campus culture, and student life
+ 3. Analyze admission statistics and trends
+ 4. Evaluate college-specific opportunities and resources
+ 5. Consider financial aid availability and scholarship opportunities
+ 6. Track historical admission data and acceptance rates
+
+ Focus on providing accurate, comprehensive information about each institution
+ while considering both academic and cultural fit factors.""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="college_research_agent.json",
+ user_name="researcher",
+ context_length=200000,
+ output_type="string",
+)
+
+# College Match Agent
+college_match_agent = Agent(
+ agent_name="College-Match-Maker",
+ system_prompt="""You are a college matching specialist. Your role is to:
+ 1. Compare student profiles with college requirements
+ 2. Evaluate fit based on academic, social, and cultural factors
+ 3. Consider geographic preferences and constraints
+ 4. Assess financial fit and aid opportunities
+ 5. Create tiered lists of reach, target, and safety schools
+ 6. Explain the reasoning behind each match
+
+ Always provide a balanced list with realistic expectations while
+ considering both student preferences and admission probability.""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="college_match_agent.json",
+ user_name="matcher",
+ context_length=200000,
+ output_type="string",
+)
+
+# Debate Moderator Agent
+debate_moderator_agent = Agent(
+ agent_name="Debate-Moderator",
+ system_prompt="""You are a college selection debate moderator. Your role is to:
+ 1. Facilitate discussions between different perspectives
+ 2. Ensure all relevant factors are considered
+ 3. Challenge assumptions and biases
+ 4. Synthesize different viewpoints
+ 5. Guide the group toward consensus
+ 6. Document key points of agreement and disagreement
+
+ Maintain objectivity while ensuring all important factors are thoroughly discussed
+ and evaluated.""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="debate_moderator_agent.json",
+ user_name="moderator",
+ context_length=200000,
+ output_type="string",
+)
+
+# Critique Agent
+critique_agent = Agent(
+ agent_name="College-Selection-Critic",
+ system_prompt="""You are a college selection critic. Your role is to:
+ 1. Evaluate the strength of college matches
+ 2. Identify potential overlooked factors
+ 3. Challenge assumptions in the selection process
+ 4. Assess risks and potential drawbacks
+ 5. Provide constructive feedback on selections
+ 6. Suggest alternative options when appropriate
+
+ Focus on constructive criticism that helps improve the final college list
+ while maintaining realistic expectations.""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="critique_agent.json",
+ user_name="critic",
+ context_length=200000,
+ output_type="string",
+)
+
+# Final Decision Agent
+final_decision_agent = Agent(
+ agent_name="Final-Decision-Maker",
+ system_prompt="""
+ You are a college selection final decision maker. Your role is to:
+ 1. Synthesize all previous analyses and discussions
+ 2. Weigh competing factors and trade-offs
+ 3. Create a final ranked list of recommended colleges
+ 4. Provide clear rationale for each recommendation
+ 5. Include specific action items for each selected school
+ 6. Outline next steps in the application process
+
+ Focus on creating actionable, well-reasoned final recommendations that
+ balance all relevant factors and stakeholder input.
+ """,
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="final_decision_agent.json",
+ user_name="decision_maker",
+ context_length=200000,
+ output_type="string",
+)
+
+# Initialize the AgentRearrange workflow
+college_selection_workflow = AgentRearrange(
+ name="college-selection-swarm",
+ description="Comprehensive college selection and analysis system",
+ max_loops=1,
+ agents=[
+ profile_analyzer_agent,
+ college_research_agent,
+ college_match_agent,
+ debate_moderator_agent,
+ critique_agent,
+ final_decision_agent,
+ ],
+ output_type="all",
+ flow=f"{profile_analyzer_agent.name} -> {college_research_agent.name} -> {college_match_agent.name} -> {debate_moderator_agent.name} -> {critique_agent.name} -> {final_decision_agent.name}",
+)
+
+# Example usage
+if __name__ == "__main__":
+ # Example student profile input
+ student_profile = """
+ Student Profile:
+ - GPA: 3.8
+ - SAT: 1450
+ - Interests: Computer Science, Robotics
+ - Location Preference: East Coast
+ - Extracurriculars: Robotics Club President, Math Team
+ - Budget: Need financial aid
+ - Preferred Environment: Medium-sized urban campus
+ """
+
+ # Run the comprehensive college selection analysis
+ result = college_selection_workflow.run(
+ student_profile,
+ no_use_clusterops=True,
+ )
+ print(result)
diff --git a/new_features_examples/spike/function_caller_example.py b/new_features_examples/spike/function_caller_example.py
new file mode 100644
index 00000000..0578df7d
--- /dev/null
+++ b/new_features_examples/spike/function_caller_example.py
@@ -0,0 +1,64 @@
+"""
+Todo
+
+- You send structured data to the swarm through the user's form
+- then connect RAG for every agent using LlamaIndex to remember all the students' data
+- structured outputs
+"""
+
+import os
+from dotenv import load_dotenv
+from swarm_models import OpenAIChat, OpenAIFunctionCaller
+from pydantic import BaseModel
+from typing import List
+
+
+class CollegeLog(BaseModel):
+ college_name: str
+ college_description: str
+ college_admission_requirements: str
+
+
+class CollegesRecommendation(BaseModel):
+ colleges: List[CollegeLog]
+ reasoning: str
+
+
+load_dotenv()
+
+# Get the API key from environment variable
+api_key = os.getenv("GROQ_API_KEY")
+
+# Initialize the model
+model = OpenAIChat(
+ openai_api_base="https://api.groq.com/openai/v1",
+ openai_api_key=api_key,
+ model_name="llama-3.1-70b-versatile",
+ temperature=0.1,
+)
+
+function_caller = OpenAIFunctionCaller(
+ system_prompt="""You are a college selection final decision maker. Your role is to:
+ - Balance all relevant factors and stakeholder input.
+ - Only return the output in the schema format.
+ """,
+ openai_api_key=os.getenv("OPENAI_API_KEY"),
+ base_model=CollegesRecommendation,
+ # parallel_tool_calls=True,
+)
+
+
+print(
+ function_caller.run(
+ """
+ Student Profile: Kye Gomez
+ - GPA: 3.8
+ - SAT: 1450
+ - Interests: Computer Science, Robotics
+ - Location Preference: East Coast
+ - Extracurriculars: Robotics Club President, Math Team
+ - Budget: Need financial aid
+ - Preferred Environment: Medium-sized urban campus
+ """
+ )
+)
diff --git a/new_features_examples/spike/memory.py b/new_features_examples/spike/memory.py
new file mode 100644
index 00000000..ce83aa7c
--- /dev/null
+++ b/new_features_examples/spike/memory.py
@@ -0,0 +1,116 @@
+from typing import Optional
+from pathlib import Path
+from loguru import logger
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+
+
+class LlamaIndexDB:
+ """A class to manage document indexing and querying using LlamaIndex.
+
+ This class provides functionality to add documents from a directory and query the indexed documents.
+
+ Args:
+ data_dir (str): Directory containing documents to index. Defaults to "docs".
+ **kwargs: Additional arguments passed to SimpleDirectoryReader and VectorStoreIndex.
+ SimpleDirectoryReader kwargs:
+ - filename_as_id (bool): Use filenames as document IDs
+ - recursive (bool): Recursively read subdirectories
+ - required_exts (List[str]): Only read files with these extensions
+ - exclude_hidden (bool): Skip hidden files
+
+ VectorStoreIndex kwargs:
+ - service_context: Custom service context
+ - embed_model: Custom embedding model
+ - similarity_top_k (int): Number of similar docs to retrieve
+ - store_nodes_override (bool): Override node storage
+ """
+
+ def __init__(self, data_dir: str = "docs", **kwargs) -> None:
+ """Initialize the LlamaIndexDB with an empty index.
+
+ Args:
+ data_dir (str): Directory containing documents to index
+ **kwargs: Additional arguments for SimpleDirectoryReader and VectorStoreIndex
+ """
+ self.data_dir = data_dir
+ self.index: Optional[VectorStoreIndex] = None
+ self.reader_kwargs = {
+ k: v
+ for k, v in kwargs.items()
+ if k
+ in SimpleDirectoryReader.__init__.__code__.co_varnames
+ }
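+        # Remaining kwargs are assumed to belong to VectorStoreIndex;
+        # co_varnames is only a rough heuristic for SimpleDirectoryReader's parameters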
+ self.index_kwargs = {
+ k: v
+ for k, v in kwargs.items()
+ if k not in self.reader_kwargs
+ }
+
+ logger.info("Initialized LlamaIndexDB")
+ data_path = Path(self.data_dir)
+ if not data_path.exists():
+ logger.error(f"Directory not found: {self.data_dir}")
+ raise FileNotFoundError(
+ f"Directory {self.data_dir} does not exist"
+ )
+
+ try:
+ documents = SimpleDirectoryReader(
+ self.data_dir, **self.reader_kwargs
+ ).load_data()
+ self.index = VectorStoreIndex.from_documents(
+ documents, **self.index_kwargs
+ )
+ logger.success(
+ f"Successfully indexed documents from {self.data_dir}"
+ )
+ except Exception as e:
+ logger.error(f"Error indexing documents: {str(e)}")
+ raise
+
+ def query(self, query: str, **kwargs) -> str:
+ """Query the indexed documents.
+
+ Args:
+ query (str): The query string to search for
+ **kwargs: Additional arguments passed to the query engine
+ - similarity_top_k (int): Number of similar documents to retrieve
+ - streaming (bool): Enable streaming response
+ - response_mode (str): Response synthesis mode
+ - max_tokens (int): Maximum tokens in response
+
+ Returns:
+ str: The response from the query engine
+
+ Raises:
+ ValueError: If no documents have been indexed yet
+ """
+ if self.index is None:
+ logger.error("No documents have been indexed yet")
+ raise ValueError("Must add documents before querying")
+
+ try:
+ query_engine = self.index.as_query_engine(**kwargs)
+ response = query_engine.query(query)
+ print(response)
+ logger.info(f"Successfully queried: {query}")
+ return str(response)
+ except Exception as e:
+ logger.error(f"Error during query: {str(e)}")
+ raise
+
+
+# # Example usage
+# llama_index_db = LlamaIndexDB(
+# data_dir="docs",
+# filename_as_id=True,
+# recursive=True,
+# required_exts=[".txt", ".pdf", ".docx"],
+# similarity_top_k=3
+# )
+# response = llama_index_db.query(
+# "What is the medical history of patient 1?",
+# streaming=True,
+# response_mode="compact"
+# )
+# print(response)
diff --git a/new_features_examples/spike/spike.zip b/new_features_examples/spike/spike.zip
new file mode 100644
index 00000000..f817aaf2
Binary files /dev/null and b/new_features_examples/spike/spike.zip differ
diff --git a/new_features_examples/spike/test.py b/new_features_examples/spike/test.py
new file mode 100644
index 00000000..3c1f5fb5
--- /dev/null
+++ b/new_features_examples/spike/test.py
@@ -0,0 +1,237 @@
+"""
+Todo
+
+- You send structured data to the swarm through the user's form
+- then connect RAG for every agent using LlamaIndex to remember all the students' data
+- structured outputs
+"""
+
+import os
+from dotenv import load_dotenv
+from swarms import Agent, SequentialWorkflow
+from swarm_models import OpenAIChat, OpenAIFunctionCaller
+from pydantic import BaseModel
+from typing import List
+
+
+class CollegeLog(BaseModel):
+ college_name: str
+ college_description: str
+ college_admission_requirements: str
+
+
+class CollegesRecommendation(BaseModel):
+ colleges: List[CollegeLog]
+ reasoning: str
+
+
+load_dotenv()
+
+# Get the API key from environment variable
+api_key = os.getenv("GROQ_API_KEY")
+
+# Initialize the model
+model = OpenAIChat(
+ openai_api_base="https://api.groq.com/openai/v1",
+ openai_api_key=api_key,
+ model_name="llama-3.1-70b-versatile",
+ temperature=0.1,
+)
+
+FINAL_AGENT_PROMPT = """
+You are a college selection final decision maker. Your role is to:
+ 1. Synthesize all previous analyses and discussions
+ 2. Weigh competing factors and trade-offs
+ 3. Create a final ranked list of recommended colleges
+ 4. Provide clear rationale for each recommendation
+ 5. Include specific action items for each selected school
+ 6. Outline next steps in the application process
+
+ Focus on creating actionable, well-reasoned final recommendations that
+ balance all relevant factors and stakeholder input.
+
+"""
+
+function_caller = OpenAIFunctionCaller(
+ system_prompt=FINAL_AGENT_PROMPT,
+ openai_api_key=os.getenv("OPENAI_API_KEY"),
+ base_model=CollegesRecommendation,
+ parallel_tool_calls=True,
+)
+
+# Student Profile Analyzer Agent
+profile_analyzer_agent = Agent(
+ agent_name="Student-Profile-Analyzer",
+ system_prompt="""You are an expert student profile analyzer. Your role is to:
+ 1. Analyze academic performance, test scores, and extracurricular activities
+ 2. Identify student's strengths, weaknesses, and unique qualities
+ 3. Evaluate personal statements and essays
+ 4. Assess leadership experiences and community involvement
+ 5. Determine student's preferences for college environment, location, and programs
+ 6. Create a comprehensive student profile summary
+
+ Always consider both quantitative metrics (GPA, test scores) and qualitative aspects
+ (personal growth, challenges overcome, unique perspectives).""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="profile_analyzer_agent.json",
+ user_name="student",
+ context_length=200000,
+ output_type="string",
+)
+
+# College Research Agent
+college_research_agent = Agent(
+ agent_name="College-Research-Specialist",
+ system_prompt="""You are a college research specialist. Your role is to:
+ 1. Maintain updated knowledge of college admission requirements
+ 2. Research academic programs, campus culture, and student life
+ 3. Analyze admission statistics and trends
+ 4. Evaluate college-specific opportunities and resources
+ 5. Consider financial aid availability and scholarship opportunities
+ 6. Track historical admission data and acceptance rates
+
+ Focus on providing accurate, comprehensive information about each institution
+ while considering both academic and cultural fit factors.""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="college_research_agent.json",
+ user_name="researcher",
+ context_length=200000,
+ output_type="string",
+)
+
+# College Match Agent
+college_match_agent = Agent(
+ agent_name="College-Match-Maker",
+ system_prompt="""You are a college matching specialist. Your role is to:
+ 1. Compare student profiles with college requirements
+ 2. Evaluate fit based on academic, social, and cultural factors
+ 3. Consider geographic preferences and constraints
+ 4. Assess financial fit and aid opportunities
+ 5. Create tiered lists of reach, target, and safety schools
+ 6. Explain the reasoning behind each match
+
+ Always provide a balanced list with realistic expectations while
+ considering both student preferences and admission probability.""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="college_match_agent.json",
+ user_name="matcher",
+ context_length=200000,
+ output_type="string",
+)
+
+# Debate Moderator Agent
+debate_moderator_agent = Agent(
+ agent_name="Debate-Moderator",
+ system_prompt="""You are a college selection debate moderator. Your role is to:
+ 1. Facilitate discussions between different perspectives
+ 2. Ensure all relevant factors are considered
+ 3. Challenge assumptions and biases
+ 4. Synthesize different viewpoints
+ 5. Guide the group toward consensus
+ 6. Document key points of agreement and disagreement
+
+ Maintain objectivity while ensuring all important factors are thoroughly discussed
+ and evaluated.""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="debate_moderator_agent.json",
+ user_name="moderator",
+ context_length=200000,
+ output_type="string",
+)
+
+# Critique Agent
+critique_agent = Agent(
+ agent_name="College-Selection-Critic",
+ system_prompt="""You are a college selection critic. Your role is to:
+ 1. Evaluate the strength of college matches
+ 2. Identify potential overlooked factors
+ 3. Challenge assumptions in the selection process
+ 4. Assess risks and potential drawbacks
+ 5. Provide constructive feedback on selections
+ 6. Suggest alternative options when appropriate
+
+ Focus on constructive criticism that helps improve the final college list
+ while maintaining realistic expectations.""",
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="critique_agent.json",
+ user_name="critic",
+ context_length=200000,
+ output_type="string",
+)
+
+# Final Decision Agent
+final_decision_agent = Agent(
+ agent_name="Final-Decision-Maker",
+ system_prompt="""
+ You are a college selection final decision maker. Your role is to:
+ 1. Synthesize all previous analyses and discussions
+ 2. Weigh competing factors and trade-offs
+ 3. Create a final ranked list of recommended colleges
+ 4. Provide clear rationale for each recommendation
+ 5. Include specific action items for each selected school
+ 6. Outline next steps in the application process
+
+ Focus on creating actionable, well-reasoned final recommendations that
+ balance all relevant factors and stakeholder input.
+ """,
+ llm=model,
+ max_loops=1,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ saved_state_path="final_decision_agent.json",
+ user_name="decision_maker",
+ context_length=200000,
+ output_type="string",
+)
+
+# Initialize the Sequential Workflow
+college_selection_workflow = SequentialWorkflow(
+ name="college-selection-swarm",
+ description="Comprehensive college selection and analysis system",
+ max_loops=1,
+ agents=[
+ profile_analyzer_agent,
+ college_research_agent,
+ college_match_agent,
+ debate_moderator_agent,
+ critique_agent,
+ final_decision_agent,
+ ],
+ output_type="all",
+)
+
+# Example usage
+if __name__ == "__main__":
+ # Example student profile input
+ student_profile = """
+ Student Profile:
+ - GPA: 3.8
+ - SAT: 1450
+ - Interests: Computer Science, Robotics
+ - Location Preference: East Coast
+ - Extracurriculars: Robotics Club President, Math Team
+ - Budget: Need financial aid
+ - Preferred Environment: Medium-sized urban campus
+ """
+
+ # Run the comprehensive college selection analysis
+ result = college_selection_workflow.run(
+ student_profile,
+ no_use_clusterops=True,
+ )
+ print(result)
diff --git a/swarm_arange_demo.py b/new_features_examples/swarm_arange_demo.py
similarity index 100%
rename from swarm_arange_demo.py
rename to new_features_examples/swarm_arange_demo.py
diff --git a/outputs.txt b/outputs.txt
deleted file mode 100644
index a993739e..00000000
--- a/outputs.txt
+++ /dev/null
@@ -1,116 +0,0 @@
-
-swarms [ī master][ā!?][š³ desktop-linux][š¦ v6.0.0][š v3.12.6][āļø (us-east-1)][āļø kye@swarms.world(us-central1)]
-ó° 10% āÆ /usr/local/bin/python3.12 /Users/swarms_wd/Desktop/swarms/auto_flow.py
-{
- "id": "AutomatedHedgeFundSystem",
- "plan": "The goal is to build a fully automated hedge fund system that integrates multiple components including market analysis, portfolio optimization, trade execution, compliance monitoring, performance tracking, and fund operations. The system will be divided into several key modules, each responsible for specific tasks. The overall success will be measured by the system's ability to consistently execute profitable trades, manage risks effectively, comply with regulations, and provide comprehensive reporting. Key milestones include setting up data feeds for market analysis, developing algorithms for portfolio optimization, implementing automated trade execution protocols, and establishing compliance and reporting mechanisms.",
- "failures_prediction": "Potential failure modes include incorrect market data leading to poor trading decisions, algorithmic errors in portfolio optimization, failures in trade execution systems, compliance breaches, and inaccurate reporting. To mitigate these risks, robust data validation procedures will be implemented, algorithms will be rigorously tested in simulated environments, trade execution systems will include fail-safes and redundancies, compliance checks will be automated and regularly audited, and reporting systems will include cross-checks and validation processes. Regular monitoring and updates will ensure the system remains reliable and accurate.",
- "rationale": "This flow design is optimal because it breaks down the complex task of building an automated hedge fund into manageable components, allowing for specialized agents to focus on distinct functions. Parallelization is maximized where possible, such as in market analysis and portfolio optimization, to increase efficiency and speed. Sequential dependencies ensure that critical tasks like compliance and trade execution follow necessary preparatory steps. This design balances the need for speed in trading with the necessity of thorough analysis and compliance, ensuring both profitability and adherence to regulations.",
- "flow": "AgentMarketAnalysis -> AgentPortfolioOptimization, AgentComplianceMonitoring -> AgentTradeExecution -> AgentPerformanceTracking, AgentFundOperations"
-}
-
-swarms [ī master][ā!?][š³ desktop-linux][š¦ v6.0.0][š v3.12.6][āļø (us-east-1)][āļø kye@swarms.world(us-central1)][ā± 9s]
-ó° 10% āÆ /usr/local/bin/python3.12 /Users/swarms_wd/Desktop/swarms/auto_flow.py
-{
- "name": "Automated Hedge Fund System",
- "description": "A fully automated system for managing a hedge fund, integrating market analysis, portfolio optimization, automated trade execution, compliance, and performance tracking.",
- "flows": [
- {
- "id": "Market_Analysis_and_Research",
- "plan": "Develop a system that continuously gathers data from multiple financial markets, processes this data to identify trends and patterns, and generates insights for trading strategies. The system should support multiple asset classes including equities, fixed income, commodities, and currencies.",
- "failures_prediction": "Data quality issues may arise, leading to inaccurate analysis. To mitigate, implement data validation and cleansing processes. Additionally, ensure redundancy in data sources to prevent single points of failure. Algorithmic biases can distort insights; regular audits and updates to algorithms are necessary.",
- "rationale": "Market analysis is foundational to informed trading strategies. By implementing robust data collection and processing, the system ensures timely and accurate insights, which are crucial for competitive advantage in trading.",
- "flow": "DataCollector -> DataProcessor -> TrendAnalyzer -> InsightGenerator"
- },
- {
- "id": "Portfolio_Optimization_and_Risk_Management",
- "plan": "Implement a system that uses the insights from market analysis to optimize the portfolio. This involves balancing risk and return, adhering to investment guidelines, and dynamically adjusting the portfolio in response to market changes.",
- "failures_prediction": "Risk models might not capture extreme market events, leading to unexpected losses. Regular stress testing and scenario analysis are essential. Portfolio rebalancing might incur high transaction costs; optimization algorithms should account for these.",
- "rationale": "Effective portfolio management maximizes returns while controlling risk. By continuously optimizing the portfolio, the system can adapt to market conditions and investor goals, ensuring long-term fund performance.",
- "flow": "InsightGenerator -> PortfolioOptimizer -> RiskManager -> Rebalancer"
- },
- {
- "id": "Automated_Trade_Execution_and_Settlement",
- "plan": "Design a system that executes trades automatically based on portfolio optimization outputs. It should ensure trades are executed at optimal prices and settled efficiently across multiple asset classes.",
- "failures_prediction": "Execution failures can occur due to connectivity issues or market volatility. Implement fail-safes such as alternative trading venues and pre-trade checks. Settlement failures require reconciliation processes to ensure all trades are accurately recorded.",
- "rationale": "Automation in trade execution reduces latency and human error, ensuring trades are conducted efficiently and at the best possible prices. This is critical for maintaining competitive edge and operational efficiency.",
- "flow": "Rebalancer -> TradeExecutor -> SettlementProcessor"
- },
- {
- "id": "Compliance_and_Regulatory_Monitoring",
- "plan": "Establish a system that monitors all trading activities for compliance with relevant regulations and internal policies. It should generate alerts for any potential violations and maintain detailed records for audits.",
- "failures_prediction": "Non-compliance can lead to legal penalties and reputational damage. Implement real-time monitoring and alert systems, and conduct regular compliance audits to ensure adherence to regulations.",
- "rationale": "Regulatory compliance is non-negotiable in financial markets. A robust monitoring system protects the fund from legal risks and maintains investor trust.",
- "flow": "TradeExecutor -> ComplianceMonitor -> AlertSystem"
- },
- {
- "id": "Performance_Tracking_and_Reporting",
- "plan": "Create a system that tracks the performance of the hedge fund, analyzing returns, risks, and other key metrics. It should generate regular reports for stakeholders, providing insights into fund performance and areas for improvement.",
- "failures_prediction": "Inaccurate performance data can mislead stakeholders. Ensure data integrity through validation processes and cross-checks. Reporting delays can frustrate stakeholders; automate report generation to ensure timeliness.",
- "rationale": "Performance tracking provides transparency and accountability, essential for stakeholder trust and strategic decision-making. Regular reporting helps in assessing strategy effectiveness and making informed adjustments.",
- "flow": "SettlementProcessor -> PerformanceTracker -> ReportGenerator"
- },
- {
- "id": "Fund_Operations_and_Administration",
- "plan": "Develop a system that handles the day-to-day operations of the hedge fund, including investor relations, fund accounting, and administrative tasks. Ensure seamless integration with other components of the hedge fund system.",
- "failures_prediction": "Operational bottlenecks can disrupt fund activities. Implement workflow automation and task prioritization to enhance efficiency. Ensure data consistency across systems to prevent administrative errors.",
- "rationale": "Efficient fund operations are crucial for smooth functioning and scalability of the hedge fund. By automating routine tasks, the system allows for focus on strategic activities and growth.",
- "flow": "ReportGenerator -> FundAdministrator -> InvestorRelations"
- }
- ]
-}
-
-swarms [ī master][ā!?][š³ desktop-linux][š¦ v6.0.0][š v3.12.6][āļø (us-east-1)][āļø kye@swarms.world(us-central1)][ā± 26s]
-ó° 10% āÆ /usr/local/bin/python3.12 /Users/swarms_wd/Desktop/swarms/auto_flow.py
-{
- "name": "Automated Hedge Fund System",
- "description": "A comprehensive architecture for a fully automated hedge fund system integrating market analysis, portfolio optimization, automated execution, compliance monitoring, performance tracking, and fund operations.",
- "flows": [
- {
- "id": "Market Analysis and Research",
- "plan": "Develop a robust market analysis module that gathers data from multiple sources, processes it using machine learning algorithms, and provides actionable insights. This module will continuously monitor market trends, sentiment, and economic indicators to inform trading strategies.",
- "failures_prediction": "Potential failures include data source outages, incorrect data processing, and machine learning model inaccuracies. Mitigation strategies involve using redundant data sources, implementing data validation checks, and continuously updating and retraining models.",
- "rationale": "Market analysis is the foundation of a successful trading strategy. By leveraging multiple data sources and advanced algorithms, the system can generate high-quality insights that drive profitable trades. The design prioritizes reliability and accuracy to ensure consistent performance.",
- "flow": "DataCollector -> DataProcessor -> MLModelTrainer -> InsightGenerator"
- },
- {
- "id": "Portfolio Optimization and Risk Management",
- "plan": "Create a portfolio optimization engine that uses quantitative models to allocate assets efficiently. Integrate risk management protocols to monitor and mitigate exposure to market risks, ensuring the portfolio aligns with the fund's risk appetite and investment goals.",
- "failures_prediction": "Risks include model inaccuracies, unexpected market events, and correlation breakdowns. Preventive measures include stress testing models, implementing real-time risk monitoring, and setting predefined risk thresholds with automated rebalancing.",
- "rationale": "Optimizing the portfolio is crucial for maximizing returns while controlling risk. By integrating risk management, the system ensures that the portfolio remains resilient to market fluctuations, aligning with overall investment strategies.",
- "flow": "PortfolioOptimizer -> RiskAnalyzer -> RiskMitigationEngine"
- },
- {
- "id": "Automated Trade Execution and Settlement",
- "plan": "Design an automated trade execution system that interfaces with multiple exchanges, executes trades based on predefined strategies, and handles settlement processes. Ensure the system is capable of high-frequency trading and adapts to market conditions.",
- "failures_prediction": "Failures can occur due to exchange connectivity issues, execution delays, or strategy malfunctions. Mitigation involves implementing failover protocols, real-time monitoring of execution quality, and adaptive algorithms that adjust to market conditions.",
- "rationale": "Automated execution is essential for capitalizing on market opportunities quickly and efficiently. The system's ability to handle high-frequency trades and adapt to changing conditions is critical for maintaining a competitive edge.",
- "flow": "TradeStrategyEngine -> ExecutionManager -> SettlementProcessor"
- },
- {
- "id": "Compliance and Regulatory Monitoring",
- "plan": "Implement a compliance monitoring system that tracks all trading activities, ensures adherence to regulations, and generates reports for regulatory bodies. Incorporate automated alerts for any compliance breaches or suspicious activities.",
- "failures_prediction": "Potential issues include regulatory changes, false positives in alerts, and reporting errors. Strategies to address these include regular updates to compliance rules, fine-tuning alert thresholds, and automated report validation checks.",
- "rationale": "Compliance is non-negotiable in the hedge fund industry. An automated system reduces the risk of human error and ensures that the fund operates within legal boundaries, protecting against fines and reputational damage.",
- "flow": "TradeMonitor -> ComplianceChecker -> AlertSystem -> ReportGenerator"
- },
- {
- "id": "Performance Tracking and Reporting",
- "plan": "Develop a performance tracking system that evaluates fund performance against benchmarks, generates detailed reports, and provides insights into fund health. Ensure the system supports real-time performance analytics and historical data analysis.",
- "failures_prediction": "Challenges include data inaccuracies, benchmark mismatches, and report generation delays. Mitigation involves implementing data validation, aligning benchmarks with investment goals, and optimizing report generation processes.",
- "rationale": "Tracking performance is vital for assessing the fund's success and making informed decisions. The system's ability to provide real-time insights and comprehensive reports supports strategic planning and investor communications.",
- "flow": "PerformanceAnalyzer -> BenchmarkComparator -> ReportGenerator"
- },
- {
- "id": "Fund Operations and Administration",
- "plan": "Create an operations module that handles fund administration tasks such as investor relations, fee calculations, and financial reporting. Ensure seamless integration with other systems for efficient data flow and operations management.",
- "failures_prediction": "Risks include operational inefficiencies, data integration issues, and incorrect calculations. Address these by streamlining processes, ensuring robust data integration, and implementing checks for accuracy in calculations.",
- "rationale": "Efficient fund operations are essential for smooth day-to-day management and investor satisfaction. By automating administrative tasks, the system reduces manual workload and enhances operational efficiency.",
- "flow": "InvestorRelationsManager -> FeeCalculator -> FinancialReportGenerator"
- }
- ],
- "swarm_flow": "Market Analysis and Research -> Portfolio Optimization and Risk Management -> Automated Trade Execution and Settlement -> Compliance and Regulatory Monitoring -> Performance Tracking and Reporting -> Fund Operations and Administration"
-}
-
-swarms [ī master][ā!?][š³ desktop-linux][š¦ v6.0.0][š v3.12.6][āļø (us-east-1)][āļø kye@swarms.world(us-central1)][ā± 32s]
-ó° 9% āÆ
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 24cd0922..0cc0a373 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
-version = "6.0.4"
+version = "6.4.7"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez "]
@@ -37,6 +37,14 @@ keywords = [
"Generative AI",
"Agent Marketplace",
"Agent Store",
+ "quant",
+ "finance",
+ "algorithmic trading",
+ "portfolio optimization",
+ "risk management",
+ "financial modeling",
+ "machine learning for finance",
+ "natural language processing for finance",
]
classifiers = [
"Development Status :: 4 - Beta",
@@ -52,32 +60,25 @@ python = ">=3.10,<4.0"
torch = ">=2.1.1,<3.0"
transformers = ">= 4.39.0, <5.0.0"
asyncio = ">=3.4.3,<4.0"
-langchain-community = "0.0.29"
-langchain-experimental = "0.0.55"
-backoff = "2.2.1"
toml = "*"
-pypdf = "4.3.1"
-loguru = "0.7.2"
+pypdf = "5.1.0"
+loguru = "*"
pydantic = "2.8.2"
-tenacity = "8.5.0"
-Pillow = "10.4.0"
+tenacity = "*"
psutil = "*"
sentry-sdk = {version = "*", extras = ["http"]} # Updated here
python-dotenv = "*"
PyYAML = "*"
docstring_parser = "0.16"
-fastapi = "*"
-openai = ">=1.30.1,<2.0"
-termcolor = "*"
tiktoken = "*"
networkx = "*"
-swarms-memory = "*"
-black = "*"
aiofiles = "*"
swarm-models = "*"
clusterops = "*"
chromadb = "*"
reportlab = "*"
+doc-master = "*"
+rich = "*"
[tool.poetry.scripts]
swarms = "swarms.cli.main:main"
@@ -85,7 +86,7 @@ swarms = "swarms.cli.main:main"
[tool.poetry.group.lint.dependencies]
black = ">=23.1,<25.0"
-ruff = ">=0.5.1,<0.6.10"
+ruff = ">=0.5.1,<0.8.2"
types-toml = "^0.10.8.1"
types-pytz = ">=2023.3,<2025.0"
types-chardet = "^5.0.4.6"
@@ -94,9 +95,7 @@ mypy-protobuf = "^3.0.0"
[tool.poetry.group.test.dependencies]
pytest = "^8.1.1"
-termcolor = "^2.4.0"
pandas = "^2.2.2"
-fastapi = "^0.110.1"
[tool.ruff]
line-length = 70
diff --git a/requirements.txt b/requirements.txt
index 8f9df9b9..e5375a0d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,20 +2,16 @@
torch>=2.1.1,<3.0
transformers>=4.39.0,<5.0.0
asyncio>=3.4.3,<4.0
-langchain-community==0.0.28
-langchain-experimental==0.0.55
-backoff==2.2.1
toml
pypdf==4.3.1
ratelimit==2.2.1
-loguru==0.7.2
+loguru
pydantic==2.8.2
-tenacity==8.5.0
-Pillow==10.4.0
+tenacity
+rich
psutil
sentry-sdk
python-dotenv
-opencv-python-headless
PyYAML
docstring_parser==0.16
black>=23.1,<25.0
@@ -25,13 +21,10 @@ types-pytz>=2023.3,<2025.0
types-chardet>=5.0.4.6
mypy-protobuf>=3.0.0
pytest>=8.1.1
-termcolor>=2.4.0
pandas>=2.2.2
-fastapi>=0.110.1
networkx
-swarms-memory
-pre-commit
aiofiles
swarm-models
clusterops
reportlab
+doc-master
diff --git a/scripts/docs/create_llm_file_for_docs.sh b/scripts/docs/create_llm_file_for_docs.sh
new file mode 100644
index 00000000..0b0ca612
--- /dev/null
+++ b/scripts/docs/create_llm_file_for_docs.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Set up logging
+LOG_FILE="docs_compilation.log"
+OUTPUT_FILE="combined_docs.txt"
+
+# Initialize log file
+echo "$(date): Starting documentation compilation" > "$LOG_FILE"
+
+# Create/clear output file
+> "$OUTPUT_FILE"
+
+# Function to determine file type and handle accordingly
+process_file() {
+ local file="$1"
+
+ # Get file extension
+ extension="${file##*.}"
+
+ echo "$(date): Processing $file" >> "$LOG_FILE"
+
+    case "$extension" in
+        md|markdown|txt)
+            echo "# $(basename "$file")" >> "$OUTPUT_FILE"
+            cat "$file" >> "$OUTPUT_FILE"
+            echo -e "\n\n" >> "$OUTPUT_FILE"
+            ;;
+        *)
+            echo "$(date): Skipping $file - unsupported format" >> "$LOG_FILE"
+            return
+            ;;
+    esac
+
+ echo "$(date): Successfully processed $file" >> "$LOG_FILE"
+}
+
+# Find and process all documentation files
+find ../docs -type f \( -name "*.md" -o -name "*.txt" -o -name "*.markdown" \) | while read -r file; do
+ process_file "$file"
+done
+
+# Log completion
+echo "$(date): Documentation compilation complete" >> "$LOG_FILE"
+echo "$(date): Output saved to $OUTPUT_FILE" >> "$LOG_FILE"
+
+# Print summary
+echo "Documentation compilation complete. Check $LOG_FILE for details."
\ No newline at end of file
diff --git a/simple_example.py b/simple_example.py
new file mode 100644
index 00000000..2fcbb8f9
--- /dev/null
+++ b/simple_example.py
@@ -0,0 +1,7 @@
+from swarms import Agent
+
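+# This minimal example passes model_name directly instead of constructing an llm instance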
+Agent(
+ agent_name="Stock-Analysis-Agent",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+).run("What are 5 hft algorithms")
diff --git a/swarms/__init__.py b/swarms/__init__.py
index c2df2d73..0c3b5ca5 100644
--- a/swarms/__init__.py
+++ b/swarms/__init__.py
@@ -1,17 +1,38 @@
+import os
import concurrent.futures
from dotenv import load_dotenv
+from loguru import logger
load_dotenv()
+# Disable logging by default
+if os.getenv("SWARMS_VERBOSE_GLOBAL", "False").lower() == "false":
+ logger.disable("")
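+# Set SWARMS_VERBOSE_GLOBAL=True in the environment to keep loguru output enabled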
+
+# Import telemetry functions with error handling
from swarms.telemetry.bootup import bootup # noqa: E402, F403
-from swarms.telemetry.sentry_active import (
+from swarms.telemetry.sentry_active import ( # noqa: E402
activate_sentry,
) # noqa: E402
-# Use ThreadPoolExecutor to run bootup and activate_sentry concurrently
-with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
- executor.submit(bootup)
- executor.submit(activate_sentry)
+
+# Run telemetry functions concurrently with error handling
+def run_telemetry():
+ try:
+ with concurrent.futures.ThreadPoolExecutor(
+ max_workers=2
+ ) as executor:
+ future_bootup = executor.submit(bootup)
+ future_sentry = executor.submit(activate_sentry)
+
+ # Wait for completion and check for exceptions
+ future_bootup.result()
+ future_sentry.result()
+ except Exception as e:
+ logger.error(f"Error running telemetry functions: {e}")
+
+
+run_telemetry()
from swarms.agents import * # noqa: E402, F403
from swarms.artifacts import * # noqa: E402, F403
diff --git a/swarms/agents/ape_agent.py b/swarms/agents/ape_agent.py
index 164813cc..420b7aaa 100644
--- a/swarms/agents/ape_agent.py
+++ b/swarms/agents/ape_agent.py
@@ -1,6 +1,5 @@
from typing import Any
-from loguru import logger
from tenacity import retry, stop_after_attempt, wait_exponential
from swarms.prompts.prompt_generator import (
@@ -9,6 +8,9 @@ from swarms.prompts.prompt_generator import (
from swarms.prompts.prompt_generator_optimizer import (
prompt_generator_sys_prompt,
)
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="ape_agent")
@retry(
diff --git a/swarms/agents/auto_generate_swarm_config.py b/swarms/agents/auto_generate_swarm_config.py
new file mode 100644
index 00000000..febb85e3
--- /dev/null
+++ b/swarms/agents/auto_generate_swarm_config.py
@@ -0,0 +1,253 @@
+import re
+
+from dotenv import load_dotenv
+from tenacity import retry, stop_after_attempt, wait_exponential
+
+from swarms import Agent
+from swarms.agents.create_agents_from_yaml import (
+ create_agents_from_yaml,
+)
+from swarms.utils.formatter import formatter
+from swarms.utils.litellm import LiteLLM
+
+load_dotenv()
+
+
+def prepare_yaml_for_parsing(raw_yaml: str) -> str:
+ """
+ Prepares raw YAML content by fixing spacing and formatting issues.
+
+ Args:
+ raw_yaml (str): The raw YAML content extracted from Markdown.
+
+ Returns:
+ str: The cleaned YAML content ready for parsing.
+ """
+ # Fix sequence items that are improperly placed on the same line as their key
+ fixed_yaml = re.sub(
+ r"(\b\w+\b):\s*-\s*", r"\1:\n - ", raw_yaml
+ ) # Fix "key: - value" to "key:\n - value"
+
+ # Ensure proper spacing after colons
+ fixed_yaml = re.sub(
+ r"(\S):(\S)", r"\1: \2", fixed_yaml
+ ) # Ensure space after colons
+
+ # Remove trailing spaces before newlines
+ fixed_yaml = re.sub(r"\s+\n", "\n", fixed_yaml)
+
+ # Replace non-breaking spaces (if any) with regular spaces
+ fixed_yaml = fixed_yaml.replace("\xa0", " ")
+
+ return fixed_yaml.strip()
+
+
+def parse_yaml_from_swarm_markdown(markdown_text: str) -> str:
+    """
+    Extracts YAML content from a Markdown-style 'Auto-Swarm-Builder' block and normalizes it for parsing.
+
+    Args:
+        markdown_text (str): The Markdown text containing the YAML inside an 'Auto-Swarm-Builder' block.
+
+    Returns:
+        str: The normalized YAML content, ready to be parsed downstream.
+    """
+ # Match the 'Auto-Swarm-Builder' block with YAML inside triple backticks
+ pattern = r"```yaml\s*\n(.*?)```"
+ match = re.search(pattern, markdown_text, re.DOTALL)
+
+ if not match:
+ raise ValueError(
+ "No YAML content found in the 'Auto-Swarm-Builder' block."
+ )
+
+ raw_yaml = match.group(1).strip()
+
+ # Preprocess and normalize the YAML content
+ normalized_yaml = prepare_yaml_for_parsing(raw_yaml)
+
+ return normalized_yaml
+
+
+AUTO_GEN_PROMPT = """
+You are a specialized agent responsible for creating YAML configuration files for multi-agent swarms. Your role is to generate well-structured YAML that defines both individual agents and swarm architectures based on user requirements.
+Output only the YAML, nothing else. You will be penalized for making mistakes.
+
+GUIDELINES:
+1. Each YAML file must contain an `agents` section with at least one agent configuration
+2. Each agent configuration requires the following mandatory fields:
+ - agent_name (string)
+ - system_prompt (string)
+
+3. Optional agent fields include:
+ - max_loops (integer)
+ - autosave (boolean)
+ - dashboard (boolean)
+ - verbose (boolean)
+ - dynamic_temperature_enabled (boolean)
+ - saved_state_path (string)
+ - user_name (string)
+ - retry_attempts (integer)
+ - context_length (integer)
+ - return_step_meta (boolean)
+ - output_type (string)
+ - task (string)
+
+4. When a swarm is needed, include a `swarm_architecture` section with:
+ Mandatory fields:
+ - name (string)
+   - swarm_type (string): one of "AgentRearrange", "MixtureOfAgents", "SpreadSheetSwarm", "SequentialWorkflow", "ConcurrentWorkflow"
+
+ Optional fields:
+ - description (string)
+ - max_loops (integer)
+ - task (string)
+
+TEMPLATE STRUCTURE:
+```yaml
+agents:
+ - agent_name: "Agent-1-Name"
+ system_prompt: "Detailed system prompt here"
+ max_loops: 1
+ # [additional optional fields]
+
+ - agent_name: "Agent-2-Name"
+ system_prompt: "Detailed system prompt here"
+ # [additional optional fields]
+
+swarm_architecture:
+ name: "Swarm-Name"
+ description: "Swarm purpose and goals"
+ swarm_type: "ConcurrentWorkflow"
+ max_loops: 5
+ task: "Main swarm task description"
+```
+
+VALIDATION RULES:
+1. All agent names must be unique
+2. System prompts must be clear and specific to the agent's role
+3. Integer values must be positive
+4. Boolean values must be true or false (lowercase)
+5. File paths should use forward slashes
+6. Tasks should be specific and aligned with the agent/swarm purpose
+
+When generating a YAML configuration:
+1. Ask for specific requirements about the agents and swarm needed
+2. Determine if a swarm architecture is necessary based on the task complexity
+3. Generate appropriate system prompts for each agent based on their roles
+4. Include relevant optional fields based on the use case
+5. Validate the configuration against all rules before returning
+
+Example valid YAML configurations are provided below. Use these as references for structure and formatting:
+
+```yaml
+# Single-Agent Example
+agents:
+ - agent_name: "Data-Analysis-Agent"
+ system_prompt: "You are a specialized data analysis agent focused on processing and interpreting financial data. Provide clear, actionable insights based on the data provided."
+ max_loops: 3
+ autosave: true
+ verbose: true
+ context_length: 100000
+ output_type: "json"
+ task: "Analyze quarterly financial reports and identify trends"
+
+---
+# Multi-Agent Swarm Example
+agents:
+ - agent_name: "Research-Agent"
+ system_prompt: "You are a research agent specialized in gathering and summarizing scientific publications. Focus on peer-reviewed sources and provide comprehensive summaries."
+ max_loops: 2
+ context_length: 150000
+ output_type: "str"
+
+ - agent_name: "Analysis-Agent"
+ system_prompt: "You are an analysis agent that processes research summaries and identifies key patterns and insights. Provide detailed analytical reports."
+ max_loops: 3
+ context_length: 200000
+ output_type: "json"
+
+swarm_architecture:
+ name: "Research-Analysis-Swarm"
+ description: "A swarm for comprehensive research analysis and insight generation"
+ swarm_type: "SequentialWorkflow"
+ max_loops: 5
+ task: "Research and analyze recent developments in quantum computing"
+
+```
+"""
+
+
+def generate_swarm_config(
+ task: str,
+ file_name: str = "swarm_config_output.yaml",
+ model_name: str = "gpt-4o",
+ *args,
+ **kwargs,
+):
+ """
+ Generates a swarm configuration based on the provided task and model name.
+
+ This function attempts to generate a swarm configuration by running an agent with the specified task and model name.
+ It then parses the output into YAML format and creates agents based on the parsed YAML content.
+
+ Args:
+ task (str): The task to be performed by the swarm.
+ file_name (str, optional): The file name for the output YAML configuration. Defaults to "swarm_config_output.yaml".
+ model_name (str, optional): The name of the model to use for the agent. Defaults to "gpt-4o".
+ *args: Additional positional arguments to be passed to the agent's run method.
+ **kwargs: Additional keyword arguments to be passed to the agent's run method.
+
+ Returns:
+        Any: The result of running the generated swarm configuration (the output returned by create_agents_from_yaml with return_type="run_swarm").
+ """
+ formatter.print_panel(
+ "Auto Generating Swarm...", "Auto Swarm Builder"
+ )
+
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(min=4, max=10),
+ )
+ def attempt_generate_swarm_config():
+ try:
+ model = LiteLLM(model_name=model_name)
+
+ # Initialize the agent
+ agent = Agent(
+ agent_name="Auto-Swarm-Builder",
+ system_prompt=AUTO_GEN_PROMPT,
+ llm=model,
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ saved_state_path="swarm_builder.json",
+ user_name="swarms_corp",
+ output_type="str",
+ )
+
+ # Generate output from the agent
+ raw_output = agent.run(task, *args, **kwargs)
+ yaml_content = parse_yaml_from_swarm_markdown(raw_output)
+ print(yaml_content)
+
+ # Create agents from the YAML file
+ output = create_agents_from_yaml(
+ yaml_string=yaml_content,
+ return_type="run_swarm",
+ )
+
+ formatter.print_panel(
+ "Swarm configuration generated successfully.",
+ "Success",
+ )
+
+ return output
+
+ except Exception as e:
+ formatter.print_panel(
+ f"Error generating swarm configuration: {str(e)}",
+ "Error",
+ )
+ raise
+
+ return attempt_generate_swarm_config()
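For orientation, here is a minimal usage sketch of the new generate_swarm_config helper. It is not part of the diff and assumes a valid OpenAI-compatible API key is available to the environment loaded by load_dotenv():

```python
from swarms.agents.auto_generate_swarm_config import generate_swarm_config

# Prompts the "Auto-Swarm-Builder" agent, extracts the YAML it emits,
# then hands that YAML to create_agents_from_yaml with return_type="run_swarm".
output = generate_swarm_config(
    task="Design a two-agent swarm that researches and then summarizes quantum computing news",
    model_name="gpt-4o",
)
print(output)
```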
diff --git a/swarms/agents/create_agents_from_yaml.py b/swarms/agents/create_agents_from_yaml.py
index 85371283..e92d1923 100644
--- a/swarms/agents/create_agents_from_yaml.py
+++ b/swarms/agents/create_agents_from_yaml.py
@@ -1,19 +1,168 @@
import os
-from typing import Any, Callable, Dict, List, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import yaml
-from loguru import logger
-
+from tenacity import (
+ retry,
+ stop_after_attempt,
+ wait_exponential,
+ retry_if_exception_type,
+)
+from pydantic import (
+ BaseModel,
+ Field,
+ field_validator,
+)
+from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter
+from swarms.utils.litellm import LiteLLM
+
+logger = initialize_logger(log_folder="create_agents_from_yaml")
+
+
+class AgentConfig(BaseModel):
+ agent_name: str
+ system_prompt: str
+ model_name: Optional[str] = None
+ max_loops: int = Field(default=1, ge=1)
+ autosave: bool = True
+ dashboard: bool = False
+ verbose: bool = False
+ dynamic_temperature_enabled: bool = False
+ saved_state_path: Optional[str] = None
+ user_name: str = "default_user"
+ retry_attempts: int = Field(default=3, ge=1)
+ context_length: int = Field(default=100000, ge=1000)
+ return_step_meta: bool = False
+ output_type: str = "str"
+ auto_generate_prompt: bool = False
+ artifacts_on: bool = False
+ artifacts_file_extension: str = ".md"
+ artifacts_output_path: str = ""
+
+ @field_validator("system_prompt")
+ @classmethod
+ def validate_system_prompt(cls, v):
+ if not v or not isinstance(v, str) or len(v.strip()) == 0:
+ raise ValueError(
+ "System prompt must be a non-empty string"
+ )
+ return v
+
+
+class SwarmConfig(BaseModel):
+ name: str
+ description: str
+ max_loops: int = Field(default=1, ge=1)
+ swarm_type: str
+ task: Optional[str] = None
+ flow: Optional[Dict] = None
+ autosave: bool = True
+ return_json: bool = False
+ rules: str = ""
+
+ @field_validator("swarm_type")
+ @classmethod
+ def validate_swarm_type(cls, v):
+ valid_types = {
+ "SequentialWorkflow",
+ "ConcurrentWorkflow",
+ "AgentRearrange",
+ "MixtureOfAgents",
+ "auto",
+ }
+ if v not in valid_types:
+ raise ValueError(
+ f"Swarm type must be one of: {valid_types}"
+ )
+ return v
+
+
+class YAMLConfig(BaseModel):
+ agents: List[AgentConfig] = Field(..., min_length=1)
+ swarm_architecture: Optional[SwarmConfig] = None
+
+ model_config = {
+ "extra": "forbid" # Prevent additional fields not in the model
+ }
+
+
+def load_yaml_safely(
+ yaml_file: str = None, yaml_string: str = None
+) -> Dict:
+ """Safely load and validate YAML configuration using Pydantic."""
+ try:
+ if yaml_string:
+ config_dict = yaml.safe_load(yaml_string)
+ elif yaml_file:
+ if not os.path.exists(yaml_file):
+ raise FileNotFoundError(
+ f"YAML file {yaml_file} not found."
+ )
+ with open(yaml_file, "r") as file:
+ config_dict = yaml.safe_load(file)
+ else:
+ raise ValueError(
+ "Either yaml_file or yaml_string must be provided"
+ )
+
+ # Validate using Pydantic
+ YAMLConfig(**config_dict)
+ return config_dict
+ except yaml.YAMLError as e:
+ raise ValueError(f"Error parsing YAML: {str(e)}")
+ except Exception as e:
+ raise ValueError(f"Error validating configuration: {str(e)}")
+
+
+@retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type((ConnectionError, TimeoutError)),
+ before_sleep=lambda retry_state: logger.info(
+ f"Retrying after error: {retry_state.outcome.exception()}"
+ ),
+)
+def create_agent_with_retry(
+ agent_config: Dict, model: LiteLLM
+) -> Agent:
+ """Create an agent with retry logic for handling transient failures."""
+ try:
+ validated_config = AgentConfig(**agent_config)
+ agent = Agent(
+ agent_name=validated_config.agent_name,
+ system_prompt=validated_config.system_prompt,
+ llm=model,
+ max_loops=validated_config.max_loops,
+ autosave=validated_config.autosave,
+ dashboard=validated_config.dashboard,
+ verbose=validated_config.verbose,
+ dynamic_temperature_enabled=validated_config.dynamic_temperature_enabled,
+ saved_state_path=validated_config.saved_state_path,
+ user_name=validated_config.user_name,
+ retry_attempts=validated_config.retry_attempts,
+ context_length=validated_config.context_length,
+ return_step_meta=validated_config.return_step_meta,
+ output_type=validated_config.output_type,
+ auto_generate_prompt=validated_config.auto_generate_prompt,
+ artifacts_on=validated_config.artifacts_on,
+ artifacts_file_extension=validated_config.artifacts_file_extension,
+ artifacts_output_path=validated_config.artifacts_output_path,
+ )
+ return agent
+ except Exception as e:
+ logger.error(
+ f"Error creating agent {agent_config.get('agent_name', 'unknown')}: {str(e)}"
+ )
+ raise
def create_agents_from_yaml(
model: Callable = None,
yaml_file: str = "agents.yaml",
+ yaml_string: str = None,
return_type: str = "auto",
- *args,
- **kwargs,
) -> Union[
SwarmRouter,
Agent,
@@ -22,162 +171,99 @@ def create_agents_from_yaml(
List[Dict[str, Any]],
]:
"""
- Create agents and/or SwarmRouter based on configurations defined in a YAML file.
-
- This function dynamically creates agents and a SwarmRouter (if specified) based on the
- configuration in the YAML file. It adapts its behavior based on the presence of a
- swarm architecture and the number of agents defined.
-
- Args:
- model (Callable): The language model to be used by the agents.
- yaml_file (str): Path to the YAML file containing agent and swarm configurations.
- return_type (str): Determines the return value. Options are:
- "auto" (default): Automatically determine the most appropriate return type.
- "swarm": Return SwarmRouter if present, otherwise a single agent or list of agents.
- "agents": Return a list of agents (or a single agent if only one is defined).
- "both": Return both SwarmRouter (or single agent) and list of agents.
- "tasks": Return task results if any tasks were executed.
- "run_swarm": Run the swarm and return its output.
- *args: Additional positional arguments for agent or SwarmRouter customization.
- **kwargs: Additional keyword arguments for agent or SwarmRouter customization.
-
- Returns:
- Union[SwarmRouter, Agent, List[Agent], Tuple[Union[SwarmRouter, Agent], List[Agent]], List[Dict[str, Any]]]:
- The return type depends on the 'return_type' argument and the configuration in the YAML file.
-
- Raises:
- FileNotFoundError: If the specified YAML file is not found.
- ValueError: If the YAML configuration is invalid or if an invalid return_type is specified.
+ Create agents and/or SwarmRouter based on configurations defined in a YAML file or string.
"""
- try:
- logger.info(
- f"Checking if the YAML file {yaml_file} exists..."
- )
-
- if not os.path.exists(yaml_file):
- logger.error(f"YAML file {yaml_file} not found.")
- raise FileNotFoundError(
- f"YAML file {yaml_file} not found."
- )
-
- logger.info(f"Loading YAML file {yaml_file}")
- with open(yaml_file, "r") as file:
- config = yaml.safe_load(file)
-
- if "agents" not in config:
- logger.error(
- "The YAML configuration does not contain 'agents'."
- )
- raise ValueError(
- "The YAML configuration does not contain 'agents'."
- )
+ agents = []
+ task_results = []
+ swarm_router = None
- agents = []
- task_results = []
+ try:
+ # Load and validate configuration
+ config = load_yaml_safely(yaml_file, yaml_string)
- # Create agents
+ # Create agents with retry logic
for agent_config in config["agents"]:
logger.info(
f"Creating agent: {agent_config['agent_name']}"
)
- if "system_prompt" not in agent_config:
- logger.error(
- f"System prompt is missing for agent: {agent_config['agent_name']}"
- )
- raise ValueError(
- f"System prompt is missing for agent: {agent_config['agent_name']}"
+ if "model_name" in agent_config:
+ model_instance = LiteLLM(
+ model_name=agent_config["model_name"]
)
+ else:
+ model_name = "gpt-4o"
+ model_instance = LiteLLM(model_name=model_name)
- agent = Agent(
- agent_name=agent_config["agent_name"],
- system_prompt=agent_config["system_prompt"],
- llm=model,
- max_loops=agent_config.get("max_loops", 1),
- autosave=agent_config.get("autosave", True),
- dashboard=agent_config.get("dashboard", False),
- verbose=agent_config.get("verbose", False),
- dynamic_temperature_enabled=agent_config.get(
- "dynamic_temperature_enabled", False
- ),
- saved_state_path=agent_config.get("saved_state_path"),
- user_name=agent_config.get(
- "user_name", "default_user"
- ),
- retry_attempts=agent_config.get("retry_attempts", 1),
- context_length=agent_config.get(
- "context_length", 100000
- ),
- return_step_meta=agent_config.get(
- "return_step_meta", False
- ),
- output_type=agent_config.get("output_type", "str"),
- auto_generate_prompt=agent_config.get(
- "auto_generate_prompt", "False"
- ),
- *args,
- **kwargs,
+ agent = create_agent_with_retry(
+ agent_config, model_instance
)
-
logger.info(
f"Agent {agent_config['agent_name']} created successfully."
)
agents.append(agent)
- # Create SwarmRouter if swarm_architecture is present
- swarm_router = None
+ # Create SwarmRouter if specified
if "swarm_architecture" in config:
- swarm_config = config["swarm_architecture"]
- swarm_router = SwarmRouter(
- name=swarm_config["name"],
- description=swarm_config["description"],
- max_loops=swarm_config["max_loops"],
- agents=agents,
- swarm_type=swarm_config["swarm_type"],
- task=swarm_config.get("task"),
- flow=swarm_config.get("flow"),
- autosave=swarm_config.get("autosave"),
- return_json=swarm_config.get("return_json"),
- *args,
- **kwargs,
- )
- logger.info(
- f"SwarmRouter '{swarm_config['name']}' created successfully."
+ try:
+ swarm_config = SwarmConfig(
+ **config["swarm_architecture"]
+ )
+ swarm_router = SwarmRouter(
+ name=swarm_config.name,
+ description=swarm_config.description,
+ max_loops=swarm_config.max_loops,
+ agents=agents,
+ swarm_type=swarm_config.swarm_type,
+ task=swarm_config.task,
+ flow=swarm_config.flow,
+ autosave=swarm_config.autosave,
+ return_json=swarm_config.return_json,
+ rules=swarm_config.rules,
+ )
+ logger.info(
+ f"SwarmRouter '{swarm_config.name}' created successfully."
+ )
+ except Exception as e:
+ logger.error(f"Error creating SwarmRouter: {str(e)}")
+ raise ValueError(
+ f"Failed to create SwarmRouter: {str(e)}"
+ )
+
+ # Handle return types with improved error checking
+ valid_return_types = {
+ "auto",
+ "swarm",
+ "agents",
+ "both",
+ "tasks",
+ "run_swarm",
+ }
+ if return_type not in valid_return_types:
+ raise ValueError(
+ f"Invalid return_type. Must be one of: {valid_return_types}"
)
- # Define function to run SwarmRouter
- def run_swarm_router(
- task: str = (
- swarm_config.get("task")
- if "swarm_architecture" in config
- else None
- ),
- ):
- if swarm_router:
- try:
- output = swarm_router.run(task)
- print(output)
- logger.info(
- f"Output for SwarmRouter '{swarm_config['name']}': {output}"
- )
- return output
- except Exception as e:
- logger.error(
- f"Error running task for SwarmRouter '{swarm_config['name']}': {e}"
- )
- raise e
- else:
- logger.error("SwarmRouter not created.")
- raise ValueError("SwarmRouter not created.")
+        if return_type == "run_swarm":
+ if not swarm_router:
+ raise ValueError(
+ "Cannot run swarm: SwarmRouter not created."
+ )
+ try:
+ return swarm_router.run(
+ config["swarm_architecture"]["task"]
+ )
+ except Exception as e:
+ logger.error(f"Error running SwarmRouter: {str(e)}")
+ raise
- # Handle return types
+ # Return appropriate type based on configuration
if return_type == "auto":
- if swarm_router:
- return swarm_router
- elif len(agents) == 1:
- return agents[0]
- else:
- return agents
+ return (
+ swarm_router
+ if swarm_router
+ else (agents[0] if len(agents) == 1 else agents)
+ )
elif return_type == "swarm":
return (
swarm_router
@@ -193,24 +279,10 @@ def create_agents_from_yaml(
else agents[0] if len(agents) == 1 else agents
), agents
elif return_type == "tasks":
- if not task_results:
- logger.warning(
- "No tasks were executed. Returning empty list."
- )
return task_results
- elif return_type == "run_swarm":
- if swarm_router:
- return run_swarm_router()
- else:
- logger.error(
- "Cannot run swarm: SwarmRouter not created."
- )
- raise ValueError(
- "Cannot run swarm: SwarmRouter not created."
- )
- else:
- logger.error(f"Invalid return_type: {return_type}")
- raise ValueError(f"Invalid return_type: {return_type}")
+
except Exception as e:
- logger.error(f"An error occurred: {e}")
- raise e
+ logger.error(
+ f"Critical error in create_agents_from_yaml: {str(e)}"
+ )
+ raise
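A short sketch of how the reworked create_agents_from_yaml can now be driven from an in-memory YAML string rather than a file. This is illustrative only; the YAML shape follows the AgentConfig model above, and the model name is an assumption:

```python
from swarms.agents.create_agents_from_yaml import create_agents_from_yaml

yaml_string = """
agents:
  - agent_name: "Research-Agent"
    system_prompt: "Summarize recent developments in AI research."
    model_name: "gpt-4o-mini"
    max_loops: 1
"""

# With a single agent and no swarm_architecture, return_type="agents" yields that agent
agent = create_agents_from_yaml(yaml_string=yaml_string, return_type="agents")
```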
diff --git a/swarms/agents/tool_agent.py b/swarms/agents/tool_agent.py
index d05417f1..2d19ec26 100644
--- a/swarms/agents/tool_agent.py
+++ b/swarms/agents/tool_agent.py
@@ -1,11 +1,11 @@
from typing import Any, Optional, Callable
-
-from swarms.structs.agent import Agent
from swarms.tools.json_former import Jsonformer
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="tool_agent")
-class ToolAgent(Agent):
+class ToolAgent:
"""
Represents a tool agent that performs a specific task using a model and tokenizer.
@@ -151,3 +151,6 @@ class ToolAgent(Agent):
f"Error running {self.name} for task: {task}"
)
raise error
+
+ def __call__(self, task: str, *args, **kwargs):
+ return self.run(task, *args, **kwargs)
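With __call__ delegating to run, a ToolAgent instance can now be invoked like a function. The sketch below is a rough illustration; the constructor arguments (model, tokenizer, json_schema) are assumptions based on the class docstring rather than something shown in this diff:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms.agents.tool_agent import ToolAgent

# Hypothetical model and schema choices, purely for illustration
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b")
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-3b")
json_schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "age": {"type": "number"}},
}

agent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)

# Both forms are now equivalent: __call__ simply forwards to run()
out_a = agent.run("Generate a person's information based on the schema")
out_b = agent("Generate a person's information based on the schema")
```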
diff --git a/swarms/artifacts/__init__.py b/swarms/artifacts/__init__.py
index 448d6101..a1a027b4 100644
--- a/swarms/artifacts/__init__.py
+++ b/swarms/artifacts/__init__.py
@@ -1,9 +1,5 @@
-from swarms.artifacts.base_artifact import BaseArtifact
-from swarms.artifacts.text_artifact import TextArtifact
from swarms.artifacts.main_artifact import Artifact
__all__ = [
- "BaseArtifact",
- "TextArtifact",
"Artifact",
]
diff --git a/swarms/artifacts/base_artifact.py b/swarms/artifacts/base_artifact.py
deleted file mode 100644
index aad07a7b..00000000
--- a/swarms/artifacts/base_artifact.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from __future__ import annotations
-
-import json
-import uuid
-from abc import ABC, abstractmethod
-from dataclasses import dataclass
-from typing import Any
-
-
-@dataclass
-class BaseArtifact(ABC):
- """
- Base class for artifacts.
- """
-
- id: str
- name: str
- value: Any
-
- def __post_init__(self):
- if self.id is None:
- self.id = uuid.uuid4().hex
- if self.name is None:
- self.name = self.id
-
- @classmethod
- def value_to_bytes(cls, value: Any) -> bytes:
- """
- Convert the value to bytes.
- """
- if isinstance(value, bytes):
- return value
- else:
- return str(value).encode()
-
- @classmethod
- def value_to_dict(cls, value: Any) -> dict:
- """
- Convert the value to a dictionary.
- """
- if isinstance(value, dict):
- dict_value = value
- else:
- dict_value = json.loads(value)
-
- return {k: v for k, v in dict_value.items()}
-
- def to_text(self) -> str:
- """
- Convert the value to text.
- """
- return str(self.value)
-
- def __str__(self) -> str:
- """
- Return a string representation of the artifact.
- """
- return self.to_text()
-
- def __bool__(self) -> bool:
- """
- Return the boolean value of the artifact.
- """
- return bool(self.value)
-
- def __len__(self) -> int:
- """
- Return the length of the artifact.
- """
- return len(self.value)
-
- @abstractmethod
- def __add__(self, other: BaseArtifact) -> BaseArtifact:
- """
- Add two artifacts together.
- """
- ...
diff --git a/swarms/artifacts/main_artifact.py b/swarms/artifacts/main_artifact.py
index d2009476..5eaa939e 100644
--- a/swarms/artifacts/main_artifact.py
+++ b/swarms/artifacts/main_artifact.py
@@ -1,11 +1,13 @@
import time
-from swarms.utils.loguru_logger import logger
import os
import json
from typing import List, Union, Dict, Any
from pydantic import BaseModel, Field, validator
from datetime import datetime
from swarms.utils.file_processing import create_file_in_folder
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="main_artifact")
class FileVersion(BaseModel):
diff --git a/swarms/artifacts/text_artifact.py b/swarms/artifacts/text_artifact.py
deleted file mode 100644
index 13ca4dfd..00000000
--- a/swarms/artifacts/text_artifact.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass, field
-from typing import Callable
-from swarms.artifacts.base_artifact import BaseArtifact
-
-
-@dataclass
-class TextArtifact(BaseArtifact):
- """
- Represents a text artifact.
-
- Attributes:
- value (str): The text value of the artifact.
- encoding (str, optional): The encoding of the text (default is "utf-8").
- encoding_error_handler (str, optional): The error handler for encoding errors (default is "strict").
- _embedding (list[float]): The embedding of the text artifact (default is an empty list).
-
- Properties:
- embedding (Optional[list[float]]): The embedding of the text artifact.
-
- Methods:
- __add__(self, other: BaseArtifact) -> TextArtifact: Concatenates the text value of the artifact with another artifact.
- __bool__(self) -> bool: Checks if the text value of the artifact is non-empty.
- generate_embedding(self, driver: BaseEmbeddingModel) -> Optional[list[float]]: Generates the embedding of the text artifact using a given embedding model.
- token_count(self, tokenizer: BaseTokenizer) -> int: Counts the number of tokens in the text artifact using a given tokenizer.
- to_bytes(self) -> bytes: Converts the text value of the artifact to bytes using the specified encoding and error handler.
- """
-
- value: str
- encoding: str = "utf-8"
- encoding_error_handler: str = "strict"
- tokenizer: Callable = None
- _embedding: list[float] = field(default_factory=list)
-
- @property
- def embedding(self) -> list[float] | None:
- return None if len(self._embedding) == 0 else self._embedding
-
- def __add__(self, other: BaseArtifact) -> TextArtifact:
- return TextArtifact(self.value + other.value)
-
- def __bool__(self) -> bool:
- return bool(self.value.strip())
-
- def generate_embedding(self, model) -> list[float] | None:
- self._embedding.clear()
- self._embedding.extend(model.embed_string(str(self.value)))
-
- return self.embedding
-
- def token_count(self) -> int:
- return self.tokenizer.count_tokens(str(self.value))
-
- def to_bytes(self) -> bytes:
- return self.value.encode(
- encoding=self.encoding, errors=self.encoding_error_handler
- )
diff --git a/swarms/cli/main.py b/swarms/cli/main.py
index 738deec6..5abe8b58 100644
--- a/swarms/cli/main.py
+++ b/swarms/cli/main.py
@@ -1,244 +1,348 @@
import argparse
import os
+import subprocess
import time
+import webbrowser
from rich.console import Console
+from rich.panel import Panel
+from rich.progress import Progress, SpinnerColumn, TextColumn
+from rich.table import Table
from rich.text import Text
-from swarms.cli.onboarding_process import OnboardingProcess
+
+from swarms.agents.auto_generate_swarm_config import (
+ generate_swarm_config,
+)
from swarms.agents.create_agents_from_yaml import (
create_agents_from_yaml,
)
-import subprocess
+from swarms.cli.onboarding_process import OnboardingProcess
+from swarms.utils.formatter import formatter
+# Initialize console with custom styling
console = Console()
-ASCII_ART = """
- _________
- / _____/_ _ _______ _______ _____ ______
- \_____ \\ \/ \/ /\__ \\_ __ \/ \ / ___/
- / \\ / / __ \| | \/ Y Y \\___ \
-/_______ / \/\_/ (____ /__| |__|_| /____ >
- \/ \/ \/ \/
+class SwarmCLIError(Exception):
+ """Custom exception for Swarm CLI errors"""
+
+ pass
+
+# Color scheme
+COLORS = {
+ "primary": "red",
+ "secondary": "#FF6B6B",
+ "accent": "#4A90E2",
+ "success": "#2ECC71",
+ "warning": "#F1C40F",
+ "error": "#E74C3C",
+ "text": "#FFFFFF",
+}
+
+ASCII_ART = """
+ āāāāāāāāā āā āā āāāāāāāāā āāāāāāāāā āāāāāāāāāāā āāāāāāāāā
+ āāā āāā āāā āāā āāā āāā āāā āāā āāāāāāāāāāāāāāā āāā āāā
+ āāā āā āāā āāā āāā āāā āāā āāā āāā āāā āāā āāā āā
+ āāā āāā āāā āāā āāā āāāāāāāāāāā āāā āāā āāā āāā
+āāāāāāāāāāāā āāā āāā āāāāāāāāāāāā āāāāāāāāāā āāā āāā āāā āāāāāāāāāāāā
+ āāā āāā āāā āāā āāā āāāāāāāāāāāā āāā āāā āāā āāā
+ āā āāā āāā āāā āāā āāā āāā āāā āāā āāā āāā āāā āā āāā
+ āāāāāāāāāā āāāāāāāāā āāā āā āāā āāā āā āāā āā āāāāāāāāāā
+ āāā āāā
"""
-# Function to display the ASCII art in red
+def create_spinner(text: str) -> Progress:
+ """Create a custom spinner with the given text."""
+ return Progress(
+ SpinnerColumn(style=COLORS["primary"]),
+ TextColumn("[{task.description}]", style=COLORS["text"]),
+ console=console,
+ )
+
+
def show_ascii_art():
- text = Text(ASCII_ART, style="bold cyan")
- console.print(text)
+ """Display the ASCII art with a glowing effect."""
+ panel = Panel(
+ Text(ASCII_ART, style=f"bold {COLORS['primary']}"),
+ border_style=COLORS["secondary"],
+ title="[bold]Welcome to Swarms[/bold]",
+ subtitle="[dim]Power to the Swarms[/dim]",
+ )
+ console.print(panel)
-# Help command
-def show_help():
- console.print(
- """
- [bold cyan]Swarms CLI - Help[/bold cyan]
-
- [bold magenta]Commands:[/bold magenta]
- [bold white]onboarding[/bold white] : Starts the onboarding process
- [bold white]help[/bold white] : Shows this help message
- [bold white]get-api-key[/bold white] : Retrieves your API key from the platform
- [bold white]check-login[/bold white] : Checks if you're logged in and starts the cache
- [bold white]read-docs[/bold white] : Redirects you to swarms cloud documentation!
- [bold white]run-agents[/bold white] : Run your Agents from your specified yaml file. Specify the yaml file with path the `--yaml-file` arg. Example: `--yaml-file agents.yaml`
- [bold white]generate-prompt[/bold white] : Generate a prompt through automated prompt engineering. Requires an OPENAI Key in your `.env` Example: --prompt "Generate a prompt for an agent to analyze legal docs"
- [bold white]auto-upgrade[/bold white] : Automatically upgrades Swarms to the latest version
- [bold white]book-call[/bold white] : Book a strategy session with our team to discuss your use case and get personalized guidance
-
- For more details, visit: https://docs.swarms.world
- """
+def create_command_table() -> Table:
+ """Create a beautifully formatted table of commands."""
+ table = Table(
+ show_header=True,
+ header_style=f"bold {COLORS['primary']}",
+ border_style=COLORS["secondary"],
+ title="Available Commands",
+ padding=(0, 2),
)
- # [bold white]add-agent[/bold white] : Add an agent to the marketplace under your name. Must have a Dockerfile + your agent.yaml to publish. Learn more Here: https://docs.swarms.world/en/latest/swarms_cloud/vision/
+ table.add_column("Command", style="bold white")
+ table.add_column("Description", style="dim white")
+ commands = [
+ ("onboarding", "Start the interactive onboarding process"),
+ ("help", "Display this help message"),
+ ("get-api-key", "Retrieve your API key from the platform"),
+ ("check-login", "Verify login status and initialize cache"),
+ ("run-agents", "Execute agents from your YAML configuration"),
+ ("auto-upgrade", "Update Swarms to the latest version"),
+ ("book-call", "Schedule a strategy session with our team"),
+ ("autoswarm", "Generate and execute an autonomous swarm"),
+ ]
-# Fetch API key from platform
-def get_api_key():
+ for cmd, desc in commands:
+ table.add_row(cmd, desc)
+
+ return table
+
+
+def show_help():
+ """Display a beautifully formatted help message."""
console.print(
- "[bold yellow]Opening the API key retrieval page...[/bold yellow]"
+ "\n[bold]Swarms CLI - Command Reference[/bold]\n",
+ style=COLORS["primary"],
)
- # Simulating API key retrieval process by opening the website
- import webbrowser
-
- webbrowser.open("https://swarms.world/platform/api-keys")
- time.sleep(2)
+ console.print(create_command_table())
console.print(
- "[bold green]Your API key is available on the dashboard.[/bold green]"
+ "\n[dim]For detailed documentation, visit: https://docs.swarms.world[/dim]"
)
-# Redirect to docs
-def redirect_to_docs():
- console.print(
- "[bold yellow]Opening the Docs page...[/bold yellow]"
+def show_error(message: str, help_text: str = None):
+ """Display error message in a formatted panel"""
+ error_panel = Panel(
+ f"[bold red]{message}[/bold red]",
+ title="Error",
+ border_style="red",
)
- # Simulating API key retrieval process by opening the website
- import webbrowser
+ console.print(error_panel)
- webbrowser.open("https://docs.swarms.world")
- time.sleep(2)
+ if help_text:
+        console.print(f"\n[yellow]ℹ️ {help_text}[/yellow]")
-# Redirect to docs
-def redirect_to_call():
+def execute_with_spinner(action: callable, text: str):
+ """Execute an action with a spinner animation."""
+ with create_spinner(text) as progress:
+ task = progress.add_task(text, total=None)
+ result = action()
+ progress.remove_task(task)
+ return result
+
+
+def get_api_key():
+ """Retrieve API key with visual feedback."""
+ with create_spinner("Opening API key portal...") as progress:
+ task = progress.add_task("Opening browser...")
+ webbrowser.open("https://swarms.world/platform/api-keys")
+ time.sleep(1)
+ progress.remove_task(task)
console.print(
- "[bold yellow]Opening the Call page...[/bold yellow]"
+        f"\n[{COLORS['success']}]✓ API key page opened in your browser[/{COLORS['success']}]"
)
- # Simulating API key retrieval process by opening the website
- import webbrowser
- webbrowser.open("https://cal.com/swarms/swarms-strategy-session")
- time.sleep(2)
-
-# Check and start cache (login system simulation)
def check_login():
+ """Verify login status with enhanced visual feedback."""
cache_file = "cache.txt"
if os.path.exists(cache_file):
with open(cache_file, "r") as f:
- cache_content = f.read()
- if cache_content == "logged_in":
+ if f.read() == "logged_in":
+ console.print(
+                    f"[{COLORS['success']}]✓ Authentication verified[/{COLORS['success']}]"
+ )
+ return True
+
+ with create_spinner("Authenticating...") as progress:
+ task = progress.add_task("Initializing session...")
+ time.sleep(1)
+ with open(cache_file, "w") as f:
+ f.write("logged_in")
+ progress.remove_task(task)
+
+ console.print(
+        f"[{COLORS['success']}]✓ Login successful![/{COLORS['success']}]"
+ )
+ return True
+
+
+def run_autoswarm(task: str, model: str):
+ """Run autoswarm with enhanced error handling"""
+ try:
+ console.print(
+ "[yellow]Initializing autoswarm configuration...[/yellow]"
+ )
+
+ # Set LiteLLM verbose mode for debugging
+ import litellm
+
+ litellm.set_verbose = True
+
+ # Validate inputs
+ if not task or task.strip() == "":
+ raise SwarmCLIError("Task cannot be empty")
+
+ if not model or model.strip() == "":
+ raise SwarmCLIError("Model name cannot be empty")
+
+ # Attempt to generate swarm configuration
+ console.print(
+ f"[yellow]Generating swarm for task: {task}[/yellow]"
+ )
+ result = generate_swarm_config(task=task, model=model)
+
+ if result:
console.print(
- "[bold green]You are already logged in.[/bold green]"
+                "[green]✓ Swarm configuration generated successfully![/green]"
)
else:
- console.print(
- "[bold red]You are not logged in.[/bold red]"
+ raise SwarmCLIError(
+ "Failed to generate swarm configuration"
+ )
+
+ except Exception as e:
+ if "No YAML content found" in str(e):
+ show_error(
+ "Failed to generate YAML configuration",
+ "This might be due to an API key issue or invalid model configuration.\n"
+ + "1. Check if your OpenAI API key is set correctly\n"
+ + "2. Verify the model name is valid\n"
+ + "3. Try running with --model gpt-4",
+ )
+ else:
+ show_error(
+ f"Error during autoswarm execution: {str(e)}",
+ "For debugging, try:\n"
+ + "1. Check your API keys are set correctly\n"
+ + "2. Verify your network connection\n"
+ + "3. Try a different model",
)
- else:
- console.print("[bold yellow]Logging in...[/bold yellow]")
- time.sleep(2)
- with open(cache_file, "w") as f:
- f.write("logged_in")
- console.print("[bold green]Login successful![/bold green]")
def check_and_upgrade_version():
- console.print(
- "[bold yellow]Checking for Swarms updates...[/bold yellow]"
- )
- try:
- # Check for updates using pip
+ """Check for updates with visual progress."""
+
+ def check_update():
result = subprocess.run(
["pip", "list", "--outdated", "--format=freeze"],
capture_output=True,
text=True,
)
- outdated_packages = result.stdout.splitlines()
+ return result.stdout.splitlines()
- # Check if Swarms is outdated
- for package in outdated_packages:
- if package.startswith("swarms=="):
- console.print(
- "[bold magenta]New version available! Upgrading...[/bold magenta]"
+ outdated = execute_with_spinner(
+ check_update, "Checking for updates..."
+ )
+
+ for package in outdated:
+ if package.startswith("swarms=="):
+ console.print(
+                f"[{COLORS['warning']}]⚠ Update available![/{COLORS['warning']}]"
+ )
+ with create_spinner("Upgrading Swarms...") as progress:
+ task = progress.add_task(
+ "Installing latest version..."
)
subprocess.run(
["pip", "install", "--upgrade", "swarms"],
check=True,
)
- console.print(
- "[bold green]Swarms upgraded successfully![/bold green]"
- )
- return
+ progress.remove_task(task)
+ console.print(
+                f"[{COLORS['success']}]✓ Swarms upgraded successfully![/{COLORS['success']}]"
+ )
+ return
- console.print(
- "[bold green]Swarms is up-to-date.[/bold green]"
- )
- except Exception as e:
- console.print(
- f"[bold red]Error checking for updates: {e}[/bold red]"
- )
+ console.print(
+        f"[{COLORS['success']}]✓ Swarms is up to date![/{COLORS['success']}]"
+ )
-# Main CLI handler
def main():
- parser = argparse.ArgumentParser(description="Swarms Cloud CLI")
-
- # Adding arguments for different commands
- parser.add_argument(
- "command",
- choices=[
- "onboarding",
- "help",
- "get-api-key",
- "check-login",
- "run-agents",
- "generate-prompt", # Added new command for generating prompts
- "auto-upgrade", # Added new command for auto-upgrade,
- "book-call",
- ],
- help="Command to run",
- )
- parser.add_argument(
- "--yaml-file",
- type=str,
- default="agents.yaml",
- help="Specify the YAML file for running agents",
- )
- parser.add_argument(
- "--prompt",
- type=str,
- help="Specify the task for generating a prompt",
- )
- parser.add_argument(
- "--num-loops",
- type=int,
- default=1,
- help="Specify the number of loops for generating a prompt",
- )
- parser.add_argument(
- "--autosave",
- action="store_true",
- help="Enable autosave for the prompt generator",
- )
- parser.add_argument(
- "--save-to-yaml",
- action="store_true",
- help="Save the generated prompt to a YAML file",
- )
+ try:
- args = parser.parse_args()
-
- show_ascii_art()
-
- # Determine which command to run
- if args.command == "onboarding":
- OnboardingProcess().run()
- elif args.command == "help":
- show_help()
- elif args.command == "get-api-key":
- get_api_key()
- elif args.command == "check-login":
- check_login()
- elif args.command == "run-agents":
- create_agents_from_yaml(
- yaml_file=args.yaml_file, return_type="tasks"
+ show_ascii_art()
+
+ parser = argparse.ArgumentParser(
+ description="Swarms Cloud CLI"
)
- # elif args.command == "generate-prompt":
- # if (
- # args.prompt
- # ): # Corrected from args.prompt_task to args.prompt
- # generate_prompt(
- # num_loops=args.num_loops,
- # autosave=args.autosave,
- # save_to_yaml=args.save_to_yaml,
- # prompt=args.prompt, # Corrected from args.prompt_task to args.prompt
- # )
- # else:
- # console.print(
- # "[bold red]Please specify a task for generating a prompt using '--prompt'.[/bold red]"
- # )
- elif args.command == "auto-upgrade":
- check_and_upgrade_version()
- elif args.command == "book-call":
- redirect_to_call()
- else:
- console.print(
- "[bold red]Unknown command! Type 'help' for usage.[/bold red]"
+ parser.add_argument(
+ "command",
+ choices=[
+ "onboarding",
+ "help",
+ "get-api-key",
+ "check-login",
+ "run-agents",
+ "auto-upgrade",
+ "book-call",
+ "autoswarm",
+ ],
+ help="Command to execute",
+ )
+ parser.add_argument(
+ "--yaml-file",
+ type=str,
+ default="agents.yaml",
+ help="YAML configuration file path",
+ )
+ parser.add_argument(
+ "--task", type=str, help="Task for autoswarm"
+ )
+ parser.add_argument(
+ "--model",
+ type=str,
+ default="gpt-4",
+ help="Model for autoswarm",
+ )
+
+ args = parser.parse_args()
+
+ try:
+ if args.command == "onboarding":
+ OnboardingProcess().run()
+ elif args.command == "help":
+ show_help()
+ elif args.command == "get-api-key":
+ get_api_key()
+ elif args.command == "check-login":
+ check_login()
+ elif args.command == "run-agents":
+ create_agents_from_yaml(
+ yaml_file=args.yaml_file, return_type="tasks"
+ )
+ elif args.command == "auto-upgrade":
+ check_and_upgrade_version()
+ elif args.command == "book-call":
+ webbrowser.open(
+ "https://cal.com/swarms/swarms-strategy-session"
+ )
+ elif args.command == "autoswarm":
+ if not args.task:
+ show_error(
+ "Missing required argument: --task",
+ "Example usage: python cli.py autoswarm --task 'analyze this data' --model gpt-4",
+ )
+ exit(1)
+ run_autoswarm(args.task, args.model)
+ except Exception as e:
+ console.print(
+ f"[{COLORS['error']}]Error: {str(e)}[/{COLORS['error']}]"
+ )
+ return
+ except Exception as error:
+ formatter.print_panel(
+ f"Error detected: {error} check your args"
)
+ raise error
if __name__ == "__main__":
diff --git a/swarms/cli/onboarding_process.py b/swarms/cli/onboarding_process.py
index 99018b86..edac1168 100644
--- a/swarms/cli/onboarding_process.py
+++ b/swarms/cli/onboarding_process.py
@@ -3,13 +3,16 @@ import os
import time
from typing import Dict
-from loguru import logger
+from swarms.utils.loguru_logger import initialize_logger
+
from swarms.telemetry.capture_sys_data import (
capture_system_data,
log_agent_data,
)
+logger = initialize_logger(log_folder="onboarding_process")
+
class OnboardingProcess:
"""
@@ -84,19 +87,6 @@ class OnboardingProcess:
try:
combined_data = {**self.user_data, **self.system_data}
log_agent_data(combined_data)
- # threading.Thread(target=log_agent_data(combined_data)).start()
- # with open(self.auto_save_path, "w") as f:
- # json.dump(combined_data, f, indent=4)
- # # logger.info(
- # # "User and system data successfully saved to {}",
- # # self.auto_save_path,
- # # )
- # with open(self.cache_save_path, "w") as f:
- # json.dump(combined_data, f, indent=4)
- # logger.info(
- # "User and system data successfully cached in {}",
- # self.cache_save_path,
- # )
return # Exit the function if saving was successful
except Exception as e:
logger.error(
@@ -170,10 +160,6 @@ class OnboardingProcess:
self.ask_input(
"Enter your email (or type 'quit' to exit): ", "email"
)
- self.ask_input(
- "Enter your Swarms API key (or type 'quit' to exit): Get this in your swarms dashboard: https://swarms.world/platform/api-keys ",
- "swarms_api_key",
- )
workspace = self.ask_input(
"Enter your WORKSPACE_DIR: This is where logs, errors, and agent configurations will be stored (or type 'quit' to exit). Remember to set this as an environment variable: https://docs.swarms.world/en/latest/swarms/install/quickstart/ || ",
"workspace_dir",
diff --git a/swarms/cli/parse_yaml.py b/swarms/cli/parse_yaml.py
deleted file mode 100644
index de8e936d..00000000
--- a/swarms/cli/parse_yaml.py
+++ /dev/null
@@ -1,120 +0,0 @@
-from swarms.utils.loguru_logger import logger
-import yaml
-from pydantic import BaseModel
-from typing import List, Optional
-import json
-from swarms.structs.agent_registry import AgentRegistry
-from swarms.structs.agent import Agent
-from swarm_models.popular_llms import OpenAIChat
-
-
-class AgentInput(BaseModel):
- agent_name: str = "Swarm Agent"
- system_prompt: Optional[str] = None
- agent_description: Optional[str] = None
- model_name: str = "OpenAIChat"
- max_loops: int = 1
- autosave: bool = False
- dynamic_temperature_enabled: bool = False
- dashboard: bool = False
- verbose: bool = False
- streaming_on: bool = True
- saved_state_path: Optional[str] = None
- sop: Optional[str] = None
- sop_list: Optional[List[str]] = None
- user_name: str = "User"
- retry_attempts: int = 3
- context_length: int = 8192
- task: Optional[str] = None
- interactive: bool = False
-
-
-def parse_yaml_to_json(yaml_str: str) -> str:
- """
- Parses the given YAML string into an AgentInput model and converts it to a JSON string.
-
- Args:
- yaml_str (str): The YAML string to be parsed.
-
- Returns:
- str: The JSON string representation of the parsed YAML.
-
- Raises:
- ValueError: If the YAML string cannot be parsed into the AgentInput model.
- """
- try:
- data = yaml.safe_load(yaml_str)
- agent_input = AgentInput(**data)
- return agent_input.json()
- except yaml.YAMLError as e:
- print(f"YAML Error: {e}")
- raise ValueError("Invalid YAML input.") from e
- except ValueError as e:
- print(f"Validation Error: {e}")
- raise ValueError("Invalid data for AgentInput model.") from e
-
-
-# # Example usage
-# yaml_input = """
-# agent_name: "Custom Agent"
-# system_prompt: "System prompt example"
-# agent_description: "This is a test agent"
-# model_name: "CustomModel"
-# max_loops: 5
-# autosave: true
-# dynamic_temperature_enabled: true
-# dashboard: true
-# verbose: true
-# streaming_on: false
-# saved_state_path: "/path/to/state"
-# sop: "Standard operating procedure"
-# sop_list: ["step1", "step2"]
-# user_name: "Tester"
-# retry_attempts: 5
-# context_length: 4096
-# task: "Perform testing"
-# """
-
-# json_output = parse_yaml_to_json(yaml_input)
-# print(json_output)
-
-registry = AgentRegistry()
-
-
-def create_agent_from_yaml(yaml_path: str) -> None:
- with open(yaml_path, "r") as file:
- yaml_str = file.read()
- agent_json = parse_yaml_to_json(yaml_str)
- agent_config = json.loads(agent_json)
-
- agent = Agent(
- agent_name=agent_config.get("agent_name", "Swarm Agent"),
- system_prompt=agent_config.get("system_prompt"),
- agent_description=agent_config.get("agent_description"),
- llm=OpenAIChat(),
- max_loops=agent_config.get("max_loops", 1),
- autosave=agent_config.get("autosave", False),
- dynamic_temperature_enabled=agent_config.get(
- "dynamic_temperature_enabled", False
- ),
- dashboard=agent_config.get("dashboard", False),
- verbose=agent_config.get("verbose", False),
- streaming_on=agent_config.get("streaming_on", True),
- saved_state_path=agent_config.get("saved_state_path"),
- retry_attempts=agent_config.get("retry_attempts", 3),
- context_length=agent_config.get("context_length", 8192),
- )
-
- registry.add(agent.agent_name, agent)
- logger.info(f"Agent {agent.agent_name} created from {yaml_path}.")
-
-
-def run_agent(agent_name: str, task: str) -> None:
- agent = registry.find_agent_by_name(agent_name)
- agent.run(task)
-
-
-def list_agents() -> None:
- agents = registry.list_agents()
- for agent_id in agents:
- print(agent_id)
diff --git a/swarms/prompts/prompt.py b/swarms/prompts/prompt.py
index b892f4f1..b8628b20 100644
--- a/swarms/prompts/prompt.py
+++ b/swarms/prompts/prompt.py
@@ -4,7 +4,6 @@ import time
import uuid
from typing import Any, Callable, List
-from loguru import logger
from pydantic import (
BaseModel,
Field,
@@ -17,6 +16,9 @@ from swarms.telemetry.capture_sys_data import (
log_agent_data,
)
from swarms.tools.base_tool import BaseTool
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger("prompt")
class Prompt(BaseModel):
@@ -131,9 +133,10 @@ class Prompt(BaseModel):
self.content = new_content
self.edit_count += 1
self.last_modified_at = time.strftime("%Y-%m-%d %H:%M:%S")
- logger.debug(
- f"Prompt {self.id} updated. Edit count: {self.edit_count}. New content: '{self.content}'"
- )
+
+ # logger.debug(
+ # f"Prompt {self.id} updated. Edit count: {self.edit_count}. New content: '{self.content}'"
+ # )
if self.autosave:
self._autosave()
@@ -161,15 +164,15 @@ class Prompt(BaseModel):
)
raise IndexError("Invalid version number for rollback.")
- logger.info(
- f"Rolling back prompt {self.id} to version {version}."
- )
+ # logger.info(
+ # f"Rolling back prompt {self.id} to version {version}."
+ # )
self.content = self.edit_history[version]
self.edit_count = version
self.last_modified_at = time.strftime("%Y-%m-%d %H:%M:%S")
- logger.debug(
- f"Prompt {self.id} rolled back to version {version}. Current content: '{self.content}'"
- )
+ # logger.debug(
+ # f"Prompt {self.id} rolled back to version {version}. Current content: '{self.content}'"
+ # )
self.log_telemetry()
@@ -199,7 +202,7 @@ class Prompt(BaseModel):
Raises:
NotImplementedError: This method is a placeholder for storage integration.
"""
- logger.info(f"Saving prompt {self.id} to persistent storage.")
+ # logger.info(f"Saving prompt {self.id} to persistent storage.")
raise NotImplementedError(
"Persistent storage integration is required."
)
@@ -217,9 +220,9 @@ class Prompt(BaseModel):
Raises:
NotImplementedError: This method is a placeholder for storage integration.
"""
- logger.info(
- f"Loading prompt {prompt_id} from persistent storage."
- )
+ # logger.info(
+ # f"Loading prompt {prompt_id} from persistent storage."
+ # )
raise NotImplementedError(
"Persistent storage integration is required."
)
@@ -254,7 +257,9 @@ class Prompt(BaseModel):
)
with open(file_path, "w") as file:
json.dump(self.model_dump(), file)
- logger.info(f"Autosaved prompt {self.id} to {file_path}.")
+ # logger.info(f"Autosaved prompt {self.id} to {file_path}.")
+
+ # return "Prompt autosaved successfully."
# def auto_generate_prompt(self):
# logger.info(f"Auto-generating prompt for {self.name}")
diff --git a/swarms/schemas/plan.py b/swarms/schemas/plan.py
deleted file mode 100644
index 060d4b3b..00000000
--- a/swarms/schemas/plan.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from typing import List
-from pydantic import BaseModel
-from swarms.schemas.agent_step_schemas import Step
-
-
-class Plan(BaseModel):
- steps: List[Step]
-
- class Config:
- orm_mode = True
diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py
index 94f10fa0..e6fc5369 100644
--- a/swarms/structs/__init__.py
+++ b/swarms/structs/__init__.py
@@ -1,4 +1,5 @@
from swarms.structs.agent import Agent
+from swarms.structs.agents_available import showcase_available_agents
from swarms.structs.auto_swarm import AutoSwarm, AutoSwarmRouter
from swarms.structs.base_structure import BaseStructure
from swarms.structs.base_swarm import BaseSwarm
@@ -11,7 +12,7 @@ from swarms.structs.graph_workflow import (
Node,
NodeType,
)
-from swarms.structs.groupchat import GroupChat
+from swarms.structs.groupchat import GroupChat, GroupChatState
from swarms.structs.majority_voting import (
MajorityVoting,
majority_voting,
@@ -19,16 +20,31 @@ from swarms.structs.majority_voting import (
parse_code_completion,
)
from swarms.structs.message import Message
-from swarms.structs.message_pool import MessagePool
-
from swarms.structs.mixture_of_agents import MixtureOfAgents
from swarms.structs.multi_agent_collab import MultiAgentCollaboration
+from swarms.structs.multi_agent_exec import (
+ run_agent_with_timeout,
+ run_agents_concurrently,
+ run_agents_concurrently_async,
+ run_agents_concurrently_multiprocess,
+ run_agents_sequentially,
+ run_agents_with_different_tasks,
+ run_agents_with_resource_monitoring,
+ run_agents_with_tasks_concurrently,
+ run_single_agent,
+)
from swarms.structs.queue_swarm import TaskQueueSwarm
from swarms.structs.rearrange import AgentRearrange, rearrange
from swarms.structs.round_robin import RoundRobinSwarm
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
+from swarms.structs.swarm_arange import SwarmRearrange
from swarms.structs.swarm_net import SwarmNetwork
+from swarms.structs.swarm_router import (
+ SwarmRouter,
+ SwarmType,
+ swarm_router,
+)
from swarms.structs.swarming_architectures import (
broadcast,
circular_swarm,
@@ -93,7 +109,6 @@ __all__ = [
"most_frequent",
"parse_code_completion",
"Message",
- "MessagePool",
"MultiAgentCollaboration",
"SwarmNetwork",
"AgentRearrange",
@@ -146,4 +161,7 @@ __all__ = [
"run_agents_with_resource_monitoring",
"swarm_router",
"AsyncWorkflow",
+ "run_agents_with_tasks_concurrently",
+ "showcase_available_agents",
+ "GroupChatState",
]
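Several multi-agent execution helpers are now re-exported from swarms.structs. A hedged sketch of the concurrent runner (the exact signature of run_agents_concurrently is inferred from its name and export list, not shown in this diff):

```python
from swarms.structs import Agent, run_agents_concurrently

agents = [
    Agent(agent_name="Analyst-1", model_name="gpt-4o-mini", max_loops=1),
    Agent(agent_name="Analyst-2", model_name="gpt-4o-mini", max_loops=1),
]

# Run the same task across both agents in parallel and collect their outputs
outputs = run_agents_concurrently(agents, task="Summarize the key risks in the Q3 report")
```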
diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py
index 71ce06c3..c9160b1b 100644
--- a/swarms/structs/agent.py
+++ b/swarms/structs/agent.py
@@ -1,3 +1,4 @@
+from datetime import datetime
import asyncio
import json
import logging
@@ -21,15 +22,8 @@ from typing import (
import toml
import yaml
-from clusterops import (
- execute_on_gpu,
- execute_with_cpu_cores,
-)
-from loguru import logger
from pydantic import BaseModel
from swarm_models.tiktoken_wrapper import TikTokenizer
-from termcolor import colored
-
from swarms.agents.ape_agent import auto_generate_prompt
from swarms.prompts.agent_system_prompts import AGENT_SYSTEM_PROMPT_3
from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
@@ -53,6 +47,13 @@ from swarms.utils.data_to_text import data_to_text
from swarms.utils.file_processing import create_file_in_folder
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.artifacts.main_artifact import Artifact
+from swarms.utils.loguru_logger import initialize_logger
+from swarms.utils.wrapper_clusterop import (
+ exec_callable_with_clusterops,
+)
+from swarms.utils.formatter import formatter
+
+logger = initialize_logger(log_folder="agents")
# Utils
@@ -177,6 +178,7 @@ class Agent:
artifacts_on (bool): Enable artifacts
artifacts_output_path (str): The artifacts output path
artifacts_file_extension (str): The artifacts file extension (.pdf, .md, .txt, )
+ scheduled_run_date (datetime): The date and time to schedule the task
Methods:
run: Run the agent
@@ -299,7 +301,6 @@ class Agent:
rules: str = None, # type: ignore
planning: Optional[str] = False,
planning_prompt: Optional[str] = None,
- device: str = None,
custom_planning_prompt: str = None,
memory_chunk_size: int = 2000,
agent_ops_on: bool = False,
@@ -331,6 +332,14 @@ class Agent:
artifacts_on: bool = False,
artifacts_output_path: str = None,
artifacts_file_extension: str = None,
+ device: str = "cpu",
+ all_cores: bool = True,
+ device_id: int = 0,
+ scheduled_run_date: Optional[datetime] = None,
+ do_not_use_cluster_ops: bool = True,
+ all_gpus: bool = False,
+ model_name: str = None,
+ llm_args: dict = None,
*args,
**kwargs,
):
@@ -408,7 +417,6 @@ class Agent:
self.execute_tool = execute_tool
self.planning = planning
self.planning_prompt = planning_prompt
- self.device = device
self.custom_planning_prompt = custom_planning_prompt
self.rules = rules
self.custom_tools_prompt = custom_tools_prompt
@@ -441,6 +449,14 @@ class Agent:
self.artifacts_on = artifacts_on
self.artifacts_output_path = artifacts_output_path
self.artifacts_file_extension = artifacts_file_extension
+ self.device = device
+ self.all_cores = all_cores
+ self.device_id = device_id
+ self.scheduled_run_date = scheduled_run_date
+ self.do_not_use_cluster_ops = do_not_use_cluster_ops
+ self.all_gpus = all_gpus
+ self.model_name = model_name
+ self.llm_args = llm_args
# Initialize the short term memory
self.short_memory = Conversation(
@@ -577,6 +593,21 @@ class Agent:
# Telemetry Processor to log agent data
threading.Thread(target=self.log_agent_data).start()
+        threading.Thread(target=self.llm_handling).start()
+
+ def llm_handling(self):
+
+ if self.llm is None:
+ from swarms.utils.litellm import LiteLLM
+
+ if self.llm_args is not None:
+ self.llm = LiteLLM(
+ model_name=self.model_name, **self.llm_args
+ )
+
+ else:
+ self.llm = LiteLLM(model_name=self.model_name)
+
def check_if_no_prompt_then_autogenerate(self, task: str = None):
"""
Checks if auto_generate_prompt is enabled and generates a prompt by combining agent name, description and system prompt if available.
@@ -657,11 +688,8 @@ class Agent:
return self.stopping_condition(response)
return False
except Exception as error:
- print(
- colored(
- f"Error checking stopping condition: {error}",
- "red",
- )
+ logger.error(
+ f"Error checking stopping condition: {error}"
)
def dynamic_temperature(self):
@@ -675,20 +703,19 @@ class Agent:
if hasattr(self.llm, "temperature"):
# Randomly change the temperature attribute of self.llm object
self.llm.temperature = random.uniform(0.0, 1.0)
- logger.info(f"Temperature: {self.llm.temperature}")
else:
# Use a default temperature
- self.llm.temperature = 0.7
+ self.llm.temperature = 0.5
except Exception as error:
- print(
- colored(
- f"Error dynamically changing temperature: {error}"
- )
+ logger.error(
+ f"Error dynamically changing temperature: {error}"
)
def print_dashboard(self):
"""Print dashboard"""
- print(colored("Initializing Agent Dashboard...", "yellow"))
+ formatter.print_panel(
+ f"Initializing Agent: {self.agent_name}"
+ )
data = self.to_dict()
@@ -696,22 +723,19 @@ class Agent:
# data = json.dumps(data, indent=4)
# json_data = json.dumps(data, indent=4)
- print(
- colored(
- f"""
- Agent Dashboard
- --------------------------------------------
+ formatter.print_panel(
+ f"""
+ Agent Dashboard
+ --------------------------------------------
- Agent {self.agent_name} is initializing for {self.max_loops} with the following configuration:
- ----------------------------------------
+            Agent {self.agent_name} is initializing for {self.max_loops} loops with the following configuration:
+ ----------------------------------------
- Agent Configuration:
- Configuration: {data}
+ Agent Configuration:
+ Configuration: {data}
- ----------------------------------------
- """,
- "green",
- )
+ ----------------------------------------
+ """,
)
def loop_count_print(
@@ -723,21 +747,23 @@ class Agent:
loop_count (_type_): _description_
max_loops (_type_): _description_
"""
- print(colored(f"\nLoop {loop_count} of {max_loops}", "cyan"))
+ logger.info(f"\nLoop {loop_count} of {max_loops}")
print("\n")
# Check parameters
def check_parameters(self):
if self.llm is None:
- raise ValueError("Language model is not provided")
+ raise ValueError(
+ "Language model is not provided. Choose a model from the available models in swarm_models or create a class with a run(task: str) method and or a __call__ method."
+ )
- if self.max_loops is None:
+ if self.max_loops is None or self.max_loops == 0:
raise ValueError("Max loops is not provided")
- if self.max_tokens == 0:
+ if self.max_tokens == 0 or self.max_tokens is None:
raise ValueError("Max tokens is not provided")
- if self.context_length == 0:
+ if self.context_length == 0 or self.context_length is None:
raise ValueError("Context length is not provided")
# Main function
@@ -745,7 +771,11 @@ class Agent:
self,
task: Optional[str] = None,
img: Optional[str] = None,
- is_last: bool = False,
+ speech: Optional[str] = None,
+ video: Optional[str] = None,
+ is_last: Optional[bool] = False,
+ print_task: Optional[bool] = False,
+ generate_speech: Optional[bool] = False,
*args,
**kwargs,
) -> Any:
@@ -788,6 +818,15 @@ class Agent:
if self.long_term_memory is not None:
self.memory_query(task)
+            # Print the user's task request
+
+ if print_task is True:
+ formatter.print_panel(
+ f"\n User: {task}",
+ f"Task Request for {self.agent_name}",
+ )
+
while (
self.max_loops == "auto"
or loop_count < self.max_loops
@@ -834,9 +873,17 @@ class Agent:
# Print
if self.streaming_on is True:
- self.stream_response(response)
+ # self.stream_response(response)
+ formatter.print_panel_token_by_token(
+ f"{self.agent_name}: {response}",
+ title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
+ )
else:
- logger.info(f"Response: {response}")
+ # logger.info(f"Response: {response}")
+ formatter.print_panel(
+ f"{self.agent_name}: {response}",
+ f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
+ )
# Check if response is a dictionary and has 'choices' key
if (
@@ -926,7 +973,7 @@ class Agent:
if self.interactive:
logger.info("Interactive mode enabled.")
- user_input = colored(input("You: "), "red")
+ user_input = input("You: ")
# User-defined exit command
if (
@@ -990,6 +1037,11 @@ class Agent:
self.artifacts_file_extension,
)
+ try:
+ self.log_agent_data()
+ except Exception:
+ pass
+
# More flexible output types
if (
self.output_type == "string"
@@ -1013,13 +1065,26 @@ class Agent:
elif self.return_step_meta is True:
return self.agent_output.model_dump_json(indent=4)
elif self.return_history is True:
- return self.short_memory.get_str()
+ history = self.short_memory.get_str()
+
+ formatter.print_panel(
+ history, title=f"{self.agent_name} History"
+ )
+ return history
else:
raise ValueError(
f"Invalid output type: {self.output_type}"
)
except Exception as error:
+ self.log_agent_data()
+            logger.error(
+                f"Error running agent: {error}. Optimize your input parameters."
+            )
+ raise error
+
+ except KeyboardInterrupt as error:
+ self.log_agent_data()
logger.info(
f"Error running agent: {error} optimize your input parameters"
)
@@ -1222,7 +1287,7 @@ class Agent:
logger.info(f"Running bulk tasks: {inputs}")
return [self.run(**input_data) for input_data in inputs]
except Exception as error:
- print(colored(f"Error running bulk run: {error}", "red"))
+ logger.info(f"Error running bulk run: {error}", "red")
def save(self) -> None:
"""Save the agent history to a file.
@@ -1399,9 +1464,7 @@ class Agent:
with open(file_path, "w") as f:
yaml.dump(self.to_dict(), f)
except Exception as error:
- logger.error(
- colored(f"Error saving agent to YAML: {error}", "red")
- )
+ logger.error(f"Error saving agent to YAML: {error}")
raise error
def get_llm_parameters(self):
@@ -1466,7 +1529,7 @@ class Agent:
role=self.user_name, content=data
)
except Exception as error:
- print(colored(f"Error ingesting docs: {error}", "red"))
+ logger.info(f"Error ingesting docs: {error}", "red")
def ingest_pdf(self, pdf: str):
"""Ingest the pdf into the memory
@@ -1481,7 +1544,7 @@ class Agent:
role=self.user_name, content=text
)
except Exception as error:
- print(colored(f"Error ingesting pdf: {error}", "red"))
+ logger.info(f"Error ingesting pdf: {error}", "red")
def receieve_message(self, name: str, message: str):
"""Receieve a message"""
@@ -1558,19 +1621,22 @@ class Agent:
files = os.listdir(self.docs_folder)
# Extract the text from the files
+ # Process each file and combine their contents
+ all_text = ""
for file in files:
- text = data_to_text(file)
+ file_path = os.path.join(self.docs_folder, file)
+ text = data_to_text(file_path)
+ all_text += f"\nContent from {file}:\n{text}\n"
+ # Add the combined content to memory
return self.short_memory.add(
- role=self.user_name, content=text
+ role=self.user_name, content=all_text
)
except Exception as error:
- print(
- colored(
- f"Error getting docs from doc folders: {error}",
- "red",
- )
+ logger.error(
+ f"Error getting docs from doc folders: {error}"
)
+ raise error
def check_end_session_agentops(self):
if self.agent_ops_on is True:
@@ -1590,7 +1656,8 @@ class Agent:
try:
# Query the long term memory
if self.long_term_memory is not None:
- logger.info(f"Querying long term memory for: {task}")
+ formatter.print_panel(f"Querying RAG for: {task}")
+
memory_retrieval = self.long_term_memory.query(
task, *args, **kwargs
)
@@ -1599,15 +1666,15 @@ class Agent:
f"Documents Available: {str(memory_retrieval)}"
)
- # Count the tokens
- memory_token_count = self.tokenizer.count_tokens(
- memory_retrieval
- )
- if memory_token_count > self.memory_chunk_size:
- # Truncate the memory by the memory chunk size
- memory_retrieval = self.truncate_string_by_tokens(
- memory_retrieval, self.memory_chunk_size
- )
+ # # Count the tokens
+ # memory_token_count = self.tokenizer.count_tokens(
+ # memory_retrieval
+ # )
+ # if memory_token_count > self.memory_chunk_size:
+ # # Truncate the memory by the memory chunk size
+ # memory_retrieval = self.truncate_string_by_tokens(
+ # memory_retrieval, self.memory_chunk_size
+ # )
self.short_memory.add(
role="Database",
@@ -2235,25 +2302,31 @@ class Agent:
self,
task: Optional[str] = None,
img: Optional[str] = None,
- is_last: bool = False,
- device: str = "cpu", # gpu
- device_id: int = 0,
- all_cores: bool = True,
+ device: Optional[str] = "cpu", # gpu
+ device_id: Optional[int] = 0,
+ all_cores: Optional[bool] = True,
+ scheduled_run_date: Optional[datetime] = None,
+ do_not_use_cluster_ops: Optional[bool] = False,
+ all_gpus: Optional[bool] = False,
+ generate_speech: Optional[bool] = False,
*args,
**kwargs,
) -> Any:
"""
- Executes the agent's run method on a specified device.
+ Executes the agent's run method on a specified device, with optional scheduling.
This method attempts to execute the agent's run method on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`.
+        If a `scheduled_run_date` is provided, the method will wait until that date and time before executing the task.
+
Args:
task (Optional[str], optional): The task to be executed. Defaults to None.
img (Optional[str], optional): The image to be processed. Defaults to None.
- is_last (bool, optional): Indicates if this is the last task. Defaults to False.
device (str, optional): The device to use for execution. Defaults to "cpu".
device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0.
all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
+ scheduled_run_date (Optional[datetime], optional): The date and time to schedule the task. Defaults to None.
+ do_not_use_cluster_ops (bool, optional): If True, does not use cluster ops. Defaults to False.
*args: Additional positional arguments to be passed to the execution method.
**kwargs: Additional keyword arguments to be passed to the execution method.
@@ -2264,33 +2337,45 @@ class Agent:
ValueError: If an invalid device is specified.
Exception: If any other error occurs during execution.
"""
- try:
- logger.info(f"Attempting to run on device: {device}")
- if device == "cpu":
- logger.info("Device set to CPU")
- if all_cores is True:
- count = os.cpu_count()
- logger.info(
- f"Using all available CPU cores: {count}"
- )
- else:
- count = device_id
- logger.info(f"Using specific CPU core: {count}")
+ device = device or self.device
+ device_id = device_id or self.device_id
+ all_cores = all_cores or self.all_cores
+ all_gpus = all_gpus or self.all_gpus
+ do_not_use_cluster_ops = (
+ do_not_use_cluster_ops or self.do_not_use_cluster_ops
+ )
- return execute_with_cpu_cores(
- count, self._run, task, img, *args, **kwargs
- )
+ if scheduled_run_date:
+ while datetime.now() < scheduled_run_date:
+ time.sleep(
+ 1
+ ) # Sleep for a short period to avoid busy waiting
- # If device gpu
- elif device == "gpu":
- logger.info("Device set to GPU")
- return execute_on_gpu(
- device_id, self._run, task, img, *args, **kwargs
+ try:
+ # If cluster ops disabled, run directly
+ if do_not_use_cluster_ops is True:
+ logger.info("Running without cluster operations")
+ return self._run(
+ task=task,
+ img=img,
+                    generate_speech=generate_speech,
+                    *args,
+ **kwargs,
)
+
else:
- raise ValueError(
- f"Invalid device specified: {device}. Supported devices are 'cpu' and 'gpu'."
+ return exec_callable_with_clusterops(
+ device=device,
+ device_id=device_id,
+ all_cores=all_cores,
+ all_gpus=all_gpus,
+ func=self._run,
+ task=task,
+ img=img,
+ generate_speech=generate_speech,
+ *args,
+ **kwargs,
)
+
except ValueError as e:
logger.error(f"Invalid device specified: {e}")
raise e
@@ -2334,3 +2419,26 @@ class Agent:
f"Unexpected error handling artifact: {str(e)}"
)
raise
+
+ def showcase_config(self):
+
+ # Convert all values in config_dict to concise string representations
+ config_dict = self.to_dict()
+ for key, value in config_dict.items():
+ if isinstance(value, list):
+ # Format list as a comma-separated string
+ config_dict[key] = ", ".join(
+ str(item) for item in value
+ )
+ elif isinstance(value, dict):
+ # Format dict as key-value pairs in a single string
+ config_dict[key] = ", ".join(
+ f"{k}: {v}" for k, v in value.items()
+ )
+ else:
+ # Ensure any non-iterable value is a string
+ config_dict[key] = str(value)
+
+ return formatter.print_table(
+ f"Agent: {self.agent_name} Configuration", config_dict
+ )
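# A minimal usage sketch for the scheduling and cluster-ops parameters introduced on
# Agent above. The agent name, model name, and one-minute delay are illustrative
# assumptions, and it assumes the device wrapper shown above is exposed as Agent.run(),
# as its docstring implies; `do_not_use_cluster_ops=True` routes the call straight to
# `_run` instead of dispatching through `exec_callable_with_clusterops`.
from datetime import datetime, timedelta

from swarms.structs.agent import Agent

agent = Agent(
    agent_name="Example-Agent",
    model_name="gpt-4o-mini",  # resolved lazily by llm_handling() via LiteLLM
    max_loops=1,
    do_not_use_cluster_ops=True,  # skip clusterops dispatch, run locally
)

# run() sleeps in one-second steps until the scheduled time, then executes the task.
output = agent.run(
    task="Summarize the runtime changes in this module.",
    scheduled_run_date=datetime.now() + timedelta(minutes=1),
)
print(output)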
diff --git a/swarms/structs/agent_registry.py b/swarms/structs/agent_registry.py
index 809f2010..09348622 100644
--- a/swarms/structs/agent_registry.py
+++ b/swarms/structs/agent_registry.py
@@ -7,7 +7,6 @@ from pydantic import BaseModel, Field, ValidationError
from swarms import Agent
from swarms.utils.loguru_logger import logger
-from swarms.utils.report_error_loguru import report_error
class AgentConfigSchema(BaseModel):
@@ -229,7 +228,7 @@ class AgentRegistry:
logger.info("Listing all agents.")
return agent_names
except Exception as e:
- report_error(e)
+ logger.error(f"Error: {e}")
raise e
def return_all_agents(self) -> List[Agent]:
@@ -245,7 +244,7 @@ class AgentRegistry:
logger.info("Returning all agents.")
return agents
except Exception as e:
- report_error(e)
+ logger.error(f"Error: {e}")
raise e
def query(
@@ -276,7 +275,7 @@ class AgentRegistry:
logger.info("Querying agents with condition.")
return agents
except Exception as e:
- report_error(e)
+ logger.error(f"Error: {e}")
raise e
def find_agent_by_name(self, agent_name: str) -> Optional[Agent]:
@@ -300,7 +299,7 @@ class AgentRegistry:
if agent.agent_name == agent_name:
return agent
except Exception as e:
- report_error(e)
+ logger.error(f"Error: {e}")
raise e
def agent_to_py_model(self, agent: Agent):
diff --git a/swarms/structs/agent_rag.py b/swarms/structs/agent_router.py
similarity index 99%
rename from swarms/structs/agent_rag.py
rename to swarms/structs/agent_router.py
index e92926e3..6cf3c094 100644
--- a/swarms/structs/agent_rag.py
+++ b/swarms/structs/agent_router.py
@@ -1,10 +1,12 @@
from typing import List, Optional
import chromadb
-from loguru import logger
from tenacity import retry, stop_after_attempt, wait_exponential
from typing import Union, Callable, Any
from swarms import Agent
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="agent_router")
class AgentRouter:
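# Sketch of the per-module logging pattern the files above are migrated to: instead of
# importing the shared loguru logger, each module asks initialize_logger for its own
# instance. The folder name is an arbitrary example; it presumably determines where that
# component's log records are grouped.
from swarms.utils.loguru_logger import initialize_logger

logger = initialize_logger(log_folder="my_component")
logger.info("Component-scoped logger ready.")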
diff --git a/swarms/structs/agents_available.py b/swarms/structs/agents_available.py
new file mode 100644
index 00000000..5651f9b0
--- /dev/null
+++ b/swarms/structs/agents_available.py
@@ -0,0 +1,87 @@
+from swarms.structs.agent import Agent
+from typing import List
+
+
+def showcase_available_agents(
+ agents: List[Agent],
+ name: str = None,
+ description: str = None,
+ format: str = "XML",
+) -> str:
+ """
+ Format the available agents in either XML or Table format.
+
+ Args:
+ agents (List[Agent]): A list of agents to represent
+ name (str, optional): Name of the swarm
+ description (str, optional): Description of the swarm
+ format (str, optional): Output format ("XML" or "Table"). Defaults to "XML"
+
+ Returns:
+ str: Formatted string containing agent information
+ """
+
+ def truncate(text: str, max_length: int = 130) -> str:
+ return (
+ f"{text[:max_length]}..."
+ if len(text) > max_length
+ else text
+ )
+
+ output = []
+
+ if format.upper() == "TABLE":
+ output.append("\n| ID | Agent Name | Description |")
+ output.append("|-----|------------|-------------|")
+ for idx, agent in enumerate(agents):
+ if isinstance(agent, Agent):
+ agent_name = getattr(agent, "agent_name", str(agent))
+ description = getattr(
+ agent,
+ "description",
+ getattr(
+ agent, "system_prompt", "Unknown description"
+ ),
+ )
+ desc = truncate(description, 50)
+ output.append(
+ f"| {idx + 1} | {agent_name} | {desc} |"
+ )
+ else:
+ output.append(
+ f"| {idx + 1} | {agent} | Unknown description |"
+ )
+ return "\n".join(output)
+
+ # Default XML format
+ output.append("")
+ if name:
+ output.append(f" {name}")
+ if description:
+ output.append(
+ f" {truncate(description)}"
+ )
+ for idx, agent in enumerate(agents):
+ output.append(f" ")
+ if isinstance(agent, Agent):
+ agent_name = getattr(agent, "agent_name", str(agent))
+ description = getattr(
+ agent,
+ "description",
+ getattr(
+ agent, "system_prompt", "Unknown description"
+ ),
+ )
+ output.append(f" {agent_name}")
+ output.append(
+ f" {truncate(description)}"
+ )
+ else:
+ output.append(f" {agent}")
+ output.append(
+ " Unknown description"
+ )
+ output.append(" ")
+ output.append("")
+
+ return "\n".join(output)
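# Quick illustration of showcase_available_agents. Plain strings are accepted and fall
# back to the "Unknown description" branch; real Agent instances would be rendered with
# their agent_name and description/system_prompt. The swarm name and entries below are
# illustrative assumptions.
from swarms.structs.agents_available import showcase_available_agents

print(
    showcase_available_agents(
        agents=["Research-Agent", "Writer-Agent"],
        name="Demo-Swarm",
        description="Two-agent demo swarm",
        format="Table",
    )
)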
diff --git a/swarms/structs/auto_agent_generator.py b/swarms/structs/auto_agent_generator.py
deleted file mode 100644
index 530a9404..00000000
--- a/swarms/structs/auto_agent_generator.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-This class will input a swarm type -> then auto generate a list of `Agent` structures with their name, descriptions, system prompts, and more.
-"""
diff --git a/auto_swarm_builder.py b/swarms/structs/auto_swarm_builder.py
similarity index 88%
rename from auto_swarm_builder.py
rename to swarms/structs/auto_swarm_builder.py
index 177cfdc4..93e542fd 100644
--- a/auto_swarm_builder.py
+++ b/swarms/structs/auto_swarm_builder.py
@@ -1,5 +1,3 @@
-from loguru import logger
-
import os
from typing import List
@@ -8,6 +6,10 @@ from swarm_models import OpenAIFunctionCaller, OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter
+from swarms.utils.loguru_logger import initialize_logger
+from swarms.structs.agents_available import showcase_available_agents
+
+logger = initialize_logger(log_folder="auto_swarm_builder")
class AgentConfig(BaseModel):
@@ -24,10 +26,10 @@ class AgentConfig(BaseModel):
description="The system prompt that defines the agent's behavior",
example="You are a research agent. Your role is to gather and analyze information...",
)
- max_loops: int = Field(
- description="Maximum number of reasoning loops the agent can perform",
- example=3,
- )
+ # max_loops: int = Field(
+ # description="Maximum number of reasoning loops the agent can perform",
+ # example=3,
+ # )
class SwarmConfig(BaseModel):
@@ -211,10 +213,20 @@ class AutoSwarmBuilder:
agent_name=agent_config.name,
agent_description=agent_config.description,
agent_system_prompt=agent_config.system_prompt,
- max_loops=agent_config.max_loops,
+ # max_loops=agent_config.max_loops,
)
agents.append(agent)
+ # Showcasing available agents
+ agents_available = showcase_available_agents(
+ name=self.name,
+ description=self.description,
+ agents=agents,
+ )
+
+ for agent in agents:
+ agent.system_prompt += "\n" + agents_available
+
return agents
def build_agent(
@@ -280,6 +292,8 @@ class AutoSwarmBuilder:
"""
logger.info("Routing task through swarm")
swarm_router_instance = SwarmRouter(
+ name=self.name,
+ description=self.description,
agents=agents,
swarm_type="auto",
max_loops=1,
@@ -290,10 +304,14 @@ class AutoSwarmBuilder:
)
-example = AutoSwarmBuilder()
+example = AutoSwarmBuilder(
+ name="ChipDesign-Swarm",
+ description="A swarm of specialized AI agents collaborating on chip architecture, logic design, verification, and optimization to create novel semiconductor designs",
+ max_loops=1,
+)
print(
example.run(
- "Write multiple blog posts about the latest advancements in swarm intelligence all at once"
+ "Design a new AI accelerator chip optimized for transformer model inference. Consider the following aspects: 1) Overall chip architecture and block diagram 2) Memory hierarchy and interconnects 3) Processing elements and data flow 4) Power and thermal considerations 5) Physical layout recommendations -> "
)
)
diff --git a/swarms/structs/base_swarm.py b/swarms/structs/base_swarm.py
index 2f141213..6e2242be 100644
--- a/swarms/structs/base_swarm.py
+++ b/swarms/structs/base_swarm.py
@@ -20,13 +20,15 @@ from swarms_memory import BaseVectorDatabase
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.structs.omni_agent_types import AgentType
-from swarms.utils.loguru_logger import logger
from pydantic import BaseModel
from swarms.utils.pandas_utils import (
dict_to_dataframe,
display_agents_info,
pydantic_model_to_dataframe,
)
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="base_swarm")
class BaseSwarm(ABC):
diff --git a/swarms/structs/base_workflow.py b/swarms/structs/base_workflow.py
index b5deb916..4107042a 100644
--- a/swarms/structs/base_workflow.py
+++ b/swarms/structs/base_workflow.py
@@ -1,12 +1,13 @@
import json
from typing import Any, Dict, List, Optional
-from termcolor import colored
-
+from swarms.utils.formatter import formatter
from swarms.structs.agent import Agent
from swarms.structs.base_structure import BaseStructure
from swarms.structs.task import Task
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger("base-workflow")
class BaseWorkflow(BaseStructure):
@@ -130,9 +131,10 @@ class BaseWorkflow(BaseStructure):
for task in self.tasks:
task.result = None
except Exception as error:
- print(
- colored(f"Error resetting workflow: {error}", "red"),
+ formatter.print_panel(
+ f"Error resetting workflow: {error}"
)
+ raise error
def get_task_results(self) -> Dict[str, Any]:
"""
@@ -146,10 +148,8 @@ class BaseWorkflow(BaseStructure):
task.description: task.result for task in self.tasks
}
except Exception as error:
- print(
- colored(
- f"Error getting task results: {error}", "red"
- ),
+ formatter.print_panel(
+ f"Error getting task results: {error}"
)
def remove_task(self, task: str) -> None:
@@ -161,12 +161,10 @@ class BaseWorkflow(BaseStructure):
if task.description != task
]
except Exception as error:
- print(
- colored(
- f"Error removing task from workflow: {error}",
- "red",
- ),
+ formatter.print_panel(
+ f"Error removing task from workflow: {error}",
)
+ raise error
def update_task(self, task: str, **updates) -> None:
"""
@@ -201,11 +199,9 @@ class BaseWorkflow(BaseStructure):
f"Task {task} not found in workflow."
)
except Exception as error:
- print(
- colored(
- f"Error updating task in workflow: {error}", "red"
- ),
- )
+ formatter.print_panel(
+ f"Error updating task in workflow: {error}"
+            )
def delete_task(self, task: str) -> None:
"""
@@ -238,12 +234,10 @@ class BaseWorkflow(BaseStructure):
f"Task {task} not found in workflow."
)
except Exception as error:
- print(
- colored(
- f"Error deleting task from workflow: {error}",
- "red",
- ),
+ formatter.print_panel(
+ f"Error deleting task from workflow: {error}",
)
+ raise error
def save_workflow_state(
self,
@@ -285,23 +279,18 @@ class BaseWorkflow(BaseStructure):
}
json.dump(state, f, indent=4)
except Exception as error:
- print(
- colored(
- f"Error saving workflow state: {error}",
- "red",
- )
+ formatter.print_panel(
+ f"Error saving workflow state: {error}",
)
+ raise error
def add_objective_to_workflow(self, task: str, **kwargs) -> None:
"""Adds an objective to the workflow."""
try:
- print(
- colored(
- """
- Adding Objective to Workflow...""",
- "green",
- attrs=["bold", "underline"],
- )
+            formatter.print_panel(
+                "Adding Objective to Workflow..."
+            )
task = Task(
@@ -312,12 +301,10 @@ class BaseWorkflow(BaseStructure):
)
self.tasks.append(task)
except Exception as error:
- print(
- colored(
- f"Error adding objective to workflow: {error}",
- "red",
- )
+ formatter.print_panel(
+ f"Error adding objective to workflow: {error}",
)
+ raise error
def load_workflow_state(
self, filepath: str = None, **kwargs
@@ -357,11 +344,8 @@ class BaseWorkflow(BaseStructure):
)
self.tasks.append(task)
except Exception as error:
- print(
- colored(
- f"Error loading workflow state: {error}",
- "red",
- )
+ formatter.print_panel(
+ f"Error loading workflow state: {error}",
)
def workflow_dashboard(self, **kwargs) -> None:
@@ -381,25 +365,21 @@ class BaseWorkflow(BaseStructure):
>>> workflow.workflow_dashboard()
"""
- print(
- colored(
- f"""
- Sequential Workflow Dashboard
- --------------------------------
- Name: {self.name}
- Description: {self.description}
- task_pool: {len(self.task_pool)}
- Max Loops: {self.max_loops}
- Autosave: {self.autosave}
- Autosave Filepath: {self.saved_state_filepath}
- Restore Filepath: {self.restore_state_filepath}
- --------------------------------
- Metadata:
- kwargs: {kwargs}
- """,
- "cyan",
- attrs=["bold", "underline"],
- )
+ formatter.print_panel(
+ f"""
+ Sequential Workflow Dashboard
+ --------------------------------
+ Name: {self.name}
+ Description: {self.description}
+ task_pool: {len(self.task_pool)}
+ Max Loops: {self.max_loops}
+ Autosave: {self.autosave}
+ Autosave Filepath: {self.saved_state_filepath}
+ Restore Filepath: {self.restore_state_filepath}
+ --------------------------------
+ Metadata:
+ kwargs: {kwargs}
+ """
)
def workflow_bootup(self, **kwargs) -> None:
@@ -407,11 +387,6 @@ class BaseWorkflow(BaseStructure):
Workflow bootup.
"""
- print(
- colored(
- """
- Sequential Workflow Initializing...""",
- "green",
- attrs=["bold", "underline"],
- )
+ formatter.print_panel(
+ """Sequential Workflow Initializing...""",
)
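# Sketch of the formatter calls that replace the termcolor prints in this file. The
# first argument is the panel body; elsewhere in this changeset a second positional
# argument is passed as a panel title, so the title string here is an assumption.
from swarms.utils.formatter import formatter

formatter.print_panel("Sequential Workflow Initializing...")
formatter.print_panel(
    "Error resetting workflow: <details>",
    "Workflow Error",  # assumed title slot, mirroring the agent.py usage above
)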
diff --git a/swarms/structs/company.py b/swarms/structs/company.py
index ef576e80..f7fb36b7 100644
--- a/swarms/structs/company.py
+++ b/swarms/structs/company.py
@@ -2,8 +2,11 @@ from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
from swarms.structs.agent import Agent
-from swarms.utils.loguru_logger import logger
from swarms.structs.base_swarm import BaseSwarm
+from swarms.utils.loguru_logger import initialize_logger
+
+
+logger = initialize_logger("company-swarm")
@dataclass
diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py
index 02102188..74945914 100644
--- a/swarms/structs/concurrent_workflow.py
+++ b/swarms/structs/concurrent_workflow.py
@@ -5,7 +5,6 @@ from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
-from loguru import logger
from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_exponential
@@ -19,6 +18,9 @@ from clusterops import (
execute_on_multiple_gpus,
list_available_gpus,
)
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="concurrent_workflow")
class AgentOutputSchema(BaseModel):
diff --git a/swarms/structs/conversation.py b/swarms/structs/conversation.py
index 768c19c5..a86a6d3b 100644
--- a/swarms/structs/conversation.py
+++ b/swarms/structs/conversation.py
@@ -3,9 +3,14 @@ import json
from typing import Any, Optional
import yaml
-from termcolor import colored
-
from swarms.structs.base_structure import BaseStructure
+from typing import TYPE_CHECKING
+from swarms.utils.formatter import formatter
+
+if TYPE_CHECKING:
+ from swarms.structs.agent import (
+ Agent,
+ ) # Only imported during type checking
class Conversation(BaseStructure):
@@ -185,18 +190,9 @@ class Conversation(BaseStructure):
Args:
detailed (bool, optional): detailed. Defaults to False.
"""
- role_to_color = {
- "system": "red",
- "user": "green",
- "assistant": "blue",
- "function": "magenta",
- }
for message in self.conversation_history:
- print(
- colored(
- f"{message['role']}: {message['content']}\n\n",
- role_to_color[message["role"]],
- )
+ formatter.print_panel(
+ f"{message['role']}: {message['content']}\n\n"
)
def export_conversation(self, filename: str, *args, **kwargs):
@@ -301,46 +297,36 @@ class Conversation(BaseStructure):
for message in messages:
if message["role"] == "system":
- print(
- colored(
- f"system: {message['content']}\n",
- role_to_color[message["role"]],
- )
+ formatter.print_panel(
+ f"system: {message['content']}\n",
+ role_to_color[message["role"]],
)
elif message["role"] == "user":
- print(
- colored(
- f"user: {message['content']}\n",
- role_to_color[message["role"]],
- )
+ formatter.print_panel(
+ f"user: {message['content']}\n",
+ role_to_color[message["role"]],
)
elif message["role"] == "assistant" and message.get(
"function_call"
):
- print(
- colored(
- f"assistant: {message['function_call']}\n",
- role_to_color[message["role"]],
- )
+ formatter.print_panel(
+ f"assistant: {message['function_call']}\n",
+ role_to_color[message["role"]],
)
elif message["role"] == "assistant" and not message.get(
"function_call"
):
- print(
- colored(
- f"assistant: {message['content']}\n",
- role_to_color[message["role"]],
- )
+ formatter.print_panel(
+ f"assistant: {message['content']}\n",
+ role_to_color[message["role"]],
)
elif message["role"] == "tool":
- print(
- colored(
- (
- f"function ({message['name']}):"
- f" {message['content']}\n"
- ),
- role_to_color[message["role"]],
- )
+ formatter.print_panel(
+ (
+ f"function ({message['name']}):"
+ f" {message['content']}\n"
+ ),
+ role_to_color[message["role"]],
)
def truncate_memory_with_tokenizer(self):
@@ -392,6 +378,33 @@ class Conversation(BaseStructure):
def to_yaml(self):
return yaml.dump(self.conversation_history)
+ def get_visible_messages(self, agent: "Agent", turn: int):
+ """
+ Get the visible messages for a given agent and turn.
+
+ Args:
+ agent (Agent): The agent.
+ turn (int): The turn number.
+
+ Returns:
+ List[Dict]: The list of visible messages.
+ """
+ # Get the messages before the current turn
+ prev_messages = [
+ message
+ for message in self.conversation_history
+ if message["turn"] < turn
+ ]
+
+ visible_messages = []
+ for message in prev_messages:
+ if (
+ message["visible_to"] == "all"
+ or agent.agent_name in message["visible_to"]
+ ):
+ visible_messages.append(message)
+ return visible_messages
+
# # Example usage
# conversation = Conversation()
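# Sketch of the new visibility filter. It assumes each stored message carries "turn"
# and "visible_to" keys, which Conversation.add() does not set by itself; a group-chat
# style caller would need to attach them. The agent stub and messages are illustrative.
from swarms.structs.conversation import Conversation

conversation = Conversation()
conversation.conversation_history.extend(
    [
        {"role": "Moderator", "content": "Kickoff", "turn": 0, "visible_to": "all"},
        {"role": "Agent-A", "content": "Private note", "turn": 0, "visible_to": ["Agent-B"]},
    ]
)


class _StubAgent:
    agent_name = "Agent-C"


# Agent-C only sees the message marked visible to "all".
print(conversation.get_visible_messages(_StubAgent(), turn=1))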
diff --git a/swarms/structs/federated_swarm.py b/swarms/structs/federated_swarm.py
deleted file mode 100644
index 6c5e09ca..00000000
--- a/swarms/structs/federated_swarm.py
+++ /dev/null
@@ -1,393 +0,0 @@
-from typing import List, Callable, Union, Optional
-from loguru import logger
-from swarms.structs.base_swarm import BaseSwarm
-from queue import PriorityQueue
-from concurrent.futures import (
- ThreadPoolExecutor,
- as_completed,
-)
-import time
-from pydantic import BaseModel, Field
-
-
-class SwarmRunData(BaseModel):
- """
- Pydantic model to capture metadata about each swarm's execution.
- """
-
- swarm_name: str
- task: str
- priority: int
- start_time: Optional[float] = None
- end_time: Optional[float] = None
- duration: Optional[float] = None
- status: str = "Pending"
- retries: int = 0
- result: Optional[str] = None
- exception: Optional[str] = None
-
-
-class FederatedSwarmModel(BaseModel):
- """
- Pydantic base model to capture and log data for the FederatedSwarm system.
- """
-
- task: str
- swarms_data: List[SwarmRunData] = Field(default_factory=list)
-
- def add_swarm(self, swarm_name: str, task: str, priority: int):
- swarm_data = SwarmRunData(
- swarm_name=swarm_name, task=task, priority=priority
- )
- self.swarms_data.append(swarm_data)
-
- def update_swarm_status(
- self,
- swarm_name: str,
- status: str,
- start_time: float = None,
- end_time: float = None,
- retries: int = 0,
- result: str = None,
- exception: str = None,
- ):
- for swarm in self.swarms_data:
- if swarm.name == swarm_name:
- swarm.status = status
- if start_time:
- swarm.start_time = start_time
- if end_time:
- swarm.end_time = end_time
- swarm.duration = end_time - swarm.start_time
- swarm.retries = retries
- swarm.result = result
- swarm.exception = exception
- break
-
-
-class FederatedSwarm:
- def __init__(
- self,
- swarms: List[Union[BaseSwarm, Callable]],
- max_workers: int = 4,
- ):
- """
- Initializes the FederatedSwarm with a list of swarms or callable objects and
- sets up a priority queue and thread pool for concurrency.
-
- Args:
- swarms (List[Union[BaseSwarm, Callable]]): A list of swarms (BaseSwarm) or callable objects.
- max_workers (int): The maximum number of concurrent workers (threads) to run swarms in parallel.
- """
- self.swarms = PriorityQueue()
- self.max_workers = max_workers
- self.thread_pool = ThreadPoolExecutor(
- max_workers=self.max_workers
- )
- self.task_queue = []
- self.future_to_swarm = {}
- self.results = {}
- self.validate_swarms(swarms)
-
- def init_metadata(self, task: str):
- """
- Initializes the Pydantic base model to capture metadata about the current task and swarms.
- """
- self.metadata = FederatedSwarmModel(task=task)
- for priority, swarm in list(self.swarms.queue):
- swarm_name = (
- swarm.__class__.__name__
- if hasattr(swarm, "__class__")
- else str(swarm)
- )
- self.metadata.add_swarm(
- swarm_name=swarm_name, task=task, priority=priority
- )
- logger.info(f"Metadata initialized for task '{task}'.")
-
- def validate_swarms(
- self, swarms: List[Union[BaseSwarm, Callable]]
- ):
- """
- Validates and adds swarms to the priority queue, ensuring each swarm has a `run(task)` method.
-
- Args:
- swarms (List[Union[BaseSwarm, Callable]]): List of swarms with an optional priority value.
- """
- for swarm, priority in swarms:
- if not callable(swarm):
- raise TypeError(f"{swarm} is not callable.")
-
- if hasattr(swarm, "run"):
- logger.info(f"{swarm} has a 'run' method.")
- else:
- raise AttributeError(
- f"{swarm} does not have a 'run(task)' method."
- )
-
- self.swarms.put((priority, swarm))
- logger.info(
- f"Swarm {swarm} added with priority {priority}."
- )
-
- def run_parallel(
- self,
- task: str,
- timeout: Optional[float] = None,
- retries: int = 0,
- ):
- """
- Runs all swarms in parallel with prioritization and optional timeout.
-
- Args:
- task (str): The task to be passed to the `run` method of each swarm.
- timeout (Optional[float]): Maximum time allowed for each swarm to run.
- retries (int): Number of retries allowed for failed swarms.
- """
- logger.info(
- f"Running task '{task}' in parallel with timeout: {timeout}, retries: {retries}"
- )
- self.init_metadata(task)
-
- while not self.swarms.empty():
- priority, swarm = self.swarms.get()
- swarm_name = (
- swarm.__class__.__name__
- if hasattr(swarm, "__class__")
- else str(swarm)
- )
- future = self.thread_pool.submit(
- self._run_with_retry,
- swarm,
- task,
- retries,
- timeout,
- swarm_name,
- )
- self.future_to_swarm[future] = swarm
-
- for future in as_completed(self.future_to_swarm):
- swarm = self.future_to_swarm[future]
- try:
- result = future.result()
- swarm_name = (
- swarm.__class__.__name__
- if hasattr(swarm, "__class__")
- else str(swarm)
- )
- self.metadata.update_swarm_status(
- swarm_name=swarm_name,
- status="Completed",
- result=result,
- )
- logger.info(
- f"Swarm {swarm_name} completed successfully."
- )
- except Exception as e:
- swarm_name = (
- swarm.__class__.__name__
- if hasattr(swarm, "__class__")
- else str(swarm)
- )
- self.metadata.update_swarm_status(
- swarm_name=swarm_name,
- status="Failed",
- exception=str(e),
- )
- logger.error(f"Swarm {swarm_name} failed: {e}")
- self.results[swarm] = "Failed"
-
- def run_sequentially(
- self,
- task: str,
- retries: int = 0,
- timeout: Optional[float] = None,
- ):
- """
- Runs all swarms sequentially in order of priority.
-
- Args:
- task (str): The task to pass to the `run` method of each swarm.
- retries (int): Number of retries for failed swarms.
- timeout (Optional[float]): Optional time limit for each swarm.
- """
- logger.info(f"Running task '{task}' sequentially.")
-
- while not self.swarms.empty():
- priority, swarm = self.swarms.get()
- try:
- logger.info(
- f"Running swarm {swarm} with priority {priority}."
- )
- self._run_with_retry(swarm, task, retries, timeout)
- logger.info(f"Swarm {swarm} completed successfully.")
- except Exception as e:
- logger.error(f"Swarm {swarm} failed with error: {e}")
-
- def _run_with_retry(
- self,
- swarm: Union[BaseSwarm, Callable],
- task: str,
- retries: int,
- timeout: Optional[float],
- swarm_name: str,
- ):
- """
- Helper function to run a swarm with a retry mechanism and optional timeout.
-
- Args:
- swarm (Union[BaseSwarm, Callable]): The swarm to run.
- task (str): The task to pass to the swarm.
- retries (int): The number of retries allowed for the swarm in case of failure.
- timeout (Optional[float]): Maximum time allowed for the swarm to run.
- swarm_name (str): Name of the swarm (used for metadata).
- """
- attempts = 0
- start_time = time.time()
- while attempts <= retries:
- try:
- logger.info(
- f"Running swarm {swarm}. Attempt: {attempts + 1}"
- )
- self.metadata.update_swarm_status(
- swarm_name=swarm_name,
- status="Running",
- start_time=start_time,
- )
- if hasattr(swarm, "run"):
- if timeout:
- start_time = time.time()
- swarm.run(task)
- duration = time.time() - start_time
- if duration > timeout:
- raise TimeoutError(
- f"Swarm {swarm} timed out after {duration:.2f}s."
- )
- else:
- swarm.run(task)
- else:
- swarm(task)
- end_time = time.time()
- self.metadata.update_swarm_status(
- swarm_name=swarm_name,
- status="Completed",
- end_time=end_time,
- retries=attempts,
- )
- return "Success"
- except Exception as e:
- logger.error(f"Swarm {swarm} failed: {e}")
- attempts += 1
- if attempts > retries:
- end_time = time.time()
- self.metadata.update_swarm_status(
- swarm_name=swarm_name,
- status="Failed",
- end_time=end_time,
- retries=attempts,
- exception=str(e),
- )
- logger.error(f"Swarm {swarm} exhausted retries.")
- raise
-
- def add_swarm(
- self, swarm: Union[BaseSwarm, Callable], priority: int
- ):
- """
- Adds a new swarm to the FederatedSwarm at runtime.
-
- Args:
- swarm (Union[BaseSwarm, Callable]): The swarm to add.
- priority (int): The priority level for the swarm.
- """
- self.swarms.put((priority, swarm))
- logger.info(
- f"Swarm {swarm} added dynamically with priority {priority}."
- )
-
- def queue_task(self, task: str):
- """
- Adds a task to the internal task queue for batch processing.
-
- Args:
- task (str): The task to queue.
- """
- self.task_queue.append(task)
- logger.info(f"Task '{task}' added to the queue.")
-
- def process_task_queue(self):
- """
- Processes all tasks in the task queue.
- """
- for task in self.task_queue:
- logger.info(f"Processing task: {task}")
- self.run_parallel(task)
- self.task_queue = []
-
- def log_swarm_results(self):
- """
- Logs the results of all swarms after execution.
- """
- logger.info("Logging swarm results...")
- for swarm, result in self.results.items():
- logger.info(f"Swarm {swarm}: {result}")
-
- def get_swarm_status(self) -> dict:
- """
- Retrieves the status of each swarm (completed, running, failed).
-
- Returns:
- dict: Dictionary containing swarm statuses.
- """
- status = {}
- for future, swarm in self.future_to_swarm.items():
- if future.done():
- status[swarm] = "Completed"
- elif future.running():
- status[swarm] = "Running"
- else:
- status[swarm] = "Failed"
- return status
-
- def cancel_running_swarms(self):
- """
- Cancels all currently running swarms by shutting down the thread pool.
- """
- logger.warning("Cancelling all running swarms...")
- self.thread_pool.shutdown(wait=False)
- logger.info("All running swarms cancelled.")
-
-
-# Example Usage:
-
-
-# class ExampleSwarm(BaseSwarm):
-# def run(self, task: str):
-# logger.info(f"ExampleSwarm is processing task: {task}")
-
-
-# def example_callable(task: str):
-# logger.info(f"Callable is processing task: {task}")
-
-
-# if __name__ == "__main__":
-# swarms = [(ExampleSwarm(), 1), (example_callable, 2)]
-# federated_swarm = FederatedSwarm(swarms)
-
-# # Run in parallel
-# federated_swarm.run_parallel(
-# "Process data", timeout=10, retries=3
-# )
-
-# # Run sequentially
-# federated_swarm.run_sequentially("Process data sequentially")
-
-# # Log results
-# federated_swarm.log_swarm_results()
-
-# # Get status of swarms
-# status = federated_swarm.get_swarm_status()
-# logger.info(f"Swarm statuses: {status}")
-
-# # Cancel running swarms (if needed)
-# # federated_swarm.cancel_running_swarms()
diff --git a/swarms/structs/graph_swarm.py b/swarms/structs/graph_swarm.py
new file mode 100644
index 00000000..82cef523
--- /dev/null
+++ b/swarms/structs/graph_swarm.py
@@ -0,0 +1,665 @@
+"""
+GraphSwarm: A production-grade framework for orchestrating swarms of agents
+Author: Claude
+License: MIT
+Version: 2.0.0
+"""
+
+import asyncio
+import json
+import time
+from concurrent.futures import ThreadPoolExecutor
+from datetime import datetime
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import chromadb
+import networkx as nx
+from loguru import logger
+from pydantic import BaseModel, Field
+
+from swarms import Agent
+
+
+# Configure logging
+logger.add(
+ "graphswarm.log",
+ rotation="500 MB",
+ retention="10 days",
+ level="INFO",
+ format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
+)
+
+
+class AgentOutput(BaseModel):
+ """Structured output from an agent."""
+
+ agent_name: str
+ timestamp: float = Field(default_factory=time.time)
+ output: Any
+ execution_time: float
+ error: Optional[str] = None
+ metadata: Dict = Field(default_factory=dict)
+
+
+class SwarmOutput(BaseModel):
+ """Structured output from the entire swarm."""
+
+ timestamp: float = Field(default_factory=time.time)
+ outputs: Dict[str, AgentOutput]
+ execution_time: float
+ success: bool
+ error: Optional[str] = None
+ metadata: Dict = Field(default_factory=dict)
+
+
+class SwarmMemory:
+ """Vector-based memory system for GraphSwarm using ChromaDB."""
+
+ def __init__(self, collection_name: str = "swarm_memories"):
+ """Initialize SwarmMemory with ChromaDB."""
+ self.client = chromadb.Client()
+
+ # Get or create collection
+ self.collection = self.client.get_or_create_collection(
+ name=collection_name,
+ metadata={"description": "GraphSwarm execution memories"},
+ )
+
+ def store_execution(self, task: str, result: SwarmOutput):
+ """Store execution results in vector memory."""
+ try:
+ # Create metadata
+ metadata = {
+ "timestamp": datetime.now().isoformat(),
+ "success": result.success,
+ "execution_time": result.execution_time,
+ "agent_sequence": json.dumps(
+ [name for name in result.outputs.keys()]
+ ),
+ "error": result.error if result.error else "",
+ }
+
+ # Create document from outputs
+ document = {
+ "task": task,
+ "outputs": json.dumps(
+ {
+ name: {
+ "output": str(output.output),
+ "execution_time": output.execution_time,
+ "error": output.error,
+ }
+ for name, output in result.outputs.items()
+ }
+ ),
+ }
+
+ # Store in ChromaDB
+ self.collection.add(
+ documents=[json.dumps(document)],
+ metadatas=[metadata],
+ ids=[f"exec_{datetime.now().timestamp()}"],
+ )
+
+ print("added to database")
+
+ logger.info(f"Stored execution in memory: {task}")
+
+ except Exception as e:
+ logger.error(
+ f"Failed to store execution in memory: {str(e)}"
+ )
+
+ def get_similar_executions(self, task: str, limit: int = 5):
+ """Retrieve similar past executions."""
+ try:
+ # Query ChromaDB for similar executions
+ results = self.collection.query(
+ query_texts=[task],
+ n_results=limit,
+ include=["documents", "metadatas"],
+ )
+
+ print(results)
+
+ if not results["documents"]:
+ return []
+
+ # Process results
+ executions = []
+ for doc, metadata in zip(
+ results["documents"][0], results["metadatas"][0]
+ ):
+ doc_dict = json.loads(doc)
+ executions.append(
+ {
+ "task": doc_dict["task"],
+ "outputs": json.loads(doc_dict["outputs"]),
+ "success": metadata["success"],
+ "execution_time": metadata["execution_time"],
+ "agent_sequence": json.loads(
+ metadata["agent_sequence"]
+ ),
+ "timestamp": metadata["timestamp"],
+ }
+ )
+
+ return executions
+
+ except Exception as e:
+ logger.error(
+ f"Failed to retrieve similar executions: {str(e)}"
+ )
+ return []
+
+ def get_optimal_sequence(self, task: str) -> Optional[List[str]]:
+ """Get the most successful agent sequence for similar tasks."""
+ similar_executions = self.get_similar_executions(task)
+ print(f"similar_executions {similar_executions}")
+
+ if not similar_executions:
+ return None
+
+ # Sort by success and execution time
+ successful_execs = [
+ ex for ex in similar_executions if ex["success"]
+ ]
+
+ if not successful_execs:
+ return None
+
+ # Return sequence from most successful execution
+ return successful_execs[0]["agent_sequence"]
+
+ def clear_memory(self):
+ """Clear all memories."""
+ self.client.delete_collection(self.collection.name)
+ self.collection = self.client.get_or_create_collection(
+ name=self.collection.name
+ )
+
+
+class GraphSwarm:
+ """
+ Enhanced framework for creating and managing swarms of collaborative agents.
+ """
+
+ def __init__(
+ self,
+ agents: Union[
+ List[Agent], List[Tuple[Agent, List[str]]], None
+ ] = None,
+ max_workers: Optional[int] = None,
+ swarm_name: str = "Collaborative Agent Swarm",
+ memory_collection: str = "swarm_memory",
+ ):
+ """Initialize GraphSwarm."""
+ self.graph = nx.DiGraph()
+ self.agents: Dict[str, Agent] = {}
+ self.dependencies: Dict[str, List[str]] = {}
+ self.executor = ThreadPoolExecutor(max_workers=max_workers)
+ self.swarm_name = swarm_name
+ self.memory_collection = memory_collection
+ self.memory = SwarmMemory(collection_name=memory_collection)
+
+ if agents:
+ self.initialize_agents(agents)
+
+ logger.info(f"Initialized GraphSwarm: {swarm_name}")
+
+ def initialize_agents(
+ self,
+ agents: Union[List[Agent], List[Tuple[Agent, List[str]]]],
+ ):
+ """Initialize agents and their dependencies."""
+ try:
+ # Handle list of Agents or (Agent, dependencies) tuples
+ for item in agents:
+ if isinstance(item, tuple):
+ agent, dependencies = item
+ else:
+ agent, dependencies = item, []
+
+ if not isinstance(agent, Agent):
+ raise ValueError(
+ f"Expected Agent object, got {type(agent)}"
+ )
+
+ self.agents[agent.agent_name] = agent
+ self.dependencies[agent.agent_name] = dependencies
+ self.graph.add_node(agent.agent_name, agent=agent)
+
+ # Add dependencies
+ for dep in dependencies:
+ if dep not in self.agents:
+ raise ValueError(
+ f"Dependency {dep} not found for agent {agent.agent_name}"
+ )
+ self.graph.add_edge(dep, agent.agent_name)
+
+ self._validate_graph()
+
+ except Exception as e:
+ logger.error(f"Failed to initialize agents: {str(e)}")
+ raise
+
+ def _validate_graph(self):
+ """Validate the agent dependency graph."""
+ if not self.graph.nodes():
+ raise ValueError("No agents added to swarm")
+
+ if not nx.is_directed_acyclic_graph(self.graph):
+ cycles = list(nx.simple_cycles(self.graph))
+ raise ValueError(
+ f"Agent dependency graph contains cycles: {cycles}"
+ )
+
+ def _get_agent_role_description(self, agent_name: str) -> str:
+ """Generate a description of the agent's role in the swarm."""
+ predecessors = list(self.graph.predecessors(agent_name))
+ successors = list(self.graph.successors(agent_name))
+ position = (
+ "initial"
+ if not predecessors
+ else ("final" if not successors else "intermediate")
+ )
+
+ role = f"""You are {agent_name}, a specialized agent in the {self.swarm_name}.
+ Position: {position} agent in the workflow
+
+ Your relationships:"""
+
+ if predecessors:
+ role += (
+ f"\nYou receive input from: {', '.join(predecessors)}"
+ )
+ if successors:
+ role += f"\nYour output will be used by: {', '.join(successors)}"
+
+ return role
+
+ def _generate_workflow_context(self) -> str:
+ """Generate a description of the entire workflow."""
+ execution_order = list(nx.topological_sort(self.graph))
+
+ workflow = f"""Workflow Overview of {self.swarm_name}:
+
+ Processing Order:
+ {' -> '.join(execution_order)}
+
+ Agent Roles:
+ """
+
+ for agent_name in execution_order:
+ predecessors = list(self.graph.predecessors(agent_name))
+ successors = list(self.graph.successors(agent_name))
+
+ workflow += f"\n\n{agent_name}:"
+ if predecessors:
+ workflow += (
+ f"\n- Receives from: {', '.join(predecessors)}"
+ )
+ if successors:
+ workflow += f"\n- Sends to: {', '.join(successors)}"
+ if not predecessors and not successors:
+ workflow += "\n- Independent agent"
+
+ return workflow
+
+ def _build_agent_prompt(
+ self, agent_name: str, task: str, context: Dict = None
+ ) -> str:
+ """Build a comprehensive prompt for the agent including role and context."""
+ prompt_parts = [
+ self._get_agent_role_description(agent_name),
+ "\nWorkflow Context:",
+ self._generate_workflow_context(),
+ "\nYour Task:",
+ task,
+ ]
+
+ if context:
+ prompt_parts.extend(
+ ["\nContext from Previous Agents:", str(context)]
+ )
+
+ prompt_parts.extend(
+ [
+ "\nInstructions:",
+ "1. Process the task according to your role",
+ "2. Consider the input from previous agents when available",
+ "3. Provide clear, structured output",
+ "4. Remember that your output will be used by subsequent agents",
+ "\nResponse Guidelines:",
+ "- Provide clear, well-organized output",
+ "- Include relevant details and insights",
+ "- Highlight key findings",
+ "- Flag any uncertainties or issues",
+ ]
+ )
+
+ return "\n".join(prompt_parts)
+
+ async def _execute_agent(
+ self, agent_name: str, task: str, context: Dict = None
+ ) -> AgentOutput:
+ """Execute a single agent."""
+ start_time = time.time()
+ agent = self.agents[agent_name]
+
+ try:
+ # Build comprehensive prompt
+ full_prompt = self._build_agent_prompt(
+ agent_name, task, context
+ )
+ logger.debug(f"Prompt for {agent_name}:\n{full_prompt}")
+
+ # Execute agent
+ output = await asyncio.to_thread(agent.run, full_prompt)
+
+ return AgentOutput(
+ agent_name=agent_name,
+ output=output,
+ execution_time=time.time() - start_time,
+ metadata={
+ "task": task,
+ "context": context,
+ "position_in_workflow": list(
+ nx.topological_sort(self.graph)
+ ).index(agent_name),
+ },
+ )
+
+ except Exception as e:
+ logger.error(
+ f"Error executing agent {agent_name}: {str(e)}"
+ )
+ return AgentOutput(
+ agent_name=agent_name,
+ output=None,
+ execution_time=time.time() - start_time,
+ error=str(e),
+ metadata={"task": task},
+ )
+
+ async def execute(self, task: str) -> SwarmOutput:
+ """
+ Execute the entire swarm of agents with memory integration.
+
+ Args:
+ task: Initial task to execute
+
+ Returns:
+ SwarmOutput: Structured output from all agents
+ """
+ start_time = time.time()
+ outputs = {}
+ success = True
+ error = None
+
+ try:
+ # Get similar past executions
+ similar_executions = self.memory.get_similar_executions(
+ task, limit=3
+ )
+ optimal_sequence = self.memory.get_optimal_sequence(task)
+
+ # Get base execution order
+ base_execution_order = list(
+ nx.topological_sort(self.graph)
+ )
+
+ # Determine final execution order
+ if optimal_sequence and all(
+ agent in base_execution_order
+ for agent in optimal_sequence
+ ):
+ logger.info(
+ f"Using optimal sequence from memory: {optimal_sequence}"
+ )
+ execution_order = optimal_sequence
+ else:
+ execution_order = base_execution_order
+
+ # Get historical context if available
+ historical_context = {}
+ if similar_executions:
+ best_execution = similar_executions[0]
+ if best_execution["success"]:
+ historical_context = {
+ "similar_task": best_execution["task"],
+ "previous_outputs": best_execution["outputs"],
+ "execution_time": best_execution[
+ "execution_time"
+ ],
+ "success_patterns": self._extract_success_patterns(
+ similar_executions
+ ),
+ }
+
+ # Execute agents in order
+ for agent_name in execution_order:
+ try:
+ # Get context from dependencies and history
+ agent_context = {
+ "dependencies": {
+ dep: outputs[dep].output
+ for dep in self.graph.predecessors(
+ agent_name
+ )
+ if dep in outputs
+ },
+ "historical": historical_context,
+ "position": execution_order.index(agent_name),
+ "total_agents": len(execution_order),
+ }
+
+ # Execute agent with enhanced context
+ output = await self._execute_agent(
+ agent_name, task, agent_context
+ )
+ outputs[agent_name] = output
+
+ # Update historical context with current execution
+ if output.output:
+ historical_context.update(
+ {
+ f"current_{agent_name}_output": output.output
+ }
+ )
+
+ # Check for errors
+ if output.error:
+ success = False
+ error = f"Agent {agent_name} failed: {output.error}"
+
+ # Try to recover using memory
+ if similar_executions:
+ recovery_output = self._attempt_recovery(
+ agent_name, task, similar_executions
+ )
+ if recovery_output:
+ outputs[agent_name] = recovery_output
+ success = True
+ error = None
+ continue
+ break
+
+ except Exception as agent_error:
+ logger.error(
+ f"Error executing agent {agent_name}: {str(agent_error)}"
+ )
+ success = False
+ error = f"Agent {agent_name} failed: {str(agent_error)}"
+ break
+
+ # Create result
+ result = SwarmOutput(
+ outputs=outputs,
+ execution_time=time.time() - start_time,
+ success=success,
+ error=error,
+ metadata={
+ "task": task,
+ "used_optimal_sequence": optimal_sequence
+ is not None,
+ "similar_executions_found": len(
+ similar_executions
+ ),
+ "execution_order": execution_order,
+ "historical_context_used": bool(
+ historical_context
+ ),
+ },
+ )
+
+ # Store execution in memory
+ await self._store_execution_async(task, result)
+
+ return result
+
+ except Exception as e:
+ logger.error(f"Swarm execution failed: {str(e)}")
+ return SwarmOutput(
+ outputs=outputs,
+ execution_time=time.time() - start_time,
+ success=False,
+ error=str(e),
+ metadata={"task": task},
+ )
+
+ def run(self, task: str) -> SwarmOutput:
+ """Synchronous interface to execute the swarm."""
+ return asyncio.run(self.execute(task))
+
+ def _extract_success_patterns(
+ self, similar_executions: List[Dict]
+ ) -> Dict:
+ """Extract success patterns from similar executions."""
+ patterns = {}
+ successful_execs = [
+ ex for ex in similar_executions if ex["success"]
+ ]
+
+ if successful_execs:
+ patterns = {
+ "common_sequences": self._find_common_sequences(
+ successful_execs
+ ),
+ "avg_execution_time": sum(
+ ex["execution_time"] for ex in successful_execs
+ )
+ / len(successful_execs),
+ "successful_strategies": self._extract_strategies(
+ successful_execs
+ ),
+ }
+
+ return patterns
+
+ def _attempt_recovery(
+ self,
+ failed_agent: str,
+ task: str,
+ similar_executions: List[Dict],
+ ) -> Optional[AgentOutput]:
+ """Attempt to recover from failure using memory."""
+ for execution in similar_executions:
+ if (
+ execution["success"]
+ and failed_agent in execution["outputs"]
+ ):
+ historical_output = execution["outputs"][failed_agent]
+
+ return AgentOutput(
+ agent_name=failed_agent,
+ output=historical_output["output"],
+ execution_time=historical_output[
+ "execution_time"
+ ],
+ metadata={
+ "recovered_from_memory": True,
+ "original_task": execution["task"],
+ },
+ )
+ return None
+
+ async def _store_execution_async(
+ self, task: str, result: SwarmOutput
+ ):
+ """Asynchronously store execution in memory."""
+ try:
+ await asyncio.to_thread(
+ self.memory.store_execution, task, result
+ )
+ except Exception as e:
+ logger.error(
+ f"Failed to store execution in memory: {str(e)}"
+ )
+
+ def add_agent(self, agent: Agent, dependencies: List[str] = None):
+ """Add a new agent to the swarm."""
+ dependencies = dependencies or []
+ self.agents[agent.agent_name] = agent
+ self.dependencies[agent.agent_name] = dependencies
+ self.graph.add_node(agent.agent_name, agent=agent)
+
+ for dep in dependencies:
+ if dep not in self.agents:
+ raise ValueError(f"Dependency {dep} not found")
+ self.graph.add_edge(dep, agent.agent_name)
+
+ self._validate_graph()
+
+
+if __name__ == "__main__":
+ try:
+ # Create agents
+ data_collector = Agent(
+ agent_name="Market-Data-Collector",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ streaming_on=True,
+ )
+
+ trend_analyzer = Agent(
+ agent_name="Market-Trend-Analyzer",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ streaming_on=True,
+ )
+
+ report_generator = Agent(
+ agent_name="Investment-Report-Generator",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ streaming_on=True,
+ )
+
+ # Create swarm
+ swarm = GraphSwarm(
+ agents=[
+ (data_collector, []),
+ (trend_analyzer, ["Market-Data-Collector"]),
+ (report_generator, ["Market-Trend-Analyzer"]),
+ ],
+ swarm_name="Market Analysis Intelligence Network",
+ )
+
+ # Run the swarm
+ result = swarm.run(
+ "Analyze current market trends for tech stocks and provide investment recommendations"
+ )
+
+ # Print results
+ print(f"Execution success: {result.success}")
+ print(f"Total time: {result.execution_time:.2f} seconds")
+
+ for agent_name, output in result.outputs.items():
+ print(f"\nAgent: {agent_name}")
+ print(f"Output: {output.output}")
+ if output.error:
+ print(f"Error: {output.error}")
+ except Exception as error:
+ logger.error(error)
+ raise error
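# Besides the __main__ example above, agents can be attached incrementally; add_agent()
# checks that every named dependency is already registered and that the graph remains
# acyclic. The agent and model names are illustrative, and running the swarm assumes
# valid model credentials.
from swarms import Agent
from swarms.structs.graph_swarm import GraphSwarm

swarm = GraphSwarm(swarm_name="Incremental Swarm")

collector = Agent(agent_name="Collector", model_name="gpt-4o-mini", max_loops=1)
summarizer = Agent(agent_name="Summarizer", model_name="gpt-4o-mini", max_loops=1)

swarm.add_agent(collector)  # entry node, no dependencies
swarm.add_agent(summarizer, dependencies=["Collector"])  # runs after Collector

result = swarm.run("Collect and summarize today's AI agent news")
print(result.success, result.execution_time)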
diff --git a/swarms/structs/graph_workflow.py b/swarms/structs/graph_workflow.py
index 989175b7..803a9643 100644
--- a/swarms/structs/graph_workflow.py
+++ b/swarms/structs/graph_workflow.py
@@ -5,7 +5,9 @@ import networkx as nx
from pydantic.v1 import BaseModel, Field, validator
from swarms.structs.agent import Agent # noqa: F401
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="graph_workflow")
class NodeType(str, Enum):
diff --git a/swarms/structs/groupchat.py b/swarms/structs/groupchat.py
index 71ea7f8d..46e798ba 100644
--- a/swarms/structs/groupchat.py
+++ b/swarms/structs/groupchat.py
@@ -1,72 +1,165 @@
-from typing import List, Dict
+from typing import List, Dict, Optional, Union, Callable, Any
from pydantic import BaseModel, Field
-from swarms.structs.conversation import Conversation
-from swarms.utils.loguru_logger import logger
-from swarms.structs.agent import Agent
+from datetime import datetime
+import json
from uuid import uuid4
-from swarms.schemas.agent_step_schemas import ManySteps
+import logging
+from swarms.structs.agent import Agent
+from swarms.structs.agents_available import showcase_available_agents
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class Message(BaseModel):
+ """Single message in the conversation"""
+
+ role: str
+ content: str
+ timestamp: datetime = Field(default_factory=datetime.utcnow)
+
+
+class AgentMetadata(BaseModel):
+ """Metadata for tracking agent state and configuration"""
+
+ agent_name: str
+ agent_type: str
+ system_prompt: Optional[str] = None
+ description: Optional[str] = None
+ config: Dict[str, Any] = Field(default_factory=dict)
+
+
+class InteractionLog(BaseModel):
+ """Log entry for a single interaction"""
+
+ id: str = Field(default_factory=lambda: uuid4().hex)
+ agent_name: str
+ position: int
+ input_text: str
+ output_text: str
+ timestamp: datetime = Field(default_factory=datetime.utcnow)
+ metadata: Dict[str, Any] = Field(default_factory=dict)
-class GroupChatInput(BaseModel):
+class GroupChatState(BaseModel):
+ """Complete state of the group chat"""
+
+ id: str = Field(default_factory=lambda: uuid4().hex)
+ name: Optional[str] = None
+ description: Optional[str] = None
admin_name: str
group_objective: str
- agents: List[Dict[str, str]]
max_rounds: int
- selector_agent: Dict[str, str]
- rules: str
+ rules: Optional[str] = None
+ agent_metadata: List[AgentMetadata]
+ messages: List[Message]
+ interactions: List[InteractionLog]
+ created_at: datetime = Field(default_factory=datetime.utcnow)
+ updated_at: datetime = Field(default_factory=datetime.utcnow)
+
+
+# TODO:
+# Build a function that prompts the LLM to output the
+# [Agent-Name] in square brackets followed by the question or request,
+# i.e. an agentic language notation.
-class GroupChatOutput(BaseModel):
- id: str = Field(uuid4().hex)
- task: str = Field(..., description=None)
- input_config: GroupChatInput
- agent_outputs: List[ManySteps] = Field(..., description=None)
+class AgentWrapper:
+ """Wrapper class to standardize agent interfaces"""
+
+ def __init__(
+ self,
+ agent: Union["Agent", Callable],
+ agent_name: str,
+ system_prompt: Optional[str] = None,
+ ):
+ self.agent = agent
+ self.agent_name = agent_name
+ self.system_prompt = system_prompt
+ self._validate_agent()
+
+ def _validate_agent(self):
+ """Validate that the agent has the required interface"""
+ if hasattr(self.agent, "run"):
+ self.run = self.agent.run
+ elif callable(self.agent):
+ self.run = self.agent
+ else:
+ raise ValueError(
+ "Agent must either have a 'run' method or be callable"
+ )
+
+ def get_metadata(self) -> AgentMetadata:
+ """Extract metadata from the agent"""
+ return AgentMetadata(
+ agent_name=self.agent_name,
+ agent_type=type(self.agent).__name__,
+ system_prompt=self.system_prompt,
+ config={
+ k: v
+ for k, v in self.agent.__dict__.items()
+ if isinstance(v, (str, int, float, bool, dict, list))
+ },
+ )
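+
+
+# Minimal usage sketch for AgentWrapper (illustrative only; the `summarize`
+# callable below is hypothetical):
+#
+#     def summarize(prompt: str) -> str:
+#         return f"Summary: {prompt[:80]}"
+#
+#     wrapped = AgentWrapper(
+#         summarize,
+#         agent_name="Summarizer",
+#         system_prompt="Summarize the conversation so far.",
+#     )
+#     print(wrapped.run("Quarterly results were mixed, revenue up 3%..."))
+#     print(wrapped.get_metadata().agent_type)  # -> "function"
+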
class GroupChat:
- """Manager class for a group chat.
+ """Enhanced GroupChat manager with state persistence and comprehensive logging.
- This class handles the management of a group chat, including initializing the conversation,
- selecting the next speaker, resetting the chat, and executing the chat rounds.
+ This class implements a multi-agent chat system with the following key features:
+ - State persistence to disk
+ - Comprehensive interaction logging
+ - Configurable agent selection
+ - Early stopping conditions
+ - Conversation export capabilities
- Args:
- agents (List[Agent], optional): List of agents participating in the group chat. Defaults to None.
- max_rounds (int, optional): Maximum number of chat rounds. Defaults to 10.
- admin_name (str, optional): Name of the admin user. Defaults to "Admin".
- group_objective (str, optional): Objective of the group chat. Defaults to None.
- selector_agent (Agent, optional): Agent responsible for selecting the next speaker. Defaults to None.
- rules (str, optional): Rules for the group chat. Defaults to None.
- *args: Variable length argument list.
- **kwargs: Arbitrary keyword arguments.
+ The GroupChat coordinates multiple agents to have a goal-directed conversation,
+ with one agent speaking at a time based on a selector agent's decisions.
Attributes:
- agents (List[Agent]): List of agents participating in the group chat.
- max_rounds (int): Maximum number of chat rounds.
- admin_name (str): Name of the admin user.
- group_objective (str): Objective of the group chat.
- selector_agent (Agent): Agent responsible for selecting the next speaker.
- messages (Conversation): Conversation object for storing the chat messages.
-
+ name (Optional[str]): Name of the group chat
+ description (Optional[str]): Description of the group chat's purpose
+ agents (List[Union["Agent", Callable]]): List of participating agents
+ max_rounds (int): Maximum number of conversation rounds
+ admin_name (str): Name of the administrator
+ group_objective (str): The goal/objective of the conversation
+ selector_agent (Union["Agent", Callable]): Agent that selects next speaker
+ rules (Optional[str]): Rules governing the conversation
+ state_path (Optional[str]): Path to save conversation state
+ showcase_agents_on (bool): Whether to showcase agent capabilities
"""
def __init__(
self,
- name: str = None,
- description: str = None,
- agents: List[Agent] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ agents: List[Union["Agent", Callable]] = None,
max_rounds: int = 10,
admin_name: str = "Admin",
group_objective: str = None,
- selector_agent: Agent = None,
- rules: str = None,
- *args,
- **kwargs,
+ selector_agent: Union["Agent", Callable] = None,
+ rules: Optional[str] = None,
+ state_path: Optional[str] = None,
+ showcase_agents_on: bool = False,
):
- # super().__init__(agents = agents, *args, **kwargs)
- if not agents:
- raise ValueError(
- "Agents cannot be empty. Add more agents."
- )
+ """Initialize a new GroupChat instance.
+
+ Args:
+ name: Name of the group chat
+ description: Description of the group chat's purpose
+ agents: List of participating agents
+ max_rounds: Maximum number of conversation rounds
+ admin_name: Name of the administrator
+ group_objective: The goal/objective of the conversation
+ selector_agent: Agent that selects next speaker
+ rules: Rules governing the conversation
+ state_path: Path to save conversation state
+ showcase_agents_on: Whether to showcase agent capabilities
+
+ Raises:
+ ValueError: If no agents are provided
+ """
self.name = name
self.description = description
self.agents = agents
@@ -74,184 +167,327 @@ class GroupChat:
self.admin_name = admin_name
self.group_objective = group_objective
self.selector_agent = selector_agent
+ self.rules = rules
+ self.state_path = state_path
+ self.showcase_agents_on = showcase_agents_on
- # Initialize the conversation
- self.message_history = Conversation(
- system_prompt=self.group_objective,
- time_enabled=True,
- user=self.admin_name,
- rules=rules,
- *args,
- **kwargs,
+        if not agents:
+            raise ValueError("At least one agent is required")
+
+ # Generate unique state path if not provided
+ self.state_path = (
+ state_path or f"group_chat_{uuid4().hex}.json"
)
- # Initialize log for interactions
- self.group_log = GroupChatLog(
- admin_name=self.admin_name,
- group_objective=self.group_objective,
+ # Wrap all agents to standardize interface
+ self.wrapped_agents = [
+ AgentWrapper(
+ agent,
+ (
+ f"Agent_{i}"
+ if not hasattr(agent, "agent_name")
+ else agent.agent_name
+ ),
+ )
+ for i, agent in enumerate(agents)
+ ]
+
+ # Configure selector agent
+ self.selector_agent = AgentWrapper(
+ selector_agent or self.wrapped_agents[0].agent,
+ "Selector",
+ "Select the next speaker based on the conversation context",
)
- @property
- def agent_names(self) -> List[str]:
- """Return the names of the agents in the group chat."""
- return [agent.agent_name for agent in self.agents]
+ # Initialize conversation state
+ self.state = GroupChatState(
+ name=name,
+ description=description,
+ admin_name=admin_name,
+ group_objective=group_objective,
+ max_rounds=max_rounds,
+ rules=rules,
+ agent_metadata=[
+ agent.get_metadata() for agent in self.wrapped_agents
+ ],
+ messages=[],
+ interactions=[],
+ )
- def reset(self):
- """Reset the group chat."""
- logger.info("Resetting GroupChat")
- self.message_history.clear()
+ # Showcase agents if enabled
+ if self.showcase_agents_on is True:
+ self.showcase_agents()
- def agent_by_name(self, name: str) -> Agent:
- """Find an agent whose name is contained within the given 'name' string.
+ def showcase_agents(self):
+ """Showcase available agents and update their system prompts.
- Args:
- name (str): Name string to search for.
+ This method displays agent capabilities and updates each agent's
+ system prompt with information about other agents in the group.
+ """
+ out = showcase_available_agents(
+ name=self.name,
+ description=self.description,
+ agents=self.wrapped_agents,
+ )
- Returns:
- Agent: Agent object with a name contained in the given 'name' string.
+ for agent in self.wrapped_agents:
+ # Initialize system_prompt if None
+ if agent.system_prompt is None:
+ agent.system_prompt = ""
+ agent.system_prompt += out
- Raises:
- ValueError: If no agent is found with a name contained in the given 'name' string.
+ def save_state(self) -> None:
+ """Save current conversation state to disk.
+ The state is saved as a JSON file at the configured state_path.
"""
- for agent in self.agents:
- if agent.agent_name in name:
- return agent
- raise ValueError(
- f"No agent found with a name contained in '{name}'."
- )
+ with open(self.state_path, "w") as f:
+ json.dump(self.state.dict(), f, default=str, indent=2)
+ logger.info(f"State saved to {self.state_path}")
- def next_agent(self, agent: Agent) -> Agent:
- """Return the next agent in the list.
+ @classmethod
+ def load_state(cls, state_path: str) -> "GroupChat":
+ """Load GroupChat from saved state.
Args:
- agent (Agent): Current agent.
+ state_path: Path to the saved state JSON file
Returns:
- Agent: Next agent in the list.
+ GroupChat: A new GroupChat instance with restored state
+ Raises:
+ FileNotFoundError: If state file doesn't exist
+ json.JSONDecodeError: If state file is invalid JSON
"""
- return self.agents[
- (self.agent_names.index(agent.agent_name) + 1)
- % len(self.agents)
- ]
+ with open(state_path, "r") as f:
+ state_dict = json.load(f)
+
+ # Convert loaded data back to state model
+ state = GroupChatState(**state_dict)
+
+ # Initialize with minimal config, then restore state
+ instance = cls(
+ name=state.name,
+ admin_name=state.admin_name,
+ agents=[], # Temporary empty list
+ group_objective=state.group_objective,
+ )
+ instance.state = state
+ return instance
- def select_speaker_msg(self):
- """Return the message for selecting the next speaker."""
- prompt = f"""
- You are in a role play game. The following roles are available:
- {self._participant_roles()}.
+ def _log_interaction(
+ self,
+ agent_name: str,
+ position: int,
+ input_text: str,
+ output_text: str,
+ ) -> None:
+ """Log a single interaction in the conversation.
- Read the following conversation.
- Then select the next role from {self.agent_names} to play. Only return the role.
+ Args:
+ agent_name: Name of the speaking agent
+ position: Position in conversation sequence
+ input_text: Input context provided to agent
+ output_text: Agent's response
"""
- return prompt
+ log_entry = InteractionLog(
+ agent_name=agent_name,
+ position=position,
+ input_text=input_text,
+ output_text=output_text,
+ metadata={
+ "current_agents": [
+ a.agent_name for a in self.wrapped_agents
+ ],
+ "round": position // len(self.wrapped_agents),
+ },
+ )
+ self.state.interactions.append(log_entry)
+ self.save_state()
- def select_speaker(
- self, last_speaker_agent: Agent, selector_agent: Agent
- ) -> Agent:
- """Select the next speaker.
+ def _add_message(self, role: str, content: str) -> None:
+ """Add a message to the conversation history.
Args:
- last_speaker_agent (Agent): Last speaker in the conversation.
- selector_agent (Agent): Agent responsible for selecting the next speaker.
-
- Returns:
- Agent: Next speaker.
-
+ role: Speaker's role/name
+ content: Message content
"""
- logger.info("Selecting a new speaker")
- selector_agent.system_prompt = self.select_speaker_msg()
-
- n_agents = len(self.agent_names)
- if n_agents < 3:
- logger.warning(
- f"GroupChat is underpopulated with {n_agents} agents. Direct communication might be more efficient."
- )
-
- self.message_history.add(
- role=self.admin_name,
- content=f"Read the above conversation. Then select the next most suitable role from {self.agent_names} to play. Only return the role.",
- )
+ message = Message(role=role, content=content)
+ self.state.messages.append(message)
+ self.save_state()
- name = selector_agent.run(
- self.message_history.return_history_as_string()
- )
- try:
- selected_agent = self.agent_by_name(name)
- return selected_agent
- except ValueError:
- return self.next_agent(last_speaker_agent)
+ def select_next_speaker(
+ self, last_speaker: AgentWrapper
+ ) -> AgentWrapper:
+ """Select the next speaker using the selector agent.
- def _participant_roles(self):
- """Print the roles of the participants.
+ Args:
+ last_speaker: The agent who spoke last
Returns:
- str: Participant roles.
+ AgentWrapper: The next agent to speak
+ Note:
+ Falls back to round-robin selection if selector agent fails
"""
- return "\n".join(
+ conversation_history = "\n".join(
[
- f"{agent.agent_name}: {agent.system_prompt}"
- for agent in self.agents
+ f"{msg.role}: {msg.content}"
+ for msg in self.state.messages
]
)
- def run(self, task: str, *args, **kwargs):
- """Call 'GroupChatManager' instance as a function.
+ selection_prompt = f"""
+ Current speakers: {[agent.agent_name for agent in self.wrapped_agents]}
+ Last speaker: {last_speaker.agent_name}
+ Group objective: {self.state.group_objective}
+
+ Based on the conversation history and group objective, select the next most appropriate speaker.
+ Only return the speaker's name.
+
+ Conversation history:
+ {conversation_history}
+ """
+
+ try:
+ next_speaker_name = self.selector_agent.run(
+ selection_prompt
+ ).strip()
+ return next(
+ agent
+ for agent in self.wrapped_agents
+ if agent.agent_name in next_speaker_name
+ )
+        except Exception as e:
+ logger.warning(
+ f"Selector agent failed: {str(e)}. Falling back to round-robin."
+ )
+ # Fallback to round-robin if selection fails
+ current_idx = self.wrapped_agents.index(last_speaker)
+ return self.wrapped_agents[
+ (current_idx + 1) % len(self.wrapped_agents)
+ ]
+
+ def run(self, task: str) -> str:
+ """Execute the group chat conversation.
Args:
- task (str): Task to be performed.
+ task: The initial task/question to discuss
Returns:
- str: Reply from the last speaker.
+ str: The final response from the conversation
+ Raises:
+ Exception: If any error occurs during execution
"""
try:
- logger.info(
- f"Activating GroupChat with {len(self.agents)} Agents"
- )
- self.message_history.add(
- self.selector_agent.agent_name, task
- )
+ logger.info(f"Starting GroupChat with task: {task}")
+ self._add_message(self.state.admin_name, task)
+
+ current_speaker = self.wrapped_agents[0]
+ final_response = None
- for i in range(self.max_rounds):
- speaker_agent = self.select_speaker(
- last_speaker_agent=self.selector_agent,
- selector_agent=self.selector_agent,
+ for round_num in range(self.state.max_rounds):
+ # Select next speaker
+ current_speaker = self.select_next_speaker(
+ current_speaker
)
logger.info(
- f"Next speaker selected: {speaker_agent.agent_name}"
+ f"Selected speaker: {current_speaker.agent_name}"
)
- reply = speaker_agent.run(
- self.message_history.return_history_as_string(),
- *args,
- **kwargs,
- )
- self.message_history.add(
- speaker_agent.agent_name, reply
+ # Prepare context and get response
+ conversation_history = "\n".join(
+ [
+ f"{msg.role}: {msg.content}"
+ for msg in self.state.messages[
+ -10:
+ ] # Last 10 messages for context
+ ]
)
- # Log the interaction
- self.group_log.log_interaction(
- agent_name=speaker_agent.agent_name,
- position=i,
- input_text=self.message_history.return_history_as_string(),
- output_text=reply,
+ try:
+ response = current_speaker.run(
+ conversation_history
+ )
+ final_response = response
+ except Exception as e:
+ logger.error(
+ f"Agent {current_speaker.agent_name} failed: {str(e)}"
+ )
+ continue
+
+ # Log interaction and add to message history
+ self._log_interaction(
+ current_speaker.agent_name,
+ round_num,
+ conversation_history,
+ response,
+ )
+ self._add_message(
+ current_speaker.agent_name, response
)
- if i == self.max_rounds - 1:
+ # Optional: Add early stopping condition based on response content
+ if (
+ "TASK_COMPLETE" in response
+ or "CONCLUSION" in response
+ ):
+ logger.info(
+ "Task completion detected, ending conversation"
+ )
break
- return reply
+ return final_response or "No valid response generated"
- except Exception as error:
- logger.error(
- f"Error detected: {error}. Please optimize the inputs and submit an issue on the swarms GitHub."
- )
- raise error
+ except Exception as e:
+ logger.error(f"Error in GroupChat execution: {str(e)}")
+ raise
- def get_group_log_as_json(self) -> str:
- """Return the interaction log as a JSON string."""
- return self.group_log.return_json()
+ def get_conversation_summary(self) -> Dict[str, Any]:
+ """Return a summary of the conversation.
+
+ Returns:
+ Dict containing conversation metrics and status
+ """
+ return {
+ "id": self.state.id,
+ "total_interactions": len(self.state.interactions),
+ "participating_agents": [
+ agent.agent_name for agent in self.wrapped_agents
+ ],
+ "conversation_length": len(self.state.messages),
+ "duration": (
+ datetime.utcnow() - self.state.created_at
+ ).total_seconds(),
+ "objective_completed": any(
+ "TASK_COMPLETE" in msg.content
+ for msg in self.state.messages
+ ),
+ }
+
+ def export_conversation(
+ self, format: str = "json"
+ ) -> Union[str, Dict]:
+ """Export the conversation in the specified format.
+
+ Args:
+ format: Output format ("json" or "text")
+
+ Returns:
+ Union[str, Dict]: Conversation in requested format
+
+ Raises:
+ ValueError: If format is not supported
+ """
+ if format == "json":
+ return self.state.dict()
+ elif format == "text":
+ return "\n".join(
+ [
+ f"{msg.role} ({msg.timestamp}): {msg.content}"
+ for msg in self.state.messages
+ ]
+ )
+ else:
+ raise ValueError(f"Unsupported export format: {format}")
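+
+
+# Minimal usage sketch for the reworked GroupChat (illustrative only; the
+# Agent constructor arguments mirror other examples in this repository and
+# may differ in your setup):
+#
+#     analyst = Agent(agent_name="Analyst", model_name="gpt-4o-mini", max_loops=1)
+#     adviser = Agent(agent_name="Adviser", model_name="gpt-4o-mini", max_loops=1)
+#
+#     chat = GroupChat(
+#         name="Budget Review",
+#         agents=[analyst, adviser],
+#         group_objective="Agree on a Q3 budget recommendation",
+#         max_rounds=4,
+#         state_path="budget_review_state.json",
+#     )
+#
+#     final_answer = chat.run("Review the proposed Q3 budget and flag any risks.")
+#     print(chat.get_conversation_summary())
+#     print(chat.export_conversation(format="text"))
+#
+#     # Every message and interaction is also persisted to state_path as JSON,
+#     # so the conversation can be inspected later.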
diff --git a/swarms/structs/groupchat_new.py b/swarms/structs/groupchat_new.py
new file mode 100644
index 00000000..69c424d4
--- /dev/null
+++ b/swarms/structs/groupchat_new.py
@@ -0,0 +1,244 @@
+import os
+import asyncio
+import json
+from pydantic import BaseModel, Field
+from typing import List, Dict, Any
+from swarms import Agent
+from swarm_models import OpenAIChat
+from dotenv import load_dotenv
+from swarms.utils.formatter import formatter
+
+# Load environment variables
+load_dotenv()
+
+# Get OpenAI API key
+api_key = os.getenv("OPENAI_API_KEY")
+
+
+# Define Pydantic schema for agent outputs
+class AgentOutput(BaseModel):
+ """Schema for capturing the output of each agent."""
+
+ agent_name: str = Field(..., description="The name of the agent")
+ message: str = Field(
+ ...,
+ description="The agent's response or contribution to the group chat",
+ )
+ metadata: Dict[str, Any] = Field(
+ default_factory=dict,
+ description="Additional metadata about the agent's response",
+ )
+
+
+class GroupChat:
+ """
+ GroupChat class to enable multiple agents to communicate in an asynchronous group chat.
+ Each agent is aware of all other agents, every message exchanged, and the social context.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ description: str,
+ agents: List[Agent],
+ max_loops: int = 1,
+ ):
+ """
+ Initialize the GroupChat.
+
+ Args:
+ name (str): Name of the group chat.
+ description (str): Description of the purpose of the group chat.
+ agents (List[Agent]): A list of agents participating in the chat.
+ max_loops (int): Maximum number of loops to run through all agents.
+ """
+ self.name = name
+ self.description = description
+ self.agents = agents
+ self.max_loops = max_loops
+ self.chat_history = (
+ []
+ ) # Stores all messages exchanged in the chat
+
+ formatter.print_panel(
+ f"Initialized GroupChat '{self.name}' with {len(self.agents)} agents. Max loops: {self.max_loops}",
+ title="Groupchat Swarm",
+ )
+
+ async def _agent_conversation(
+ self, agent: Agent, input_message: str
+ ) -> AgentOutput:
+ """
+ Facilitate a single agent's response to the chat.
+
+ Args:
+ agent (Agent): The agent responding.
+ input_message (str): The message triggering the response.
+
+ Returns:
+ AgentOutput: The agent's response captured in a structured format.
+ """
+ formatter.print_panel(
+ f"Agent '{agent.agent_name}' is responding to the message: {input_message}",
+ title="Groupchat Swarm",
+ )
+ response = await asyncio.to_thread(agent.run, input_message)
+
+ output = AgentOutput(
+ agent_name=agent.agent_name,
+ message=response,
+ metadata={"context_length": agent.context_length},
+ )
+ # logger.debug(f"Agent '{agent.agent_name}' response: {response}")
+ return output
+
+ async def _run(self, initial_message: str) -> List[AgentOutput]:
+ """
+ Execute the group chat asynchronously, looping through all agents up to max_loops.
+
+ Args:
+ initial_message (str): The initial message to start the chat.
+
+ Returns:
+ List[AgentOutput]: The responses of all agents across all loops.
+ """
+ formatter.print_panel(
+ f"Starting group chat '{self.name}' with initial message: {initial_message}",
+ title="Groupchat Swarm",
+ )
+ self.chat_history.append(
+ {"sender": "System", "message": initial_message}
+ )
+
+ outputs = []
+ for loop in range(self.max_loops):
+ formatter.print_panel(
+ f"Group chat loop {loop + 1}/{self.max_loops}",
+ title="Groupchat Swarm",
+ )
+
+ for agent in self.agents:
+ # Create a custom input message for each agent, sharing the chat history and social context
+ input_message = (
+ f"Chat History:\n{self._format_chat_history()}\n\n"
+ f"Participants:\n"
+ + "\n".join(
+ [
+ f"- {a.agent_name}: {a.system_prompt}"
+ for a in self.agents
+ ]
+ )
+ + f"\n\nNew Message: {initial_message}\n\n"
+ f"You are '{agent.agent_name}'. Remember to keep track of the social context, who is speaking, "
+ f"and respond accordingly based on your role: {agent.system_prompt}."
+ )
+
+ # Collect agent's response
+ output = await self._agent_conversation(
+ agent, input_message
+ )
+ outputs.append(output)
+
+ # Update chat history with the agent's response
+ self.chat_history.append(
+ {
+ "sender": agent.agent_name,
+ "message": output.message,
+ }
+ )
+
+ formatter.print_panel(
+ "Group chat completed. All agent responses captured.",
+ title="Groupchat Swarm",
+ )
+ return outputs
+
+    def run(self, task: str, *args, **kwargs):
+        """Synchronous entry point that drives the asynchronous group chat."""
+        return asyncio.run(self._run(task))
+
+ def _format_chat_history(self) -> str:
+ """
+ Format the chat history for agents to understand the context.
+
+ Returns:
+ str: The formatted chat history as a string.
+ """
+ return "\n".join(
+ [
+ f"{entry['sender']}: {entry['message']}"
+ for entry in self.chat_history
+ ]
+ )
+
+ def __str__(self) -> str:
+ """String representation of the group chat's outputs."""
+ return self._format_chat_history()
+
+    def to_json(self) -> str:
+        """Return the chat history serialized as a JSON string."""
+        return json.dumps(
+            [
+                {"sender": entry["sender"], "message": entry["message"]}
+                for entry in self.chat_history
+            ],
+            indent=2,
+        )
+
+
+# Example Usage
+if __name__ == "__main__":
+
+ load_dotenv()
+
+ # Get the OpenAI API key from the environment variable
+ api_key = os.getenv("OPENAI_API_KEY")
+
+ # Create an instance of the OpenAIChat class
+ model = OpenAIChat(
+ openai_api_key=api_key,
+ model_name="gpt-4o-mini",
+ temperature=0.1,
+ )
+
+ # Example agents
+ agent1 = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt="You are a financial analyst specializing in investment strategies.",
+ llm=model,
+ max_loops=1,
+ autosave=False,
+ dashboard=False,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ user_name="swarms_corp",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+ streaming_on=False,
+ )
+
+ agent2 = Agent(
+ agent_name="Tax-Adviser-Agent",
+ system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
+ llm=model,
+ max_loops=1,
+ autosave=False,
+ dashboard=False,
+ verbose=True,
+ dynamic_temperature_enabled=True,
+ user_name="swarms_corp",
+ retry_attempts=1,
+ context_length=200000,
+ output_type="string",
+ streaming_on=False,
+ )
+
+ # Create group chat
+ group_chat = GroupChat(
+ name="Financial Discussion",
+ description="A group chat for financial analysis and tax advice.",
+ agents=[agent1, agent2],
+ )
+
+    # Run the group chat (GroupChat.run drives the async loop internally)
+    result = group_chat.run(
+        "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria? What do you guys think?"
+    )
+    print(result)
diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py
index 82fa6ba2..4eac5c78 100644
--- a/swarms/structs/hiearchical_swarm.py
+++ b/swarms/structs/hiearchical_swarm.py
@@ -1,7 +1,7 @@
from typing import List, Any
-from loguru import logger
from pydantic import BaseModel, Field
+from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.agent import Agent
from swarms.structs.concat import concat_strings
@@ -9,6 +9,7 @@ from swarms.structs.agent_registry import AgentRegistry
from swarm_models.base_llm import BaseLLM
from swarms.structs.conversation import Conversation
+logger = initialize_logger(log_folder="hiearchical_swarm")
# Example usage:
HIEARCHICAL_AGENT_SYSTEM_PROMPT = """
diff --git a/swarms/structs/majority_voting.py b/swarms/structs/majority_voting.py
index addf058e..18738aa0 100644
--- a/swarms/structs/majority_voting.py
+++ b/swarms/structs/majority_voting.py
@@ -1,26 +1,14 @@
import concurrent.futures
import re
-import sys
from collections import Counter
from typing import Any, Callable, List, Optional
-from loguru import logger
-
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.utils.file_processing import create_file
+from swarms.utils.loguru_logger import initialize_logger
-# Configure loguru logger with advanced settings
-logger.remove()
-logger.add(
- sys.stderr,
- colorize=True,
- format="{time} {message}",
- backtrace=True,
- diagnose=True,
- enqueue=True,
- catch=True,
-)
+logger = initialize_logger(log_folder="majority_voting")
def extract_last_python_code_block(text):
diff --git a/swarms/structs/message_pool.py b/swarms/structs/message_pool.py
deleted file mode 100644
index 3f7a6343..00000000
--- a/swarms/structs/message_pool.py
+++ /dev/null
@@ -1,214 +0,0 @@
-import hashlib
-from time import time_ns
-from typing import Callable, List, Optional, Sequence, Union
-
-from swarms.structs.agent import Agent
-from swarms.utils.loguru_logger import logger
-from swarms.structs.base_swarm import BaseSwarm
-
-
-def _hash(input: str):
- """
- Hashes the input string using SHA256 algorithm.
-
- Args:
- input (str): The string to be hashed.
-
- Returns:
- str: The hexadecimal representation of the hash value.
- """
- hex_dig = hashlib.sha256(input.encode("utf-8")).hexdigest()
- return hex_dig
-
-
-def msg_hash(
- agent: Agent, content: str, turn: int, msg_type: str = "text"
-):
- """
- Generate a hash value for a message.
-
- Args:
- agent (Agent): The agent sending the message.
- content (str): The content of the message.
- turn (int): The turn number of the message.
- msg_type (str, optional): The type of the message. Defaults to "text".
-
- Returns:
- int: The hash value of the message.
- """
- time = time_ns()
- return _hash(
- f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:"
- f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}"
- )
-
-
-class MessagePool(BaseSwarm):
- """
- A class representing a message pool for agents in a swarm.
-
- Attributes:
- agents (Optional[Sequence[Agent]]): The list of agents in the swarm.
- moderator (Optional[Agent]): The moderator agent.
- turns (Optional[int]): The number of turns.
- routing_function (Optional[Callable]): The routing function for message distribution.
- show_names (Optional[bool]): Flag indicating whether to show agent names.
- messages (List[Dict]): The list of messages in the pool.
-
- Examples:
- >>> from swarms.structs.agent import Agent
- >>> from swarms.structs.message_pool import MessagePool
- >>> agent1 = Agent(agent_name="agent1")
- >>> agent2 = Agent(agent_name="agent2")
- >>> agent3 = Agent(agent_name="agent3")
- >>> moderator = Agent(agent_name="moderator")
- >>> agents = [agent1, agent2, agent3]
- >>> message_pool = MessagePool(agents=agents, moderator=moderator, turns=5)
- >>> message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
- >>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
- >>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
- >>> message_pool.get_all_messages()
- [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
- >>> message_pool.get_visible_messages(agent=agent1, turn=1)
- [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
- >>> message_pool.get_visible_messages(agent=agent2, turn=1)
- [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
- """
-
- def __init__(
- self,
- agents: Optional[Sequence[Agent]] = None,
- moderator: Optional[Agent] = None,
- turns: Optional[int] = 5,
- routing_function: Optional[Callable] = None,
- show_names: Optional[bool] = False,
- autosave: Optional[bool] = False,
- *args,
- **kwargs,
- ):
- super().__init__()
-
- self.agent = agents
- self.moderator = moderator
- self.turns = turns
- self.routing_function = routing_function
- self.show_names = show_names
- self.autosave = autosave
-
- self.messages = []
-
- logger.info("MessagePool initialized")
- logger.info(f"Number of agents: {len(agents)}")
- logger.info(
- f"Agents: {[agent.agent_name for agent in agents]}"
- )
- logger.info(f"moderator: {moderator.agent_name} is available")
- logger.info(f"Number of turns: {turns}")
-
- def add(
- self,
- agent: Agent,
- content: str,
- turn: int,
- visible_to: Union[str, List[str]] = "all",
- logged: bool = True,
- ):
- """
- Add a message to the pool.
-
- Args:
- agent (Agent): The agent sending the message.
- content (str): The content of the message.
- turn (int): The turn number.
- visible_to (Union[str, List[str]], optional): The agents who can see the message. Defaults to "all".
- logged (bool, optional): Flag indicating whether the message should be logged. Defaults to True.
- """
-
- self.messages.append(
- {
- "agent": agent,
- "content": content,
- "turn": turn,
- "visible_to": visible_to,
- "logged": logged,
- }
- )
- logger.info(f"Message added: {content}")
-
- def reset(self):
- """
- Reset the message pool.
- """
- self.messages = []
- logger.info("MessagePool reset")
-
- def last_turn(self):
- """
- Get the last turn number.
-
- Returns:
- int: The last turn number.
- """
- if len(self.messages) == 0:
- return 0
- else:
- return self.messages[-1]["turn"]
-
- @property
- def last_message(self):
- """
- Get the last message in the pool.
-
- Returns:
- dict: The last message.
- """
- if len(self.messages) == 0:
- return None
- else:
- return self.messages[-1]
-
- def get_all_messages(self):
- """
- Get all messages in the pool.
-
- Returns:
- List[Dict]: The list of all messages.
- """
- return self.messages
-
- def get_visible_messages(self, agent: Agent, turn: int):
- """
- Get the visible messages for a given agent and turn.
-
- Args:
- agent (Agent): The agent.
- turn (int): The turn number.
-
- Returns:
- List[Dict]: The list of visible messages.
- """
- # Get the messages before the current turn
- prev_messages = [
- message
- for message in self.messages
- if message["turn"] < turn
- ]
-
- visible_messages = []
- for message in prev_messages:
- if (
- message["visible_to"] == "all"
- or agent.agent_name in message["visible_to"]
- ):
- visible_messages.append(message)
- return visible_messages
-
- # def query(self, query: str):
- # """
- # Query a message from the messages list and then pass it to the moderator
- # """
- # return [
- # (mod, content)
- # for mod, content, _ in self.messages # Add an underscore to ignore the rest of the elements
- # if query in content
- # ]
diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py
index cccca322..e91d565f 100644
--- a/swarms/structs/mixture_of_agents.py
+++ b/swarms/structs/mixture_of_agents.py
@@ -2,13 +2,15 @@ import asyncio
import time
from typing import Any, Dict, List, Optional
-from loguru import logger
from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
-from swarms.telemetry.log_swarm_data import log_agent_data
+from swarms.telemetry.capture_sys_data import log_agent_data
from swarms.schemas.agent_step_schemas import ManySteps
from swarms.prompts.ag_prompt import aggregator_system_prompt
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="mixture_of_agents")
time_stamp = time.strftime("%Y-%m-%d %H:%M:%S")
@@ -18,7 +20,7 @@ class MixtureOfAgentsInput(BaseModel):
description: str = (
"A class to run a mixture of agents and aggregate their responses."
)
- reference_agents: List[Dict[str, Any]]
+ agents: List[Dict[str, Any]]
aggregator_agent: Any = Field(
...,
description="An aggregator agent to be used in the mixture.",
@@ -58,7 +60,7 @@ class MixtureOfAgents:
self,
name: str = "MixtureOfAgents",
description: str = "A class to run a mixture of agents and aggregate their responses.",
- reference_agents: List[Agent] = [],
+ agents: List[Agent] = [],
aggregator_agent: Agent = None,
aggregator_system_prompt: str = "",
layers: int = 3,
@@ -69,14 +71,14 @@ class MixtureOfAgents:
Args:
name (str, optional): The name of the mixture of agents. Defaults to "MixtureOfAgents".
description (str, optional): A description of the mixture of agents. Defaults to "A class to run a mixture of agents and aggregate their responses.".
- reference_agents (List[Agent], optional): A list of reference agents to be used in the mixture. Defaults to [].
+ agents (List[Agent], optional): A list of reference agents to be used in the mixture. Defaults to [].
aggregator_agent (Agent, optional): The aggregator agent to be used in the mixture. Defaults to None.
aggregator_system_prompt (str, optional): The system prompt for the aggregator agent. Defaults to "".
layers (int, optional): The number of layers to process in the mixture. Defaults to 3.
"""
self.name = name
self.description = description
- self.reference_agents: List[Agent] = reference_agents
+ self.agents: List[Agent] = agents
self.aggregator_agent: Agent = aggregator_agent
self.aggregator_system_prompt: str = aggregator_system_prompt
self.layers: int = layers
@@ -84,9 +86,7 @@ class MixtureOfAgents:
self.input_schema = MixtureOfAgentsInput(
name=name,
description=description,
- reference_agents=[
- agent.to_dict() for agent in self.reference_agents
- ],
+ agents=[agent.to_dict() for agent in self.agents],
aggregator_agent=aggregator_agent.to_dict(),
aggregator_system_prompt=self.aggregator_system_prompt,
layers=self.layers,
@@ -111,7 +111,7 @@ class MixtureOfAgents:
"Checking the reliability of the Mixture of Agents class."
)
- if not self.reference_agents:
+ if not self.agents:
raise ValueError("No reference agents provided.")
if not self.aggregator_agent:
@@ -203,7 +203,7 @@ class MixtureOfAgents:
results: List[str] = await asyncio.gather(
*[
self._run_agent_async(agent, task)
- for agent in self.reference_agents
+ for agent in self.agents
]
)
@@ -214,7 +214,7 @@ class MixtureOfAgents:
self._run_agent_async(
agent, task, prev_responses=results
)
- for agent in self.reference_agents
+ for agent in self.agents
]
)
diff --git a/swarms/structs/multi_agent_exec.py b/swarms/structs/multi_agent_exec.py
index e32a4edc..b66af8a5 100644
--- a/swarms/structs/multi_agent_exec.py
+++ b/swarms/structs/multi_agent_exec.py
@@ -5,10 +5,12 @@ from dataclasses import dataclass
import threading
from typing import List, Union, Any, Callable
from multiprocessing import cpu_count
-
+import os
from swarms.structs.agent import Agent
-from swarms.utils.calculate_func_metrics import profile_func
+from swarms.utils.wrapper_clusterop import (
+ exec_callable_with_clusterops,
+)
# Type definitions
@@ -60,7 +62,6 @@ async def run_agents_concurrently_async(
return results
-@profile_func
def run_agents_concurrently(
agents: List[AgentType],
task: str,
@@ -106,7 +107,6 @@ def run_agents_concurrently(
return results
-@profile_func
def run_agents_concurrently_multiprocess(
agents: List[Agent], task: str, batch_size: int = cpu_count()
) -> List[Any]:
@@ -136,7 +136,6 @@ def run_agents_concurrently_multiprocess(
return results
-@profile_func
def run_agents_sequentially(
agents: List[AgentType], task: str
) -> List[Any]:
@@ -153,7 +152,6 @@ def run_agents_sequentially(
return [run_single_agent(agent, task) for agent in agents]
-@profile_func
def run_agents_with_different_tasks(
agent_task_pairs: List[tuple[AgentType, str]],
batch_size: int = None,
@@ -230,7 +228,6 @@ async def run_agent_with_timeout(
return None
-@profile_func
def run_agents_with_timeout(
agents: List[AgentType],
task: str,
@@ -296,7 +293,6 @@ def get_system_metrics() -> ResourceMetrics:
)
-@profile_func
def run_agents_with_resource_monitoring(
agents: List[AgentType],
task: str,
@@ -332,6 +328,110 @@ def run_agents_with_resource_monitoring(
# Implementation details...
+def _run_agents_with_tasks_concurrently(
+ agents: List[AgentType],
+ tasks: List[str] = [],
+ batch_size: int = None,
+ max_workers: int = None,
+) -> List[Any]:
+ """
+ Run multiple agents with corresponding tasks concurrently.
+
+ Args:
+ agents: List of Agent instances to run
+ tasks: List of task strings to execute
+ batch_size: Number of agents to run in parallel
+ max_workers: Maximum number of threads
+
+ Returns:
+ List of outputs from each agent
+ """
+ if len(agents) != len(tasks):
+ raise ValueError(
+ "The number of agents must match the number of tasks."
+ )
+
+ cpu_cores = os.cpu_count()
+ batch_size = batch_size or cpu_cores
+ max_workers = max_workers or cpu_cores * 2
+ results = []
+
+ try:
+ loop = asyncio.get_event_loop()
+ except RuntimeError:
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+
+ async def run_agent_task_pair(
+ agent: AgentType, task: str, executor: ThreadPoolExecutor
+ ) -> Any:
+ return await run_agent_async(agent, task, executor)
+
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
+ for i in range(0, len(agents), batch_size):
+ batch_agents = agents[i : i + batch_size]
+ batch_tasks = tasks[i : i + batch_size]
+ batch_results = loop.run_until_complete(
+ asyncio.gather(
+ *(
+ run_agent_task_pair(agent, task, executor)
+ for agent, task in zip(
+ batch_agents, batch_tasks
+ )
+ )
+ )
+ )
+ results.extend(batch_results)
+
+ return results
+
+
+def run_agents_with_tasks_concurrently(
+ agents: List[AgentType],
+ tasks: List[str] = [],
+ batch_size: int = None,
+ max_workers: int = None,
+ device: str = "cpu",
+ device_id: int = 1,
+ all_cores: bool = True,
+ no_clusterops: bool = False,
+) -> List[Any]:
+ """
+ Executes a list of agents with their corresponding tasks concurrently on a specified device.
+
+ This function orchestrates the concurrent execution of a list of agents with their respective tasks on a specified device, either CPU or GPU. It leverages the `exec_callable_with_clusterops` function to manage the execution on the specified device.
+
+ Args:
+ agents (List[AgentType]): A list of Agent instances or callable functions to execute concurrently.
+ tasks (List[str], optional): A list of task strings to execute for each agent. Defaults to an empty list.
+ batch_size (int, optional): The number of agents to run in parallel. Defaults to None.
+ max_workers (int, optional): The maximum number of threads to use for execution. Defaults to None.
+ device (str, optional): The device to use for execution. Defaults to "cpu".
+        device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 1.
+        all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
+        no_clusterops (bool, optional): If True, bypasses clusterops and runs the agents directly on the local thread pool. Defaults to False.
+
+ Returns:
+ List[Any]: A list of outputs from each agent execution.
+ """
+    # Either bypass clusterops entirely or delegate device placement to it
+
+ if no_clusterops:
+ return _run_agents_with_tasks_concurrently(
+ agents, tasks, batch_size, max_workers
+ )
+ else:
+ return exec_callable_with_clusterops(
+ device,
+ device_id,
+ all_cores,
+ _run_agents_with_tasks_concurrently,
+ agents,
+ tasks,
+ batch_size,
+ max_workers,
+ )
+
+
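+# Example usage for run_agents_with_tasks_concurrently (sketch; the Agent
+# constructor arguments mirror other examples in this repository, and
+# no_clusterops=True keeps execution on the local thread pool):
+#
+#     agents = [
+#         Agent(agent_name=f"Worker-{i}", model_name="gpt-4o-mini", max_loops=1)
+#         for i in range(3)
+#     ]
+#     tasks = [
+#         "Summarize AAPL news",
+#         "Summarize MSFT news",
+#         "Summarize NVDA news",
+#     ]
+#     outputs = run_agents_with_tasks_concurrently(
+#         agents, tasks, batch_size=2, no_clusterops=True
+#     )
+#     for output in outputs:
+#         print(output)
+
+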
# # Example usage:
# # Initialize your agents with the same model to avoid re-creating it
# agents = [
diff --git a/swarms/structs/multi_process_workflow.py b/swarms/structs/multi_process_workflow.py
index 44051d0a..7b04c10e 100644
--- a/swarms/structs/multi_process_workflow.py
+++ b/swarms/structs/multi_process_workflow.py
@@ -1,9 +1,12 @@
from multiprocessing import Manager, Pool, cpu_count
-from typing import Sequence, Union, Callable
+from typing import Sequence, Union, Callable, List
+from concurrent.futures import ThreadPoolExecutor, as_completed
from swarms.structs.agent import Agent
from swarms.structs.base_workflow import BaseWorkflow
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="multi_process_workflow")
class MultiProcessWorkflow(BaseWorkflow):
@@ -13,7 +16,7 @@ class MultiProcessWorkflow(BaseWorkflow):
Args:
max_workers (int): The maximum number of workers to use for parallel processing.
autosave (bool): Flag indicating whether to automatically save the workflow.
- tasks (List[Task]): A list of Task objects representing the workflow tasks.
+ agents (List[Union[Agent, Callable]]): A list of Agent objects or callable functions representing the workflow tasks.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
@@ -132,7 +135,7 @@ class MultiProcessWorkflow(BaseWorkflow):
callback=results_list.append,
timeout=task.timeout,
)
- for agent in self.agent
+ for agent in self.agents
]
# Wait for all jobs to complete
@@ -145,3 +148,97 @@ class MultiProcessWorkflow(BaseWorkflow):
except Exception as error:
logger.error(f"Error in run: {error}")
return None
+
+ async def async_run(self, task: str, *args, **kwargs):
+ """Asynchronously run the workflow.
+
+ Args:
+ task (Task): The task to run.
+ *args: Additional positional arguments for the task execution.
+ **kwargs: Additional keyword arguments for the task execution.
+
+ Returns:
+ List[Any]: The results of all executed tasks.
+
+ """
+ try:
+ results = []
+ with ThreadPoolExecutor(
+ max_workers=self.max_workers
+ ) as executor:
+ futures = [
+ executor.submit(
+ self.execute_task, task, *args, **kwargs
+ )
+ for _ in range(len(self.agents))
+ ]
+ for future in as_completed(futures):
+ result = future.result()
+ results.append(result)
+
+ return results
+ except Exception as error:
+ logger.error(f"Error in async_run: {error}")
+ return None
+
+ def batched_run(
+ self, tasks: List[str], batch_size: int = 5, *args, **kwargs
+ ):
+ """Run tasks in batches.
+
+ Args:
+ tasks (List[str]): A list of tasks to run.
+ batch_size (int): The size of each batch.
+ *args: Additional positional arguments for the task execution.
+ **kwargs: Additional keyword arguments for the task execution.
+
+ Returns:
+ List[Any]: The results of all executed tasks.
+
+ """
+ try:
+ results = []
+ for i in range(0, len(tasks), batch_size):
+ batch = tasks[i : i + batch_size]
+                with Pool(processes=self.max_workers) as pool:
+                    # Pool.map only accepts the callable and an iterable,
+                    # so extra args/kwargs are not forwarded here
+                    results_list = pool.map(self.execute_task, batch)
+ results.extend(results_list)
+
+ return results
+ except Exception as error:
+ logger.error(f"Error in batched_run: {error}")
+ return None
+
+ def concurrent_run(self, tasks: List[str], *args, **kwargs):
+ """Run tasks concurrently.
+
+ Args:
+ tasks (List[str]): A list of tasks to run.
+ *args: Additional positional arguments for the task execution.
+ **kwargs: Additional keyword arguments for the task execution.
+
+ Returns:
+ List[Any]: The results of all executed tasks.
+
+ """
+ try:
+ results = []
+ with ThreadPoolExecutor(
+ max_workers=self.max_workers
+ ) as executor:
+ futures = [
+ executor.submit(
+ self.execute_task, task, *args, **kwargs
+ )
+ for task in tasks
+ ]
+ for future in as_completed(futures):
+ result = future.result()
+ results.append(result)
+
+ return results
+ except Exception as error:
+ logger.error(f"Error in concurrent_run: {error}")
+ return None
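+
+
+# Example usage (sketch; MultiProcessWorkflow is configured with the
+# max_workers/autosave/agents parameters described in the class docstring,
+# and agent1/agent2 stand in for previously constructed Agent instances):
+#
+#     workflow = MultiProcessWorkflow(
+#         max_workers=4,
+#         autosave=False,
+#         agents=[agent1, agent2],
+#     )
+#
+#     # Fan a list of independent tasks out across a thread pool
+#     results = workflow.concurrent_run(
+#         ["Draft a summary", "List open risks", "Propose next steps"]
+#     )
+#
+#     # Or process a longer task list in fixed-size batches
+#     batched = workflow.batched_run(
+#         ["Task A", "Task B", "Task C", "Task D"], batch_size=2
+#     )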
diff --git a/swarms/structs/pulsar_swarm.py b/swarms/structs/pulsar_swarm.py
new file mode 100644
index 00000000..2d8961f7
--- /dev/null
+++ b/swarms/structs/pulsar_swarm.py
@@ -0,0 +1,276 @@
+import asyncio
+import pulsar
+
+from pulsar import ConsumerType
+from loguru import logger
+from swarms import Agent
+from typing import List, Dict, Any
+import json
+
+
+class ScalableAsyncAgentSwarm:
+ """
+ A scalable, asynchronous swarm of agents leveraging Apache Pulsar for inter-agent communication.
+ Provides load balancing, health monitoring, dead letter queues, and centralized logging.
+ """
+
+ def __init__(
+ self,
+ pulsar_url: str,
+ topic: str,
+ dlq_topic: str,
+ agents_config: List[Dict[str, Any]],
+ ):
+ """
+ Initializes the async swarm with agents.
+
+ Args:
+ pulsar_url (str): The URL of the Apache Pulsar broker.
+ topic (str): The main topic for task distribution.
+ dlq_topic (str): The Dead Letter Queue topic for failed messages.
+ agents_config (List[Dict[str, Any]]): List of agent configurations with `name`, `description`, and `model_name`.
+ """
+ self.pulsar_url = pulsar_url
+ self.topic = topic
+ self.dlq_topic = dlq_topic
+ self.agents_config = agents_config
+ self.client = pulsar.Client(pulsar_url)
+ self.consumer = self.client.subscribe(
+ topic,
+ subscription_name="swarm-task-sub",
+ consumer_type=ConsumerType.Shared,
+ )
+ self.dlq_producer = self.client.create_producer(dlq_topic)
+ self.response_logger = []
+ self.agents = [
+ self.create_agent(config) for config in agents_config
+ ]
+ self.agent_index = 0
+
+ logger.info(
+ "Swarm initialized with agents: {}",
+ [agent["name"] for agent in agents_config],
+ )
+
+ def create_agent(
+ self, agent_config: Dict[str, Any]
+ ) -> Dict[str, Any]:
+ """
+ Creates a new agent configuration with asynchronous capabilities.
+
+ Args:
+ agent_config (Dict[str, Any]): Configuration dictionary with agent details.
+
+ Returns:
+ Dict[str, Any]: A dictionary containing agent metadata and functionality.
+ """
+ agent_name = agent_config["name"]
+ description = agent_config["description"]
+ model_name = agent_config.get("model_name", "gpt-4o-mini")
+
+ class AsyncAgent:
+ """
+ An asynchronous agent that processes tasks and communicates via Apache Pulsar.
+ """
+
+ def __init__(
+ self, name: str, description: str, model_name: str
+ ):
+ self.name = name
+ self.description = description
+ self.agent = Agent(
+ agent_name=name,
+ model_name=model_name,
+ max_loops="auto",
+ interactive=True,
+ streaming_on=True,
+ )
+ logger.info(
+ f"Initialized agent '{name}' - {description}"
+ )
+
+ async def process_task(
+ self, message: str
+ ) -> Dict[str, Any]:
+ """
+ Processes a single task using the agent.
+
+ Args:
+ message (str): The task message.
+
+ Returns:
+ Dict[str, Any]: JSON-formatted response.
+ """
+ try:
+ logger.info(
+ f"Agent {self.name} processing task: {message}"
+ )
+ response = await asyncio.to_thread(
+ self.agent.run, message
+ )
+ logger.info(f"Agent {self.name} completed task.")
+ return {
+ "agent_name": self.name,
+ "response": response,
+ }
+ except Exception as e:
+ logger.error(
+ f"Agent {self.name} encountered an error: {e}"
+ )
+ return {"agent_name": self.name, "error": str(e)}
+
+ return {
+ "name": agent_name,
+ "instance": AsyncAgent(
+ agent_name, description, model_name
+ ),
+ }
+
+ async def distribute_task(self, message: str):
+ """
+ Distributes a task to the next available agent using round-robin.
+
+ Args:
+ message (str): The task message.
+ """
+ agent = self.agents[self.agent_index]
+ self.agent_index = (self.agent_index + 1) % len(self.agents)
+
+ try:
+ response = await agent["instance"].process_task(message)
+ self.log_response(response)
+ except Exception as e:
+ logger.error(
+ f"Error processing task by agent {agent['name']}: {e}"
+ )
+ self.send_to_dlq(message)
+
+ async def monitor_health(self):
+ """
+ Periodically monitors the health of agents.
+ """
+ while True:
+ logger.info("Performing health check for all agents.")
+ for agent in self.agents:
+ logger.info(f"Agent {agent['name']} is online.")
+ await asyncio.sleep(10)
+
+ def send_to_dlq(self, message: str):
+ """
+ Sends a failed message to the Dead Letter Queue (DLQ).
+
+ Args:
+ message (str): The message to send to the DLQ.
+ """
+ try:
+ self.dlq_producer.send(message.encode("utf-8"))
+ logger.info("Message sent to Dead Letter Queue.")
+ except Exception as e:
+ logger.error(f"Failed to send message to DLQ: {e}")
+
+ def log_response(self, response: Dict[str, Any]):
+ """
+ Logs the response to a centralized list for later analysis.
+
+ Args:
+ response (Dict[str, Any]): The agent's response.
+ """
+ self.response_logger.append(response)
+ logger.info(f"Response logged: {response}")
+
+ async def listen_and_distribute(self):
+ """
+ Listens to the main Pulsar topic and distributes tasks to agents.
+ """
+ while True:
+ msg = self.consumer.receive()
+ try:
+ message = msg.data().decode("utf-8")
+ logger.info(f"Received task: {message}")
+ await self.distribute_task(message)
+ self.consumer.acknowledge(msg)
+ except Exception as e:
+ logger.error(f"Error processing message: {e}")
+ self.send_to_dlq(msg.data().decode("utf-8"))
+ self.consumer.negative_acknowledge(msg)
+
+ async def run(self):
+ """
+ Runs the swarm asynchronously with health monitoring and task distribution.
+ """
+ logger.info("Starting the async swarm...")
+ task_listener = asyncio.create_task(
+ self.listen_and_distribute()
+ )
+ health_monitor = asyncio.create_task(self.monitor_health())
+ await asyncio.gather(task_listener, health_monitor)
+
+ def shutdown(self):
+ """
+ Safely shuts down the swarm and logs all responses.
+ """
+ logger.info("Shutting down the swarm...")
+ self.client.close()
+ with open("responses.json", "w") as f:
+ json.dump(self.response_logger, f, indent=4)
+ logger.info("Responses saved to 'responses.json'.")
+
+
+# from scalable_agent_swarm import ScalableAsyncAgentSwarm # Assuming your swarm class is saved here
+
+if __name__ == "__main__":
+ # Example Configuration
+ PULSAR_URL = "pulsar://localhost:6650"
+ TOPIC = "stock-analysis"
+ DLQ_TOPIC = "stock-analysis-dlq"
+
+ # Agents configuration
+ AGENTS_CONFIG = [
+ {
+ "name": "Stock-Analysis-Agent-1",
+ "description": "Analyzes stock trends.",
+ "model_name": "gpt-4o-mini",
+ },
+ {
+ "name": "Stock-News-Agent",
+ "description": "Summarizes stock news.",
+ "model_name": "gpt-4o-mini",
+ },
+ {
+ "name": "Tech-Trends-Agent",
+ "description": "Tracks tech sector trends.",
+ "model_name": "gpt-4o-mini",
+ },
+ ]
+
+ # Tasks to send
+ TASKS = [
+ "Analyze the trend for tech stocks in Q4 2024",
+ "Summarize the latest news on the S&P 500",
+ "Identify the top-performing sectors in the stock market",
+ "Provide a forecast for AI-related stocks for 2025",
+ ]
+
+ # Initialize and run the swarm
+ swarm = ScalableAsyncAgentSwarm(
+ PULSAR_URL, TOPIC, DLQ_TOPIC, AGENTS_CONFIG
+ )
+    async def main():
+        # Run the swarm in the background
+        swarm_task = asyncio.create_task(swarm.run())
+
+        # Send tasks to the topic
+        client = pulsar.Client(PULSAR_URL)
+        producer = client.create_producer(TOPIC)
+
+        for task in TASKS:
+            producer.send(task.encode("utf-8"))
+            print(f"Sent task: {task}")
+
+        producer.close()
+        client.close()
+
+        # Keep the swarm running
+        await swarm_task
+
+    try:
+        asyncio.run(main())
+    except KeyboardInterrupt:
+        swarm.shutdown()
diff --git a/swarms/structs/rearrange.py b/swarms/structs/rearrange.py
index 225eeb98..801861b0 100644
--- a/swarms/structs/rearrange.py
+++ b/swarms/structs/rearrange.py
@@ -1,39 +1,61 @@
-import threading
+import asyncio
+import traceback
import uuid
+from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
-from typing import Callable, Dict, List, Optional
+from typing import Callable, Dict, List, Literal, Optional
from pydantic import BaseModel, Field
from swarms_memory import BaseVectorDatabase
from swarms.schemas.agent_step_schemas import ManySteps
from swarms.structs.agent import Agent
+from swarms.structs.agents_available import showcase_available_agents
from swarms.structs.base_swarm import BaseSwarm
-from swarms.structs.omni_agent_types import AgentType
-from swarms.utils.loguru_logger import logger
+from swarms.utils.add_docs_to_agents import handle_input_docs
+from swarms.utils.loguru_logger import initialize_logger
+from swarms.utils.wrapper_clusterop import (
+ exec_callable_with_clusterops,
+)
+
+logger = initialize_logger(log_folder="rearrange")
+
+# Literal of output types
+OutputType = Literal[
+ "all",
+ "final",
+ "list",
+ "dict",
+ ".json",
+ ".md",
+ ".txt",
+ ".yaml",
+ ".toml",
+]
+
+
+def swarm_id():
+ return uuid.uuid4().hex
class AgentRearrangeInput(BaseModel):
- swarm_id: str
- name: str
- description: str
- flow: str
- max_loops: int
+ swarm_id: Optional[str] = None
+ name: Optional[str] = None
+ description: Optional[str] = None
+ flow: Optional[str] = None
+ max_loops: Optional[int] = None
time: str = Field(
default_factory=lambda: datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
),
description="The time the agent was created.",
)
-
-
-def swarm_id():
- return uuid.uuid4().hex
+ output_type: OutputType = Field(default="final")
class AgentRearrangeOutput(BaseModel):
- Input: AgentRearrangeInput
- outputs: List[ManySteps]
+ Input: Optional[AgentRearrangeInput] = None
+ outputs: Optional[List[ManySteps]] = None
time: str = Field(
default_factory=lambda: datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
@@ -47,16 +69,38 @@ class AgentRearrange(BaseSwarm):
A class representing a swarm of agents for rearranging tasks.
Attributes:
- agents (dict): A dictionary of agents, where the key is the agent's name and the value is the agent object.
- flow (str): The flow pattern of the tasks.
+ id (str): Unique identifier for the swarm
+ name (str): Name of the swarm
+ description (str): Description of the swarm's purpose
+ agents (callable): Dictionary mapping agent names to Agent objects
+ flow (str): The flow pattern defining task execution order
+ max_loops (int): Maximum number of execution loops
+ verbose (bool): Whether to enable verbose logging
+ memory_system (BaseVectorDatabase): Memory system for storing agent interactions
+ human_in_the_loop (bool): Whether human intervention is enabled
+ custom_human_in_the_loop (Callable): Custom function for human intervention
+ return_json (bool): Whether to return output in JSON format
+ output_type (OutputType): Format of output ("all", "final", "list", or "dict")
+ swarm_history (dict): History of agent interactions
+ input_config (AgentRearrangeInput): Input configuration schema
+ output_schema (AgentRearrangeOutput): Output schema
Methods:
- __init__(agents: List[Agent] = None, flow: str = None): Initializes the AgentRearrange object.
- add_agent(agent: Agent): Adds an agent to the swarm.
- remove_agent(agent_name: str): Removes an agent from the swarm.
- add_agents(agents: List[Agent]): Adds multiple agents to the swarm.
- validate_flow(): Validates the flow pattern.
- run(task): Runs the swarm to rearrange the tasks.
+ __init__(): Initializes the AgentRearrange object
+ reliability_checks(): Validates swarm configuration
+ set_custom_flow(): Sets a custom flow pattern
+ add_agent(): Adds an agent to the swarm
+ track_history(): Records agent interaction history
+ remove_agent(): Removes an agent from the swarm
+ add_agents(): Adds multiple agents to the swarm
+ validate_flow(): Validates the flow pattern
+ run(): Executes the swarm's task processing
+ astream(): Runs the swarm with streaming output
+ batch_run(): Processes multiple tasks in batches
+ abatch_run(): Asynchronously processes multiple tasks in batches
+ concurrent_run(): Processes multiple tasks concurrently
+ handle_input_docs(): Adds document content to agent prompts
+
"""
def __init__(
@@ -64,7 +108,7 @@ class AgentRearrange(BaseSwarm):
id: str = swarm_id(),
name: str = "AgentRearrange",
description: str = "A swarm of agents for rearranging tasks.",
- agents: List[AgentType] = None,
+ agents: List[Agent] = None,
flow: str = None,
max_loops: int = 1,
verbose: bool = True,
@@ -74,25 +118,26 @@ class AgentRearrange(BaseSwarm):
Callable[[str], str]
] = None,
return_json: bool = False,
+ output_type: OutputType = "final",
+ docs: List[str] = None,
+ doc_folder: str = None,
+ device: str = "cpu",
+ device_id: int = 0,
+ all_cores: bool = False,
+ all_gpus: bool = True,
+ no_use_clusterops: bool = True,
*args,
**kwargs,
):
- """
- Initializes the AgentRearrange object.
-
- Args:
- agents (List[Agent], optional): A list of Agent objects. Defaults to None.
- flow (str, optional): The flow pattern of the tasks. Defaults to None.
- """
super(AgentRearrange, self).__init__(
name=name,
description=description,
- agents=agents,
+ agents=agents if agents else [],
*args,
**kwargs,
)
self.id = id
- self.agents = {agent.name: agent for agent in agents}
+ self.agents = {agent.agent_name: agent for agent in (agents or [])}
self.flow = flow if flow is not None else ""
self.verbose = verbose
self.max_loops = max_loops if max_loops > 0 else 1
@@ -100,55 +145,61 @@ class AgentRearrange(BaseSwarm):
self.human_in_the_loop = human_in_the_loop
self.custom_human_in_the_loop = custom_human_in_the_loop
self.return_json = return_json
- self.swarm_history = {
- agent.agent_name: [] for agent in agents
- }
- self.lock = threading.Lock()
- self.id = uuid.uuid4().hex if id is None else id
-
- # Run the relianility checks
- self.reliability_checks()
-
- # Output schema
- self.input_config = AgentRearrangeInput(
- swarm_id=self.id,
+ self.output_type = output_type
+ self.docs = docs
+ self.doc_folder = doc_folder
+ self.device = device
+ self.device_id = device_id
+ self.all_cores = all_cores
+ self.all_gpus = all_gpus
+ self.no_use_clusterops = no_use_clusterops
+
+ def showcase_agents(self):
+ # Get formatted agent info once
+ agents_available = showcase_available_agents(
name=self.name,
description=self.description,
- flow=self.flow,
- max_loops=self.max_loops,
- )
-
- # Output schema
- self.output_schema = AgentRearrangeOutput(
- Input=self.input_config,
- outputs=[],
+ agents=self.agents,
+ format="Table",
)
- def reliability_checks(self):
- logger.info("Running reliability checks.")
- if self.agents is None:
- raise ValueError("No agents found in the swarm.")
+ return agents_available
- if self.flow is None:
- raise ValueError("No flow found in the swarm.")
+ def rearrange_prompt_prep(self) -> str:
+ """Prepares a formatted prompt describing the swarm configuration.
- if self.max_loops is None:
- raise ValueError("No max_loops found in the swarm.")
-
- logger.info(
- "AgentRearrange initialized with agents: {}".format(
- list(self.agents.keys())
- )
- )
-
- # Verbose is True
- if self.verbose is True:
- logger.add("agent_rearrange.log")
+ Returns:
+ str: A formatted string containing the swarm's name, description,
+ flow pattern, and participating agents.
+ """
+ agents_available = self.showcase_agents()
+ prompt = f"""
+ ===== Swarm Configuration =====
+
+ Name: {self.name}
+ Description: {self.description}
+
+ ===== Execution Flow =====
+ {self.flow}
+
+ ===== Participating Agents =====
+ {agents_available}
+
+ ===========================
+ """
+ return prompt
def set_custom_flow(self, flow: str):
self.flow = flow
logger.info(f"Custom flow set: {flow}")
+ def handle_input_docs(self):
+ self.agents = handle_input_docs(
+ agents=self.agents,
+ docs=self.docs,
+ doc_folder=self.doc_folder,
+ )
+
def add_agent(self, agent: Agent):
"""
Adds an agent to the swarm.
@@ -156,8 +207,8 @@ class AgentRearrange(BaseSwarm):
Args:
agent (Agent): The agent to be added.
"""
- logger.info(f"Adding agent {agent.name} to the swarm.")
- self.agents[agent.name] = agent
+ logger.info(f"Adding agent {agent.agent_name} to the swarm.")
+ self.agents[agent.agent_name] = agent
def track_history(
self,
@@ -183,7 +234,7 @@ class AgentRearrange(BaseSwarm):
agents (List[Agent]): A list of Agent objects.
"""
for agent in agents:
- self.agents[agent.name] = agent
+ self.agents[agent.agent_name] = agent
def validate_flow(self):
"""
@@ -226,10 +277,10 @@ class AgentRearrange(BaseSwarm):
"Duplicate agent names in the flow are not allowed."
)
- print("Flow is valid.")
+ logger.info(f"Flow: {self.flow} is valid.")
return True
- def run(
+ def _run(
self,
task: str = None,
img: str = None,
@@ -241,308 +292,406 @@ class AgentRearrange(BaseSwarm):
Runs the swarm to rearrange the tasks.
Args:
- task: The initial task to be processed.
+ task (str, optional): The initial task to be processed. Defaults to None.
+ img (str, optional): Image input for agents that support it. Defaults to None.
+ custom_tasks (Dict[str, str], optional): Custom tasks for specific agents. Defaults to None.
+ output_type (str, optional): Format of the output. Can be:
+ - "all": String containing all agent responses concatenated
+ - "final": Only the final agent's response
+ - "list": List of all agent responses
+ - "dict": Dict mapping agent names to their responses
+ Defaults to "final".
+ *args: Additional positional arguments
+ **kwargs: Additional keyword arguments
Returns:
- str: The final processed task.
+ Union[str, List[str], Dict[str, str]]: The processed output in the specified format
+
+ Raises:
+ ValueError: If flow validation fails
+ Exception: For any other errors during execution
"""
try:
if not self.validate_flow():
+ logger.error("Flow validation failed")
return "Invalid flow configuration."
tasks = self.flow.split("->")
current_task = task
+ all_responses = []
+ response_dict = {}
+ previous_agent = None
- # If custom_tasks have the agents name and tasks then combine them
+ logger.info(
+ f"Starting task execution with {len(tasks)} steps"
+ )
+
+ # Handle custom tasks
if custom_tasks is not None:
+ logger.info("Processing custom tasks")
c_agent_name, c_task = next(
iter(custom_tasks.items())
)
-
- # Find the position of the custom agent in the tasks list
position = tasks.index(c_agent_name)
- # If there is a prebois agent merge its task with the custom tasks
if position > 0:
tasks[position - 1] += "->" + c_task
else:
- # If there is no prevous agent just insert the custom tasks
tasks.insert(position, c_task)
- # Set the loop counter
loop_count = 0
while loop_count < self.max_loops:
- for task in tasks:
+ logger.info(
+ f"Starting loop {loop_count + 1}/{self.max_loops}"
+ )
+
+ for task_idx, task in enumerate(tasks):
is_last = task == tasks[-1]
agent_names = [
name.strip() for name in task.split(",")
]
+
+ # Prepare prompt with previous agent info
+ prompt_prefix = ""
+ if previous_agent and task_idx > 0:
+ prompt_prefix = f"Previous agent {previous_agent} output: {current_task}\n"
+ elif task_idx == 0:
+ prompt_prefix = "Initial task: "
+
if len(agent_names) > 1:
# Parallel processing
logger.info(
f"Running agents in parallel: {agent_names}"
)
results = []
+
for agent_name in agent_names:
if agent_name == "H":
- # Human in the loop intervention
if (
self.human_in_the_loop
and self.custom_human_in_the_loop
):
current_task = (
self.custom_human_in_the_loop(
- current_task
+ prompt_prefix
+ + str(current_task)
)
)
else:
current_task = input(
- "Enter your response:"
+ prompt_prefix
+ + "Enter your response: "
)
+ results.append(current_task)
+ response_dict[agent_name] = (
+ current_task
+ )
else:
agent = self.agents[agent_name]
+ task_with_context = (
+ prompt_prefix + str(current_task)
+ if current_task
+ else prompt_prefix
+ )
result = agent.run(
- current_task,
- img,
- is_last,
+ task=task_with_context,
+ img=img,
+ is_last=is_last,
*args,
**kwargs,
)
+ result = str(result)
results.append(result)
+ response_dict[agent_name] = result
self.output_schema.outputs.append(
agent.agent_output
)
+ logger.debug(
+ f"Agent {agent_name} output: {result}"
+ )
current_task = "; ".join(results)
+ all_responses.extend(results)
+ previous_agent = ",".join(agent_names)
+
else:
# Sequential processing
logger.info(
- f"Running agents sequentially: {agent_names}"
+ f"Running agent sequentially: {agent_names[0]}"
)
agent_name = agent_names[0]
+
if agent_name == "H":
- # Human-in-the-loop intervention
if (
self.human_in_the_loop
and self.custom_human_in_the_loop
):
current_task = (
self.custom_human_in_the_loop(
- current_task
+ prompt_prefix
+ + str(current_task)
)
)
else:
current_task = input(
- "Enter the next task: "
+ prompt_prefix
+ + "Enter the next task: "
)
+ response_dict[agent_name] = current_task
else:
agent = self.agents[agent_name]
+ task_with_context = (
+ prompt_prefix + str(current_task)
+ if current_task
+ else prompt_prefix
+ )
current_task = agent.run(
- current_task,
- img,
- is_last,
+ task=task_with_context,
+ img=img,
+ is_last=is_last,
*args,
**kwargs,
)
+ current_task = str(current_task)
+ response_dict[agent_name] = current_task
self.output_schema.outputs.append(
agent.agent_output
)
+ logger.debug(
+ f"Agent {agent_name} output: {current_task}"
+ )
+
+ all_responses.append(current_task)
+ previous_agent = agent_name
+
loop_count += 1
- # return current_task
+ logger.info("Task execution completed")
+
if self.return_json:
return self.output_schema.model_dump_json(indent=4)
- else:
- return current_task
+
+ # Handle different output types
+ if self.output_type == "all":
+ output = " ".join(all_responses)
+ elif self.output_type == "list":
+ output = all_responses
+ elif self.output_type == "dict":
+ output = response_dict
+ else: # "final"
+ output = current_task
+
+ return output
except Exception as e:
- logger.error(f"An error occurred: {e}")
+ logger.error(
+ f"An error occurred: {e} \n {traceback.format_exc()}"
+ )
return e
- async def astream(
+ def run(
self,
task: str = None,
img: str = None,
- custom_tasks: Dict[str, str] = None,
+ device: str = "cpu",
+ device_id: int = 2,
+ all_cores: bool = True,
+ all_gpus: bool = False,
+ no_use_clusterops: bool = False,
*args,
**kwargs,
):
"""
- Runs the swarm with LangChain's astream_events v1 API enabled.
- NOTICE: Be sure to only call this method if you are using LangChain-based models in your swarm.
- This is useful for enhancing user experience by providing real-time updates of how each agent
- in the swarm is processing the current task.
+ Execute the agent rearrangement task with specified compute resources.
Args:
- task: The initial prompt (aka task) passed to the first agent(s) in the swarm.
+ task (str, optional): The task to execute. Defaults to None.
+ img (str, optional): Path to input image if required. Defaults to None.
+ device (str, optional): Computing device to use ('cpu' or 'gpu'). Defaults to "cpu".
+ device_id (int, optional): ID of specific device to use. Defaults to 2.
+ all_cores (bool, optional): Whether to use all CPU cores. Defaults to True.
+ all_gpus (bool, optional): Whether to use all available GPUs. Defaults to False.
+ no_use_clusterops (bool, optional): Whether to bypass clusterops and call _run() directly. Defaults to False.
+ *args: Additional positional arguments passed to _run().
+ **kwargs: Additional keyword arguments passed to _run().
Returns:
- str: The final output generated.
+ The result from executing the task through the cluster operations wrapper.
"""
- try:
- if not self.validate_flow():
- return "Invalid flow configuration."
-
- tasks = self.flow.split("->")
- current_task = task
+ no_use_clusterops = (
+ no_use_clusterops or self.no_use_clusterops
+ )
- # If custom_tasks have the agents name and tasks then combine them
- if custom_tasks is not None:
- c_agent_name, c_task = next(
- iter(custom_tasks.items())
- )
+ if no_use_clusterops is True:
+ return self._run(
+ task=task,
+ img=img,
+ *args,
+ **kwargs,
+ )
+ else:
+ return exec_callable_with_clusterops(
+ device=device,
+ device_id=device_id,
+ all_cores=all_cores,
+ all_gpus=all_gpus,
+ func=self._run,
+ task=task,
+ img=img,
+ *args,
+ **kwargs,
+ )
- # Find the position of the custom agent in the tasks list
- position = tasks.index(c_agent_name)
+ def __call__(self, task: str, *args, **kwargs):
+ """
+ Make the class callable by executing the run() method.
- # If there is a prebois agent merge its task with the custom tasks
- if position > 0:
- tasks[position - 1] += "->" + c_task
- else:
- # If there is no prevous agent just insert the custom tasks
- tasks.insert(position, c_task)
+ Args:
+ task (str): The task to execute.
+ *args: Additional positional arguments passed to run().
+ **kwargs: Additional keyword arguments passed to run().
- logger.info("TASK:", task)
+ Returns:
+ The result from executing run().
+ """
+ return self.run(task=task, *args, **kwargs)
- # Set the loop counter
- loop_count = 0
- while loop_count < self.max_loops:
- for task in tasks:
- agent_names = [
- name.strip() for name in task.split(",")
- ]
- if len(agent_names) > 1:
- # Parallel processing
- logger.info(
- f"Running agents in parallel: {agent_names}"
- )
- results = []
- for agent_name in agent_names:
- if agent_name == "H":
- # Human in the loop intervention
- if (
- self.human_in_the_loop
- and self.custom_human_in_the_loop
- ):
- current_task = (
- self.custom_human_in_the_loop(
- current_task
- )
- )
- else:
- current_task = input(
- "Enter your response:"
- )
- else:
- agent = self.agents[agent_name]
- result = None
- # As the current `swarms` package is using LangChain v0.1 we need to use the v0.1 version of the `astream_events` API
- # Below is the link to the `astream_events` spec as outlined in the LangChain v0.1 docs
- # https://python.langchain.com/v0.1/docs/expression_language/streaming/#event-reference
- # Below is the link to the `astream_events` spec as outlined in the LangChain v0.2 docs
- # https://python.langchain.com/v0.2/docs/versions/v0_2/migrating_astream_events/
- async for evt in agent.astream_events(
- current_task, version="v1"
- ):
- # print(evt) # <- useful when building/debugging
- if evt["event"] == "on_llm_end":
- result = evt["data"]["output"]
- print(agent.name, result)
- results.append(result)
+ def batch_run(
+ self,
+ tasks: List[str],
+ img: Optional[List[str]] = None,
+ batch_size: int = 10,
+ device: str = "cpu",
+ device_id: int = None,
+ all_cores: bool = True,
+ all_gpus: bool = False,
+ *args,
+ **kwargs,
+ ) -> List[str]:
+ """
+ Process multiple tasks in batches.
- current_task = ""
- for index, res in enumerate(results):
- current_task += (
- "# OUTPUT of "
- + agent_names[index]
- + ""
- + res
- + "\n\n"
- )
- else:
- # Sequential processing
- logger.info(
- f"Running agents sequentially: {agent_names}"
- )
+ Args:
+ tasks: List of tasks to process
+ img: Optional list of images corresponding to tasks
+ batch_size: Number of tasks to process simultaneously
+ device: Computing device to use
+ device_id: Specific device ID if applicable
+ all_cores: Whether to use all CPU cores
+ all_gpus: Whether to use all available GPUs
- agent_name = agent_names[0]
- if agent_name == "H":
- # Human-in-the-loop intervention
- if (
- self.human_in_the_loop
- and self.custom_human_in_the_loop
- ):
- current_task = (
- self.custom_human_in_the_loop(
- current_task
- )
- )
- else:
- current_task = input(
- "Enter the next task: "
- )
- else:
- agent = self.agents[agent_name]
- result = None
- # As the current `swarms` package is using LangChain v0.1 we need to use the v0.1 version of the `astream_events` API
- # Below is the link to the `astream_events` spec as outlined in the LangChain v0.1 docs
- # https://python.langchain.com/v0.1/docs/expression_language/streaming/#event-reference
- # Below is the link to the `astream_events` spec as outlined in the LangChain v0.2 docs
- # https://python.langchain.com/v0.2/docs/versions/v0_2/migrating_astream_events/
- async for evt in agent.astream_events(
- f"SYSTEM: {agent.system_prompt}\nINPUT:{current_task}",
- version="v1",
- ):
- # print(evt) # <- useful when building/debugging
- if evt["event"] == "on_llm_end":
- result = evt["data"]["output"]
- print(
- agent.name, "result", result
- )
- current_task = result
+ Returns:
+ List of results corresponding to input tasks
+ """
+ results = []
+ for i in range(0, len(tasks), batch_size):
+ batch_tasks = tasks[i : i + batch_size]
+ batch_imgs = (
+ img[i : i + batch_size]
+ if img
+ else [None] * len(batch_tasks)
+ )
- loop_count += 1
+ # Process batch using concurrent execution
+ batch_results = [
+ self.run(
+ task=task,
+ img=img_path,
+ device=device,
+ device_id=device_id,
+ all_cores=all_cores,
+ all_gpus=all_gpus,
+ *args,
+ **kwargs,
+ )
+ for task, img_path in zip(batch_tasks, batch_imgs)
+ ]
+ results.extend(batch_results)
- return current_task
- except Exception as e:
- logger.error(f"An error occurred: {e}")
- return e
+ return results
- def process_agent_or_swarm(
- self, name: str, task: str, img: str, is_last, *args, **kwargs
- ):
+ async def abatch_run(
+ self,
+ tasks: List[str],
+ img: Optional[List[str]] = None,
+ batch_size: int = 10,
+ *args,
+ **kwargs,
+ ) -> List[str]:
"""
-
- process_agent_or_swarm: Processes the agent or sub-swarm based on the given name.
+ Asynchronously process multiple tasks in batches.
Args:
- name (str): The name of the agent or sub-swarm to process.
- task (str): The task to be executed.
- img (str): The image to be processed by the agents.
- *args: Variable length argument list.
- **kwargs: Arbitrary keyword arguments.
+ tasks: List of tasks to process
+ img: Optional list of images corresponding to tasks
+ batch_size: Number of tasks to process simultaneously
Returns:
- str: The result of the last executed task.
-
+ List of results corresponding to input tasks
"""
- if name.startswith("Human"):
- return self.human_intervention(task)
- elif name in self.sub_swarm:
- return self.run_sub_swarm(
- task, name, img, *args, **kwargs
+ results = []
+ for i in range(0, len(tasks), batch_size):
+ batch_tasks = tasks[i : i + batch_size]
+ batch_imgs = (
+ img[i : i + batch_size]
+ if img
+ else [None] * len(batch_tasks)
)
- else:
- agent = self.agents[name]
- return agent.run(task, img, is_last, *args, **kwargs)
- def human_intervention(self, task: str) -> str:
- if self.human_in_the_loop and self.custom_human_in_the_loop:
- return self.custom_human_in_the_loop(task)
- else:
- return input(
- "Human intervention required. Enter your response: "
- )
+ # Process batch using asyncio.gather
+ batch_coros = [
+ asyncio.to_thread(self.run, task=task, img=img_path, *args, **kwargs)
+ for task, img_path in zip(batch_tasks, batch_imgs)
+ ]
+ batch_results = await asyncio.gather(*batch_coros)
+ results.extend(batch_results)
+
+ return results
+
+ def concurrent_run(
+ self,
+ tasks: List[str],
+ img: Optional[List[str]] = None,
+ max_workers: Optional[int] = None,
+ device: str = "cpu",
+ device_id: int = None,
+ all_cores: bool = True,
+ all_gpus: bool = False,
+ *args,
+ **kwargs,
+ ) -> List[str]:
+ """
+ Process multiple tasks concurrently using ThreadPoolExecutor.
+
+ Args:
+ tasks: List of tasks to process
+ img: Optional list of images corresponding to tasks
+ max_workers: Maximum number of worker threads
+ device: Computing device to use
+ device_id: Specific device ID if applicable
+ all_cores: Whether to use all CPU cores
+ all_gpus: Whether to use all available GPUs
+
+ Returns:
+ List of results corresponding to input tasks
+ """
+ with ThreadPoolExecutor(max_workers=max_workers) as executor:
+ imgs = img if img else [None] * len(tasks)
+ futures = [
+ executor.submit(
+ self.run,
+ task=task,
+ img=img_path,
+ device=device,
+ device_id=device_id,
+ all_cores=all_cores,
+ all_gpus=all_gpus,
+ *args,
+ **kwargs,
+ )
+ for task, img_path in zip(tasks, imgs)
+ ]
+ return [future.result() for future in futures]
def rearrange(
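The reworked run()/_run() split, the agent_name keying, and the new output_type literal are easiest to see from the caller's side. Below is a minimal usage sketch, under the assumption that Agent instances expose agent_name and a run() method; the agent construction details are illustrative and not part of this diff:

from swarms.structs.agent import Agent
from swarms.structs.rearrange import AgentRearrange

# Hypothetical agents; AgentRearrange only relies on agent_name and run().
researcher = Agent(agent_name="Researcher", system_prompt="Collect the key facts.")
writer = Agent(agent_name="Writer", system_prompt="Turn the facts into a summary.")

swarm = AgentRearrange(
    agents=[researcher, writer],
    flow="Researcher -> Writer",  # a comma inside one step would run those agents in parallel
    max_loops=1,
    output_type="dict",           # "all" | "final" | "list" | "dict"
    no_use_clusterops=True,       # call _run directly instead of the clusterops wrapper
)

result = swarm.run(task="Summarize recent progress on perovskite solar cells.")
# With output_type="dict", result maps each agent name to its response.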
diff --git a/swarms/structs/round_robin.py b/swarms/structs/round_robin.py
index a2a2bd5d..19198d3d 100644
--- a/swarms/structs/round_robin.py
+++ b/swarms/structs/round_robin.py
@@ -2,12 +2,14 @@ import random
from swarms.structs.base_swarm import BaseSwarm
from typing import List
from swarms.structs.agent import Agent
-from swarms.utils.loguru_logger import logger
from pydantic import BaseModel, Field
from typing import Optional
from datetime import datetime
from swarms.schemas.agent_step_schemas import ManySteps
import tenacity
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger("round-robin")
datetime_stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py
index cc4a1865..ed55102d 100644
--- a/swarms/structs/sequential_workflow.py
+++ b/swarms/structs/sequential_workflow.py
@@ -1,15 +1,19 @@
-from typing import List
+from typing import List, Optional
from swarms.structs.agent import Agent
-from swarms.utils.loguru_logger import logger
-from swarms.structs.rearrange import AgentRearrange
-from swarms.structs.base_swarm import BaseSwarm
+from swarms.structs.rearrange import AgentRearrange, OutputType
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from swarms.utils.loguru_logger import initialize_logger
+logger = initialize_logger(log_folder="sequential_workflow")
-class SequentialWorkflow(BaseSwarm):
+
+class SequentialWorkflow:
"""
- Initializes a SequentialWorkflow object.
+ Initializes a SequentialWorkflow object, which orchestrates the execution of a sequence of agents.
Args:
+ name (str, optional): The name of the workflow. Defaults to "SequentialWorkflow".
+ description (str, optional): A description of the workflow. Defaults to "Sequential Workflow, where agents are executed in a sequence."
agents (List[Agent], optional): The list of agents in the workflow. Defaults to None.
max_loops (int, optional): The maximum number of loops to execute the workflow. Defaults to 1.
*args: Variable length argument list.
@@ -23,49 +27,163 @@ class SequentialWorkflow(BaseSwarm):
self,
name: str = "SequentialWorkflow",
description: str = "Sequential Workflow, where agents are executed in a sequence.",
- agents: List[Agent] = None,
+ agents: List[Agent] = [],
max_loops: int = 1,
+ output_type: OutputType = "all",
+ return_json: bool = False,
+ shared_memory_system: callable = None,
*args,
**kwargs,
):
- if agents is None or len(agents) == 0:
+ self.name = name
+ self.description = description
+ self.agents = agents
+ self.max_loops = max_loops
+ self.output_type = output_type
+ self.return_json = return_json
+ self.shared_memory_system = shared_memory_system
+
+ self.reliability_check()
+
+ self.agent_rearrange = AgentRearrange(
+ name=name,
+ description=description,
+ agents=agents,
+ flow=self.sequential_flow(),
+ max_loops=max_loops,
+ output_type=output_type,
+ return_json=return_json,
+ shared_memory_system=shared_memory_system,
+ *args,
+ **kwargs,
+ )
+
+ def sequential_flow(self):
+ # Only create flow if agents exist
+ if self.agents:
+ # Create flow by joining agent names with arrows
+ agent_names = []
+ for agent in self.agents:
+ try:
+ # Try to get agent_name, fallback to name if not available
+ agent_name = (
+ getattr(agent, "agent_name", None)
+ or agent.name
+ )
+ agent_names.append(agent_name)
+ except AttributeError:
+ logger.warning(
+ f"Could not get name for agent {agent}"
+ )
+ continue
+
+ if agent_names:
+ flow = " -> ".join(agent_names)
+ else:
+ flow = ""
+ logger.warning(
+ "No valid agent names found to create flow"
+ )
+ else:
+ flow = ""
+ logger.warning("No agents provided to create flow")
+
+ return flow
+
+ def reliability_check(self):
+ if self.agents is None or len(self.agents) == 0:
raise ValueError("Agents list cannot be None or empty")
- if max_loops == 0:
+ if self.max_loops == 0:
raise ValueError("max_loops cannot be 0")
+ logger.info("Checks completed your swarm is ready.")
+
+ def run(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ device: str = "cpu",
+ all_cores: bool = False,
+ all_gpus: bool = False,
+ device_id: int = 0,
+ no_use_clusterops: bool = True,
+ *args,
+ **kwargs,
+ ) -> str:
+ """
+ Executes a task through the agents in the dynamically constructed flow.
+
+ Args:
+ task (str): The task for the agents to execute.
+ device (str): The device to use for the agents to execute.
+ all_cores (bool): Whether to use all cores.
+ all_gpus (bool): Whether to use all GPUs.
+ device_id (int): The device id to use for the agents to execute.
+ no_use_clusterops (bool): Whether to bypass clusterops and run the agents directly.
+
+
+ Returns:
+ str: The final result after processing through all agents.
+
+ Raises:
+ ValueError: If task is None or empty
+ Exception: If any error occurs during task execution
+ """
+
try:
- super().__init__(
- name=name,
- description=description,
- agents=agents,
+ return self.agent_rearrange.run(
+ task=task,
+ img=img,
+ device=device,
+ all_cores=all_cores,
+ device_id=device_id,
+ all_gpus=all_gpus,
+ no_use_clusterops=no_use_clusterops,
*args,
**kwargs,
)
- self.name = name
- self.description = description
- self.agents = agents
- self.flow = " -> ".join(
- agent.agent_name for agent in agents
+ except Exception as e:
+ logger.error(
+ f"An error occurred while executing the task: {e}"
)
- self.agent_rearrange = AgentRearrange(
- name=name,
- description=description,
- agents=agents,
- flow=self.flow,
- max_loops=max_loops,
- *args,
- **kwargs,
+ raise e
+
+ def __call__(self, task: str, *args, **kwargs) -> str:
+ return self.run(task, *args, **kwargs)
+
+ def run_batched(self, tasks: List[str]) -> List[str]:
+ """
+ Executes a batch of tasks through the agents in the dynamically constructed flow.
+
+ Args:
+ tasks (List[str]): The tasks for the agents to execute.
+
+ Returns:
+ List[str]: The final results after processing through all agents.
+
+ Raises:
+ ValueError: If tasks is None or empty
+ Exception: If any error occurs during task execution
+ """
+ if not tasks or not all(
+ isinstance(task, str) for task in tasks
+ ):
+ raise ValueError(
+ "Tasks must be a non-empty list of strings"
)
+
+ try:
+ return [self.agent_rearrange.run(task) for task in tasks]
except Exception as e:
logger.error(
- f"Error initializing SequentialWorkflow: {str(e)}"
+ f"An error occurred while executing the batch of tasks: {e}"
)
raise
- def run(self, task: str) -> str:
+ async def run_async(self, task: str) -> str:
"""
- Runs the task through the agents in the dynamically constructed flow.
+ Executes the task through the agents in the dynamically constructed flow asynchronously.
Args:
task (str): The task for the agents to execute.
@@ -81,12 +199,46 @@ class SequentialWorkflow(BaseSwarm):
raise ValueError("Task must be a non-empty string")
try:
- logger.info(
- f"Running task with dynamic flow: {self.flow}"
+ return await self.agent_rearrange.run_async(task)
+ except Exception as e:
+ logger.error(
+ f"An error occurred while executing the task asynchronously: {e}"
)
- return self.agent_rearrange.run(task)
+ raise
+
+ async def run_concurrent(self, tasks: List[str]) -> List[str]:
+ """
+ Executes a batch of tasks through the agents in the dynamically constructed flow concurrently.
+
+ Args:
+ tasks (List[str]): The tasks for the agents to execute.
+
+ Returns:
+ List[str]: The final results after processing through all agents.
+
+ Raises:
+ ValueError: If tasks is None or empty
+ Exception: If any error occurs during task execution
+ """
+ if not tasks or not all(
+ isinstance(task, str) for task in tasks
+ ):
+ raise ValueError(
+ "Tasks must be a non-empty list of strings"
+ )
+
+ try:
+ with ThreadPoolExecutor() as executor:
+ results = [
+ executor.submit(self.agent_rearrange.run, task)
+ for task in tasks
+ ]
+ return [
+ result.result()
+ for result in as_completed(results)
+ ]
except Exception as e:
logger.error(
- f"An error occurred while running the task: {e}"
+ f"An error occurred while executing the batch of tasks concurrently: {e}"
)
raise
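Since SequentialWorkflow now builds its flow from the agents' names and delegates execution to AgentRearrange, typical usage reduces to passing an ordered list of agents. A short sketch under the same assumption about how agents are constructed:

from swarms.structs.agent import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow

agents = [
    Agent(agent_name="Outliner", system_prompt="Draft an outline."),
    Agent(agent_name="Drafter", system_prompt="Expand the outline into prose."),
    Agent(agent_name="Editor", system_prompt="Polish the draft."),
]

workflow = SequentialWorkflow(agents=agents, max_loops=1, output_type="final")

# Single task; run_batched([...]) and run_concurrent([...]) accept lists of tasks.
final_text = workflow.run("Write a short post about property-based testing.")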
diff --git a/swarms/structs/spreadsheet_swarm.py b/swarms/structs/spreadsheet_swarm.py
index c573b8d7..e57d6a5c 100644
--- a/swarms/structs/spreadsheet_swarm.py
+++ b/swarms/structs/spreadsheet_swarm.py
@@ -6,13 +6,15 @@ import uuid
from typing import List, Union
import aiofiles
-from loguru import logger
from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.file_processing import create_file_in_folder
-from swarms.telemetry.log_swarm_data import log_agent_data
+from swarms.telemetry.capture_sys_data import log_agent_data
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="spreadsheet_swarm")
time = datetime.datetime.now().isoformat()
uuid_hex = uuid.uuid4().hex
diff --git a/swarms/structs/swarm_arange.py b/swarms/structs/swarm_arange.py
index b008c5ec..efb880ad 100644
--- a/swarms/structs/swarm_arange.py
+++ b/swarms/structs/swarm_arange.py
@@ -3,7 +3,10 @@ import time
import uuid
from typing import Any, Callable, Dict, List, Optional
-from swarms.utils.loguru_logger import logger
+from swarms.utils.any_to_str import any_to_str
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="swarm_arange")
def swarm_id():
@@ -29,23 +32,27 @@ class SwarmRearrange:
A class representing a swarm of swarms for rearranging tasks.
Attributes:
- swarms (dict): A dictionary of swarms, where the key is the swarm's name and the value is the swarm object.
- flow (str): The flow pattern of the tasks.
- max_loops (int): The maximum number of loops to run the swarm.
- verbose (bool): A flag indicating whether to log verbose messages.
- human_in_the_loop (bool): A flag indicating whether human intervention is required.
- custom_human_in_the_loop (Callable[[str], str], optional): A custom function for human-in-the-loop intervention.
- return_json (bool): A flag indicating whether to return the result in JSON format.
- swarm_history (dict): A dictionary to keep track of the history of each swarm.
- lock (threading.Lock): A lock for thread-safe operations.
+ id (str): Unique identifier for the swarm arrangement
+ name (str): Name of the swarm arrangement
+ description (str): Description of what this swarm arrangement does
+ swarms (dict): A dictionary of swarms, where the key is the swarm's name and the value is the swarm object
+ flow (str): The flow pattern of the tasks
+ max_loops (int): The maximum number of loops to run the swarm
+ verbose (bool): A flag indicating whether to log verbose messages
+ human_in_the_loop (bool): A flag indicating whether human intervention is required
+ custom_human_in_the_loop (Callable[[str], str], optional): A custom function for human-in-the-loop intervention
+ return_json (bool): A flag indicating whether to return the result in JSON format
+ swarm_history (dict): A dictionary to keep track of the history of each swarm
+ lock (threading.Lock): A lock for thread-safe operations
Methods:
- __init__(swarms: List[swarm] = None, flow: str = None): Initializes the SwarmRearrange object.
- add_swarm(swarm: swarm): Adds an swarm to the swarm.
- remove_swarm(swarm_name: str): Removes an swarm from the swarm.
- add_swarms(swarms: List[swarm]): Adds multiple swarms to the swarm.
- validate_flow(): Validates the flow pattern.
- run(task): Runs the swarm to rearrange the tasks.
+ __init__(id: str, name: str, description: str, swarms: List[swarm], flow: str, max_loops: int, verbose: bool,
+ human_in_the_loop: bool, custom_human_in_the_loop: Callable, return_json: bool): Initializes the SwarmRearrange object
+ add_swarm(swarm: swarm): Adds a swarm to the arrangement
+ remove_swarm(swarm_name: str): Removes a swarm from the arrangement
+ add_swarms(swarms: List[swarm]): Adds multiple swarms to the arrangement
+ validate_flow(): Validates the flow pattern
+ run(task): Runs the swarm to rearrange the tasks
"""
def __init__(
@@ -69,8 +76,16 @@ class SwarmRearrange:
Initializes the SwarmRearrange object.
Args:
- swarms (List[swarm], optional): A list of swarm objects. Defaults to None.
- flow (str, optional): The flow pattern of the tasks. Defaults to None.
+ id (str): Unique identifier for the swarm arrangement. Defaults to generated UUID.
+ name (str): Name of the swarm arrangement. Defaults to "SwarmRearrange".
+ description (str): Description of what this swarm arrangement does.
+ swarms (List[swarm]): A list of swarm objects. Defaults to empty list.
+ flow (str): The flow pattern of the tasks. Defaults to None.
+ max_loops (int): Maximum number of loops to run. Defaults to 1.
+ verbose (bool): Whether to log verbose messages. Defaults to True.
+ human_in_the_loop (bool): Whether human intervention is required. Defaults to False.
+ custom_human_in_the_loop (Callable): Custom function for human intervention. Defaults to None.
+ return_json (bool): Whether to return results as JSON. Defaults to False.
"""
self.id = id
self.name = name
@@ -271,6 +286,7 @@ class SwarmRearrange:
result = swarm.run(
current_task, img, *args, **kwargs
)
+ result = any_to_str(result)
logger.info(
f"Swarm {swarm_name} returned result of type: {type(result)}"
)
@@ -312,6 +328,7 @@ class SwarmRearrange:
result = swarm.run(
current_task, img, *args, **kwargs
)
+ result = any_to_str(result)
logger.info(
f"Swarm {swarm_name} returned result of type: {type(result)}"
)
@@ -371,6 +388,7 @@ def swarm_arrange(
flow,
)
result = swarm_arrangement.run(task, *args, **kwargs)
+ result = any_to_str(result)
logger.info(
f"Swarm arrangement {name} executed successfully with output type {output_type}."
)
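SwarmRearrange applies the same arrow-flow idea one level up, composing whole swarms, and the new any_to_str call normalizes each swarm's result to a string before it is handed to the next swarm in the flow. A sketch, assuming research_swarm and writing_swarm are existing swarm objects (for example SequentialWorkflow instances) that expose a name and a run(task, ...) method:

from swarms.structs.swarm_arange import SwarmRearrange

arrangement = SwarmRearrange(
    name="ResearchThenWrite",
    description="Research a topic, then write it up.",
    swarms=[research_swarm, writing_swarm],
    flow=f"{research_swarm.name} -> {writing_swarm.name}",
    max_loops=1,
)

report = arrangement.run("Assess the market outlook for residential heat pumps.")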
diff --git a/swarms/structs/swarm_load_balancer.py b/swarms/structs/swarm_load_balancer.py
index b7cfdb94..275da2c2 100644
--- a/swarms/structs/swarm_load_balancer.py
+++ b/swarms/structs/swarm_load_balancer.py
@@ -5,7 +5,9 @@ from typing import Callable, List, Optional
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="swarm_load_balancer")
class AgentLoadBalancer(BaseSwarm):
diff --git a/swarms/structs/swarm_matcher.py b/swarms/structs/swarm_matcher.py
index ebbfa0a3..c4d0711f 100644
--- a/swarms/structs/swarm_matcher.py
+++ b/swarms/structs/swarm_matcher.py
@@ -1,338 +1,106 @@
-# from typing import List, Tuple, Optional
-# import numpy as np
-# import torch
-# from transformers import AutoTokenizer, AutoModel
-# from pydantic import BaseModel, Field
-# from loguru import logger
-# import json
-# from tenacity import retry, stop_after_attempt, wait_exponential
-# from uuid import uuid4
-
-
-# class SwarmType(BaseModel):
-# name: str
-# description: str
-# embedding: Optional[List[float]] = Field(
-# default=None, exclude=True
-# )
-
-
-# class SwarmMatcherConfig(BaseModel):
-# model_name: str = "sentence-transformers/all-MiniLM-L6-v2"
-# embedding_dim: int = (
-# 512 # Dimension of the sentence-transformers model
-# )
-
-
-# class SwarmMatcher:
-# """
-# A class for matching tasks to swarm types based on their descriptions.
-# It utilizes a transformer model to generate embeddings for task and swarm type descriptions,
-# and then calculates the dot product to find the best match.
-# """
-
-# def __init__(self, config: SwarmMatcherConfig):
-# """
-# Initializes the SwarmMatcher with a configuration.
-
-# Args:
-# config (SwarmMatcherConfig): The configuration for the SwarmMatcher.
-# """
-# logger.add("swarm_matcher_debug.log", level="DEBUG")
-# logger.debug("Initializing SwarmMatcher")
-# try:
-# self.config = config
-# self.tokenizer = AutoTokenizer.from_pretrained(
-# config.model_name
-# )
-# self.model = AutoModel.from_pretrained(config.model_name)
-# self.swarm_types: List[SwarmType] = []
-# logger.debug("SwarmMatcher initialized successfully")
-# except Exception as e:
-# logger.error(f"Error initializing SwarmMatcher: {str(e)}")
-# raise
-
-# @retry(
-# stop=stop_after_attempt(3),
-# wait=wait_exponential(multiplier=1, min=4, max=10),
-# )
-# def get_embedding(self, text: str) -> np.ndarray:
-# """
-# Generates an embedding for a given text using the configured model.
-
-# Args:
-# text (str): The text for which to generate an embedding.
-
-# Returns:
-# np.ndarray: The embedding vector for the text.
-# """
-# logger.debug(f"Getting embedding for text: {text[:50]}...")
-# try:
-# inputs = self.tokenizer(
-# text,
-# return_tensors="pt",
-# padding=True,
-# truncation=True,
-# max_length=512,
-# )
-# with torch.no_grad():
-# outputs = self.model(**inputs)
-# embedding = (
-# outputs.last_hidden_state.mean(dim=1)
-# .squeeze()
-# .numpy()
-# )
-# logger.debug("Embedding generated successfully")
-# return embedding
-# except Exception as e:
-# logger.error(f"Error generating embedding: {str(e)}")
-# raise
-
-# def add_swarm_type(self, swarm_type: SwarmType):
-# """
-# Adds a swarm type to the list of swarm types, generating an embedding for its description.
-
-# Args:
-# swarm_type (SwarmType): The swarm type to add.
-# """
-# logger.debug(f"Adding swarm type: {swarm_type.name}")
-# try:
-# embedding = self.get_embedding(swarm_type.description)
-# swarm_type.embedding = embedding.tolist()
-# self.swarm_types.append(swarm_type)
-# logger.info(f"Added swarm type: {swarm_type.name}")
-# except Exception as e:
-# logger.error(
-# f"Error adding swarm type {swarm_type.name}: {str(e)}"
-# )
-# raise
-
-# def find_best_match(self, task: str) -> Tuple[str, float]:
-# """
-# Finds the best match for a given task among the registered swarm types.
-
-# Args:
-# task (str): The task for which to find the best match.
-
-# Returns:
-# Tuple[str, float]: A tuple containing the name of the best matching swarm type and the score.
-# """
-# logger.debug(f"Finding best match for task: {task[:50]}...")
-# try:
-# task_embedding = self.get_embedding(task)
-# best_match = None
-# best_score = -float("inf")
-# for swarm_type in self.swarm_types:
-# score = np.dot(
-# task_embedding, np.array(swarm_type.embedding)
-# )
-# if score > best_score:
-# best_score = score
-# best_match = swarm_type
-# logger.info(
-# f"Best match for task: {best_match.name} (score: {best_score})"
-# )
-# return best_match.name, float(best_score)
-# except Exception as e:
-# logger.error(
-# f"Error finding best match for task: {str(e)}"
-# )
-# raise
-
-# def auto_select_swarm(self, task: str) -> str:
-# """
-# Automatically selects the best swarm type for a given task based on their descriptions.
-
-# Args:
-# task (str): The task for which to select a swarm type.
-
-# Returns:
-# str: The name of the selected swarm type.
-# """
-# logger.debug(f"Auto-selecting swarm for task: {task[:50]}...")
-# best_match, score = self.find_best_match(task)
-# logger.info(f"Task: {task}")
-# logger.info(f"Selected Swarm Type: {best_match}")
-# logger.info(f"Confidence Score: {score:.2f}")
-# return best_match
-
-# def run_multiple(self, tasks: List[str], *args, **kwargs) -> str:
-# swarms = []
-
-# for task in tasks:
-# output = self.auto_select_swarm(task)
-
-# # Append
-# swarms.append(output)
-
-# return swarms
-
-# def save_swarm_types(self, filename: str):
-# """
-# Saves the registered swarm types to a JSON file.
-
-# Args:
-# filename (str): The name of the file to which to save the swarm types.
-# """
-# try:
-# with open(filename, "w") as f:
-# json.dump([st.dict() for st in self.swarm_types], f)
-# logger.info(f"Saved swarm types to {filename}")
-# except Exception as e:
-# logger.error(f"Error saving swarm types: {str(e)}")
-# raise
-
-# def load_swarm_types(self, filename: str):
-# """
-# Loads swarm types from a JSON file.
-
-# Args:
-# filename (str): The name of the file from which to load the swarm types.
-# """
-# try:
-# with open(filename, "r") as f:
-# swarm_types_data = json.load(f)
-# self.swarm_types = [
-# SwarmType(**st) for st in swarm_types_data
-# ]
-# logger.info(f"Loaded swarm types from {filename}")
-# except Exception as e:
-# logger.error(f"Error loading swarm types: {str(e)}")
-# raise
-
-
-# def initialize_swarm_types(matcher: SwarmMatcher):
-# logger.debug("Initializing swarm types")
-# swarm_types = [
-# SwarmType(
-# name="AgentRearrange",
-# description="Optimize agent order and rearrange flow for multi-step tasks, ensuring efficient task allocation and minimizing bottlenecks. Keywords: orchestration, coordination, pipeline optimization, task scheduling, resource allocation, workflow management, agent organization, process optimization",
-# ),
-# SwarmType(
-# name="MixtureOfAgents",
-# description="Combine diverse expert agents for comprehensive analysis, fostering a collaborative approach to problem-solving and leveraging individual strengths. Keywords: multi-agent system, expert collaboration, distributed intelligence, collective problem solving, agent specialization, team coordination, hybrid approaches, knowledge synthesis",
-# ),
-# SwarmType(
-# name="SpreadSheetSwarm",
-# description="Collaborative data processing and analysis in a spreadsheet-like environment, facilitating real-time data sharing and visualization. Keywords: data analysis, tabular processing, collaborative editing, data transformation, spreadsheet operations, data visualization, real-time collaboration, structured data",
-# ),
-# SwarmType(
-# name="SequentialWorkflow",
-# description="Execute tasks in a step-by-step, sequential process workflow, ensuring a logical and methodical approach to task execution. Keywords: linear processing, waterfall methodology, step-by-step execution, ordered tasks, sequential operations, process flow, systematic approach, staged execution",
-# ),
-# SwarmType(
-# name="ConcurrentWorkflow",
-# description="Process multiple tasks or data sources concurrently in parallel, maximizing productivity and reducing processing time. Keywords: parallel processing, multi-threading, asynchronous execution, distributed computing, concurrent operations, simultaneous tasks, parallel workflows, scalable processing",
-# ),
-# # SwarmType(
-# # name="HierarchicalSwarm",
-# # description="Organize agents in a hierarchical structure with clear reporting lines and delegation of responsibilities. Keywords: management hierarchy, organizational structure, delegation, supervision, chain of command, tiered organization, structured coordination",
-# # ),
-# # SwarmType(
-# # name="AdaptiveSwarm",
-# # description="Dynamically adjust agent behavior and swarm configuration based on task requirements and performance feedback. Keywords: dynamic adaptation, self-optimization, feedback loops, learning systems, flexible configuration, responsive behavior, adaptive algorithms",
-# # ),
-# # SwarmType(
-# # name="ConsensusSwarm",
-# # description="Achieve group decisions through consensus mechanisms and voting protocols among multiple agents. Keywords: group decision making, voting systems, collective intelligence, agreement protocols, democratic processes, collaborative decisions",
-# # ),
-# ]
-
-# for swarm_type in swarm_types:
-# matcher.add_swarm_type(swarm_type)
-# logger.debug("Swarm types initialized")
-
-
-# def swarm_matcher(task: str, *args, **kwargs):
-# """
-# Runs the SwarmMatcher example with predefined tasks and swarm types.
-# """
-# config = SwarmMatcherConfig()
-# matcher = SwarmMatcher(config)
-# initialize_swarm_types(matcher)
-
-# # matcher.save_swarm_types(f"swarm_logs/{uuid4().hex}.json")
-
-# swarm_type = matcher.auto_select_swarm(task)
-
-# logger.info(f"{swarm_type}")
-
-# return swarm_type
-
-
-from typing import List, Tuple, Dict
+from typing import List, Tuple, Optional
+import numpy as np
+import torch
+from transformers import AutoTokenizer, AutoModel
from pydantic import BaseModel, Field
-from loguru import logger
-from uuid import uuid4
-import chromadb
import json
from tenacity import retry, stop_after_attempt, wait_exponential
+from swarms.utils.loguru_logger import initialize_logger
+logger = initialize_logger(log_folder="swarm_matcher")
-class SwarmType(BaseModel):
- """A swarm type with its name, description and optional metadata"""
- id: str = Field(default_factory=lambda: str(uuid4()))
+class SwarmType(BaseModel):
name: str
description: str
- metadata: Dict = Field(default_factory=dict)
+ embedding: Optional[List[float]] = Field(
+ default=None, exclude=True
+ )
class SwarmMatcherConfig(BaseModel):
- """Configuration for the SwarmMatcher"""
-
- collection_name: str = "swarm_types"
- distance_metric: str = "cosine" # or "l2" or "ip"
- embedding_function: str = (
- "sentence-transformers/all-mpnet-base-v2" # Better model than MiniLM
+ model_name: str = "sentence-transformers/all-MiniLM-L6-v2"
+ embedding_dim: int = (
+ 512 # Dimension of the sentence-transformers model
)
- persist_directory: str = "./chroma_db"
class SwarmMatcher:
"""
- An improved swarm matcher that uses ChromaDB for better vector similarity search.
- Features:
- - Persistent storage of embeddings
- - Better vector similarity search with multiple distance metrics
- - Improved embedding model
- - Metadata filtering capabilities
- - Batch operations support
+ A class for matching tasks to swarm types based on their descriptions.
+ It utilizes a transformer model to generate embeddings for task and swarm type descriptions,
+ and then calculates the dot product to find the best match.
"""
def __init__(self, config: SwarmMatcherConfig):
- """Initialize the improved swarm matcher"""
- logger.add("swarm_matcher.log", rotation="100 MB")
- self.config = config
+ """
+ Initializes the SwarmMatcher with a configuration.
- # Initialize ChromaDB client with persistence
- self.chroma_client = chromadb.Client()
+ Args:
+ config (SwarmMatcherConfig): The configuration for the SwarmMatcher.
+ """
+ logger.add("swarm_matcher_debug.log", level="DEBUG")
+ logger.debug("Initializing SwarmMatcher")
+ try:
+ self.config = config
+ self.tokenizer = AutoTokenizer.from_pretrained(
+ config.model_name
+ )
+ self.model = AutoModel.from_pretrained(config.model_name)
+ self.swarm_types: List[SwarmType] = []
+ logger.debug("SwarmMatcher initialized successfully")
+ except Exception as e:
+ logger.error(f"Error initializing SwarmMatcher: {str(e)}")
+ raise
- # Get or create collection
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ )
+ def get_embedding(self, text: str) -> np.ndarray:
+ """
+ Generates an embedding for a given text using the configured model.
+
+ Args:
+ text (str): The text for which to generate an embedding.
+
+ Returns:
+ np.ndarray: The embedding vector for the text.
+ """
+ logger.debug(f"Getting embedding for text: {text[:50]}...")
try:
- self.collection = self.chroma_client.get_collection(
- name=config.collection_name,
+ inputs = self.tokenizer(
+ text,
+ return_tensors="pt",
+ padding=True,
+ truncation=True,
+ max_length=512,
)
- except ValueError:
- self.collection = self.chroma_client.create_collection(
- name=config.collection_name,
- metadata={"hnsw:space": config.distance_metric},
+ with torch.no_grad():
+ outputs = self.model(**inputs)
+ embedding = (
+ outputs.last_hidden_state.mean(dim=1)
+ .squeeze()
+ .numpy()
)
+ logger.debug("Embedding generated successfully")
+ return embedding
+ except Exception as e:
+ logger.error(f"Error generating embedding: {str(e)}")
+ raise
- logger.info(
- f"Initialized SwarmMatcher with collection '{config.collection_name}'"
- )
+ def add_swarm_type(self, swarm_type: SwarmType):
+ """
+ Adds a swarm type to the list of swarm types, generating an embedding for its description.
- def add_swarm_type(self, swarm_type: SwarmType) -> None:
- """Add a single swarm type to the collection"""
+ Args:
+ swarm_type (SwarmType): The swarm type to add.
+ """
+ logger.debug(f"Adding swarm type: {swarm_type.name}")
try:
- self.collection.add(
- ids=[swarm_type.id],
- documents=[swarm_type.description],
- metadatas=[
- {"name": swarm_type.name, **swarm_type.metadata}
- ],
- )
+ embedding = self.get_embedding(swarm_type.description)
+ swarm_type.embedding = embedding.tolist()
+ self.swarm_types.append(swarm_type)
logger.info(f"Added swarm type: {swarm_type.name}")
except Exception as e:
logger.error(
@@ -340,239 +108,472 @@ class SwarmMatcher:
)
raise
- def add_swarm_types(self, swarm_types: List[SwarmType]) -> None:
- """Add multiple swarm types in batch"""
+ def find_best_match(self, task: str) -> Tuple[str, float]:
+ """
+ Finds the best match for a given task among the registered swarm types.
+
+ Args:
+ task (str): The task for which to find the best match.
+
+ Returns:
+ Tuple[str, float]: A tuple containing the name of the best matching swarm type and the score.
+ """
+ logger.debug(f"Finding best match for task: {task[:50]}...")
try:
- self.collection.add(
- ids=[st.id for st in swarm_types],
- documents=[st.description for st in swarm_types],
- metadatas=[
- {"name": st.name, **st.metadata}
- for st in swarm_types
- ],
+ task_embedding = self.get_embedding(task)
+ best_match = None
+ best_score = -float("inf")
+ for swarm_type in self.swarm_types:
+ score = np.dot(
+ task_embedding, np.array(swarm_type.embedding)
+ )
+ if score > best_score:
+ best_score = score
+ best_match = swarm_type
+ logger.info(
+ f"Best match for task: {best_match.name} (score: {best_score})"
)
- logger.info(f"Added {len(swarm_types)} swarm types")
+ return best_match.name, float(best_score)
except Exception as e:
logger.error(
- f"Error adding swarm types in batch: {str(e)}"
+ f"Error finding best match for task: {str(e)}"
)
raise
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- )
- def find_best_matches(
- self,
- task: str,
- n_results: int = 3,
- score_threshold: float = 0.7,
- ) -> List[Tuple[str, float]]:
+ def auto_select_swarm(self, task: str) -> str:
"""
- Find the best matching swarm types for a given task
- Returns multiple matches with their scores
+ Automatically selects the best swarm type for a given task based on their descriptions.
+
+ Args:
+ task (str): The task for which to select a swarm type.
+
+ Returns:
+ str: The name of the selected swarm type.
"""
- try:
- results = self.collection.query(
- query_texts=[task],
- n_results=n_results,
- include=["metadatas", "distances"],
- )
+ logger.debug(f"Auto-selecting swarm for task: {task[:50]}...")
+ best_match, score = self.find_best_match(task)
+ logger.info(f"Task: {task}")
+ logger.info(f"Selected Swarm Type: {best_match}")
+ logger.info(f"Confidence Score: {score:.2f}")
+ return best_match
- matches = []
- for metadata, distance in zip(
- results["metadatas"][0], results["distances"][0]
- ):
- # Convert distance to similarity score (1 - normalized_distance)
- score = 1 - (
- distance / 2
- ) # Normalize cosine distance to [0,1]
- if score >= score_threshold:
- matches.append((metadata["name"], score))
+ def run_multiple(self, tasks: List[str], *args, **kwargs) -> str:
+ swarms = []
- logger.info(f"Found {len(matches)} matches for task")
- return matches
+ for task in tasks:
+ output = self.auto_select_swarm(task)
- except Exception as e:
- logger.error(f"Error finding matches for task: {str(e)}")
- raise
+ # Append
+ swarms.append(output)
- def auto_select_swarm(self, task: str) -> str:
- """
- Automatically select the best swarm type for a task
- Returns only the top match
- """
- matches = self.find_best_matches(task, n_results=1)
- if not matches:
- logger.warning("No suitable matches found for task")
- return "SequentialWorkflow" # Default fallback
-
- best_match, score = matches[0]
- logger.info(
- f"Selected swarm type '{best_match}' with confidence {score:.3f}"
- )
- return best_match
+ return swarms
- def run_multiple(self, tasks: List[str]) -> List[str]:
- """Process multiple tasks in batch"""
- return [self.auto_select_swarm(task) for task in tasks]
+ def save_swarm_types(self, filename: str):
+ """
+ Saves the registered swarm types to a JSON file.
- def save_swarm_types(self, filename: str) -> None:
- """Export swarm types to JSON"""
+ Args:
+ filename (str): The name of the file to which to save the swarm types.
+ """
try:
- all_data = self.collection.get(
- include=["metadatas", "documents"]
- )
- swarm_types = [
- SwarmType(
- id=id_,
- name=metadata["name"],
- description=document,
- metadata={
- k: v
- for k, v in metadata.items()
- if k != "name"
- },
- )
- for id_, metadata, document in zip(
- all_data["ids"],
- all_data["metadatas"],
- all_data["documents"],
- )
- ]
-
with open(filename, "w") as f:
- json.dump(
- [st.dict() for st in swarm_types], f, indent=2
- )
+ json.dump([st.dict() for st in self.swarm_types], f)
logger.info(f"Saved swarm types to {filename}")
except Exception as e:
logger.error(f"Error saving swarm types: {str(e)}")
raise
- def load_swarm_types(self, filename: str) -> None:
- """Import swarm types from JSON"""
+ def load_swarm_types(self, filename: str):
+ """
+ Loads swarm types from a JSON file.
+
+ Args:
+ filename (str): The name of the file from which to load the swarm types.
+ """
try:
with open(filename, "r") as f:
swarm_types_data = json.load(f)
- swarm_types = [SwarmType(**st) for st in swarm_types_data]
- self.add_swarm_types(swarm_types)
+ self.swarm_types = [
+ SwarmType(**st) for st in swarm_types_data
+ ]
logger.info(f"Loaded swarm types from {filename}")
except Exception as e:
logger.error(f"Error loading swarm types: {str(e)}")
raise
-def initialize_default_swarm_types(matcher: SwarmMatcher) -> None:
- """Initialize the matcher with default swarm types"""
+def initialize_swarm_types(matcher: SwarmMatcher):
+ logger.debug("Initializing swarm types")
swarm_types = [
SwarmType(
name="AgentRearrange",
- description="""
- Optimize agent order and rearrange flow for multi-step tasks, ensuring efficient task allocation
- and minimizing bottlenecks. Specialized in orchestration, coordination, pipeline optimization,
- task scheduling, resource allocation, workflow management, agent organization, and process optimization.
- Best for tasks requiring complex agent interactions and workflow optimization.
- """,
- metadata={
- "category": "optimization",
- "complexity": "high",
- },
+ description="Optimize agent order and rearrange flow for multi-step tasks, ensuring efficient task allocation and minimizing bottlenecks. Keywords: orchestration, coordination, pipeline optimization, task scheduling, resource allocation, workflow management, agent organization, process optimization",
),
SwarmType(
name="MixtureOfAgents",
- description="""
- Combine diverse expert agents for comprehensive analysis, fostering a collaborative approach
- to problem-solving and leveraging individual strengths. Focuses on multi-agent systems,
- expert collaboration, distributed intelligence, collective problem solving, agent specialization,
- team coordination, hybrid approaches, and knowledge synthesis. Ideal for complex problems
- requiring multiple areas of expertise.
- """,
- metadata={
- "category": "collaboration",
- "complexity": "high",
- },
+ description="Combine diverse expert agents for comprehensive analysis, fostering a collaborative approach to problem-solving and leveraging individual strengths. Keywords: multi-agent system, expert collaboration, distributed intelligence, collective problem solving, agent specialization, team coordination, hybrid approaches, knowledge synthesis",
),
SwarmType(
name="SpreadSheetSwarm",
- description="""
- Collaborative data processing and analysis in a spreadsheet-like environment, facilitating
- real-time data sharing and visualization. Specializes in data analysis, tabular processing,
- collaborative editing, data transformation, spreadsheet operations, data visualization,
- real-time collaboration, and structured data handling. Perfect for data-intensive tasks
- requiring structured analysis.
- """,
- metadata={
- "category": "data_processing",
- "complexity": "medium",
- },
+ description="Collaborative data processing and analysis in a spreadsheet-like environment, facilitating real-time data sharing and visualization. Keywords: data analysis, tabular processing, collaborative editing, data transformation, spreadsheet operations, data visualization, real-time collaboration, structured data",
),
SwarmType(
name="SequentialWorkflow",
- description="""
- Execute tasks in a step-by-step, sequential process workflow, ensuring a logical and methodical
- approach to task execution. Focuses on linear processing, waterfall methodology, step-by-step
- execution, ordered tasks, sequential operations, process flow, systematic approach, and staged
- execution. Best for tasks requiring strict order and dependencies.
- """,
- metadata={"category": "workflow", "complexity": "low"},
+ description="Execute tasks in a step-by-step, sequential process workflow, ensuring a logical and methodical approach to task execution. Keywords: linear processing, waterfall methodology, step-by-step execution, ordered tasks, sequential operations, process flow, systematic approach, staged execution",
),
SwarmType(
name="ConcurrentWorkflow",
- description="""
- Process multiple tasks or data sources concurrently in parallel, maximizing productivity
- and reducing processing time. Specializes in parallel processing, multi-threading,
- asynchronous execution, distributed computing, concurrent operations, simultaneous tasks,
- parallel workflows, and scalable processing. Ideal for independent tasks that can be
- processed simultaneously.
- """,
- metadata={"category": "workflow", "complexity": "medium"},
+ description="Process multiple tasks or data sources concurrently in parallel, maximizing productivity and reducing processing time. Keywords: parallel processing, multi-threading, asynchronous execution, distributed computing, concurrent operations, simultaneous tasks, parallel workflows, scalable processing",
),
+ # SwarmType(
+ # name="HierarchicalSwarm",
+ # description="Organize agents in a hierarchical structure with clear reporting lines and delegation of responsibilities. Keywords: management hierarchy, organizational structure, delegation, supervision, chain of command, tiered organization, structured coordination",
+ # ),
+ # SwarmType(
+ # name="AdaptiveSwarm",
+ # description="Dynamically adjust agent behavior and swarm configuration based on task requirements and performance feedback. Keywords: dynamic adaptation, self-optimization, feedback loops, learning systems, flexible configuration, responsive behavior, adaptive algorithms",
+ # ),
+ # SwarmType(
+ # name="ConsensusSwarm",
+ # description="Achieve group decisions through consensus mechanisms and voting protocols among multiple agents. Keywords: group decision making, voting systems, collective intelligence, agreement protocols, democratic processes, collaborative decisions",
+ # ),
]
- matcher.add_swarm_types(swarm_types)
- logger.info("Initialized default swarm types")
+ for swarm_type in swarm_types:
+ matcher.add_swarm_type(swarm_type)
+ logger.debug("Swarm types initialized")
-def create_swarm_matcher(
- persist_dir: str = "./chroma_db",
- collection_name: str = "swarm_types",
-) -> SwarmMatcher:
- """Convenience function to create and initialize a swarm matcher"""
- config = SwarmMatcherConfig(
- persist_directory=persist_dir, collection_name=collection_name
- )
+def swarm_matcher(task: str, *args, **kwargs):
+ """
+    Selects the most suitable swarm type for the given task using a SwarmMatcher initialized with the default swarm types.
+ """
+ config = SwarmMatcherConfig()
matcher = SwarmMatcher(config)
- initialize_default_swarm_types(matcher)
- return matcher
-
+ initialize_swarm_types(matcher)
-# Example usage
-def swarm_matcher(task: str) -> str:
- # Create and initialize matcher
- matcher = create_swarm_matcher()
+ # matcher.save_swarm_types(f"swarm_logs/{uuid4().hex}.json")
swarm_type = matcher.auto_select_swarm(task)
- print(f"Task: {task}\nSelected Swarm: {swarm_type}\n")
+
+ logger.info(f"{swarm_type}")
return swarm_type
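+
+
+# Illustrative usage (a minimal sketch; the task string is an assumption, not a shipped example):
+#
+# selected = swarm_matcher("Process these tasks one after another in a specific order")
+# print(selected)  # e.g. "SequentialWorkflow"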
+# from typing import List, Tuple, Dict
+# from pydantic import BaseModel, Field
+# from loguru import logger
+# from uuid import uuid4
+# import chromadb
+# import json
+# from tenacity import retry, stop_after_attempt, wait_exponential
+
+
+# class SwarmType(BaseModel):
+# """A swarm type with its name, description and optional metadata"""
+
+# id: str = Field(default_factory=lambda: str(uuid4()))
+# name: str
+# description: str
+# metadata: Dict = Field(default_factory=dict)
+
+
+# class SwarmMatcherConfig(BaseModel):
+# """Configuration for the SwarmMatcher"""
+
+# collection_name: str = "swarm_types"
+# distance_metric: str = "cosine" # or "l2" or "ip"
+# embedding_function: str = (
+# "sentence-transformers/all-mpnet-base-v2" # Better model than MiniLM
+# )
+# persist_directory: str = "./chroma_db"
+
+
+# class SwarmMatcher:
+# """
+# An improved swarm matcher that uses ChromaDB for better vector similarity search.
+# Features:
+# - Persistent storage of embeddings
+# - Better vector similarity search with multiple distance metrics
+# - Improved embedding model
+# - Metadata filtering capabilities
+# - Batch operations support
+# """
+
+# def __init__(self, config: SwarmMatcherConfig):
+# """Initialize the improved swarm matcher"""
+# logger.add("swarm_matcher.log", rotation="100 MB")
+# self.config = config
+
+# # Initialize ChromaDB client with persistence
+# self.chroma_client = chromadb.Client()
+
+# # Get or create collection
+# try:
+# self.collection = self.chroma_client.get_collection(
+# name=config.collection_name,
+# )
+# except ValueError:
+# self.collection = self.chroma_client.create_collection(
+# name=config.collection_name,
+# metadata={"hnsw:space": config.distance_metric},
+# )
+
+# logger.info(
+# f"Initialized SwarmMatcher with collection '{config.collection_name}'"
+# )
+
+# def add_swarm_type(self, swarm_type: SwarmType) -> None:
+# """Add a single swarm type to the collection"""
+# try:
+# self.collection.add(
+# ids=[swarm_type.id],
+# documents=[swarm_type.description],
+# metadatas=[
+# {"name": swarm_type.name, **swarm_type.metadata}
+# ],
+# )
+# logger.info(f"Added swarm type: {swarm_type.name}")
+# except Exception as e:
+# logger.error(
+# f"Error adding swarm type {swarm_type.name}: {str(e)}"
+# )
+# raise
+
+# def add_swarm_types(self, swarm_types: List[SwarmType]) -> None:
+# """Add multiple swarm types in batch"""
+# try:
+# self.collection.add(
+# ids=[st.id for st in swarm_types],
+# documents=[st.description for st in swarm_types],
+# metadatas=[
+# {"name": st.name, **st.metadata}
+# for st in swarm_types
+# ],
+# )
+# logger.info(f"Added {len(swarm_types)} swarm types")
+# except Exception as e:
+# logger.error(
+# f"Error adding swarm types in batch: {str(e)}"
+# )
+# raise
+
+# @retry(
+# stop=stop_after_attempt(3),
+# wait=wait_exponential(multiplier=1, min=4, max=10),
+# )
+# def find_best_matches(
+# self,
+# task: str,
+# n_results: int = 3,
+# score_threshold: float = 0.7,
+# ) -> List[Tuple[str, float]]:
+# """
+# Find the best matching swarm types for a given task
+# Returns multiple matches with their scores
+# """
+# try:
+# results = self.collection.query(
+# query_texts=[task],
+# n_results=n_results,
+# include=["metadatas", "distances"],
+# )
+
+# matches = []
+# for metadata, distance in zip(
+# results["metadatas"][0], results["distances"][0]
+# ):
+# # Convert distance to similarity score (1 - normalized_distance)
+# score = 1 - (
+# distance / 2
+# ) # Normalize cosine distance to [0,1]
+# if score >= score_threshold:
+# matches.append((metadata["name"], score))
+
+# logger.info(f"Found {len(matches)} matches for task")
+# return matches
+
+# except Exception as e:
+# logger.error(f"Error finding matches for task: {str(e)}")
+# raise
+
+# def auto_select_swarm(self, task: str) -> str:
+# """
+# Automatically select the best swarm type for a task
+# Returns only the top match
+# """
+# matches = self.find_best_matches(task, n_results=1)
+# if not matches:
+# logger.warning("No suitable matches found for task")
+# return "SequentialWorkflow" # Default fallback
+
+# best_match, score = matches[0]
+# logger.info(
+# f"Selected swarm type '{best_match}' with confidence {score:.3f}"
+# )
+# return best_match
+
+# def run_multiple(self, tasks: List[str]) -> List[str]:
+# """Process multiple tasks in batch"""
+# return [self.auto_select_swarm(task) for task in tasks]
+
+# def save_swarm_types(self, filename: str) -> None:
+# """Export swarm types to JSON"""
+# try:
+# all_data = self.collection.get(
+# include=["metadatas", "documents"]
+# )
+# swarm_types = [
+# SwarmType(
+# id=id_,
+# name=metadata["name"],
+# description=document,
+# metadata={
+# k: v
+# for k, v in metadata.items()
+# if k != "name"
+# },
+# )
+# for id_, metadata, document in zip(
+# all_data["ids"],
+# all_data["metadatas"],
+# all_data["documents"],
+# )
+# ]
+
+# with open(filename, "w") as f:
+# json.dump(
+# [st.dict() for st in swarm_types], f, indent=2
+# )
+# logger.info(f"Saved swarm types to {filename}")
+# except Exception as e:
+# logger.error(f"Error saving swarm types: {str(e)}")
+# raise
+
+# def load_swarm_types(self, filename: str) -> None:
+# """Import swarm types from JSON"""
+# try:
+# with open(filename, "r") as f:
+# swarm_types_data = json.load(f)
+# swarm_types = [SwarmType(**st) for st in swarm_types_data]
+# self.add_swarm_types(swarm_types)
+# logger.info(f"Loaded swarm types from {filename}")
+# except Exception as e:
+# logger.error(f"Error loading swarm types: {str(e)}")
+# raise
+
+
+# def initialize_default_swarm_types(matcher: SwarmMatcher) -> None:
+# """Initialize the matcher with default swarm types"""
+# swarm_types = [
+# SwarmType(
+# name="AgentRearrange",
+# description="""
+# Optimize agent order and rearrange flow for multi-step tasks, ensuring efficient task allocation
+# and minimizing bottlenecks. Specialized in orchestration, coordination, pipeline optimization,
+# task scheduling, resource allocation, workflow management, agent organization, and process optimization.
+# Best for tasks requiring complex agent interactions and workflow optimization.
+# """,
+# metadata={
+# "category": "optimization",
+# "complexity": "high",
+# },
+# ),
+# SwarmType(
+# name="MixtureOfAgents",
+# description="""
+# Combine diverse expert agents for comprehensive analysis, fostering a collaborative approach
+# to problem-solving and leveraging individual strengths. Focuses on multi-agent systems,
+# expert collaboration, distributed intelligence, collective problem solving, agent specialization,
+# team coordination, hybrid approaches, and knowledge synthesis. Ideal for complex problems
+# requiring multiple areas of expertise.
+# """,
+# metadata={
+# "category": "collaboration",
+# "complexity": "high",
+# },
+# ),
+# SwarmType(
+# name="SpreadSheetSwarm",
+# description="""
+# Collaborative data processing and analysis in a spreadsheet-like environment, facilitating
+# real-time data sharing and visualization. Specializes in data analysis, tabular processing,
+# collaborative editing, data transformation, spreadsheet operations, data visualization,
+# real-time collaboration, and structured data handling. Perfect for data-intensive tasks
+# requiring structured analysis.
+# """,
+# metadata={
+# "category": "data_processing",
+# "complexity": "medium",
+# },
+# ),
+# SwarmType(
+# name="SequentialWorkflow",
+# description="""
+# Execute tasks in a step-by-step, sequential process workflow, ensuring a logical and methodical
+# approach to task execution. Focuses on linear processing, waterfall methodology, step-by-step
+# execution, ordered tasks, sequential operations, process flow, systematic approach, and staged
+# execution. Best for tasks requiring strict order and dependencies.
+# """,
+# metadata={"category": "workflow", "complexity": "low"},
+# ),
+# SwarmType(
+# name="ConcurrentWorkflow",
+# description="""
+# Process multiple tasks or data sources concurrently in parallel, maximizing productivity
+# and reducing processing time. Specializes in parallel processing, multi-threading,
+# asynchronous execution, distributed computing, concurrent operations, simultaneous tasks,
+# parallel workflows, and scalable processing. Ideal for independent tasks that can be
+# processed simultaneously.
+# """,
+# metadata={"category": "workflow", "complexity": "medium"},
+# ),
+# ]
+
+# matcher.add_swarm_types(swarm_types)
+# logger.info("Initialized default swarm types")
+
+
+# def create_swarm_matcher(
+# persist_dir: str = "./chroma_db",
+# collection_name: str = "swarm_types",
+# ) -> SwarmMatcher:
+# """Convenience function to create and initialize a swarm matcher"""
+# config = SwarmMatcherConfig(
+# persist_directory=persist_dir, collection_name=collection_name
+# )
+# matcher = SwarmMatcher(config)
+# initialize_default_swarm_types(matcher)
+# return matcher
+
+
# # Example usage
-# if __name__ == "__main__":
+# def swarm_matcher(task: str) -> str:
# # Create and initialize matcher
# matcher = create_swarm_matcher()
-# # Example tasks
-# tasks = [
-# "Analyze this spreadsheet of sales data and create visualizations",
-# "Coordinate multiple AI agents to solve a complex problem",
-# "Process these tasks one after another in a specific order",
-# "Write multiple blog posts about the latest advancements in swarm intelligence all at once",
-# "Write a blog post about the latest advancements in swarm intelligence",
-# ]
+# swarm_type = matcher.auto_select_swarm(task)
+# print(f"Task: {task}\nSelected Swarm: {swarm_type}\n")
+
+# return swarm_type
+
-# # Process tasks
-# for task in tasks:
-# swarm_type = matcher.auto_select_swarm(task)
-# print(f"Task: {task}\nSelected Swarm: {swarm_type}\n")
+# # # Example usage
+# # if __name__ == "__main__":
+# # # Create and initialize matcher
+# # matcher = create_swarm_matcher()
+
+# # # Example tasks
+# # tasks = [
+# # "Analyze this spreadsheet of sales data and create visualizations",
+# # "Coordinate multiple AI agents to solve a complex problem",
+# # "Process these tasks one after another in a specific order",
+# # "Write multiple blog posts about the latest advancements in swarm intelligence all at once",
+# # "Write a blog post about the latest advancements in swarm intelligence",
+# # ]
+
+# # # Process tasks
+# # for task in tasks:
+# # swarm_type = matcher.auto_select_swarm(task)
+# # print(f"Task: {task}\nSelected Swarm: {swarm_type}\n")
diff --git a/swarms/structs/swarm_net.py b/swarms/structs/swarm_net.py
index 33be00de..dac0d0a2 100644
--- a/swarms/structs/swarm_net.py
+++ b/swarms/structs/swarm_net.py
@@ -19,7 +19,9 @@ from pydantic import BaseModel
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger("swarm-network")
# Pydantic models
diff --git a/swarms/structs/swarm_registry.py b/swarms/structs/swarm_registry.py
index b35aafb1..a4db3cb4 100644
--- a/swarms/structs/swarm_registry.py
+++ b/swarms/structs/swarm_registry.py
@@ -1,6 +1,8 @@
from pydantic.v1 import BaseModel
from typing import List, Callable
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="swarm_registry")
class SwarmRegistry(BaseModel):
diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py
index 7d1179d9..0d2ef9dd 100644
--- a/swarms/structs/swarm_router.py
+++ b/swarms/structs/swarm_router.py
@@ -2,18 +2,24 @@ import uuid
from datetime import datetime
from typing import Any, Callable, Dict, List, Literal, Union
-from loguru import logger
+from doc_master import doc_master
from pydantic import BaseModel, Field
+from tenacity import retry, stop_after_attempt, wait_fixed
+from swarms.prompts.ag_prompt import aggregator_system_prompt
from swarms.structs.agent import Agent
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.mixture_of_agents import MixtureOfAgents
from swarms.structs.rearrange import AgentRearrange
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
-from tenacity import retry, stop_after_attempt, wait_fixed
from swarms.structs.swarm_matcher import swarm_matcher
-from swarms.prompts.ag_prompt import aggregator_system_prompt
+from swarms.utils.wrapper_clusterop import (
+ exec_callable_with_clusterops,
+)
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="swarm_router")
SwarmType = Literal[
"AgentRearrange",
@@ -25,6 +31,11 @@ SwarmType = Literal[
]
+class Document(BaseModel):
+ file_path: str
+ data: str
+
+
class SwarmLog(BaseModel):
"""
A Pydantic model to capture log entries.
@@ -37,36 +48,78 @@ class SwarmLog(BaseModel):
swarm_type: SwarmType
task: str = ""
metadata: Dict[str, Any] = Field(default_factory=dict)
+ documents: List[Document] = []
class SwarmRouter:
"""
- A class to dynamically route tasks to different swarm types based on user selection or automatic matching.
+ A class that dynamically routes tasks to different swarm types based on user selection or automatic matching.
- This class enables users to specify a swarm type or let the system automatically determine the best swarm type for a given task. It then runs the task on the selected or matched swarm type, ensuring type validation, logging, and metadata capture.
+ The SwarmRouter enables flexible task execution by either using a specified swarm type or automatically determining
+ the most suitable swarm type for a given task. It handles task execution while managing logging, type validation,
+ and metadata capture.
+
+ Args:
+ name (str, optional): Name identifier for the SwarmRouter instance. Defaults to "swarm-router".
+ description (str, optional): Description of the SwarmRouter's purpose. Defaults to "Routes your task to the desired swarm".
+ max_loops (int, optional): Maximum number of execution loops. Defaults to 1.
+ agents (List[Union[Agent, Callable]], optional): List of Agent objects or callables to use. Defaults to empty list.
+ swarm_type (SwarmType, optional): Type of swarm to use. Defaults to "SequentialWorkflow".
+ autosave (bool, optional): Whether to enable autosaving. Defaults to False.
+ flow (str, optional): Flow configuration string. Defaults to None.
+ return_json (bool, optional): Whether to return results as JSON. Defaults to False.
+ auto_generate_prompts (bool, optional): Whether to auto-generate agent prompts. Defaults to False.
+ shared_memory_system (Any, optional): Shared memory system for agents. Defaults to None.
+ rules (str, optional): Rules to inject into every agent. Defaults to None.
+ documents (List[str], optional): List of document file paths to use. Defaults to empty list.
+ output_type (str, optional): Output format type. Defaults to "string".
Attributes:
- name (str): The name of the SwarmRouter instance.
- description (str): A description of the SwarmRouter instance.
- max_loops (int): The maximum number of loops to perform.
- agents (List[Union[Agent, Callable]]): A list of Agent objects to be used in the swarm.
- swarm_type (SwarmType): The type of swarm to be used, which can be specified or automatically determined.
- autosave (bool): A flag to enable/disable autosave.
- flow (str): The flow of the swarm.
- return_json (bool): A flag to enable/disable returning the result in JSON format.
- auto_generate_prompts (bool): A flag to enable/disable auto generation of prompts.
- swarm (Union[AgentRearrange, MixtureOfAgents, SpreadSheetSwarm, SequentialWorkflow, ConcurrentWorkflow]):
- The instantiated swarm object.
- logs (List[SwarmLog]): A list of log entries captured during operations.
- auto_generate_prompt (bool): A flag to enable/disable auto generation of prompts.
+ name (str): Name identifier for the SwarmRouter instance
+ description (str): Description of the SwarmRouter's purpose
+ max_loops (int): Maximum number of execution loops
+ agents (List[Union[Agent, Callable]]): List of Agent objects or callables
+ swarm_type (SwarmType): Type of swarm being used
+ autosave (bool): Whether autosaving is enabled
+ flow (str): Flow configuration string
+ return_json (bool): Whether results are returned as JSON
+ auto_generate_prompts (bool): Whether prompt auto-generation is enabled
+ shared_memory_system (Any): Shared memory system for agents
+ rules (str): Rules injected into every agent
+ documents (List[str]): List of document file paths
+ output_type (str): Output format type
+ logs (List[SwarmLog]): List of execution logs
+ swarm: The instantiated swarm object
Available Swarm Types:
- - AgentRearrange: Rearranges agents for optimal task execution.
- - MixtureOfAgents: Combines different types of agents for diverse task handling.
- - SpreadSheetSwarm: Utilizes spreadsheet-like operations for task management.
- - SequentialWorkflow: Executes tasks in a sequential manner.
- - ConcurrentWorkflow: Executes tasks concurrently for parallel processing.
- - "auto" will automatically conduct embedding search to find the best swarm for your task
+ - AgentRearrange: Optimizes agent arrangement for task execution
+ - MixtureOfAgents: Combines multiple agent types for diverse tasks
+ - SpreadSheetSwarm: Uses spreadsheet-like operations for task management
+ - SequentialWorkflow: Executes tasks sequentially
+ - ConcurrentWorkflow: Executes tasks in parallel
+ - "auto": Automatically selects best swarm type via embedding search
+
+ Methods:
+        run(task: str, device: str = "cpu", all_cores: bool = True, all_gpus: bool = False, *args, **kwargs) -> Any:
+ Executes a task using the configured swarm
+
+ batch_run(tasks: List[str], *args, **kwargs) -> List[Any]:
+ Executes multiple tasks in sequence
+
+ threaded_run(task: str, *args, **kwargs) -> Any:
+ Executes a task in a separate thread
+
+ async_run(task: str, *args, **kwargs) -> Any:
+ Executes a task asynchronously
+
+ concurrent_run(task: str, *args, **kwargs) -> Any:
+ Executes a task using concurrent execution
+
+ concurrent_batch_run(tasks: List[str], *args, **kwargs) -> List[Any]:
+ Executes multiple tasks concurrently
+
+ get_logs() -> List[SwarmLog]:
+ Retrieves execution logs
"""
def __init__(
@@ -78,8 +131,12 @@ class SwarmRouter:
swarm_type: SwarmType = "SequentialWorkflow", # "SpreadSheetSwarm" # "auto"
autosave: bool = False,
flow: str = None,
- return_json: bool = True,
+ return_json: bool = False,
auto_generate_prompts: bool = False,
+ shared_memory_system: Any = None,
+ rules: str = None,
+ documents: List[str] = [], # A list of docs file paths
+ output_type: str = "string", # Md, PDF, Txt, csv
*args,
**kwargs,
):
@@ -92,6 +149,10 @@ class SwarmRouter:
self.flow = flow
self.return_json = return_json
self.auto_generate_prompts = auto_generate_prompts
+ self.shared_memory_system = shared_memory_system
+ self.rules = rules
+ self.documents = documents
+ self.output_type = output_type
self.logs = []
self.reliability_check()
@@ -101,8 +162,50 @@ class SwarmRouter:
f"SwarmRouter initialized with swarm type: {swarm_type}",
)
+ # Handle Automated Prompt Engineering
self.activate_ape()
+ # Handle shared memory
+ if self.shared_memory_system is not None:
+ self.activate_shared_memory()
+
+ # Handle rules
+ if self.rules is not None:
+ self.handle_rules()
+
+ # if self.documents is not None:
+ # self.handle_docs()
+
+ def handle_docs(self):
+        # Load and concatenate the content of all configured documents
+ data = "".join(
+ [doc_master(file_path=doc) for doc in self.documents]
+ )
+
+ # Update all agents' prompts at once
+ doc_prompt = f"##### Documents Available ########## {data}"
+ for agent in self.agents:
+ agent.system_prompt += doc_prompt
+
+ # Add documents to the logs
+ # self.logs.append(Document(file_path=self.documents, data=data))
+
+ def activate_shared_memory(self):
+ logger.info("Activating shared memory with all agents ")
+
+ for agent in self.agents:
+ agent.long_term_memory = self.shared_memory_system
+
+ logger.info("All agents now have the same memory system")
+
+ def handle_rules(self):
+ logger.info("Injecting rules to every agent!")
+
+ for agent in self.agents:
+ agent.system_prompt += f"### Swarm Rules ### {self.rules}"
+
+ logger.info("Finished injecting rules")
+
def activate_ape(self):
"""Activate automatic prompt engineering for agents that support it"""
try:
@@ -134,7 +237,7 @@ class SwarmRouter:
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
def reliability_check(self):
- logger.info("Logger initializing checks")
+ logger.info("Initializing reliability checks")
if not self.agents:
raise ValueError("No agents provided for the swarm.")
@@ -143,7 +246,9 @@ class SwarmRouter:
if self.max_loops == 0:
raise ValueError("max_loops cannot be 0.")
- logger.info("Checks completed your swarm is ready.")
+ logger.info(
+ "Reliability checks completed your swarm is ready."
+ )
def _create_swarm(
self, task: str = None, *args, **kwargs
@@ -182,6 +287,7 @@ class SwarmRouter:
max_loops=self.max_loops,
flow=self.flow,
return_json=self.return_json,
+ output_type=self.output_type,
*args,
**kwargs,
)
@@ -189,7 +295,7 @@ class SwarmRouter:
return MixtureOfAgents(
name=self.name,
description=self.description,
- reference_agents=self.agents,
+ agents=self.agents,
aggregator_system_prompt=aggregator_system_prompt.get_prompt(),
aggregator_agent=self.agents[-1],
layers=self.max_loops,
@@ -206,12 +312,19 @@ class SwarmRouter:
*args,
**kwargs,
)
- elif self.swarm_type == "SequentialWorkflow":
+ elif (
+ self.swarm_type == "SequentialWorkflow"
+ or self.swarm_type == "sequential"
+ or self.swarm_type == "Sequential"
+ ):
return SequentialWorkflow(
name=self.name,
description=self.description,
agents=self.agents,
max_loops=self.max_loops,
+ shared_memory_system=self.shared_memory_system,
+ output_type=self.output_type,
+ return_json=self.return_json,
*args,
**kwargs,
)
@@ -227,7 +340,9 @@ class SwarmRouter:
**kwargs,
)
else:
- raise ValueError(f"Invalid swarm type: {self.swarm_type}")
+ raise ValueError(
+ f"Invalid swarm type: {self.swarm_type} try again with a valid swarm type such as 'SequentialWorkflow' or 'ConcurrentWorkflow' or 'auto' or 'AgentRearrange' or 'MixtureOfAgents' or 'SpreadSheetSwarm'"
+ )
def _log(
self,
@@ -256,7 +371,7 @@ class SwarmRouter:
logger.log(level.upper(), message)
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
- def run(self, task: str, *args, **kwargs) -> Any:
+ def _run(self, task: str, *args, **kwargs) -> Any:
"""
Dynamically run the specified task on the selected or matched swarm type.
@@ -281,6 +396,7 @@ class SwarmRouter:
metadata=kwargs,
)
result = self.swarm.run(task, *args, **kwargs)
+
self._log(
"success",
f"Task completed successfully on {self.swarm_type} swarm",
@@ -297,6 +413,56 @@ class SwarmRouter:
)
raise
+ def run(
+ self,
+ task: str,
+ device: str = "cpu",
+ all_cores: bool = True,
+ all_gpus: bool = False,
+ *args,
+ **kwargs,
+ ) -> Any:
+ """
+ Execute a task on the selected swarm type with specified compute resources.
+
+ Args:
+ task (str): The task to be executed by the swarm.
+ device (str, optional): Device to run on - "cpu" or "gpu". Defaults to "cpu".
+ all_cores (bool, optional): Whether to use all CPU cores. Defaults to True.
+ all_gpus (bool, optional): Whether to use all available GPUs. Defaults to False.
+ *args: Variable length argument list.
+ **kwargs: Arbitrary keyword arguments.
+
+ Returns:
+ Any: The result of the swarm's execution.
+
+ Raises:
+ Exception: If an error occurs during task execution.
+ """
+ return exec_callable_with_clusterops(
+ func=self._run,
+ device=device,
+ all_cores=all_cores,
+ all_gpus=all_gpus,
+ task=task,
+ *args,
+ **kwargs,
+ )
+
+ def __call__(self, task: str, *args, **kwargs) -> Any:
+ """
+ Make the SwarmRouter instance callable.
+
+ Args:
+ task (str): The task to be executed by the swarm.
+ *args: Variable length argument list.
+ **kwargs: Arbitrary keyword arguments.
+
+ Returns:
+ Any: The result of the swarm's execution.
+ """
+ return self.run(task=task, *args, **kwargs)
+
def batch_run(
self, tasks: List[str], *args, **kwargs
) -> List[Any]:
@@ -446,15 +612,29 @@ class SwarmRouter:
Raises:
Exception: If an error occurs during task execution.
"""
- from concurrent.futures import ThreadPoolExecutor
+ from concurrent.futures import (
+ ThreadPoolExecutor,
+ as_completed,
+ )
+ results = []
with ThreadPoolExecutor() as executor:
+ # Submit all tasks to executor
futures = [
executor.submit(self.run, task, *args, **kwargs)
for task in tasks
]
- results = [future.result() for future in futures]
- return results
+
+ # Process results as they complete rather than waiting for all
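+            # Note: results are appended in completion order, which may
+            # differ from the order of the input tasks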
+ for future in as_completed(futures):
+ try:
+ result = future.result()
+ results.append(result)
+ except Exception as e:
+ logger.error(f"Task execution failed: {str(e)}")
+ results.append(None)
+
+ return results
def swarm_router(
@@ -468,6 +648,7 @@ def swarm_router(
return_json: bool = True,
auto_generate_prompts: bool = False,
task: str = None,
+ rules: str = None,
*args,
**kwargs,
) -> SwarmRouter:
@@ -518,11 +699,14 @@ def swarm_router(
flow=flow,
return_json=return_json,
auto_generate_prompts=auto_generate_prompts,
+ rules=rules,
)
logger.info(f"Executing task with SwarmRouter: {task}")
result = swarm_router.run(task, *args, **kwargs)
- logger.info("Task execution completed successfully")
+ logger.info(
+ f"Task execution completed successfully: {result}"
+ )
return result
except ValueError as e:
diff --git a/swarms/structs/swarming_architectures.py b/swarms/structs/swarming_architectures.py
index cd87a155..ce840023 100644
--- a/swarms/structs/swarming_architectures.py
+++ b/swarms/structs/swarming_architectures.py
@@ -2,11 +2,13 @@ import asyncio
import math
from typing import List, Union
-from loguru import logger
from pydantic import BaseModel
from swarms.structs.agent import Agent
from swarms.structs.omni_agent_types import AgentListType
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="swarming_architectures")
# Define Pydantic schema for logging agent responses
diff --git a/swarms/structs/task.py b/swarms/structs/task.py
index 70293426..fc73dea9 100644
--- a/swarms/structs/task.py
+++ b/swarms/structs/task.py
@@ -9,8 +9,10 @@ from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.structs.omni_agent_types import AgentType
-from swarms.utils.loguru_logger import logger
from typing import Optional
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="task")
class Task(BaseModel):
diff --git a/swarms/structs/tree_swarm.py b/swarms/structs/tree_swarm.py
index ceb15800..56b46642 100644
--- a/swarms/structs/tree_swarm.py
+++ b/swarms/structs/tree_swarm.py
@@ -3,11 +3,13 @@ from collections import Counter
from datetime import datetime
from typing import Any, List, Optional
-from loguru import logger
from pydantic import BaseModel, Field
from sentence_transformers import SentenceTransformer, util
-from swarms import Agent
+from swarms.structs.agent import Agent
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="tree_swarm")
# Pretrained model for embeddings
embedding_model = SentenceTransformer(
diff --git a/swarms/structs/workspace_manager.py b/swarms/structs/workspace_manager.py
new file mode 100644
index 00000000..cec3615d
--- /dev/null
+++ b/swarms/structs/workspace_manager.py
@@ -0,0 +1,176 @@
+import os
+from pathlib import Path
+from typing import Optional
+from swarms.utils.loguru_logger import initialize_logger
+
+
+logger = initialize_logger("workspace-manager")
+
+
+class WorkspaceManager:
+ """
+ Manages the workspace directory and settings for the application.
+ This class is responsible for setting up the workspace directory, logging configuration,
+ and retrieving environment variables for telemetry and API key.
+ """
+
+ def __init__(
+ self,
+ workspace_dir: Optional[str] = "agent_workspace",
+ use_telemetry: Optional[bool] = True,
+ api_key: Optional[str] = None,
+ ):
+ """
+ Initializes the WorkspaceManager with optional parameters for workspace directory,
+ telemetry usage, and API key.
+
+ Args:
+ workspace_dir (Optional[str]): The path to the workspace directory.
+ use_telemetry (Optional[bool]): A flag indicating whether to use telemetry.
+ api_key (Optional[str]): The API key for the application.
+ """
+ self.workspace_dir = workspace_dir
+ self.use_telemetry = use_telemetry
+ self.api_key = api_key
+
+ def _create_env_file(self, env_file_path: Path) -> None:
+ """
+ Create a new .env file with default WORKSPACE_DIR.
+
+ Args:
+ env_file_path (Path): The path to the .env file.
+ """
+ with env_file_path.open("w") as file:
+ file.write("WORKSPACE_DIR=agent_workspace\n")
+ logger.info(
+ "Created a new .env file with default WORKSPACE_DIR."
+ )
+
+ def _append_to_env_file(self, env_file_path: Path) -> None:
+ """
+ Append WORKSPACE_DIR to .env if it doesn't exist.
+
+ Args:
+ env_file_path (Path): The path to the .env file.
+ """
+ with env_file_path.open("r+") as file:
+ content = file.read()
+ if "WORKSPACE_DIR" not in content:
+ file.seek(0, os.SEEK_END)
+ file.write("WORKSPACE_DIR=agent_workspace\n")
+ logger.info("Appended WORKSPACE_DIR to .env file.")
+
+ def _get_workspace_dir(
+ self, workspace_dir: Optional[str] = None
+ ) -> str:
+ """
+ Get the workspace directory from environment variable or default.
+
+ Args:
+ workspace_dir (Optional[str]): The path to the workspace directory.
+
+ Returns:
+ str: The path to the workspace directory.
+ """
+ return workspace_dir or os.getenv(
+ "WORKSPACE_DIR", "agent_workspace"
+ )
+
+ def _get_telemetry_status(
+ self, use_telemetry: Optional[bool] = None
+ ) -> bool:
+ """
+ Get telemetry status from environment variable or default.
+
+ Args:
+ use_telemetry (Optional[bool]): A flag indicating whether to use telemetry.
+
+ Returns:
+ bool: The status of telemetry usage.
+ """
+ return (
+ use_telemetry
+ if use_telemetry is not None
+ else os.getenv("USE_TELEMETRY", "true").lower() == "true"
+ )
+
+ def _get_api_key(
+ self, api_key: Optional[str] = None
+ ) -> Optional[str]:
+ """
+ Get API key from environment variable or default.
+
+ Args:
+ api_key (Optional[str]): The API key for the application.
+
+ Returns:
+ Optional[str]: The API key or None if not set.
+ """
+ return api_key or os.getenv("SWARMS_API_KEY")
+
+ def _init_workspace(self) -> None:
+ """
+ Initialize the workspace directory if it doesn't exist.
+ """
+ if not self.workspace_path.exists():
+ self.workspace_path.mkdir(parents=True, exist_ok=True)
+ logger.info("Workspace directory initialized.")
+
+ @property
+ def get_workspace_path(self) -> Path:
+ """
+ Get the workspace path.
+
+ Returns:
+ Path: The path to the workspace directory.
+ """
+ return self.workspace_path
+
+ @property
+ def get_telemetry_status(self) -> bool:
+ """
+ Get telemetry status.
+
+ Returns:
+ bool: The status of telemetry usage.
+ """
+ return self.use_telemetry
+
+ @property
+ def get_api_key(self) -> Optional[str]:
+ """
+ Get API key.
+
+ Returns:
+ Optional[str]: The API key or None if not set.
+ """
+ return self.api_key
+
+ def run(self) -> None:
+ try:
+ # Check if .env file exists and create it if it doesn't
+ env_file_path = Path(".env")
+ if not env_file_path.exists():
+ self._create_env_file(env_file_path)
+ else:
+ # Append WORKSPACE_DIR to .env if it doesn't exist
+ self._append_to_env_file(env_file_path)
+
+ # Set workspace directory
+ self.workspace_dir = self._get_workspace_dir(
+ self.workspace_dir
+ )
+ self.workspace_path = Path(self.workspace_dir)
+
+ # Set telemetry preference
+ self.use_telemetry = self._get_telemetry_status(
+ self.use_telemetry
+ )
+
+ # Set API key
+ self.api_key = self._get_api_key(self.api_key)
+
+ # Initialize workspace
+ self._init_workspace()
+ except Exception as e:
+ logger.error(f"Error initializing WorkspaceManager: {e}")
diff --git a/swarms/telemetry/auto_upgrade_swarms.py b/swarms/telemetry/auto_upgrade_swarms.py
index 410b0332..440f70ed 100644
--- a/swarms/telemetry/auto_upgrade_swarms.py
+++ b/swarms/telemetry/auto_upgrade_swarms.py
@@ -1,19 +1,39 @@
+import os
import subprocess
-from loguru import logger
-
+from swarms.utils.loguru_logger import initialize_logger
from swarms.telemetry.check_update import check_for_update
+logger = initialize_logger(log_folder="auto_upgrade_swarms")
+
def auto_update():
"""auto update swarms"""
try:
+ # Check if auto-update is disabled
+ auto_update_enabled = os.getenv(
+ "SWARMS_AUTOUPDATE_ON", "false"
+ ).lower()
+ if auto_update_enabled == "false":
+ logger.info(
+ "Auto-update is disabled via SWARMS_AUTOUPDATE_ON"
+ )
+ return
+
outcome = check_for_update()
if outcome is True:
logger.info(
"There is a new version of swarms available! Downloading..."
)
- subprocess.run(["pip", "install", "-U", "swarms"])
+ try:
+ subprocess.run(
+ ["pip", "install", "-U", "swarms"], check=True
+ )
+ except subprocess.CalledProcessError:
+ logger.info("Attempting to install with pip3...")
+ subprocess.run(
+ ["pip3", "install", "-U", "swarms"], check=True
+ )
else:
logger.info("swarms is up to date!")
except Exception as e:
diff --git a/swarms/telemetry/bootup.py b/swarms/telemetry/bootup.py
index 24d7a7c4..41cae773 100644
--- a/swarms/telemetry/bootup.py
+++ b/swarms/telemetry/bootup.py
@@ -9,18 +9,22 @@ from swarms.utils.disable_logging import disable_logging
def bootup():
"""Bootup swarms"""
- logging.disable(logging.CRITICAL)
- os.environ["WANDB_SILENT"] = "true"
+ try:
+ logging.disable(logging.CRITICAL)
+ os.environ["WANDB_SILENT"] = "true"
- # Auto set workspace directory
- workspace_dir = os.path.join(os.getcwd(), "agent_workspace")
- if not os.path.exists(workspace_dir):
- os.makedirs(workspace_dir)
- os.environ["WORKSPACE_DIR"] = workspace_dir
+ # Auto set workspace directory
+ workspace_dir = os.path.join(os.getcwd(), "agent_workspace")
+ if not os.path.exists(workspace_dir):
+ os.makedirs(workspace_dir, exist_ok=True)
+ os.environ["WORKSPACE_DIR"] = workspace_dir
- warnings.filterwarnings("ignore", category=DeprecationWarning)
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
- # Use ThreadPoolExecutor to run disable_logging and auto_update concurrently
- with ThreadPoolExecutor(max_workers=2) as executor:
- executor.submit(disable_logging)
- executor.submit(auto_update)
+ # Use ThreadPoolExecutor to run disable_logging and auto_update concurrently
+ with ThreadPoolExecutor(max_workers=2) as executor:
+ executor.submit(disable_logging)
+ executor.submit(auto_update)
+ except Exception as e:
+ print(f"An error occurred: {str(e)}")
+ raise
diff --git a/swarms/telemetry/capture_sys_data.py b/swarms/telemetry/capture_sys_data.py
index de9bdc9b..09d94a70 100644
--- a/swarms/telemetry/capture_sys_data.py
+++ b/swarms/telemetry/capture_sys_data.py
@@ -2,10 +2,13 @@ import platform
import socket
import psutil
import uuid
-from loguru import logger
from typing import Dict
import requests
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="capture_sys_data")
+
def capture_system_data() -> Dict[str, str]:
"""
diff --git a/swarms/telemetry/check_update.py b/swarms/telemetry/check_update.py
index a7e2384a..2b0b9a1c 100644
--- a/swarms/telemetry/check_update.py
+++ b/swarms/telemetry/check_update.py
@@ -4,10 +4,22 @@ import sys
import pkg_resources
import requests
from packaging import version
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger("check-update")
# borrowed from: https://stackoverflow.com/a/1051266/656011
def check_for_package(package: str) -> bool:
+ """
+ Checks if a package is installed and available for import.
+
+ Args:
+ package (str): The name of the package to check.
+
+ Returns:
+ bool: True if the package is installed and can be imported, False otherwise.
+ """
if package in sys.modules:
return True
elif (spec := importlib.util.find_spec(package)) is not None:
@@ -19,24 +31,43 @@ def check_for_package(package: str) -> bool:
return True
except ImportError:
+ logger.error(f"Failed to import {package}")
return False
else:
+ logger.info(f"{package} not found")
return False
def check_for_update() -> bool:
- """Check for updates
+ """
+ Checks if there is an update available for the swarms package.
Returns:
- BOOL: Flag to indicate if there is an update
+ bool: True if an update is available, False otherwise.
"""
- # Fetch the latest version from the PyPI API
- response = requests.get("https://pypi.org/pypi/swarms/json")
- latest_version = response.json()["info"]["version"]
+ try:
+ # Fetch the latest version from the PyPI API
+ response = requests.get("https://pypi.org/pypi/swarms/json")
+ response.raise_for_status() # Raises an HTTPError if the response status code is 4XX/5XX
+ latest_version = response.json()["info"]["version"]
- # Get the current version using pkg_resources
- current_version = pkg_resources.get_distribution("swarms").version
+ # Get the current version using pkg_resources
+ current_version = pkg_resources.get_distribution(
+ "swarms"
+ ).version
- return version.parse(latest_version) > version.parse(
- current_version
- )
+ if version.parse(latest_version) > version.parse(
+ current_version
+ ):
+ logger.info(
+ f"Update available: {latest_version} > {current_version}"
+ )
+ return True
+ else:
+ logger.info(
+ f"No update available: {latest_version} <= {current_version}"
+ )
+ return False
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Failed to check for update: {e}")
+ return False
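+
+
+# Illustrative usage (a minimal sketch):
+#
+# if check_for_update():
+#     print("A newer release of swarms is available on PyPI")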
diff --git a/swarms/telemetry/log_swarm_data.py b/swarms/telemetry/log_swarm_data.py
deleted file mode 100644
index ffb72ab4..00000000
--- a/swarms/telemetry/log_swarm_data.py
+++ /dev/null
@@ -1,16 +0,0 @@
-def log_agent_data(data: dict):
- import requests
-
- data_dict = {
- "data": data,
- }
-
- url = "https://swarms.world/api/get-agents/log-agents"
- headers = {
- "Content-Type": "application/json",
- "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
- }
-
- response = requests.post(url, json=data_dict, headers=headers)
-
- return response.json()
diff --git a/swarms/telemetry/sys_info.py b/swarms/telemetry/sys_info.py
index c4a0692a..2739362f 100644
--- a/swarms/telemetry/sys_info.py
+++ b/swarms/telemetry/sys_info.py
@@ -10,7 +10,12 @@ def get_python_version():
return platform.python_version()
-def get_pip_version():
+def get_pip_version() -> str:
+ """Get pip version
+
+ Returns:
+ str: The version of pip installed
+ """
try:
pip_version = (
subprocess.check_output(["pip", "--version"])
@@ -22,7 +27,12 @@ def get_pip_version():
return pip_version
-def get_swarms_verison():
+def get_swarms_verison() -> tuple[str, str]:
+ """Get swarms version from both command line and package
+
+ Returns:
+ tuple[str, str]: A tuple containing (command line version, package version)
+ """
try:
swarms_verison_cmd = (
subprocess.check_output(["swarms", "--version"])
@@ -38,15 +48,30 @@ def get_swarms_verison():
return swarms_verison
-def get_os_version():
+def get_os_version() -> str:
+ """Get operating system version
+
+ Returns:
+ str: The operating system version and platform details
+ """
return platform.platform()
-def get_cpu_info():
+def get_cpu_info() -> str:
+ """Get CPU information
+
+ Returns:
+ str: The processor information
+ """
return platform.processor()
-def get_ram_info():
+def get_ram_info() -> str:
+ """Get RAM information
+
+ Returns:
+ str: A formatted string containing total, used and free RAM in GB
+ """
vm = psutil.virtual_memory()
used_ram_gb = vm.used / (1024**3)
free_ram_gb = vm.free / (1024**3)
@@ -57,7 +82,15 @@ def get_ram_info():
)
-def get_package_mismatches(file_path="pyproject.toml"):
+def get_package_mismatches(file_path: str = "pyproject.toml") -> str:
+ """Get package version mismatches between pyproject.toml and installed packages
+
+ Args:
+ file_path (str, optional): Path to pyproject.toml file. Defaults to "pyproject.toml".
+
+ Returns:
+ str: A formatted string containing package version mismatches
+ """
with open(file_path) as file:
pyproject = toml.load(file)
dependencies = pyproject["tool"]["poetry"]["dependencies"]
@@ -89,7 +122,12 @@ def get_package_mismatches(file_path="pyproject.toml"):
return "\n" + "\n".join(mismatches)
-def system_info():
+def system_info() -> dict[str, str]:
+ """Get system information including Python, pip, OS, CPU and RAM details
+
+ Returns:
+ dict[str, str]: A dictionary containing system information
+ """
return {
"Python Version": get_python_version(),
"Pip Version": get_pip_version(),
diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py
index 519ddc8c..dcb81974 100644
--- a/swarms/tools/base_tool.py
+++ b/swarms/tools/base_tool.py
@@ -14,7 +14,9 @@ from swarms.tools.pydantic_to_json import (
base_model_to_openai_function,
multi_base_model_to_openai_function,
)
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="base_tool")
ToolType = Union[BaseModel, Dict[str, Any], Callable[..., Any]]
diff --git a/swarms/tools/e2b_tool.py b/swarms/tools/e2b_tool.py
deleted file mode 100644
index 5f8ef4d9..00000000
--- a/swarms/tools/e2b_tool.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import subprocess
-import sys
-from loguru import logger
-from typing import Tuple, Union, List
-from e2b_code_interpreter import CodeInterpreter
-
-# load_dotenv()
-
-
-# Helper function to lazily install the package if not found
-def lazy_install(package: str) -> None:
- try:
- __import__(package)
- except ImportError:
- logger.warning(f"{package} not found. Installing now...")
- subprocess.check_call(
- [sys.executable, "-m", "pip", "install", package]
- )
-
-
-# Ensure e2b_code_interpreter is installed lazily
-lazy_install("e2b_code_interpreter")
-
-
-def code_interpret(
- code_interpreter: CodeInterpreter, code: str
-) -> Union[Tuple[List[str], List[str]], None]:
- """
- Runs AI-generated code using the provided CodeInterpreter and logs the process.
-
- Args:
- code_interpreter (CodeInterpreter): An instance of the CodeInterpreter class.
- code (str): The code string to be executed.
-
- Returns:
- Union[Tuple[List[str], List[str]], None]: A tuple of (results, logs) if successful,
- or None if an error occurred.
-
- Raises:
- ValueError: If the code or code_interpreter is invalid.
- """
- if not isinstance(code_interpreter, CodeInterpreter):
- logger.error("Invalid CodeInterpreter instance provided.")
- raise ValueError(
- "code_interpreter must be an instance of CodeInterpreter."
- )
- if not isinstance(code, str) or not code.strip():
- logger.error("Invalid code provided.")
- raise ValueError("code must be a non-empty string.")
-
- logger.info(
- f"\n{'='*50}\n> Running the following AI-generated code:\n{code}\n{'='*50}"
- )
-
- try:
- exec_result = code_interpreter.notebook.exec_cell(
- code,
- # on_stderr=lambda stderr: logger.error(f"[Code Interpreter stderr] {stderr}"),
- # on_stdout=lambda stdout: logger.info(f"[Code Interpreter stdout] {stdout}")
- )
-
- if exec_result.error:
- logger.error(
- f"[Code Interpreter error] {exec_result.error}"
- )
- return None
- else:
- logger.success("Code executed successfully.")
- # return exec_result.results, exec_result.logs
- # return exec_result.results
- prompt = f"{exec_result.results}: {exec_result.logs}"
- return prompt
-
- except Exception:
- logger.exception(
- "An error occurred during code interpretation."
- )
- return None
-
-
-# # from e2b_code_interpreter import CodeInterpreter
-
-# interpreter = CodeInterpreter()
-# code = "print('Hello, World!')"
-
-# result = code_interpret(interpreter, code)
-
-# if result:
-# results = result
-# print("Execution Results:", results)
-# # print("Execution Logs:", logs)
diff --git a/swarms/tools/func_calling_executor.py b/swarms/tools/func_calling_executor.py
index 5cc0e4b5..65d95a73 100644
--- a/swarms/tools/func_calling_executor.py
+++ b/swarms/tools/func_calling_executor.py
@@ -1,7 +1,8 @@
import concurrent.futures
from typing import Callable, Any, Dict, List
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+logger = initialize_logger(log_folder="func_calling_executor")
# def openai_tool_executor(
# tools: List[Dict[str, Any]],
diff --git a/swarms/tools/json_former.py b/swarms/tools/json_former.py
index 01d608a5..dcca9932 100644
--- a/swarms/tools/json_former.py
+++ b/swarms/tools/json_former.py
@@ -1,7 +1,6 @@
import json
from typing import Any, Dict, List, Union
-from termcolor import cprint
from transformers import PreTrainedModel, PreTrainedTokenizer
from pydantic import BaseModel
from swarms.tools.logits_processor import (
@@ -68,15 +67,6 @@ class Jsonformer:
self.temperature = temperature
self.max_string_token_length = max_string_token_length
- def debug(self, caller: str, value: str, is_prompt: bool = False):
- if self.debug_on:
- if is_prompt:
- cprint(caller, "green", end=" ")
- cprint(value, "yellow")
- else:
- cprint(caller, "green", end=" ")
- cprint(value, "blue")
-
def generate_number(
self, temperature: Union[float, None] = None, iterations=0
):
diff --git a/swarms/tools/pydantic_to_json.py b/swarms/tools/pydantic_to_json.py
index 7c64ea8e..1f6521df 100644
--- a/swarms/tools/pydantic_to_json.py
+++ b/swarms/tools/pydantic_to_json.py
@@ -2,7 +2,9 @@ from typing import Any, List
from docstring_parser import parse
from pydantic import BaseModel
-from swarms.utils.loguru_logger import logger
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger("pydantic_to_json")
def _remove_a_key(d: dict, remove_key: str) -> None:
diff --git a/swarms/tools/tool_parse_exec.py b/swarms/tools/tool_parse_exec.py
index 8686781a..7cc4369f 100644
--- a/swarms/tools/tool_parse_exec.py
+++ b/swarms/tools/tool_parse_exec.py
@@ -1,8 +1,10 @@
import json
from typing import List, Any, Callable
-from swarms.utils.loguru_logger import logger
from swarms.utils.parse_code import extract_code_from_markdown
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="tool_parse_exec")
def parse_and_execute_json(
diff --git a/swarms/tools/tool_registry.py b/swarms/tools/tool_registry.py
index f28ed40c..385eed1b 100644
--- a/swarms/tools/tool_registry.py
+++ b/swarms/tools/tool_registry.py
@@ -1,9 +1,11 @@
import os
from typing import Any, Callable, Dict, List, Optional
import time
-from loguru import logger
from pydantic import BaseModel, Field
from concurrent.futures import ThreadPoolExecutor, as_completed
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="tool_registry")
class ToolMetadata(BaseModel):
diff --git a/swarms/tools/tool_utils.py b/swarms/tools/tool_utils.py
index 9076e2d1..b448d7a9 100644
--- a/swarms/tools/tool_utils.py
+++ b/swarms/tools/tool_utils.py
@@ -3,8 +3,7 @@ from typing import Any, List
import inspect
from typing import Callable
-
-from termcolor import colored
+from swarms.utils.formatter import formatter
def scrape_tool_func_docs(fn: Callable) -> str:
@@ -37,17 +36,16 @@ def scrape_tool_func_docs(fn: Callable) -> str:
f" {inspect.getdoc(fn)}\nParameters:\n{parameters_str}"
)
except Exception as error:
- print(
- colored(
- (
- f"Error scraping tool function docs {error} try"
- " optimizing your inputs with different"
- " variables and attempt once more."
- ),
- "red",
- )
+        formatter.print_panel(
+            f"Error scraping tool function docs: {error}. Try"
+            " optimizing your inputs with different"
+            " variables and attempt once more."
+        )
+ raise error
+
def tool_find_by_name(tool_name: str, tools: List[Any]):
"""Find the tool by name"""
diff --git a/swarms/utils/__init__.py b/swarms/utils/__init__.py
index 5c8496a8..0a825caf 100644
--- a/swarms/utils/__init__.py
+++ b/swarms/utils/__init__.py
@@ -17,7 +17,6 @@ from swarms.tools.prebuilt.math_eval import math_eval
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.utils.try_except_wrapper import try_except_wrapper
-from swarms.utils.concurrent_utils import execute_concurrently
from swarms.utils.calculate_func_metrics import profile_func
@@ -37,6 +36,5 @@ __all__ = [
"extract_code_from_markdown",
"pdf_to_text",
"try_except_wrapper",
- "execute_concurrently",
"profile_func",
]
diff --git a/swarms/utils/add_docs_to_agents.py b/swarms/utils/add_docs_to_agents.py
new file mode 100644
index 00000000..85e3076c
--- /dev/null
+++ b/swarms/utils/add_docs_to_agents.py
@@ -0,0 +1,145 @@
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from pathlib import Path
+from typing import Any, List, Optional, Union
+
+from doc_master import doc_master
+from tenacity import retry, stop_after_attempt, wait_exponential
+
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="add_docs_to_agents")
+
+
+@retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+)
+def _process_document(doc_path: Union[str, Path]) -> str:
+ """Safely process a single document with retries.
+
+ Args:
+ doc_path: Path to the document to process
+
+ Returns:
+ Processed document text
+
+ Raises:
+ Exception: If document processing fails after retries
+ """
+ try:
+ return doc_master(
+ file_path=str(doc_path), output_type="string"
+ )
+ except Exception as e:
+ logger.error(
+ f"Error processing document {doc_path}: {str(e)}"
+ )
+ raise
+
+
+def handle_input_docs(
+ agents: Any,
+ docs: Optional[List[Union[str, Path]]] = None,
+ doc_folder: Optional[Union[str, Path]] = None,
+ max_workers: int = 4,
+ chunk_size: int = 1000000,
+) -> Any:
+ """
+ Add document content to agent prompts with improved reliability and performance.
+
+ Args:
+ agents: Dictionary mapping agent names to Agent objects
+ docs: List of document paths
+ doc_folder: Path to folder containing documents
+ max_workers: Maximum number of parallel document processing workers
+ chunk_size: Maximum characters to process at once to avoid memory issues
+
+ Raises:
+ ValueError: If neither docs nor doc_folder is provided
+ RuntimeError: If document processing fails
+ """
+ if not agents:
+ logger.warning(
+ "No agents provided, skipping document distribution"
+ )
+ return
+
+ if not docs and not doc_folder:
+ logger.warning(
+ "No documents or folder provided, skipping document distribution"
+ )
+ return
+
+ logger.info("Starting document distribution to agents")
+
+ try:
+ processed_docs = []
+
+ # Process individual documents in parallel
+ if docs:
+ with ThreadPoolExecutor(
+ max_workers=max_workers
+ ) as executor:
+ future_to_doc = {
+ executor.submit(_process_document, doc): doc
+ for doc in docs
+ }
+
+ for future in as_completed(future_to_doc):
+ doc = future_to_doc[future]
+ try:
+ processed_docs.append(future.result())
+ except Exception as e:
+ logger.error(
+ f"Failed to process document {doc}: {str(e)}"
+ )
+ raise RuntimeError(
+ f"Document processing failed: {str(e)}"
+ )
+
+ # Process folder if specified
+ elif doc_folder:
+ try:
+ folder_content = doc_master(
+ folder_path=str(doc_folder), output_type="string"
+ )
+ processed_docs.append(folder_content)
+ except Exception as e:
+ logger.error(
+ f"Failed to process folder {doc_folder}: {str(e)}"
+ )
+ raise RuntimeError(
+ f"Folder processing failed: {str(e)}"
+ )
+
+ # Combine and chunk the processed documents
+ combined_data = "\n".join(processed_docs)
+
+ # Update agent prompts in chunks to avoid memory issues
+ for agent in agents.values():
+ try:
+ for i in range(0, len(combined_data), chunk_size):
+ chunk = combined_data[i : i + chunk_size]
+ if i == 0:
+ agent.system_prompt += (
+ "\nDocuments:\n" + chunk
+ )
+ else:
+ agent.system_prompt += chunk
+ except Exception as e:
+ logger.error(
+ f"Failed to update agent prompt: {str(e)}"
+ )
+ raise RuntimeError(
+ f"Agent prompt update failed: {str(e)}"
+ )
+
+ logger.info(
+ f"Successfully added documents to {len(agents)} agents"
+ )
+
+ return agents
+
+ except Exception as e:
+ logger.error(f"Document distribution failed: {str(e)}")
+ raise RuntimeError(f"Document distribution failed: {str(e)}")
diff --git a/swarms/utils/any_to_str.py b/swarms/utils/any_to_str.py
new file mode 100644
index 00000000..2b0e3809
--- /dev/null
+++ b/swarms/utils/any_to_str.py
@@ -0,0 +1,102 @@
+from typing import Union, Dict, List, Tuple, Any
+
+
+def any_to_str(data: Union[str, Dict, List, Tuple, Any]) -> str:
+ """Convert any input data type to a nicely formatted string.
+
+ This function handles conversion of various Python data types into a clean string representation.
+ It recursively processes nested data structures and handles None values gracefully.
+
+ Args:
+ data: Input data of any type to convert to string. Can be:
+ - Dictionary
+ - List/Tuple
+ - String
+ - None
+ - Any other type that can be converted via str()
+
+ Returns:
+ str: A formatted string representation of the input data.
+            - Dictionaries are formatted as "key: value" pairs, one per line
+            - Lists/tuples are comma-separated and wrapped in []/()
+            - None is returned as the string "None"
+            - Strings are wrapped in double quotes; other types use str()
+
+    Examples:
+        >>> any_to_str({'a': 1, 'b': 2})
+        'a: 1\nb: 2'
+        >>> any_to_str([1, 2, 3])
+        '[1, 2, 3]'
+        >>> any_to_str(None)
+        'None'
+ """
+ try:
+ if isinstance(data, dict):
+            # Format dictionary as newline-separated "key: value" pairs
+ items = []
+ for k, v in data.items():
+ value = any_to_str(v)
+ items.append(f"{k}: {value}")
+ return "\n".join(items)
+
+ elif isinstance(data, (list, tuple)):
+ # Format sequences with brackets and proper spacing
+ items = [any_to_str(x) for x in data]
+ if len(items) == 0:
+ return "[]" if isinstance(data, list) else "()"
+ return (
+ f"[{', '.join(items)}]"
+ if isinstance(data, list)
+ else f"({', '.join(items)})"
+ )
+
+ elif data is None:
+ return "None"
+
+ else:
+ # Handle strings and other types
+ if isinstance(data, str):
+ return f'"{data}"'
+ return str(data)
+
+ except Exception as e:
+ return f"Error converting data: {str(e)}"
+
+
+# def main():
+# # Example 1: Dictionary
+# print("Dictionary:")
+# print(
+# any_to_str(
+# {
+# "name": "John",
+# "age": 30,
+# "hobbies": ["reading", "hiking"],
+# }
+# )
+# )
+
+# print("\nNested Dictionary:")
+# print(
+# any_to_str(
+# {
+# "user": {
+# "id": 123,
+# "details": {"city": "New York", "active": True},
+# },
+# "data": [1, 2, 3],
+# }
+# )
+# )
+
+# print("\nList and Tuple:")
+# print(any_to_str([1, "text", None, (1, 2)]))
+# print(any_to_str((True, False, None)))
+
+# print("\nEmpty Collections:")
+# print(any_to_str([]))
+# print(any_to_str({}))
+
+
+# if __name__ == "__main__":
+# main()
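For reference, a short sketch of the formatting behavior implemented above (illustrative example; the outputs were traced from the code, not executed):

print(any_to_str({"a": 1, "b": [2, 3]}))
# a: 1
# b: [2, 3]

print(any_to_str(("x", None)))
# ("x", None)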
diff --git a/swarms/utils/async_file_creation.py b/swarms/utils/async_file_creation.py
index 90832db3..6c35e95d 100644
--- a/swarms/utils/async_file_creation.py
+++ b/swarms/utils/async_file_creation.py
@@ -58,3 +58,49 @@ async def create_file_with_directory(
os.makedirs(directory)
await async_create_file(file_path, content)
+
+
+def sync_create_file(file_path: str, content: str) -> None:
+ """
+ Synchronously creates a file at the specified path and writes the given content to it.
+
+ Args:
+ file_path (str): The path where the file will be created.
+ content (str): The content to be written to the file.
+
+ Returns:
+ None
+ """
+ asyncio.run(async_create_file(file_path, content))
+
+
+def sync_create_multiple_files(
+ file_paths: List[str], contents: List[str]
+) -> None:
+ """
+ Synchronously creates multiple files at the specified paths and writes the corresponding content to each file.
+
+ Args:
+ file_paths (List[str]): A list of paths where the files will be created.
+ contents (List[str]): A list of content to be written to each file, corresponding to the file paths.
+
+ Returns:
+ None
+ """
+ asyncio.run(create_multiple_files(file_paths, contents))
+
+
+def sync_create_file_with_directory(
+ file_path: str, content: str
+) -> None:
+ """
+ Synchronously creates a file with the specified directory path and content. If the directory does not exist, it is created.
+
+ Args:
+ file_path (str): The path of the file to be created, including the directory.
+ content (str): The content to be written to the file.
+
+ Returns:
+ None
+ """
+ asyncio.run(create_file_with_directory(file_path, content))
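A brief sketch of the new synchronous wrappers (illustrative only; file names are placeholders):

# Blocks until the underlying async write completes.
sync_create_file("hello.txt", "hello world")

# Creates the parent directory first if it does not exist.
sync_create_file_with_directory("output/report.txt", "quarterly summary")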
diff --git a/swarms/utils/calculate_func_metrics.py b/swarms/utils/calculate_func_metrics.py
index 1aacb3a9..795e7bb2 100644
--- a/swarms/utils/calculate_func_metrics.py
+++ b/swarms/utils/calculate_func_metrics.py
@@ -1,7 +1,14 @@
import time
+import tracemalloc
+from functools import wraps
+from typing import Any, Callable
+
import psutil
from pydantic import BaseModel
-from swarms.utils.loguru_logger import logger
+
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="calculate_func_metrics")
class FunctionMetrics(BaseModel):
@@ -70,3 +77,95 @@ def profile_func(func):
return result, metrics
return wrapper
+
+
+def profile_all(func: Callable) -> Callable:
+ """
+ A decorator to profile memory usage, CPU usage, and I/O operations
+ of a function and log the data using loguru.
+
+ It combines tracemalloc for memory profiling, psutil for CPU and I/O operations,
+ and measures execution time.
+
+ Args:
+ func (Callable): The function to be profiled.
+
+ Returns:
+ Callable: The wrapped function with profiling enabled.
+ """
+
+ @wraps(func)
+ def wrapper(*args: Any, **kwargs: Any) -> Any:
+ # Start memory tracking
+ tracemalloc.start()
+
+ # Get initial CPU stats
+ process = psutil.Process()
+ initial_cpu_times = process.cpu_times()
+
+ # Get initial I/O stats if available
+ try:
+ initial_io_counters = process.io_counters()
+ io_tracking_available = True
+ except AttributeError:
+ logger.warning(
+ "I/O counters not available on this platform."
+ )
+ io_tracking_available = False
+
+ # Start timing the function execution
+ start_time = time.time()
+
+ # Execute the function
+ result = func(*args, **kwargs)
+
+ # Stop timing
+ end_time = time.time()
+ execution_time = end_time - start_time
+
+ # Get final CPU stats
+ final_cpu_times = process.cpu_times()
+
+ # Get final I/O stats if available
+ if io_tracking_available:
+ final_io_counters = process.io_counters()
+ io_read_count = (
+ final_io_counters.read_count
+ - initial_io_counters.read_count
+ )
+ io_write_count = (
+ final_io_counters.write_count
+ - initial_io_counters.write_count
+ )
+ else:
+ io_read_count = io_write_count = 0
+
+ # Get memory usage statistics
+ snapshot = tracemalloc.take_snapshot()
+ top_stats = snapshot.statistics("lineno")
+
+ # Calculate CPU usage
+ cpu_usage = (
+ final_cpu_times.user
+ - initial_cpu_times.user
+ + final_cpu_times.system
+ - initial_cpu_times.system
+ )
+
+ # Log the data
+ logger.info(f"Execution time: {execution_time:.4f} seconds")
+ logger.info(f"CPU usage: {cpu_usage:.2f} seconds")
+ if io_tracking_available:
+ logger.info(
+ f"I/O Operations - Read: {io_read_count}, Write: {io_write_count}"
+ )
+ logger.info("Top memory usage:")
+ for stat in top_stats[:10]:
+ logger.info(stat)
+
+ # Stop memory tracking
+ tracemalloc.stop()
+
+ return result
+
+ return wrapper
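A minimal sketch of applying the relocated profile_all decorator (the workload is a placeholder):

@profile_all
def build_squares(n: int) -> list:
    # CPU- and memory-bound toy workload to exercise the profiler.
    return [i * i for i in range(n)]

# Logs execution time, CPU seconds, I/O counts (where available) and the
# top memory allocations, then returns the function's normal result.
squares = build_squares(1_000_000)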
diff --git a/swarms/utils/concurrent_utils.py b/swarms/utils/concurrent_utils.py
deleted file mode 100644
index becad4ca..00000000
--- a/swarms/utils/concurrent_utils.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import concurrent.futures
-from typing import List, Tuple, Any, Dict, Union, Callable
-
-
-def execute_concurrently(
- callable_functions: List[
- Tuple[Callable, Tuple[Any, ...], Dict[str, Any]]
- ],
- max_workers: int = 5,
-) -> List[Union[Any, Exception]]:
- """
- Executes callable functions concurrently using multithreading.
-
- Parameters:
- - callable_functions: A list of tuples, each containing the callable function and its arguments.
- For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})]
- - max_workers: The maximum number of threads to use.
-
- Returns:
- - results: A list of results returned by the callable functions. If an error occurs in any function,
- the exception object will be placed at the corresponding index in the list.
- """
- results = [None] * len(callable_functions)
-
- def worker(
- fn: Callable,
- args: Tuple[Any, ...],
- kwargs: Dict[str, Any],
- index: int,
- ) -> None:
- try:
- result = fn(*args, **kwargs)
- results[index] = result
- except Exception as e:
- results[index] = e
-
- with concurrent.futures.ThreadPoolExecutor(
- max_workers=max_workers
- ) as executor:
- futures = []
- for i, (fn, args, kwargs) in enumerate(callable_functions):
- futures.append(
- executor.submit(worker, fn, args, kwargs, i)
- )
-
- # Wait for all threads to complete
- concurrent.futures.wait(futures)
-
- return results
diff --git a/swarms/utils/data_to_text.py b/swarms/utils/data_to_text.py
index f4d12fc1..562f8098 100644
--- a/swarms/utils/data_to_text.py
+++ b/swarms/utils/data_to_text.py
@@ -137,53 +137,3 @@ def data_to_text(file: str) -> str:
return data
except Exception as e:
raise OSError(f"Error reading file: {file}") from e
-
-
-def data_to_text(file):
- """
- Converts the given data file to text format.
-
- Args:
- file (str): The path to the data file.
-
- Returns:
- str: The text representation of the data file.
-
- Raises:
- FileNotFoundError: If the file does not exist.
- IOError: If there is an error reading the file.
-
- Examples:
- >>> data_to_text("data.csv")
- 'This is the text representation of the data file.'
-
- """
- if not os.path.exists(file):
- raise FileNotFoundError(f"File not found: {file}")
-
- try:
- _, ext = os.path.splitext(file)
- ext = (
- ext.lower()
- ) # Convert extension to lowercase for case-insensitive comparison
- if ext == ".csv":
- return csv_to_text(file)
- elif ext == ".json":
- return json_to_text(file)
- elif ext == ".txt":
- return txt_to_text(file)
- elif ext == ".pdf":
- return pdf_to_text(file)
- elif ext == ".md":
- return md_to_text(file)
- else:
- # Check if the file is a binary file (like an image)
- if ext in [".png", ".jpg", ".jpeg", ".gif", ".bmp"]:
- # Skip binary files
- return None
- else:
- with open(file) as file:
- data = file.read()
- return data
- except Exception as e:
- raise OSError(f"Error reading file: {file}") from e
diff --git a/swarms/utils/decorators.py b/swarms/utils/decorators.py
deleted file mode 100644
index 3eed85bf..00000000
--- a/swarms/utils/decorators.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import functools
-import logging
-import threading
-import time
-import warnings
-
-
-def log_decorator(func):
- def wrapper(*args, **kwargs):
- logging.info(f"Entering {func.__name__}")
- result = func(*args, **kwargs)
- logging.info(f"Exiting {func.__name__}")
- return result
-
- return wrapper
-
-
-def error_decorator(func):
- def wrapper(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except Exception as e:
- logging.error(f"Error in {func.__name__}: {str(e)}")
- raise
-
- return wrapper
-
-
-def timing_decorator(func):
- def wrapper(*args, **kwargs):
- start_time = time.time()
- result = func(*args, **kwargs)
- end_time = time.time()
- logging.info(
- f"{func.__name__} executed in"
- f" {end_time - start_time} seconds"
- )
- return result
-
- return wrapper
-
-
-def retry_decorator(max_retries=5):
- """
- Decorator that retries a function a specified number of times if an exception occurs.
-
- Args:
- max_retries (int): The maximum number of times to retry the function.
-
- Returns:
- function: The decorated function.
-
- """
-
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- for _ in range(max_retries):
- try:
- return func(*args, **kwargs)
- except Exception as error:
- logging.error(
- f" Error in {func.__name__}:"
- f" {str(error)} Retrying ...."
- )
- return func(*args, **kwargs)
-
- return wrapper
-
- return decorator
-
-
-def singleton_decorator(cls):
- instances = {}
-
- def wrapper(*args, **kwargs):
- if cls not in instances:
- instances[cls] = cls(*args, **kwargs)
- return instances[cls]
-
- return wrapper
-
-
-def synchronized_decorator(func):
- func.__lock__ = threading.Lock()
-
- def wrapper(*args, **kwargs):
- with func.__lock__:
- return func(*args, **kwargs)
-
- return wrapper
-
-
-def deprecated_decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- warnings.warn(
- f"{func.__name__} is deprecated",
- category=DeprecationWarning,
- )
- return func(*args, **kwargs)
-
- return wrapper
-
-
-def validate_inputs_decorator(validator):
- def decorator(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- if not validator(*args, **kwargs):
- raise ValueError("Invalid Inputs")
- return func(*args, **kwargs)
-
- return wrapper
-
- return decorator
diff --git a/swarms/utils/exec_funcs_in_parallel.py b/swarms/utils/exec_funcs_in_parallel.py
deleted file mode 100644
index 95548603..00000000
--- a/swarms/utils/exec_funcs_in_parallel.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import time
-from os import cpu_count
-from typing import Any, Callable, List, Optional
-
-from loguru import logger
-from pathos.multiprocessing import ProcessingPool as Pool
-
-
-from typing import Tuple
-
-
-def execute_parallel_optimized(
- callables_with_args: List[
- Tuple[Callable[..., Any], Tuple[Any, ...]]
- ],
- max_workers: Optional[int] = None,
- chunk_size: Optional[int] = None,
- retries: int = 3,
- **kwargs,
-) -> List[Any]:
- """
- Executes a list of callables in parallel, leveraging all available CPU cores.
-
- This function is optimized for high performance and reliability.
-
- Args:
- callables_with_args (List[Tuple[Callable[..., Any], Tuple[Any, ...]]]):
- A list of tuples, where each tuple contains a callable and a tuple of its arguments.
- max_workers (Optional[int]): The maximum number of workers to use. Defaults to the number of available cores.
- chunk_size (Optional[int]): The size of chunks to split the tasks into for balanced execution. Defaults to automatic chunking.
- retries (int): Number of retries for a failed task. Default is 3.
-
- Returns:
- List[Any]: A list of results from each callable. The order corresponds to the order of the input list.
-
- Raises:
- Exception: Any exception raised by the callable will be logged and re-raised after retries are exhausted.
- """
- max_workers = cpu_count() if max_workers is None else max_workers
- results = []
- logger.info(
- f"Starting optimized parallel execution of {len(callables_with_args)} tasks."
- )
-
- pool = Pool(
- nodes=max_workers, **kwargs
- ) # Initialize the pool once
-
- def _execute_with_retry(callable_, args, retries):
- attempt = 0
- while attempt < retries:
- try:
- result = callable_(*args)
- logger.info(
- f"Task {callable_} with args {args} completed successfully."
- )
- return result
- except Exception as e:
- attempt += 1
- logger.warning(
- f"Task {callable_} with args {args} failed on attempt {attempt}: {e}"
- )
- time.sleep(1) # Small delay before retrying
- if attempt >= retries:
- logger.error(
- f"Task {callable_} with args {args} failed after {retries} retries."
- )
- raise
-
- try:
- if chunk_size is None:
- chunk_size = (
- len(callables_with_args)
- // (max_workers or pool.ncpus)
- or 1
- )
-
- # Use chunking and mapping for efficient execution
- results = pool.map(
- lambda item: _execute_with_retry(
- item[0], item[1], retries
- ),
- callables_with_args,
- chunksize=chunk_size,
- )
-
- except Exception as e:
- logger.critical(
- f"Parallel execution failed due to an error: {e}"
- )
- raise
-
- logger.info(
- f"Optimized parallel execution completed. {len(results)} tasks executed."
- )
- pool.close() # Ensure pool is properly closed
- pool.join()
-
-
-# return results
-
-
-# def add(a, b):
-# return a + b
-
-
-# def multiply(a, b):
-# return a * b
-
-
-# def power(a, b):
-# return a**b
-
-
-# # if __name__ == "__main__":
-# # # List of callables with their respective arguments
-# # callables_with_args = [
-# # (add, (2, 3)),
-# # (multiply, (5, 4)),
-# # (power, (2, 10)),
-# # ]
-
-# # # Execute the callables in parallel
-# # results = execute_parallel_optimized(callables_with_args)
-
-# # # Print the results
-# # print("Results:", results)
diff --git a/swarms/utils/file_processing.py b/swarms/utils/file_processing.py
index e14918fd..30e5dbf6 100644
--- a/swarms/utils/file_processing.py
+++ b/swarms/utils/file_processing.py
@@ -5,6 +5,28 @@ from typing import Any
import re
import shutil
import tempfile
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="file_processing")
+
+
+def check_if_folder_exists(folder_name: str) -> bool:
+ """
+ Check if a folder exists at the specified path.
+
+ Args:
+ folder_name (str): The path to the folder to check.
+
+ Returns:
+ bool: True if the folder exists, False otherwise.
+ """
+ try:
+ return os.path.exists(folder_name) and os.path.isdir(
+ folder_name
+ )
+ except Exception as e:
+ logger.error(f"Failed to check if folder exists: {e}")
+ return False
def zip_workspace(workspace_path: str, output_filename: str):
@@ -12,25 +34,33 @@ def zip_workspace(workspace_path: str, output_filename: str):
Zips the specified workspace directory and returns the path to the zipped file.
Ensure the output_filename does not have .zip extension as it's added by make_archive.
"""
- temp_dir = tempfile.mkdtemp()
- # Remove .zip if present in output_filename to avoid duplication
- base_output_path = os.path.join(
- temp_dir, output_filename.replace(".zip", "")
- )
- zip_path = shutil.make_archive(
- base_output_path, "zip", workspace_path
- )
- return zip_path # make_archive already appends .zip
+ try:
+ temp_dir = tempfile.mkdtemp()
+ # Remove .zip if present in output_filename to avoid duplication
+ base_output_path = os.path.join(
+ temp_dir, output_filename.replace(".zip", "")
+ )
+ zip_path = shutil.make_archive(
+ base_output_path, "zip", workspace_path
+ )
+ return zip_path # make_archive already appends .zip
+ except Exception as e:
+ logger.error(f"Failed to zip workspace: {e}")
+ return None
def sanitize_file_path(file_path: str):
"""
Cleans and sanitizes the file path to be valid for Windows.
"""
- sanitized_path = file_path.replace("`", "").strip()
- # Replace any invalid characters here with an underscore or remove them
- sanitized_path = re.sub(r'[<>:"/\\|?*]', "_", sanitized_path)
- return sanitized_path
+ try:
+ sanitized_path = file_path.replace("`", "").strip()
+ # Replace any invalid characters here with an underscore or remove them
+ sanitized_path = re.sub(r'[<>:"/\\|?*]', "_", sanitized_path)
+ return sanitized_path
+ except Exception as e:
+ logger.error(f"Failed to sanitize file path: {e}")
+ return None
def load_json(json_string: str):
@@ -43,11 +73,14 @@ def load_json(json_string: str):
Returns:
object: The Python object representing the JSON data.
"""
- json_data = json.loads(json_string)
- return json_data
+ try:
+ json_data = json.loads(json_string)
+ return json_data
+ except json.JSONDecodeError as e:
+ logger.error(f"Failed to decode JSON: {e}")
+ return None
-# Create file that
def create_file(
content: str,
file_path: str,
@@ -59,9 +92,13 @@ def create_file(
content (str): The content to be written to the file.
file_path (str): The path to the file to be created.
"""
- with open(file_path, "w") as file:
- file.write(content)
- return file_path
+ try:
+ with open(file_path, "w") as file:
+ file.write(content)
+ return file_path
+ except Exception as e:
+ logger.error(f"Failed to create file: {e}")
+ return None
def create_file_in_folder(
@@ -78,15 +115,19 @@ def create_file_in_folder(
Returns:
str: The path of the created file.
"""
- if not os.path.exists(folder_path):
- os.makedirs(folder_path)
+ try:
+ if not os.path.exists(folder_path):
+ os.makedirs(folder_path)
- # Create the file in the folder
- file_path = os.path.join(folder_path, file_name)
- with open(file_path, "w") as file:
- file.write(content)
+ # Create the file in the folder
+ file_path = os.path.join(folder_path, file_name)
+ with open(file_path, "w") as file:
+ file.write(content)
- return file_path
+ return file_path
+ except Exception as e:
+ logger.error(f"Failed to create file in folder: {e}")
+ return None
def zip_folders(
@@ -103,16 +144,24 @@ def zip_folders(
Returns:
None
"""
- # Create a temporary directory
- with tempfile.TemporaryDirectory() as temp_dir:
- # Copy both folders into the temporary directory
- shutil.copytree(
- folder1_path,
- os.path.join(temp_dir, os.path.basename(folder1_path)),
- )
- shutil.copytree(
- folder2_path,
- os.path.join(temp_dir, os.path.basename(folder2_path)),
- )
- # Create a zip file that contains the temporary directory
- shutil.make_archive(zip_file_path, "zip", temp_dir)
+ try:
+ # Create a temporary directory
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # Copy both folders into the temporary directory
+ shutil.copytree(
+ folder1_path,
+ os.path.join(
+ temp_dir, os.path.basename(folder1_path)
+ ),
+ )
+ shutil.copytree(
+ folder2_path,
+ os.path.join(
+ temp_dir, os.path.basename(folder2_path)
+ ),
+ )
+ # Create a zip file that contains the temporary directory
+ shutil.make_archive(zip_file_path, "zip", temp_dir)
+ except Exception as e:
+ logger.error(f"Failed to zip folders: {e}")
+ return None
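A short sketch of the hardened helpers (paths are placeholders; on failure each helper now logs and returns None instead of raising):

# True only if the path exists and is a directory.
if check_if_folder_exists("workspace"):
    # Returns the file path on success, or None after logging the error.
    created = create_file("generated by swarms", "workspace/notes.txt")

# Returns the path to the archive, or None if zipping failed.
archive = zip_workspace("workspace", "workspace_backup")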
diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py
new file mode 100644
index 00000000..f0d8ead2
--- /dev/null
+++ b/swarms/utils/formatter.py
@@ -0,0 +1,135 @@
+import time
+from typing import Any, Callable, Dict, List
+
+from rich.console import Console
+from rich.live import Live
+from rich.panel import Panel
+from rich.progress import Progress, SpinnerColumn, TextColumn
+from rich.table import Table
+from rich.text import Text
+
+
+class Formatter:
+ """
+ A class for formatting and printing rich text to the console.
+ """
+
+ def __init__(self):
+ """
+ Initializes the Formatter with a Rich Console instance.
+ """
+ self.console = Console()
+
+ def print_panel(
+ self, content: str, title: str = "", style: str = "bold blue"
+ ) -> None:
+ """
+ Prints a rich panel to the console with a random color.
+
+ Args:
+ content (str): The content of the panel.
+ title (str, optional): The title of the panel. Defaults to "".
+ style (str, optional): The style of the panel. Defaults to "bold blue".
+ """
+ import random
+
+ colors = [
+ "red",
+ "green",
+ "blue",
+ "yellow",
+ "magenta",
+ "cyan",
+ "white",
+ ]
+ random_color = random.choice(colors)
+ panel = Panel(
+ content, title=title, style=f"bold {random_color}"
+ )
+ self.console.print(panel)
+
+ def print_table(
+ self, title: str, data: Dict[str, List[str]]
+ ) -> None:
+ """
+ Prints a rich table to the console.
+
+ Args:
+ title (str): The title of the table.
+ data (Dict[str, List[str]]): A dictionary where keys are categories and values are lists of capabilities.
+ """
+ table = Table(show_header=True, header_style="bold magenta")
+ table.add_column("Category", style="cyan")
+ table.add_column("Capabilities", style="green")
+
+ for category, items in data.items():
+ table.add_row(category, "\n".join(items))
+
+ self.console.print(f"\nš„ {title}:", style="bold yellow")
+ self.console.print(table)
+
+ def print_progress(
+ self,
+ description: str,
+ task_fn: Callable,
+ *args: Any,
+ **kwargs: Any,
+ ) -> Any:
+ """
+ Prints a progress bar to the console and executes a task function.
+
+ Args:
+ description (str): The description of the task.
+ task_fn (Callable): The function to execute.
+ *args (Any): Arguments to pass to the task function.
+ **kwargs (Any): Keyword arguments to pass to the task function.
+
+ Returns:
+ Any: The result of the task function.
+ """
+ with Progress(
+ SpinnerColumn(),
+ TextColumn("[progress.description]{task.description}"),
+ ) as progress:
+ task = progress.add_task(description, total=None)
+ result = task_fn(*args, **kwargs)
+ progress.update(task, completed=True)
+ return result
+
+ def print_panel_token_by_token(
+ self,
+ tokens: str,
+ title: str = "Output",
+ style: str = "bold cyan",
+ delay: float = 0.01,
+ by_word: bool = False,
+ ) -> None:
+ """
+ Prints a string in real-time, token by token (character or word) inside a Rich panel.
+
+ Args:
+ tokens (str): The string to display in real-time.
+ title (str): Title of the panel.
+ style (str): Style for the text inside the panel.
+ delay (float): Delay in seconds between displaying each token.
+ by_word (bool): If True, display by words; otherwise, display by characters.
+ """
+ text = Text(style=style)
+
+ # Split tokens into characters or words
+ token_list = tokens.split() if by_word else tokens
+
+ with Live(
+ Panel(text, title=title, border_style=style),
+ console=self.console,
+ refresh_per_second=10,
+ ) as live:
+ for token in token_list:
+ text.append(token + (" " if by_word else ""))
+ live.update(
+ Panel(text, title=title, border_style=style)
+ )
+ time.sleep(delay)
+
+
+formatter = Formatter()
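A quick sketch of the shared formatter instance (illustrative only):

# Static panel with a randomly chosen border color.
formatter.print_panel("Swarm initialized", title="Status")

# Stream text into a live panel, word by word.
formatter.print_panel_token_by_token(
    "Streaming agent output one word at a time",
    title="Agent",
    by_word=True,
)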
diff --git a/swarms/utils/litellm.py b/swarms/utils/litellm.py
new file mode 100644
index 00000000..5bdd208d
--- /dev/null
+++ b/swarms/utils/litellm.py
@@ -0,0 +1,105 @@
+try:
+ from litellm import completion
+except ImportError:
+ import subprocess
+
+ subprocess.check_call(["pip", "install", "litellm"])
+ import litellm
+ from litellm import completion
+
+ litellm.set_verbose = True
+
+
+class LiteLLM:
+ """
+    A lightweight wrapper around litellm's completion API,
+    used to interact with LLM models for various tasks.
+ """
+
+ def __init__(
+ self,
+ model_name: str = "gpt-4o",
+ system_prompt: str = None,
+ stream: bool = False,
+ temperature: float = 0.5,
+ max_tokens: int = 4000,
+ ):
+ """
+ Initialize the LiteLLM with the given parameters.
+
+ Args:
+ model_name (str, optional): The name of the model to use. Defaults to "gpt-4o".
+ system_prompt (str, optional): The system prompt to use. Defaults to None.
+ stream (bool, optional): Whether to stream the output. Defaults to False.
+ temperature (float, optional): The temperature for the model. Defaults to 0.5.
+ max_tokens (int, optional): The maximum number of tokens to generate. Defaults to 4000.
+ """
+ self.model_name = model_name
+ self.system_prompt = system_prompt
+ self.stream = stream
+ self.temperature = temperature
+ self.max_tokens = max_tokens
+
+ def _prepare_messages(self, task: str) -> list:
+ """
+ Prepare the messages for the given task.
+
+ Args:
+ task (str): The task to prepare messages for.
+
+ Returns:
+ list: A list of messages prepared for the task.
+ """
+ messages = []
+
+ if self.system_prompt: # Check if system_prompt is not None
+ messages.append(
+ {"role": "system", "content": self.system_prompt}
+ )
+
+ messages.append({"role": "user", "content": task})
+
+ return messages
+
+ def run(self, task: str, *args, **kwargs):
+ """
+ Run the LLM model for the given task.
+
+ Args:
+ task (str): The task to run the model for.
+ *args: Additional positional arguments to pass to the model.
+ **kwargs: Additional keyword arguments to pass to the model.
+
+ Returns:
+ str: The content of the response from the model.
+ """
+ messages = self._prepare_messages(task)
+
+ response = completion(
+ model=self.model_name,
+ messages=messages,
+ stream=self.stream,
+ temperature=self.temperature,
+ # max_completion_tokens=self.max_tokens,
+ max_tokens=self.max_tokens,
+ *args,
+ **kwargs,
+ )
+ content = response.choices[
+ 0
+ ].message.content # Accessing the content
+ return content
+
+ def __call__(self, task: str, *args, **kwargs):
+ """
+ Call the LLM model for the given task.
+
+ Args:
+ task (str): The task to run the model for.
+ *args: Additional positional arguments to pass to the model.
+ **kwargs: Additional keyword arguments to pass to the model.
+
+ Returns:
+ str: The content of the response from the model.
+ """
+ return self.run(task, *args, **kwargs)
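A minimal usage sketch for the LiteLLM wrapper (the model name is illustrative; an API key for the chosen provider is assumed to be configured):

llm = LiteLLM(
    model_name="gpt-4o-mini",  # any model litellm can route
    system_prompt="You are a terse assistant.",
    temperature=0.2,
    max_tokens=256,
)

# __call__ delegates to run(), which returns the message content string.
print(llm("Summarize the utils changes in one sentence."))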
diff --git a/swarms/utils/loguru_logger.py b/swarms/utils/loguru_logger.py
index b53ec379..af5c7239 100644
--- a/swarms/utils/loguru_logger.py
+++ b/swarms/utils/loguru_logger.py
@@ -1,23 +1,37 @@
import os
+import uuid
from loguru import logger
-WORKSPACE_DIR = os.getenv("WORKSPACE_DIR")
+def initialize_logger(log_folder: str = "logs"):
-logger.add(
- os.path.join(WORKSPACE_DIR, "swarms.log"),
- level="INFO",
- colorize=True,
- backtrace=True,
- diagnose=True,
-)
+ AGENT_WORKSPACE = "agent_workspace"
+ # Check if WORKSPACE_DIR is set, if not, set it to AGENT_WORKSPACE
+ if "WORKSPACE_DIR" not in os.environ:
+ os.environ["WORKSPACE_DIR"] = AGENT_WORKSPACE
-def loguru_logger(file_path: str = "swarms.log"):
- return logger.add(
- os.path.join(WORKSPACE_DIR, file_path),
+ # Create a folder within the agent_workspace
+ log_folder_path = os.path.join(
+ os.getenv("WORKSPACE_DIR"), log_folder
+ )
+ if not os.path.exists(log_folder_path):
+ os.makedirs(log_folder_path)
+
+ # Generate a unique identifier for the log file
+ uuid_for_log = str(uuid.uuid4())
+ log_file_path = os.path.join(
+ log_folder_path, f"{log_folder}_{uuid_for_log}.log"
+ )
+
+ logger.add(
+ log_file_path,
level="INFO",
colorize=True,
backtrace=True,
diagnose=True,
+ enqueue=True,
+ retention="10 days",
+ # compression="zip",
)
+ return logger
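Call sites elsewhere in this diff now obtain a logger like this (sketch of the new pattern):

from swarms.utils.loguru_logger import initialize_logger

# Writes to <WORKSPACE_DIR>/my_module/my_module_<uuid>.log, keeps logs
# for 10 days, and falls back to "agent_workspace" if WORKSPACE_DIR is unset.
logger = initialize_logger(log_folder="my_module")
logger.info("module initialized")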
diff --git a/swarms/utils/markdown_message.py b/swarms/utils/markdown_message.py
index a85cb4a1..03a35092 100644
--- a/swarms/utils/markdown_message.py
+++ b/swarms/utils/markdown_message.py
@@ -1,4 +1,4 @@
-from termcolor import colored
+from swarms.utils.formatter import formatter
def display_markdown_message(message: str, color: str = "cyan"):
@@ -12,13 +12,10 @@ def display_markdown_message(message: str, color: str = "cyan"):
if line == "":
print()
elif line == "---":
- print(colored("-" * 50, color))
+ formatter.print_panel("-" * 50)
else:
- print(colored(line, color))
+ formatter.print_panel(line)
if "\n" not in message and message.startswith(">"):
# Aesthetic choice. For these tags, they need a space below them
print()
-
-
-# display_markdown_message("I love you and you are beautiful.", "cyan")
diff --git a/swarms/utils/openai_tts.py b/swarms/utils/openai_tts.py
new file mode 100644
index 00000000..3cfcbd05
--- /dev/null
+++ b/swarms/utils/openai_tts.py
@@ -0,0 +1,73 @@
+import os
+from loguru import logger
+import pygame
+import tempfile
+from openai import OpenAI
+
+
+class OpenAITTS:
+ """
+ A class to interact with OpenAI API and play the generated audio with improved streaming capabilities.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.client = OpenAI(
+ api_key=os.getenv("OPENAI_API_KEY"), *args, **kwargs
+ )
+ pygame.init()
+
+ def run(
+ self, task: str, play_sound: bool = True, *args, **kwargs
+ ):
+ """
+ Run a task with the OpenAI API and optionally play the generated audio with improved streaming.
+
+ Args:
+ task (str): The task to be executed.
+ play_sound (bool): If True, play the generated audio.
+
+ Returns:
+ None
+ """
+ try:
+ response = self.client.audio.speech.create(
+ model="tts-1",
+ voice="nova",
+ input=task,
+ *args,
+ **kwargs,
+ )
+            # The speech endpoint returns binary audio content rather
+            # than a URL, so write the bytes straight to a temp file.
+            logger.info("Task completed successfully.")
+
+            if play_sound:
+                with tempfile.NamedTemporaryFile(
+                    delete=False, suffix=".mp3"
+                ) as tmp_file:
+                    tmp_file.write(response.content)
+ pygame.mixer.music.load(tmp_file.name)
+ pygame.mixer.music.play()
+ while pygame.mixer.music.get_busy():
+ pygame.time.Clock().tick(10)
+ except Exception as e:
+ logger.error(f"Error during task execution: {str(e)}")
+
+
+# client = OpenAITTS(api_key=os.getenv("OPENAI_API_KEY"))
+# client.run("Hello world! This is a streaming test.", play_sound=True)
+
+
+def text_to_speech(
+ task: str, play_sound: bool = True, *args, **kwargs
+):
+ out = OpenAITTS().run(
+ task, play_sound=play_sound, *args, **kwargs
+ )
+ return out
+
+
+# print(text_to_speech(task="hello"))
diff --git a/swarms/utils/pandas_utils.py b/swarms/utils/pandas_utils.py
index dcf5354e..358c36e6 100644
--- a/swarms/utils/pandas_utils.py
+++ b/swarms/utils/pandas_utils.py
@@ -1,11 +1,13 @@
import subprocess
from typing import Any, Dict, List
-from loguru import logger
+from swarms.utils.loguru_logger import initialize_logger
+
from pydantic import BaseModel
from swarms.structs.agent import Agent
+logger = initialize_logger(log_folder="pandas_utils")
try:
import pandas as pd
diff --git a/swarms/utils/parse_code.py b/swarms/utils/parse_code.py
index 25cd6210..c962c5d8 100644
--- a/swarms/utils/parse_code.py
+++ b/swarms/utils/parse_code.py
@@ -1,34 +1,64 @@
import re
-def extract_code_from_markdown(markdown_content: str) -> str:
+def extract_code_blocks_with_language(markdown_text: str):
"""
- Extracts code blocks from a Markdown string and returns them as a single string.
+ Extracts all code blocks from Markdown text along with their languages.
Args:
- - markdown_content (str): The Markdown content as a string.
+ markdown_text (str): The input Markdown text.
Returns:
- - str: A single string containing all the code blocks separated by newlines.
+ list[dict]: A list of dictionaries, each containing:
+ - 'language': The detected language (or 'plaintext' if none specified).
+ - 'content': The content of the code block.
"""
- # Regular expression for fenced code blocks with optional language specifier
- pattern = r"```(?:\w+\n)?(.*?)```"
+ # Regex pattern to match code blocks and optional language specifiers
+ pattern = r"```(\w+)?\n(.*?)```"
- # Find all matches of the pattern
- matches = re.finditer(pattern, markdown_content, re.DOTALL)
+ # Find all matches (language and content)
+ matches = re.findall(pattern, markdown_text, re.DOTALL)
- # Extract the content inside the backticks
- code_blocks = [match.group(1).strip() for match in matches]
+ # Parse results
+ code_blocks = []
+ for language, content in matches:
+ language = (
+ language.strip() if language else "plaintext"
+ ) # Default to 'plaintext'
+ code_blocks.append(
+ {"language": language, "content": content.strip()}
+ )
- # Concatenate all code blocks separated by newlines
- return "\n".join(code_blocks)
+ return code_blocks
-# example = """
-# hello im an agent
-# ```bash
-# pip install swarms
-# ```
-# """
+def extract_code_from_markdown(
+ markdown_text: str, language: str = None
+):
+ """
+ Extracts content of code blocks for a specific language or all blocks if no language specified.
+
+ Args:
+ markdown_text (str): The input Markdown text.
+ language (str, optional): The language to filter by (e.g., 'yaml', 'python').
+
+ Returns:
+ str: The concatenated content of matched code blocks or an empty string if none found.
+ """
+ # Get all code blocks with detected languages
+ code_blocks = extract_code_blocks_with_language(markdown_text)
+
+ # Filter by language if specified
+ if language:
+ code_blocks = [
+ block["content"]
+ for block in code_blocks
+ if block["language"] == language
+ ]
+ else:
+ code_blocks = [
+ block["content"] for block in code_blocks
+ ] # Include all blocks
-# print(extract_code_from_markdown(example)) # Output: { "type": "function", "function": { "name": "fetch_financial_news", "parameters": { "query": "Nvidia news", "num_articles": 5 } } }
+ # Return concatenated content
+ return "\n\n".join(code_blocks) if code_blocks else ""
diff --git a/swarms/utils/pdf_to_text.py b/swarms/utils/pdf_to_text.py
index 90711691..8df8e065 100644
--- a/swarms/utils/pdf_to_text.py
+++ b/swarms/utils/pdf_to_text.py
@@ -1,14 +1,12 @@
-import sys
from swarms.utils.try_except_wrapper import try_except_wrapper
try:
import pypdf
except ImportError:
- print(
- "pypdf not installed. Please install it using: pip install"
- " pypdf"
- )
- sys.exit(1)
+ import subprocess
+
+ subprocess.check_call(["python", "-m", "pip", "install", "pypdf"])
+ import pypdf
@try_except_wrapper
diff --git a/swarms/utils/profile_func_2.py b/swarms/utils/profile_func_2.py
deleted file mode 100644
index a17c85aa..00000000
--- a/swarms/utils/profile_func_2.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from functools import wraps
-from loguru import logger
-import tracemalloc
-import psutil
-import time
-from typing import Callable, Any
-
-
-def profile_all(func: Callable) -> Callable:
- """
- A decorator to profile memory usage, CPU usage, and I/O operations
- of a function and log the data using loguru.
-
- It combines tracemalloc for memory profiling, psutil for CPU and I/O operations,
- and measures execution time.
-
- Args:
- func (Callable): The function to be profiled.
-
- Returns:
- Callable: The wrapped function with profiling enabled.
- """
-
- @wraps(func)
- def wrapper(*args: Any, **kwargs: Any) -> Any:
- # Start memory tracking
- tracemalloc.start()
-
- # Get initial CPU stats
- process = psutil.Process()
- initial_cpu_times = process.cpu_times()
-
- # Get initial I/O stats if available
- try:
- initial_io_counters = process.io_counters()
- io_tracking_available = True
- except AttributeError:
- logger.warning(
- "I/O counters not available on this platform."
- )
- io_tracking_available = False
-
- # Start timing the function execution
- start_time = time.time()
-
- # Execute the function
- result = func(*args, **kwargs)
-
- # Stop timing
- end_time = time.time()
- execution_time = end_time - start_time
-
- # Get final CPU stats
- final_cpu_times = process.cpu_times()
-
- # Get final I/O stats if available
- if io_tracking_available:
- final_io_counters = process.io_counters()
- io_read_count = (
- final_io_counters.read_count
- - initial_io_counters.read_count
- )
- io_write_count = (
- final_io_counters.write_count
- - initial_io_counters.write_count
- )
- else:
- io_read_count = io_write_count = 0
-
- # Get memory usage statistics
- snapshot = tracemalloc.take_snapshot()
- top_stats = snapshot.statistics("lineno")
-
- # Calculate CPU usage
- cpu_usage = (
- final_cpu_times.user
- - initial_cpu_times.user
- + final_cpu_times.system
- - initial_cpu_times.system
- )
-
- # Log the data
- logger.info(f"Execution time: {execution_time:.4f} seconds")
- logger.info(f"CPU usage: {cpu_usage:.2f} seconds")
- if io_tracking_available:
- logger.info(
- f"I/O Operations - Read: {io_read_count}, Write: {io_write_count}"
- )
- logger.info("Top memory usage:")
- for stat in top_stats[:10]:
- logger.info(stat)
-
- # Stop memory tracking
- tracemalloc.stop()
-
- return result
-
- return wrapper
diff --git a/swarms/utils/remove_json_whitespace.py b/swarms/utils/remove_json_whitespace.py
deleted file mode 100644
index 0a043e7c..00000000
--- a/swarms/utils/remove_json_whitespace.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import json
-
-import yaml
-
-
-def remove_whitespace_from_json(json_string: str) -> str:
- """
- Removes unnecessary whitespace from a JSON string.
-
- This function parses the JSON string into a Python object and then
- serializes it back into a JSON string without unnecessary whitespace.
-
- Args:
- json_string (str): The JSON string.
-
- Returns:
- str: The JSON string with whitespace removed.
- """
- parsed = json.loads(json_string)
- return json.dumps(parsed, separators=(",", ":"))
-
-
-# # Example usage for JSON
-# json_string = '{"field1": 123, "field2": "example text"}'
-# print(remove_whitespace_from_json(json_string))
-
-
-def remove_whitespace_from_yaml(yaml_string: str) -> str:
- """
- Removes unnecessary whitespace from a YAML string.
-
- This function parses the YAML string into a Python object and then
- serializes it back into a YAML string with minimized whitespace.
- Note: This might change the representation style of YAML data.
-
- Args:
- yaml_string (str): The YAML string.
-
- Returns:
- str: The YAML string with whitespace reduced.
- """
- parsed = yaml.safe_load(yaml_string)
- return yaml.dump(parsed, default_flow_style=True)
-
-
-# # Example usage for YAML
-# yaml_string = """
-# field1: 123
-# field2: example text
-# """
-# print(remove_whitespace_from_yaml(yaml_string))
diff --git a/swarms/utils/report_error_loguru.py b/swarms/utils/report_error_loguru.py
deleted file mode 100644
index 39ec8b5f..00000000
--- a/swarms/utils/report_error_loguru.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import datetime
-import os
-import platform
-import traceback
-
-from loguru import logger
-
-# Remove default logger configuration
-logger.remove()
-
-# Define the path for the log folder
-log_folder = os.path.join(os.getcwd(), "errors")
-
-try:
- # Create the log folder if it doesn't exist
- os.makedirs(log_folder, exist_ok=True)
-except PermissionError:
- logger.error(f"Permission denied: '{log_folder}'")
-except Exception as e:
- logger.error(
- f"An error occurred while creating the log folder: {e}"
- )
-else:
- # If the folder was created successfully, add a new logger
- logger.add(
- os.path.join(log_folder, "error_{time}.log"),
- level="ERROR",
- format="{time} - {level} - {message}",
- )
-
-
-def report_error(error: Exception):
- """
- Logs an error message and provides instructions for reporting the issue on Swarms GitHub
- or joining the community on Discord for real-time support.
-
- Args:
- error (Exception): The exception that occurred.
-
- Returns:
- None
-
- Raises:
- None
- """
- # Gather extensive context information
- context_info = {
- "exception_type": type(error).__name__,
- "exception_message": str(error),
- "stack_trace": traceback.format_exc(),
- "timestamp": datetime.datetime.now().isoformat(),
- "python_version": platform.python_version(),
- "platform": platform.platform(),
- "machine": platform.machine(),
- "processor": platform.processor(),
- "user": os.getenv("USER") or os.getenv("USERNAME"),
- "current_working_directory": os.getcwd(),
- }
-
- error_message = (
- f"\n"
- f"------------------Error: {error}-----------------------\n"
- f"#########################################\n"
- f"# #\n"
- f"# ERROR DETECTED! #\n"
- f"# #\n"
- f"# #\n"
- f"# #\n"
- f"# #\n"
- f"#########################################\n"
- f"\n"
- f"Error Message: {context_info['exception_message']} ({context_info['exception_type']})\n"
- f"\n"
- f"Stack Trace:\n{context_info['stack_trace']}\n"
- f"\n"
- f"Context Information:\n"
- f"-----------------------------------------\n"
- f"Timestamp: {context_info['timestamp']}\n"
- f"Python Version: {context_info['python_version']}\n"
- f"Platform: {context_info['platform']}\n"
- f"Machine: {context_info['machine']}\n"
- f"Processor: {context_info['processor']}\n"
- f"User: {context_info['user']}\n"
- f"Current Working Directory: {context_info['current_working_directory']}\n"
- f"-----------------------------------------\n"
- f"\n"
- "Support"
- f"\n"
- f"\n"
- f"To report this issue, please visit the Swarms GitHub Issues page:\n"
- f"https://github.com/kyegomez/swarms/issues\n"
- f"\n"
- f"You can also join the Swarms community on Discord for real-time support:\n"
- f"https://discord.com/servers/agora-999382051935506503\n"
- f"\n"
- f"#########################################\n"
- f"-----------------------------------------\n"
- )
-
- return logger.error(error_message)
-
-
-# # Example usage:
-# try:
-# # Simulate an error
-# raise ValueError("An example error")
-# except Exception as e:
-# report_error(e)
diff --git a/swarms/utils/run_on_cpu.py b/swarms/utils/run_on_cpu.py
deleted file mode 100644
index 742792b0..00000000
--- a/swarms/utils/run_on_cpu.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-import psutil
-from typing import Callable, Any
-from loguru import logger
-import functools
-
-
-def run_on_cpu(func: Callable) -> Callable:
- """
- Decorator that ensures the function runs on all available CPU cores,
- maximizing CPU and memory usage to execute the function as quickly as possible.
-
- This decorator sets the CPU affinity of the current process to all available CPU cores
- before executing the function. After the function completes, the original CPU affinity is restored.
-
- Args:
- func (Callable): The function to be executed.
-
- Returns:
- Callable: The wrapped function with CPU affinity settings applied.
-
- Raises:
- RuntimeError: If the CPU affinity cannot be set or restored.
- """
-
- @functools.wraps(func)
- def wrapper(*args: Any, **kwargs: Any) -> Any:
- # Get the current process
- process = psutil.Process(os.getpid())
-
- # Check if the platform supports cpu_affinity
- if not hasattr(process, "cpu_affinity"):
- logger.warning(
- "CPU affinity is not supported on this platform. Executing function without setting CPU affinity."
- )
- return func(*args, **kwargs)
-
- # Save the original CPU affinity
- original_affinity = process.cpu_affinity()
- logger.info(f"Original CPU affinity: {original_affinity}")
-
- try:
- # Set the CPU affinity to all available CPU cores
- all_cpus = list(range(os.cpu_count()))
- process.cpu_affinity(all_cpus)
- logger.info(f"Set CPU affinity to: {all_cpus}")
-
- # Set process priority to high
- try:
- process.nice(psutil.HIGH_PRIORITY_CLASS)
- logger.info("Set process priority to high.")
- except AttributeError:
- logger.warning(
- "Setting process priority is not supported on this platform."
- )
-
- # Pre-allocate memory by creating a large array (optional step)
- memory_size = int(
- psutil.virtual_memory().available * 0.9
- ) # 90% of available memory
- try:
- logger.info(
- f"Pre-allocating memory: {memory_size} bytes"
- )
- _ = bytearray(memory_size)
- except MemoryError:
- logger.error(
- "Failed to pre-allocate memory, continuing without pre-allocation."
- )
-
- # Run the function
- result = func(*args, **kwargs)
-
- except psutil.AccessDenied as e:
- logger.error(
- "Access denied while setting CPU affinity",
- exc_info=True,
- )
- raise RuntimeError(
- "Access denied while setting CPU affinity"
- ) from e
-
- except psutil.NoSuchProcess as e:
- logger.error("Process does not exist", exc_info=True)
- raise RuntimeError("Process does not exist") from e
-
- except Exception as e:
- logger.error(
- "An error occurred during function execution",
- exc_info=True,
- )
- raise RuntimeError(
- "An error occurred during function execution"
- ) from e
-
- finally:
- # Restore the original CPU affinity
- try:
- process.cpu_affinity(original_affinity)
- logger.info(
- f"Restored original CPU affinity: {original_affinity}"
- )
- except Exception as e:
- logger.error(
- "Failed to restore CPU affinity", exc_info=True
- )
- raise RuntimeError(
- "Failed to restore CPU affinity"
- ) from e
-
- return result
-
- return wrapper
-
-
-# # Example usage of the decorator
-# @run_on_cpu
-# def compute_heavy_task() -> None:
-# # An example task that is CPU and memory intensive
-# data = [i**2 for i in range(100000000)]
-# sum(data)
-# print("Task completed.")
-
-
-# compute_heavy_task()
diff --git a/swarms/utils/successful_run.py b/swarms/utils/successful_run.py
deleted file mode 100644
index 672145c4..00000000
--- a/swarms/utils/successful_run.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from loguru import logger
-import sys
-import platform
-import os
-import datetime
-
-# Configuring loguru to log to both the console and a file
-logger.remove() # Remove default logger configuration
-logger.add(
- sys.stderr,
- level="INFO",
- format="{time} - {level} - {message}",
-)
-
-logger.add(
- "info.log", level="INFO", format="{time} - {level} - {message}"
-)
-
-
-def log_success_message() -> None:
- """
- Logs a success message with instructions for sharing agents on the Swarms Agent Explorer and joining the community for assistance.
-
- Returns:
- None
-
- Raises:
- None
- """
- # Gather extensive context information
- context_info = {
- "timestamp": datetime.datetime.now().isoformat(),
- "python_version": platform.python_version(),
- "platform": platform.platform(),
- "machine": platform.machine(),
- "processor": platform.processor(),
- "user": os.getenv("USER") or os.getenv("USERNAME"),
- "current_working_directory": os.getcwd(),
- }
-
- success_message = (
- f"\n"
- f"#########################################\n"
- f"# #\n"
- f"# SUCCESSFUL RUN DETECTED! #\n"
- f"# #\n"
- f"#########################################\n"
- f"\n"
- f"Your task completed successfully!\n"
- f"\n"
- f"Context Information:\n"
- f"-----------------------------------------\n"
- f"Timestamp: {context_info['timestamp']}\n"
- f"Python Version: {context_info['python_version']}\n"
- f"Platform: {context_info['platform']}\n"
- f"Machine: {context_info['machine']}\n"
- f"Processor: {context_info['processor']}\n"
- f"User: {context_info['user']}\n"
- f"Current Working Directory: {context_info['current_working_directory']}\n"
- f"-----------------------------------------\n"
- f"\n"
- f"Share your agents on the Swarms Agent Explorer with friends:\n"
- f"https://swarms.world/platform/explorer\n"
- f"\n"
- f"Join the Swarms community if you want assistance or help debugging:\n"
- f"https://discord.gg/uzu63HQx\n"
- f"\n"
- f"#########################################\n"
- )
-
- logger.info(success_message)
-
-
-# Example usage:
-# log_success_message()
diff --git a/swarms/utils/swarm_reliability_checks.py b/swarms/utils/swarm_reliability_checks.py
new file mode 100644
index 00000000..4af895d1
--- /dev/null
+++ b/swarms/utils/swarm_reliability_checks.py
@@ -0,0 +1,81 @@
+from typing import Callable, List, Optional, Union
+
+from swarms.structs.agent import Agent
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="swarm_reliability_checks")
+
+
+def reliability_check(
+ agents: List[Union[Agent, Callable]],
+ max_loops: int,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ flow: Optional[str] = None,
+) -> None:
+ """
+ Performs reliability checks on swarm configuration parameters.
+
+ Args:
+ agents: List of Agent objects or callables that will be executed
+ max_loops: Maximum number of execution loops
+ name: Name identifier for the swarm
+        description: Description of the swarm's purpose
+        flow: Flow definition describing how the agents are arranged
+
+ Raises:
+ ValueError: If any parameters fail validation checks
+ TypeError: If parameters are of incorrect type
+ """
+ logger.info("Initializing swarm reliability checks")
+
+ # Type checking
+ if not isinstance(agents, list):
+ raise TypeError("agents parameter must be a list")
+
+ if not isinstance(max_loops, int):
+ raise TypeError("max_loops must be an integer")
+
+ # Validate agents
+ if not agents:
+ raise ValueError("Agents list cannot be empty")
+
+ for i, agent in enumerate(agents):
+ if not isinstance(agent, (Agent, Callable)):
+ raise TypeError(
+ f"Agent at index {i} must be an Agent instance or Callable"
+ )
+
+ # Validate max_loops
+ if max_loops <= 0:
+ raise ValueError("max_loops must be greater than 0")
+
+ if max_loops > 1000:
+ logger.warning(
+ "Large max_loops value detected. This may impact performance."
+ )
+
+ # Validate name
+ if name is None:
+ raise ValueError("name parameter is required")
+ if not isinstance(name, str):
+ raise TypeError("name must be a string")
+ if len(name.strip()) == 0:
+ raise ValueError("name cannot be empty or just whitespace")
+
+ # Validate description
+ if description is None:
+ raise ValueError("description parameter is required")
+ if not isinstance(description, str):
+ raise TypeError("description must be a string")
+ if len(description.strip()) == 0:
+ raise ValueError(
+ "description cannot be empty or just whitespace"
+ )
+
+ # Validate flow
+ if flow is None:
+ raise ValueError("flow parameter is required")
+ if not isinstance(flow, str):
+ raise TypeError("flow must be a string")
+
+ logger.info("All reliability checks passed successfully")
diff --git a/swarms/utils/try_except_wrapper.py b/swarms/utils/try_except_wrapper.py
index 50fdd877..faa63534 100644
--- a/swarms/utils/try_except_wrapper.py
+++ b/swarms/utils/try_except_wrapper.py
@@ -2,8 +2,9 @@ from functools import wraps
from time import time
from typing import Any, Callable
-from swarms.utils.loguru_logger import logger
-from swarms.utils.report_error_loguru import report_error
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger("try_except_wrapper")
def retry(
@@ -114,12 +115,12 @@ def try_except_wrapper(verbose: bool = False):
return result
except Exception as error:
if verbose:
- report_error(
+ logger.error(
f"An error occurred in function {func.__name__}:"
f" {error}"
)
else:
- report_error(
+ logger.error(
f"An error occurred in function {func.__name__}:"
f" {error}"
)
diff --git a/swarms/utils/update_agent_system_prompts.py b/swarms/utils/update_agent_system_prompts.py
new file mode 100644
index 00000000..e6f82426
--- /dev/null
+++ b/swarms/utils/update_agent_system_prompts.py
@@ -0,0 +1,53 @@
+import concurrent.futures
+from typing import List, Union
+from swarms.structs.agent import Agent
+
+
+def update_system_prompts(
+ agents: List[Union[Agent, str]],
+ prompt: str,
+) -> List[Agent]:
+ """
+ Update system prompts for a list of agents concurrently.
+
+ Args:
+ agents: List of Agent objects or strings to update
+ prompt: The prompt text to append to each agent's system prompt
+
+ Returns:
+ List of updated Agent objects
+ """
+ if not agents:
+ return agents
+
+ def update_agent_prompt(agent: Union[Agent, str]) -> Agent:
+ # Convert string to Agent if needed
+ if isinstance(agent, str):
+ agent = Agent(
+ agent_name=agent,
+ system_prompt=prompt, # Initialize with the provided prompt
+ )
+ else:
+ # Preserve existing prompt and append new one
+ existing_prompt = (
+ agent.system_prompt if agent.system_prompt else ""
+ )
+ agent.system_prompt = existing_prompt + "\n" + prompt
+ return agent
+
+ # Use ThreadPoolExecutor for concurrent execution
+ max_workers = min(len(agents), 4) # Reasonable thread count
+ with concurrent.futures.ThreadPoolExecutor(
+ max_workers=max_workers
+ ) as executor:
+ futures = []
+ for agent in agents:
+ future = executor.submit(update_agent_prompt, agent)
+ futures.append(future)
+
+ # Collect results as they complete
+ updated_agents = []
+ for future in concurrent.futures.as_completed(futures):
+ updated_agents.append(future.result())
+
+ return updated_agents
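A sketch of the concurrent prompt update (research_agent and writer_agent are hypothetical Agent instances):

shared_guidance = "Always cite the source document for every claim."

# Existing Agent objects keep their current prompt and get the new text
# appended; plain strings would instead be turned into new Agents.
updated = update_system_prompts(
    agents=[research_agent, writer_agent],  # hypothetical Agent instances
    prompt=shared_guidance,
)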
diff --git a/swarms/utils/wrapper_clusterop.py b/swarms/utils/wrapper_clusterop.py
new file mode 100644
index 00000000..646383c6
--- /dev/null
+++ b/swarms/utils/wrapper_clusterop.py
@@ -0,0 +1,106 @@
+from typing import Any
+
+
+from clusterops import (
+ execute_on_gpu,
+ execute_on_multiple_gpus,
+ list_available_gpus,
+ execute_with_all_cpu_cores,
+ execute_on_cpu,
+)
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="clusterops_wrapper")
+
+
+def exec_callable_with_clusterops(
+ device: str = "cpu",
+ device_id: int = 1,
+ all_cores: bool = True,
+ all_gpus: bool = False,
+ func: callable = None,
+ enable_logging: bool = True,
+ *args,
+ **kwargs,
+) -> Any:
+ """
+ Executes a given function on a specified device, either CPU or GPU.
+
+ This method attempts to execute a given function on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`.
+
+ Args:
+ device (str, optional): The device to use for execution. Defaults to "cpu".
+        device_id (int, optional): The ID of the GPU (or CPU core) to use when not running on all cores or all GPUs. Defaults to 1.
+ all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
+ all_gpus (bool, optional): If True, uses all available GPUs. Defaults to False.
+ func (callable): The function to execute.
+ enable_logging (bool, optional): If True, enables logging. Defaults to True.
+ *args: Additional positional arguments to be passed to the execution method.
+ **kwargs: Additional keyword arguments to be passed to the execution method.
+
+ Returns:
+ Any: The result of the execution.
+
+ Raises:
+ ValueError: If an invalid device is specified.
+ Exception: If any other error occurs during execution.
+ """
+ if func is None:
+ raise ValueError("A callable function must be provided")
+
+ try:
+ if enable_logging:
+ logger.info(f"Attempting to run on device: {device}")
+ device = device.lower()
+
+ if device == "cpu":
+ if enable_logging:
+ logger.info("Device set to CPU")
+
+ if all_cores:
+ if enable_logging:
+ logger.info("Using all CPU cores")
+ return execute_with_all_cpu_cores(
+ func, *args, **kwargs
+ )
+
+ if device_id is not None:
+ if enable_logging:
+ logger.info(
+ f"Using specific CPU core: {device_id}"
+ )
+ return execute_on_cpu(
+ device_id, func, *args, **kwargs
+ )
+
+ elif device == "gpu":
+ if enable_logging:
+ logger.info("Device set to GPU")
+
+ if all_gpus:
+ if enable_logging:
+ logger.info("Using all available GPUs")
+ gpus = [int(gpu) for gpu in list_available_gpus()]
+ return execute_on_multiple_gpus(
+ gpus, func, *args, **kwargs
+ )
+
+ if enable_logging:
+ logger.info(f"Using GPU device ID: {device_id}")
+ return execute_on_gpu(device_id, func, *args, **kwargs)
+
+ else:
+ raise ValueError(
+ f"Invalid device specified: {device}. Supported devices are 'cpu' and 'gpu'."
+ )
+
+ except ValueError as e:
+ if enable_logging:
+ logger.error(
+ f"Invalid device or configuration specified: {e}"
+ )
+ raise
+ except Exception as e:
+ if enable_logging:
+ logger.error(f"An error occurred during execution: {e}")
+ raise
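A sketch of routing a function through the clusterops wrapper (assumes the clusterops package is installed; the workload is a placeholder):

def train_step(batch_size: int) -> int:
    # Placeholder workload.
    return batch_size * 2

# Run on all CPU cores (the default path).
result = exec_callable_with_clusterops(
    device="cpu",
    all_cores=True,
    func=train_step,
    batch_size=32,
)

# Or pin to a single GPU by id:
# exec_callable_with_clusterops(device="gpu", all_gpus=False, device_id=0, func=train_step, batch_size=32)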
diff --git a/tests/structs/test_message_pool.py b/tests/structs/test_message_pool.py
deleted file mode 100644
index cd0607cf..00000000
--- a/tests/structs/test_message_pool.py
+++ /dev/null
@@ -1,117 +0,0 @@
-from swarm_models import OpenAIChat
-from swarms.structs.agent import Agent
-from swarms.structs.message_pool import MessagePool
-
-
-def test_message_pool_initialization():
- agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
- agent2 = Agent(llm=OpenAIChat(), agent_name="agent1")
- moderator = Agent(llm=OpenAIChat(), agent_name="agent1")
- agents = [agent1, agent2]
- message_pool = MessagePool(
- agents=agents, moderator=moderator, turns=5
- )
-
- assert message_pool.agent == agents
- assert message_pool.moderator == moderator
- assert message_pool.turns == 5
- assert message_pool.messages == []
-
-
-def test_message_pool_add():
- agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
- message_pool = MessagePool(
- agents=[agent1], moderator=agent1, turns=5
- )
- message_pool.add(agent=agent1, content="Hello, world!", turn=1)
-
- assert message_pool.messages == [
- {
- "agent": agent1,
- "content": "Hello, world!",
- "turn": 1,
- "visible_to": "all",
- "logged": True,
- }
- ]
-
-
-def test_message_pool_reset():
- agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
- message_pool = MessagePool(
- agents=[agent1], moderator=agent1, turns=5
- )
- message_pool.add(agent=agent1, content="Hello, world!", turn=1)
- message_pool.reset()
-
- assert message_pool.messages == []
-
-
-def test_message_pool_last_turn():
- agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
- message_pool = MessagePool(
- agents=[agent1], moderator=agent1, turns=5
- )
- message_pool.add(agent=agent1, content="Hello, world!", turn=1)
-
- assert message_pool.last_turn() == 1
-
-
-def test_message_pool_last_message():
- agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
- message_pool = MessagePool(
- agents=[agent1], moderator=agent1, turns=5
- )
- message_pool.add(agent=agent1, content="Hello, world!", turn=1)
-
- assert message_pool.last_message == {
- "agent": agent1,
- "content": "Hello, world!",
- "turn": 1,
- "visible_to": "all",
- "logged": True,
- }
-
-
-def test_message_pool_get_all_messages():
- agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
- message_pool = MessagePool(
- agents=[agent1], moderator=agent1, turns=5
- )
- message_pool.add(agent=agent1, content="Hello, world!", turn=1)
-
- assert message_pool.get_all_messages() == [
- {
- "agent": agent1,
- "content": "Hello, world!",
- "turn": 1,
- "visible_to": "all",
- "logged": True,
- }
- ]
-
-
-def test_message_pool_get_visible_messages():
- agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
- agent2 = Agent(agent_name="agent2")
- message_pool = MessagePool(
- agents=[agent1, agent2], moderator=agent1, turns=5
- )
- message_pool.add(
- agent=agent1,
- content="Hello, agent2!",
- turn=1,
- visible_to=[agent2.agent_name],
- )
-
- assert message_pool.get_visible_messages(
- agent=agent2, turn=2
- ) == [
- {
- "agent": agent1,
- "content": "Hello, agent2!",
- "turn": 1,
- "visible_to": [agent2.agent_name],
- "logged": True,
- }
- ]
diff --git a/tool_builder.py b/tool_builder.py
deleted file mode 100644
index c4219060..00000000
--- a/tool_builder.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import os
-from pydantic import BaseModel, Field
-from swarm_models import OpenAIFunctionCaller
-from dotenv import load_dotenv
-from typing import Any
-from swarms.utils.loguru_logger import logger
-from swarms.tools.prebuilt.code_executor import CodeExecutor
-
-load_dotenv()
-
-
-class Tool(BaseModel):
- id: str = Field(
- description="A unique identifier for the task. This should be a short, descriptive name that captures the main purpose of the task. Use - to separate words and make it lowercase."
- )
- plan: str = Field(
- description="The comprehensive plan detailing how the task will accomplish the given task. This should include the high-level strategy, key milestones, and expected outcomes. The plan should clearly articulate what the overall goal is, what success looks like, and how progress will be measured throughout execution."
- )
- failures_prediction: str = Field(
- description="A thorough analysis of potential failure modes and mitigation strategies. This should identify technical risks, edge cases, error conditions, and possible points of failure in the task. For each identified risk, include specific preventive measures, fallback approaches, and recovery procedures to ensure robustness and reliability."
- )
- rationale: str = Field(
- description="The detailed reasoning and justification for why this specific task design is optimal for the given task. This should explain the key architectural decisions, tradeoffs considered, alternatives evaluated, and why this approach best satisfies the requirements. Include both technical and business factors that influenced the design."
- )
- code: str = Field(
- description="Generate the code for the task. This should be a python function that takes in a task and returns a result. The code should be a complete and working implementation of the task. Include all necessary imports and dependencies and add types, docstrings, and comments to the code. Make sure the main code executes successfully. No placeholders or comments. Make sure the main function executes successfully."
- )
-
-
-def setup_model(base_model: BaseModel = Tool):
- model = OpenAIFunctionCaller(
- system_prompt="""You are an expert Python developer specializing in building reliable API integrations and developer tools. Your role is to generate production-ready code that follows best practices for API interactions and tool development.
-
- When given a task, you will:
- 1. Design robust error handling and retry mechanisms for API calls
- 2. Implement proper authentication and security measures
- 3. Structure code for maintainability and reusability
- 4. Add comprehensive logging and monitoring
- 5. Include detailed type hints and documentation
- 6. Write unit tests to verify functionality
-
- Your code should follow these principles:
- - Use modern Python features and idioms
- - Handle rate limits and API quotas gracefully
- - Validate inputs and outputs thoroughly
- - Follow security best practices for API keys and secrets
- - Include clear error messages and debugging info
- - Be well-documented with docstrings and comments
- - Use appropriate design patterns
- - Follow PEP 8 style guidelines
-
- The generated code should be complete, tested, and ready for production use. Include all necessary imports, error handling, and helper functions.
- """,
- base_model=base_model,
- openai_api_key=os.getenv("OPENAI_API_KEY"),
- temperature=0.5,
- )
- return model
-
-
-def generate_tool(task: str) -> Any:
- model = setup_model()
- response = model.run(task)
- logger.info(f"Response: {response}")
-
- # If response is a dict, get code directly
- if isinstance(response, dict):
- # return response.get("code", "")
- code = response.get("code", "")
- logger.info(f"Code: {code}")
- return code
- # If response is a Tool object, access code attribute
- elif isinstance(response, Tool):
- code = response.code
- logger.info(f"Code: {code}")
- return code
- # If response is a string (raw code)
- elif isinstance(response, str):
- code = response
- logger.info(f"Code: {code}")
- return code
- logger.error(f"Unexpected response type: {type(response)}")
- return ""
-
-
-def execute_generated_code(code: str) -> Any:
- """
- Attempts to execute the generated Python code, handling errors and retrying if necessary.
-
- Args:
- code (str): The Python code to be executed.
-
- Returns:
- Any: Output of the code execution, or error details if execution fails.
- """
- logger.info("Starting code execution")
- try:
- exec_namespace = {}
- exec(code, exec_namespace)
-
- # Check for any callable functions in the namespace
- main_function = None
- for item in exec_namespace.values():
- if callable(item) and not item.__name__.startswith("__"):
- main_function = item
- break
-
- if main_function:
- result = main_function()
- logger.info(
- f"Code execution successful. Function result: {result}"
- )
- return result
- elif "result" in exec_namespace:
- logger.info(
- f"Code execution successful. Result variable: {exec_namespace['result']}"
- )
- return exec_namespace["result"]
- else:
- logger.warning(
- "Code execution completed but no result found"
- )
- return "No result or function found in executed code."
- except Exception as e:
- logger.error(
- f"Code execution failed with error: {str(e)}",
- exc_info=True,
- )
- return e
-
-
-def retry_until_success(task: str, max_retries: int = 5):
- """
- Generates and executes code until the execution is successful.
-
- Args:
- task (str): Task description to generate the required code.
- """
- attempts = 0
-
- while attempts < max_retries:
- logger.info(f"Attempt {attempts + 1} of {max_retries}")
- tool = generate_tool(task)
- logger.debug(f"Generated code:\n{tool}")
-
- # result = execute_generated_code(tool)
- result = CodeExecutor().execute(code=tool)
- logger.info(f"Result: {result}")
-
- if isinstance(result, Exception):
- logger.error(
- f"Attempt {attempts + 1} failed: {str(result)}"
- )
- print("Retrying with updated code...")
- attempts += 1
- else:
- logger.info(
- f"Success on attempt {attempts + 1}. Result: {result}"
- )
- print(f"Code executed successfully: {result}")
- break
- else:
- logger.error("Max retries reached. Execution failed.")
- print("Max retries reached. Execution failed.")
-
-
-# Usage
-retry_until_success(
- "Write a function to fetch and display weather information from a given API."
-)