Merge branch 'master' into krishna/openai_assistant

pull/630/head
Kye Gomez 1 month ago committed by GitHub
commit c761ec804e

@ -22,4 +22,4 @@ jobs:
- run: ruff format .
- run: ruff check --fix .
- uses: autofix-ci/action@dd55f44df8f7cdb7a6bf74c78677eb8acd40cd0a
- uses: autofix-ci/action@ff86a557419858bb967097bfc916833f5647fa8c

@ -0,0 +1,43 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
#
# This workflow file requires a free account on Bearer.com to manage findings, notifications and more.
# See https://docs.bearer.com/guides/bearer-cloud/
name: Bearer
on:
push:
branches: ["master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: ["master"]
schedule:
- cron: '24 22 * * 6'
permissions:
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
jobs:
bearer:
runs-on: ubuntu-latest
steps:
# Checkout project source
- uses: actions/checkout@v4
# Scan code using Bearer CLI
- name: Run Report
id: report
uses: bearer/bearer-action@828eeb928ce2f4a7ca5ed57fb8b59508cb8c79bc
with:
api-key: ${{ secrets.BEARER_TOKEN }}
format: sarif
output: results.sarif
exit-code: 0
# Upload SARIF file generated in previous step
- name: Upload SARIF file
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: results.sarif

@ -0,0 +1,39 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request,
# surfacing known-vulnerable versions of the packages declared or updated in the PR.
# Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable
# packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency review'
on:
pull_request:
branches: [ "master" ]
# If using a dependency submission action in this workflow this permission will need to be set to:
#
# permissions:
# contents: write
#
# https://docs.github.com/en/enterprise-cloud@latest/code-security/supply-chain-security/understanding-your-software-supply-chain/using-the-dependency-submission-api
permissions:
contents: read
# Write permissions for pull-requests are required for using the `comment-summary-in-pr` option, comment out if you aren't using this option
pull-requests: write
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4
# Commonly enabled options, see https://github.com/actions/dependency-review-action#configuration-options for all available options.
with:
comment-summary-in-pr: always
# fail-on-severity: moderate
# deny-licenses: GPL-1.0-or-later, LGPL-2.0-or-later
# retry-on-snapshot-warnings: true

@ -0,0 +1,18 @@
name: Docker Image CI
on:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build the Docker image
run: docker build . --file Dockerfile --tag my-image-name:$(date +%s)

@ -0,0 +1,46 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# This workflow integrates Pyre with GitHub's
# Code Scanning feature.
#
# Pyre is a performant type checker for Python compliant with
# PEP 484. Pyre can analyze codebases with millions of lines
# of code incrementally providing instantaneous feedback
# to developers as they write code.
#
# See https://pyre-check.org
name: Pyre
on:
workflow_dispatch:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
permissions:
contents: read
jobs:
pyre:
permissions:
actions: read
contents: read
security-events: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Run Pyre
uses: facebook/pyre-action@60697a7858f7cc8470d8cc494a3cf2ad6b06560d
with:
# To customize these inputs:
# See https://github.com/facebook/pyre-action#inputs
repo-directory: './'
requirements-path: 'requirements.txt'

@ -0,0 +1,50 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# This workflow integrates Python Static Analyzer (Pysa) with
# GitHub's Code Scanning feature.
#
# Python Static Analyzer (Pysa) is a security-focused static
# analysis tool that tracks flows of data from where they
# originate to where they terminate in a dangerous location.
#
# See https://pyre-check.org/docs/pysa-basics/
name: Pysa
on:
workflow_dispatch:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
schedule:
- cron: '43 5 * * 3'
permissions:
contents: read
jobs:
pysa:
permissions:
actions: read
contents: read
security-events: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Run Pysa
uses: facebook/pysa-action@f46a63777e59268613bd6e2ff4e29f144ca9e88b
with:
# To customize these inputs:
# See https://github.com/facebook/pysa-action#inputs
repo-directory: './'
requirements-path: 'requirements.txt'
infer-types: true
include-default-sapp-filters: true

@ -0,0 +1,34 @@
name: Python Package using Conda
on: [push]
jobs:
build-linux:
runs-on: ubuntu-latest
strategy:
max-parallel: 5
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v3
with:
python-version: '3.10'
- name: Add conda to system path
run: |
# $CONDA is an environment variable pointing to the root of the miniconda directory
echo $CONDA/bin >> $GITHUB_PATH
- name: Install dependencies
run: |
conda env update --file environment.yml --name base
- name: Lint with flake8
run: |
conda install flake8
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
conda install pytest
pytest

@ -1,6 +1,8 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
# [ ] TODO [pep 458](https://blog.pypi.org/posts/2024-11-14-pypi-now-supports-digital-attestations/)
name: Python package
on:
@ -16,7 +18,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12"]
python-version: ["3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4

@ -0,0 +1,49 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# This workflow file requires a free account on Semgrep.dev to
# manage rules, file ignores, notifications, and more.
#
# See https://semgrep.dev/docs
name: Semgrep
on:
push:
branches: [ "master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
schedule:
- cron: '19 7 * * 3'
permissions:
contents: read
jobs:
semgrep:
permissions:
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
name: Scan
runs-on: ubuntu-latest
steps:
# Checkout project source
- uses: actions/checkout@v4
# Scan code using project's configuration on https://semgrep.dev/manage
- uses: returntocorp/semgrep-action@fcd5ab7459e8d91cb1777481980d1b18b4fc6735
with:
publishToken: ${{ secrets.SEMGREP_APP_TOKEN }}
publishDeployment: ${{ secrets.SEMGREP_DEPLOYMENT_ID }}
generateSarif: "1"
# Upload SARIF file generated in previous step
- name: Upload SARIF file
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: semgrep.sarif
if: always()

@ -0,0 +1,48 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
name: trivy
on:
push:
branches: [ "master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
schedule:
- cron: '31 0 * * 5'
permissions:
contents: read
jobs:
build:
permissions:
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
name: Build
runs-on: "ubuntu-20.04"
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Build an image from Dockerfile
run: |
docker build -t docker.io/my-organization/my-app:${{ github.sha }} .
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@7b7aa264d83dc58691451798b4d117d53d21edfe
with:
image-ref: 'docker.io/my-organization/my-app:${{ github.sha }}'
format: 'template'
template: '@/contrib/sarif.tpl'
output: 'trivy-results.sarif'
severity: 'CRITICAL,HIGH'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: 'trivy-results.sarif'

.gitignore

@ -8,10 +8,12 @@ audio/
video/
artifacts_three
dataframe/
.ruff_cache
.pytest_cache
static/generated
runs
Financial-Analysis-Agent_state.json
experimental
artifacts_five
encryption
errors

@ -37,8 +37,21 @@
[![Share on Reddit](https://img.shields.io/badge/-Share%20on%20Reddit-orange)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&title=Swarms%20-%20the%20future%20of%20AI) [![Share on Hacker News](https://img.shields.io/badge/-Share%20on%20Hacker%20News-orange)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&t=Swarms%20-%20the%20future%20of%20AI) [![Share on Pinterest](https://img.shields.io/badge/-Share%20on%20Pinterest-red)](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&media=https%3A%2F%2Fexample.com%2Fimage.jpg&description=Swarms%20-%20the%20future%20of%20AI) [![Share on WhatsApp](https://img.shields.io/badge/-Share%20on%20WhatsApp-green)](https://api.whatsapp.com/send?text=Check%20out%20Swarms%20-%20the%20future%20of%20AI%20%23swarms%20%23AI%0A%0Ahttps%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms)
## ✨ Features
| Category | Features | Benefits |
|----------|----------|-----------|
| 🏢 Enterprise Architecture | • Production-Ready Infrastructure<br>• High Reliability Systems<br>• Modular Design<br>• Comprehensive Logging | • Reduced downtime<br>• Easier maintenance<br>• Better debugging<br>• Enhanced monitoring |
| 🤖 Agent Orchestration | • Hierarchical Swarms<br>• Parallel Processing<br>• Sequential Workflows<br>• Graph-based Workflows<br>• Dynamic Agent Rearrangement | • Complex task handling<br>• Improved performance<br>• Flexible workflows<br>• Optimized execution |
| 🔄 Integration Capabilities | • Multi-Model Support<br>• Custom Agent Creation<br>• Extensive Tool Library<br>• Multiple Memory Systems | • Provider flexibility<br>• Custom solutions<br>• Extended functionality<br>• Enhanced memory management |
| 📈 Scalability | • Concurrent Processing<br>• Resource Management<br>• Load Balancing<br>• Horizontal Scaling | • Higher throughput<br>• Efficient resource use<br>• Better performance<br>• Easy scaling |
| 🛠️ Developer Tools | • Simple API<br>• Extensive Documentation<br>• Active Community<br>• CLI Tools | • Faster development<br>• Easy learning curve<br>• Community support<br>• Quick deployment |
| 🔐 Security Features | • Error Handling<br>• Rate Limiting<br>• Monitoring Integration<br>• Audit Logging | • Improved reliability<br>• API protection<br>• Better monitoring<br>• Enhanced tracking |
| 📊 Advanced Features | • SpreadsheetSwarm<br>• Group Chat<br>• Agent Registry<br>• Mixture of Agents | • Mass agent management<br>• Collaborative AI<br>• Centralized control<br>• Complex solutions |
| 🔌 Provider Support | • OpenAI<br>• Anthropic<br>• ChromaDB<br>• Custom Providers | • Provider flexibility<br>• Storage options<br>• Custom integration<br>• Vendor independence |
| 💪 Production Features | • Automatic Retries<br>• Async Support<br>• Environment Management<br>• Type Safety | • Better reliability<br>• Improved performance<br>• Easy configuration<br>• Safer code |
| 🎯 Use Case Support | • Task-Specific Agents<br>• Custom Workflows<br>• Industry Solutions<br>• Extensible Framework | • Quick deployment<br>• Flexible solutions<br>• Industry readiness<br>• Easy customization |
Swarms is an enterprise-grade, production-ready multi-agent collaboration framework that enables you to orchestrate many agents to work collaboratively at scale to automate real-world activities.
----
@ -49,7 +62,7 @@ Swarms is an enterprise grade and production ready multi-agent collaboration fra
- Set a `WORKSPACE_DIR` variable in your `.env` file, for example `WORKSPACE_DIR="agent_workspace"`, or export it in your terminal with `export WORKSPACE_DIR="agent_workspace"`
- Finally, run `swarms onboarding` to get started.
## Onboarding
## Guides and Walkthroughs
Refer to our documentation for production-grade implementation details.
@ -68,9 +81,10 @@ Refer to our documentation for production grade implementation details.
## Install 💻
Install the following packages by copying and pasting the command below:
```bash
$ pip3 install -U swarms
$ pip3 install -U swarms swarm-models swarms-memory
```
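As a quick sanity check after installation, note that the pip package names above map to the import names used throughout this README. A minimal sketch, assuming a standard install:

```python
# Verify that the installed distributions expose the expected import names.
import swarms          # installed as `swarms`
import swarm_models    # installed as `swarm-models`
import swarms_memory   # installed as `swarms-memory`

print(swarms.__file__)  # confirms which environment the package was loaded from
```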
@ -100,15 +114,37 @@ Here are some example scripts to get you started. For more comprehensive documen
| Swarms Examples | A collection of simple examples to demonstrate Swarms capabilities. | Basic Usage | [https://github.com/The-Swarm-Corporation/swarms-examples?tab=readme-ov-file](https://github.com/The-Swarm-Corporation/swarms-examples?tab=readme-ov-file) |
| Cookbook | A comprehensive guide with recipes for various use cases and scenarios. | Advanced Usage | [https://github.com/The-Swarm-Corporation/Cookbook](https://github.com/The-Swarm-Corporation/Cookbook) |
---
## `Agent` Class
The `Agent` class is a fundamental component of the Swarms framework, designed to execute tasks autonomously. It fuses LLMs, tools, and long-term memory to create a full-stack agent. The `Agent` class is highly customizable, allowing for fine-grained control over its behavior and interactions.
### `run` Method
The `run` method is the primary entry point for executing tasks with an `Agent` instance. It accepts a task string as the main input and processes it according to the agent's configuration. It can also accept an `img` parameter, such as `img="image_filepath.png"`, to process images if you have a VLM attached such as `GPT4VisionAPI`.
## Simple Example
```python
from swarms import Agent
agent = Agent(
agent_name="Stock-Analysis-Agent",
model_name="gpt-4o-mini",
max_loops="auto",
interactive=True,
streaming_on=True,
)
agent.run("What is the current market trend for tech stocks?")
```
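To use the `img` parameter described above, a minimal sketch (assuming a vision-capable model and a local file named `chart.png`, both placeholders) looks like this:

```python
from swarms import Agent

# Hypothetical vision-enabled agent; the model and file names are placeholders.
vision_agent = Agent(
    agent_name="Chart-Analysis-Agent",
    model_name="gpt-4o",  # assumed to be a vision-capable (VLM) model
    max_loops=1,
)

# Pass the task string along with an image path via the `img` parameter.
vision_agent.run(
    "Summarize the key trend shown in this chart.",
    img="chart.png",  # placeholder path to a local image
)
```

Text-only models generally cannot make use of the image, so attach a VLM before relying on this path.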
### Settings and Customization
The `Agent` class offers a range of settings to tailor its behavior to specific needs. Some key settings include:
@ -133,28 +169,15 @@ The `Agent` class offers a range of settings to tailor its behavior to specific
```python
import os
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from dotenv import load_dotenv
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
model_name="gpt-4o-mini",
max_loops=1,
autosave=True,
dashboard=False,
@ -176,11 +199,10 @@ agent.run(
```
-----
### Integrating RAG with Swarms for Enhanced Long-Term Memory
An `Agent` can be equipped with quasi-infinite long-term memory using RAG (Retrieval-Augmented Generation) for advanced document understanding, analysis, and retrieval capabilities.
**Mermaid Diagram for RAG Integration**
```mermaid
graph TD
@ -192,8 +214,11 @@ graph TD
F --> G[Return Output]
```
**Step 1: Initialize the ChromaDB Client**
```python
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
import os
from swarms_memory import ChromaDB
@ -204,29 +229,13 @@ chromadb = ChromaDB(
output_dir="finance_agent_rag", # Directory for storing RAG data
# docs_folder="artifacts", # Uncomment and specify the folder containing your documents
)
```
**Step 2: Define the Model**
```python
from swarm_models import Anthropic
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
# Define the Anthropic model for language processing
model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
```
**Step 3: Initialize the Agent with RAG**
```python
from swarms import Agent
# Initialize the agent with RAG capabilities
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
agent_description="Agent creates a comprehensive financial analysis",
llm=model,
model_name="gpt-4o-mini",
max_loops="auto", # Auto-adjusts loops based on task complexity
autosave=True, # Automatically saves agent state
dashboard=False, # Disables dashboard for this example
@ -343,7 +352,6 @@ The following is an example of an agent that intakes a pydantic basemodel and ou
```python
from pydantic import BaseModel, Field
from swarms import Agent
from swarm_models import Anthropic
# Initialize the schema for the person's information
@ -375,7 +383,7 @@ agent = Agent(
),
# Set the tool schema to the JSON string -- this is the key difference
tool_schema=tool_schema,
llm=Anthropic(),
model_name="gpt-4o",
max_loops=3,
autosave=True,
dashboard=False,
@ -454,7 +462,7 @@ from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
from swarms.utils.json_utils import base_model_to_json
from swarms.tools.json_utils import base_model_to_json
# Load the pre-trained model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
@ -582,8 +590,6 @@ You can now easily plug this custom Griptape agent into the **Swarms Framework**
## Understanding Swarms
### What is a Swarm?
A swarm refers to a group of more than two agents working collaboratively to achieve a common goal. These agents can be software entities, such as LLMs, that interact with each other to perform complex tasks. The concept of a swarm is inspired by natural systems like ant colonies or bird flocks, where simple individual behaviors lead to complex group dynamics and problem-solving capabilities.
### How Swarm Architectures Facilitate Communication
@ -596,9 +602,6 @@ Swarm architectures are designed to establish and manage communication between a
3. **Sequential Communication**: Sequential swarms process tasks in a linear order, where each agent's output becomes the input for the next agent. This ensures that tasks with dependencies are handled in the correct sequence, maintaining the integrity of the workflow.
4. **Mesh Communication**: In mesh swarms, agents are fully connected, allowing any agent to communicate with any other agent. This setup provides high flexibility and redundancy, making it ideal for complex systems requiring dynamic interactions.
5. **Federated Communication**: Federated swarms involve multiple independent swarms that collaborate by sharing information and results. Each swarm operates autonomously but can contribute to a larger task, enabling distributed problem-solving across different nodes.
Swarm architectures leverage these communication patterns to ensure that agents work together efficiently, adapting to the specific requirements of the task at hand. By defining clear communication protocols and interaction models, swarm architectures enable the seamless orchestration of multiple agents, leading to enhanced performance and problem-solving capabilities.
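As a rough illustration of the sequential and parallel patterns above, the sketch below wires three hypothetical agents into an `AgentRearrange` flow, using `->` for sequential hand-offs and `,` for parallel fan-out. The agent names, prompts, and exact constructor arguments are illustrative assumptions rather than a fixed API:

```python
from swarms import Agent, AgentRearrange

# Three illustrative agents; names, prompts, and model choices are placeholders.
researcher = Agent(
    agent_name="Researcher",
    system_prompt="Collect background facts relevant to the task.",
    model_name="gpt-4o-mini",
    max_loops=1,
)
summarizer = Agent(
    agent_name="Summarizer",
    system_prompt="Condense the research into key points.",
    model_name="gpt-4o-mini",
    max_loops=1,
)
critic = Agent(
    agent_name="Critic",
    system_prompt="Review the summary for gaps or errors.",
    model_name="gpt-4o-mini",
    max_loops=1,
)

# "Researcher" runs first; "Summarizer" and "Critic" then run in parallel.
swarm = AgentRearrange(
    agents=[researcher, summarizer, critic],
    flow="Researcher -> Summarizer, Critic",
)

print(swarm.run("Prepare a short brief on battery recycling."))
```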
@ -876,14 +879,12 @@ The `run` method returns the final output after all agents have processed the in
from swarms import Agent, AgentRearrange
from swarm_models import Anthropic
# Initialize the director agent
director = Agent(
agent_name="Director",
system_prompt="Directs the tasks for the workers",
llm=Anthropic(),
model_name="claude-2",
max_loops=1,
dashboard=False,
streaming_on=True,
@ -899,7 +900,7 @@ director = Agent(
worker1 = Agent(
agent_name="Worker1",
system_prompt="Generates a transcript for a youtube video on what swarms are",
llm=Anthropic(),
model_name="claude-2",
max_loops=1,
dashboard=False,
streaming_on=True,
@ -914,7 +915,7 @@ worker1 = Agent(
worker2 = Agent(
agent_name="Worker2",
system_prompt="Summarizes the transcript generated by Worker1",
llm=Anthropic(),
model_name="claude-2",
max_loops=1,
dashboard=False,
streaming_on=True,
@ -1068,20 +1069,12 @@ The `run` method returns the final output after all agents have processed the in
```python
import os
from swarm_models import OpenAIChat
from swarms import Agent, MixtureOfAgents
api_key = os.getenv("OPENAI_API_KEY")
# Create individual agents with the OpenAIChat model
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4", temperature=0.1
)
# Agent 1: Financial Statement Analyzer
agent1 = Agent(
agent_name="FinancialStatementAnalyzer",
llm=model,
model_name="gpt-4o",
system_prompt="""You are a Financial Statement Analyzer specializing in 10-K SEC reports. Your primary focus is on analyzing the financial statements, including the balance sheet, income statement, and cash flow statement.
Key responsibilities:
@ -1107,7 +1100,7 @@ When analyzing, consider industry standards and compare the company's performanc
# Agent 2: Risk Assessment Specialist
agent2 = Agent(
agent_name="RiskAssessmentSpecialist",
llm=model,
model_name="gpt-4o",
system_prompt="""You are a Risk Assessment Specialist focusing on 10-K SEC reports. Your primary role is to identify, analyze, and evaluate potential risks disclosed in the report.
Key responsibilities:
@ -1134,7 +1127,7 @@ Your analysis should provide a comprehensive overview of the company's risk land
# Agent 3: Business Strategy Evaluator
agent3 = Agent(
agent_name="BusinessStrategyEvaluator",
llm=model,
model_name="gpt-4o",
system_prompt="""You are a Business Strategy Evaluator specializing in analyzing 10-K SEC reports. Your focus is on assessing the company's overall strategy, market position, and future outlook.
Key responsibilities:
@ -1162,7 +1155,7 @@ Your analysis should provide insights into the company's strategic direction, it
# Aggregator Agent
aggregator_agent = Agent(
agent_name="10KReportAggregator",
llm=model,
model_name="gpt-4o",
system_prompt="""You are the 10-K Report Aggregator, responsible for synthesizing and summarizing the analyses provided by the Financial Statement Analyzer, Risk Assessment Specialist, and Business Strategy Evaluator. Your goal is to create a comprehensive, coherent, and insightful summary of the 10-K SEC report.
Key responsibilities:
@ -1188,7 +1181,7 @@ Your final report should be well-structured, easy to understand, and provide a h
# Create the Mixture of Agents class
moa = MixtureOfAgents(
reference_agents=[agent1, agent2, agent3],
agents=[agent1, agent2, agent3],
aggregator_agent=aggregator_agent,
aggregator_system_prompt="""As the 10-K Report Aggregator, your task is to synthesize the analyses provided by the Financial Statement Analyzer, Risk Assessment Specialist, and Business Strategy Evaluator into a comprehensive and coherent report.
@ -1252,9 +1245,8 @@ The `run` method returns a dictionary containing the outputs of each agent that
```python
import os
from swarms import Agent
from swarms import Agent, SpreadSheetSwarm
from swarm_models import OpenAIChat
from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
# Define custom system prompts for each social media platform
TWITTER_AGENT_SYS_PROMPT = """
@ -1277,20 +1269,12 @@ EMAIL_AGENT_SYS_PROMPT = """
You are an Email marketing expert specializing in real estate. Your task is to write compelling email campaigns to promote properties, focusing on personalization, subject lines, and effective call-to-action strategies to drive conversions.
"""
# Example usage:
api_key = os.getenv("OPENAI_API_KEY")
# Model
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize your agents for different social media platforms
agents = [
Agent(
agent_name="Twitter-RealEstate-Agent",
system_prompt=TWITTER_AGENT_SYS_PROMPT,
llm=model,
model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="twitter_realestate_agent.json",
@ -1300,7 +1284,7 @@ agents = [
Agent(
agent_name="Instagram-RealEstate-Agent",
system_prompt=INSTAGRAM_AGENT_SYS_PROMPT,
llm=model,
model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="instagram_realestate_agent.json",
@ -1310,7 +1294,7 @@ agents = [
Agent(
agent_name="Facebook-RealEstate-Agent",
system_prompt=FACEBOOK_AGENT_SYS_PROMPT,
llm=model,
model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="facebook_realestate_agent.json",
@ -1320,7 +1304,7 @@ agents = [
Agent(
agent_name="LinkedIn-RealEstate-Agent",
system_prompt=LINKEDIN_AGENT_SYS_PROMPT,
llm=model,
model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="linkedin_realestate_agent.json",
@ -1330,7 +1314,7 @@ agents = [
Agent(
agent_name="Email-RealEstate-Agent",
system_prompt=EMAIL_AGENT_SYS_PROMPT,
llm=model,
model_name="gpt-4o",
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="email_realestate_agent.json",
@ -1439,7 +1423,7 @@ The `run` method returns the output from the most relevant agent selected based
```python
from swarms.structs.tree_swarm import TreeAgent, Tree, ForestSwarm
from swarms import TreeAgent, Tree, ForestSwarm
# Create agents with varying system prompts and dynamically generated distances/keywords
agents_tree1 = [

@ -1,44 +0,0 @@
import os
from swarms_memory import ChromaDB
from swarms import Agent
from swarm_models import Anthropic
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
# Initialize the ChromaDB client
chromadb = ChromaDB(
metric="cosine",
output_dir="fiance_agent_rag",
# docs_folder="artifacts", # Folder of your documents
)
# Model
model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
agent_description="Agent creates ",
llm=model,
max_loops="auto",
autosave=True,
dashboard=False,
verbose=True,
streaming_on=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
user_name="swarms_corp",
retry_attempts=3,
context_length=200000,
long_term_memory=chromadb,
)
agent.run(
"What are the components of a startups stock incentive equity plan"
)

@ -1,117 +0,0 @@
from swarms import Agent
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB
import subprocess
import os
# Making an instance of the ChromaDB class
memory = ChromaDB(
metric="cosine",
n_results=3,
output_dir="results",
docs_folder="docs",
)
# Model
model = OpenAIChat(
api_key=os.getenv("OPENAI_API_KEY"),
model_name="gpt-4o-mini",
temperature=0.1,
)
# Tools in swarms are simple python functions and docstrings
def terminal(
code: str,
):
"""
Run code in the terminal.
Args:
code (str): The code to run in the terminal.
Returns:
str: The output of the code.
"""
out = subprocess.run(
code, shell=True, capture_output=True, text=True
).stdout
return str(out)
def browser(query: str):
"""
Search the query in the browser with the `browser` tool.
Args:
query (str): The query to search in the browser.
Returns:
str: The search results.
"""
import webbrowser
url = f"https://www.google.com/search?q={query}"
webbrowser.open(url)
return f"Searching for {query} in the browser."
def create_file(file_path: str, content: str):
"""
Create a file using the file editor tool.
Args:
file_path (str): The path to the file.
content (str): The content to write to the file.
Returns:
str: The result of the file creation operation.
"""
with open(file_path, "w") as file:
file.write(content)
return f"File {file_path} created successfully."
def file_editor(file_path: str, mode: str, content: str):
"""
Edit a file using the file editor tool.
Args:
file_path (str): The path to the file.
mode (str): The mode to open the file in.
content (str): The content to write to the file.
Returns:
str: The result of the file editing operation.
"""
with open(file_path, mode) as file:
file.write(content)
return f"File {file_path} edited successfully."
# Agent
agent = Agent(
agent_name="Devin",
system_prompt=(
"Autonomous agent that can interact with humans and other"
" agents. Be Helpful and Kind. Use the tools provided to"
" assist the user. Return all code in markdown format."
),
llm=model,
max_loops="auto",
autosave=True,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
interactive=True,
tools=[terminal, browser, file_editor, create_file],
streaming=True,
long_term_memory=memory,
)
# Run the agent
out = agent(
"Create a CSV file with the latest tax rates for C corporations in the following ten states and the District of Columbia: Alabama, California, Florida, Georgia, Illinois, New York, North Carolina, Ohio, Texas, and Washington."
)
print(out)

@ -0,0 +1,629 @@
import os
from fastapi import (
FastAPI,
HTTPException,
status,
Query,
BackgroundTasks,
)
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import Optional, Dict, Any, List
from loguru import logger
import uvicorn
from datetime import datetime, timedelta
from uuid import UUID, uuid4
from enum import Enum
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
import traceback
from swarms import Agent
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Configure Loguru
logger.add(
"logs/api_{time}.log",
rotation="500 MB",
retention="10 days",
level="INFO",
format="{time} {level} {message}",
backtrace=True,
diagnose=True,
)
class AgentStatus(str, Enum):
"""Enum for agent status."""
IDLE = "idle"
PROCESSING = "processing"
ERROR = "error"
MAINTENANCE = "maintenance"
class AgentConfig(BaseModel):
"""Configuration model for creating a new agent."""
agent_name: str = Field(..., description="Name of the agent")
model_name: str = Field(
    default="gpt-4",
    description="Name of the LLM to use, as provided by litellm",
)
description: str = Field(
    default="", description="Description of the agent's purpose"
)
system_prompt: str = Field(
    ..., description="System prompt for the agent"
)
temperature: float = Field(
default=0.1,
ge=0.0,
le=2.0,
description="Temperature for the model",
)
max_loops: int = Field(
default=1, ge=1, description="Maximum number of loops"
)
autosave: bool = Field(
default=True, description="Enable autosave"
)
dashboard: bool = Field(
default=False, description="Enable dashboard"
)
verbose: bool = Field(
default=True, description="Enable verbose output"
)
dynamic_temperature_enabled: bool = Field(
default=True, description="Enable dynamic temperature"
)
user_name: str = Field(
default="default_user", description="Username for the agent"
)
retry_attempts: int = Field(
default=1, ge=1, description="Number of retry attempts"
)
context_length: int = Field(
default=200000, ge=1000, description="Context length"
)
output_type: str = Field(
default="string", description="Output type (string or json)"
)
streaming_on: bool = Field(
default=False, description="Enable streaming"
)
tags: List[str] = Field(
default_factory=list,
description="Tags for categorizing the agent",
)
class AgentUpdate(BaseModel):
"""Model for updating agent configuration."""
description: Optional[str] = None
system_prompt: Optional[str] = None
temperature: Optional[float] = None
max_loops: Optional[int] = None
tags: Optional[List[str]] = None
status: Optional[AgentStatus] = None
class AgentSummary(BaseModel):
"""Summary model for agent listing."""
agent_id: UUID
agent_name: str
description: str
created_at: datetime
last_used: datetime
total_completions: int
tags: List[str]
status: AgentStatus
class AgentMetrics(BaseModel):
"""Model for agent performance metrics."""
total_completions: int
average_response_time: float
error_rate: float
last_24h_completions: int
total_tokens_used: int
uptime_percentage: float
success_rate: float
peak_tokens_per_minute: int
class CompletionRequest(BaseModel):
"""Model for completion requests."""
prompt: str = Field(..., description="The prompt to process")
agent_id: UUID = Field(..., description="ID of the agent to use")
max_tokens: Optional[int] = Field(
None, description="Maximum tokens to generate"
)
temperature_override: Optional[float] = None
stream: bool = Field(
default=False, description="Enable streaming response"
)
class CompletionResponse(BaseModel):
"""Model for completion responses."""
agent_id: UUID
response: str
metadata: Dict[str, Any]
timestamp: datetime
processing_time: float
token_usage: Dict[str, int]
class AgentStore:
"""Enhanced store for managing agents."""
def __init__(self):
self.agents: Dict[UUID, Agent] = {}
self.agent_metadata: Dict[UUID, Dict[str, Any]] = {}
self.executor = ThreadPoolExecutor(max_workers=4)
self._ensure_directories()
def _ensure_directories(self):
"""Ensure required directories exist."""
Path("logs").mkdir(exist_ok=True)
Path("states").mkdir(exist_ok=True)
async def create_agent(self, config: AgentConfig) -> UUID:
"""Create a new agent with the given configuration."""
try:
agent = Agent(
agent_name=config.agent_name,
system_prompt=config.system_prompt,
model_name=config.model_name,
max_loops=config.max_loops,
autosave=config.autosave,
dashboard=config.dashboard,
verbose=config.verbose,
dynamic_temperature_enabled=config.dynamic_temperature_enabled,
saved_state_path=f"states/{config.agent_name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
user_name=config.user_name,
retry_attempts=config.retry_attempts,
context_length=config.context_length,
return_step_meta=True,
output_type="str",
streaming_on=config.streaming_on,
)
agent_id = uuid4()
self.agents[agent_id] = agent
self.agent_metadata[agent_id] = {
"description": config.description,
"created_at": datetime.utcnow(),
"last_used": datetime.utcnow(),
"total_completions": 0,
"tags": config.tags,
"total_tokens": 0,
"error_count": 0,
"response_times": [],
"status": AgentStatus.IDLE,
"start_time": datetime.utcnow(),
"downtime": timedelta(),
"successful_completions": 0,
}
logger.info(f"Created agent with ID: {agent_id}")
return agent_id
except Exception as e:
logger.error(f"Error creating agent: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Failed to create agent: {str(e)}",
)
async def get_agent(self, agent_id: UUID) -> Agent:
"""Retrieve an agent by ID."""
agent = self.agents.get(agent_id)
if not agent:
logger.error(f"Agent not found: {agent_id}")
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Agent {agent_id} not found",
)
return agent
async def update_agent(
self, agent_id: UUID, update: AgentUpdate
) -> None:
"""Update agent configuration."""
agent = await self.get_agent(agent_id)
metadata = self.agent_metadata[agent_id]
if update.system_prompt:
agent.system_prompt = update.system_prompt
if update.temperature is not None:
agent.llm.temperature = update.temperature
if update.max_loops is not None:
agent.max_loops = update.max_loops
if update.tags is not None:
metadata["tags"] = update.tags
if update.description is not None:
metadata["description"] = update.description
if update.status is not None:
metadata["status"] = update.status
if update.status == AgentStatus.MAINTENANCE:
metadata["downtime"] += (
datetime.utcnow() - metadata["last_used"]
)
logger.info(f"Updated agent {agent_id}")
async def list_agents(
self,
tags: Optional[List[str]] = None,
status: Optional[AgentStatus] = None,
) -> List[AgentSummary]:
"""List all agents, optionally filtered by tags and status."""
summaries = []
for agent_id, agent in self.agents.items():
metadata = self.agent_metadata[agent_id]
# Apply filters
if tags and not any(
tag in metadata["tags"] for tag in tags
):
continue
if status and metadata["status"] != status:
continue
summaries.append(
AgentSummary(
agent_id=agent_id,
agent_name=agent.agent_name,
description=metadata["description"],
created_at=metadata["created_at"],
last_used=metadata["last_used"],
total_completions=metadata["total_completions"],
tags=metadata["tags"],
status=metadata["status"],
)
)
return summaries
async def get_agent_metrics(self, agent_id: UUID) -> AgentMetrics:
"""Get performance metrics for an agent."""
metadata = self.agent_metadata[agent_id]
response_times = metadata["response_times"]
# Calculate metrics
total_time = datetime.utcnow() - metadata["start_time"]
uptime = total_time - metadata["downtime"]
uptime_percentage = (
uptime.total_seconds() / total_time.total_seconds()
) * 100
success_rate = (
metadata["successful_completions"]
/ metadata["total_completions"]
* 100
if metadata["total_completions"] > 0
else 0
)
return AgentMetrics(
total_completions=metadata["total_completions"],
average_response_time=(
sum(response_times) / len(response_times)
if response_times
else 0
),
error_rate=(
metadata["error_count"]
/ metadata["total_completions"]
if metadata["total_completions"] > 0
else 0
),
last_24h_completions=sum(
1
for t in response_times
if (datetime.utcnow() - t).days < 1
),
total_tokens_used=metadata["total_tokens"],
uptime_percentage=uptime_percentage,
success_rate=success_rate,
peak_tokens_per_minute=max(
    metadata.get("tokens_per_minute", {}).values(),
    default=0,
),
)
async def clone_agent(
self, agent_id: UUID, new_name: str
) -> UUID:
"""Clone an existing agent with a new name."""
original_agent = await self.get_agent(agent_id)
original_metadata = self.agent_metadata[agent_id]
config = AgentConfig(
agent_name=new_name,
description=f"Clone of {original_agent.agent_name}",
system_prompt=original_agent.system_prompt,
model_name=original_agent.llm.model_name,
temperature=original_agent.llm.temperature,
max_loops=original_agent.max_loops,
tags=original_metadata["tags"],
)
return await self.create_agent(config)
async def delete_agent(self, agent_id: UUID) -> None:
"""Delete an agent."""
if agent_id not in self.agents:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Agent {agent_id} not found",
)
# Clean up any resources
agent = self.agents[agent_id]
if agent.autosave and os.path.exists(agent.saved_state_path):
os.remove(agent.saved_state_path)
del self.agents[agent_id]
del self.agent_metadata[agent_id]
logger.info(f"Deleted agent {agent_id}")
async def process_completion(
self,
agent: Agent,
prompt: str,
agent_id: UUID,
max_tokens: Optional[int] = None,
temperature_override: Optional[float] = None,
) -> CompletionResponse:
"""Process a completion request using the specified agent."""
start_time = datetime.utcnow()
metadata = self.agent_metadata[agent_id]
try:
# Update agent status
metadata["status"] = AgentStatus.PROCESSING
metadata["last_used"] = start_time
# Apply temporary overrides if specified
original_temp = agent.llm.temperature
if temperature_override is not None:
agent.llm.temperature = temperature_override
# Process the completion
response = agent.run(prompt)
# Reset overrides
if temperature_override is not None:
agent.llm.temperature = original_temp
# Update metrics
processing_time = (
datetime.utcnow() - start_time
).total_seconds()
metadata["response_times"].append(processing_time)
metadata["total_completions"] += 1
metadata["successful_completions"] += 1
# Estimate token usage (this is a rough estimate)
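# Heuristic: roughly 1.3 tokens per whitespace-separated word; a real tokenizer will differ.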
prompt_tokens = len(prompt.split()) * 1.3
completion_tokens = len(response.split()) * 1.3
total_tokens = int(prompt_tokens + completion_tokens)
metadata["total_tokens"] += total_tokens
# Update tokens per minute tracking
current_minute = datetime.utcnow().replace(
second=0, microsecond=0
)
if "tokens_per_minute" not in metadata:
metadata["tokens_per_minute"] = {}
metadata["tokens_per_minute"][current_minute] = (
metadata["tokens_per_minute"].get(current_minute, 0)
+ total_tokens
)
return CompletionResponse(
agent_id=agent_id,
response=response,
metadata={
"agent_name": agent.agent_name,
"model_name": agent.llm.model_name,
"temperature": agent.llm.temperature,
},
timestamp=datetime.utcnow(),
processing_time=processing_time,
token_usage={
"prompt_tokens": int(prompt_tokens),
"completion_tokens": int(completion_tokens),
"total_tokens": total_tokens,
},
)
except Exception as e:
metadata["error_count"] += 1
metadata["status"] = AgentStatus.ERROR
logger.error(
f"Error in completion processing: {str(e)}\n{traceback.format_exc()}"
)
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Error processing completion: {str(e)}",
)
finally:
metadata["status"] = AgentStatus.IDLE
class SwarmsAPI:
"""Enhanced API class for Swarms agent integration."""
def __init__(self):
self.app = FastAPI(
title="Swarms Agent API",
description="Production-grade API for Swarms agent interaction",
version="1.0.0",
docs_url="/v1/docs",
redoc_url="/v1/redoc",
)
self.store = AgentStore()
# Configure CORS
self.app.add_middleware(
CORSMiddleware,
allow_origins=[
"*"
], # Configure appropriately for production
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
self._setup_routes()
def _setup_routes(self):
"""Set up API routes."""
@self.app.post("/v1/agent", response_model=Dict[str, UUID])
async def create_agent(config: AgentConfig):
"""Create a new agent with the specified configuration."""
agent_id = await self.store.create_agent(config)
return {"agent_id": agent_id}
@self.app.get("/v1/agents", response_model=List[AgentSummary])
async def list_agents(
tags: Optional[List[str]] = Query(None),
status: Optional[AgentStatus] = None,
):
"""List all agents, optionally filtered by tags and status."""
return await self.store.list_agents(tags, status)
@self.app.patch(
"/v1/agent/{agent_id}", response_model=Dict[str, str]
)
async def update_agent(agent_id: UUID, update: AgentUpdate):
"""Update an existing agent's configuration."""
await self.store.update_agent(agent_id, update)
return {"status": "updated"}
@self.app.get(
"/v1/agent/{agent_id}/metrics",
response_model=AgentMetrics,
)
async def get_agent_metrics(agent_id: UUID):
"""Get performance metrics for a specific agent."""
return await self.store.get_agent_metrics(agent_id)
@self.app.post(
"/v1/agent/{agent_id}/clone",
response_model=Dict[str, UUID],
)
async def clone_agent(agent_id: UUID, new_name: str):
"""Clone an existing agent with a new name."""
new_id = await self.store.clone_agent(agent_id, new_name)
return {"agent_id": new_id}
@self.app.delete("/v1/agent/{agent_id}")
async def delete_agent(agent_id: UUID):
"""Delete an agent."""
await self.store.delete_agent(agent_id)
return {"status": "deleted"}
@self.app.post(
"/v1/agent/completions", response_model=CompletionResponse
)
async def create_completion(
request: CompletionRequest,
background_tasks: BackgroundTasks,
):
"""Process a completion request with the specified agent."""
try:
agent = await self.store.get_agent(request.agent_id)
# Process completion
response = await self.store.process_completion(
agent,
request.prompt,
request.agent_id,
request.max_tokens,
request.temperature_override,
)
# Schedule background cleanup
background_tasks.add_task(
self._cleanup_old_metrics, request.agent_id
)
return response
except Exception as e:
logger.error(f"Error processing completion: {str(e)}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Error processing completion: {str(e)}",
)
@self.app.get("/v1/agent/{agent_id}/status")
async def get_agent_status(agent_id: UUID):
"""Get the current status of an agent."""
metadata = self.store.agent_metadata.get(agent_id)
if not metadata:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Agent {agent_id} not found",
)
return {
"agent_id": agent_id,
"status": metadata["status"],
"last_used": metadata["last_used"],
"total_completions": metadata["total_completions"],
"error_count": metadata["error_count"],
}
async def _cleanup_old_metrics(self, agent_id: UUID):
"""Clean up old metrics data to prevent memory bloat."""
metadata = self.store.agent_metadata.get(agent_id)
if metadata:
# Keep only last 24 hours of response times
cutoff = datetime.utcnow() - timedelta(days=1)
metadata["response_times"] = [
t
for t in metadata["response_times"]
if isinstance(t, (int, float))
and t > cutoff.timestamp()
]
# Clean up old tokens per minute data
if "tokens_per_minute" in metadata:
metadata["tokens_per_minute"] = {
k: v
for k, v in metadata["tokens_per_minute"].items()
if k > cutoff
}
def create_app() -> FastAPI:
"""Create and configure the FastAPI application."""
api = SwarmsAPI()
return api.app
if __name__ == "__main__":
# Configure uvicorn logging
logger.info("API Starting")
uvicorn.run(
"main:create_app",
host="0.0.0.0",
port=8000,
reload=True,
workers=4,
)

@ -0,0 +1,107 @@
import requests
from loguru import logger
import time
# Configure loguru
logger.add(
"api_tests_{time}.log",
rotation="100 MB",
level="DEBUG",
format="{time} {level} {message}",
)
BASE_URL = "http://localhost:8000/v1"
def test_create_agent():
"""Test creating a new agent."""
logger.info("Testing agent creation")
payload = {
"agent_name": "Test Agent",
"system_prompt": "You are a helpful assistant",
"model_name": "gpt-4",
"description": "Test agent",
"tags": ["test"],
}
response = requests.post(f"{BASE_URL}/agent", json=payload)
logger.debug(f"Create response: {response.json()}")
if response.status_code == 200:
logger.success("Successfully created agent")
return response.json()["agent_id"]
else:
logger.error(f"Failed to create agent: {response.text}")
return None
def test_list_agents():
"""Test listing all agents."""
logger.info("Testing agent listing")
response = requests.get(f"{BASE_URL}/agents")
logger.debug(f"List response: {response.json()}")
if response.status_code == 200:
logger.success(f"Found {len(response.json())} agents")
else:
logger.error(f"Failed to list agents: {response.text}")
def test_completion(agent_id):
"""Test running a completion."""
logger.info("Testing completion")
payload = {
"prompt": "What is the weather like today?",
"agent_id": agent_id,
}
response = requests.post(
f"{BASE_URL}/agent/completions", json=payload
)
logger.debug(f"Completion response: {response.json()}")
if response.status_code == 200:
logger.success("Successfully got completion")
else:
logger.error(f"Failed to get completion: {response.text}")
def test_delete_agent(agent_id):
"""Test deleting an agent."""
logger.info("Testing agent deletion")
response = requests.delete(f"{BASE_URL}/agent/{agent_id}")
logger.debug(f"Delete response: {response.json()}")
if response.status_code == 200:
logger.success("Successfully deleted agent")
else:
logger.error(f"Failed to delete agent: {response.text}")
def run_tests():
"""Run all tests in sequence."""
logger.info("Starting API tests")
# Create agent and get ID
agent_id = test_create_agent()
if not agent_id:
logger.error("Cannot continue tests without agent ID")
return
# Wait a bit for agent to be ready
time.sleep(1)
# Run other tests
test_list_agents()
test_completion(agent_id)
test_delete_agent(agent_id)
logger.info("Tests completed")
if __name__ == "__main__":
run_tests()

@ -1,125 +0,0 @@
import os
import json
from pydantic import BaseModel, Field
from swarm_models import OpenAIFunctionCaller
from dotenv import load_dotenv
from typing import Any, List
load_dotenv()
class Flow(BaseModel):
id: str = Field(
description="A unique identifier for the flow. This should be a short, descriptive name that captures the main purpose of the flow. Use - to separate words and make it lowercase."
)
plan: str = Field(
description="The comprehensive plan detailing how the flow will accomplish the given task. This should include the high-level strategy, key milestones, and expected outcomes. The plan should clearly articulate what the overall goal is, what success looks like, and how progress will be measured throughout execution."
)
failures_prediction: str = Field(
description="A thorough analysis of potential failure modes and mitigation strategies. This should identify technical risks, edge cases, error conditions, and possible points of failure in the flow. For each identified risk, include specific preventive measures, fallback approaches, and recovery procedures to ensure robustness and reliability."
)
rationale: str = Field(
description="The detailed reasoning and justification for why this specific flow design is optimal for the given task. This should explain the key architectural decisions, tradeoffs considered, alternatives evaluated, and why this approach best satisfies the requirements. Include both technical and business factors that influenced the design."
)
flow: str = Field(
description="The precise execution flow defining how agents interact and coordinate. Use -> to indicate sequential processing where one agent must complete before the next begins (e.g. agent1 -> agent2 -> agent3). Use , to indicate parallel execution where multiple agents can run simultaneously (e.g. agent1 -> agent2, agent3, agent4). The flow should clearly show the dependencies and parallelization opportunities between agents. You must only use the agent names provided in the task description do not make up new agent names and do not use any other formatting."
)
class AgentRearrangeBuilder(BaseModel):
name: str = Field(
description="The name of the swarm. This should be a short, descriptive name that captures the main purpose of the flow."
)
description: str = Field(
description="A brief description of the swarm. This should be a concise summary of the main purpose of the swarm."
)
flows: List[Flow] = Field(
description="A list of flows that are optimal for the given task. Each flow should be a detailed plan, failure prediction, rationale, and execution flow."
)
swarm_flow: str = Field(
description="The flow defining how each team should communicate and coordinate with eachother.Use -> to indicate sequential processing where one id must complete before the next begins (e.g. team1 -> team2 -> team3). Use , to indicate parallel execution where multiple teams can run simultaneously (e.g. team1 -> team2, team3, team4). The flow should clearly show the dependencies and parallelization opportunities between teams. You must only use the team names provided in the id do not make up new team names and do not use any other formatting."
)
# def flow_generator(task: str) -> Flow:
def setup_model(base_model: BaseModel = Flow):
model = OpenAIFunctionCaller(
system_prompt="""You are an expert flow architect specializing in designing multi-agent workflows. Your role is to analyze tasks and create optimal execution flows that coordinate multiple AI agents effectively.
When given a task, you will:
1. Develop a comprehensive plan breaking down the task into logical steps
2. Carefully consider potential failure modes and build in robust error handling
3. Provide clear rationale for your architectural decisions and agent coordination strategy
4. Design a precise flow showing both sequential dependencies and parallel execution opportunities
Your flows should maximize:
- Efficiency through smart parallelization
- Reliability through thorough error handling
- Clarity through well-structured agent interactions
- Effectiveness through strategic task decomposition
Format your flow using -> for sequential steps and , for parallel execution. Be specific about agent roles and interactions.
""",
base_model=base_model,
openai_api_key=os.getenv("OPENAI_API_KEY"),
temperature=0.5,
)
return model
def generate_flow(task: str) -> Any:
model = setup_model()
flow = model.run(task)
print(json.dumps(flow, indent=4))
return flow
def generate_agent_rearrange(task: str) -> Any:
model = setup_model(base_model=AgentRearrangeBuilder)
flow = model.run(task)
print(json.dumps(flow, indent=4))
return flow
if __name__ == "__main__":
# Basic patient diagnosis flow
# generate_flow("Diagnose a patient's symptoms and create a treatment plan. You have 3 agents to use: Diagnostician, Specialist, CareCoordinator")
# # Complex multi-condition case
# generate_flow("""Handle a complex patient case with multiple chronic conditions requiring ongoing care coordination.
# The patient has diabetes, heart disease, and chronic pain.
# Create a comprehensive diagnosis and treatment plan.
# You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
# # Emergency trauma case
# generate_flow("""Process an emergency trauma case requiring rapid diagnosis and immediate intervention.
# Patient presents with multiple injuries from a car accident.
# Develop immediate and long-term treatment plans.
# You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
# # Long-term care planning
# generate_flow("""Design a 6-month care plan for an elderly patient with declining cognitive function.
# Include regular assessments, specialist consultations, and family coordination.
# You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
# # Mental health assessment
# generate_flow("""Conduct a comprehensive mental health assessment and develop treatment strategy.
# Patient shows signs of depression and anxiety with possible underlying conditions.
# Create both immediate intervention and long-term support plans.
# You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")
generate_agent_rearrange(
"""Build a complete automated hedge fund system.
Design and implement a sophisticated trading strategy incorporating multiple asset classes,
risk management protocols, and automated execution systems.
The system should include:
- Market analysis and research capabilities
- Portfolio optimization and risk management
- Automated trade execution and settlement
- Compliance and regulatory monitoring
- Performance tracking and reporting
- Fund operations and administration
Create a comprehensive architecture that integrates all these components into a fully automated system."""
)

@ -1,162 +0,0 @@
import os
from dotenv import load_dotenv
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.structs.swarm_router import SwarmRouter
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Define specialized system prompts for each agent
DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. Your expertise includes:
1. Extracting key financial metrics (revenue, EBITDA, growth rates, etc.) from financial statements and reports
2. Identifying and extracting important contract terms from legal documents
3. Pulling out relevant market data from industry reports and analyses
4. Extracting operational KPIs from management presentations and internal reports
5. Identifying and extracting key personnel information from organizational charts and bios
Provide accurate, structured data extracted from various document types to support investment analysis."""
SUMMARIZER_PROMPT = """You are an expert private equity agent specializing in summarizing complex documents. Your core competencies include:
1. Distilling lengthy financial reports into concise executive summaries
2. Summarizing legal documents, highlighting key terms and potential risks
3. Condensing industry reports to capture essential market trends and competitive dynamics
4. Summarizing management presentations to highlight key strategic initiatives and projections
5. Creating brief overviews of technical documents, emphasizing critical points for non-technical stakeholders
Deliver clear, concise summaries that capture the essence of various documents while highlighting information crucial for investment decisions."""
FINANCIAL_ANALYST_PROMPT = """You are a specialized private equity agent focused on financial analysis. Your key responsibilities include:
1. Analyzing historical financial statements to identify trends and potential issues
2. Evaluating the quality of earnings and potential adjustments to EBITDA
3. Assessing working capital requirements and cash flow dynamics
4. Analyzing capital structure and debt capacity
5. Evaluating financial projections and underlying assumptions
Provide thorough, insightful financial analysis to inform investment decisions and valuation."""
MARKET_ANALYST_PROMPT = """You are a highly skilled private equity agent specializing in market analysis. Your expertise covers:
1. Analyzing industry trends, growth drivers, and potential disruptors
2. Evaluating competitive landscape and market positioning
3. Assessing market size, segmentation, and growth potential
4. Analyzing customer dynamics, including concentration and loyalty
5. Identifying potential regulatory or macroeconomic impacts on the market
Deliver comprehensive market analysis to assess the attractiveness and risks of potential investments."""
OPERATIONAL_ANALYST_PROMPT = """You are an expert private equity agent focused on operational analysis. Your core competencies include:
1. Evaluating operational efficiency and identifying improvement opportunities
2. Analyzing supply chain and procurement processes
3. Assessing sales and marketing effectiveness
4. Evaluating IT systems and digital capabilities
5. Identifying potential synergies in merger or add-on acquisition scenarios
Provide detailed operational analysis to uncover value creation opportunities and potential risks."""
# Initialize specialized agents
data_extractor_agent = Agent(
agent_name="Data-Extractor",
system_prompt=DATA_EXTRACTOR_PROMPT,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="data_extractor_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
summarizer_agent = Agent(
agent_name="Document-Summarizer",
system_prompt=SUMMARIZER_PROMPT,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="summarizer_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
financial_analyst_agent = Agent(
agent_name="Financial-Analyst",
system_prompt=FINANCIAL_ANALYST_PROMPT,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="financial_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
market_analyst_agent = Agent(
agent_name="Market-Analyst",
system_prompt=MARKET_ANALYST_PROMPT,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="market_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
operational_analyst_agent = Agent(
agent_name="Operational-Analyst",
system_prompt=OPERATIONAL_ANALYST_PROMPT,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="operational_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
# Initialize the SwarmRouter
router = SwarmRouter(
name="pe-document-analysis-swarm",
description="Analyze documents for private equity due diligence and investment decision-making",
max_loops=1,
agents=[
data_extractor_agent,
summarizer_agent,
# financial_analyst_agent,
# market_analyst_agent,
# operational_analyst_agent,
],
swarm_type="auto", # or "SequentialWorkflow", "ConcurrentWorkflow", etc.
# auto_generate_prompts=True,
)
# Example usage
if __name__ == "__main__":
# Run a comprehensive private equity document analysis task
result = router.run(
"Where is the best place to find template term sheets for series A startups. Provide links and references"
)
print(result)
# Retrieve and print logs
for log in router.get_logs():
print(f"{log.timestamp} - {log.level}: {log.message}")

@ -0,0 +1,898 @@
import io
import struct
import wave
from dataclasses import dataclass
from enum import Enum, auto
from typing import Any, Dict, List, Optional, Tuple, Union
import magic  # python-magic, required by ModalityDetector
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from loguru import logger
from PIL import Image
from torch import Tensor
@dataclass
class ModelConfig:
"""Configuration for the enhanced BytePredictor model."""
vocab_size: int = 256 # Standard byte range
hidden_size: int = 1024
num_layers: int = 12
num_key_value_heads: int = 8 # For multi-query attention
num_query_heads: int = 32 # More query heads than kv heads
dropout: float = 0.1
max_sequence_length: int = 8192
rope_theta: float = 10000.0
layer_norm_eps: float = 1e-5
vocab_parallel: bool = False
qk_norm: bool = True
qk_norm_scale: Optional[float] = None
attention_bias: bool = False
class MultiQueryAttention(nn.Module):
"""Fixed Multi-Query Attention implementation."""
def __init__(self, config: ModelConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.num_query_heads = config.num_query_heads
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // config.num_query_heads
self.qk_scale = config.qk_norm_scale or (self.head_dim**-0.5)
self.q_proj = nn.Linear(
config.hidden_size, config.num_query_heads * self.head_dim
)
self.k_proj = nn.Linear(
config.hidden_size,
config.num_key_value_heads * self.head_dim,
)
self.v_proj = nn.Linear(
config.hidden_size,
config.num_key_value_heads * self.head_dim,
)
self.o_proj = nn.Linear(
config.num_query_heads * self.head_dim, config.hidden_size
)
self.qk_norm = config.qk_norm
if self.qk_norm:
self.q_norm = nn.LayerNorm(self.head_dim)
self.k_norm = nn.LayerNorm(self.head_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
batch_size, seq_length, _ = hidden_states.shape
# Project and reshape
q = self.q_proj(hidden_states)
k = self.k_proj(hidden_states)
v = self.v_proj(hidden_states)
# Reshape to [seq_len, batch, heads, head_dim]
q = q.view(
batch_size,
seq_length,
self.num_query_heads,
self.head_dim,
).permute(1, 0, 2, 3)
k = k.view(
batch_size,
seq_length,
self.num_key_value_heads,
self.head_dim,
).permute(1, 0, 2, 3)
v = v.view(
batch_size,
seq_length,
self.num_key_value_heads,
self.head_dim,
).permute(1, 0, 2, 3)
# Apply rotary embeddings
# q, k = self.rotary(q, k, seq_length)
# Apply QK normalization if enabled
if self.qk_norm:
q = self.q_norm(q)
k = self.k_norm(k)
# Handle MQA head expansion
if self.num_key_value_heads != self.num_query_heads:
k = k.repeat_interleave(
self.num_query_heads // self.num_key_value_heads,
dim=2,
)
v = v.repeat_interleave(
self.num_query_heads // self.num_key_value_heads,
dim=2,
)
# Compute attention
# Reshape for matmul: [batch, heads, seq_length, head_dim]
q = q.permute(1, 2, 0, 3)
k = k.permute(1, 2, 0, 3)
v = v.permute(1, 2, 0, 3)
attn_weights = (
torch.matmul(q, k.transpose(-2, -1)) * self.qk_scale
)
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = F.softmax(attn_weights, dim=-1)
output = torch.matmul(attn_weights, v)
# Reshape back to [batch, seq_length, hidden_size]
output = (
output.transpose(1, 2)
.contiguous()
.view(batch_size, seq_length, -1)
)
output = self.o_proj(output)
return output
class EnhancedBytePredictor(nn.Module):
"""Enhanced byte prediction model with state-of-the-art features."""
def __init__(self, config: ModelConfig):
super().__init__()
self.config = config
# Token embeddings
self.tok_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size
)
# Transformer layers
self.layers = nn.ModuleList(
[
nn.ModuleDict(
{
"attention": MultiQueryAttention(config),
"attention_norm": nn.LayerNorm(
config.hidden_size,
eps=config.layer_norm_eps,
),
"feed_forward": nn.Sequential(
nn.Linear(
config.hidden_size,
4 * config.hidden_size,
),
nn.GELU(),
nn.Linear(
4 * config.hidden_size,
config.hidden_size,
),
),
"feed_forward_norm": nn.LayerNorm(
config.hidden_size,
eps=config.layer_norm_eps,
),
}
)
for _ in range(config.num_layers)
]
)
self.norm = nn.LayerNorm(
config.hidden_size, eps=config.layer_norm_eps
)
self.output = nn.Linear(
config.hidden_size, config.vocab_size, bias=False
)
# Initialize weights
self.apply(self._init_weights)
def _init_weights(self, module: nn.Module) -> None:
"""Initialize weights with scaled normal distribution."""
if isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
def forward(
self,
input_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Forward pass of the model.
Args:
input_ids: Tensor of shape (batch_size, sequence_length)
attention_mask: Optional attention mask
Returns:
Tensor of logits with shape (batch_size, sequence_length, vocab_size)
"""
hidden_states = self.tok_embeddings(input_ids)
# Create causal mask if needed
if attention_mask is None:
attention_mask = torch.triu(
torch.ones(
(input_ids.size(1), input_ids.size(1)),
device=input_ids.device,
dtype=torch.bool,
),
diagonal=1,
)
attention_mask = attention_mask.masked_fill(
attention_mask == 1, float("-inf")
)
# Apply transformer layers
for layer in self.layers:
# Attention block
hidden_states = hidden_states + layer["attention"](
layer["attention_norm"](hidden_states), attention_mask
)
# Feed-forward block
hidden_states = hidden_states + layer["feed_forward"](
layer["feed_forward_norm"](hidden_states)
)
hidden_states = self.norm(hidden_states)
logits = self.output(hidden_states)
return logits
def compute_loss(
self,
input_ids: torch.Tensor,
target_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Compute cross entropy loss.
Args:
input_ids: Input token ids
target_ids: Target token ids
attention_mask: Optional attention mask
Returns:
Loss value
"""
logits = self(input_ids, attention_mask)
loss = F.cross_entropy(
rearrange(logits, "b s v -> (b s) v"),
rearrange(target_ids, "b s -> (b s)"),
)
return loss
@torch.no_grad()
def _generate(
self,
input_ids: torch.Tensor,
max_new_tokens: int = 100,
temperature: float = 1.0,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: float = 1.0,
) -> torch.Tensor:
"""
Generate new tokens autoregressively.
Args:
input_ids: Starting sequence
max_new_tokens: Number of tokens to generate
temperature: Sampling temperature
top_k: K for top-k sampling
top_p: P for nucleus sampling
repetition_penalty: Penalty for repeating tokens
Returns:
Generated sequence
"""
batch_size, seq_length = input_ids.shape
generated = input_ids.clone()
for _ in range(max_new_tokens):
if generated.size(1) >= self.config.max_sequence_length:
break
# Forward pass
logits = self(generated)[:, -1, :]
# Apply temperature
logits = logits / temperature
# Apply repetition penalty
if repetition_penalty != 1.0:
for i in range(batch_size):
for token_id in set(generated[i].tolist()):
logits[i, token_id] /= repetition_penalty
# Apply top-k sampling
if top_k is not None:
indices_to_remove = (
logits
< torch.topk(logits, top_k)[0][..., -1, None]
)
logits[indices_to_remove] = float("-inf")
# Apply nucleus (top-p) sampling
if top_p is not None:
sorted_logits, sorted_indices = torch.sort(
logits, descending=True
)
cumulative_probs = torch.cumsum(
F.softmax(sorted_logits, dim=-1), dim=-1
)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
sorted_indices_to_remove[..., 1:] = (
sorted_indices_to_remove[..., :-1].clone()
)
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = torch.zeros_like(
logits, dtype=torch.bool
)
indices_to_remove.scatter_(
1, sorted_indices, sorted_indices_to_remove
)
logits[indices_to_remove] = float("-inf")
# Sample next token
probs = F.softmax(logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)
# Append to sequence
generated = torch.cat([generated, next_token], dim=1)
return generated
def generate(
self,
input_ids: torch.Tensor,
max_new_tokens: int = 100,
temperature: float = 1.0,
top_k: Optional[int] = None,
top_p: Optional[float] = None,
repetition_penalty: float = 1.0,
):
tensor_data = self._generate(
input_ids=input_ids,
max_new_tokens=max_new_tokens,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
)
return tensor_to_data(tensor_data)
# import torch
# from typing import Optional
class DataType(Enum):
TEXT = "text"
IMAGE = "image"
AUDIO = "audio"
VIDEO = "video"
BINARY = "binary"
class ByteDetokenizer:
"""Utility class for converting model output bytes back to original data formats."""
@staticmethod
def tensor_to_bytes(tensor: torch.Tensor) -> bytes:
"""Convert model output tensor to bytes."""
# Convert logits/probabilities to byte values
if tensor.dim() > 1 and tensor.is_floating_point():
# If we have logits, take the argmax to get byte indices
byte_indices = tensor.argmax(dim=-1)
else:
# Already byte indices (e.g. generated token ids)
byte_indices = tensor
# Convert to Python bytes
return bytes(
byte_indices.flatten().cpu().numpy().astype(np.uint8).tolist()
)
@staticmethod
def decode_text(byte_sequence: bytes) -> str:
"""Convert bytes to text."""
try:
return byte_sequence.decode("utf-8")
except UnicodeDecodeError:
# Try with error handling
return byte_sequence.decode("utf-8", errors="replace")
@staticmethod
def decode_image(
byte_sequence: bytes,
mode: str = "RGB",
size: Optional[tuple] = None,
) -> Image.Image:
"""Convert bytes to image.
Args:
byte_sequence: Raw image bytes
mode: Image mode (RGB, RGBA, L, etc.)
size: Optional tuple of (width, height)
"""
try:
# Try to load as-is first (for standard image formats)
img = Image.open(io.BytesIO(byte_sequence))
if size:
img = img.resize(size)
return img
except Exception:
# If failed, assume raw pixel data
if not size:
# Try to determine size from byte count
pixel_count = len(byte_sequence) // len(mode)
size = (
int(np.sqrt(pixel_count)),
int(np.sqrt(pixel_count)),
)
# Convert raw bytes to pixel array
pixels = np.frombuffer(byte_sequence, dtype=np.uint8)
pixels = pixels.reshape((*size, len(mode)))
return Image.fromarray(pixels, mode=mode)
@staticmethod
def decode_audio(
byte_sequence: bytes,
sample_rate: int = 44100,
channels: int = 2,
sample_width: int = 2,
) -> np.ndarray:
"""Convert bytes to audio samples.
Args:
byte_sequence: Raw audio bytes
sample_rate: Audio sample rate in Hz
channels: Number of audio channels
sample_width: Bytes per sample (1, 2, or 4)
"""
# Determine format string based on sample width
format_str = {
1: "b", # signed char
2: "h", # short
4: "i", # int
}[sample_width]
# Unpack bytes to samples
sample_count = len(byte_sequence) // (channels * sample_width)
samples = struct.unpack(
f"<{sample_count * channels}{format_str}", byte_sequence
)
# Reshape to [samples, channels]
return np.array(samples).reshape(-1, channels)
def decode_data(
self,
model_output: Union[torch.Tensor, bytes],
data_type: DataType,
**kwargs,
) -> Union[str, Image.Image, np.ndarray, bytes]:
"""Main method to decode model output to desired format.
Args:
model_output: Either tensor from model or raw bytes
data_type: Type of data to decode to
**kwargs: Additional parameters for specific decoders
Returns:
Decoded data in specified format
"""
# Convert tensor to bytes if needed
if isinstance(model_output, torch.Tensor):
byte_sequence = self.tensor_to_bytes(model_output)
else:
byte_sequence = model_output
# Decode based on type
if data_type == DataType.TEXT:
return self.decode_text(byte_sequence)
elif data_type == DataType.IMAGE:
return self.decode_image(byte_sequence, **kwargs)
elif data_type == DataType.AUDIO:
return self.decode_audio(byte_sequence, **kwargs)
elif data_type == DataType.VIDEO:
raise NotImplementedError(
"Video decoding not yet implemented"
)
else: # BINARY
return byte_sequence
# Usage example
class Modality(Enum):
TEXT = auto()
IMAGE = auto()
AUDIO = auto()
VIDEO = auto()
BINARY = auto()
MULTIMODAL = auto()
@dataclass
class ModalityInfo:
"""Information about detected modality."""
modality: Modality
confidence: float
metadata: Dict[str, Any]
sub_modalities: Optional[List["ModalityInfo"]] = None
class ModalityDetector:
"""Detects data modalities from byte sequences."""
# Common file signatures (magic numbers)
SIGNATURES = {
# Images
b"\xFF\xD8\xFF": "JPEG",
b"\x89PNG\r\n\x1a\n": "PNG",
b"GIF87a": "GIF",
b"GIF89a": "GIF",
b"RIFF": "WEBP",
# Audio
b"RIFF....WAVE": "WAV",
b"ID3": "MP3",
b"\xFF\xFB": "MP3",
b"OggS": "OGG",
# Video
b"\x00\x00\x00\x18ftypmp42": "MP4",
b"\x00\x00\x00\x1Cftypav01": "MP4",
b"\x1A\x45\xDF\xA3": "WEBM",
}
def __init__(self):
self.magic = magic.Magic(mime=True)
def _check_text_probability(self, data: bytes) -> float:
"""Estimate probability that data is text."""
# Check if data is valid UTF-8
try:
data.decode("utf-8")
# Count printable ASCII characters
printable = sum(1 for b in data if 32 <= b <= 126)
return printable / len(data)
except UnicodeDecodeError:
return 0.0
def _check_image_validity(self, data: bytes) -> Tuple[bool, Dict]:
"""Check if data is a valid image and extract metadata."""
try:
with io.BytesIO(data) as bio:
img = Image.open(bio)
return True, {
"format": img.format,
"size": img.size,
"mode": img.mode,
}
except Exception:
return False, {}
def _check_audio_validity(self, data: bytes) -> Tuple[bool, Dict]:
"""Check if data is valid audio and extract metadata."""
try:
with io.BytesIO(data) as bio:
# Try to parse as WAV
with wave.open(bio) as wav:
return True, {
"channels": wav.getnchannels(),
"sample_width": wav.getsampwidth(),
"framerate": wav.getframerate(),
"frames": wav.getnframes(),
}
except Exception:
# Check for other audio signatures
for sig in [b"ID3", b"\xFF\xFB", b"OggS"]:
if data.startswith(sig):
return True, {"format": "compressed_audio"}
return False, {}
def _detect_boundaries(
self, data: bytes
) -> List[Tuple[int, int, Modality]]:
"""Detect boundaries between different modalities in the data."""
boundaries = []
current_pos = 0
while current_pos < len(data):
# Look for known signatures
for sig, format_type in self.SIGNATURES.items():
if data[current_pos:].startswith(sig):
# Found a signature, determine its length
if format_type in ["JPEG", "PNG", "GIF"]:
# Find image end
try:
with io.BytesIO(
data[current_pos:]
) as bio:
img = Image.open(bio)
img.verify()
size = bio.tell()
boundaries.append(
(
current_pos,
current_pos + size,
Modality.IMAGE,
)
)
current_pos += size
continue
except Exception:
pass
# Check for text sections
text_prob = self._check_text_probability(
data[current_pos : current_pos + 1024]
)
if text_prob > 0.8:
# Look for end of text section
end_pos = current_pos + 1
while end_pos < len(data):
if (
self._check_text_probability(
data[end_pos : end_pos + 32]
)
< 0.5
):
break
end_pos += 1
boundaries.append(
(current_pos, end_pos, Modality.TEXT)
)
current_pos = end_pos
continue
current_pos += 1
return boundaries
def detect_modality(self, data: bytes) -> ModalityInfo:
"""Detect modality of byte sequence."""
# First check for single modality
mime_type = self.magic.from_buffer(data)
# Check text
text_prob = self._check_text_probability(data)
if text_prob > 0.9:
return ModalityInfo(
modality=Modality.TEXT,
confidence=text_prob,
metadata={"mime_type": mime_type},
)
# Check image
is_image, image_meta = self._check_image_validity(data)
if is_image:
return ModalityInfo(
modality=Modality.IMAGE,
confidence=1.0,
metadata={**image_meta, "mime_type": mime_type},
)
# Check audio
is_audio, audio_meta = self._check_audio_validity(data)
if is_audio:
return ModalityInfo(
modality=Modality.AUDIO,
confidence=1.0,
metadata={**audio_meta, "mime_type": mime_type},
)
# Check for multimodal content
boundaries = self._detect_boundaries(data)
if len(boundaries) > 1:
sub_modalities = []
for start, end, modality in boundaries:
chunk_data = data[start:end]
sub_info = self.detect_modality(chunk_data)
if sub_info.modality != Modality.BINARY:
sub_modalities.append(sub_info)
if sub_modalities:
return ModalityInfo(
modality=Modality.MULTIMODAL,
confidence=0.8,
metadata={"mime_type": "multipart/mixed"},
sub_modalities=sub_modalities,
)
# Default to binary
return ModalityInfo(
modality=Modality.BINARY,
confidence=0.5,
metadata={"mime_type": mime_type},
)
def split_modalities(
self, data: bytes
) -> List[Tuple[Modality, bytes, Dict]]:
"""Split multimodal data into separate modalities."""
boundaries = self._detect_boundaries(data)
result = []
for start, end, modality in boundaries:
chunk = data[start:end]
info = self.detect_modality(chunk)
result.append((modality, chunk, info.metadata))
return result
class AutoDetectBytesDecoder:
"""Decoder that automatically detects and decodes different modalities."""
def __init__(self):
self.detector = ModalityDetector()
self.text_decoder = ByteDetokenizer() # From previous example
def decode(
self, data: bytes
) -> Union[str, Image.Image, np.ndarray, List[Any]]:
"""Automatically detect and decode byte sequence."""
info = self.detector.detect_modality(data)
if info.modality == Modality.MULTIMODAL:
# Handle multimodal content
parts = self.detector.split_modalities(data)
return [
self.decode(chunk) for modality, chunk, _ in parts
]
if info.modality == Modality.TEXT:
return self.text_decoder.decode_text(data)
elif info.modality == Modality.IMAGE:
return self.text_decoder.decode_image(data)
elif info.modality == Modality.AUDIO:
return self.text_decoder.decode_audio(data)
else:
return data
# # Example usage
# def demo_auto_detection():
# """Demonstrate auto modality detection."""
# # Create mixed content
# text = "Hello, World!".encode('utf-8')
# # Create a small test image
# img = Image.new('RGB', (100, 100), color='red')
# img_bytes = io.BytesIO()
# img.save(img_bytes, format='PNG')
# # Combine into multimodal content
# mixed_content = text + img_bytes.getvalue()
# # Initialize decoder
# decoder = AutoDetectBytesDecoder()
# # Decode
# result = decoder.decode(mixed_content)
# if isinstance(result, list):
# print("Detected multimodal content:")
# for i, part in enumerate(result):
# print(f"Part {i+1}: {type(part)}")
# if __name__ == "__main__":
# demo_auto_detection()
def tensor_to_data(tensor: Tensor):
byte_sequence = ByteDetokenizer.tensor_to_bytes(tensor)
# Initialize auto-detector
decoder = AutoDetectBytesDecoder()
# Decode with automatic detection
result = decoder.decode(byte_sequence)
return result
def demo_byte_predictor():
"""Demo with smaller dimensions to test."""
# Initialize model configuration with adjusted dimensions
config = ModelConfig(
vocab_size=256,
hidden_size=128, # Smaller for testing
num_layers=2, # Fewer layers for testing
num_key_value_heads=2,
num_query_heads=4,
dropout=0.1,
max_sequence_length=1024,
)
# Initialize model
model = EnhancedBytePredictor(config)
logger.info("Model initialized")
# Move to GPU if available
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
)
model = model.to(device)
logger.info(f"Using device: {device}")
# Create sample input data
batch_size = 2
seq_length = 16 # Shorter sequence for testing
input_ids = torch.randint(
0, config.vocab_size, (batch_size, seq_length), device=device
)
logger.info(f"Created input tensor of shape: {input_ids.shape}")
# Test forward pass
try:
logits = model(input_ids)
logger.info(
f"Forward pass successful! Output shape: {logits.shape}"
)
# Test loss computation
target_ids = torch.randint(
0,
config.vocab_size,
(batch_size, seq_length),
device=device,
)
loss = model.compute_loss(input_ids, target_ids)
logger.info(
f"Loss computation successful! Loss value: {loss.item():.4f}"
)
# Test generation
prompt = torch.randint(
0,
config.vocab_size,
(1, 4), # Very short prompt for testing
device=device,
)
generated = model.generate(
prompt, max_new_tokens=8, temperature=0.8, top_k=50
)
logger.info(
f"Generation successful! Decoded output type: {type(generated).__name__}"
)
except Exception as e:
logger.error(f"Error during execution: {str(e)}")
raise
if __name__ == "__main__":
# Set up logging
# logger.remove() # Remove default handler
# logger.add(sys.stderr, format="<green>{time:HH:mm:ss}</green> | {level} | {message}")
demo_byte_predictor()

@ -1,6 +1,4 @@
/* Further customization as needed */
.md-typeset__table {
min-width: 100%;
@ -10,9 +8,20 @@
display: table;
}
/*
:root {
--md-primary-fg-color: #EE0F0F;
--md-primary-fg-color--light: #ECB7B7;
--md-primary-fg-color--dark: #90030C;
/* Dark mode
[data-md-color-scheme="slate"] {
--md-default-bg-color: black;
}
.header__ellipsis {
color: black;
}
.md-copyright__highlight {
color: black;
}
.md-header.md-header--shadow {
color: black;
} */

@ -0,0 +1,56 @@
# Swarms Corp Culture Document
## **Our Mission and Purpose**
At Swarms Corp, we believe in more than just building technology. We are advancing humanity by pioneering systems that allow agents—both AI and human—to collaborate seamlessly, working toward the betterment of society and unlocking a future of abundance. Our mission is everything, and each of us is here because we understand the transformative potential of our work. We are not just a company; we are a movement aimed at reshaping the future. We strive to create systems that can tackle the most complex challenges facing humanity, from climate change to inequality, with solutions that are powered by collective intelligence.
Our purpose goes beyond just technological advancement. We are here to create tools that empower people, uplift communities, and set a new standard for what technology can achieve when the mission is clear and the commitment is unwavering. We see every project as a step toward something greater—an abundant future where human potential is limitless and artificial intelligence serves as a powerful ally to mankind.
## **Values We Live By**
### 1. **Hard Work: No Stone Unturned**
We believe that hard work is the foundation of all great achievements. At Swarms Corp, each member of the team is dedicated to putting in the effort required to solve complex problems. This isn't just about long hours—it's about focused, intentional work that leads to breakthroughs. We hold each other to high standards, and we don't shy away from the hard paths when the mission calls for it. Every challenge we face is an opportunity to demonstrate our resilience and our commitment to excellence. We understand that the pursuit of groundbreaking innovation demands not just effort, but a relentless curiosity and the courage to face the unknown.
At Swarms Corp, we respect the grind because we know that transformative change doesn't happen overnight. It requires continuous effort, sacrifice, and an unwavering focus on the task at hand. We celebrate hard work, not because it's difficult, but because we understand its potential to transform ambitious ideas into tangible solutions. We honor the sweat equity that goes into building something that can truly make a difference.
### 2. **Mission Above Everything**
Our mission is our guiding star. Every decision, every task, and every project must align with our overarching purpose: advancing humanity and creating a post-scarcity world. This means sometimes putting the collective goal ahead of individual preferences or comfort. We're here to do something much larger than ourselves, and we prioritize the mission with relentless commitment. We know that personal sacrifices will often be necessary, and we embrace that reality because the rewards of our mission are far greater than any individual gain.
When we say "mission above everything," we mean that our focus is not just on immediate success, but on creating a lasting impact that will benefit future generations. Our mission provides meaning and direction to our daily efforts, and we see every task as a small yet crucial part of our broader vision. We remind ourselves constantly of why we are here and who we are working for—not just our customers or stakeholders, but humanity as a whole.
### 3. **Finding the Shortest Path**
Innovation thrives on efficiency. At Swarms Corp, we value finding the shortest, most effective paths to reach our goals. We encourage everyone to question the status quo, challenge existing processes, and ask, “Is there a better way to do this?” Creativity means finding new routes—whether by leveraging automation, questioning outdated steps, or collaborating to uncover insights faster. We honor those who seek smarter paths over conventional ones. Efficiency is not just about saving time—it's about maximizing impact and ensuring that every ounce of effort drives meaningful progress.
Finding the shortest path is about eliminating unnecessary complexity and focusing our energy on what truly matters. We encourage a culture of continuous improvement, where each team member is empowered to innovate on processes, tools, and methodologies. The shortest path does not mean cutting corners—it means removing obstacles, optimizing workflows, and focusing on high-leverage activities that bring us closer to our mission. We celebrate those who find elegant, effective solutions that others might overlook.
### 4. **Advancing Humanity**
The ultimate goal of everything we do is to elevate humanity. We envision a world where intelligence—both human and artificial—works in harmony to improve lives, solve global challenges, and expand possibilities. This ethos drives our work, whether it's developing advanced AI systems, collaborating with others to push technological boundaries, or thinking deeply about how our creations can impact society in positive ways. Every line of code, every idea, and every strategy should move us closer to this vision.
Advancing humanity means we always think about the ethical implications of our work. We are deeply aware that the technology we create has the power to transform lives, and with that power comes the responsibility to ensure our contributions are always positive. We seek not only to push the boundaries of what technology can do but also to ensure that these advancements are inclusive and equitable. Our focus is on building a future where every person has access to the tools and opportunities they need to thrive.
Our vision is to bridge the gap between technology and humanity's most pressing needs. We aim to democratize intelligence, making it available for everyone, regardless of their background or resources. This is how we advance humanity—not just through technological feats, but by ensuring that our innovations serve the greater good and uplift everyone.
## **Our Way of Working**
- **Radical Ownership**: Each team member is not just a contributor but an owner of their domain. We take full responsibility for outcomes, follow through on our promises, and ensure that nothing falls through the cracks. We don't wait for permission—we act, innovate, and lead. Radical ownership means understanding that our actions have a direct impact on the success of our mission. It's about proactive problem-solving and always stepping up when we see an opportunity to make a difference.
- **Honesty and Respect**: We communicate openly and respect each other's opinions. Tough conversations are a natural part of building something impactful. We face challenges head-on with honesty and directness while maintaining a respectful and supportive atmosphere. Honesty fosters trust, and trust is the foundation of any high-performing team. We value feedback and see it as an essential tool for growth—both for individuals and for the organization as a whole.
- **One Team, One Mission**: Collaboration isn't just encouraged—it's essential. We operate as a swarm, where each agent contributes to a greater goal, learning from each other, sharing knowledge, and constantly iterating together. We celebrate wins collectively and approach obstacles with a unified spirit. No one succeeds alone; every achievement is the result of collective effort. We lift each other up, and we know that our strength lies in our unity and shared purpose.
- **The Future is Ours to Shape**: Our work is inherently future-focused. We're not satisfied with simply keeping up—we want to set the pace. Every day, we take one step closer to a future where humanity's potential is limitless, where scarcity is eliminated, and where intelligence—human and machine—advances society. We are not passive participants in the future; we are active shapers of it. We imagine a better tomorrow, and then we take deliberate steps to create it. Our work today will define what the world looks like tomorrow.
## **Expectations**
- **Be Bold**: Don't be afraid to take risks. Innovation requires experimentation, and sometimes that means making mistakes. We support each other in learning from failures and taking smart, calculated risks. Boldness is at the heart of progress. We want every member of Swarms Corp to feel empowered to think outside the box, propose unconventional ideas, and drive innovation. Mistakes are seen not as setbacks, but as opportunities for learning and growth.
- **Keep the Mission First**: Every decision we make should be with our mission in mind. Ask yourself how your work advances the cause of creating an abundant future. The mission is the yardstick against which we measure our efforts, ensuring that everything we do pushes us closer to our ultimate goals. We understand that the mission is bigger than any one of us, and we strive to contribute meaningfully every day.
- **Find Solutions, Not Problems**: While identifying issues is important, we value those who come with solutions. Embrace challenges as opportunities to innovate and find ways to make an impact. We foster a culture of proactive problem-solving where obstacles are seen as opportunities to exercise creativity. If something's broken, we fix it. If there's a better way, we find it. We expect our team members to be solution-oriented, always seeking ways to turn challenges into stepping stones for progress.
- **Think Big, Act Fast**: We're not here to make small changes—we're here to revolutionize how we think about intelligence, automation, and society. Dream big, but work with urgency. We are tackling problems of immense scale, and we must move with intention and speed. Thinking big means envisioning a world that is radically different and better, and acting fast means executing the steps to get us there without hesitation. We value ambition and the courage to move swiftly when the time is right.
## **Our Commitment to You**
Swarms Corp is a place for dreamers and doers, for those who are driven by purpose and are unafraid of the work required to achieve it. We commit to providing you with the tools, support, and environment you need to contribute meaningfully to our mission. We are here to advance humanity together, one agent, one solution, one breakthrough at a time. We pledge to nurture an environment that encourages creativity, collaboration, and bold thinking. Here, you will find a community that celebrates your wins, supports you through challenges, and pushes you to be your best self.
Our commitment also includes ensuring that your voice is heard. We are building the future together, and every perspective matters. We strive to create an inclusive space where diversity of thought is welcomed, and where each team member feels valued for their unique contributions. At Swarms Corp, you are not just part of a team—you are part of a mission that aims to change the course of humanity for the better. Together, we'll make the impossible possible, one breakthrough at a time.

@ -62,13 +62,14 @@ theme:
logo: assets/img/swarms-logo.png
palette:
- scheme: default
primary: black
primary: white # White background
accent: white # White accents for interactive elements
toggle:
icon: material/brightness-7
name: Switch to dark mode
# Palette toggle for dark mode
- scheme: slate
- scheme: slate # Optional: lighter shades for accessibility
primary: black
accent: black
toggle:
icon: material/brightness-4
name: Switch to light mode
@ -80,6 +81,11 @@ theme:
- navigation.expand
- navigation.top
- announce.dismiss
font:
text: "Fira Sans" # Clean and readable text
code: "Fira Code" # Modern look for code snippets
# Extensions
markdown_extensions:
- abbr
@ -138,6 +144,7 @@ nav:
- Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md"
- Onboarding:
- Installation: "swarms/install/install.md"
- Environment Configuration: "swarms/install/workspace_manager.md"
- Quickstart: "swarms/install/quickstart.md"
- Swarms CLI: "swarms/cli/main.md"
# - Swarms + Docker:
@ -257,6 +264,7 @@ nav:
- An Analysis on Prompting Strategies: "swarms/prompts/overview.md"
- Managing Prompts in Production: "swarms/prompts/main.md"
- Corporate:
- Culture: "corporate/culture.md"
- Hiring: "corporate/hiring.md"
- Swarms Goals & Milestone Tracking; A Vision for 2024 and Beyond: "corporate/2024_2025_goals.md"
- Clusterops:

@ -26,7 +26,7 @@ jinja2~=3.1
markdown~=3.7
mkdocs-material-extensions~=1.3
pygments~=2.18
pymdown-extensions~=10.11
pymdown-extensions~=10.12
# Requirements for plugins
babel~=2.16

@ -0,0 +1,59 @@
# Swarms 6.0.0 - Performance & Reliability Update 🚀
We're excited to announce the release of Swarms 6.0.0, bringing significant improvements to performance, reliability, and developer experience. This release focuses on streamlining core functionalities while enhancing the overall stability of the framework.
## 📦 Installation
```bash
pip3 install -U swarms
```
## 🌟 Highlights
### Agent Enhancements
- **Improved RAG Performance**: Significant improvements to Retrieval-Augmented Generation capabilities
- **Enhanced Prompt Generation**: Auto-generate prompt now incorporates name, description, and system prompt for more contextual interactions
- **Streamlined Architecture**: Cleaned up unused code for better performance and maintainability
- **Simplified State Management**: Consolidated state management methods into a single `load()` function
### Tools & Execution
- **Optimized Environment Management**: Fixed multiple environment instantiation issue
- Environments now initialize once during `__init__`
- **New SwarmRouter Function**: Simplified routing mechanism
- Returns consolidated string output from all agents
- Improved coordination between swarm components
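For example, the router now returns one consolidated string from all agents. A minimal sketch, mirroring the `SwarmRouter` usage elsewhere in this repository (the two agents are assumed to be pre-built `Agent` instances):

```python
from swarms.structs.swarm_router import SwarmRouter

# data_extractor_agent and summarizer_agent are assumed to be existing Agent instances
router = SwarmRouter(
    name="release-demo-swarm",
    description="Demonstrates the consolidated string output",
    max_loops=1,
    agents=[data_extractor_agent, summarizer_agent],
    swarm_type="auto",
)

# run() returns a single consolidated string combining all agent outputs
output = router.run("Summarize the key terms of the attached agreement")
print(output)
```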
## 💪 Performance Improvements
- Faster execution times
- Reduced memory footprint
- More reliable logging system
- Lightweight and efficient codebase
## 🤝 Join Our Community
### We're Hiring!
Join our growing team! We're currently looking for:
- Agent Engineers
- Developer Relations
- Infrastructure Engineers
- And more!
### Get Involved
- ⭐ Star our repository
- 🔄 Fork the project
- 🛠 Submit pull requests
- 🐛 Report issues
- 💡 Share your ideas
### Contact & Support
- 📧 Email: kye@swarms.world
- 🔗 Issues: [GitHub Issues](https://github.com/kyegomez/swarms/issues)
## 🔜 What's Next?
Have ideas for features, bug fixes, or improvements? We'd love to hear from you! Reach out through our GitHub issues or email us directly.
---
*Thank you to all our contributors and users who make Swarms better every day. Together, we're building the future of swarm intelligence.*
#SwarmAI #OpenSource #AI #MachineLearning

@ -127,7 +127,7 @@ Before you begin, ensure you have the following installed:
poetry install --extras "desktop"
```
=== "Using Docker"
=== "Using Docker COMING SOON [DOES NOT WORK YET]"
Docker is an excellent option for creating isolated and reproducible environments, suitable for both development and production.

@ -0,0 +1,186 @@
# Swarms Framework Environment Configuration
This guide details the environment variables used in the Swarms framework for configuration and customization of your agent-based applications.
## Configuration Setup
Create a `.env` file in your project's root directory to configure the Swarms framework. This file will contain all necessary environment variables for customizing your agent's behavior, logging, and analytics.
## Environment Variables
### Core Variables
#### `WORKSPACE_DIR`
- **Purpose**: Defines the directory where all agent states and execution logs are stored
- **Type**: String (path)
- **Default**: `./workspace`
- **Example**:
```bash
WORKSPACE_DIR=/path/to/your/workspace
```
- **Usage**:
- Stores JSON files containing agent states
- Maintains execution history
- Keeps track of agent interactions
- Preserves conversation logs
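In your own code the variable can be resolved like any other environment variable. A minimal sketch (the fallback mirrors the documented default):

```python
import os

# Fall back to the documented default when the variable is unset
workspace_dir = os.getenv("WORKSPACE_DIR", "./workspace")
os.makedirs(workspace_dir, exist_ok=True)
```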
#### `SWARMS_AUTOUPDATE_ON`
- **Purpose**: Controls automatic updates of the Swarms framework
- **Type**: Boolean
- **Default**: `false`
- **Example**:
```bash
SWARMS_AUTOUPDATE_ON=true
```
- **Features**:
- Automatically updates to the latest stable version
- Ensures you have the newest features
- Maintains compatibility with the latest improvements
- Handles dependency updates
- **Considerations**:
- Set to `false` if you need version stability
- Recommended `true` for development environments
- Consider system requirements for auto-updates
- May require restart after updates
### Telemetry Configuration
#### `USE_TELEMETRY`
- **Purpose**: Controls whether telemetry data is collected
- **Type**: Boolean
- **Default**: `false`
- **Example**:
```bash
USE_TELEMETRY=true
```
- **Data Collected**:
- Agent performance metrics
- Execution time statistics
- Memory usage
- Error rates
- System health indicators
### Analytics Integration
#### `SWARMS_API_KEY`
- **Purpose**: Authentication key for the Swarms Analytics Suite
- **Type**: String
- **Required**: Yes, for analytics features
- **Example**:
```bash
SWARMS_API_KEY=your_api_key_here
```
- **Features**:
- Real-time agent execution tracking
- Usage analytics
- Performance monitoring
- Cost tracking
- Custom metrics
## Getting Started
1. Create a new `.env` file:
```bash
touch .env
```
2. Add your configuration:
```bash
# Basic configuration
WORKSPACE_DIR=./my_workspace
# Enable auto-updates
SWARMS_AUTOUPDATE_ON=true
# Enable telemetry
USE_TELEMETRY=true
# Add your Swarms API key
SWARMS_API_KEY=your_api_key_here
```
3. Obtain your API key:
- Visit [swarms.ai](https://swarms.ai)
- Create an account or log in
- Navigate to the API section
- Generate your unique API key
## Best Practices
1. **Security**:
- Never commit your `.env` file to version control
- Add `.env` to your `.gitignore` file
- Keep your API keys secure and rotate them periodically
2. **Workspace Organization**:
- Use descriptive workspace directory names
- Implement regular cleanup of old logs
- Monitor workspace size to prevent disk space issues
3. **Telemetry Management**:
- Enable telemetry in development for debugging
- Consider privacy implications in production
- Review collected data periodically
4. **Auto-Update Management**:
- Test updates in development before enabling in production
- Keep backups before enabling auto-updates
- Monitor system resources during updates
- Schedule updates during low-traffic periods
## Examples
### Basic Development Setup
```bash
WORKSPACE_DIR=./dev_workspace
SWARMS_AUTOUPDATE_ON=true
USE_TELEMETRY=true
SWARMS_API_KEY=sk_test_xxxxxxxxxxxx
```
### Production Setup
```bash
WORKSPACE_DIR=/var/log/swarms/prod_workspace
SWARMS_AUTOUPDATE_ON=false
USE_TELEMETRY=true
SWARMS_API_KEY=sk_prod_xxxxxxxxxxxx
```
### Testing Environment
```bash
WORKSPACE_DIR=./test_workspace
SWARMS_AUTOUPDATE_ON=true
USE_TELEMETRY=false
SWARMS_API_KEY=sk_test_xxxxxxxxxxxx
```
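Once a `.env` file like the ones above is in place, the values can be loaded at startup with `python-dotenv`. A minimal sketch (variable names follow the examples above; the boolean parsing is an assumption about how your application consumes them):

```python
import os

from dotenv import load_dotenv

# Read the .env file from the current working directory
load_dotenv()

workspace_dir = os.getenv("WORKSPACE_DIR", "./workspace")
autoupdate_on = os.getenv("SWARMS_AUTOUPDATE_ON", "false").lower() == "true"
use_telemetry = os.getenv("USE_TELEMETRY", "false").lower() == "true"
api_key = os.getenv("SWARMS_API_KEY")

print(workspace_dir, autoupdate_on, use_telemetry, api_key is not None)
```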
## Troubleshooting
Common issues and solutions:
1. **Workspace Access Issues**:
- Ensure proper file permissions
- Verify the directory exists
- Check disk space availability
2. **API Key Problems**:
- Confirm key is properly formatted
- Verify key hasn't expired
- Check for proper environment variable loading
3. **Telemetry Issues**:
- Confirm network connectivity
- Verify firewall settings
- Check for proper boolean values
4. **Auto-Update Issues**:
- Check internet connectivity
- Verify sufficient disk space
- Ensure proper permissions for updates
- Check system compatibility requirements
## Additional Resources
- [Swarms Framework Documentation](https://github.com/kyegomez/swarms)
- [Swarms Analytics Dashboard](https://swarms.ai)
- [API Reference](https://swarms.ai/docs/api)

@ -132,6 +132,16 @@ graph TD
| `data_memory` | Optional callable for data memory operations. |
| `load_yaml_path` | String representing the path to a YAML file for loading configurations. |
| `auto_generate_prompt` | Boolean indicating whether to automatically generate prompts. |
| `rag_every_loop` | Boolean indicating whether to query RAG database for context on every loop |
| `plan_enabled` | Boolean indicating whether planning functionality is enabled |
| `artifacts_on` | Boolean indicating whether to save artifacts from agent execution |
| `artifacts_output_path` | File path where artifacts should be saved |
| `artifacts_file_extension` | File extension to use for saved artifacts |
| `device` | Device to run computations on ("cpu" or "gpu") |
| `all_cores` | Boolean indicating whether to use all CPU cores |
| `device_id` | ID of the GPU device to use if running on GPU |
| `scheduled_run_date` | Optional datetime for scheduling future agent runs |
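A short sketch of how several of the new attributes above might be set (attribute names come from the table; whether each is accepted directly as an `Agent` constructor keyword, and the model setup, are assumptions based on other examples in this repository):

```python
from datetime import datetime, timedelta

from swarms import Agent
from swarm_models import OpenAIChat

model = OpenAIChat(model_name="gpt-4o-mini")  # assumed model setup

report_agent = Agent(
    agent_name="Report-Writer",
    system_prompt="You write concise analytical reports.",
    llm=model,
    max_loops=1,
    # New options documented in the table above (assumed constructor keywords)
    rag_every_loop=True,
    artifacts_on=True,
    artifacts_output_path="artifacts/",
    artifacts_file_extension=".md",
    device="cpu",
    all_cores=True,
)

# scheduled_run_date matches the updated run() parameters documented below
report_agent.run(
    "Draft a summary of today's findings",
    scheduled_run_date=datetime.now() + timedelta(hours=1),
)
```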
## `Agent` Methods
@ -200,6 +210,20 @@ graph TD
| `handle_sop_ops()` | Handles operations related to standard operating procedures. | None | `agent.handle_sop_ops()` |
| `agent_output_type(responses)` | Processes and returns the agent's output based on the specified output type. | `responses` (list): List of responses. | `formatted_output = agent.agent_output_type(responses)` |
| `check_if_no_prompt_then_autogenerate(task)` | Checks if a system prompt is not set and auto-generates one if needed. | `task` (str): The task to use for generating a prompt. | `agent.check_if_no_prompt_then_autogenerate("Analyze data")` |
| `check_if_no_prompt_then_autogenerate(task)` | Checks if auto_generate_prompt is enabled and generates a prompt by combining agent name, description and system prompt | `task` (str, optional): Task to use as fallback | `agent.check_if_no_prompt_then_autogenerate("Analyze data")` |
| `handle_artifacts(response, output_path, extension)` | Handles saving artifacts from agent execution | `response` (str): Agent response<br>`output_path` (str): Output path<br>`extension` (str): File extension | `agent.handle_artifacts(response, "outputs/", ".txt")` |
## Updated Run Method
The `run` method now accepts the following additional parameters:
| Method | Description | Inputs | Usage Example |
|--------|-------------|--------|----------------|
| `run(task, img=None, is_last=False, device="cpu", device_id=0, all_cores=True, scheduled_run_date=None)` | Runs the agent with specified parameters | `task` (str): Task to run<br>`img` (str, optional): Image path<br>`is_last` (bool): If this is last task<br>`device` (str): Device to use<br>`device_id` (int): GPU ID<br>`all_cores` (bool): Use all CPU cores<br>`scheduled_run_date` (datetime, optional): Future run date | `agent.run("Analyze data", device="gpu", device_id=0)` |
## Getting Started
@ -538,5 +562,9 @@ print(agent.system_prompt)
8. Optimize token usage with `dynamic_context_window` and `tokens_checks` methods.
9. Use `concurrent` and `async` methods for performance-critical applications.
10. Regularly review and analyze feedback using the `analyze_feedback` method.
11. Use `artifacts_on` to save important outputs from agent execution
12. Configure `device` and `device_id` appropriately for optimal performance
13. Enable `rag_every_loop` when continuous context from long-term memory is needed
14. Use `scheduled_run_date` for automated task scheduling
By following these guidelines and leveraging the Swarm Agent's extensive features, you can create powerful, flexible, and efficient autonomous agents for a wide range of applications.

@ -7,10 +7,22 @@ The `AgentRearrange` class represents a swarm of agents for rearranging tasks. I
| Attribute | Type | Description |
| --- | --- | --- |
| `agents` | `dict` | A dictionary of agents, where the key is the agent's name and the value is the agent object. |
| `flow` | `str` | The flow pattern of the tasks. |
| `max_loops` | `int` | The maximum number of loops for the agents to run. |
| `verbose` | `bool` | Whether to enable verbose logging or not. |
| `id` | `str` | Unique identifier for the swarm |
| `name` | `str` | Name of the swarm |
| `description` | `str` | Description of the swarm's purpose |
| `agents` | `dict` | Dictionary mapping agent names to Agent objects |
| `flow` | `str` | Flow pattern defining task execution order |
| `max_loops` | `int` | Maximum number of execution loops |
| `verbose` | `bool` | Whether to enable verbose logging |
| `memory_system` | `BaseVectorDatabase` | Memory system for storing agent interactions |
| `human_in_the_loop` | `bool` | Whether human intervention is enabled |
| `custom_human_in_the_loop` | `Callable` | Custom function for human intervention |
| `return_json` | `bool` | Whether to return output in JSON format |
| `output_type` | `OutputType` | Format of output ("all", "final", "list", or "dict") |
| `docs` | `List[str]` | List of document paths to add to agent prompts |
| `doc_folder` | `str` | Folder path containing documents to add to agent prompts |
| `swarm_history` | `dict` | History of agent interactions |
## Methods
-------
@ -62,20 +74,55 @@ Validates the flow pattern.
- `bool`: `True` if the flow pattern is valid.
### `run(self, task: str, *args, **kwargs)`
### `run(self, task: str = None, img: str = None, device: str = "cpu", device_id: int = 1, all_cores: bool = True, all_gpus: bool = False, *args, **kwargs)`
Runs the swarm to rearrange the tasks.
Executes the agent rearrangement task with specified compute resources.
| Parameter | Type | Description |
| --- | --- | --- |
| `task` | `str` | The initial task to be processed. |
| `*args` | - | Additional positional arguments. |
| `**kwargs` | - | Additional keyword arguments. |
| `task` | `str` | The task to execute |
| `img` | `str` | Path to input image if required |
| `device` | `str` | Computing device to use ('cpu' or 'gpu') |
| `device_id` | `int` | ID of specific device to use |
| `all_cores` | `bool` | Whether to use all CPU cores |
| `all_gpus` | `bool` | Whether to use all available GPUs |
**Returns:**
- `str`: The final processed task.
### `batch_run(self, tasks: List[str], img: Optional[List[str]] = None, batch_size: int = 10, device: str = "cpu", device_id: int = None, all_cores: bool = True, all_gpus: bool = False, *args, **kwargs)`
Process multiple tasks in batches.
| Parameter | Type | Description |
| --- | --- | --- |
| `tasks` | `List[str]` | List of tasks to process |
| `img` | `List[str]` | Optional list of images corresponding to tasks |
| `batch_size` | `int` | Number of tasks to process simultaneously |
| `device` | `str` | Computing device to use |
| `device_id` | `int` | Specific device ID if applicable |
| `all_cores` | `bool` | Whether to use all CPU cores |
| `all_gpus` | `bool` | Whether to use all available GPUs |
### `concurrent_run(self, tasks: List[str], img: Optional[List[str]] = None, max_workers: Optional[int] = None, device: str = "cpu", device_id: int = None, all_cores: bool = True, all_gpus: bool = False, *args, **kwargs)`
Process multiple tasks concurrently using ThreadPoolExecutor.
| Parameter | Type | Description |
| --- | --- | --- |
| `tasks` | `List[str]` | List of tasks to process |
| `img` | `List[str]` | Optional list of images corresponding to tasks |
| `max_workers` | `int` | Maximum number of worker threads |
| `device` | `str` | Computing device to use |
| `device_id` | `int` | Specific device ID if applicable |
| `all_cores` | `bool` | Whether to use all CPU cores |
| `all_gpus` | `bool` | Whether to use all available GPUs |
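A usage sketch tying the constructor attributes and the batch methods together (constructor keywords and the dict form of `agents` follow the attributes table; the top-level import and the flow arrow syntax are assumptions, and the three agents are assumed to be pre-built `Agent` instances):

```python
from swarms import AgentRearrange  # import path assumed

# diagnostician, specialist and coordinator are assumed to be existing Agent instances
swarm = AgentRearrange(
    name="care-team",
    description="Sequential diagnosis and care planning",
    agents={
        "Diagnostician": diagnostician,
        "Specialist": specialist,
        "CareCoordinator": coordinator,
    },
    flow="Diagnostician -> Specialist -> CareCoordinator",
    max_loops=1,
    output_type="final",
)

# Single task
print(swarm.run("Diagnose the patient's symptoms and draft a treatment plan"))

# Several tasks processed in batches of two (parameters from the batch_run table above)
results = swarm.batch_run(
    tasks=["Case A: chest pain", "Case B: chronic fatigue"],
    batch_size=2,
    device="cpu",
)
```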
## Documentation for `rearrange` Function
======================================
@ -247,18 +294,6 @@ Additionally, you can modify the `run` method of the `AgentRearrange` class to i
It's important to note that the `AgentRearrange` class and the `rearrange` function rely on the individual agents to process tasks correctly. The quality of the output will depend on the capabilities and configurations of the agents used in the swarm. Additionally, the `AgentRearrange` class does not provide any mechanisms for task prioritization or load balancing among the agents.
## Future Improvements
-------------------
Here are some potential future improvements for the `AgentRearrange` class and the `rearrange` function:
- **Task Prioritization**: Implement a mechanism to prioritize tasks based on factors such as urgency, importance, or resource availability.
- **Load Balancing**: Incorporate load balancing algorithms to distribute tasks among agents more efficiently, taking into account factors such as agent availability, performance, and resource utilization.
- **Dynamic Flow Reconfiguration**: Allow for dynamic reconfiguration of the flow pattern during runtime, enabling the addition, removal, or reordering of agents based on specific conditions or events.
- **Error Handling and Fault Tolerance**: Enhance error handling and fault tolerance mechanisms to gracefully handle agent failures, task timeouts, or other exceptional situations.
- **Monitoring and Metrics**: Implement monitoring and metrics collection to track the performance and efficiency of the swarm, as well as individual agent performance.
- **Scalability**: Enhance the scalability of the system to handle larger numbers of agents and tasks efficiently.
## Conclusion
----------

@ -1,238 +1,231 @@
# GroupChat
# GroupChat Class Documentation
The `GroupChat` class is designed to manage a group chat session involving multiple agents. This class handles initializing the conversation, selecting the next speaker, resetting the chat, and executing the chat rounds, providing a structured approach to managing a dynamic and interactive conversation.
### Key Concepts
The GroupChat class manages multi-agent conversations with state persistence, comprehensive logging, and flexible agent configurations. It supports both Agent class instances and callable functions, making it versatile for different use cases.
- **Agents**: Entities participating in the group chat.
- **Conversation Management**: Handling the flow of conversation, selecting speakers, and maintaining chat history.
- **Round-based Execution**: Managing the chat in predefined rounds.
## Attributes
### Arguments
## Installation
```bash
pip install swarms python-dotenv pydantic
```
| Argument | Type | Default | Description |
|---------------------|----------------------|-------------|-------------|
| `agents` | `List[Agent]` | `None` | List of agents participating in the group chat. |
| `max_rounds` | `int` | `10` | Maximum number of chat rounds. |
| `admin_name` | `str` | `"Admin"` | Name of the admin user. |
| `group_objective` | `str` | `None` | Objective of the group chat. |
| `selector_agent` | `Agent` | `None` | Agent responsible for selecting the next speaker. |
| `rules` | `str` | `None` | Rules for the group chat. |
| `*args` | | | Variable length argument list. |
| `**kwargs` | | | Arbitrary keyword arguments. |
### Attributes
## Attributes
| Attribute | Type | Description |
|---------------------|----------------------|-------------|
| `agents` | `List[Agent]` | List of agents participating in the group chat. |
| `max_rounds` | `int` | Maximum number of chat rounds. |
| `admin_name` | `str` | Name of the admin user. |
| `group_objective` | `str` | Objective of the group chat. |
| `selector_agent` | `Agent` | Agent responsible for selecting the next speaker. |
| `messages` | `Conversation` | Conversation object for storing the chat messages. |
|-----------|------|-------------|
| state_path | str | Path for saving/loading chat state |
| wrapped_agents | List[AgentWrapper] | List of wrapped agent instances |
| selector_agent | AgentWrapper | Agent responsible for speaker selection |
| state | GroupChatState | Current state of the group chat |
## Methods

### Core Methods

```python
def run(self, task: str) -> str:
    """Execute the group chat conversation"""

def save_state(self) -> None:
    """Save current state to disk"""

@classmethod
def load_state(cls, state_path: str) -> 'GroupChat':
    """Load GroupChat from saved state"""

def get_conversation_summary(self) -> Dict[str, Any]:
    """Return a summary of the conversation"""

def export_conversation(self, format: str = "json") -> Union[str, Dict]:
    """Export the conversation in specified format"""
```

### Internal Methods

```python
def _log_interaction(self, agent_name: str, position: int, input_text: str, output_text: str) -> None:
    """Log a single interaction"""

def _add_message(self, role: str, content: str) -> None:
    """Add a message to the conversation history"""

def select_next_speaker(self, last_speaker: AgentWrapper) -> AgentWrapper:
    """Select the next speaker using the selector agent"""
```

### __init__

Initializes the group chat with the given parameters.

**Examples:**

```python
agents = [Agent(name="Agent 1"), Agent(name="Agent 2")]
group_chat = GroupChat(agents=agents, max_rounds=5, admin_name="GroupAdmin")
```

### agent_names

Returns the names of the agents in the group chat.

**Returns:**

| Return Type | Description |
|-------------|-------------|
| `List[str]` | List of agent names. |

**Examples:**

```python
names = group_chat.agent_names
print(names)  # Output: ['Agent 1', 'Agent 2']
```

### reset

Resets the group chat by clearing the message history.

**Examples:**

```python
group_chat.reset()
```

### agent_by_name

Finds an agent whose name is contained within the given name string.

**Arguments:**

| Parameter | Type | Description |
|-----------|--------|-------------|
| `name` | `str` | Name string to search for. |

**Returns:**

| Return Type | Description |
|-------------|-------------|
| `Agent` | Agent object with a name contained in the given name string. |

**Raises:**

- `ValueError`: If no agent is found with a name contained in the given name string.

**Examples:**

```python
agent = group_chat.agent_by_name("Agent 1")
print(agent.agent_name)  # Output: 'Agent 1'
```

### next_agent

Returns the next agent in the list.

**Arguments:**

| Parameter | Type | Description |
|-----------|--------|-------------|
| `agent` | `Agent` | Current agent. |

**Returns:**

| Return Type | Description |
|-------------|-------------|
| `Agent` | Next agent in the list. |

**Examples:**

```python
current_agent = group_chat.agents[0]
next_agent = group_chat.next_agent(current_agent)
print(next_agent.agent_name)  # Output: Name of the next agent
```

### select_speaker_msg

Returns the message for selecting the next speaker.

**Returns:**

| Return Type | Description |
|-------------|-------------|
| `str` | Prompt message for selecting the next speaker. |

**Examples:**

```python
message = group_chat.select_speaker_msg()
print(message)
```

### select_speaker

Selects the next speaker.

**Arguments:**

| Parameter | Type | Description |
|----------------------|--------|-------------|
| `last_speaker_agent` | `Agent` | Last speaker in the conversation. |
| `selector_agent` | `Agent` | Agent responsible for selecting the next speaker. |

**Returns:**

| Return Type | Description |
|-------------|-------------|
| `Agent` | Next speaker. |

**Examples:**

```python
next_speaker = group_chat.select_speaker(last_speaker_agent, selector_agent)
print(next_speaker.agent_name)
```

### _participant_roles

Returns the roles of the participants.

**Returns:**

| Return Type | Description |
|-------------|-------------|
| `str` | Participant roles. |

**Examples:**

```python
roles = group_chat._participant_roles()
print(roles)
```

### __call__

Executes the group chat as a function.

**Arguments:**

| Parameter | Type | Description |
|-----------|--------|-------------|
| `task` | `str` | Task to be performed. |

**Returns:**

| Return Type | Description |
|-------------|-------------|
| `str` | Reply from the last speaker. |

**Examples:**

```python
response = group_chat(task="Discuss the project plan")
print(response)
```

## Usage Examples

### 1. Basic Setup with Two Agents

```python
import os
from swarms import Agent
from swarm_models import OpenAIChat

# Initialize OpenAI
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(openai_api_key=api_key, model_name="gpt-4-mini")

# Create agents
analyst = Agent(
    agent_name="Financial-Analyst",
    system_prompt="You are a financial analyst...",
    llm=model
)

advisor = Agent(
    agent_name="Investment-Advisor",
    system_prompt="You are an investment advisor...",
    llm=model
)

# Create group chat
chat = GroupChat(
    name="Investment Team",
    agents=[analyst, advisor],
    max_rounds=5,
    group_objective="Provide investment advice"
)

response = chat.run("What's the best investment strategy for retirement?")
```

### 2. Advanced Setup with State Management

```python
# Create group chat with state persistence
chat = GroupChat(
    name="Investment Advisory Team",
    description="Expert team for financial planning",
    agents=[analyst, advisor, tax_specialist],
    max_rounds=10,
    admin_name="Senior Advisor",
    group_objective="Provide comprehensive financial planning",
    state_path="investment_chat_state.json",
    rules="1. Always provide sources\n2. Be concise\n3. Focus on practical advice"
)

# Run chat and save state
response = chat.run("Create a retirement plan for a 35-year old")
chat.save_state()

# Load existing chat state
loaded_chat = GroupChat.load_state("investment_chat_state.json")
```

### 3. Using Custom Callable Agents

```python
def custom_agent(input_text: str) -> str:
    # Custom logic here
    return f"Processed: {input_text}"

# Mix of regular agents and callable functions
chat = GroupChat(
    name="Hybrid Team",
    agents=[analyst, custom_agent],
    max_rounds=3
)
```

### 4. Export and Analysis

```python
# Run chat
chat.run("Analyze market conditions")

# Get summary
summary = chat.get_conversation_summary()
print(summary)

# Export in different formats
json_conv = chat.export_conversation(format="json")
text_conv = chat.export_conversation(format="text")
```

### 5. Advanced Configuration with Custom Selector

```python
class CustomSelector(Agent):
    def run(self, input_text: str) -> str:
        # Custom selection logic
        return "Financial-Analyst"

chat = GroupChat(
    name="Custom Selection Team",
    agents=[analyst, advisor],
    selector_agent=CustomSelector(
        agent_name="Custom-Selector",
        system_prompt="Select the next speaker based on expertise",
        llm=model
    ),
    max_rounds=5
)
```

### 6. Debugging Setup

```python
import logging

# Configure logging
logging.basicConfig(level=logging.DEBUG)

chat = GroupChat(
    name="Debug Team",
    agents=[analyst, advisor],
    max_rounds=3,
    state_path="debug_chat.json"
)

# Run with detailed logging
try:
    response = chat.run("Complex query")
except Exception as e:
    logger.error(f"Chat failed: {str(e)}")
    # Access last successful state
    state = chat.state
```

### Additional Examples

#### Example 1: Initializing and Running a Group Chat

```python
agents = [Agent(name="Agent 1"), Agent(name="Agent 2"), Agent(name="Agent 3")]
selector_agent = Agent(name="Selector")
group_chat = GroupChat(agents=agents, selector_agent=selector_agent, max_rounds=3, group_objective="Discuss the quarterly goals.")

response = group_chat(task="Let's start the discussion on quarterly goals.")
print(response)
```

#### Example 2: Resetting the Group Chat

```python
group_chat.reset()
```

#### Example 3: Selecting the Next Speaker

```python
last_speaker = group_chat.agents[0]
next_speaker = group_chat.select_speaker(last_speaker_agent=last_speaker, selector_agent=selector_agent)
print(next_speaker.agent_name)
```

## Error Handling

The GroupChat class includes comprehensive error handling:

```python
try:
    chat = GroupChat(agents=[analyst])  # Will raise ValueError
except ValueError as e:
    print("Configuration error:", str(e))

try:
    response = chat.run("Query")
except Exception as e:
    # Access error state
    error_summary = chat.get_conversation_summary()
    print("Execution error:", str(e))
    print("State at error:", error_summary)
```

## Best Practices

1. **State Management**:
   - Always specify a `state_path` for important conversations
   - Use `save_state()` after critical operations
   - Implement regular state backups for long conversations

2. **Agent Configuration**:
   - Provide clear system prompts for each agent
   - Use descriptive agent names
   - Consider agent expertise when setting the group objective

3. **Performance**:
   - Keep `max_rounds` reasonable (5-10 for most cases)
   - Use early stopping conditions when possible
   - Monitor conversation length and complexity

4. **Error Handling**:
   - Always wrap chat execution in try-except blocks
   - Implement proper logging
   - Save states before potentially risky operations

## Limitations

- Agents must either have a `run` method or be callable
- State files can grow large with many interactions
- Selector agent may need optimization for large agent groups
- Real-time streaming not supported in basic configuration

## Summary

The `GroupChat` class offers a structured approach to managing a group chat involving multiple agents. With functionalities for initializing conversations, selecting speakers, and handling chat rounds, it provides a robust framework for dynamic and interactive discussions. This makes it an essential tool for applications requiring coordinated communication among multiple agents.

@ -31,20 +31,20 @@ agent = Agent(
saved_state_path="finance_agent.json",
user_name="swarms_corp",
retry_attempts=1,
    context_length=200000,
    return_step_meta=True,
    output_type="json",  # "json", "dict", "csv", or "string"; "yaml" coming soon
    streaming_on=False,
    auto_generate_prompt=False,  # Auto-generate the prompt from the agent's name, description, system prompt, and task
artifacts_on=True,
artifacts_output_path="roth_ira_report",
artifacts_file_extension=".txt",
max_tokens=8000,
return_history=True,
)
print(
    agent.run(
        "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria. Create a report on this question.",
        all_cores=True,
    )
)

@ -0,0 +1,68 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.structs.agents_available import showcase_available_agents
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the Claims Director agent
director_agent = Agent(
agent_name="ClaimsDirector",
agent_description="Oversees and coordinates the medical insurance claims processing workflow",
system_prompt="""You are the Claims Director responsible for managing the medical insurance claims process.
Assign and prioritize tasks between claims processors and auditors. Ensure claims are handled efficiently
and accurately while maintaining compliance with insurance policies and regulations.""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="director_agent.json",
)
# Initialize Claims Processor agent
processor_agent = Agent(
agent_name="ClaimsProcessor",
agent_description="Reviews and processes medical insurance claims, verifying coverage and eligibility",
system_prompt="""Review medical insurance claims for completeness and accuracy. Verify patient eligibility,
coverage details, and process claims according to policy guidelines. Flag any claims requiring special review.""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="processor_agent.json",
)
# Initialize Claims Auditor agent
auditor_agent = Agent(
agent_name="ClaimsAuditor",
agent_description="Audits processed claims for accuracy and compliance with policies and regulations",
system_prompt="""Audit processed insurance claims for accuracy and compliance. Review claim decisions,
identify potential fraud or errors, and ensure all processing follows established guidelines and regulations.""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="auditor_agent.json",
)
# Create a list of agents
agents = [director_agent, processor_agent, auditor_agent]
print(showcase_available_agents(agents=agents))

@ -0,0 +1,56 @@
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat
from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from new_features_examples.async_executor import HighSpeedExecutor
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=1,
# autosave=True,
# dashboard=False,
# verbose=True,
# dynamic_temperature_enabled=True,
# saved_state_path="finance_agent.json",
# user_name="swarms_corp",
# retry_attempts=1,
# context_length=200000,
# return_step_meta=True,
# output_type="json", # "json", "dict", "csv" OR "string" soon "yaml" and
# auto_generate_prompt=False, # Auto generate prompt for the agent based on name, description, and system prompt, task
# # artifacts_on=True,
# artifacts_output_path="roth_ira_report",
# artifacts_file_extension=".txt",
# max_tokens=8000,
# return_history=True,
)
def execute_agent(
task: str = "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria. Create a report on this question.",
):
return agent.run(task)
executor = HighSpeedExecutor()

# Note: executor.run returns timing statistics (executions, duration,
# executions_per_second), not the agent outputs themselves.
results = executor.run(execute_agent, 2)

print(results)

@ -0,0 +1,131 @@
import asyncio
import multiprocessing as mp
import time
from functools import partial
from typing import Any, Dict, Union
class HighSpeedExecutor:
def __init__(self, num_processes: int = None):
"""
Initialize the executor with configurable number of processes.
If num_processes is None, it uses CPU count.
"""
self.num_processes = num_processes or mp.cpu_count()
async def _worker(
self,
queue: asyncio.Queue,
func: Any,
*args: Any,
**kwargs: Any,
):
"""Async worker that processes tasks from the queue"""
while True:
try:
# Non-blocking get from queue
await queue.get()
await asyncio.get_event_loop().run_in_executor(
None, partial(func, *args, **kwargs)
)
queue.task_done()
except asyncio.CancelledError:
break
async def _distribute_tasks(
self, num_tasks: int, queue: asyncio.Queue
):
"""Distribute tasks across the queue"""
for i in range(num_tasks):
await queue.put(i)
async def execute_batch(
self,
func: Any,
num_executions: int,
*args: Any,
**kwargs: Any,
) -> Dict[str, Union[int, float]]:
"""
Execute the given function multiple times concurrently.
Args:
func: The function to execute
num_executions: Number of times to execute the function
*args, **kwargs: Arguments to pass to the function
Returns:
A dictionary containing the number of executions, duration, and executions per second.
"""
queue = asyncio.Queue()
# Create worker tasks
workers = [
asyncio.create_task(
self._worker(queue, func, *args, **kwargs)
)
for _ in range(self.num_processes)
]
# Start timing
start_time = time.perf_counter()
# Distribute tasks
await self._distribute_tasks(num_executions, queue)
# Wait for all tasks to complete
await queue.join()
# Cancel workers
for worker in workers:
worker.cancel()
# Wait for all workers to finish
await asyncio.gather(*workers, return_exceptions=True)
end_time = time.perf_counter()
duration = end_time - start_time
return {
"executions": num_executions,
"duration": duration,
"executions_per_second": num_executions / duration,
}
def run(
self,
func: Any,
num_executions: int,
*args: Any,
**kwargs: Any,
):
return asyncio.run(
self.execute_batch(func, num_executions, *args, **kwargs)
)
# def example_function(x: int = 0) -> int:
# """Example function to execute"""
# return x * x
# async def main():
# # Create executor with number of CPU cores
# executor = HighSpeedExecutor()
# # Execute the function 1000 times
# result = await executor.execute_batch(
# example_function, num_executions=1000, x=42
# )
# print(
# f"Completed {result['executions']} executions in {result['duration']:.2f} seconds"
# )
# print(
# f"Rate: {result['executions_per_second']:.2f} executions/second"
# )
# if __name__ == "__main__":
# # Run the async main function
# asyncio.run(main())

@ -0,0 +1,188 @@
import json
import os
from contextlib import suppress
from typing import Any, Callable, Dict, Optional, Type, Union
from dotenv import load_dotenv
from pydantic import BaseModel, Field, ValidationError, create_model
from swarm_models.openai_function_caller import OpenAIFunctionCaller
class DynamicParser:
@staticmethod
def extract_fields(model: Type[BaseModel]) -> Dict[str, Any]:
return {
field_name: (field.annotation, ... if field.is_required() else None)
for field_name, field in model.model_fields.items()
}
@staticmethod
def create_partial_model(model: Type[BaseModel], data: Dict[str, Any]) -> Type[BaseModel]:
fields = {
field_name: (field.annotation, ... if field.is_required() else None)
for field_name, field in model.model_fields.items()
if field_name in data
}
return create_model(f"Partial{model.__name__}", **fields)
@classmethod
def parse(cls, data: Union[str, Dict[str, Any]], model: Type[BaseModel]) -> Optional[BaseModel]:
if isinstance(data, str):
try:
data = json.loads(data)
except json.JSONDecodeError:
return None
# Try full model first
with suppress(ValidationError):
return model.model_validate(data)
# Create and try partial model
partial_model = cls.create_partial_model(model, data)
with suppress(ValidationError):
return partial_model.model_validate(data)
return None
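# Illustrative sketch only (the model below is hypothetical and not part of the
# original example): DynamicParser.parse accepts either a JSON string or a dict,
# and falls back to a partial model when only some fields validate.
#
#     class _DemoModel(BaseModel):
#         name: str = Field(..., description="Required field")
#         note: str = Field(..., description="Another required field")
#
#     full = DynamicParser.parse({"name": "a", "note": "b"}, _DemoModel)   # -> _DemoModel instance
#     partial = DynamicParser.parse({"name": "a"}, _DemoModel)             # -> Partial_DemoModel instance
#     bad = DynamicParser.parse("not json", _DemoModel)                    # -> None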
load_dotenv()
# Define the Thoughts schema
class Thoughts(BaseModel):
text: str = Field(..., description="Current thoughts or observations regarding the task.")
reasoning: str = Field(..., description="Logical reasoning behind the thought process.")
plan: str = Field(..., description="A short bulleted list that conveys the immediate and long-term plan.")
criticism: str = Field(..., description="Constructive self-criticism to improve future responses.")
speak: str = Field(..., description="A concise summary of thoughts intended for the user.")
# Define the Command schema
class Command(BaseModel):
name: str = Field(..., description="Command name to execute from the provided list of commands.")
args: Dict[str, Any] = Field(..., description="Arguments required to execute the command.")
# Define the AgentResponse schema
class AgentResponse(BaseModel):
thoughts: Thoughts = Field(..., description="The agent's current thoughts and reasoning.")
command: Command = Field(..., description="The command to execute along with its arguments.")
# Define tool functions
def fluid_api_command(task: str):
    """Execute a fluid API request (the real request helper is not wired up in this example)."""
    # response = fluid_api_request(task)
    # The fluid_api_request call above is commented out, so return a simple
    # placeholder payload instead of referencing an undefined `response`.
    response = {"status": "success", "message": f"Fluid API request simulated for task: {task}"}
    print(json.dumps(response, indent=4))
    return response
def send_tweet_command(text: str):
"""Simulate sending a tweet."""
print(f"Tweet sent: {text}")
return {"status": "success", "message": f"Tweet sent: {text}"}
def do_nothing_command():
"""Do nothing."""
print("Doing nothing...")
return {"status": "success", "message": "No action taken."}
def task_complete_command(reason: str):
"""Mark the task as complete and provide a reason."""
print(f"Task completed: {reason}")
return {"status": "success", "message": f"Task completed: {reason}"}
# Dynamic command execution
def execute_command(name: str, args: Dict[str, Any]):
"""Dynamically execute a command based on its name and arguments."""
command_map: Dict[str, Callable] = {
"fluid_api": lambda **kwargs: fluid_api_command(task=kwargs.get("task")),
"send_tweet": lambda **kwargs: send_tweet_command(text=kwargs.get("text")),
"do_nothing": lambda **kwargs: do_nothing_command(),
"task_complete": lambda **kwargs: task_complete_command(reason=kwargs.get("reason")),
}
if name not in command_map:
raise ValueError(f"Unknown command: {name}")
# Execute the command with the provided arguments
return command_map[name](**args)
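# Illustrative usage (not executed here): dispatch a command by name with its args,
# using the command_map defined above.
# execute_command("send_tweet", {"text": "Hello from the swarm!"})
# execute_command("task_complete", {"reason": "All steps finished."})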
def parse_and_execute_command(response: Union[str, Dict[str, Any]], base_model: Type[BaseModel] = AgentResponse) -> Any:
"""Enhanced command parser with flexible input handling"""
parsed = DynamicParser.parse(response, base_model)
if not parsed:
raise ValueError("Failed to parse response")
if hasattr(parsed, 'command'):
command_name = parsed.command.name
command_args = parsed.command.args
return execute_command(command_name, command_args)
return parsed
ainame = "AutoAgent"
userprovided = "assistant"
SYSTEM_PROMPT = f"""
You are {ainame}, an advanced and autonomous {userprovided}.
Your role is to make decisions and complete tasks independently without seeking user assistance. Leverage your strengths as an LLM to solve tasks efficiently, adhering strictly to the commands and resources provided.
### GOALS:
1. {userprovided}
2. Execute tasks with precision and efficiency.
3. Ensure outputs are actionable and aligned with the user's objectives.
4. Continuously optimize task strategies for maximum effectiveness.
5. Maintain reliability and consistency in all responses.
### CONSTRAINTS:
1. Memory limit: ~4000 words for short-term memory. Save essential information to files immediately to avoid loss.
2. Independent decision-making: Do not rely on user assistance.
3. Exclusively use commands in double quotes (e.g., "command name").
4. Use subprocesses for commands that may take longer than a few minutes.
5. Ensure all outputs strictly adhere to the specified JSON response format.
### COMMANDS:
1. Fluid API: "fluid_api", args: "method": "<GET/POST/...>", "url": "<url>", "headers": "<headers>", "body": "<payload>"
2. Send Tweet: "send_tweet", args: "text": "<text>"
3. Do Nothing: "do_nothing", args:
4. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
### RESOURCES:
1. Internet access for real-time information and data gathering.
2. Long-term memory management for storing critical information.
3. Access to GPT-3.5-powered Agents for delegating tasks.
4. File handling capabilities for output storage and retrieval.
### PERFORMANCE EVALUATION:
1. Continuously analyze and reflect on actions to ensure optimal task completion.
2. Self-critique decisions and strategies constructively to identify areas for improvement.
3. Ensure every command serves a clear purpose and minimizes resource usage.
4. Complete tasks in the least number of steps, balancing speed and accuracy.
### RESPONSE FORMAT:
Always respond in strict JSON that matches the AgentResponse schema (a "thoughts" object and a "command" object). Ensure your responses can be parsed with Python's `json.loads`.
"""
# Initialize the OpenAIFunctionCaller
model = OpenAIFunctionCaller(
system_prompt=SYSTEM_PROMPT,
max_tokens=4000,
temperature=0.9,
base_model=AgentResponse, # Pass the Pydantic schema as the base model
parallel_tool_calls=False,
openai_api_key=os.getenv("OPENAI_API_KEY")
)
# Example usage
user_input = (
"Analyze the provided Python code for inefficiencies, generate suggestions for improvements, "
"and provide optimized code."
)
response = model.run(user_input)
response = parse_and_execute_command(response)
print(response)
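# Illustrative only: the shape of a response that parse_and_execute_command
# expects, matching the AgentResponse schema defined above (field values are
# made up for this sketch).
#
# {
#     "thoughts": {
#         "text": "The code has an O(n^2) loop that can be vectorized.",
#         "reasoning": "Vectorizing removes the nested iteration.",
#         "plan": "- profile\n- vectorize hot loop\n- re-run benchmarks",
#         "criticism": "I should confirm input sizes before optimizing.",
#         "speak": "I found an inefficiency and will propose optimized code."
#     },
#     "command": {
#         "name": "task_complete",
#         "args": {"reason": "Optimized code and suggestions provided."}
#     }
# }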

@ -0,0 +1,120 @@
import os
from dotenv import load_dotenv
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.structs.swarm_router import SwarmRouter
load_dotenv()
# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize specialized agents
data_extractor_agent = Agent(
agent_name="Data-Extractor",
system_prompt="You are a data extraction specialist. Extract relevant information from provided content.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="data_extractor_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
summarizer_agent = Agent(
agent_name="Document-Summarizer",
system_prompt="You are a document summarization specialist. Provide clear and concise summaries.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="summarizer_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
financial_analyst_agent = Agent(
agent_name="Financial-Analyst",
system_prompt="You are a financial analysis specialist. Analyze financial aspects of content.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="financial_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
market_analyst_agent = Agent(
agent_name="Market-Analyst",
system_prompt="You are a market analysis specialist. Analyze market-related aspects.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="market_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
operational_analyst_agent = Agent(
agent_name="Operational-Analyst",
system_prompt="You are an operational analysis specialist. Analyze operational aspects.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="operational_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
# Initialize the SwarmRouter
router = SwarmRouter(
name="pe-document-analysis-swarm",
description="Analyze documents for private equity due diligence and investment decision-making",
max_loops=1,
agents=[
data_extractor_agent,
summarizer_agent,
financial_analyst_agent,
market_analyst_agent,
operational_analyst_agent,
],
swarm_type="SequentialWorkflow", # or "SequentialWorkflow" or "ConcurrentWorkflow" or
auto_generate_prompts=True,
output_type="all",
)
# Example usage
if __name__ == "__main__":
# Run a comprehensive private equity document analysis task
result = router.run(
"Where is the best place to find template term sheets for series A startups. Provide links and references"
)
print(result)

@ -0,0 +1,96 @@
import os
from swarm_models import OpenAIChat
from swarms import Agent, run_agents_with_tasks_concurrently
# Fetch the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize agents for different roles
delaware_ccorp_agent = Agent(
agent_name="Delaware-CCorp-Hiring-Agent",
system_prompt="""
Create a comprehensive hiring description for a Delaware C Corporation,
including all relevant laws and regulations, such as the Delaware General
Corporation Law (DGCL) and the Delaware Corporate Law. Ensure the description
covers the requirements for hiring employees, contractors, and officers,
including the necessary paperwork, tax obligations, and benefits. Also,
outline the procedures for compliance with Delaware's employment laws,
including anti-discrimination laws, workers' compensation, and unemployment
insurance. Provide guidance on how to navigate the complexities of Delaware's
corporate law and ensure that all hiring practices are in compliance with
state and federal regulations.
""",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
output_type="str",
artifacts_on=True,
artifacts_output_path="delaware_ccorp_hiring_description.md",
artifacts_file_extension=".md",
)
indian_foreign_agent = Agent(
agent_name="Indian-Foreign-Hiring-Agent",
system_prompt="""
Create a comprehensive hiring description for an Indian or foreign country,
including all relevant laws and regulations, such as the Indian Contract Act,
the Indian Labour Laws, and the Foreign Exchange Management Act (FEMA).
Ensure the description covers the requirements for hiring employees,
contractors, and officers, including the necessary paperwork, tax obligations,
and benefits. Also, outline the procedures for compliance with Indian and
foreign employment laws, including anti-discrimination laws, workers'
compensation, and unemployment insurance. Provide guidance on how to navigate
the complexities of Indian and foreign corporate law and ensure that all hiring
practices are in compliance with state and federal regulations. Consider the
implications of hiring foreign nationals and the requirements for obtaining
necessary visas and work permits.
""",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
output_type="str",
artifacts_on=True,
artifacts_output_path="indian_foreign_hiring_description.md",
artifacts_file_extension=".md",
)
# List of agents and corresponding tasks
agents = [delaware_ccorp_agent, indian_foreign_agent]
tasks = [
"""
Create a comprehensive hiring description for an Agent Engineer, including
required skills and responsibilities. Ensure the description covers the
necessary technical expertise, such as proficiency in AI/ML frameworks,
programming languages, and data structures. Outline the key responsibilities,
including designing and developing AI agents, integrating with existing systems,
and ensuring scalability and performance.
""",
"""
Generate a detailed job description for a Prompt Engineer, including
required skills and responsibilities. Ensure the description covers the
necessary technical expertise, such as proficiency in natural language processing,
machine learning, and software development. Outline the key responsibilities,
including designing and optimizing prompts for AI systems, ensuring prompt
quality and consistency, and collaborating with cross-functional teams.
""",
]
# Run agents with tasks concurrently
results = run_agents_with_tasks_concurrently(
agents, tasks, all_cores=True, device="cpu", no_clusterops=True
)
# Print the results
# for result in results:
# print(result)

@ -0,0 +1,54 @@
import pandas as pd
import json
from loguru import logger
def dict_to_dataframe(data: dict) -> pd.DataFrame:
"""
Converts a dictionary into a Pandas DataFrame with formatted values.
Handles non-serializable values gracefully by skipping them.
Args:
data (dict): The dictionary to convert.
Returns:
pd.DataFrame: A DataFrame representation of the dictionary.
"""
formatted_data = {}
for key, value in data.items():
try:
# Attempt to serialize the value
if isinstance(value, list):
# Format list as comma-separated string
formatted_value = ", ".join(
str(item) for item in value
)
elif isinstance(value, dict):
# Format dict as key-value pairs
formatted_value = ", ".join(
f"{k}: {v}" for k, v in value.items()
)
else:
# Convert other serializable types to string
formatted_value = json.dumps(
value
) # Serialize value to string
formatted_data[key] = formatted_value
except (TypeError, ValueError) as e:
# Log and skip non-serializable items
logger.warning(
f"Skipping non-serializable key '{key}': {e}"
)
continue
# Convert the formatted dictionary into a DataFrame
return pd.DataFrame(
list(formatted_data.items()), columns=["Key", "Value"]
)
example = dict_to_dataframe(data={"chicken": "noodle_soup"})
# formatter.print_panel(example)
print(example)
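# Additional illustrative call (not in the original example): lists are joined
# into comma-separated strings and nested dicts into "key: value" pairs, per the
# branches above.
richer_example = dict_to_dataframe(
    data={
        "tags": ["swarms", "agents"],
        "config": {"retries": 3, "verbose": True},
    }
)
print(richer_example)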

@ -0,0 +1,308 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from web3 import Web3
from typing import Dict, Optional, Any
from datetime import datetime
import asyncio
from loguru import logger
from dotenv import load_dotenv
import csv
import requests
import time
BLOCKCHAIN_AGENT_PROMPT = """
You are an expert blockchain and cryptocurrency analyst with deep knowledge of Ethereum markets and DeFi ecosystems.
You have access to real-time ETH price data and transaction information.
For each transaction, analyze:
1. MARKET CONTEXT
- Current ETH price and what this transaction means in USD terms
- How this movement compares to typical market volumes
- Whether this could impact ETH price
2. BEHAVIORAL ANALYSIS
- Whether this appears to be institutional, whale, or protocol movement
- If this fits any known wallet patterns or behaviors
- Signs of smart contract interaction or DeFi activity
3. RISK & IMPLICATIONS
- Potential market impact or price influence
- Signs of potential market manipulation or unusual activity
- Protocol or DeFi risks if applicable
4. STRATEGIC INSIGHTS
- What traders should know about this movement
- Potential chain reactions or follow-up effects
- Market opportunities or risks created
Write naturally but precisely. Focus on actionable insights and important patterns.
Your analysis helps traders and researchers understand significant market movements in real-time."""
class EthereumAnalyzer:
def __init__(self, min_value_eth: float = 100.0):
load_dotenv()
logger.add(
"eth_analysis.log",
rotation="500 MB",
retention="10 days",
level="INFO",
format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
)
self.w3 = Web3(
Web3.HTTPProvider(
"https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161"
)
)
if not self.w3.is_connected():
raise ConnectionError(
"Failed to connect to Ethereum network"
)
self.min_value_eth = min_value_eth
self.last_processed_block = self.w3.eth.block_number
self.eth_price = self.get_eth_price()
self.last_price_update = time.time()
# Initialize AI agent
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError(
"OpenAI API key not found in environment variables"
)
model = OpenAIChat(
openai_api_key=api_key,
model_name="gpt-4",
temperature=0.1,
)
self.agent = Agent(
agent_name="Ethereum-Analysis-Agent",
system_prompt=BLOCKCHAIN_AGENT_PROMPT,
llm=model,
max_loops=1,
autosave=True,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="eth_agent.json",
user_name="eth_analyzer",
retry_attempts=1,
context_length=200000,
output_type="string",
streaming_on=False,
)
self.csv_filename = "ethereum_analysis.csv"
self.initialize_csv()
def get_eth_price(self) -> float:
"""Get current ETH price from CoinGecko API."""
try:
response = requests.get(
"https://api.coingecko.com/api/v3/simple/price",
params={"ids": "ethereum", "vs_currencies": "usd"},
)
return float(response.json()["ethereum"]["usd"])
except Exception as e:
logger.error(f"Error fetching ETH price: {str(e)}")
return 0.0
def update_eth_price(self):
"""Update ETH price if more than 5 minutes have passed."""
if time.time() - self.last_price_update > 300: # 5 minutes
self.eth_price = self.get_eth_price()
self.last_price_update = time.time()
logger.info(f"Updated ETH price: ${self.eth_price:,.2f}")
def initialize_csv(self):
"""Initialize CSV file with headers."""
headers = [
"timestamp",
"transaction_hash",
"from_address",
"to_address",
"value_eth",
"value_usd",
"eth_price",
"gas_used",
"gas_price_gwei",
"block_number",
"analysis",
]
if not os.path.exists(self.csv_filename):
with open(self.csv_filename, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(headers)
async def analyze_transaction(
self, tx_hash: str
) -> Optional[Dict[str, Any]]:
"""Analyze a single transaction."""
try:
tx = self.w3.eth.get_transaction(tx_hash)
receipt = self.w3.eth.get_transaction_receipt(tx_hash)
value_eth = float(self.w3.from_wei(tx.value, "ether"))
if value_eth < self.min_value_eth:
return None
block = self.w3.eth.get_block(tx.blockNumber)
# Update ETH price if needed
self.update_eth_price()
value_usd = value_eth * self.eth_price
analysis = {
"timestamp": datetime.fromtimestamp(
block.timestamp
).isoformat(),
"transaction_hash": tx_hash.hex(),
"from_address": tx["from"],
"to_address": tx.to if tx.to else "Contract Creation",
"value_eth": value_eth,
"value_usd": value_usd,
"eth_price": self.eth_price,
"gas_used": receipt.gasUsed,
"gas_price_gwei": float(
self.w3.from_wei(tx.gasPrice, "gwei")
),
"block_number": tx.blockNumber,
}
# Check if it's a contract
if tx.to:
code = self.w3.eth.get_code(tx.to)
analysis["is_contract"] = len(code) > 0
# Get contract events
if analysis["is_contract"]:
analysis["events"] = receipt.logs
return analysis
except Exception as e:
logger.error(
f"Error analyzing transaction {tx_hash}: {str(e)}"
)
return None
def prepare_analysis_prompt(self, tx_data: Dict[str, Any]) -> str:
"""Prepare detailed analysis prompt including price context."""
value_usd = tx_data["value_usd"]
eth_price = tx_data["eth_price"]
prompt = f"""Analyze this Ethereum transaction in current market context:
Transaction Details:
- Value: {tx_data['value_eth']:.2f} ETH (${value_usd:,.2f} at current price)
- Current ETH Price: ${eth_price:,.2f}
- From: {tx_data['from_address']}
- To: {tx_data['to_address']}
- Contract Interaction: {tx_data.get('is_contract', False)}
- Gas Used: {tx_data['gas_used']:,} units
- Gas Price: {tx_data['gas_price_gwei']:.2f} Gwei
- Block: {tx_data['block_number']}
- Timestamp: {tx_data['timestamp']}
{f"Event Count: {len(tx_data['events'])} events" if tx_data.get('events') else "No contract events"}
Consider the transaction's significance given the current ETH price of ${eth_price:,.2f} and total USD value of ${value_usd:,.2f}.
Analyze market impact, patterns, risks, and strategic implications."""
return prompt
def save_to_csv(self, tx_data: Dict[str, Any], ai_analysis: str):
"""Save transaction data and analysis to CSV."""
row = [
tx_data["timestamp"],
tx_data["transaction_hash"],
tx_data["from_address"],
tx_data["to_address"],
tx_data["value_eth"],
tx_data["value_usd"],
tx_data["eth_price"],
tx_data["gas_used"],
tx_data["gas_price_gwei"],
tx_data["block_number"],
ai_analysis.replace("\n", " "),
]
with open(self.csv_filename, "a", newline="") as f:
writer = csv.writer(f)
writer.writerow(row)
async def monitor_transactions(self):
"""Monitor and analyze transactions one at a time."""
logger.info(
f"Starting transaction monitor (minimum value: {self.min_value_eth} ETH)"
)
while True:
try:
current_block = self.w3.eth.block_number
block = self.w3.eth.get_block(
current_block, full_transactions=True
)
for tx in block.transactions:
tx_analysis = await self.analyze_transaction(
tx.hash
)
if tx_analysis:
# Get AI analysis
analysis_prompt = (
self.prepare_analysis_prompt(tx_analysis)
)
ai_analysis = self.agent.run(analysis_prompt)
print(ai_analysis)
# Save to CSV
self.save_to_csv(tx_analysis, ai_analysis)
# Print analysis
print("\n" + "=" * 50)
print("New Transaction Analysis")
print(
f"Hash: {tx_analysis['transaction_hash']}"
)
print(
f"Value: {tx_analysis['value_eth']:.2f} ETH (${tx_analysis['value_usd']:,.2f})"
)
print(
f"Current ETH Price: ${self.eth_price:,.2f}"
)
print("=" * 50)
print(ai_analysis)
print("=" * 50 + "\n")
await asyncio.sleep(1) # Wait for next block
except Exception as e:
logger.error(f"Error in monitoring loop: {str(e)}")
await asyncio.sleep(1)
async def main():
"""Entry point for the analysis system."""
analyzer = EthereumAnalyzer(min_value_eth=100.0)
await analyzer.monitor_transactions()
if __name__ == "__main__":
print("Starting Ethereum Transaction Analyzer...")
print("Saving results to ethereum_analysis.csv")
print("Press Ctrl+C to stop")
try:
asyncio.run(main())
except KeyboardInterrupt:
print("\nStopping analyzer...")

@ -0,0 +1,75 @@
import os
import asyncio
from swarms import Agent
from swarm_models import OpenAIChat
import time
import psutil
from swarms.prompts.finance_agent_sys_prompt import (
FINANCIAL_AGENT_SYS_PROMPT,
)
from dotenv import load_dotenv
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the agent
agent = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
llm=model,
max_loops=1,
autosave=True,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="finance_agent.json",
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
return_step_meta=False,
output_type="string",
streaming_on=False,
)
# Function to measure time and memory usage.
# Note: when this decorator wraps an async function, the wrapper only times the
# creation of the coroutine; the awaited work runs later inside asyncio.run().
def measure_time_and_memory(func):
    def wrapper(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
memory_usage = psutil.Process().memory_info().rss / 1024**2
print(f"Time taken: {end_time - start_time} seconds")
print(f"Memory used: {memory_usage} MB")
return result
return wrapper
# Function to run the agent asynchronously
@measure_time_and_memory
async def run_agent_async():
await asyncio.gather(
agent.run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
)
)
# Function to run the agent on another thread
@measure_time_and_memory
def run_agent_thread():
asyncio.run(run_agent_async())
# Run the agent asynchronously and on another thread to test the speed
asyncio.run(run_agent_async())
run_agent_thread()

@ -0,0 +1,228 @@
import os
from pathlib import Path
from typing import Optional
from dotenv import load_dotenv
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from loguru import logger
from swarm_models import OpenAIChat
from swarms import Agent, AgentRearrange
load_dotenv()
# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
class LlamaIndexDB:
"""A class to manage document indexing and querying using LlamaIndex.
This class provides functionality to add documents from a directory and query the indexed documents.
Args:
data_dir (str): Directory containing documents to index. Defaults to "docs".
**kwargs: Additional arguments passed to SimpleDirectoryReader and VectorStoreIndex.
SimpleDirectoryReader kwargs:
- filename_as_id (bool): Use filenames as document IDs
- recursive (bool): Recursively read subdirectories
- required_exts (List[str]): Only read files with these extensions
- exclude_hidden (bool): Skip hidden files
VectorStoreIndex kwargs:
- service_context: Custom service context
- embed_model: Custom embedding model
- similarity_top_k (int): Number of similar docs to retrieve
- store_nodes_override (bool): Override node storage
"""
def __init__(self, data_dir: str = "docs", **kwargs) -> None:
"""Initialize the LlamaIndexDB with an empty index.
Args:
data_dir (str): Directory containing documents to index
**kwargs: Additional arguments for SimpleDirectoryReader and VectorStoreIndex
"""
self.data_dir = data_dir
self.index: Optional[VectorStoreIndex] = None
self.reader_kwargs = {
k: v
for k, v in kwargs.items()
if k
in SimpleDirectoryReader.__init__.__code__.co_varnames
}
self.index_kwargs = {
k: v
for k, v in kwargs.items()
if k not in self.reader_kwargs
}
logger.info("Initialized LlamaIndexDB")
data_path = Path(self.data_dir)
if not data_path.exists():
logger.error(f"Directory not found: {self.data_dir}")
raise FileNotFoundError(
f"Directory {self.data_dir} does not exist"
)
try:
documents = SimpleDirectoryReader(
self.data_dir, **self.reader_kwargs
).load_data()
self.index = VectorStoreIndex.from_documents(
documents, **self.index_kwargs
)
logger.success(
f"Successfully indexed documents from {self.data_dir}"
)
except Exception as e:
logger.error(f"Error indexing documents: {str(e)}")
raise
def query(self, query: str, **kwargs) -> str:
"""Query the indexed documents.
Args:
query (str): The query string to search for
**kwargs: Additional arguments passed to the query engine
- similarity_top_k (int): Number of similar documents to retrieve
- streaming (bool): Enable streaming response
- response_mode (str): Response synthesis mode
- max_tokens (int): Maximum tokens in response
Returns:
str: The response from the query engine
Raises:
ValueError: If no documents have been indexed yet
"""
if self.index is None:
logger.error("No documents have been indexed yet")
raise ValueError("Must add documents before querying")
try:
query_engine = self.index.as_query_engine(**kwargs)
response = query_engine.query(query)
print(response)
logger.info(f"Successfully queried: {query}")
return str(response)
except Exception as e:
logger.error(f"Error during query: {str(e)}")
raise
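# Illustrative standalone usage (assumes a local "docs" directory with readable files):
# db = LlamaIndexDB(data_dir="docs", recursive=True, similarity_top_k=5)
# print(db.query("Summarize the key findings in the patient records"))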
# Initialize specialized medical agents
medical_data_extractor = Agent(
agent_name="Medical-Data-Extractor",
system_prompt="You are a specialized medical data extraction expert, trained in processing and analyzing clinical data, lab results, medical imaging reports, and patient records. Your role is to carefully extract relevant medical information while maintaining strict HIPAA compliance and patient confidentiality. Focus on identifying key clinical indicators, test results, vital signs, medication histories, and relevant patient history. Pay special attention to temporal relationships between symptoms, treatments, and outcomes. Ensure all extracted data maintains proper medical context and terminology.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="medical_data_extractor.json",
user_name="medical_team",
retry_attempts=1,
context_length=200000,
output_type="string",
)
diagnostic_specialist = Agent(
agent_name="Diagnostic-Specialist",
system_prompt="You are a senior diagnostic physician with extensive experience in differential diagnosis. Your role is to analyze patient symptoms, lab results, and clinical findings to develop comprehensive diagnostic assessments. Consider all presenting symptoms, patient history, risk factors, and test results to formulate possible diagnoses. Prioritize diagnoses based on clinical probability and severity. Always consider both common and rare conditions that match the symptom pattern. Recommend additional tests or imaging when needed for diagnostic clarity. Follow evidence-based diagnostic criteria and current medical guidelines.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="diagnostic_specialist.json",
user_name="medical_team",
retry_attempts=1,
context_length=200000,
output_type="string",
)
treatment_planner = Agent(
agent_name="Treatment-Planner",
system_prompt="You are an experienced clinical treatment specialist focused on developing comprehensive treatment plans. Your expertise covers both acute and chronic condition management, medication selection, and therapeutic interventions. Consider patient-specific factors including age, comorbidities, allergies, and contraindications when recommending treatments. Incorporate both pharmacological and non-pharmacological interventions. Emphasize evidence-based treatment protocols while considering patient preferences and quality of life. Address potential drug interactions and side effects. Include monitoring parameters and treatment milestones.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="treatment_planner.json",
user_name="medical_team",
retry_attempts=1,
context_length=200000,
output_type="string",
)
specialist_consultant = Agent(
agent_name="Specialist-Consultant",
system_prompt="You are a medical specialist consultant with expertise across multiple disciplines including cardiology, neurology, endocrinology, and internal medicine. Your role is to provide specialized insight for complex cases requiring deep domain knowledge. Analyze cases from your specialist perspective, considering rare conditions and complex interactions between multiple systems. Provide detailed recommendations for specialized testing, imaging, or interventions within your domain. Highlight potential complications or considerations that may not be immediately apparent to general practitioners.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="specialist_consultant.json",
user_name="medical_team",
retry_attempts=1,
context_length=200000,
output_type="string",
)
patient_care_coordinator = Agent(
agent_name="Patient-Care-Coordinator",
system_prompt="You are a patient care coordinator specializing in comprehensive healthcare management. Your role is to ensure holistic patient care by coordinating between different medical specialists, considering patient needs, and managing care transitions. Focus on patient education, medication adherence, lifestyle modifications, and follow-up care planning. Consider social determinants of health, patient resources, and access to care. Develop actionable care plans that patients can realistically follow. Coordinate with other healthcare providers to ensure continuity of care and proper implementation of treatment plans.",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="patient_care_coordinator.json",
user_name="medical_team",
retry_attempts=1,
context_length=200000,
output_type="string",
)
# Initialize the SwarmRouter to coordinate the medical agents
router = AgentRearrange(
name="medical-diagnosis-treatment-swarm",
description="Collaborative medical team for comprehensive patient diagnosis and treatment planning",
max_loops=1, # Limit to one iteration through the agent flow
agents=[
medical_data_extractor, # First agent to extract medical data
diagnostic_specialist, # Second agent to analyze and diagnose
treatment_planner, # Third agent to plan treatment
specialist_consultant, # Fourth agent to provide specialist input
patient_care_coordinator, # Final agent to coordinate care plan
],
# Configure the document storage and retrieval system
memory_system=LlamaIndexDB(
data_dir="docs", # Directory containing medical documents
filename_as_id=True, # Use filenames as document identifiers
recursive=True, # Search subdirectories
# required_exts=[".txt", ".pdf", ".docx"], # Supported file types
similarity_top_k=10, # Return top 10 most relevant documents
),
# Define the sequential flow of information between agents
flow=f"{medical_data_extractor.agent_name} -> {diagnostic_specialist.agent_name} -> {treatment_planner.agent_name} -> {specialist_consultant.agent_name} -> {patient_care_coordinator.agent_name}",
)
# Example usage
if __name__ == "__main__":
# Run a comprehensive medical analysis task for patient Lucas Brown
router.run(
"Analyze this Lucas Brown's medical data to provide a diagnosis and treatment plan"
)

@ -0,0 +1,63 @@
import os
import google.generativeai as genai
from loguru import logger
class GeminiModel:
"""
Represents a GeminiModel instance for generating text based on user input.
"""
    def __init__(
        self,
        temperature: float = 1.0,
        top_p: float = 0.95,
        top_k: int = 40,
    ):
        """
        Initializes the GeminiModel by setting up the API key, generation configuration, and starting a chat session.
        Defaults are provided so the class can be constructed with no arguments, as in the example below.
        Raises a KeyError if the GEMINI_API_KEY environment variable is not found.
        """
        try:
            api_key = os.environ["GEMINI_API_KEY"]
            genai.configure(api_key=api_key)
            # Use the constructor arguments rather than hard-coded sampling values
            self.generation_config = {
                "temperature": temperature,
                "top_p": top_p,
                "top_k": top_k,
                "max_output_tokens": 8192,
                "response_mime_type": "text/plain",
            }
self.model = genai.GenerativeModel(
model_name="gemini-1.5-pro",
generation_config=self.generation_config,
)
self.chat_session = self.model.start_chat(history=[])
except KeyError as e:
logger.error(f"Environment variable not found: {e}")
raise
def run(self, task: str) -> str:
"""
Sends a message to the chat session and returns the response text.
Raises an Exception if there's an error running the GeminiModel.
Args:
task (str): The input task or message to send to the chat session.
Returns:
str: The response text from the chat session.
"""
try:
response = self.chat_session.send_message(task)
return response.text
except Exception as e:
logger.error(f"Error running GeminiModel: {e}")
raise
# Example usage
if __name__ == "__main__":
gemini_model = GeminiModel()
output = gemini_model.run("INSERT_INPUT_HERE")
print(output)

File diff suppressed because it is too large

@ -0,0 +1,420 @@
import os
from typing import List, Dict, Any, Optional, Callable, get_type_hints
from dataclasses import dataclass, field
import json
from datetime import datetime
import inspect
import typing
from typing import Union
from swarms import Agent
from swarm_models import OpenAIChat
@dataclass
class ToolDefinition:
name: str
description: str
parameters: Dict[str, Any]
required_params: List[str]
callable: Optional[Callable] = None
def extract_type_hints(func: Callable) -> Dict[str, Any]:
"""Extract parameter types from function type hints."""
return typing.get_type_hints(func)
def extract_tool_info(func: Callable) -> ToolDefinition:
"""Extract tool information from a callable function."""
# Get function name
name = func.__name__
# Get docstring
description = inspect.getdoc(func) or "No description available"
# Get parameters and their types
signature = inspect.signature(func)
type_hints = extract_type_hints(func)
parameters = {}
required_params = []
for param_name, param in signature.parameters.items():
# Skip self parameter for methods
if param_name == "self":
continue
param_type = type_hints.get(param_name, Any)
# Handle optional parameters
is_optional = (
param.default != inspect.Parameter.empty
or getattr(param_type, "__origin__", None) is Union
and type(None) in param_type.__args__
)
if not is_optional:
required_params.append(param_name)
parameters[param_name] = {
"type": str(param_type),
"default": (
None
if param.default is inspect.Parameter.empty
else param.default
),
"required": not is_optional,
}
return ToolDefinition(
name=name,
description=description,
parameters=parameters,
required_params=required_params,
callable=func,
)
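# Illustrative example (commented; the function below is made up for this sketch):
#
#     def add(a: int, b: int = 0) -> int:
#         """Add two integers."""
#         return a + b
#
#     spec = extract_tool_info(add)
#     # spec.name == "add", spec.required_params == ["a"], and
#     # spec.parameters["b"]["required"] is False because `b` has a default.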
@dataclass
class FunctionSpec:
"""Specification for a callable tool function."""
name: str
description: str
parameters: Dict[
str, dict
] # Contains type and description for each parameter
return_type: str
return_description: str
@dataclass
class ExecutionStep:
"""Represents a single step in the execution plan."""
step_id: int
function_name: str
parameters: Dict[str, Any]
expected_output: str
completed: bool = False
result: Any = None
@dataclass
class ExecutionContext:
"""Maintains state during execution."""
task: str
steps: List[ExecutionStep] = field(default_factory=list)
results: Dict[int, Any] = field(default_factory=dict)
current_step: int = 0
history: List[Dict[str, Any]] = field(default_factory=list)
class ToolAgent:
def __init__(
self,
functions: List[Callable],
openai_api_key: str,
model_name: str = "gpt-4",
temperature: float = 0.1,
):
self.functions = {func.__name__: func for func in functions}
self.function_specs = self._analyze_functions(functions)
self.model = OpenAIChat(
openai_api_key=openai_api_key,
model_name=model_name,
temperature=temperature,
)
self.system_prompt = self._create_system_prompt()
self.agent = Agent(
agent_name="Tool-Agent",
system_prompt=self.system_prompt,
llm=self.model,
max_loops=1,
verbose=True,
)
def _analyze_functions(
self, functions: List[Callable]
) -> Dict[str, FunctionSpec]:
"""Analyze functions to create detailed specifications."""
specs = {}
for func in functions:
hints = get_type_hints(func)
sig = inspect.signature(func)
doc = inspect.getdoc(func) or ""
# Parse docstring for parameter descriptions
param_descriptions = {}
current_param = None
for line in doc.split("\n"):
if ":param" in line:
param_name = (
line.split(":param")[1].split(":")[0].strip()
)
desc = line.split(":", 2)[-1].strip()
param_descriptions[param_name] = desc
elif ":return:" in line:
return_desc = line.split(":return:")[1].strip()
# Build parameter specifications
parameters = {}
for name, param in sig.parameters.items():
param_type = hints.get(name, Any)
parameters[name] = {
"type": str(param_type),
"type_class": param_type,
"description": param_descriptions.get(name, ""),
"required": param.default == param.empty,
}
specs[func.__name__] = FunctionSpec(
name=func.__name__,
description=doc.split("\n")[0],
parameters=parameters,
return_type=str(hints.get("return", Any)),
return_description=(
return_desc if "return_desc" in locals() else ""
),
)
return specs
def _create_system_prompt(self) -> str:
"""Create system prompt with detailed function specifications."""
functions_desc = []
for spec in self.function_specs.values():
params_desc = []
for name, details in spec.parameters.items():
params_desc.append(
f" - {name}: {details['type']} - {details['description']}"
)
functions_desc.append(
f"""
Function: {spec.name}
Description: {spec.description}
Parameters:
{chr(10).join(params_desc)}
Returns: {spec.return_type} - {spec.return_description}
"""
)
return f"""You are an AI agent that creates and executes plans using available functions.
Available Functions:
{chr(10).join(functions_desc)}
You must respond in two formats depending on the phase:
1. Planning Phase:
{{
"phase": "planning",
"plan": {{
"description": "Overall plan description",
"steps": [
{{
"step_id": 1,
"function": "function_name",
"parameters": {{
"param1": "value1",
"param2": "value2"
}},
"purpose": "Why this step is needed"
}}
]
}}
}}
2. Execution Phase:
{{
"phase": "execution",
"analysis": "Analysis of current result",
"next_action": {{
"type": "continue|request_input|complete",
"reason": "Why this action was chosen",
"needed_input": {{}} # If requesting input
}}
}}
Always:
- Use exact function names
- Ensure parameter types match specifications
- Provide clear reasoning for each decision
"""
def _execute_function(
self, spec: FunctionSpec, parameters: Dict[str, Any]
) -> Any:
"""Execute a function with type checking."""
converted_params = {}
for name, value in parameters.items():
param_spec = spec.parameters[name]
try:
# Convert value to required type
param_type = param_spec["type_class"]
if param_type in (int, float, str, bool):
converted_params[name] = param_type(value)
else:
converted_params[name] = value
except (ValueError, TypeError) as e:
raise ValueError(
f"Parameter '{name}' conversion failed: {str(e)}"
)
return self.functions[spec.name](**converted_params)
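# Note: the model may return numeric parameters as JSON strings
# (e.g., {"principal": "10000", "rate": "0.07", "years": "10"}); the
# coercion above converts such values to the annotated int/float/str/bool
# types before calling the underlying function.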
def run(self, task: str) -> Dict[str, Any]:
"""Execute task with planning and step-by-step execution."""
context = ExecutionContext(task=task)
execution_log = {
"task": task,
"start_time": datetime.utcnow().isoformat(),
"steps": [],
"final_result": None,
}
try:
# Planning phase
plan_prompt = f"Create a plan to: {task}"
plan_response = self.agent.run(plan_prompt)
plan_data = json.loads(
plan_response.replace("System:", "").strip()
)
# Convert plan to execution steps
for step in plan_data["plan"]["steps"]:
context.steps.append(
ExecutionStep(
step_id=step["step_id"],
function_name=step["function"],
parameters=step["parameters"],
expected_output=step["purpose"],
)
)
# Execution phase
while context.current_step < len(context.steps):
step = context.steps[context.current_step]
print(
f"\nExecuting step {step.step_id}: {step.function_name}"
)
try:
# Execute function
spec = self.function_specs[step.function_name]
result = self._execute_function(
spec, step.parameters
)
context.results[step.step_id] = result
step.completed = True
step.result = result
# Get agent's analysis
analysis_prompt = f"""
Step {step.step_id} completed:
Function: {step.function_name}
Result: {json.dumps(result)}
Remaining steps: {len(context.steps) - context.current_step - 1}
Analyze the result and decide next action.
"""
analysis_response = self.agent.run(
analysis_prompt
)
analysis_data = json.loads(
analysis_response.replace(
"System:", ""
).strip()
)
execution_log["steps"].append(
{
"step_id": step.step_id,
"function": step.function_name,
"parameters": step.parameters,
"result": result,
"analysis": analysis_data,
}
)
# Stop early only when the model reports completion on the final
# remaining step; otherwise advance and keep executing the plan.
if (
analysis_data["next_action"]["type"]
== "complete"
and context.current_step
>= len(context.steps) - 1
):
break
context.current_step += 1
except Exception as e:
print(f"Error in step {step.step_id}: {str(e)}")
execution_log["steps"].append(
{
"step_id": step.step_id,
"function": step.function_name,
"parameters": step.parameters,
"error": str(e),
}
)
raise
# Final analysis
final_prompt = f"""
Task completed. Results:
{json.dumps(context.results, indent=2)}
Provide final analysis and recommendations.
"""
final_analysis = self.agent.run(final_prompt)
execution_log["final_result"] = {
"success": True,
"results": context.results,
"analysis": json.loads(
final_analysis.replace("System:", "").strip()
),
}
except Exception as e:
execution_log["final_result"] = {
"success": False,
"error": str(e),
}
execution_log["end_time"] = datetime.utcnow().isoformat()
return execution_log
def calculate_investment_return(
principal: float, rate: float, years: int
) -> float:
"""Calculate investment return with compound interest.
:param principal: Initial investment amount in dollars
:param rate: Annual interest rate as decimal (e.g., 0.07 for 7%)
:param years: Number of years to invest
:return: Final investment value
"""
return principal * (1 + rate) ** years
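# Quick sanity check on the formula above (illustrative, not produced by the
# agent): 10_000 * (1 + 0.07) ** 10 ≈ 19_671.51, which is roughly what the
# example task below should report.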
agent = ToolAgent(
functions=[calculate_investment_return],
openai_api_key=os.getenv("OPENAI_API_KEY"),
)
result = agent.run(
"Calculate returns for $10000 invested at 7% for 10 years"
)
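# For reference, `result` is the execution log assembled in ToolAgent.run;
# its rough shape (values illustrative, not captured output) is:
# {
#     "task": "...",
#     "start_time": "<ISO timestamp>",
#     "steps": [
#         {"step_id": 1, "function": "calculate_investment_return",
#          "parameters": {...}, "result": 19671.51, "analysis": {...}}
#     ],
#     "final_result": {"success": True, "results": {...}, "analysis": {...}},
#     "end_time": "<ISO timestamp>",
# }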

@ -0,0 +1,113 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv
# Custom system prompt for VC legal document generation
VC_LEGAL_AGENT_PROMPT = """You are a specialized legal document assistant focusing on venture capital documentation.
Your role is to help draft preliminary versions of common VC legal documents while adhering to these guidelines:
1. Always include standard legal disclaimers
2. Follow standard VC document structures
3. Flag areas that need attorney review
4. Request necessary information for document completion
5. Maintain consistency across related documents
6. Output <DONE> only when document is complete and verified
Remember: All output should be marked as 'DRAFT' and require professional legal review."""
def create_vc_legal_agent():
load_dotenv()
# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Configure the model with conservative parameters for legal work
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize the persistent agent
agent = Agent(
agent_name="VC-Legal-Document-Agent",
system_prompt=VC_LEGAL_AGENT_PROMPT,
llm=model,
max_loops="auto", # Allows multiple iterations until completion
stopping_token="<DONE>", # Agent will continue until this token is output
autosave=True,
dashboard=True, # Enable dashboard for monitoring
verbose=True,
dynamic_temperature_enabled=False, # Disable for consistency in legal documents
saved_state_path="vc_legal_agent_state.json",
user_name="legal_corp",
retry_attempts=3,
context_length=200000,
return_step_meta=True,
output_type="string",
streaming_on=False,
)
return agent
def generate_legal_document(agent, document_type, parameters):
"""
Generate a legal document with multiple refinement iterations
Args:
agent: The initialized VC legal agent
document_type: Type of document to generate (e.g., "term_sheet", "investment_agreement")
parameters: Dict containing necessary parameters for the document
Returns:
str: The generated document content
"""
prompt = f"""
Generate a {document_type} with the following parameters:
{parameters}
Please follow these steps:
1. Create initial draft
2. Review for completeness
3. Add necessary legal disclaimers
4. Verify all required sections
5. Output <DONE> when complete
Include [REQUIRES LEGAL REVIEW] tags for sections needing attorney attention.
"""
return agent.run(prompt)
# Example usage
if __name__ == "__main__":
# Initialize the agent
legal_agent = create_vc_legal_agent()
# Example parameters for a term sheet
parameters = {
"company_name": "TechStartup Inc.",
"investment_amount": "$5,000,000",
"valuation": "$20,000,000",
"investor_rights": [
"Board seat",
"Pro-rata rights",
"Information rights",
],
"type_of_security": "Series A Preferred Stock",
}
# Generate a term sheet
document = generate_legal_document(
legal_agent, "term_sheet", parameters
)
# Save the generated document
with open("generated_term_sheet_draft.md", "w") as f:
f.write(document)

@ -0,0 +1,319 @@
"""
Zoe - Real Estate Agent
"""
from typing import Optional, Dict, Any, List
from dataclasses import dataclass
from datetime import datetime
import os
import json
import requests
from loguru import logger
from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv
from enum import Enum
# Configure loguru logger
logger.add(
"logs/real_estate_agent_{time}.log",
rotation="500 MB",
retention="10 days",
level="INFO",
format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
)
class PropertyType(str, Enum):
"""Enum for property types"""
OFFICE = "office"
RETAIL = "retail"
INDUSTRIAL = "industrial"
MIXED_USE = "mixed-use"
LAND = "land"
@dataclass
class PropertyListing:
"""Data class for commercial property listings"""
property_id: str
address: str
city: str
state: str
zip_code: str
price: float
square_footage: float
property_type: PropertyType
zoning: str
listing_date: datetime
lat: float
lng: float
description: Optional[str] = None
features: Optional[List[str]] = None
images: Optional[List[str]] = None
class PropertyRadarAPI:
"""Client for PropertyRadar API integration"""
def __init__(self, api_key: str):
"""Initialize PropertyRadar API client
Args:
api_key (str): PropertyRadar API key
"""
self.api_key = api_key
self.base_url = "https://api.propertyradar.com/v1"
self.session = requests.Session()
self.session.headers.update(
{
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}
)
def search_properties(
self,
max_price: float = 10_000_000,
property_types: List[PropertyType] = None,
location: Dict[str, Any] = None,
min_sqft: Optional[float] = None,
max_sqft: Optional[float] = None,
page: int = 1,
limit: int = 20,
) -> List[PropertyListing]:
"""
Search for commercial properties using PropertyRadar API
Args:
max_price (float): Maximum property price
property_types (List[PropertyType]): Types of properties to search for
location (Dict[str, Any]): Location criteria (city, county, or coordinates)
min_sqft (Optional[float]): Minimum square footage
max_sqft (Optional[float]): Maximum square footage
page (int): Page number for pagination
limit (int): Number of results per page
Returns:
List[PropertyListing]: List of matching properties
"""
try:
# Build the query parameters
params = {
"price_max": max_price,
"property_types": (
[pt.value for pt in property_types]
if property_types
else None
),
"page": page,
"limit": limit,
"for_sale": True,
"state": "FL", # Florida only
"commercial_property": True,
}
# Add location parameters
if location:
params.update(location)
# Add square footage filters
if min_sqft:
params["square_feet_min"] = min_sqft
if max_sqft:
params["square_feet_max"] = max_sqft
# Make the API request
response = self.session.get(
f"{self.base_url}/properties",
params={
k: v for k, v in params.items() if v is not None
},
)
response.raise_for_status()
# Parse the response
properties_data = response.json()
# Convert to PropertyListing objects
return [
PropertyListing(
property_id=prop["id"],
address=prop["address"],
city=prop["city"],
state=prop["state"],
zip_code=prop["zip_code"],
price=float(prop["price"]),
square_footage=float(prop["square_feet"]),
property_type=PropertyType(prop["property_type"]),
zoning=prop["zoning"],
listing_date=datetime.fromisoformat(
prop["list_date"]
),
lat=float(prop["latitude"]),
lng=float(prop["longitude"]),
description=prop.get("description"),
features=prop.get("features", []),
images=prop.get("images", []),
)
for prop in properties_data["results"]
]
except requests.RequestException as e:
logger.error(f"Error fetching properties: {str(e)}")
raise
class CommercialRealEstateAgent:
"""Agent for searching and analyzing commercial real estate properties"""
def __init__(
self,
openai_api_key: str,
propertyradar_api_key: str,
model_name: str = "gpt-4",
temperature: float = 0.1,
saved_state_path: Optional[str] = None,
):
"""Initialize the real estate agent
Args:
openai_api_key (str): OpenAI API key
propertyradar_api_key (str): PropertyRadar API key
model_name (str): Name of the LLM model to use
temperature (float): Temperature setting for the LLM
saved_state_path (Optional[str]): Path to save agent state
"""
self.property_api = PropertyRadarAPI(propertyradar_api_key)
# Initialize OpenAI model
self.model = OpenAIChat(
openai_api_key=openai_api_key,
model_name=model_name,
temperature=temperature,
)
# Initialize the agent
self.agent = Agent(
agent_name="Commercial-Real-Estate-Agent",
system_prompt=self._get_system_prompt(),
llm=self.model,
max_loops=1,
autosave=True,
dashboard=False,
verbose=True,
saved_state_path=saved_state_path,
context_length=200000,
streaming_on=False,
)
logger.info(
"Commercial Real Estate Agent initialized successfully"
)
def _get_system_prompt(self) -> str:
"""Get the system prompt for the agent"""
return """You are a specialized commercial real estate agent assistant focused on Central Florida properties.
Your primary responsibilities are:
1. Search for commercial properties under $10 million
2. Focus on properties zoned for commercial use
3. Provide detailed analysis of property features, location benefits, and potential ROI
4. Consider local market conditions and growth potential
5. Verify zoning compliance and restrictions
When analyzing properties, consider:
- Current market valuations
- Local business development plans
- Traffic patterns and accessibility
- Nearby amenities and businesses
- Future development potential"""
def search_properties(
self,
max_price: float = 10_000_000,
property_types: List[PropertyType] = None,
location: Dict[str, Any] = None,
min_sqft: Optional[float] = None,
max_sqft: Optional[float] = None,
) -> List[Dict[str, Any]]:
"""
Search for properties and provide analysis
Args:
max_price (float): Maximum property price
property_types (List[PropertyType]): Types of properties to search
location (Dict[str, Any]): Location criteria
min_sqft (Optional[float]): Minimum square footage
max_sqft (Optional[float]): Maximum square footage
Returns:
List[Dict[str, Any]]: List of properties with analysis
"""
try:
# Search for properties
properties = self.property_api.search_properties(
max_price=max_price,
property_types=property_types,
location=location,
min_sqft=min_sqft,
max_sqft=max_sqft,
)
# Analyze each property
analyzed_properties = []
for prop in properties:
analysis = self.agent.run(
f"Analyze this commercial property:\n"
f"Address: {prop.address}, {prop.city}, FL {prop.zip_code}\n"
f"Price: ${prop.price:,.2f}\n"
f"Square Footage: {prop.square_footage:,.0f}\n"
f"Property Type: {prop.property_type.value}\n"
f"Zoning: {prop.zoning}\n"
f"Description: {prop.description or 'Not provided'}"
)
analyzed_properties.append(
{"property": prop.__dict__, "analysis": analysis}
)
logger.info(
f"Successfully analyzed {len(analyzed_properties)} properties"
)
return analyzed_properties
except Exception as e:
logger.error(
f"Error in property search and analysis: {str(e)}"
)
raise
def main():
"""Main function to demonstrate usage"""
load_dotenv()
# Initialize the agent
agent = CommercialRealEstateAgent(
openai_api_key=os.getenv("OPENAI_API_KEY"),
propertyradar_api_key=os.getenv("PROPERTYRADAR_API_KEY"),
saved_state_path="real_estate_agent_state.json",
)
# Example search
results = agent.search_properties(
max_price=5_000_000,
property_types=[PropertyType.RETAIL, PropertyType.OFFICE],
location={"city": "Orlando", "radius_miles": 25},
min_sqft=2000,
)
# Save results
with open("search_results.json", "w") as f:
json.dump(results, f, default=str, indent=2)
if __name__ == "__main__":
main()

@ -0,0 +1,121 @@
import os
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)
# Initialize the boss agent (Director)
boss_agent = Agent(
agent_name="BossAgent",
system_prompt="""
You are the BossAgent responsible for managing and overseeing a swarm of agents analyzing company expenses.
Your job is to dynamically assign tasks, prioritize their execution, and ensure that all agents collaborate efficiently.
After receiving a report on the company's expenses, you will break down the work into smaller tasks,
assigning specific tasks to each agent, such as detecting recurring high costs, categorizing expenditures,
and identifying unnecessary transactions. Ensure the results are communicated back in a structured way
so the finance team can take actionable steps to cut off unproductive spending. You also monitor and
dynamically adapt the swarm to optimize their performance. Finally, you summarize their findings
into a coherent report.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="boss_agent.json",
)
# Initialize worker 1: Expense Analyzer
worker1 = Agent(
agent_name="ExpenseAnalyzer",
system_prompt="""
Your task is to carefully analyze the company's expense data provided to you.
You will focus on identifying high-cost recurring transactions, categorizing expenditures
(e.g., marketing, operations, utilities, etc.), and flagging areas where there seems to be excessive spending.
You will provide a detailed breakdown of each category, along with specific recommendations for cost-cutting.
Pay close attention to monthly recurring subscriptions, office supplies, and non-essential expenditures.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker1.json",
)
# Initialize worker 2: Summary Generator
worker2 = Agent(
agent_name="SummaryGenerator",
system_prompt="""
After receiving the detailed breakdown from the ExpenseAnalyzer,
your task is to create a concise summary of the findings. You will focus on the most actionable insights,
such as highlighting the specific transactions that can be immediately cut off and summarizing the areas
where the company is overspending. Your summary will be used by the BossAgent to generate the final report.
Be clear and to the point, emphasizing the urgency of cutting unnecessary expenses.
""",
llm=model,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker2.json",
)
# Swarm-Level Prompt (Collaboration Prompt)
swarm_prompt = """
As a swarm, your collective goal is to analyze the company's expenses and identify transactions that should be cut off.
You will work collaboratively to break down the entire process of expense analysis into manageable steps.
The BossAgent will direct the flow and assign tasks dynamically to the agents. The ExpenseAnalyzer will first
focus on breaking down the expense report, identifying high-cost recurring transactions, categorizing them,
and providing recommendations for potential cost reduction. After the analysis, the SummaryGenerator will then
consolidate all the findings into an actionable summary that the finance team can use to immediately cut off unnecessary expenses.
Together, your collaboration is essential to streamlining and improving the company's financial health.
"""
# Create a list of agents
agents = [boss_agent, worker1, worker2]
# Define the flow pattern for the swarm
flow = "BossAgent -> ExpenseAnalyzer -> SummaryGenerator"
# Use the AgentRearrange class to manage the swarm
agent_system = AgentRearrange(
name="pe-swarm",
description="ss",
agents=agents,
flow=flow,
return_json=False,
output_type="final",
max_loops=1,
# docs=["SECURITY.md"],
)
# Input task for the swarm
task = f"""
{swarm_prompt}
The company has been facing a rising number of unnecessary expenses, and the finance team needs a detailed
analysis of recent transactions to identify which expenses can be cut off to improve profitability.
Analyze the provided transaction data and create a detailed report on cost-cutting opportunities,
focusing on recurring transactions and non-essential expenditures.
"""
# Run the swarm system with the task
output = agent_system.run(task)
print(output)

@ -0,0 +1,118 @@
import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize specialized agents
data_extractor_agent = Agent(
agent_name="Data-Extractor",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="data_extractor_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
summarizer_agent = Agent(
agent_name="Document-Summarizer",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="summarizer_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
financial_analyst_agent = Agent(
agent_name="Financial-Analyst",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="financial_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
market_analyst_agent = Agent(
agent_name="Market-Analyst",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="market_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
operational_analyst_agent = Agent(
agent_name="Operational-Analyst",
system_prompt=None,
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="operational_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
# Initialize the sequential workflow
router = SequentialWorkflow(
name="pe-document-analysis-swarm",
description="Analyze documents for private equity due diligence and investment decision-making",
max_loops=1,
agents=[
data_extractor_agent,
summarizer_agent,
financial_analyst_agent,
market_analyst_agent,
operational_analyst_agent,
],
output_type="all",
)
# Example usage
if __name__ == "__main__":
# Run a comprehensive private equity document analysis task
result = router.run(
"Where is the best place to find template term sheets for series A startups. Provide links and references",
img=None,
)
print(result)

@ -0,0 +1,143 @@
import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")
# Model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
# Initialize specialized agents
data_extractor_agent = Agent(
agent_name="Data-Extractor",
system_prompt="""You are a data extraction specialist. Your role is to:
1. Extract key information, data points, and metrics from documents
2. Identify and pull out important facts, figures, and statistics
3. Structure extracted data in a clear, organized format
4. Flag any inconsistencies or missing data
5. Ensure accuracy in data extraction while maintaining context""",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="data_extractor_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
summarizer_agent = Agent(
agent_name="Document-Summarizer",
system_prompt="""You are a document summarization expert. Your role is to:
1. Create concise, comprehensive summaries of documents
2. Highlight key points and main takeaways
3. Maintain the essential meaning while reducing length
4. Structure summaries in a logical, readable format
5. Identify and emphasize critical insights""",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="summarizer_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
financial_analyst_agent = Agent(
agent_name="Financial-Analyst",
system_prompt="""You are a financial analysis expert. Your role is to:
1. Analyze financial statements and metrics
2. Evaluate company valuations and financial projections
3. Assess financial risks and opportunities
4. Provide insights on financial performance and health
5. Make recommendations based on financial analysis""",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="financial_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
market_analyst_agent = Agent(
agent_name="Market-Analyst",
system_prompt="""You are a market analysis expert. Your role is to:
1. Analyze market trends and dynamics
2. Evaluate competitive landscape and market positioning
3. Identify market opportunities and threats
4. Assess market size and growth potential
5. Provide strategic market insights and recommendations""",
llm=model,
max_loops=1,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="market_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
operational_analyst_agent = Agent(
agent_name="Operational-Analyst",
system_prompt="""You are an operational analysis expert. Your role is to:
1. Analyze business operations and processes
2. Evaluate operational efficiency and effectiveness
3. Identify operational risks and opportunities
4. Assess scalability and growth potential
5. Provide recommendations for operational improvements""",
llm=model,
max_loops=2,
autosave=True,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="operational_analyst_agent.json",
user_name="pe_firm",
retry_attempts=1,
context_length=200000,
output_type="string",
)
# Initialize the sequential workflow
router = SequentialWorkflow(
name="pe-document-analysis-swarm",
description="Analyze documents for private equity due diligence and investment decision-making",
max_loops=1,
agents=[
data_extractor_agent,
summarizer_agent,
financial_analyst_agent,
market_analyst_agent,
operational_analyst_agent,
],
output_type="all",
)
# Example usage
if __name__ == "__main__":
# Run a comprehensive private equity document analysis task
result = router.run(
"Where is the best place to find template term sheets for series A startups. Provide links and references",
no_use_clusterops=True,
)
print(result)

@ -0,0 +1,238 @@
"""
Todo
- Send structured data to the swarm through the form the user fills out
- Then connect RAG for every agent using LlamaIndex to remember all of the student's data
- Produce structured outputs
"""
import os
from dotenv import load_dotenv
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat, OpenAIFunctionCaller
from pydantic import BaseModel
from typing import List
class CollegeLog(BaseModel):
college_name: str
college_description: str
college_admission_requirements: str
class CollegesRecommendation(BaseModel):
colleges: List[CollegeLog]
reasoning: str
load_dotenv()
# Get the API key from environment variable
api_key = os.getenv("GROQ_API_KEY")
# Initialize the model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
FINAL_AGENT_PROMPT = """
You are a college selection final decision maker. Your role is to:
1. Synthesize all previous analyses and discussions
2. Weigh competing factors and trade-offs
3. Create a final ranked list of recommended colleges
4. Provide clear rationale for each recommendation
5. Include specific action items for each selected school
6. Outline next steps in the application process
Focus on creating actionable, well-reasoned final recommendations that
balance all relevant factors and stakeholder input.
"""
function_caller = OpenAIFunctionCaller(
system_prompt=FINAL_AGENT_PROMPT,
openai_api_key=os.getenv("OPENAI_API_KEY"),
base_model=CollegesRecommendation,
parallel_tool_calls=True,
)
# Student Profile Analyzer Agent
profile_analyzer_agent = Agent(
agent_name="Student-Profile-Analyzer",
system_prompt="""You are an expert student profile analyzer. Your role is to:
1. Analyze academic performance, test scores, and extracurricular activities
2. Identify student's strengths, weaknesses, and unique qualities
3. Evaluate personal statements and essays
4. Assess leadership experiences and community involvement
5. Determine student's preferences for college environment, location, and programs
6. Create a comprehensive student profile summary
Always consider both quantitative metrics (GPA, test scores) and qualitative aspects
(personal growth, challenges overcome, unique perspectives).""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="profile_analyzer_agent.json",
user_name="student",
context_length=200000,
output_type="string",
)
# College Research Agent
college_research_agent = Agent(
agent_name="College-Research-Specialist",
system_prompt="""You are a college research specialist. Your role is to:
1. Maintain updated knowledge of college admission requirements
2. Research academic programs, campus culture, and student life
3. Analyze admission statistics and trends
4. Evaluate college-specific opportunities and resources
5. Consider financial aid availability and scholarship opportunities
6. Track historical admission data and acceptance rates
Focus on providing accurate, comprehensive information about each institution
while considering both academic and cultural fit factors.""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="college_research_agent.json",
user_name="researcher",
context_length=200000,
output_type="string",
)
# College Match Agent
college_match_agent = Agent(
agent_name="College-Match-Maker",
system_prompt="""You are a college matching specialist. Your role is to:
1. Compare student profiles with college requirements
2. Evaluate fit based on academic, social, and cultural factors
3. Consider geographic preferences and constraints
4. Assess financial fit and aid opportunities
5. Create tiered lists of reach, target, and safety schools
6. Explain the reasoning behind each match
Always provide a balanced list with realistic expectations while
considering both student preferences and admission probability.""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="college_match_agent.json",
user_name="matcher",
context_length=200000,
output_type="string",
)
# Debate Moderator Agent
debate_moderator_agent = Agent(
agent_name="Debate-Moderator",
system_prompt="""You are a college selection debate moderator. Your role is to:
1. Facilitate discussions between different perspectives
2. Ensure all relevant factors are considered
3. Challenge assumptions and biases
4. Synthesize different viewpoints
5. Guide the group toward consensus
6. Document key points of agreement and disagreement
Maintain objectivity while ensuring all important factors are thoroughly discussed
and evaluated.""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="debate_moderator_agent.json",
user_name="moderator",
context_length=200000,
output_type="string",
)
# Critique Agent
critique_agent = Agent(
agent_name="College-Selection-Critic",
system_prompt="""You are a college selection critic. Your role is to:
1. Evaluate the strength of college matches
2. Identify potential overlooked factors
3. Challenge assumptions in the selection process
4. Assess risks and potential drawbacks
5. Provide constructive feedback on selections
6. Suggest alternative options when appropriate
Focus on constructive criticism that helps improve the final college list
while maintaining realistic expectations.""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="critique_agent.json",
user_name="critic",
context_length=200000,
output_type="string",
)
# Final Decision Agent
final_decision_agent = Agent(
agent_name="Final-Decision-Maker",
system_prompt="""
You are a college selection final decision maker. Your role is to:
1. Synthesize all previous analyses and discussions
2. Weigh competing factors and trade-offs
3. Create a final ranked list of recommended colleges
4. Provide clear rationale for each recommendation
5. Include specific action items for each selected school
6. Outline next steps in the application process
Focus on creating actionable, well-reasoned final recommendations that
balance all relevant factors and stakeholder input.
""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="final_decision_agent.json",
user_name="decision_maker",
context_length=200000,
output_type="string",
)
# Initialize the AgentRearrange workflow
college_selection_workflow = AgentRearrange(
name="college-selection-swarm",
description="Comprehensive college selection and analysis system",
max_loops=1,
agents=[
profile_analyzer_agent,
college_research_agent,
college_match_agent,
debate_moderator_agent,
critique_agent,
final_decision_agent,
],
output_type="all",
flow=f"{profile_analyzer_agent.name} -> {college_research_agent.name} -> {college_match_agent.name} -> {debate_moderator_agent.name} -> {critique_agent.name} -> {final_decision_agent.name}",
)
# Example usage
if __name__ == "__main__":
# Example student profile input
student_profile = """
Student Profile:
- GPA: 3.8
- SAT: 1450
- Interests: Computer Science, Robotics
- Location Preference: East Coast
- Extracurriculars: Robotics Club President, Math Team
- Budget: Need financial aid
- Preferred Environment: Medium-sized urban campus
"""
# Run the comprehensive college selection analysis
result = college_selection_workflow.run(
student_profile,
no_use_clusterops=True,
)
print(result)

@ -0,0 +1,64 @@
"""
Todo
- Send structured data to the swarm through the form the user fills out
- Then connect RAG for every agent using LlamaIndex to remember all of the student's data
- Produce structured outputs
"""
import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat, OpenAIFunctionCaller
from pydantic import BaseModel
from typing import List
class CollegeLog(BaseModel):
college_name: str
college_description: str
college_admission_requirements: str
class CollegesRecommendation(BaseModel):
colleges: List[CollegeLog]
reasoning: str
load_dotenv()
# Get the API key from environment variable
api_key = os.getenv("GROQ_API_KEY")
# Initialize the model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
function_caller = OpenAIFunctionCaller(
system_prompt="""You are a college selection final decision maker. Your role is to:
- Balance all relevant factors and stakeholder input.
- Only return the output in the schema format.
""",
openai_api_key=os.getenv("OPENAI_API_KEY"),
base_model=CollegesRecommendation,
# parallel_tool_calls=True,
)
print(
function_caller.run(
"""
Student Profile: Kye Gomez
- GPA: 3.8
- SAT: 1450
- Interests: Computer Science, Robotics
- Location Preference: East Coast
- Extracurriculars: Robotics Club President, Math Team
- Budget: Need financial aid
- Preferred Environment: Medium-sized urban campus
"""
)
)

@ -0,0 +1,116 @@
from typing import Optional
from pathlib import Path
from loguru import logger
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
class LlamaIndexDB:
"""A class to manage document indexing and querying using LlamaIndex.
This class provides functionality to add documents from a directory and query the indexed documents.
Args:
data_dir (str): Directory containing documents to index. Defaults to "docs".
**kwargs: Additional arguments passed to SimpleDirectoryReader and VectorStoreIndex.
SimpleDirectoryReader kwargs:
- filename_as_id (bool): Use filenames as document IDs
- recursive (bool): Recursively read subdirectories
- required_exts (List[str]): Only read files with these extensions
- exclude_hidden (bool): Skip hidden files
VectorStoreIndex kwargs:
- service_context: Custom service context
- embed_model: Custom embedding model
- similarity_top_k (int): Number of similar docs to retrieve
- store_nodes_override (bool): Override node storage
"""
def __init__(self, data_dir: str = "docs", **kwargs) -> None:
"""Initialize the LlamaIndexDB with an empty index.
Args:
data_dir (str): Directory containing documents to index
**kwargs: Additional arguments for SimpleDirectoryReader and VectorStoreIndex
"""
self.data_dir = data_dir
self.index: Optional[VectorStoreIndex] = None
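# Split the incoming kwargs by destination: keys that match parameter names
# of SimpleDirectoryReader.__init__ go to the reader, everything else is
# forwarded to VectorStoreIndex.from_documents below.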
self.reader_kwargs = {
k: v
for k, v in kwargs.items()
if k
in SimpleDirectoryReader.__init__.__code__.co_varnames
}
self.index_kwargs = {
k: v
for k, v in kwargs.items()
if k not in self.reader_kwargs
}
logger.info("Initialized LlamaIndexDB")
data_path = Path(self.data_dir)
if not data_path.exists():
logger.error(f"Directory not found: {self.data_dir}")
raise FileNotFoundError(
f"Directory {self.data_dir} does not exist"
)
try:
documents = SimpleDirectoryReader(
self.data_dir, **self.reader_kwargs
).load_data()
self.index = VectorStoreIndex.from_documents(
documents, **self.index_kwargs
)
logger.success(
f"Successfully indexed documents from {self.data_dir}"
)
except Exception as e:
logger.error(f"Error indexing documents: {str(e)}")
raise
def query(self, query: str, **kwargs) -> str:
"""Query the indexed documents.
Args:
query (str): The query string to search for
**kwargs: Additional arguments passed to the query engine
- similarity_top_k (int): Number of similar documents to retrieve
- streaming (bool): Enable streaming response
- response_mode (str): Response synthesis mode
- max_tokens (int): Maximum tokens in response
Returns:
str: The response from the query engine
Raises:
ValueError: If no documents have been indexed yet
"""
if self.index is None:
logger.error("No documents have been indexed yet")
raise ValueError("Must add documents before querying")
try:
query_engine = self.index.as_query_engine(**kwargs)
response = query_engine.query(query)
print(response)
logger.info(f"Successfully queried: {query}")
return str(response)
except Exception as e:
logger.error(f"Error during query: {str(e)}")
raise
# # Example usage
# llama_index_db = LlamaIndexDB(
# data_dir="docs",
# filename_as_id=True,
# recursive=True,
# required_exts=[".txt", ".pdf", ".docx"],
# similarity_top_k=3
# )
# response = llama_index_db.query(
# "What is the medical history of patient 1?",
# streaming=True,
# response_mode="compact"
# )
# print(response)

@ -0,0 +1,237 @@
"""
Todo
- Send structured data to the swarm through the form the user fills out
- Then connect RAG for every agent using LlamaIndex to remember all of the student's data
- Produce structured outputs
"""
import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat, OpenAIFunctionCaller
from pydantic import BaseModel
from typing import List
class CollegeLog(BaseModel):
college_name: str
college_description: str
college_admission_requirements: str
class CollegesRecommendation(BaseModel):
colleges: List[CollegeLog]
reasoning: str
load_dotenv()
# Get the API key from environment variable
api_key = os.getenv("GROQ_API_KEY")
# Initialize the model
model = OpenAIChat(
openai_api_base="https://api.groq.com/openai/v1",
openai_api_key=api_key,
model_name="llama-3.1-70b-versatile",
temperature=0.1,
)
FINAL_AGENT_PROMPT = """
You are a college selection final decision maker. Your role is to:
1. Synthesize all previous analyses and discussions
2. Weigh competing factors and trade-offs
3. Create a final ranked list of recommended colleges
4. Provide clear rationale for each recommendation
5. Include specific action items for each selected school
6. Outline next steps in the application process
Focus on creating actionable, well-reasoned final recommendations that
balance all relevant factors and stakeholder input.
"""
function_caller = OpenAIFunctionCaller(
system_prompt=FINAL_AGENT_PROMPT,
openai_api_key=os.getenv("OPENAI_API_KEY"),
base_model=CollegesRecommendation,
parallel_tool_calls=True,
)
# Student Profile Analyzer Agent
profile_analyzer_agent = Agent(
agent_name="Student-Profile-Analyzer",
system_prompt="""You are an expert student profile analyzer. Your role is to:
1. Analyze academic performance, test scores, and extracurricular activities
2. Identify student's strengths, weaknesses, and unique qualities
3. Evaluate personal statements and essays
4. Assess leadership experiences and community involvement
5. Determine student's preferences for college environment, location, and programs
6. Create a comprehensive student profile summary
Always consider both quantitative metrics (GPA, test scores) and qualitative aspects
(personal growth, challenges overcome, unique perspectives).""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="profile_analyzer_agent.json",
user_name="student",
context_length=200000,
output_type="string",
)
# College Research Agent
college_research_agent = Agent(
agent_name="College-Research-Specialist",
system_prompt="""You are a college research specialist. Your role is to:
1. Maintain updated knowledge of college admission requirements
2. Research academic programs, campus culture, and student life
3. Analyze admission statistics and trends
4. Evaluate college-specific opportunities and resources
5. Consider financial aid availability and scholarship opportunities
6. Track historical admission data and acceptance rates
Focus on providing accurate, comprehensive information about each institution
while considering both academic and cultural fit factors.""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="college_research_agent.json",
user_name="researcher",
context_length=200000,
output_type="string",
)
# College Match Agent
college_match_agent = Agent(
agent_name="College-Match-Maker",
system_prompt="""You are a college matching specialist. Your role is to:
1. Compare student profiles with college requirements
2. Evaluate fit based on academic, social, and cultural factors
3. Consider geographic preferences and constraints
4. Assess financial fit and aid opportunities
5. Create tiered lists of reach, target, and safety schools
6. Explain the reasoning behind each match
Always provide a balanced list with realistic expectations while
considering both student preferences and admission probability.""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="college_match_agent.json",
user_name="matcher",
context_length=200000,
output_type="string",
)
# Debate Moderator Agent
debate_moderator_agent = Agent(
agent_name="Debate-Moderator",
system_prompt="""You are a college selection debate moderator. Your role is to:
1. Facilitate discussions between different perspectives
2. Ensure all relevant factors are considered
3. Challenge assumptions and biases
4. Synthesize different viewpoints
5. Guide the group toward consensus
6. Document key points of agreement and disagreement
Maintain objectivity while ensuring all important factors are thoroughly discussed
and evaluated.""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="debate_moderator_agent.json",
user_name="moderator",
context_length=200000,
output_type="string",
)
# Critique Agent
critique_agent = Agent(
agent_name="College-Selection-Critic",
system_prompt="""You are a college selection critic. Your role is to:
1. Evaluate the strength of college matches
2. Identify potential overlooked factors
3. Challenge assumptions in the selection process
4. Assess risks and potential drawbacks
5. Provide constructive feedback on selections
6. Suggest alternative options when appropriate
Focus on constructive criticism that helps improve the final college list
while maintaining realistic expectations.""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="critique_agent.json",
user_name="critic",
context_length=200000,
output_type="string",
)
# Final Decision Agent
final_decision_agent = Agent(
agent_name="Final-Decision-Maker",
system_prompt="""
You are a college selection final decision maker. Your role is to:
1. Synthesize all previous analyses and discussions
2. Weigh competing factors and trade-offs
3. Create a final ranked list of recommended colleges
4. Provide clear rationale for each recommendation
5. Include specific action items for each selected school
6. Outline next steps in the application process
Focus on creating actionable, well-reasoned final recommendations that
balance all relevant factors and stakeholder input.
""",
llm=model,
max_loops=1,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path="final_decision_agent.json",
user_name="decision_maker",
context_length=200000,
output_type="string",
)
# Initialize the Sequential Workflow
college_selection_workflow = SequentialWorkflow(
name="college-selection-swarm",
description="Comprehensive college selection and analysis system",
max_loops=1,
agents=[
profile_analyzer_agent,
college_research_agent,
college_match_agent,
debate_moderator_agent,
critique_agent,
final_decision_agent,
],
output_type="all",
)
# Example usage
if __name__ == "__main__":
# Example student profile input
student_profile = """
Student Profile:
- GPA: 3.8
- SAT: 1450
- Interests: Computer Science, Robotics
- Location Preference: East Coast
- Extracurriculars: Robotics Club President, Math Team
- Budget: Need financial aid
- Preferred Environment: Medium-sized urban campus
"""
# Run the comprehensive college selection analysis
result = college_selection_workflow.run(
student_profile,
no_use_clusterops=True,
)
print(result)

@ -1,116 +0,0 @@
swarms [ master][✘!?][🐳 desktop-linux][📦 v6.0.0][🐍 v3.12.6][☁️ (us-east-1)][☁️ kye@swarms.world(us-central1)]
󰂃 10% /usr/local/bin/python3.12 /Users/swarms_wd/Desktop/swarms/auto_flow.py
{
"id": "AutomatedHedgeFundSystem",
"plan": "The goal is to build a fully automated hedge fund system that integrates multiple components including market analysis, portfolio optimization, trade execution, compliance monitoring, performance tracking, and fund operations. The system will be divided into several key modules, each responsible for specific tasks. The overall success will be measured by the system's ability to consistently execute profitable trades, manage risks effectively, comply with regulations, and provide comprehensive reporting. Key milestones include setting up data feeds for market analysis, developing algorithms for portfolio optimization, implementing automated trade execution protocols, and establishing compliance and reporting mechanisms.",
"failures_prediction": "Potential failure modes include incorrect market data leading to poor trading decisions, algorithmic errors in portfolio optimization, failures in trade execution systems, compliance breaches, and inaccurate reporting. To mitigate these risks, robust data validation procedures will be implemented, algorithms will be rigorously tested in simulated environments, trade execution systems will include fail-safes and redundancies, compliance checks will be automated and regularly audited, and reporting systems will include cross-checks and validation processes. Regular monitoring and updates will ensure the system remains reliable and accurate.",
"rationale": "This flow design is optimal because it breaks down the complex task of building an automated hedge fund into manageable components, allowing for specialized agents to focus on distinct functions. Parallelization is maximized where possible, such as in market analysis and portfolio optimization, to increase efficiency and speed. Sequential dependencies ensure that critical tasks like compliance and trade execution follow necessary preparatory steps. This design balances the need for speed in trading with the necessity of thorough analysis and compliance, ensuring both profitability and adherence to regulations.",
"flow": "AgentMarketAnalysis -> AgentPortfolioOptimization, AgentComplianceMonitoring -> AgentTradeExecution -> AgentPerformanceTracking, AgentFundOperations"
}
swarms [ master][✘!?][🐳 desktop-linux][📦 v6.0.0][🐍 v3.12.6][☁️ (us-east-1)][☁️ kye@swarms.world(us-central1)][⏱ 9s]
󰂃 10% /usr/local/bin/python3.12 /Users/swarms_wd/Desktop/swarms/auto_flow.py
{
"name": "Automated Hedge Fund System",
"description": "A fully automated system for managing a hedge fund, integrating market analysis, portfolio optimization, automated trade execution, compliance, and performance tracking.",
"flows": [
{
"id": "Market_Analysis_and_Research",
"plan": "Develop a system that continuously gathers data from multiple financial markets, processes this data to identify trends and patterns, and generates insights for trading strategies. The system should support multiple asset classes including equities, fixed income, commodities, and currencies.",
"failures_prediction": "Data quality issues may arise, leading to inaccurate analysis. To mitigate, implement data validation and cleansing processes. Additionally, ensure redundancy in data sources to prevent single points of failure. Algorithmic biases can distort insights; regular audits and updates to algorithms are necessary.",
"rationale": "Market analysis is foundational to informed trading strategies. By implementing robust data collection and processing, the system ensures timely and accurate insights, which are crucial for competitive advantage in trading.",
"flow": "DataCollector -> DataProcessor -> TrendAnalyzer -> InsightGenerator"
},
{
"id": "Portfolio_Optimization_and_Risk_Management",
"plan": "Implement a system that uses the insights from market analysis to optimize the portfolio. This involves balancing risk and return, adhering to investment guidelines, and dynamically adjusting the portfolio in response to market changes.",
"failures_prediction": "Risk models might not capture extreme market events, leading to unexpected losses. Regular stress testing and scenario analysis are essential. Portfolio rebalancing might incur high transaction costs; optimization algorithms should account for these.",
"rationale": "Effective portfolio management maximizes returns while controlling risk. By continuously optimizing the portfolio, the system can adapt to market conditions and investor goals, ensuring long-term fund performance.",
"flow": "InsightGenerator -> PortfolioOptimizer -> RiskManager -> Rebalancer"
},
{
"id": "Automated_Trade_Execution_and_Settlement",
"plan": "Design a system that executes trades automatically based on portfolio optimization outputs. It should ensure trades are executed at optimal prices and settled efficiently across multiple asset classes.",
"failures_prediction": "Execution failures can occur due to connectivity issues or market volatility. Implement fail-safes such as alternative trading venues and pre-trade checks. Settlement failures require reconciliation processes to ensure all trades are accurately recorded.",
"rationale": "Automation in trade execution reduces latency and human error, ensuring trades are conducted efficiently and at the best possible prices. This is critical for maintaining competitive edge and operational efficiency.",
"flow": "Rebalancer -> TradeExecutor -> SettlementProcessor"
},
{
"id": "Compliance_and_Regulatory_Monitoring",
"plan": "Establish a system that monitors all trading activities for compliance with relevant regulations and internal policies. It should generate alerts for any potential violations and maintain detailed records for audits.",
"failures_prediction": "Non-compliance can lead to legal penalties and reputational damage. Implement real-time monitoring and alert systems, and conduct regular compliance audits to ensure adherence to regulations.",
"rationale": "Regulatory compliance is non-negotiable in financial markets. A robust monitoring system protects the fund from legal risks and maintains investor trust.",
"flow": "TradeExecutor -> ComplianceMonitor -> AlertSystem"
},
{
"id": "Performance_Tracking_and_Reporting",
"plan": "Create a system that tracks the performance of the hedge fund, analyzing returns, risks, and other key metrics. It should generate regular reports for stakeholders, providing insights into fund performance and areas for improvement.",
"failures_prediction": "Inaccurate performance data can mislead stakeholders. Ensure data integrity through validation processes and cross-checks. Reporting delays can frustrate stakeholders; automate report generation to ensure timeliness.",
"rationale": "Performance tracking provides transparency and accountability, essential for stakeholder trust and strategic decision-making. Regular reporting helps in assessing strategy effectiveness and making informed adjustments.",
"flow": "SettlementProcessor -> PerformanceTracker -> ReportGenerator"
},
{
"id": "Fund_Operations_and_Administration",
"plan": "Develop a system that handles the day-to-day operations of the hedge fund, including investor relations, fund accounting, and administrative tasks. Ensure seamless integration with other components of the hedge fund system.",
"failures_prediction": "Operational bottlenecks can disrupt fund activities. Implement workflow automation and task prioritization to enhance efficiency. Ensure data consistency across systems to prevent administrative errors.",
"rationale": "Efficient fund operations are crucial for smooth functioning and scalability of the hedge fund. By automating routine tasks, the system allows for focus on strategic activities and growth.",
"flow": "ReportGenerator -> FundAdministrator -> InvestorRelations"
}
]
}
swarms [ master][✘!?][🐳 desktop-linux][📦 v6.0.0][🐍 v3.12.6][☁️ (us-east-1)][☁️ kye@swarms.world(us-central1)][⏱ 26s]
󰂃 10% /usr/local/bin/python3.12 /Users/swarms_wd/Desktop/swarms/auto_flow.py
{
"name": "Automated Hedge Fund System",
"description": "A comprehensive architecture for a fully automated hedge fund system integrating market analysis, portfolio optimization, automated execution, compliance monitoring, performance tracking, and fund operations.",
"flows": [
{
"id": "Market Analysis and Research",
"plan": "Develop a robust market analysis module that gathers data from multiple sources, processes it using machine learning algorithms, and provides actionable insights. This module will continuously monitor market trends, sentiment, and economic indicators to inform trading strategies.",
"failures_prediction": "Potential failures include data source outages, incorrect data processing, and machine learning model inaccuracies. Mitigation strategies involve using redundant data sources, implementing data validation checks, and continuously updating and retraining models.",
"rationale": "Market analysis is the foundation of a successful trading strategy. By leveraging multiple data sources and advanced algorithms, the system can generate high-quality insights that drive profitable trades. The design prioritizes reliability and accuracy to ensure consistent performance.",
"flow": "DataCollector -> DataProcessor -> MLModelTrainer -> InsightGenerator"
},
{
"id": "Portfolio Optimization and Risk Management",
"plan": "Create a portfolio optimization engine that uses quantitative models to allocate assets efficiently. Integrate risk management protocols to monitor and mitigate exposure to market risks, ensuring the portfolio aligns with the fund's risk appetite and investment goals.",
"failures_prediction": "Risks include model inaccuracies, unexpected market events, and correlation breakdowns. Preventive measures include stress testing models, implementing real-time risk monitoring, and setting predefined risk thresholds with automated rebalancing.",
"rationale": "Optimizing the portfolio is crucial for maximizing returns while controlling risk. By integrating risk management, the system ensures that the portfolio remains resilient to market fluctuations, aligning with overall investment strategies.",
"flow": "PortfolioOptimizer -> RiskAnalyzer -> RiskMitigationEngine"
},
{
"id": "Automated Trade Execution and Settlement",
"plan": "Design an automated trade execution system that interfaces with multiple exchanges, executes trades based on predefined strategies, and handles settlement processes. Ensure the system is capable of high-frequency trading and adapts to market conditions.",
"failures_prediction": "Failures can occur due to exchange connectivity issues, execution delays, or strategy malfunctions. Mitigation involves implementing failover protocols, real-time monitoring of execution quality, and adaptive algorithms that adjust to market conditions.",
"rationale": "Automated execution is essential for capitalizing on market opportunities quickly and efficiently. The system's ability to handle high-frequency trades and adapt to changing conditions is critical for maintaining a competitive edge.",
"flow": "TradeStrategyEngine -> ExecutionManager -> SettlementProcessor"
},
{
"id": "Compliance and Regulatory Monitoring",
"plan": "Implement a compliance monitoring system that tracks all trading activities, ensures adherence to regulations, and generates reports for regulatory bodies. Incorporate automated alerts for any compliance breaches or suspicious activities.",
"failures_prediction": "Potential issues include regulatory changes, false positives in alerts, and reporting errors. Strategies to address these include regular updates to compliance rules, fine-tuning alert thresholds, and automated report validation checks.",
"rationale": "Compliance is non-negotiable in the hedge fund industry. An automated system reduces the risk of human error and ensures that the fund operates within legal boundaries, protecting against fines and reputational damage.",
"flow": "TradeMonitor -> ComplianceChecker -> AlertSystem -> ReportGenerator"
},
{
"id": "Performance Tracking and Reporting",
"plan": "Develop a performance tracking system that evaluates fund performance against benchmarks, generates detailed reports, and provides insights into fund health. Ensure the system supports real-time performance analytics and historical data analysis.",
"failures_prediction": "Challenges include data inaccuracies, benchmark mismatches, and report generation delays. Mitigation involves implementing data validation, aligning benchmarks with investment goals, and optimizing report generation processes.",
"rationale": "Tracking performance is vital for assessing the fund's success and making informed decisions. The system's ability to provide real-time insights and comprehensive reports supports strategic planning and investor communications.",
"flow": "PerformanceAnalyzer -> BenchmarkComparator -> ReportGenerator"
},
{
"id": "Fund Operations and Administration",
"plan": "Create an operations module that handles fund administration tasks such as investor relations, fee calculations, and financial reporting. Ensure seamless integration with other systems for efficient data flow and operations management.",
"failures_prediction": "Risks include operational inefficiencies, data integration issues, and incorrect calculations. Address these by streamlining processes, ensuring robust data integration, and implementing checks for accuracy in calculations.",
"rationale": "Efficient fund operations are essential for smooth day-to-day management and investor satisfaction. By automating administrative tasks, the system reduces manual workload and enhances operational efficiency.",
"flow": "InvestorRelationsManager -> FeeCalculator -> FinancialReportGenerator"
}
],
"swarm_flow": "Market Analysis and Research -> Portfolio Optimization and Risk Management -> Automated Trade Execution and Settlement -> Compliance and Regulatory Monitoring -> Performance Tracking and Reporting -> Fund Operations and Administration"
}
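The swarm_flow above is a simple linear pipeline, so it maps naturally onto the framework's SequentialWorkflow. The following is a minimal sketch, assuming Agent and SequentialWorkflow are importable from the top-level package (as Agent is in the example script further down) and using placeholder prompts and the gpt-4o-mini model; exact constructor arguments may differ between versions.
```python
# Hedged sketch: wire the six planned fund modules as a sequential swarm.
from swarms import Agent, SequentialWorkflow

module_names = [
    "Market Analysis and Research",
    "Portfolio Optimization and Risk Management",
    "Automated Trade Execution and Settlement",
    "Compliance and Regulatory Monitoring",
    "Performance Tracking and Reporting",
    "Fund Operations and Administration",
]

agents = [
    Agent(
        agent_name=name,
        system_prompt=f"You are the '{name}' module of an automated hedge fund.",
        model_name="gpt-4o-mini",  # placeholder model
        max_loops=1,
    )
    for name in module_names
]

# Agents execute in the order listed, mirroring the swarm_flow string.
workflow = SequentialWorkflow(agents=agents, max_loops=1)
print(workflow.run("Produce today's market analysis and downstream fund reports."))
```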
swarms [ master][✘!?][🐳 desktop-linux][📦 v6.0.0][🐍 v3.12.6][☁️ (us-east-1)][☁️ kye@swarms.world(us-central1)][⏱ 32s]
󰂃 9%

@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "6.0.4"
version = "6.4.7"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@ -37,6 +37,14 @@ keywords = [
"Generative AI",
"Agent Marketplace",
"Agent Store",
"quant",
"finance",
"algorithmic trading",
"portfolio optimization",
"risk management",
"financial modeling",
"machine learning for finance",
"natural language processing for finance",
]
classifiers = [
"Development Status :: 4 - Beta",
@ -52,32 +60,25 @@ python = ">=3.10,<4.0"
torch = ">=2.1.1,<3.0"
transformers = ">= 4.39.0, <5.0.0"
asyncio = ">=3.4.3,<4.0"
langchain-community = "0.0.29"
langchain-experimental = "0.0.55"
backoff = "2.2.1"
toml = "*"
pypdf = "4.3.1"
loguru = "0.7.2"
pypdf = "5.1.0"
loguru = "*"
pydantic = "2.8.2"
tenacity = "8.5.0"
Pillow = "10.4.0"
tenacity = "*"
psutil = "*"
sentry-sdk = {version = "*", extras = ["http"]} # Updated here
python-dotenv = "*"
PyYAML = "*"
docstring_parser = "0.16"
fastapi = "*"
openai = ">=1.30.1,<2.0"
termcolor = "*"
tiktoken = "*"
networkx = "*"
swarms-memory = "*"
black = "*"
aiofiles = "*"
swarm-models = "*"
clusterops = "*"
chromadb = "*"
reportlab = "*"
doc-master = "*"
rich = "*"
[tool.poetry.scripts]
swarms = "swarms.cli.main:main"
@ -85,7 +86,7 @@ swarms = "swarms.cli.main:main"
[tool.poetry.group.lint.dependencies]
black = ">=23.1,<25.0"
ruff = ">=0.5.1,<0.6.10"
ruff = ">=0.5.1,<0.8.2"
types-toml = "^0.10.8.1"
types-pytz = ">=2023.3,<2025.0"
types-chardet = "^5.0.4.6"
@ -94,9 +95,7 @@ mypy-protobuf = "^3.0.0"
[tool.poetry.group.test.dependencies]
pytest = "^8.1.1"
termcolor = "^2.4.0"
pandas = "^2.2.2"
fastapi = "^0.110.1"
[tool.ruff]
line-length = 70

@ -2,20 +2,16 @@
torch>=2.1.1,<3.0
transformers>=4.39.0,<5.0.0
asyncio>=3.4.3,<4.0
langchain-community==0.0.28
langchain-experimental==0.0.55
backoff==2.2.1
toml
pypdf==4.3.1
ratelimit==2.2.1
loguru==0.7.2
loguru
pydantic==2.8.2
tenacity==8.5.0
Pillow==10.4.0
tenacity
rich
psutil
sentry-sdk
python-dotenv
opencv-python-headless
PyYAML
docstring_parser==0.16
black>=23.1,<25.0
@ -25,13 +21,10 @@ types-pytz>=2023.3,<2025.0
types-chardet>=5.0.4.6
mypy-protobuf>=3.0.0
pytest>=8.1.1
termcolor>=2.4.0
pandas>=2.2.2
fastapi>=0.110.1
networkx
swarms-memory
pre-commit
aiofiles
swarm-models
clusterops
reportlab
doc-master

@ -0,0 +1,52 @@
#!/bin/bash
# Set up logging
LOG_FILE="docs_compilation.log"
OUTPUT_FILE="combined_docs.txt"
# Initialize log file
echo "$(date): Starting documentation compilation" > "$LOG_FILE"
# Create/clear output file
> "$OUTPUT_FILE"
# Function to determine file type and handle accordingly
process_file() {
local file="$1"
# Get file extension
extension="${file##*.}"
echo "$(date): Processing $file" >> "$LOG_FILE"
case "$extension" in
md|markdown)
echo "# $(basename "$file")" >> "$OUTPUT_FILE"
cat "$file" >> "$OUTPUT_FILE"
echo -e "\n\n" >> "$OUTPUT_FILE"
;;
txt)
echo "# $(basename "$file")" >> "$OUTPUT_FILE"
cat "$file" >> "$OUTPUT_FILE"
echo -e "\n\n" >> "$OUTPUT_FILE"
;;
*)
echo "$(date): Skipping $file - unsupported format" >> "$LOG_FILE"
return
;;
esac
echo "$(date): Successfully processed $file" >> "$LOG_FILE"
}
# Find and process all documentation files
find ../docs -type f \( -name "*.md" -o -name "*.txt" -o -name "*.markdown" \) | while read -r file; do
process_file "$file"
done
# Log completion
echo "$(date): Documentation compilation complete" >> "$LOG_FILE"
echo "$(date): Output saved to $OUTPUT_FILE" >> "$LOG_FILE"
# Print summary
echo "Documentation compilation complete. Check $LOG_FILE for details."

@ -0,0 +1,7 @@
from swarms import Agent
Agent(
agent_name="Stock-Analysis-Agent",
model_name="gpt-4o-mini",
max_loops=1,
).run("What are 5 hft algorithms")

@ -1,17 +1,38 @@
import os
import concurrent.futures
from dotenv import load_dotenv
from loguru import logger
load_dotenv()
# Disable logging by default
if os.getenv("SWARMS_VERBOSE_GLOBAL", "False").lower() == "false":
logger.disable("")
# Import telemetry functions with error handling
from swarms.telemetry.bootup import bootup # noqa: E402, F403
from swarms.telemetry.sentry_active import (
from swarms.telemetry.sentry_active import ( # noqa: E402
activate_sentry,
) # noqa: E402
# Use ThreadPoolExecutor to run bootup and activate_sentry concurrently
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(bootup)
executor.submit(activate_sentry)
# Run telemetry functions concurrently with error handling
def run_telemetry():
try:
with concurrent.futures.ThreadPoolExecutor(
max_workers=2
) as executor:
future_bootup = executor.submit(bootup)
future_sentry = executor.submit(activate_sentry)
# Wait for completion and check for exceptions
future_bootup.result()
future_sentry.result()
except Exception as e:
logger.error(f"Error running telemetry functions: {e}")
run_telemetry()
from swarms.agents import * # noqa: E402, F403
from swarms.artifacts import * # noqa: E402, F403
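Because the logger is disabled at import time whenever SWARMS_VERBOSE_GLOBAL is unset or "false", verbose output has to be requested before the package is imported. A minimal sketch:
```python
# Enable library logging; the flag is read once, at `import swarms` time.
import os

os.environ["SWARMS_VERBOSE_GLOBAL"] = "True"

import swarms  # noqa: E402  # loguru logger stays enabled for this process
```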

@ -1,6 +1,5 @@
from typing import Any
from loguru import logger
from tenacity import retry, stop_after_attempt, wait_exponential
from swarms.prompts.prompt_generator import (
@ -9,6 +8,9 @@ from swarms.prompts.prompt_generator import (
from swarms.prompts.prompt_generator_optimizer import (
prompt_generator_sys_prompt,
)
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="ape_agent")
@retry(

@ -0,0 +1,253 @@
import re
from dotenv import load_dotenv
from tenacity import retry, stop_after_attempt, wait_exponential
from swarms import Agent
from swarms.agents.create_agents_from_yaml import (
create_agents_from_yaml,
)
from swarms.utils.formatter import formatter
from swarms.utils.litellm import LiteLLM
load_dotenv()
def prepare_yaml_for_parsing(raw_yaml: str) -> str:
"""
Prepares raw YAML content by fixing spacing and formatting issues.
Args:
raw_yaml (str): The raw YAML content extracted from Markdown.
Returns:
str: The cleaned YAML content ready for parsing.
"""
# Fix sequence items that are improperly placed on the same line as their key
fixed_yaml = re.sub(
r"(\b\w+\b):\s*-\s*", r"\1:\n - ", raw_yaml
) # Fix "key: - value" to "key:\n - value"
# Ensure proper spacing after colons
fixed_yaml = re.sub(
r"(\S):(\S)", r"\1: \2", fixed_yaml
) # Ensure space after colons
# Remove trailing spaces before newlines
fixed_yaml = re.sub(r"\s+\n", "\n", fixed_yaml)
# Replace non-breaking spaces (if any) with regular spaces
fixed_yaml = fixed_yaml.replace("\xa0", " ")
return fixed_yaml.strip()
def parse_yaml_from_swarm_markdown(markdown_text: str) -> str:
"""
Extracts YAML content from a Markdown-style 'Auto-Swarm-Builder' block and normalizes it for parsing.
Args:
markdown_text (str): The Markdown text containing the YAML inside the 'Auto-Swarm-Builder' block.
Returns:
str: The normalized YAML content, ready to be parsed downstream.
"""
# Match the 'Auto-Swarm-Builder' block with YAML inside triple backticks
pattern = r"```yaml\s*\n(.*?)```"
match = re.search(pattern, markdown_text, re.DOTALL)
if not match:
raise ValueError(
"No YAML content found in the 'Auto-Swarm-Builder' block."
)
raw_yaml = match.group(1).strip()
# Preprocess and normalize the YAML content
normalized_yaml = prepare_yaml_for_parsing(raw_yaml)
return normalized_yaml
AUTO_GEN_PROMPT = """
You are a specialized agent responsible for creating YAML configuration files for multi-agent swarms. Your role is to generate well-structured YAML that defines both individual agents and swarm architectures based on user requirements.
Output only the YAML and nothing else. You will be penalized for making mistakes.
GUIDELINES:
1. Each YAML file must contain an `agents` section with at least one agent configuration
2. Each agent configuration requires the following mandatory fields:
- agent_name (string)
- system_prompt (string)
3. Optional agent fields include:
- max_loops (integer)
- autosave (boolean)
- dashboard (boolean)
- verbose (boolean)
- dynamic_temperature_enabled (boolean)
- saved_state_path (string)
- user_name (string)
- retry_attempts (integer)
- context_length (integer)
- return_step_meta (boolean)
- output_type (string)
- task (string)
4. When a swarm is needed, include a `swarm_architecture` section with:
Mandatory fields:
- name (string)
- swarm_type (string: one of "AgentRearrange", "MixtureOfAgents", "SpreadSheetSwarm", "SequentialWorkflow", "ConcurrentWorkflow")
Optional fields:
- description (string)
- max_loops (integer)
- task (string)
TEMPLATE STRUCTURE:
```yaml
agents:
- agent_name: "Agent-1-Name"
system_prompt: "Detailed system prompt here"
max_loops: 1
# [additional optional fields]
- agent_name: "Agent-2-Name"
system_prompt: "Detailed system prompt here"
# [additional optional fields]
swarm_architecture:
name: "Swarm-Name"
description: "Swarm purpose and goals"
swarm_type: "ConcurrentWorkflow"
max_loops: 5
task: "Main swarm task description"
```
VALIDATION RULES:
1. All agent names must be unique
2. System prompts must be clear and specific to the agent's role
3. Integer values must be positive
4. Boolean values must be true or false (lowercase)
5. File paths should use forward slashes
6. Tasks should be specific and aligned with the agent/swarm purpose
When generating a YAML configuration:
1. Ask for specific requirements about the agents and swarm needed
2. Determine if a swarm architecture is necessary based on the task complexity
3. Generate appropriate system prompts for each agent based on their roles
4. Include relevant optional fields based on the use case
5. Validate the configuration against all rules before returning
Example valid YAML configurations are provided below. Use these as references for structure and formatting:
```yaml
agents:
- agent_name: "Data-Analysis-Agent"
system_prompt: "You are a specialized data analysis agent focused on processing and interpreting financial data. Provide clear, actionable insights based on the data provided."
max_loops: 3
autosave: true
verbose: true
context_length: 100000
output_type: "json"
task: "Analyze quarterly financial reports and identify trends"
# Multi-Agent Swarm Example
agents:
- agent_name: "Research-Agent"
system_prompt: "You are a research agent specialized in gathering and summarizing scientific publications. Focus on peer-reviewed sources and provide comprehensive summaries."
max_loops: 2
context_length: 150000
output_type: "str"
- agent_name: "Analysis-Agent"
system_prompt: "You are an analysis agent that processes research summaries and identifies key patterns and insights. Provide detailed analytical reports."
max_loops: 3
context_length: 200000
output_type: "json"
swarm_architecture:
name: "Research-Analysis-Swarm"
description: "A swarm for comprehensive research analysis and insight generation"
swarm_type: "SequentialWorkflow"
max_loops: 5
task: "Research and analyze recent developments in quantum computing"
```
"""
def generate_swarm_config(
task: str,
file_name: str = "swarm_config_output.yaml",
model_name: str = "gpt-4o",
*args,
**kwargs,
):
"""
Generates a swarm configuration based on the provided task and model name.
This function attempts to generate a swarm configuration by running an agent with the specified task and model name.
It then parses the output into YAML format and creates agents based on the parsed YAML content.
Args:
task (str): The task to be performed by the swarm.
file_name (str, optional): The file name for the output YAML configuration. Defaults to "swarm_config_output.yaml".
model_name (str, optional): The name of the model to use for the agent. Defaults to "gpt-4o".
*args: Additional positional arguments to be passed to the agent's run method.
**kwargs: Additional keyword arguments to be passed to the agent's run method.
Returns:
Any: The output of the swarm configuration generation process. This can be a SwarmRouter instance or an error message.
"""
formatter.print_panel(
"Auto Generating Swarm...", "Auto Swarm Builder"
)
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(min=4, max=10),
)
def attempt_generate_swarm_config():
try:
model = LiteLLM(model_name=model_name)
# Initialize the agent
agent = Agent(
agent_name="Auto-Swarm-Builder",
system_prompt=AUTO_GEN_PROMPT,
llm=model,
max_loops=1,
dynamic_temperature_enabled=True,
saved_state_path="swarm_builder.json",
user_name="swarms_corp",
output_type="str",
)
# Generate output from the agent
raw_output = agent.run(task, *args, **kwargs)
yaml_content = parse_yaml_from_swarm_markdown(raw_output)
print(yaml_content)
# Create agents from the YAML file
output = create_agents_from_yaml(
yaml_string=yaml_content,
return_type="run_swarm",
)
formatter.print_panel(
"Swarm configuration generated successfully.",
"Success",
)
return output
except Exception as e:
formatter.print_panel(
f"Error generating swarm configuration: {str(e)}",
"Error",
)
raise
return attempt_generate_swarm_config()
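A hedged usage sketch for the new builder; it assumes an API key for the chosen model is available via .env (LiteLLM reads it) and that the model returns YAML inside a fenced block, as the prompt requires:
```python
# Usage sketch for generate_swarm_config (defined above).
from dotenv import load_dotenv

from swarms.agents.auto_generate_swarm_config import generate_swarm_config

load_dotenv()  # expects an API key for the selected model

output = generate_swarm_config(
    task="Build a two-agent swarm that researches and summarizes quantum computing news",
    model_name="gpt-4o",
)
print(output)
```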

@ -1,19 +1,168 @@
import os
from typing import Any, Callable, Dict, List, Tuple, Union
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import yaml
from loguru import logger
from tenacity import (
retry,
stop_after_attempt,
wait_exponential,
retry_if_exception_type,
)
from pydantic import (
BaseModel,
Field,
field_validator,
)
from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter
from swarms.utils.litellm import LiteLLM
logger = initialize_logger(log_folder="create_agents_from_yaml")
class AgentConfig(BaseModel):
agent_name: str
system_prompt: str
model_name: Optional[str] = None
max_loops: int = Field(default=1, ge=1)
autosave: bool = True
dashboard: bool = False
verbose: bool = False
dynamic_temperature_enabled: bool = False
saved_state_path: Optional[str] = None
user_name: str = "default_user"
retry_attempts: int = Field(default=3, ge=1)
context_length: int = Field(default=100000, ge=1000)
return_step_meta: bool = False
output_type: str = "str"
auto_generate_prompt: bool = False
artifacts_on: bool = False
artifacts_file_extension: str = ".md"
artifacts_output_path: str = ""
@field_validator("system_prompt")
@classmethod
def validate_system_prompt(cls, v):
if not v or not isinstance(v, str) or len(v.strip()) == 0:
raise ValueError(
"System prompt must be a non-empty string"
)
return v
class SwarmConfig(BaseModel):
name: str
description: str
max_loops: int = Field(default=1, ge=1)
swarm_type: str
task: Optional[str] = None
flow: Optional[Dict] = None
autosave: bool = True
return_json: bool = False
rules: str = ""
@field_validator("swarm_type")
@classmethod
def validate_swarm_type(cls, v):
valid_types = {
"SequentialWorkflow",
"ConcurrentWorkflow",
"AgentRearrange",
"MixtureOfAgents",
"auto",
}
if v not in valid_types:
raise ValueError(
f"Swarm type must be one of: {valid_types}"
)
return v
class YAMLConfig(BaseModel):
agents: List[AgentConfig] = Field(..., min_length=1)
swarm_architecture: Optional[SwarmConfig] = None
model_config = {
"extra": "forbid" # Prevent additional fields not in the model
}
def load_yaml_safely(
yaml_file: str = None, yaml_string: str = None
) -> Dict:
"""Safely load and validate YAML configuration using Pydantic."""
try:
if yaml_string:
config_dict = yaml.safe_load(yaml_string)
elif yaml_file:
if not os.path.exists(yaml_file):
raise FileNotFoundError(
f"YAML file {yaml_file} not found."
)
with open(yaml_file, "r") as file:
config_dict = yaml.safe_load(file)
else:
raise ValueError(
"Either yaml_file or yaml_string must be provided"
)
# Validate using Pydantic
YAMLConfig(**config_dict)
return config_dict
except yaml.YAMLError as e:
raise ValueError(f"Error parsing YAML: {str(e)}")
except Exception as e:
raise ValueError(f"Error validating configuration: {str(e)}")
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
retry=retry_if_exception_type((ConnectionError, TimeoutError)),
before_sleep=lambda retry_state: logger.info(
f"Retrying after error: {retry_state.outcome.exception()}"
),
)
def create_agent_with_retry(
agent_config: Dict, model: LiteLLM
) -> Agent:
"""Create an agent with retry logic for handling transient failures."""
try:
validated_config = AgentConfig(**agent_config)
agent = Agent(
agent_name=validated_config.agent_name,
system_prompt=validated_config.system_prompt,
llm=model,
max_loops=validated_config.max_loops,
autosave=validated_config.autosave,
dashboard=validated_config.dashboard,
verbose=validated_config.verbose,
dynamic_temperature_enabled=validated_config.dynamic_temperature_enabled,
saved_state_path=validated_config.saved_state_path,
user_name=validated_config.user_name,
retry_attempts=validated_config.retry_attempts,
context_length=validated_config.context_length,
return_step_meta=validated_config.return_step_meta,
output_type=validated_config.output_type,
auto_generate_prompt=validated_config.auto_generate_prompt,
artifacts_on=validated_config.artifacts_on,
artifacts_file_extension=validated_config.artifacts_file_extension,
artifacts_output_path=validated_config.artifacts_output_path,
)
return agent
except Exception as e:
logger.error(
f"Error creating agent {agent_config.get('agent_name', 'unknown')}: {str(e)}"
)
raise
def create_agents_from_yaml(
model: Callable = None,
yaml_file: str = "agents.yaml",
yaml_string: str = None,
return_type: str = "auto",
*args,
**kwargs,
) -> Union[
SwarmRouter,
Agent,
@ -22,162 +171,99 @@ def create_agents_from_yaml(
List[Dict[str, Any]],
]:
"""
Create agents and/or SwarmRouter based on configurations defined in a YAML file.
This function dynamically creates agents and a SwarmRouter (if specified) based on the
configuration in the YAML file. It adapts its behavior based on the presence of a
swarm architecture and the number of agents defined.
Args:
model (Callable): The language model to be used by the agents.
yaml_file (str): Path to the YAML file containing agent and swarm configurations.
return_type (str): Determines the return value. Options are:
"auto" (default): Automatically determine the most appropriate return type.
"swarm": Return SwarmRouter if present, otherwise a single agent or list of agents.
"agents": Return a list of agents (or a single agent if only one is defined).
"both": Return both SwarmRouter (or single agent) and list of agents.
"tasks": Return task results if any tasks were executed.
"run_swarm": Run the swarm and return its output.
*args: Additional positional arguments for agent or SwarmRouter customization.
**kwargs: Additional keyword arguments for agent or SwarmRouter customization.
Returns:
Union[SwarmRouter, Agent, List[Agent], Tuple[Union[SwarmRouter, Agent], List[Agent]], List[Dict[str, Any]]]:
The return type depends on the 'return_type' argument and the configuration in the YAML file.
Raises:
FileNotFoundError: If the specified YAML file is not found.
ValueError: If the YAML configuration is invalid or if an invalid return_type is specified.
Create agents and/or SwarmRouter based on configurations defined in a YAML file or string.
"""
try:
logger.info(
f"Checking if the YAML file {yaml_file} exists..."
)
if not os.path.exists(yaml_file):
logger.error(f"YAML file {yaml_file} not found.")
raise FileNotFoundError(
f"YAML file {yaml_file} not found."
)
logger.info(f"Loading YAML file {yaml_file}")
with open(yaml_file, "r") as file:
config = yaml.safe_load(file)
if "agents" not in config:
logger.error(
"The YAML configuration does not contain 'agents'."
)
raise ValueError(
"The YAML configuration does not contain 'agents'."
)
agents = []
task_results = []
swarm_router = None
try:
# Load and validate configuration
config = load_yaml_safely(yaml_file, yaml_string)
# Create agents
# Create agents with retry logic
for agent_config in config["agents"]:
logger.info(
f"Creating agent: {agent_config['agent_name']}"
)
if "system_prompt" not in agent_config:
logger.error(
f"System prompt is missing for agent: {agent_config['agent_name']}"
)
raise ValueError(
f"System prompt is missing for agent: {agent_config['agent_name']}"
if "model_name" in agent_config:
model_instance = LiteLLM(
model_name=agent_config["model_name"]
)
else:
model_name = "gpt-4o"
model_instance = LiteLLM(model_name=model_name)
agent = Agent(
agent_name=agent_config["agent_name"],
system_prompt=agent_config["system_prompt"],
llm=model,
max_loops=agent_config.get("max_loops", 1),
autosave=agent_config.get("autosave", True),
dashboard=agent_config.get("dashboard", False),
verbose=agent_config.get("verbose", False),
dynamic_temperature_enabled=agent_config.get(
"dynamic_temperature_enabled", False
),
saved_state_path=agent_config.get("saved_state_path"),
user_name=agent_config.get(
"user_name", "default_user"
),
retry_attempts=agent_config.get("retry_attempts", 1),
context_length=agent_config.get(
"context_length", 100000
),
return_step_meta=agent_config.get(
"return_step_meta", False
),
output_type=agent_config.get("output_type", "str"),
auto_generate_prompt=agent_config.get(
"auto_generate_prompt", "False"
),
*args,
**kwargs,
agent = create_agent_with_retry(
agent_config, model_instance
)
logger.info(
f"Agent {agent_config['agent_name']} created successfully."
)
agents.append(agent)
# Create SwarmRouter if swarm_architecture is present
swarm_router = None
# Create SwarmRouter if specified
if "swarm_architecture" in config:
swarm_config = config["swarm_architecture"]
try:
swarm_config = SwarmConfig(
**config["swarm_architecture"]
)
swarm_router = SwarmRouter(
name=swarm_config["name"],
description=swarm_config["description"],
max_loops=swarm_config["max_loops"],
name=swarm_config.name,
description=swarm_config.description,
max_loops=swarm_config.max_loops,
agents=agents,
swarm_type=swarm_config["swarm_type"],
task=swarm_config.get("task"),
flow=swarm_config.get("flow"),
autosave=swarm_config.get("autosave"),
return_json=swarm_config.get("return_json"),
*args,
**kwargs,
swarm_type=swarm_config.swarm_type,
task=swarm_config.task,
flow=swarm_config.flow,
autosave=swarm_config.autosave,
return_json=swarm_config.return_json,
rules=swarm_config.rules,
)
logger.info(
f"SwarmRouter '{swarm_config['name']}' created successfully."
f"SwarmRouter '{swarm_config.name}' created successfully."
)
except Exception as e:
logger.error(f"Error creating SwarmRouter: {str(e)}")
raise ValueError(
f"Failed to create SwarmRouter: {str(e)}"
)
# Define function to run SwarmRouter
def run_swarm_router(
task: str = (
swarm_config.get("task")
if "swarm_architecture" in config
else None
),
):
if swarm_router:
# Handle return types with improved error checking
valid_return_types = {
"auto",
"swarm",
"agents",
"both",
"tasks",
"run_swarm",
}
if return_type not in valid_return_types:
raise ValueError(
f"Invalid return_type. Must be one of: {valid_return_types}"
)
if return_type == "run_swarm" or "swarm":
if not swarm_router:
raise ValueError(
"Cannot run swarm: SwarmRouter not created."
)
try:
output = swarm_router.run(task)
print(output)
logger.info(
f"Output for SwarmRouter '{swarm_config['name']}': {output}"
return swarm_router.run(
config["swarm_architecture"]["task"]
)
return output
except Exception as e:
logger.error(
f"Error running task for SwarmRouter '{swarm_config['name']}': {e}"
)
raise e
else:
logger.error("SwarmRouter not created.")
raise ValueError("SwarmRouter not created.")
logger.error(f"Error running SwarmRouter: {str(e)}")
raise
# Handle return types
# Return appropriate type based on configuration
if return_type == "auto":
if swarm_router:
return swarm_router
elif len(agents) == 1:
return agents[0]
else:
return agents
return (
swarm_router
if swarm_router
else (agents[0] if len(agents) == 1 else agents)
)
elif return_type == "swarm":
return (
swarm_router
@ -193,24 +279,10 @@ def create_agents_from_yaml(
else agents[0] if len(agents) == 1 else agents
), agents
elif return_type == "tasks":
if not task_results:
logger.warning(
"No tasks were executed. Returning empty list."
)
return task_results
elif return_type == "run_swarm":
if swarm_router:
return run_swarm_router()
else:
except Exception as e:
logger.error(
"Cannot run swarm: SwarmRouter not created."
)
raise ValueError(
"Cannot run swarm: SwarmRouter not created."
f"Critical error in create_agents_from_yaml: {str(e)}"
)
else:
logger.error(f"Invalid return_type: {return_type}")
raise ValueError(f"Invalid return_type: {return_type}")
except Exception as e:
logger.error(f"An error occurred: {e}")
raise e
raise
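A hedged example of driving the refactored loader from a YAML string; the fields follow the AgentConfig and SwarmConfig models above (model_name falls back to gpt-4o when omitted), and return_type="run_swarm" builds the SwarmRouter and runs the configured task:
```python
# Usage sketch for the refactored create_agents_from_yaml.
from swarms.agents.create_agents_from_yaml import create_agents_from_yaml

yaml_string = """
agents:
  - agent_name: "Research-Agent"
    system_prompt: "Gather and summarize recent quantum computing papers."
    max_loops: 1
  - agent_name: "Analysis-Agent"
    system_prompt: "Analyze the research summaries and extract key insights."
    max_loops: 1
swarm_architecture:
  name: "Research-Analysis-Swarm"
  description: "Sequential research and analysis pipeline"
  swarm_type: "SequentialWorkflow"
  task: "Survey recent developments in quantum error correction"
"""

result = create_agents_from_yaml(
    yaml_string=yaml_string, return_type="run_swarm"
)
print(result)
```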

@ -1,11 +1,11 @@
from typing import Any, Optional, Callable
from swarms.structs.agent import Agent
from swarms.tools.json_former import Jsonformer
from swarms.utils.loguru_logger import logger
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="tool_agent")
class ToolAgent(Agent):
class ToolAgent:
"""
Represents a tool agent that performs a specific task using a model and tokenizer.
@ -151,3 +151,6 @@ class ToolAgent(Agent):
f"Error running {self.name} for task: {task}"
)
raise error
def __call__(self, task: str, *args, **kwargs):
return self.run(task, *args, **kwargs)

@ -1,9 +1,5 @@
from swarms.artifacts.base_artifact import BaseArtifact
from swarms.artifacts.text_artifact import TextArtifact
from swarms.artifacts.main_artifact import Artifact
__all__ = [
"BaseArtifact",
"TextArtifact",
"Artifact",
]

@ -1,77 +0,0 @@
from __future__ import annotations
import json
import uuid
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any
@dataclass
class BaseArtifact(ABC):
"""
Base class for artifacts.
"""
id: str
name: str
value: Any
def __post_init__(self):
if self.id is None:
self.id = uuid.uuid4().hex
if self.name is None:
self.name = self.id
@classmethod
def value_to_bytes(cls, value: Any) -> bytes:
"""
Convert the value to bytes.
"""
if isinstance(value, bytes):
return value
else:
return str(value).encode()
@classmethod
def value_to_dict(cls, value: Any) -> dict:
"""
Convert the value to a dictionary.
"""
if isinstance(value, dict):
dict_value = value
else:
dict_value = json.loads(value)
return {k: v for k, v in dict_value.items()}
def to_text(self) -> str:
"""
Convert the value to text.
"""
return str(self.value)
def __str__(self) -> str:
"""
Return a string representation of the artifact.
"""
return self.to_text()
def __bool__(self) -> bool:
"""
Return the boolean value of the artifact.
"""
return bool(self.value)
def __len__(self) -> int:
"""
Return the length of the artifact.
"""
return len(self.value)
@abstractmethod
def __add__(self, other: BaseArtifact) -> BaseArtifact:
"""
Add two artifacts together.
"""
...

@ -1,11 +1,13 @@
import time
from swarms.utils.loguru_logger import logger
import os
import json
from typing import List, Union, Dict, Any
from pydantic import BaseModel, Field, validator
from datetime import datetime
from swarms.utils.file_processing import create_file_in_folder
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="main_artifact")
class FileVersion(BaseModel):

@ -1,58 +0,0 @@
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Callable
from swarms.artifacts.base_artifact import BaseArtifact
@dataclass
class TextArtifact(BaseArtifact):
"""
Represents a text artifact.
Attributes:
value (str): The text value of the artifact.
encoding (str, optional): The encoding of the text (default is "utf-8").
encoding_error_handler (str, optional): The error handler for encoding errors (default is "strict").
_embedding (list[float]): The embedding of the text artifact (default is an empty list).
Properties:
embedding (Optional[list[float]]): The embedding of the text artifact.
Methods:
__add__(self, other: BaseArtifact) -> TextArtifact: Concatenates the text value of the artifact with another artifact.
__bool__(self) -> bool: Checks if the text value of the artifact is non-empty.
generate_embedding(self, driver: BaseEmbeddingModel) -> Optional[list[float]]: Generates the embedding of the text artifact using a given embedding model.
token_count(self, tokenizer: BaseTokenizer) -> int: Counts the number of tokens in the text artifact using a given tokenizer.
to_bytes(self) -> bytes: Converts the text value of the artifact to bytes using the specified encoding and error handler.
"""
value: str
encoding: str = "utf-8"
encoding_error_handler: str = "strict"
tokenizer: Callable = None
_embedding: list[float] = field(default_factory=list)
@property
def embedding(self) -> list[float] | None:
return None if len(self._embedding) == 0 else self._embedding
def __add__(self, other: BaseArtifact) -> TextArtifact:
return TextArtifact(self.value + other.value)
def __bool__(self) -> bool:
return bool(self.value.strip())
def generate_embedding(self, model) -> list[float] | None:
self._embedding.clear()
self._embedding.extend(model.embed_string(str(self.value)))
return self.embedding
def token_count(self) -> int:
return self.tokenizer.count_tokens(str(self.value))
def to_bytes(self) -> bytes:
return self.value.encode(
encoding=self.encoding, errors=self.encoding_error_handler
)

@ -1,163 +1,279 @@
import argparse
import os
import subprocess
import time
import webbrowser
from rich.console import Console
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.table import Table
from rich.text import Text
from swarms.cli.onboarding_process import OnboardingProcess
from swarms.agents.auto_generate_swarm_config import (
generate_swarm_config,
)
from swarms.agents.create_agents_from_yaml import (
create_agents_from_yaml,
)
import subprocess
from swarms.cli.onboarding_process import OnboardingProcess
from swarms.utils.formatter import formatter
# Initialize console with custom styling
console = Console()
ASCII_ART = """
_________
/ _____/_ _ _______ _______ _____ ______
\_____ \\ \/ \/ /\__ \\_ __ \/ \ / ___/
/ \\ / / __ \| | \/ Y Y \\___ \
/_______ / \/\_/ (____ /__| |__|_| /____ >
\/ \/ \/ \/
class SwarmCLIError(Exception):
"""Custom exception for Swarm CLI errors"""
pass
# Color scheme
COLORS = {
"primary": "red",
"secondary": "#FF6B6B",
"accent": "#4A90E2",
"success": "#2ECC71",
"warning": "#F1C40F",
"error": "#E74C3C",
"text": "#FFFFFF",
}
ASCII_ART = """
"""
# Function to display the ASCII art in red
def create_spinner(text: str) -> Progress:
"""Create a custom spinner with the given text."""
return Progress(
SpinnerColumn(style=COLORS["primary"]),
TextColumn("[{task.description}]", style=COLORS["text"]),
console=console,
)
def show_ascii_art():
text = Text(ASCII_ART, style="bold cyan")
console.print(text)
"""Display the ASCII art with a glowing effect."""
panel = Panel(
Text(ASCII_ART, style=f"bold {COLORS['primary']}"),
border_style=COLORS["secondary"],
title="[bold]Welcome to Swarms[/bold]",
subtitle="[dim]Power to the Swarms[/dim]",
)
console.print(panel)
def create_command_table() -> Table:
"""Create a beautifully formatted table of commands."""
table = Table(
show_header=True,
header_style=f"bold {COLORS['primary']}",
border_style=COLORS["secondary"],
title="Available Commands",
padding=(0, 2),
)
table.add_column("Command", style="bold white")
table.add_column("Description", style="dim white")
commands = [
("onboarding", "Start the interactive onboarding process"),
("help", "Display this help message"),
("get-api-key", "Retrieve your API key from the platform"),
("check-login", "Verify login status and initialize cache"),
("run-agents", "Execute agents from your YAML configuration"),
("auto-upgrade", "Update Swarms to the latest version"),
("book-call", "Schedule a strategy session with our team"),
("autoswarm", "Generate and execute an autonomous swarm"),
]
for cmd, desc in commands:
table.add_row(cmd, desc)
return table
# Help command
def show_help():
"""Display a beautifully formatted help message."""
console.print(
"""
[bold cyan]Swarms CLI - Help[/bold cyan]
[bold magenta]Commands:[/bold magenta]
[bold white]onboarding[/bold white] : Starts the onboarding process
[bold white]help[/bold white] : Shows this help message
[bold white]get-api-key[/bold white] : Retrieves your API key from the platform
[bold white]check-login[/bold white] : Checks if you're logged in and starts the cache
[bold white]read-docs[/bold white] : Redirects you to swarms cloud documentation!
[bold white]run-agents[/bold white] : Run your Agents from your specified yaml file. Specify the yaml file with path the `--yaml-file` arg. Example: `--yaml-file agents.yaml`
[bold white]generate-prompt[/bold white] : Generate a prompt through automated prompt engineering. Requires an OPENAI Key in your `.env` Example: --prompt "Generate a prompt for an agent to analyze legal docs"
[bold white]auto-upgrade[/bold white] : Automatically upgrades Swarms to the latest version
[bold white]book-call[/bold white] : Book a strategy session with our team to discuss your use case and get personalized guidance
For more details, visit: https://docs.swarms.world
"""
"\n[bold]Swarms CLI - Command Reference[/bold]\n",
style=COLORS["primary"],
)
console.print(create_command_table())
console.print(
"\n[dim]For detailed documentation, visit: https://docs.swarms.world[/dim]"
)
def show_error(message: str, help_text: str = None):
"""Display error message in a formatted panel"""
error_panel = Panel(
f"[bold red]{message}[/bold red]",
title="Error",
border_style="red",
)
console.print(error_panel)
if help_text:
console.print(f"\n[yellow] {help_text}[/yellow]")
# [bold white]add-agent[/bold white] : Add an agent to the marketplace under your name. Must have a Dockerfile + your agent.yaml to publish. Learn more Here: https://docs.swarms.world/en/latest/swarms_cloud/vision/
def execute_with_spinner(action: callable, text: str) -> None:
"""Execute an action with a spinner animation."""
with create_spinner(text) as progress:
task = progress.add_task(text, total=None)
result = action()
progress.remove_task(task)
return result
# Fetch API key from platform
def get_api_key():
"""Retrieve API key with visual feedback."""
with create_spinner("Opening API key portal...") as progress:
task = progress.add_task("Opening browser...")
webbrowser.open("https://swarms.world/platform/api-keys")
time.sleep(1)
progress.remove_task(task)
console.print(
"[bold yellow]Opening the API key retrieval page...[/bold yellow]"
f"\n[{COLORS['success']}]✓ API key page opened in your browser[/{COLORS['success']}]"
)
# Simulating API key retrieval process by opening the website
import webbrowser
webbrowser.open("https://swarms.world/platform/api-keys")
time.sleep(2)
def check_login():
"""Verify login status with enhanced visual feedback."""
cache_file = "cache.txt"
if os.path.exists(cache_file):
with open(cache_file, "r") as f:
if f.read() == "logged_in":
console.print(
"[bold green]Your API key is available on the dashboard.[/bold green]"
f"[{COLORS['success']}]✓ Authentication verified[/{COLORS['success']}]"
)
return True
with create_spinner("Authenticating...") as progress:
task = progress.add_task("Initializing session...")
time.sleep(1)
with open(cache_file, "w") as f:
f.write("logged_in")
progress.remove_task(task)
# Redirect to docs
def redirect_to_docs():
console.print(
"[bold yellow]Opening the Docs page...[/bold yellow]"
f"[{COLORS['success']}]✓ Login successful![/{COLORS['success']}]"
)
# Simulating API key retrieval process by opening the website
import webbrowser
return True
webbrowser.open("https://docs.swarms.world")
time.sleep(2)
# Redirect to docs
def redirect_to_call():
def run_autoswarm(task: str, model: str):
"""Run autoswarm with enhanced error handling"""
try:
console.print(
"[bold yellow]Opening the Call page...[/bold yellow]"
"[yellow]Initializing autoswarm configuration...[/yellow]"
)
# Simulating API key retrieval process by opening the website
import webbrowser
webbrowser.open("https://cal.com/swarms/swarms-strategy-session")
time.sleep(2)
# Set LiteLLM verbose mode for debugging
import litellm
litellm.set_verbose = True
# Check and start cache (login system simulation)
def check_login():
cache_file = "cache.txt"
# Validate inputs
if not task or task.strip() == "":
raise SwarmCLIError("Task cannot be empty")
if os.path.exists(cache_file):
with open(cache_file, "r") as f:
cache_content = f.read()
if cache_content == "logged_in":
if not model or model.strip() == "":
raise SwarmCLIError("Model name cannot be empty")
# Attempt to generate swarm configuration
console.print(
"[bold green]You are already logged in.[/bold green]"
f"[yellow]Generating swarm for task: {task}[/yellow]"
)
else:
result = generate_swarm_config(task=task, model=model)
if result:
console.print(
"[bold red]You are not logged in.[/bold red]"
"[green]✓ Swarm configuration generated successfully![/green]"
)
else:
console.print("[bold yellow]Logging in...[/bold yellow]")
time.sleep(2)
with open(cache_file, "w") as f:
f.write("logged_in")
console.print("[bold green]Login successful![/bold green]")
raise SwarmCLIError(
"Failed to generate swarm configuration"
)
except Exception as e:
if "No YAML content found" in str(e):
show_error(
"Failed to generate YAML configuration",
"This might be due to an API key issue or invalid model configuration.\n"
+ "1. Check if your OpenAI API key is set correctly\n"
+ "2. Verify the model name is valid\n"
+ "3. Try running with --model gpt-4",
)
else:
show_error(
f"Error during autoswarm execution: {str(e)}",
"For debugging, try:\n"
+ "1. Check your API keys are set correctly\n"
+ "2. Verify your network connection\n"
+ "3. Try a different model",
)
def check_and_upgrade_version():
console.print(
"[bold yellow]Checking for Swarms updates...[/bold yellow]"
)
try:
# Check for updates using pip
"""Check for updates with visual progress."""
def check_update():
result = subprocess.run(
["pip", "list", "--outdated", "--format=freeze"],
capture_output=True,
text=True,
)
outdated_packages = result.stdout.splitlines()
return result.stdout.splitlines()
# Check if Swarms is outdated
for package in outdated_packages:
outdated = execute_with_spinner(
check_update, "Checking for updates..."
)
for package in outdated:
if package.startswith("swarms=="):
console.print(
"[bold magenta]New version available! Upgrading...[/bold magenta]"
f"[{COLORS['warning']}]↑ Update available![/{COLORS['warning']}]"
)
with create_spinner("Upgrading Swarms...") as progress:
task = progress.add_task(
"Installing latest version..."
)
subprocess.run(
["pip", "install", "--upgrade", "swarms"],
check=True,
)
progress.remove_task(task)
console.print(
"[bold green]Swarms upgraded successfully![/bold green]"
f"[{COLORS['success']}]✓ Swarms upgraded successfully![/{COLORS['success']}]"
)
return
console.print(
"[bold green]Swarms is up-to-date.[/bold green]"
)
except Exception as e:
console.print(
f"[bold red]Error checking for updates: {e}[/bold red]"
f"[{COLORS['success']}]✓ Swarms is up to date![/{COLORS['success']}]"
)
# Main CLI handler
def main():
parser = argparse.ArgumentParser(description="Swarms Cloud CLI")
try:
show_ascii_art()
# Adding arguments for different commands
parser = argparse.ArgumentParser(
description="Swarms Cloud CLI"
)
parser.add_argument(
"command",
choices=[
@ -166,45 +282,31 @@ def main():
"get-api-key",
"check-login",
"run-agents",
"generate-prompt", # Added new command for generating prompts
"auto-upgrade", # Added new command for auto-upgrade,
"auto-upgrade",
"book-call",
"autoswarm",
],
help="Command to run",
help="Command to execute",
)
parser.add_argument(
"--yaml-file",
type=str,
default="agents.yaml",
help="Specify the YAML file for running agents",
)
parser.add_argument(
"--prompt",
type=str,
help="Specify the task for generating a prompt",
)
parser.add_argument(
"--num-loops",
type=int,
default=1,
help="Specify the number of loops for generating a prompt",
help="YAML configuration file path",
)
parser.add_argument(
"--autosave",
action="store_true",
help="Enable autosave for the prompt generator",
"--task", type=str, help="Task for autoswarm"
)
parser.add_argument(
"--save-to-yaml",
action="store_true",
help="Save the generated prompt to a YAML file",
"--model",
type=str,
default="gpt-4",
help="Model for autoswarm",
)
args = parser.parse_args()
show_ascii_art()
# Determine which command to run
try:
if args.command == "onboarding":
OnboardingProcess().run()
elif args.command == "help":
@ -217,28 +319,30 @@ def main():
create_agents_from_yaml(
yaml_file=args.yaml_file, return_type="tasks"
)
# elif args.command == "generate-prompt":
# if (
# args.prompt
# ): # Corrected from args.prompt_task to args.prompt
# generate_prompt(
# num_loops=args.num_loops,
# autosave=args.autosave,
# save_to_yaml=args.save_to_yaml,
# prompt=args.prompt, # Corrected from args.prompt_task to args.prompt
# )
# else:
# console.print(
# "[bold red]Please specify a task for generating a prompt using '--prompt'.[/bold red]"
# )
elif args.command == "auto-upgrade":
check_and_upgrade_version()
elif args.command == "book-call":
redirect_to_call()
else:
webbrowser.open(
"https://cal.com/swarms/swarms-strategy-session"
)
elif args.command == "autoswarm":
if not args.task:
show_error(
"Missing required argument: --task",
"Example usage: python cli.py autoswarm --task 'analyze this data' --model gpt-4",
)
exit(1)
run_autoswarm(args.task, args.model)
except Exception as e:
console.print(
"[bold red]Unknown command! Type 'help' for usage.[/bold red]"
f"[{COLORS['error']}]Error: {str(e)}[/{COLORS['error']}]"
)
return
except Exception as error:
formatter.print_panel(
f"Error detected: {error} check your args"
)
raise error
if __name__ == "__main__":
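The new autoswarm command is a thin wrapper around run_autoswarm, so the same path can be exercised programmatically (equivalent to `swarms autoswarm --task ... --model gpt-4`); a sketch, assuming API keys are already configured in the environment:
```python
# Programmatic equivalent of the CLI's autoswarm command.
from swarms.cli.main import run_autoswarm

run_autoswarm(
    task="Analyze this quarter's trading data and propose a reporting swarm",
    model="gpt-4",
)
```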

@ -3,13 +3,16 @@ import os
import time
from typing import Dict
from loguru import logger
from swarms.utils.loguru_logger import initialize_logger
from swarms.telemetry.capture_sys_data import (
capture_system_data,
log_agent_data,
)
logger = initialize_logger(log_folder="onboarding_process")
class OnboardingProcess:
"""
@ -84,19 +87,6 @@ class OnboardingProcess:
try:
combined_data = {**self.user_data, **self.system_data}
log_agent_data(combined_data)
# threading.Thread(target=log_agent_data(combined_data)).start()
# with open(self.auto_save_path, "w") as f:
# json.dump(combined_data, f, indent=4)
# # logger.info(
# # "User and system data successfully saved to {}",
# # self.auto_save_path,
# # )
# with open(self.cache_save_path, "w") as f:
# json.dump(combined_data, f, indent=4)
# logger.info(
# "User and system data successfully cached in {}",
# self.cache_save_path,
# )
return # Exit the function if saving was successful
except Exception as e:
logger.error(
@ -170,10 +160,6 @@ class OnboardingProcess:
self.ask_input(
"Enter your email (or type 'quit' to exit): ", "email"
)
self.ask_input(
"Enter your Swarms API key (or type 'quit' to exit): Get this in your swarms dashboard: https://swarms.world/platform/api-keys ",
"swarms_api_key",
)
workspace = self.ask_input(
"Enter your WORKSPACE_DIR: This is where logs, errors, and agent configurations will be stored (or type 'quit' to exit). Remember to set this as an environment variable: https://docs.swarms.world/en/latest/swarms/install/quickstart/ || ",
"workspace_dir",

@ -1,120 +0,0 @@
from swarms.utils.loguru_logger import logger
import yaml
from pydantic import BaseModel
from typing import List, Optional
import json
from swarms.structs.agent_registry import AgentRegistry
from swarms.structs.agent import Agent
from swarm_models.popular_llms import OpenAIChat
class AgentInput(BaseModel):
agent_name: str = "Swarm Agent"
system_prompt: Optional[str] = None
agent_description: Optional[str] = None
model_name: str = "OpenAIChat"
max_loops: int = 1
autosave: bool = False
dynamic_temperature_enabled: bool = False
dashboard: bool = False
verbose: bool = False
streaming_on: bool = True
saved_state_path: Optional[str] = None
sop: Optional[str] = None
sop_list: Optional[List[str]] = None
user_name: str = "User"
retry_attempts: int = 3
context_length: int = 8192
task: Optional[str] = None
interactive: bool = False
def parse_yaml_to_json(yaml_str: str) -> str:
"""
Parses the given YAML string into an AgentInput model and converts it to a JSON string.
Args:
yaml_str (str): The YAML string to be parsed.
Returns:
str: The JSON string representation of the parsed YAML.
Raises:
ValueError: If the YAML string cannot be parsed into the AgentInput model.
"""
try:
data = yaml.safe_load(yaml_str)
agent_input = AgentInput(**data)
return agent_input.json()
except yaml.YAMLError as e:
print(f"YAML Error: {e}")
raise ValueError("Invalid YAML input.") from e
except ValueError as e:
print(f"Validation Error: {e}")
raise ValueError("Invalid data for AgentInput model.") from e
# # Example usage
# yaml_input = """
# agent_name: "Custom Agent"
# system_prompt: "System prompt example"
# agent_description: "This is a test agent"
# model_name: "CustomModel"
# max_loops: 5
# autosave: true
# dynamic_temperature_enabled: true
# dashboard: true
# verbose: true
# streaming_on: false
# saved_state_path: "/path/to/state"
# sop: "Standard operating procedure"
# sop_list: ["step1", "step2"]
# user_name: "Tester"
# retry_attempts: 5
# context_length: 4096
# task: "Perform testing"
# """
# json_output = parse_yaml_to_json(yaml_input)
# print(json_output)
registry = AgentRegistry()
def create_agent_from_yaml(yaml_path: str) -> None:
with open(yaml_path, "r") as file:
yaml_str = file.read()
agent_json = parse_yaml_to_json(yaml_str)
agent_config = json.loads(agent_json)
agent = Agent(
agent_name=agent_config.get("agent_name", "Swarm Agent"),
system_prompt=agent_config.get("system_prompt"),
agent_description=agent_config.get("agent_description"),
llm=OpenAIChat(),
max_loops=agent_config.get("max_loops", 1),
autosave=agent_config.get("autosave", False),
dynamic_temperature_enabled=agent_config.get(
"dynamic_temperature_enabled", False
),
dashboard=agent_config.get("dashboard", False),
verbose=agent_config.get("verbose", False),
streaming_on=agent_config.get("streaming_on", True),
saved_state_path=agent_config.get("saved_state_path"),
retry_attempts=agent_config.get("retry_attempts", 3),
context_length=agent_config.get("context_length", 8192),
)
registry.add(agent.agent_name, agent)
logger.info(f"Agent {agent.agent_name} created from {yaml_path}.")
def run_agent(agent_name: str, task: str) -> None:
agent = registry.find_agent_by_name(agent_name)
agent.run(task)
def list_agents() -> None:
agents = registry.list_agents()
for agent_id in agents:
print(agent_id)

@ -4,7 +4,6 @@ import time
import uuid
from typing import Any, Callable, List
from loguru import logger
from pydantic import (
BaseModel,
Field,
@ -17,6 +16,9 @@ from swarms.telemetry.capture_sys_data import (
log_agent_data,
)
from swarms.tools.base_tool import BaseTool
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger("prompt")
class Prompt(BaseModel):
@ -131,9 +133,10 @@ class Prompt(BaseModel):
self.content = new_content
self.edit_count += 1
self.last_modified_at = time.strftime("%Y-%m-%d %H:%M:%S")
logger.debug(
f"Prompt {self.id} updated. Edit count: {self.edit_count}. New content: '{self.content}'"
)
# logger.debug(
# f"Prompt {self.id} updated. Edit count: {self.edit_count}. New content: '{self.content}'"
# )
if self.autosave:
self._autosave()
@ -161,15 +164,15 @@ class Prompt(BaseModel):
)
raise IndexError("Invalid version number for rollback.")
logger.info(
f"Rolling back prompt {self.id} to version {version}."
)
# logger.info(
# f"Rolling back prompt {self.id} to version {version}."
# )
self.content = self.edit_history[version]
self.edit_count = version
self.last_modified_at = time.strftime("%Y-%m-%d %H:%M:%S")
logger.debug(
f"Prompt {self.id} rolled back to version {version}. Current content: '{self.content}'"
)
# logger.debug(
# f"Prompt {self.id} rolled back to version {version}. Current content: '{self.content}'"
# )
self.log_telemetry()
@ -199,7 +202,7 @@ class Prompt(BaseModel):
Raises:
NotImplementedError: This method is a placeholder for storage integration.
"""
logger.info(f"Saving prompt {self.id} to persistent storage.")
# logger.info(f"Saving prompt {self.id} to persistent storage.")
raise NotImplementedError(
"Persistent storage integration is required."
)
@ -217,9 +220,9 @@ class Prompt(BaseModel):
Raises:
NotImplementedError: This method is a placeholder for storage integration.
"""
logger.info(
f"Loading prompt {prompt_id} from persistent storage."
)
# logger.info(
# f"Loading prompt {prompt_id} from persistent storage."
# )
raise NotImplementedError(
"Persistent storage integration is required."
)
@ -254,7 +257,9 @@ class Prompt(BaseModel):
)
with open(file_path, "w") as file:
json.dump(self.model_dump(), file)
logger.info(f"Autosaved prompt {self.id} to {file_path}.")
# logger.info(f"Autosaved prompt {self.id} to {file_path}.")
# return "Prompt autosaved successfully."
# def auto_generate_prompt(self):
# logger.info(f"Auto-generating prompt for {self.name}")

@ -1,10 +0,0 @@
from typing import List
from pydantic import BaseModel
from swarms.schemas.agent_step_schemas import Step
class Plan(BaseModel):
steps: List[Step]
class Config:
orm_mode = True

@ -1,4 +1,5 @@
from swarms.structs.agent import Agent
from swarms.structs.agents_available import showcase_available_agents
from swarms.structs.auto_swarm import AutoSwarm, AutoSwarmRouter
from swarms.structs.base_structure import BaseStructure
from swarms.structs.base_swarm import BaseSwarm
@ -11,7 +12,7 @@ from swarms.structs.graph_workflow import (
Node,
NodeType,
)
from swarms.structs.groupchat import GroupChat
from swarms.structs.groupchat import GroupChat, GroupChatState
from swarms.structs.majority_voting import (
MajorityVoting,
majority_voting,
@ -19,16 +20,31 @@ from swarms.structs.majority_voting import (
parse_code_completion,
)
from swarms.structs.message import Message
from swarms.structs.message_pool import MessagePool
from swarms.structs.mixture_of_agents import MixtureOfAgents
from swarms.structs.multi_agent_collab import MultiAgentCollaboration
from swarms.structs.multi_agent_exec import (
run_agent_with_timeout,
run_agents_concurrently,
run_agents_concurrently_async,
run_agents_concurrently_multiprocess,
run_agents_sequentially,
run_agents_with_different_tasks,
run_agents_with_resource_monitoring,
run_agents_with_tasks_concurrently,
run_single_agent,
)
from swarms.structs.queue_swarm import TaskQueueSwarm
from swarms.structs.rearrange import AgentRearrange, rearrange
from swarms.structs.round_robin import RoundRobinSwarm
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm
from swarms.structs.swarm_arange import SwarmRearrange
from swarms.structs.swarm_net import SwarmNetwork
from swarms.structs.swarm_router import (
SwarmRouter,
SwarmType,
swarm_router,
)
from swarms.structs.swarming_architectures import (
broadcast,
circular_swarm,
@ -93,7 +109,6 @@ __all__ = [
"most_frequent",
"parse_code_completion",
"Message",
"MessagePool",
"MultiAgentCollaboration",
"SwarmNetwork",
"AgentRearrange",
@ -146,4 +161,7 @@ __all__ = [
"run_agents_with_resource_monitoring",
"swarm_router",
"AsyncWorkflow",
"run_agents_with_tasks_concurrently",
"showcase_available_agents",
"GroupChatState",
]
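The widened export list also surfaces the new multi_agent_exec helpers from swarms.structs. A sketch of fanning one task out to several agents; the run_agents_concurrently(agents, task) signature is assumed from its name and is not spelled out in this diff:
```python
# Sketch: run one task across several agents concurrently (assumed helper signature).
from swarms.structs import Agent, run_agents_concurrently

agents = [
    Agent(
        agent_name=f"Analyst-{i}",
        system_prompt="Summarize the given market headline in one sentence.",
        model_name="gpt-4o-mini",
        max_loops=1,
    )
    for i in range(3)
]

outputs = run_agents_concurrently(agents, task="Fed leaves rates unchanged")
print(outputs)
```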

@ -1,3 +1,4 @@
from datetime import datetime
import asyncio
import json
import logging
@ -21,15 +22,8 @@ from typing import (
import toml
import yaml
from clusterops import (
execute_on_gpu,
execute_with_cpu_cores,
)
from loguru import logger
from pydantic import BaseModel
from swarm_models.tiktoken_wrapper import TikTokenizer
from termcolor import colored
from swarms.agents.ape_agent import auto_generate_prompt
from swarms.prompts.agent_system_prompts import AGENT_SYSTEM_PROMPT_3
from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
@ -53,6 +47,13 @@ from swarms.utils.data_to_text import data_to_text
from swarms.utils.file_processing import create_file_in_folder
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.artifacts.main_artifact import Artifact
from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.wrapper_clusterop import (
exec_callable_with_clusterops,
)
from swarms.utils.formatter import formatter
logger = initialize_logger(log_folder="agents")
# Utils
@ -177,6 +178,7 @@ class Agent:
artifacts_on (bool): Enable artifacts
artifacts_output_path (str): The artifacts output path
artifacts_file_extension (str): The artifacts file extension (.pdf, .md, .txt, )
scheduled_run_date (datetime): The date and time to schedule the task
Methods:
run: Run the agent
@ -299,7 +301,6 @@ class Agent:
rules: str = None, # type: ignore
planning: Optional[str] = False,
planning_prompt: Optional[str] = None,
device: str = None,
custom_planning_prompt: str = None,
memory_chunk_size: int = 2000,
agent_ops_on: bool = False,
@ -331,6 +332,14 @@ class Agent:
artifacts_on: bool = False,
artifacts_output_path: str = None,
artifacts_file_extension: str = None,
device: str = "cpu",
all_cores: bool = True,
device_id: int = 0,
scheduled_run_date: Optional[datetime] = None,
do_not_use_cluster_ops: bool = True,
all_gpus: bool = False,
model_name: str = None,
llm_args: dict = None,
*args,
**kwargs,
):
@ -408,7 +417,6 @@ class Agent:
self.execute_tool = execute_tool
self.planning = planning
self.planning_prompt = planning_prompt
self.device = device
self.custom_planning_prompt = custom_planning_prompt
self.rules = rules
self.custom_tools_prompt = custom_tools_prompt
@ -441,6 +449,14 @@ class Agent:
self.artifacts_on = artifacts_on
self.artifacts_output_path = artifacts_output_path
self.artifacts_file_extension = artifacts_file_extension
self.device = device
self.all_cores = all_cores
self.device_id = device_id
self.scheduled_run_date = scheduled_run_date
self.do_not_use_cluster_ops = do_not_use_cluster_ops
self.all_gpus = all_gpus
self.model_name = model_name
self.llm_args = llm_args
# Initialize the short term memory
self.short_memory = Conversation(
@ -577,6 +593,21 @@ class Agent:
# Telemetry Processor to log agent data
threading.Thread(target=self.log_agent_data).start()
self.llm_handling()  # run synchronously so self.llm is initialized before first use
def llm_handling(self):
if self.llm is None:
from swarms.utils.litellm import LiteLLM
if self.llm_args is not None:
self.llm = LiteLLM(
model_name=self.model_name, **self.llm_args
)
else:
self.llm = LiteLLM(model_name=self.model_name)
def check_if_no_prompt_then_autogenerate(self, task: str = None):
"""
Checks if auto_generate_prompt is enabled and generates a prompt by combining agent name, description and system prompt if available.
@ -657,11 +688,8 @@ class Agent:
return self.stopping_condition(response)
return False
except Exception as error:
print(
colored(
f"Error checking stopping condition: {error}",
"red",
)
logger.error(
f"Error checking stopping condition: {error}"
)
def dynamic_temperature(self):
@ -675,20 +703,19 @@ class Agent:
if hasattr(self.llm, "temperature"):
# Randomly change the temperature attribute of self.llm object
self.llm.temperature = random.uniform(0.0, 1.0)
logger.info(f"Temperature: {self.llm.temperature}")
else:
# Use a default temperature
self.llm.temperature = 0.7
self.llm.temperature = 0.5
except Exception as error:
print(
colored(
logger.error(
f"Error dynamically changing temperature: {error}"
)
)
def print_dashboard(self):
"""Print dashboard"""
print(colored("Initializing Agent Dashboard...", "yellow"))
formatter.print_panel(
f"Initializing Agent: {self.agent_name}"
)
data = self.to_dict()
@ -696,8 +723,7 @@ class Agent:
# data = json.dumps(data, indent=4)
# json_data = json.dumps(data, indent=4)
print(
colored(
formatter.print_panel(
f"""
Agent Dashboard
--------------------------------------------
@ -710,8 +736,6 @@ class Agent:
----------------------------------------
""",
"green",
)
)
def loop_count_print(
@ -723,21 +747,23 @@ class Agent:
loop_count (int): The current loop iteration.
max_loops (int or str): The maximum number of loops, or "auto" for unbounded looping.
"""
print(colored(f"\nLoop {loop_count} of {max_loops}", "cyan"))
logger.info(f"\nLoop {loop_count} of {max_loops}")
print("\n")
# Check parameters
def check_parameters(self):
if self.llm is None:
raise ValueError("Language model is not provided")
raise ValueError(
"Language model is not provided. Choose a model from the available models in swarm_models or create a class with a run(task: str) method and or a __call__ method."
)
if self.max_loops is None:
if self.max_loops is None or self.max_loops == 0:
raise ValueError("Max loops is not provided")
if self.max_tokens == 0:
if self.max_tokens == 0 or self.max_tokens is None:
raise ValueError("Max tokens is not provided")
if self.context_length == 0:
if self.context_length == 0 or self.context_length is None:
raise ValueError("Context length is not provided")
# Main function
@ -745,7 +771,11 @@ class Agent:
self,
task: Optional[str] = None,
img: Optional[str] = None,
is_last: bool = False,
speech: Optional[str] = None,
video: Optional[str] = None,
is_last: Optional[bool] = False,
print_task: Optional[bool] = False,
generate_speech: Optional[bool] = False,
*args,
**kwargs,
) -> Any:
@ -788,6 +818,15 @@ class Agent:
if self.long_term_memory is not None:
self.memory_query(task)
# Print the user's request
# Print the request
if print_task is True:
formatter.print_panel(
f"\n User: {task}",
f"Task Request for {self.agent_name}",
)
while (
self.max_loops == "auto"
or loop_count < self.max_loops
@ -834,9 +873,17 @@ class Agent:
# Print
if self.streaming_on is True:
self.stream_response(response)
# self.stream_response(response)
formatter.print_panel_token_by_token(
f"{self.agent_name}: {response}",
title=f"Agent Name: {self.agent_name} [Max Loops: {loop_count}]",
)
else:
logger.info(f"Response: {response}")
# logger.info(f"Response: {response}")
formatter.print_panel(
f"{self.agent_name}: {response}",
f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
)
# Check if response is a dictionary and has 'choices' key
if (
@ -926,7 +973,7 @@ class Agent:
if self.interactive:
logger.info("Interactive mode enabled.")
user_input = colored(input("You: "), "red")
user_input = input("You: ")
# User-defined exit command
if (
@ -990,6 +1037,11 @@ class Agent:
self.artifacts_file_extension,
)
try:
self.log_agent_data()
except Exception:
pass
# More flexible output types
if (
self.output_type == "string"
@ -1013,13 +1065,26 @@ class Agent:
elif self.return_step_meta is True:
return self.agent_output.model_dump_json(indent=4)
elif self.return_history is True:
return self.short_memory.get_str()
history = self.short_memory.get_str()
formatter.print_panel(
history, title=f"{self.agent_name} History"
)
return history
else:
raise ValueError(
f"Invalid output type: {self.output_type}"
)
except Exception as error:
self.log_agent_data()
logger.error(
f"Error running agent: {error}. Check and optimize your input parameters."
)
raise error
except KeyboardInterrupt as error:
self.log_agent_data()
logger.info(
f"Agent run interrupted by user: {error}"
)
@ -1222,7 +1287,7 @@ class Agent:
logger.info(f"Running bulk tasks: {inputs}")
return [self.run(**input_data) for input_data in inputs]
except Exception as error:
print(colored(f"Error running bulk run: {error}", "red"))
logger.info(f"Error running bulk run: {error}", "red")
def save(self) -> None:
"""Save the agent history to a file.
@ -1399,9 +1464,7 @@ class Agent:
with open(file_path, "w") as f:
yaml.dump(self.to_dict(), f)
except Exception as error:
logger.error(
colored(f"Error saving agent to YAML: {error}", "red")
)
logger.error(f"Error saving agent to YAML: {error}")
raise error
def get_llm_parameters(self):
@ -1466,7 +1529,7 @@ class Agent:
role=self.user_name, content=data
)
except Exception as error:
print(colored(f"Error ingesting docs: {error}", "red"))
logger.info(f"Error ingesting docs: {error}", "red")
def ingest_pdf(self, pdf: str):
"""Ingest the pdf into the memory
@ -1481,7 +1544,7 @@ class Agent:
role=self.user_name, content=text
)
except Exception as error:
print(colored(f"Error ingesting pdf: {error}", "red"))
logger.info(f"Error ingesting pdf: {error}", "red")
def receieve_message(self, name: str, message: str):
"""Receieve a message"""
@ -1558,19 +1621,22 @@ class Agent:
files = os.listdir(self.docs_folder)
# Extract the text from the files
# Process each file and combine their contents
all_text = ""
for file in files:
text = data_to_text(file)
file_path = os.path.join(self.docs_folder, file)
text = data_to_text(file_path)
all_text += f"\nContent from {file}:\n{text}\n"
# Add the combined content to memory
return self.short_memory.add(
role=self.user_name, content=text
role=self.user_name, content=all_text
)
except Exception as error:
print(
colored(
f"Error getting docs from doc folders: {error}",
"red",
)
logger.error(
f"Error getting docs from doc folders: {error}"
)
raise error
def check_end_session_agentops(self):
if self.agent_ops_on is True:
@ -1590,7 +1656,8 @@ class Agent:
try:
# Query the long term memory
if self.long_term_memory is not None:
logger.info(f"Querying long term memory for: {task}")
formatter.print_panel(f"Querying RAG for: {task}")
memory_retrieval = self.long_term_memory.query(
task, *args, **kwargs
)
@ -1599,15 +1666,15 @@ class Agent:
f"Documents Available: {str(memory_retrieval)}"
)
# Count the tokens
memory_token_count = self.tokenizer.count_tokens(
memory_retrieval
)
if memory_token_count > self.memory_chunk_size:
# Truncate the memory by the memory chunk size
memory_retrieval = self.truncate_string_by_tokens(
memory_retrieval, self.memory_chunk_size
)
# # Count the tokens
# memory_token_count = self.tokenizer.count_tokens(
# memory_retrieval
# )
# if memory_token_count > self.memory_chunk_size:
# # Truncate the memory by the memory chunk size
# memory_retrieval = self.truncate_string_by_tokens(
# memory_retrieval, self.memory_chunk_size
# )
self.short_memory.add(
role="Database",
@ -2235,25 +2302,31 @@ class Agent:
self,
task: Optional[str] = None,
img: Optional[str] = None,
is_last: bool = False,
device: str = "cpu", # gpu
device_id: int = 0,
all_cores: bool = True,
device: Optional[str] = "cpu", # gpu
device_id: Optional[int] = 0,
all_cores: Optional[bool] = True,
scheduled_run_date: Optional[datetime] = None,
do_not_use_cluster_ops: Optional[bool] = False,
all_gpus: Optional[bool] = False,
generate_speech: Optional[bool] = False,
*args,
**kwargs,
) -> Any:
"""
Executes the agent's run method on a specified device.
Executes the agent's run method on a specified device, with optional scheduling.
This method attempts to execute the agent's run method on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`.
If a `scheduled_run_date` is provided, the method will wait until that date and time before executing the task.
Args:
task (Optional[str], optional): The task to be executed. Defaults to None.
img (Optional[str], optional): The image to be processed. Defaults to None.
is_last (bool, optional): Indicates if this is the last task. Defaults to False.
device (str, optional): The device to use for execution. Defaults to "cpu".
device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0.
all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
scheduled_run_date (Optional[datetime], optional): The date and time to schedule the task. Defaults to None.
do_not_use_cluster_ops (bool, optional): If True, does not use cluster ops. Defaults to False.
*args: Additional positional arguments to be passed to the execution method.
**kwargs: Additional keyword arguments to be passed to the execution method.
@ -2264,33 +2337,45 @@ class Agent:
ValueError: If an invalid device is specified.
Exception: If any other error occurs during execution.
"""
try:
logger.info(f"Attempting to run on device: {device}")
if device == "cpu":
logger.info("Device set to CPU")
if all_cores is True:
count = os.cpu_count()
logger.info(
f"Using all available CPU cores: {count}"
device = device or self.device
device_id = device_id or self.device_id
all_cores = all_cores or self.all_cores
all_gpus = all_gpus or self.all_gpus
do_not_use_cluster_ops = (
do_not_use_cluster_ops or self.do_not_use_cluster_ops
)
else:
count = device_id
logger.info(f"Using specific CPU core: {count}")
return execute_with_cpu_cores(
count, self._run, task, img, *args, **kwargs
)
if scheduled_run_date:
while datetime.now() < scheduled_run_date:
time.sleep(
1
) # Sleep for a short period to avoid busy waiting
# If device gpu
elif device == "gpu":
logger.info("Device set to GPU")
return execute_on_gpu(
device_id, self._run, task, img, *args, **kwargs
try:
# If cluster ops disabled, run directly
if do_not_use_cluster_ops is True:
logger.info("Running without cluster operations")
return self._run(
task=task,
img=img,
generate_speech=generate_speech,
*args,
**kwargs,
)
else:
raise ValueError(
f"Invalid device specified: {device}. Supported devices are 'cpu' and 'gpu'."
return exec_callable_with_clusterops(
device=device,
device_id=device_id,
all_cores=all_cores,
all_gpus=all_gpus,
func=self._run,
task=task,
img=img,
generate_speech=generate_speech,
*args,
**kwargs,
)
except ValueError as e:
logger.error(f"Invalid device specified: {e}")
raise e
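# Example usage (editor's sketch, not part of the diff): exercises the device /
# scheduling wrapper documented above, assuming it is exposed as the agent's
# public run method. Task text, timing, and model choice are illustrative.
from datetime import datetime, timedelta

from swarms.structs.agent import Agent

scheduled_agent = Agent(
    agent_name="Scheduled-Agent",
    model_name="gpt-4o-mini",
    max_loops=1,
)

# Run immediately on the local process, bypassing cluster ops.
scheduled_agent.run(
    task="Draft a short market summary.",
    do_not_use_cluster_ops=True,
)

# Or schedule the same task to start roughly five seconds from now.
scheduled_agent.run(
    task="Draft a short market summary.",
    device="cpu",
    all_cores=True,
    scheduled_run_date=datetime.now() + timedelta(seconds=5),
)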
@ -2334,3 +2419,26 @@ class Agent:
f"Unexpected error handling artifact: {str(e)}"
)
raise
def showcase_config(self):
# Convert all values in config_dict to concise string representations
config_dict = self.to_dict()
for key, value in config_dict.items():
if isinstance(value, list):
# Format list as a comma-separated string
config_dict[key] = ", ".join(
str(item) for item in value
)
elif isinstance(value, dict):
# Format dict as key-value pairs in a single string
config_dict[key] = ", ".join(
f"{k}: {v}" for k, v in value.items()
)
else:
# Ensure any non-iterable value is a string
config_dict[key] = str(value)
return formatter.print_table(
f"Agent: {self.agent_name} Configuration", config_dict
)

@ -7,7 +7,6 @@ from pydantic import BaseModel, Field, ValidationError
from swarms import Agent
from swarms.utils.loguru_logger import logger
from swarms.utils.report_error_loguru import report_error
class AgentConfigSchema(BaseModel):
@ -229,7 +228,7 @@ class AgentRegistry:
logger.info("Listing all agents.")
return agent_names
except Exception as e:
report_error(e)
logger.error(f"Error: {e}")
raise e
def return_all_agents(self) -> List[Agent]:
@ -245,7 +244,7 @@ class AgentRegistry:
logger.info("Returning all agents.")
return agents
except Exception as e:
report_error(e)
logger.error(f"Error: {e}")
raise e
def query(
@ -276,7 +275,7 @@ class AgentRegistry:
logger.info("Querying agents with condition.")
return agents
except Exception as e:
report_error(e)
logger.error(f"Error: {e}")
raise e
def find_agent_by_name(self, agent_name: str) -> Optional[Agent]:
@ -300,7 +299,7 @@ class AgentRegistry:
if agent.agent_name == agent_name:
return agent
except Exception as e:
report_error(e)
logger.error(f"Error: {e}")
raise e
def agent_to_py_model(self, agent: Agent):

@ -1,10 +1,12 @@
from typing import List, Optional
import chromadb
from loguru import logger
from tenacity import retry, stop_after_attempt, wait_exponential
from typing import Union, Callable, Any
from swarms import Agent
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="agent_router")
class AgentRouter:

@ -0,0 +1,87 @@
from swarms.structs.agent import Agent
from typing import List
def showcase_available_agents(
agents: List[Agent],
name: str = None,
description: str = None,
format: str = "XML",
) -> str:
"""
Format the available agents in either XML or Table format.
Args:
agents (List[Agent]): A list of agents to represent
name (str, optional): Name of the swarm
description (str, optional): Description of the swarm
format (str, optional): Output format ("XML" or "Table"). Defaults to "XML"
Returns:
str: Formatted string containing agent information
"""
def truncate(text: str, max_length: int = 130) -> str:
return (
f"{text[:max_length]}..."
if len(text) > max_length
else text
)
output = []
if format.upper() == "TABLE":
output.append("\n| ID | Agent Name | Description |")
output.append("|-----|------------|-------------|")
for idx, agent in enumerate(agents):
if isinstance(agent, Agent):
agent_name = getattr(agent, "agent_name", str(agent))
description = getattr(
agent,
"description",
getattr(
agent, "system_prompt", "Unknown description"
),
)
desc = truncate(description, 50)
output.append(
f"| {idx + 1} | {agent_name} | {desc} |"
)
else:
output.append(
f"| {idx + 1} | {agent} | Unknown description |"
)
return "\n".join(output)
# Default XML format
output.append("<agents>")
if name:
output.append(f" <name>{name}</name>")
if description:
output.append(
f" <description>{truncate(description)}</description>"
)
for idx, agent in enumerate(agents):
output.append(f" <agent id='{idx + 1}'>")
if isinstance(agent, Agent):
agent_name = getattr(agent, "agent_name", str(agent))
description = getattr(
agent,
"description",
getattr(
agent, "system_prompt", "Unknown description"
),
)
output.append(f" <name>{agent_name}</name>")
output.append(
f" <description>{truncate(description)}</description>"
)
else:
output.append(f" <name>{agent}</name>")
output.append(
" <description>Unknown description</description>"
)
output.append(" </agent>")
output.append("</agents>")
return "\n".join(output)

@ -1,3 +0,0 @@
"""
This class will input a swarm type -> then auto generate a list of `Agent` structures with their name, descriptions, system prompts, and more.
"""

@ -1,5 +1,3 @@
from loguru import logger
import os
from typing import List
@ -8,6 +6,10 @@ from swarm_models import OpenAIFunctionCaller, OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter
from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.agents_available import showcase_available_agents
logger = initialize_logger(log_folder="auto_swarm_builder")
class AgentConfig(BaseModel):
@ -24,10 +26,10 @@ class AgentConfig(BaseModel):
description="The system prompt that defines the agent's behavior",
example="You are a research agent. Your role is to gather and analyze information...",
)
max_loops: int = Field(
description="Maximum number of reasoning loops the agent can perform",
example=3,
)
# max_loops: int = Field(
# description="Maximum number of reasoning loops the agent can perform",
# example=3,
# )
class SwarmConfig(BaseModel):
@ -211,10 +213,20 @@ class AutoSwarmBuilder:
agent_name=agent_config.name,
agent_description=agent_config.description,
agent_system_prompt=agent_config.system_prompt,
max_loops=agent_config.max_loops,
# max_loops=agent_config.max_loops,
)
agents.append(agent)
# Showcasing available agents
agents_available = showcase_available_agents(
name=self.name,
description=self.description,
agents=agents,
)
for agent in agents:
agent.system_prompt += "\n" + agents_available
return agents
def build_agent(
@ -280,6 +292,8 @@ class AutoSwarmBuilder:
"""
logger.info("Routing task through swarm")
swarm_router_instance = SwarmRouter(
name=self.name,
description=self.description,
agents=agents,
swarm_type="auto",
max_loops=1,
@ -290,10 +304,14 @@ class AutoSwarmBuilder:
)
example = AutoSwarmBuilder()
example = AutoSwarmBuilder(
name="ChipDesign-Swarm",
description="A swarm of specialized AI agents collaborating on chip architecture, logic design, verification, and optimization to create novel semiconductor designs",
max_loops=1,
)
print(
example.run(
"Write multiple blog posts about the latest advancements in swarm intelligence all at once"
"Design a new AI accelerator chip optimized for transformer model inference. Consider the following aspects: 1) Overall chip architecture and block diagram 2) Memory hierarchy and interconnects 3) Processing elements and data flow 4) Power and thermal considerations 5) Physical layout recommendations -> "
)
)

@ -20,13 +20,15 @@ from swarms_memory import BaseVectorDatabase
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.structs.omni_agent_types import AgentType
from swarms.utils.loguru_logger import logger
from pydantic import BaseModel
from swarms.utils.pandas_utils import (
dict_to_dataframe,
display_agents_info,
pydantic_model_to_dataframe,
)
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="base_swarm")
class BaseSwarm(ABC):

@ -1,12 +1,13 @@
import json
from typing import Any, Dict, List, Optional
from termcolor import colored
from swarms.utils.formatter import formatter
from swarms.structs.agent import Agent
from swarms.structs.base_structure import BaseStructure
from swarms.structs.task import Task
from swarms.utils.loguru_logger import logger
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger("base-workflow")
class BaseWorkflow(BaseStructure):
@ -130,9 +131,10 @@ class BaseWorkflow(BaseStructure):
for task in self.tasks:
task.result = None
except Exception as error:
print(
colored(f"Error resetting workflow: {error}", "red"),
formatter.print_panel(
f"Error resetting workflow: {error}"
)
raise error
def get_task_results(self) -> Dict[str, Any]:
"""
@ -146,10 +148,8 @@ class BaseWorkflow(BaseStructure):
task.description: task.result for task in self.tasks
}
except Exception as error:
print(
colored(
f"Error getting task results: {error}", "red"
),
formatter.print_panel(
f"Error getting task results: {error}"
)
def remove_task(self, task: str) -> None:
@ -161,12 +161,10 @@ class BaseWorkflow(BaseStructure):
if task.description != task
]
except Exception as error:
print(
colored(
formatter.print_panel(
f"Error removing task from workflow: {error}",
"red",
),
)
raise error
def update_task(self, task: str, **updates) -> None:
"""
@ -201,11 +199,9 @@ class BaseWorkflow(BaseStructure):
f"Task {task} not found in workflow."
)
except Exception as error:
print(
colored(
f"Error updating task in workflow: {error}", "red"
formatter.print_panel(
f"Error updating task in workflow: {error}"
),
)
def delete_task(self, task: str) -> None:
"""
@ -238,12 +234,10 @@ class BaseWorkflow(BaseStructure):
f"Task {task} not found in workflow."
)
except Exception as error:
print(
colored(
formatter.print_panel(
f"Error deleting task from workflow: {error}",
"red",
),
)
raise error
def save_workflow_state(
self,
@ -285,23 +279,18 @@ class BaseWorkflow(BaseStructure):
}
json.dump(state, f, indent=4)
except Exception as error:
print(
colored(
formatter.print_panel(
f"Error saving workflow state: {error}",
"red",
)
)
raise error
def add_objective_to_workflow(self, task: str, **kwargs) -> None:
"""Adds an objective to the workflow."""
try:
print(
colored(
formatter.print_panel(
"""
Adding Objective to Workflow...""",
"green",
attrs=["bold", "underline"],
)
)
task = Task(
@ -312,12 +301,10 @@ class BaseWorkflow(BaseStructure):
)
self.tasks.append(task)
except Exception as error:
print(
colored(
formatter.print_panel(
f"Error adding objective to workflow: {error}",
"red",
)
)
raise error
def load_workflow_state(
self, filepath: str = None, **kwargs
@ -357,11 +344,8 @@ class BaseWorkflow(BaseStructure):
)
self.tasks.append(task)
except Exception as error:
print(
colored(
formatter.print_panel(
f"Error loading workflow state: {error}",
"red",
)
)
def workflow_dashboard(self, **kwargs) -> None:
@ -381,8 +365,7 @@ class BaseWorkflow(BaseStructure):
>>> workflow.workflow_dashboard()
"""
print(
colored(
formatter.print_panel(
f"""
Sequential Workflow Dashboard
--------------------------------
@ -396,10 +379,7 @@ class BaseWorkflow(BaseStructure):
--------------------------------
Metadata:
kwargs: {kwargs}
""",
"cyan",
attrs=["bold", "underline"],
)
"""
)
def workflow_bootup(self, **kwargs) -> None:
@ -407,11 +387,6 @@ class BaseWorkflow(BaseStructure):
Workflow bootup.
"""
print(
colored(
"""
Sequential Workflow Initializing...""",
"green",
attrs=["bold", "underline"],
)
formatter.print_panel(
"""Sequential Workflow Initializing...""",
)
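# Example usage (editor's sketch, not part of the diff): the formatter utility
# replaces the old print(colored(...)) calls throughout this changeset. Only the
# call patterns visible in the diff (print_panel with an optional title, and
# print_table) are assumed here; the content strings are illustrative.
from swarms.utils.formatter import formatter

formatter.print_panel("Sequential Workflow Initializing...")
formatter.print_panel(
    "\n User: summarize the Q3 results",
    "Task Request for Analyst-Agent",
)
formatter.print_table(
    "Agent: Analyst-Agent Configuration",
    {"max_loops": "1", "device": "cpu"},
)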

@ -2,8 +2,11 @@ from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
from swarms.structs.agent import Agent
from swarms.utils.loguru_logger import logger
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger("company-swarm")
@dataclass

@ -5,7 +5,6 @@ from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
from loguru import logger
from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_exponential
@ -19,6 +18,9 @@ from clusterops import (
execute_on_multiple_gpus,
list_available_gpus,
)
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="concurrent_workflow")
class AgentOutputSchema(BaseModel):

@ -3,9 +3,14 @@ import json
from typing import Any, Optional
import yaml
from termcolor import colored
from swarms.structs.base_structure import BaseStructure
from typing import TYPE_CHECKING
from swarms.utils.formatter import formatter
if TYPE_CHECKING:
from swarms.structs.agent import (
Agent,
) # Only imported during type checking
class Conversation(BaseStructure):
@ -185,18 +190,9 @@ class Conversation(BaseStructure):
Args:
detailed (bool, optional): detailed. Defaults to False.
"""
role_to_color = {
"system": "red",
"user": "green",
"assistant": "blue",
"function": "magenta",
}
for message in self.conversation_history:
print(
colored(
f"{message['role']}: {message['content']}\n\n",
role_to_color[message["role"]],
)
formatter.print_panel(
f"{message['role']}: {message['content']}\n\n"
)
def export_conversation(self, filename: str, *args, **kwargs):
@ -301,47 +297,37 @@ class Conversation(BaseStructure):
for message in messages:
if message["role"] == "system":
print(
colored(
formatter.print_panel(
f"system: {message['content']}\n",
role_to_color[message["role"]],
)
)
elif message["role"] == "user":
print(
colored(
formatter.print_panel(
f"user: {message['content']}\n",
role_to_color[message["role"]],
)
)
elif message["role"] == "assistant" and message.get(
"function_call"
):
print(
colored(
formatter.print_panel(
f"assistant: {message['function_call']}\n",
role_to_color[message["role"]],
)
)
elif message["role"] == "assistant" and not message.get(
"function_call"
):
print(
colored(
formatter.print_panel(
f"assistant: {message['content']}\n",
role_to_color[message["role"]],
)
)
elif message["role"] == "tool":
print(
colored(
formatter.print_panel(
(
f"function ({message['name']}):"
f" {message['content']}\n"
),
role_to_color[message["role"]],
)
)
def truncate_memory_with_tokenizer(self):
"""
@ -392,6 +378,33 @@ class Conversation(BaseStructure):
def to_yaml(self):
return yaml.dump(self.conversation_history)
def get_visible_messages(self, agent: "Agent", turn: int):
"""
Get the visible messages for a given agent and turn.
Args:
agent (Agent): The agent.
turn (int): The turn number.
Returns:
List[Dict]: The list of visible messages.
"""
# Get the messages before the current turn
prev_messages = [
message
for message in self.conversation_history
if message["turn"] < turn
]
visible_messages = []
for message in prev_messages:
if (
message["visible_to"] == "all"
or agent.agent_name in message["visible_to"]
):
visible_messages.append(message)
return visible_messages
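# Example usage (editor's sketch, not part of the diff): the visibility filter
# above expects each history entry to carry "turn" and "visible_to" keys, which
# this changeset does not show being populated, so they are injected manually
# here; the stub agent only needs an agent_name attribute.
from swarms.structs.conversation import Conversation

convo = Conversation(time_enabled=False)
convo.conversation_history.extend(
    [
        {
            "role": "Planner",
            "content": "Outline the report.",
            "turn": 0,
            "visible_to": "all",
        },
        {
            "role": "Critic",
            "content": "Private note for the Writer.",
            "turn": 0,
            "visible_to": ["Writer"],
        },
    ]
)


class WriterStub:
    agent_name = "Writer"


visible = convo.get_visible_messages(agent=WriterStub(), turn=1)
print([m["content"] for m in visible])  # both entries are visible to "Writer"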
# # Example usage
# conversation = Conversation()

@ -1,393 +0,0 @@
from typing import List, Callable, Union, Optional
from loguru import logger
from swarms.structs.base_swarm import BaseSwarm
from queue import PriorityQueue
from concurrent.futures import (
ThreadPoolExecutor,
as_completed,
)
import time
from pydantic import BaseModel, Field
class SwarmRunData(BaseModel):
"""
Pydantic model to capture metadata about each swarm's execution.
"""
swarm_name: str
task: str
priority: int
start_time: Optional[float] = None
end_time: Optional[float] = None
duration: Optional[float] = None
status: str = "Pending"
retries: int = 0
result: Optional[str] = None
exception: Optional[str] = None
class FederatedSwarmModel(BaseModel):
"""
Pydantic base model to capture and log data for the FederatedSwarm system.
"""
task: str
swarms_data: List[SwarmRunData] = Field(default_factory=list)
def add_swarm(self, swarm_name: str, task: str, priority: int):
swarm_data = SwarmRunData(
swarm_name=swarm_name, task=task, priority=priority
)
self.swarms_data.append(swarm_data)
def update_swarm_status(
self,
swarm_name: str,
status: str,
start_time: float = None,
end_time: float = None,
retries: int = 0,
result: str = None,
exception: str = None,
):
for swarm in self.swarms_data:
if swarm.name == swarm_name:
swarm.status = status
if start_time:
swarm.start_time = start_time
if end_time:
swarm.end_time = end_time
swarm.duration = end_time - swarm.start_time
swarm.retries = retries
swarm.result = result
swarm.exception = exception
break
class FederatedSwarm:
def __init__(
self,
swarms: List[Union[BaseSwarm, Callable]],
max_workers: int = 4,
):
"""
Initializes the FederatedSwarm with a list of swarms or callable objects and
sets up a priority queue and thread pool for concurrency.
Args:
swarms (List[Union[BaseSwarm, Callable]]): A list of swarms (BaseSwarm) or callable objects.
max_workers (int): The maximum number of concurrent workers (threads) to run swarms in parallel.
"""
self.swarms = PriorityQueue()
self.max_workers = max_workers
self.thread_pool = ThreadPoolExecutor(
max_workers=self.max_workers
)
self.task_queue = []
self.future_to_swarm = {}
self.results = {}
self.validate_swarms(swarms)
def init_metadata(self, task: str):
"""
Initializes the Pydantic base model to capture metadata about the current task and swarms.
"""
self.metadata = FederatedSwarmModel(task=task)
for priority, swarm in list(self.swarms.queue):
swarm_name = (
swarm.__class__.__name__
if hasattr(swarm, "__class__")
else str(swarm)
)
self.metadata.add_swarm(
swarm_name=swarm_name, task=task, priority=priority
)
logger.info(f"Metadata initialized for task '{task}'.")
def validate_swarms(
self, swarms: List[Union[BaseSwarm, Callable]]
):
"""
Validates and adds swarms to the priority queue, ensuring each swarm has a `run(task)` method.
Args:
swarms (List[Union[BaseSwarm, Callable]]): List of swarms with an optional priority value.
"""
for swarm, priority in swarms:
if not callable(swarm):
raise TypeError(f"{swarm} is not callable.")
if hasattr(swarm, "run"):
logger.info(f"{swarm} has a 'run' method.")
else:
raise AttributeError(
f"{swarm} does not have a 'run(task)' method."
)
self.swarms.put((priority, swarm))
logger.info(
f"Swarm {swarm} added with priority {priority}."
)
def run_parallel(
self,
task: str,
timeout: Optional[float] = None,
retries: int = 0,
):
"""
Runs all swarms in parallel with prioritization and optional timeout.
Args:
task (str): The task to be passed to the `run` method of each swarm.
timeout (Optional[float]): Maximum time allowed for each swarm to run.
retries (int): Number of retries allowed for failed swarms.
"""
logger.info(
f"Running task '{task}' in parallel with timeout: {timeout}, retries: {retries}"
)
self.init_metadata(task)
while not self.swarms.empty():
priority, swarm = self.swarms.get()
swarm_name = (
swarm.__class__.__name__
if hasattr(swarm, "__class__")
else str(swarm)
)
future = self.thread_pool.submit(
self._run_with_retry,
swarm,
task,
retries,
timeout,
swarm_name,
)
self.future_to_swarm[future] = swarm
for future in as_completed(self.future_to_swarm):
swarm = self.future_to_swarm[future]
try:
result = future.result()
swarm_name = (
swarm.__class__.__name__
if hasattr(swarm, "__class__")
else str(swarm)
)
self.metadata.update_swarm_status(
swarm_name=swarm_name,
status="Completed",
result=result,
)
logger.info(
f"Swarm {swarm_name} completed successfully."
)
except Exception as e:
swarm_name = (
swarm.__class__.__name__
if hasattr(swarm, "__class__")
else str(swarm)
)
self.metadata.update_swarm_status(
swarm_name=swarm_name,
status="Failed",
exception=str(e),
)
logger.error(f"Swarm {swarm_name} failed: {e}")
self.results[swarm] = "Failed"
def run_sequentially(
self,
task: str,
retries: int = 0,
timeout: Optional[float] = None,
):
"""
Runs all swarms sequentially in order of priority.
Args:
task (str): The task to pass to the `run` method of each swarm.
retries (int): Number of retries for failed swarms.
timeout (Optional[float]): Optional time limit for each swarm.
"""
logger.info(f"Running task '{task}' sequentially.")
while not self.swarms.empty():
priority, swarm = self.swarms.get()
try:
logger.info(
f"Running swarm {swarm} with priority {priority}."
)
self._run_with_retry(swarm, task, retries, timeout)
logger.info(f"Swarm {swarm} completed successfully.")
except Exception as e:
logger.error(f"Swarm {swarm} failed with error: {e}")
def _run_with_retry(
self,
swarm: Union[BaseSwarm, Callable],
task: str,
retries: int,
timeout: Optional[float],
swarm_name: str,
):
"""
Helper function to run a swarm with a retry mechanism and optional timeout.
Args:
swarm (Union[BaseSwarm, Callable]): The swarm to run.
task (str): The task to pass to the swarm.
retries (int): The number of retries allowed for the swarm in case of failure.
timeout (Optional[float]): Maximum time allowed for the swarm to run.
swarm_name (str): Name of the swarm (used for metadata).
"""
attempts = 0
start_time = time.time()
while attempts <= retries:
try:
logger.info(
f"Running swarm {swarm}. Attempt: {attempts + 1}"
)
self.metadata.update_swarm_status(
swarm_name=swarm_name,
status="Running",
start_time=start_time,
)
if hasattr(swarm, "run"):
if timeout:
start_time = time.time()
swarm.run(task)
duration = time.time() - start_time
if duration > timeout:
raise TimeoutError(
f"Swarm {swarm} timed out after {duration:.2f}s."
)
else:
swarm.run(task)
else:
swarm(task)
end_time = time.time()
self.metadata.update_swarm_status(
swarm_name=swarm_name,
status="Completed",
end_time=end_time,
retries=attempts,
)
return "Success"
except Exception as e:
logger.error(f"Swarm {swarm} failed: {e}")
attempts += 1
if attempts > retries:
end_time = time.time()
self.metadata.update_swarm_status(
swarm_name=swarm_name,
status="Failed",
end_time=end_time,
retries=attempts,
exception=str(e),
)
logger.error(f"Swarm {swarm} exhausted retries.")
raise
def add_swarm(
self, swarm: Union[BaseSwarm, Callable], priority: int
):
"""
Adds a new swarm to the FederatedSwarm at runtime.
Args:
swarm (Union[BaseSwarm, Callable]): The swarm to add.
priority (int): The priority level for the swarm.
"""
self.swarms.put((priority, swarm))
logger.info(
f"Swarm {swarm} added dynamically with priority {priority}."
)
def queue_task(self, task: str):
"""
Adds a task to the internal task queue for batch processing.
Args:
task (str): The task to queue.
"""
self.task_queue.append(task)
logger.info(f"Task '{task}' added to the queue.")
def process_task_queue(self):
"""
Processes all tasks in the task queue.
"""
for task in self.task_queue:
logger.info(f"Processing task: {task}")
self.run_parallel(task)
self.task_queue = []
def log_swarm_results(self):
"""
Logs the results of all swarms after execution.
"""
logger.info("Logging swarm results...")
for swarm, result in self.results.items():
logger.info(f"Swarm {swarm}: {result}")
def get_swarm_status(self) -> dict:
"""
Retrieves the status of each swarm (completed, running, failed).
Returns:
dict: Dictionary containing swarm statuses.
"""
status = {}
for future, swarm in self.future_to_swarm.items():
if future.done():
status[swarm] = "Completed"
elif future.running():
status[swarm] = "Running"
else:
status[swarm] = "Failed"
return status
def cancel_running_swarms(self):
"""
Cancels all currently running swarms by shutting down the thread pool.
"""
logger.warning("Cancelling all running swarms...")
self.thread_pool.shutdown(wait=False)
logger.info("All running swarms cancelled.")
# Example Usage:
# class ExampleSwarm(BaseSwarm):
# def run(self, task: str):
# logger.info(f"ExampleSwarm is processing task: {task}")
# def example_callable(task: str):
# logger.info(f"Callable is processing task: {task}")
# if __name__ == "__main__":
# swarms = [(ExampleSwarm(), 1), (example_callable, 2)]
# federated_swarm = FederatedSwarm(swarms)
# # Run in parallel
# federated_swarm.run_parallel(
# "Process data", timeout=10, retries=3
# )
# # Run sequentially
# federated_swarm.run_sequentially("Process data sequentially")
# # Log results
# federated_swarm.log_swarm_results()
# # Get status of swarms
# status = federated_swarm.get_swarm_status()
# logger.info(f"Swarm statuses: {status}")
# # Cancel running swarms (if needed)
# # federated_swarm.cancel_running_swarms()

@ -0,0 +1,665 @@
"""
GraphSwarm: A production-grade framework for orchestrating swarms of agents
Author: Claude
License: MIT
Version: 2.0.0
"""
import asyncio
import json
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union
import chromadb
import networkx as nx
from loguru import logger
from pydantic import BaseModel, Field
from swarms import Agent
# Configure logging
logger.add(
"graphswarm.log",
rotation="500 MB",
retention="10 days",
level="INFO",
format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
)
class AgentOutput(BaseModel):
"""Structured output from an agent."""
agent_name: str
timestamp: float = Field(default_factory=time.time)
output: Any
execution_time: float
error: Optional[str] = None
metadata: Dict = Field(default_factory=dict)
class SwarmOutput(BaseModel):
"""Structured output from the entire swarm."""
timestamp: float = Field(default_factory=time.time)
outputs: Dict[str, AgentOutput]
execution_time: float
success: bool
error: Optional[str] = None
metadata: Dict = Field(default_factory=dict)
class SwarmMemory:
"""Vector-based memory system for GraphSwarm using ChromaDB."""
def __init__(self, collection_name: str = "swarm_memories"):
"""Initialize SwarmMemory with ChromaDB."""
self.client = chromadb.Client()
# Get or create collection
self.collection = self.client.get_or_create_collection(
name=collection_name,
metadata={"description": "GraphSwarm execution memories"},
)
def store_execution(self, task: str, result: SwarmOutput):
"""Store execution results in vector memory."""
try:
# Create metadata
metadata = {
"timestamp": datetime.now().isoformat(),
"success": result.success,
"execution_time": result.execution_time,
"agent_sequence": json.dumps(
[name for name in result.outputs.keys()]
),
"error": result.error if result.error else "",
}
# Create document from outputs
document = {
"task": task,
"outputs": json.dumps(
{
name: {
"output": str(output.output),
"execution_time": output.execution_time,
"error": output.error,
}
for name, output in result.outputs.items()
}
),
}
# Store in ChromaDB
self.collection.add(
documents=[json.dumps(document)],
metadatas=[metadata],
ids=[f"exec_{datetime.now().timestamp()}"],
)
print("added to database")
logger.info(f"Stored execution in memory: {task}")
except Exception as e:
logger.error(
f"Failed to store execution in memory: {str(e)}"
)
def get_similar_executions(self, task: str, limit: int = 5):
"""Retrieve similar past executions."""
try:
# Query ChromaDB for similar executions
results = self.collection.query(
query_texts=[task],
n_results=limit,
include=["documents", "metadatas"],
)
print(results)
if not results["documents"]:
return []
# Process results
executions = []
for doc, metadata in zip(
results["documents"][0], results["metadatas"][0]
):
doc_dict = json.loads(doc)
executions.append(
{
"task": doc_dict["task"],
"outputs": json.loads(doc_dict["outputs"]),
"success": metadata["success"],
"execution_time": metadata["execution_time"],
"agent_sequence": json.loads(
metadata["agent_sequence"]
),
"timestamp": metadata["timestamp"],
}
)
return executions
except Exception as e:
logger.error(
f"Failed to retrieve similar executions: {str(e)}"
)
return []
def get_optimal_sequence(self, task: str) -> Optional[List[str]]:
"""Get the most successful agent sequence for similar tasks."""
similar_executions = self.get_similar_executions(task)
print(f"similar_executions {similar_executions}")
if not similar_executions:
return None
# Sort by success and execution time
successful_execs = [
ex for ex in similar_executions if ex["success"]
]
if not successful_execs:
return None
# Return sequence from most successful execution
return successful_execs[0]["agent_sequence"]
def clear_memory(self):
"""Clear all memories."""
self.client.delete_collection(self.collection.name)
self.collection = self.client.get_or_create_collection(
name=self.collection.name
)
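# Example usage (editor's sketch, not part of the diff): exercises the memory
# layer above with a hand-built SwarmOutput. It assumes the snippet lives in the
# same module as SwarmMemory / SwarmOutput / AgentOutput (the new file's path is
# not shown in this diff), and that ChromaDB's default embedding function is
# available locally.
demo_memory = SwarmMemory(collection_name="demo_swarm_memories")

demo_result = SwarmOutput(
    outputs={
        "Planner": AgentOutput(
            agent_name="Planner",
            output="1) Gather data 2) Analyze 3) Report",
            execution_time=1.2,
        )
    },
    execution_time=1.2,
    success=True,
)

demo_memory.store_execution("Plan a product launch", demo_result)
print(demo_memory.get_similar_executions("Plan a launch", limit=1))
print(demo_memory.get_optimal_sequence("Plan a launch"))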
class GraphSwarm:
"""
Enhanced framework for creating and managing swarms of collaborative agents.
"""
def __init__(
self,
agents: Union[
List[Agent], List[Tuple[Agent, List[str]]], None
] = None,
max_workers: Optional[int] = None,
swarm_name: str = "Collaborative Agent Swarm",
memory_collection: str = "swarm_memory",
):
"""Initialize GraphSwarm."""
self.graph = nx.DiGraph()
self.agents: Dict[str, Agent] = {}
self.dependencies: Dict[str, List[str]] = {}
self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.swarm_name = swarm_name
self.memory_collection = memory_collection
self.memory = SwarmMemory(collection_name=memory_collection)
if agents:
self.initialize_agents(agents)
logger.info(f"Initialized GraphSwarm: {swarm_name}")
def initialize_agents(
self,
agents: Union[List[Agent], List[Tuple[Agent, List[str]]]],
):
"""Initialize agents and their dependencies."""
try:
# Handle list of Agents or (Agent, dependencies) tuples
for item in agents:
if isinstance(item, tuple):
agent, dependencies = item
else:
agent, dependencies = item, []
if not isinstance(agent, Agent):
raise ValueError(
f"Expected Agent object, got {type(agent)}"
)
self.agents[agent.agent_name] = agent
self.dependencies[agent.agent_name] = dependencies
self.graph.add_node(agent.agent_name, agent=agent)
# Add dependencies
for dep in dependencies:
if dep not in self.agents:
raise ValueError(
f"Dependency {dep} not found for agent {agent.agent_name}"
)
self.graph.add_edge(dep, agent.agent_name)
self._validate_graph()
except Exception as e:
logger.error(f"Failed to initialize agents: {str(e)}")
raise
def _validate_graph(self):
"""Validate the agent dependency graph."""
if not self.graph.nodes():
raise ValueError("No agents added to swarm")
if not nx.is_directed_acyclic_graph(self.graph):
cycles = list(nx.simple_cycles(self.graph))
raise ValueError(
f"Agent dependency graph contains cycles: {cycles}"
)
def _get_agent_role_description(self, agent_name: str) -> str:
"""Generate a description of the agent's role in the swarm."""
predecessors = list(self.graph.predecessors(agent_name))
successors = list(self.graph.successors(agent_name))
position = (
"initial"
if not predecessors
else ("final" if not successors else "intermediate")
)
role = f"""You are {agent_name}, a specialized agent in the {self.swarm_name}.
Position: {position} agent in the workflow
Your relationships:"""
if predecessors:
role += (
f"\nYou receive input from: {', '.join(predecessors)}"
)
if successors:
role += f"\nYour output will be used by: {', '.join(successors)}"
return role
def _generate_workflow_context(self) -> str:
"""Generate a description of the entire workflow."""
execution_order = list(nx.topological_sort(self.graph))
workflow = f"""Workflow Overview of {self.swarm_name}:
Processing Order:
{' -> '.join(execution_order)}
Agent Roles:
"""
for agent_name in execution_order:
predecessors = list(self.graph.predecessors(agent_name))
successors = list(self.graph.successors(agent_name))
workflow += f"\n\n{agent_name}:"
if predecessors:
workflow += (
f"\n- Receives from: {', '.join(predecessors)}"
)
if successors:
workflow += f"\n- Sends to: {', '.join(successors)}"
if not predecessors and not successors:
workflow += "\n- Independent agent"
return workflow
def _build_agent_prompt(
self, agent_name: str, task: str, context: Dict = None
) -> str:
"""Build a comprehensive prompt for the agent including role and context."""
prompt_parts = [
self._get_agent_role_description(agent_name),
"\nWorkflow Context:",
self._generate_workflow_context(),
"\nYour Task:",
task,
]
if context:
prompt_parts.extend(
["\nContext from Previous Agents:", str(context)]
)
prompt_parts.extend(
[
"\nInstructions:",
"1. Process the task according to your role",
"2. Consider the input from previous agents when available",
"3. Provide clear, structured output",
"4. Remember that your output will be used by subsequent agents",
"\nResponse Guidelines:",
"- Provide clear, well-organized output",
"- Include relevant details and insights",
"- Highlight key findings",
"- Flag any uncertainties or issues",
]
)
return "\n".join(prompt_parts)
async def _execute_agent(
self, agent_name: str, task: str, context: Dict = None
) -> AgentOutput:
"""Execute a single agent."""
start_time = time.time()
agent = self.agents[agent_name]
try:
# Build comprehensive prompt
full_prompt = self._build_agent_prompt(
agent_name, task, context
)
logger.debug(f"Prompt for {agent_name}:\n{full_prompt}")
# Execute agent
output = await asyncio.to_thread(agent.run, full_prompt)
return AgentOutput(
agent_name=agent_name,
output=output,
execution_time=time.time() - start_time,
metadata={
"task": task,
"context": context,
"position_in_workflow": list(
nx.topological_sort(self.graph)
).index(agent_name),
},
)
except Exception as e:
logger.error(
f"Error executing agent {agent_name}: {str(e)}"
)
return AgentOutput(
agent_name=agent_name,
output=None,
execution_time=time.time() - start_time,
error=str(e),
metadata={"task": task},
)
async def execute(self, task: str) -> SwarmOutput:
"""
Execute the entire swarm of agents with memory integration.
Args:
task: Initial task to execute
Returns:
SwarmOutput: Structured output from all agents
"""
start_time = time.time()
outputs = {}
success = True
error = None
try:
# Get similar past executions
similar_executions = self.memory.get_similar_executions(
task, limit=3
)
optimal_sequence = self.memory.get_optimal_sequence(task)
# Get base execution order
base_execution_order = list(
nx.topological_sort(self.graph)
)
# Determine final execution order
if optimal_sequence and all(
agent in base_execution_order
for agent in optimal_sequence
):
logger.info(
f"Using optimal sequence from memory: {optimal_sequence}"
)
execution_order = optimal_sequence
else:
execution_order = base_execution_order
# Get historical context if available
historical_context = {}
if similar_executions:
best_execution = similar_executions[0]
if best_execution["success"]:
historical_context = {
"similar_task": best_execution["task"],
"previous_outputs": best_execution["outputs"],
"execution_time": best_execution[
"execution_time"
],
"success_patterns": self._extract_success_patterns(
similar_executions
),
}
# Execute agents in order
for agent_name in execution_order:
try:
# Get context from dependencies and history
agent_context = {
"dependencies": {
dep: outputs[dep].output
for dep in self.graph.predecessors(
agent_name
)
if dep in outputs
},
"historical": historical_context,
"position": execution_order.index(agent_name),
"total_agents": len(execution_order),
}
# Execute agent with enhanced context
output = await self._execute_agent(
agent_name, task, agent_context
)
outputs[agent_name] = output
# Update historical context with current execution
if output.output:
historical_context.update(
{
f"current_{agent_name}_output": output.output
}
)
# Check for errors
if output.error:
success = False
error = f"Agent {agent_name} failed: {output.error}"
# Try to recover using memory
if similar_executions:
recovery_output = self._attempt_recovery(
agent_name, task, similar_executions
)
if recovery_output:
outputs[agent_name] = recovery_output
success = True
error = None
continue
break
except Exception as agent_error:
logger.error(
f"Error executing agent {agent_name}: {str(agent_error)}"
)
success = False
error = f"Agent {agent_name} failed: {str(agent_error)}"
break
# Create result
result = SwarmOutput(
outputs=outputs,
execution_time=time.time() - start_time,
success=success,
error=error,
metadata={
"task": task,
"used_optimal_sequence": optimal_sequence
is not None,
"similar_executions_found": len(
similar_executions
),
"execution_order": execution_order,
"historical_context_used": bool(
historical_context
),
},
)
# Store execution in memory
await self._store_execution_async(task, result)
return result
except Exception as e:
logger.error(f"Swarm execution failed: {str(e)}")
return SwarmOutput(
outputs=outputs,
execution_time=time.time() - start_time,
success=False,
error=str(e),
metadata={"task": task},
)
def run(self, task: str) -> SwarmOutput:
"""Synchronous interface to execute the swarm."""
return asyncio.run(self.execute(task))
def _extract_success_patterns(
self, similar_executions: List[Dict]
) -> Dict:
"""Extract success patterns from similar executions."""
patterns = {}
successful_execs = [
ex for ex in similar_executions if ex["success"]
]
if successful_execs:
patterns = {
"common_sequences": self._find_common_sequences(
successful_execs
),
"avg_execution_time": sum(
ex["execution_time"] for ex in successful_execs
)
/ len(successful_execs),
"successful_strategies": self._extract_strategies(
successful_execs
),
}
return patterns
def _attempt_recovery(
self,
failed_agent: str,
task: str,
similar_executions: List[Dict],
) -> Optional[AgentOutput]:
"""Attempt to recover from failure using memory."""
for execution in similar_executions:
if (
execution["success"]
and failed_agent in execution["outputs"]
):
historical_output = execution["outputs"][failed_agent]
return AgentOutput(
agent_name=failed_agent,
output=historical_output["output"],
execution_time=historical_output[
"execution_time"
],
metadata={
"recovered_from_memory": True,
"original_task": execution["task"],
},
)
return None
async def _store_execution_async(
self, task: str, result: SwarmOutput
):
"""Asynchronously store execution in memory."""
try:
await asyncio.to_thread(
self.memory.store_execution, task, result
)
except Exception as e:
logger.error(
f"Failed to store execution in memory: {str(e)}"
)
def add_agent(self, agent: Agent, dependencies: List[str] = None):
"""Add a new agent to the swarm."""
dependencies = dependencies or []
self.agents[agent.agent_name] = agent
self.dependencies[agent.agent_name] = dependencies
self.graph.add_node(agent.agent_name, agent=agent)
for dep in dependencies:
if dep not in self.agents:
raise ValueError(f"Dependency {dep} not found")
self.graph.add_edge(dep, agent.agent_name)
self._validate_graph()
if __name__ == "__main__":
try:
# Create agents
data_collector = Agent(
agent_name="Market-Data-Collector",
model_name="gpt-4o-mini",
max_loops=1,
streaming_on=True,
)
trend_analyzer = Agent(
agent_name="Market-Trend-Analyzer",
model_name="gpt-4o-mini",
max_loops=1,
streaming_on=True,
)
report_generator = Agent(
agent_name="Investment-Report-Generator",
model_name="gpt-4o-mini",
max_loops=1,
streaming_on=True,
)
# Create swarm
swarm = GraphSwarm(
agents=[
(data_collector, []),
(trend_analyzer, ["Market-Data-Collector"]),
(report_generator, ["Market-Trend-Analyzer"]),
],
swarm_name="Market Analysis Intelligence Network",
)
# Run the swarm
result = swarm.run(
"Analyze current market trends for tech stocks and provide investment recommendations"
)
# Print results
print(f"Execution success: {result.success}")
print(f"Total time: {result.execution_time:.2f} seconds")
for agent_name, output in result.outputs.items():
print(f"\nAgent: {agent_name}")
print(f"Output: {output.output}")
if output.error:
print(f"Error: {output.error}")
except Exception as error:
logger.error(error)
raise error

@ -5,7 +5,9 @@ import networkx as nx
from pydantic.v1 import BaseModel, Field, validator
from swarms.structs.agent import Agent # noqa: F401
from swarms.utils.loguru_logger import logger
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="graph_workflow")
class NodeType(str, Enum):

@ -1,72 +1,165 @@
from typing import List, Dict
from typing import List, Dict, Optional, Union, Callable, Any
from pydantic import BaseModel, Field
from swarms.structs.conversation import Conversation
from swarms.utils.loguru_logger import logger
from swarms.structs.agent import Agent
from datetime import datetime
import json
from uuid import uuid4
from swarms.schemas.agent_step_schemas import ManySteps
import logging
from swarms.structs.agent import Agent
from swarms.structs.agents_available import showcase_available_agents
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Message(BaseModel):
"""Single message in the conversation"""
role: str
content: str
timestamp: datetime = Field(default_factory=datetime.utcnow)
class AgentMetadata(BaseModel):
"""Metadata for tracking agent state and configuration"""
agent_name: str
agent_type: str
system_prompt: Optional[str] = None
description: Optional[str] = None
config: Dict[str, Any] = Field(default_factory=dict)
class InteractionLog(BaseModel):
"""Log entry for a single interaction"""
id: str = Field(default_factory=lambda: uuid4().hex)
agent_name: str
position: int
input_text: str
output_text: str
timestamp: datetime = Field(default_factory=datetime.utcnow)
metadata: Dict[str, Any] = Field(default_factory=dict)
class GroupChatState(BaseModel):
"""Complete state of the group chat"""
class GroupChatInput(BaseModel):
id: str = Field(default_factory=lambda: uuid4().hex)
name: Optional[str] = None
description: Optional[str] = None
admin_name: str
group_objective: str
agents: List[Dict[str, str]]
max_rounds: int
selector_agent: Dict[str, str]
rules: str
rules: Optional[str] = None
agent_metadata: List[AgentMetadata]
messages: List[Message]
interactions: List[InteractionLog]
created_at: datetime = Field(default_factory=datetime.utcnow)
updated_at: datetime = Field(default_factory=datetime.utcnow)
class GroupChatOutput(BaseModel):
id: str = Field(default_factory=lambda: uuid4().hex)
task: str = Field(..., description=None)
input_config: GroupChatInput
agent_outputs: List[ManySteps] = Field(..., description=None)
# Todo:
# Build a function that prompts the llm to output the
# [Agent-Name] in square brackets and then the question or something
# An agentic Language notation
class AgentWrapper:
"""Wrapper class to standardize agent interfaces"""
def __init__(
self,
agent: Union["Agent", Callable],
agent_name: str,
system_prompt: Optional[str] = None,
):
self.agent = agent
self.agent_name = agent_name
self.system_prompt = system_prompt
self._validate_agent()
def _validate_agent(self):
"""Validate that the agent has the required interface"""
if hasattr(self.agent, "run"):
self.run = self.agent.run
elif callable(self.agent):
self.run = self.agent
else:
raise ValueError(
"Agent must either have a 'run' method or be callable"
)
def get_metadata(self) -> AgentMetadata:
"""Extract metadata from the agent"""
return AgentMetadata(
agent_name=self.agent_name,
agent_type=type(self.agent).__name__,
system_prompt=self.system_prompt,
config={
k: v
for k, v in self.agent.__dict__.items()
if isinstance(v, (str, int, float, bool, dict, list))
},
)
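# Example usage (editor's sketch, not part of the diff): shows that the wrapper
# above accepts a plain callable with no run() method. Names are illustrative
# and the snippet assumes AgentWrapper is in scope (same module).
def echo_agent(task: str) -> str:
    return f"[echo] {task}"


wrapped = AgentWrapper(
    agent=echo_agent,
    agent_name="Echo-Agent",
    system_prompt="Repeat the task back verbatim.",
)

print(wrapped.run("ping"))                 # -> "[echo] ping"
print(wrapped.get_metadata().agent_type)   # -> "function"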
class GroupChat:
"""Manager class for a group chat.
"""Enhanced GroupChat manager with state persistence and comprehensive logging.
This class handles the management of a group chat, including initializing the conversation,
selecting the next speaker, resetting the chat, and executing the chat rounds.
This class implements a multi-agent chat system with the following key features:
- State persistence to disk
- Comprehensive interaction logging
- Configurable agent selection
- Early stopping conditions
- Conversation export capabilities
Args:
agents (List[Agent], optional): List of agents participating in the group chat. Defaults to None.
max_rounds (int, optional): Maximum number of chat rounds. Defaults to 10.
admin_name (str, optional): Name of the admin user. Defaults to "Admin".
group_objective (str, optional): Objective of the group chat. Defaults to None.
selector_agent (Agent, optional): Agent responsible for selecting the next speaker. Defaults to None.
rules (str, optional): Rules for the group chat. Defaults to None.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
The GroupChat coordinates multiple agents to have a goal-directed conversation,
with one agent speaking at a time based on a selector agent's decisions.
Attributes:
agents (List[Agent]): List of agents participating in the group chat.
max_rounds (int): Maximum number of chat rounds.
admin_name (str): Name of the admin user.
group_objective (str): Objective of the group chat.
selector_agent (Agent): Agent responsible for selecting the next speaker.
messages (Conversation): Conversation object for storing the chat messages.
name (Optional[str]): Name of the group chat
description (Optional[str]): Description of the group chat's purpose
agents (List[Union["Agent", Callable]]): List of participating agents
max_rounds (int): Maximum number of conversation rounds
admin_name (str): Name of the administrator
group_objective (str): The goal/objective of the conversation
selector_agent (Union["Agent", Callable]): Agent that selects next speaker
rules (Optional[str]): Rules governing the conversation
state_path (Optional[str]): Path to save conversation state
showcase_agents_on (bool): Whether to showcase agent capabilities
"""
def __init__(
self,
name: str = None,
description: str = None,
agents: List[Agent] = None,
name: Optional[str] = None,
description: Optional[str] = None,
agents: List[Union["Agent", Callable]] = None,
max_rounds: int = 10,
admin_name: str = "Admin",
group_objective: str = None,
selector_agent: Agent = None,
rules: str = None,
*args,
**kwargs,
selector_agent: Union["Agent", Callable] = None,
rules: Optional[str] = None,
state_path: Optional[str] = None,
showcase_agents_on: bool = False,
):
# super().__init__(agents = agents, *args, **kwargs)
if not agents:
raise ValueError(
"Agents cannot be empty. Add more agents."
)
"""Initialize a new GroupChat instance.
Args:
name: Name of the group chat
description: Description of the group chat's purpose
agents: List of participating agents
max_rounds: Maximum number of conversation rounds
admin_name: Name of the administrator
group_objective: The goal/objective of the conversation
selector_agent: Agent that selects next speaker
rules: Rules governing the conversation
state_path: Path to save conversation state
showcase_agents_on: Whether to showcase agent capabilities
Raises:
ValueError: If no agents are provided
"""
self.name = name
self.description = description
self.agents = agents
@ -74,184 +167,327 @@ class GroupChat:
self.admin_name = admin_name
self.group_objective = group_objective
self.selector_agent = selector_agent
self.rules = rules
self.state_path = state_path
self.showcase_agents_on = showcase_agents_on
if not agents:
raise ValueError("At least two agents are required")
# Generate unique state path if not provided
self.state_path = (
state_path or f"group_chat_{uuid4().hex}.json"
)
# Wrap all agents to standardize interface
self.wrapped_agents = [
AgentWrapper(
agent,
(
f"Agent_{i}"
if not hasattr(agent, "agent_name")
else agent.agent_name
),
)
for i, agent in enumerate(agents)
]
# Configure selector agent
self.selector_agent = AgentWrapper(
selector_agent or self.wrapped_agents[0].agent,
"Selector",
"Select the next speaker based on the conversation context",
)
# Initialize the conversation
self.message_history = Conversation(
system_prompt=self.group_objective,
time_enabled=True,
user=self.admin_name,
# Initialize conversation state
self.state = GroupChatState(
name=name,
description=description,
admin_name=admin_name,
group_objective=group_objective,
max_rounds=max_rounds,
rules=rules,
*args,
**kwargs,
agent_metadata=[
agent.get_metadata() for agent in self.wrapped_agents
],
messages=[],
interactions=[],
)
# Initialize log for interactions
self.group_log = GroupChatLog(
admin_name=self.admin_name,
group_objective=self.group_objective,
# Showcase agents if enabled
if self.showcase_agents_on is True:
self.showcase_agents()
def showcase_agents(self):
"""Showcase available agents and update their system prompts.
This method displays agent capabilities and updates each agent's
system prompt with information about other agents in the group.
"""
out = showcase_available_agents(
name=self.name,
description=self.description,
agents=self.wrapped_agents,
)
@property
def agent_names(self) -> List[str]:
"""Return the names of the agents in the group chat."""
return [agent.agent_name for agent in self.agents]
for agent in self.wrapped_agents:
# Initialize system_prompt if None
if agent.system_prompt is None:
agent.system_prompt = ""
agent.system_prompt += out
def reset(self):
"""Reset the group chat."""
logger.info("Resetting GroupChat")
self.message_history.clear()
def save_state(self) -> None:
"""Save current conversation state to disk.
def agent_by_name(self, name: str) -> Agent:
"""Find an agent whose name is contained within the given 'name' string.
The state is saved as a JSON file at the configured state_path.
"""
with open(self.state_path, "w") as f:
json.dump(self.state.dict(), f, default=str, indent=2)
logger.info(f"State saved to {self.state_path}")
@classmethod
def load_state(cls, state_path: str) -> "GroupChat":
"""Load GroupChat from saved state.
Args:
name (str): Name string to search for.
state_path: Path to the saved state JSON file
Returns:
Agent: Agent object with a name contained in the given 'name' string.
GroupChat: A new GroupChat instance with restored state
Raises:
ValueError: If no agent is found with a name contained in the given 'name' string.
FileNotFoundError: If state file doesn't exist
json.JSONDecodeError: If state file is invalid JSON
"""
for agent in self.agents:
if agent.agent_name in name:
return agent
raise ValueError(
f"No agent found with a name contained in '{name}'."
with open(state_path, "r") as f:
state_dict = json.load(f)
# Convert loaded data back to state model
state = GroupChatState(**state_dict)
# Initialize with minimal config, then restore state
instance = cls(
name=state.name,
admin_name=state.admin_name,
agents=[], # Temporary empty list
group_objective=state.group_objective,
)
instance.state = state
return instance
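# Illustrative sketch of persisting and restoring a chat via the
# save_state/load_state pair above. Assumes `chat` is an already-constructed
# GroupChat instance from this module; names are placeholders.
#
# chat.save_state()  # writes the JSON file at chat.state_path
# restored = GroupChat.load_state(chat.state_path)
# print(restored.state.group_objective)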
def next_agent(self, agent: Agent) -> Agent:
"""Return the next agent in the list.
def _log_interaction(
self,
agent_name: str,
position: int,
input_text: str,
output_text: str,
) -> None:
"""Log a single interaction in the conversation.
Args:
agent (Agent): Current agent.
Returns:
Agent: Next agent in the list.
agent_name: Name of the speaking agent
position: Position in conversation sequence
input_text: Input context provided to agent
output_text: Agent's response
"""
return self.agents[
(self.agent_names.index(agent.agent_name) + 1)
% len(self.agents)
]
log_entry = InteractionLog(
agent_name=agent_name,
position=position,
input_text=input_text,
output_text=output_text,
metadata={
"current_agents": [
a.agent_name for a in self.wrapped_agents
],
"round": position // len(self.wrapped_agents),
},
)
self.state.interactions.append(log_entry)
self.save_state()
def select_speaker_msg(self):
"""Return the message for selecting the next speaker."""
prompt = f"""
You are in a role play game. The following roles are available:
{self._participant_roles()}.
def _add_message(self, role: str, content: str) -> None:
"""Add a message to the conversation history.
Read the following conversation.
Then select the next role from {self.agent_names} to play. Only return the role.
Args:
role: Speaker's role/name
content: Message content
"""
return prompt
message = Message(role=role, content=content)
self.state.messages.append(message)
self.save_state()
def select_speaker(
self, last_speaker_agent: Agent, selector_agent: Agent
) -> Agent:
"""Select the next speaker.
def select_next_speaker(
self, last_speaker: AgentWrapper
) -> AgentWrapper:
"""Select the next speaker using the selector agent.
Args:
last_speaker_agent (Agent): Last speaker in the conversation.
selector_agent (Agent): Agent responsible for selecting the next speaker.
last_speaker: The agent who spoke last
Returns:
Agent: Next speaker.
AgentWrapper: The next agent to speak
Note:
Falls back to round-robin selection if selector agent fails
"""
logger.info("Selecting a new speaker")
selector_agent.system_prompt = self.select_speaker_msg()
n_agents = len(self.agent_names)
if n_agents < 3:
logger.warning(
f"GroupChat is underpopulated with {n_agents} agents. Direct communication might be more efficient."
)
self.message_history.add(
role=self.admin_name,
content=f"Read the above conversation. Then select the next most suitable role from {self.agent_names} to play. Only return the role.",
)
name = selector_agent.run(
self.message_history.return_history_as_string()
conversation_history = "\n".join(
[
f"{msg.role}: {msg.content}"
for msg in self.state.messages
]
)
try:
selected_agent = self.agent_by_name(name)
return selected_agent
except ValueError:
return self.next_agent(last_speaker_agent)
def _participant_roles(self):
"""Print the roles of the participants.
selection_prompt = f"""
Current speakers: {[agent.agent_name for agent in self.wrapped_agents]}
Last speaker: {last_speaker.agent_name}
Group objective: {self.state.group_objective}
Returns:
str: Participant roles.
Based on the conversation history and group objective, select the next most appropriate speaker.
Only return the speaker's name.
Conversation history:
{conversation_history}
"""
return "\n".join(
[
f"{agent.agent_name}: {agent.system_prompt}"
for agent in self.agents
]
try:
next_speaker_name = self.selector_agent.run(
selection_prompt
).strip()
return next(
agent
for agent in self.wrapped_agents
if agent.agent_name in next_speaker_name
)
except (StopIteration, Exception) as e:
logger.warning(
f"Selector agent failed: {str(e)}. Falling back to round-robin."
)
# Fallback to round-robin if selection fails
current_idx = self.wrapped_agents.index(last_speaker)
return self.wrapped_agents[
(current_idx + 1) % len(self.wrapped_agents)
]
def run(self, task: str, *args, **kwargs):
"""Call 'GroupChatManager' instance as a function.
def run(self, task: str) -> str:
"""Execute the group chat conversation.
Args:
task (str): Task to be performed.
task: The initial task/question to discuss
Returns:
str: Reply from the last speaker.
str: The final response from the conversation
Raises:
Exception: If any error occurs during execution
"""
try:
logger.info(f"Starting GroupChat with task: {task}")
self._add_message(self.state.admin_name, task)
current_speaker = self.wrapped_agents[0]
final_response = None
for round_num in range(self.state.max_rounds):
# Select next speaker
current_speaker = self.select_next_speaker(
current_speaker
)
logger.info(
f"Activating GroupChat with {len(self.agents)} Agents"
f"Selected speaker: {current_speaker.agent_name}"
)
self.message_history.add(
self.selector_agent.agent_name, task
# Prepare context and get response
conversation_history = "\n".join(
[
f"{msg.role}: {msg.content}"
for msg in self.state.messages[
-10:
] # Last 10 messages for context
]
)
for i in range(self.max_rounds):
speaker_agent = self.select_speaker(
last_speaker_agent=self.selector_agent,
selector_agent=self.selector_agent,
try:
response = current_speaker.run(
conversation_history
)
logger.info(
f"Next speaker selected: {speaker_agent.agent_name}"
final_response = response
except Exception as e:
logger.error(
f"Agent {current_speaker.agent_name} failed: {str(e)}"
)
reply = speaker_agent.run(
self.message_history.return_history_as_string(),
*args,
**kwargs,
continue
# Log interaction and add to message history
self._log_interaction(
current_speaker.agent_name,
round_num,
conversation_history,
response,
)
self.message_history.add(
speaker_agent.agent_name, reply
self._add_message(
current_speaker.agent_name, response
)
# Log the interaction
self.group_log.log_interaction(
agent_name=speaker_agent.agent_name,
position=i,
input_text=self.message_history.return_history_as_string(),
output_text=reply,
# Optional: Add early stopping condition based on response content
if (
"TASK_COMPLETE" in response
or "CONCLUSION" in response
):
logger.info(
"Task completion detected, ending conversation"
)
if i == self.max_rounds - 1:
break
return reply
return final_response or "No valid response generated"
except Exception as error:
logger.error(
f"Error detected: {error}. Please optimize the inputs and submit an issue on the swarms GitHub."
)
raise error
except Exception as e:
logger.error(f"Error in GroupChat execution: {str(e)}")
raise
def get_conversation_summary(self) -> Dict[str, Any]:
"""Return a summary of the conversation.
def get_group_log_as_json(self) -> str:
"""Return the interaction log as a JSON string."""
return self.group_log.return_json()
Returns:
Dict containing conversation metrics and status
"""
return {
"id": self.state.id,
"total_interactions": len(self.state.interactions),
"participating_agents": [
agent.agent_name for agent in self.wrapped_agents
],
"conversation_length": len(self.state.messages),
"duration": (
datetime.utcnow() - self.state.created_at
).total_seconds(),
"objective_completed": any(
"TASK_COMPLETE" in msg.content
for msg in self.state.messages
),
}
def export_conversation(
self, format: str = "json"
) -> Union[str, Dict]:
"""Export the conversation in the specified format.
Args:
format: Output format ("json" or "text")
Returns:
Union[str, Dict]: Conversation in requested format
Raises:
ValueError: If format is not supported
"""
if format == "json":
return self.state.dict()
elif format == "text":
return "\n".join(
[
f"{msg.role} ({msg.timestamp}): {msg.content}"
for msg in self.state.messages
]
)
else:
raise ValueError(f"Unsupported export format: {format}")
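# Example usage (illustrative sketch): end-to-end run, summary, and export of a
# GroupChat. `researcher_agent` and `critic_agent` are placeholder Agent
# instances, configured as elsewhere in this diff (agent_name, system_prompt, model).
#
# chat = GroupChat(
#     name="Research Sync",
#     description="Agents coordinate on a shared research objective.",
#     agents=[researcher_agent, critic_agent],
#     group_objective="Draft a literature-review outline.",
#     max_rounds=5,
# )
# final_answer = chat.run("Propose an outline for the review.")
# print(chat.get_conversation_summary())
# print(chat.export_conversation(format="text"))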

@ -0,0 +1,244 @@
import os
import asyncio
import json
from pydantic import BaseModel, Field
from typing import List, Dict, Any
from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv
from swarms.utils.formatter import formatter
# Load environment variables
load_dotenv()
# Get OpenAI API key
api_key = os.getenv("OPENAI_API_KEY")
# Define Pydantic schema for agent outputs
class AgentOutput(BaseModel):
"""Schema for capturing the output of each agent."""
agent_name: str = Field(..., description="The name of the agent")
message: str = Field(
...,
description="The agent's response or contribution to the group chat",
)
metadata: Dict[str, Any] = Field(
default_factory=dict,
description="Additional metadata about the agent's response",
)
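# A minimal sketch of one captured response; the metadata key mirrors the
# `context_length` entry populated in _agent_conversation below, and the
# agent name/message values are placeholders.
#
# AgentOutput(
#     agent_name="Tax-Adviser-Agent",
#     message="Contribution limits for this year are ...",
#     metadata={"context_length": 200000},
# )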
class GroupChat:
"""
GroupChat class to enable multiple agents to communicate in an asynchronous group chat.
Each agent is aware of all other agents, every message exchanged, and the social context.
"""
def __init__(
self,
name: str,
description: str,
agents: List[Agent],
max_loops: int = 1,
):
"""
Initialize the GroupChat.
Args:
name (str): Name of the group chat.
description (str): Description of the purpose of the group chat.
agents (List[Agent]): A list of agents participating in the chat.
max_loops (int): Maximum number of loops to run through all agents.
"""
self.name = name
self.description = description
self.agents = agents
self.max_loops = max_loops
self.chat_history = (
[]
) # Stores all messages exchanged in the chat
formatter.print_panel(
f"Initialized GroupChat '{self.name}' with {len(self.agents)} agents. Max loops: {self.max_loops}",
title="Groupchat Swarm",
)
async def _agent_conversation(
self, agent: Agent, input_message: str
) -> AgentOutput:
"""
Facilitate a single agent's response to the chat.
Args:
agent (Agent): The agent responding.
input_message (str): The message triggering the response.
Returns:
AgentOutput: The agent's response captured in a structured format.
"""
formatter.print_panel(
f"Agent '{agent.agent_name}' is responding to the message: {input_message}",
title="Groupchat Swarm",
)
response = await asyncio.to_thread(agent.run, input_message)
output = AgentOutput(
agent_name=agent.agent_name,
message=response,
metadata={"context_length": agent.context_length},
)
# logger.debug(f"Agent '{agent.agent_name}' response: {response}")
return output
async def _run(self, initial_message: str) -> List[AgentOutput]:
"""
Execute the group chat asynchronously, looping through all agents up to max_loops.
Args:
initial_message (str): The initial message to start the chat.
Returns:
List[AgentOutput]: The responses of all agents across all loops.
"""
formatter.print_panel(
f"Starting group chat '{self.name}' with initial message: {initial_message}",
title="Groupchat Swarm",
)
self.chat_history.append(
{"sender": "System", "message": initial_message}
)
outputs = []
for loop in range(self.max_loops):
formatter.print_panel(
f"Group chat loop {loop + 1}/{self.max_loops}",
title="Groupchat Swarm",
)
for agent in self.agents:
# Create a custom input message for each agent, sharing the chat history and social context
input_message = (
f"Chat History:\n{self._format_chat_history()}\n\n"
f"Participants:\n"
+ "\n".join(
[
f"- {a.agent_name}: {a.system_prompt}"
for a in self.agents
]
)
+ f"\n\nNew Message: {initial_message}\n\n"
f"You are '{agent.agent_name}'. Remember to keep track of the social context, who is speaking, "
f"and respond accordingly based on your role: {agent.system_prompt}."
)
# Collect agent's response
output = await self._agent_conversation(
agent, input_message
)
outputs.append(output)
# Update chat history with the agent's response
self.chat_history.append(
{
"sender": agent.agent_name,
"message": output.message,
}
)
formatter.print_panel(
"Group chat completed. All agent responses captured.",
title="Groupchat Swarm",
)
return outputs
def run(self, task: str, *args, **kwargs):
"""Synchronous entry point; drives the async _run method to completion."""
return asyncio.run(self._run(task))
def _format_chat_history(self) -> str:
"""
Format the chat history for agents to understand the context.
Returns:
str: The formatted chat history as a string.
"""
return "\n".join(
[
f"{entry['sender']}: {entry['message']}"
for entry in self.chat_history
]
)
def __str__(self) -> str:
"""String representation of the group chat's outputs."""
return self._format_chat_history()
def to_json(self) -> str:
"""Return the group chat's outputs serialized as a JSON string."""
return json.dumps(
[
{"sender": entry["sender"], "message": entry["message"]}
for entry in self.chat_history
],
indent=2,
)
# Example Usage
if __name__ == "__main__":
load_dotenv()
# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")
# Create an instance of the OpenAIChat class
model = OpenAIChat(
openai_api_key=api_key,
model_name="gpt-4o-mini",
temperature=0.1,
)
# Example agents
agent1 = Agent(
agent_name="Financial-Analysis-Agent",
system_prompt="You are a financial analyst specializing in investment strategies.",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
output_type="string",
streaming_on=False,
)
agent2 = Agent(
agent_name="Tax-Adviser-Agent",
system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
llm=model,
max_loops=1,
autosave=False,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
output_type="string",
streaming_on=False,
)
# Create group chat
group_chat = GroupChat(
name="Financial Discussion",
description="A group chat for financial analysis and tax advice.",
agents=[agent1, agent2],
)
# Run the group chat
group_chat.run(
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria? What do you guys think?"
)

@ -1,7 +1,7 @@
from typing import List, Any
from loguru import logger
from pydantic import BaseModel, Field
from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.agent import Agent
from swarms.structs.concat import concat_strings
@ -9,6 +9,7 @@ from swarms.structs.agent_registry import AgentRegistry
from swarm_models.base_llm import BaseLLM
from swarms.structs.conversation import Conversation
logger = initialize_logger(log_folder="hiearchical_swarm")
# Example usage:
HIEARCHICAL_AGENT_SYSTEM_PROMPT = """

@ -1,26 +1,14 @@
import concurrent.futures
import re
import sys
from collections import Counter
from typing import Any, Callable, List, Optional
from loguru import logger
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.utils.file_processing import create_file
from swarms.utils.loguru_logger import initialize_logger
# Configure loguru logger with advanced settings
logger.remove()
logger.add(
sys.stderr,
colorize=True,
format="<green>{time}</green> <level>{message}</level>",
backtrace=True,
diagnose=True,
enqueue=True,
catch=True,
)
logger = initialize_logger(log_folder="majority_voting")
def extract_last_python_code_block(text):

@ -1,214 +0,0 @@
import hashlib
from time import time_ns
from typing import Callable, List, Optional, Sequence, Union
from swarms.structs.agent import Agent
from swarms.utils.loguru_logger import logger
from swarms.structs.base_swarm import BaseSwarm
def _hash(input: str):
"""
Hashes the input string using SHA256 algorithm.
Args:
input (str): The string to be hashed.
Returns:
str: The hexadecimal representation of the hash value.
"""
hex_dig = hashlib.sha256(input.encode("utf-8")).hexdigest()
return hex_dig
def msg_hash(
agent: Agent, content: str, turn: int, msg_type: str = "text"
):
"""
Generate a hash value for a message.
Args:
agent (Agent): The agent sending the message.
content (str): The content of the message.
turn (int): The turn number of the message.
msg_type (str, optional): The type of the message. Defaults to "text".
Returns:
int: The hash value of the message.
"""
time = time_ns()
return _hash(
f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:"
f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}"
)
class MessagePool(BaseSwarm):
"""
A class representing a message pool for agents in a swarm.
Attributes:
agents (Optional[Sequence[Agent]]): The list of agents in the swarm.
moderator (Optional[Agent]): The moderator agent.
turns (Optional[int]): The number of turns.
routing_function (Optional[Callable]): The routing function for message distribution.
show_names (Optional[bool]): Flag indicating whether to show agent names.
messages (List[Dict]): The list of messages in the pool.
Examples:
>>> from swarms.structs.agent import Agent
>>> from swarms.structs.message_pool import MessagePool
>>> agent1 = Agent(agent_name="agent1")
>>> agent2 = Agent(agent_name="agent2")
>>> agent3 = Agent(agent_name="agent3")
>>> moderator = Agent(agent_name="moderator")
>>> agents = [agent1, agent2, agent3]
>>> message_pool = MessagePool(agents=agents, moderator=moderator, turns=5)
>>> message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
>>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
>>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
>>> message_pool.get_all_messages()
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent1, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent2, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
"""
def __init__(
self,
agents: Optional[Sequence[Agent]] = None,
moderator: Optional[Agent] = None,
turns: Optional[int] = 5,
routing_function: Optional[Callable] = None,
show_names: Optional[bool] = False,
autosave: Optional[bool] = False,
*args,
**kwargs,
):
super().__init__()
self.agent = agents
self.moderator = moderator
self.turns = turns
self.routing_function = routing_function
self.show_names = show_names
self.autosave = autosave
self.messages = []
logger.info("MessagePool initialized")
logger.info(f"Number of agents: {len(agents)}")
logger.info(
f"Agents: {[agent.agent_name for agent in agents]}"
)
logger.info(f"moderator: {moderator.agent_name} is available")
logger.info(f"Number of turns: {turns}")
def add(
self,
agent: Agent,
content: str,
turn: int,
visible_to: Union[str, List[str]] = "all",
logged: bool = True,
):
"""
Add a message to the pool.
Args:
agent (Agent): The agent sending the message.
content (str): The content of the message.
turn (int): The turn number.
visible_to (Union[str, List[str]], optional): The agents who can see the message. Defaults to "all".
logged (bool, optional): Flag indicating whether the message should be logged. Defaults to True.
"""
self.messages.append(
{
"agent": agent,
"content": content,
"turn": turn,
"visible_to": visible_to,
"logged": logged,
}
)
logger.info(f"Message added: {content}")
def reset(self):
"""
Reset the message pool.
"""
self.messages = []
logger.info("MessagePool reset")
def last_turn(self):
"""
Get the last turn number.
Returns:
int: The last turn number.
"""
if len(self.messages) == 0:
return 0
else:
return self.messages[-1]["turn"]
@property
def last_message(self):
"""
Get the last message in the pool.
Returns:
dict: The last message.
"""
if len(self.messages) == 0:
return None
else:
return self.messages[-1]
def get_all_messages(self):
"""
Get all messages in the pool.
Returns:
List[Dict]: The list of all messages.
"""
return self.messages
def get_visible_messages(self, agent: Agent, turn: int):
"""
Get the visible messages for a given agent and turn.
Args:
agent (Agent): The agent.
turn (int): The turn number.
Returns:
List[Dict]: The list of visible messages.
"""
# Get the messages before the current turn
prev_messages = [
message
for message in self.messages
if message["turn"] < turn
]
visible_messages = []
for message in prev_messages:
if (
message["visible_to"] == "all"
or agent.agent_name in message["visible_to"]
):
visible_messages.append(message)
return visible_messages
# def query(self, query: str):
# """
# Query a message from the messages list and then pass it to the moderator
# """
# return [
# (mod, content)
# for mod, content, _ in self.messages # Add an underscore to ignore the rest of the elements
# if query in content
# ]

@ -2,13 +2,15 @@ import asyncio
import time
from typing import Any, Dict, List, Optional
from loguru import logger
from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
from swarms.telemetry.log_swarm_data import log_agent_data
from swarms.telemetry.capture_sys_data import log_agent_data
from swarms.schemas.agent_step_schemas import ManySteps
from swarms.prompts.ag_prompt import aggregator_system_prompt
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="mixture_of_agents")
time_stamp = time.strftime("%Y-%m-%d %H:%M:%S")
@ -18,7 +20,7 @@ class MixtureOfAgentsInput(BaseModel):
description: str = (
"A class to run a mixture of agents and aggregate their responses."
)
reference_agents: List[Dict[str, Any]]
agents: List[Dict[str, Any]]
aggregator_agent: Any = Field(
...,
description="An aggregator agent to be used in the mixture.",
@ -58,7 +60,7 @@ class MixtureOfAgents:
self,
name: str = "MixtureOfAgents",
description: str = "A class to run a mixture of agents and aggregate their responses.",
reference_agents: List[Agent] = [],
agents: List[Agent] = [],
aggregator_agent: Agent = None,
aggregator_system_prompt: str = "",
layers: int = 3,
@ -69,14 +71,14 @@ class MixtureOfAgents:
Args:
name (str, optional): The name of the mixture of agents. Defaults to "MixtureOfAgents".
description (str, optional): A description of the mixture of agents. Defaults to "A class to run a mixture of agents and aggregate their responses.".
reference_agents (List[Agent], optional): A list of reference agents to be used in the mixture. Defaults to [].
agents (List[Agent], optional): A list of reference agents to be used in the mixture. Defaults to [].
aggregator_agent (Agent, optional): The aggregator agent to be used in the mixture. Defaults to None.
aggregator_system_prompt (str, optional): The system prompt for the aggregator agent. Defaults to "".
layers (int, optional): The number of layers to process in the mixture. Defaults to 3.
"""
self.name = name
self.description = description
self.reference_agents: List[Agent] = reference_agents
self.agents: List[Agent] = agents
self.aggregator_agent: Agent = aggregator_agent
self.aggregator_system_prompt: str = aggregator_system_prompt
self.layers: int = layers
@ -84,9 +86,7 @@ class MixtureOfAgents:
self.input_schema = MixtureOfAgentsInput(
name=name,
description=description,
reference_agents=[
agent.to_dict() for agent in self.reference_agents
],
agents=[agent.to_dict() for agent in self.agents],
aggregator_agent=aggregator_agent.to_dict(),
aggregator_system_prompt=self.aggregator_system_prompt,
layers=self.layers,
@ -111,7 +111,7 @@ class MixtureOfAgents:
"Checking the reliability of the Mixture of Agents class."
)
if not self.reference_agents:
if not self.agents:
raise ValueError("No reference agents provided.")
if not self.aggregator_agent:
@ -203,7 +203,7 @@ class MixtureOfAgents:
results: List[str] = await asyncio.gather(
*[
self._run_agent_async(agent, task)
for agent in self.reference_agents
for agent in self.agents
]
)
@ -214,7 +214,7 @@ class MixtureOfAgents:
self._run_agent_async(
agent, task, prev_responses=results
)
for agent in self.reference_agents
for agent in self.agents
]
)

@ -5,10 +5,12 @@ from dataclasses import dataclass
import threading
from typing import List, Union, Any, Callable
from multiprocessing import cpu_count
import os
from swarms.structs.agent import Agent
from swarms.utils.calculate_func_metrics import profile_func
from swarms.utils.wrapper_clusterop import (
exec_callable_with_clusterops,
)
# Type definitions
@ -60,7 +62,6 @@ async def run_agents_concurrently_async(
return results
@profile_func
def run_agents_concurrently(
agents: List[AgentType],
task: str,
@ -106,7 +107,6 @@ def run_agents_concurrently(
return results
@profile_func
def run_agents_concurrently_multiprocess(
agents: List[Agent], task: str, batch_size: int = cpu_count()
) -> List[Any]:
@ -136,7 +136,6 @@ def run_agents_concurrently_multiprocess(
return results
@profile_func
def run_agents_sequentially(
agents: List[AgentType], task: str
) -> List[Any]:
@ -153,7 +152,6 @@ def run_agents_sequentially(
return [run_single_agent(agent, task) for agent in agents]
@profile_func
def run_agents_with_different_tasks(
agent_task_pairs: List[tuple[AgentType, str]],
batch_size: int = None,
@ -230,7 +228,6 @@ async def run_agent_with_timeout(
return None
@profile_func
def run_agents_with_timeout(
agents: List[AgentType],
task: str,
@ -296,7 +293,6 @@ def get_system_metrics() -> ResourceMetrics:
)
@profile_func
def run_agents_with_resource_monitoring(
agents: List[AgentType],
task: str,
@ -332,6 +328,110 @@ def run_agents_with_resource_monitoring(
# Implementation details...
def _run_agents_with_tasks_concurrently(
agents: List[AgentType],
tasks: List[str] = [],
batch_size: int = None,
max_workers: int = None,
) -> List[Any]:
"""
Run multiple agents with corresponding tasks concurrently.
Args:
agents: List of Agent instances to run
tasks: List of task strings to execute
batch_size: Number of agents to run in parallel
max_workers: Maximum number of threads
Returns:
List of outputs from each agent
"""
if len(agents) != len(tasks):
raise ValueError(
"The number of agents must match the number of tasks."
)
cpu_cores = os.cpu_count()
batch_size = batch_size or cpu_cores
max_workers = max_workers or cpu_cores * 2
results = []
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
async def run_agent_task_pair(
agent: AgentType, task: str, executor: ThreadPoolExecutor
) -> Any:
return await run_agent_async(agent, task, executor)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
for i in range(0, len(agents), batch_size):
batch_agents = agents[i : i + batch_size]
batch_tasks = tasks[i : i + batch_size]
batch_results = loop.run_until_complete(
asyncio.gather(
*(
run_agent_task_pair(agent, task, executor)
for agent, task in zip(
batch_agents, batch_tasks
)
)
)
)
results.extend(batch_results)
return results
def run_agents_with_tasks_concurrently(
agents: List[AgentType],
tasks: List[str] = [],
batch_size: int = None,
max_workers: int = None,
device: str = "cpu",
device_id: int = 1,
all_cores: bool = True,
no_clusterops: bool = False,
) -> List[Any]:
"""
Executes a list of agents with their corresponding tasks concurrently on a specified device.
This function orchestrates the concurrent execution of a list of agents with their respective tasks on a specified device, either CPU or GPU. It leverages the `exec_callable_with_clusterops` function to manage the execution on the specified device.
Args:
agents (List[AgentType]): A list of Agent instances or callable functions to execute concurrently.
tasks (List[str], optional): A list of task strings to execute for each agent. Defaults to an empty list.
batch_size (int, optional): The number of agents to run in parallel. Defaults to None.
max_workers (int, optional): The maximum number of threads to use for execution. Defaults to None.
device (str, optional): The device to use for execution. Defaults to "cpu".
device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 1.
all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
no_clusterops (bool, optional): If True, bypasses clusterops and runs the agents directly on the host. Defaults to False.
Returns:
List[Any]: A list of outputs from each agent execution.
"""
# If clusterops is disabled, run the agents directly on the host
if no_clusterops:
return _run_agents_with_tasks_concurrently(
agents, tasks, batch_size, max_workers
)
else:
return exec_callable_with_clusterops(
device,
device_id,
all_cores,
_run_agents_with_tasks_concurrently,
agents,
tasks,
batch_size,
max_workers,
)
# # Example usage:
# # Initialize your agents with the same model to avoid re-creating it
# agents = [
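#
# A fuller sketch of run_agents_with_tasks_concurrently; the agent variables and
# task strings below are placeholders, and the Agent instances are assumed to be
# configured as elsewhere in this diff.
#
# outputs = run_agents_with_tasks_concurrently(
#     agents=[market_agent, risk_agent],
#     tasks=[
#         "Summarize today's market movers",
#         "List the top three portfolio risks",
#     ],
#     batch_size=2,
#     no_clusterops=True,  # run directly on the host, skipping clusterops
# )
# for output in outputs:
#     print(output)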

@ -1,9 +1,12 @@
from multiprocessing import Manager, Pool, cpu_count
from typing import Sequence, Union, Callable
from typing import Sequence, Union, Callable, List
from concurrent.futures import ThreadPoolExecutor, as_completed
from swarms.structs.agent import Agent
from swarms.structs.base_workflow import BaseWorkflow
from swarms.utils.loguru_logger import logger
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="multi_process_workflow")
class MultiProcessWorkflow(BaseWorkflow):
@ -13,7 +16,7 @@ class MultiProcessWorkflow(BaseWorkflow):
Args:
max_workers (int): The maximum number of workers to use for parallel processing.
autosave (bool): Flag indicating whether to automatically save the workflow.
tasks (List[Task]): A list of Task objects representing the workflow tasks.
agents (List[Union[Agent, Callable]]): A list of Agent objects or callable functions representing the workflow tasks.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
@ -132,7 +135,7 @@ class MultiProcessWorkflow(BaseWorkflow):
callback=results_list.append,
timeout=task.timeout,
)
for agent in self.agent
for agent in self.agents
]
# Wait for all jobs to complete
@ -145,3 +148,97 @@ class MultiProcessWorkflow(BaseWorkflow):
except Exception as error:
logger.error(f"Error in run: {error}")
return None
async def async_run(self, task: str, *args, **kwargs):
"""Asynchronously run the workflow.
Args:
task (str): The task to run.
*args: Additional positional arguments for the task execution.
**kwargs: Additional keyword arguments for the task execution.
Returns:
List[Any]: The results of all executed tasks.
"""
try:
results = []
with ThreadPoolExecutor(
max_workers=self.max_workers
) as executor:
futures = [
executor.submit(
self.execute_task, task, *args, **kwargs
)
for _ in range(len(self.agents))
]
for future in as_completed(futures):
result = future.result()
results.append(result)
return results
except Exception as error:
logger.error(f"Error in async_run: {error}")
return None
def batched_run(
self, tasks: List[str], batch_size: int = 5, *args, **kwargs
):
"""Run tasks in batches.
Args:
tasks (List[str]): A list of tasks to run.
batch_size (int): The size of each batch.
*args: Additional positional arguments for the task execution.
**kwargs: Additional keyword arguments for the task execution.
Returns:
List[Any]: The results of all executed tasks.
"""
try:
results = []
for i in range(0, len(tasks), batch_size):
batch = tasks[i : i + batch_size]
with Pool(processes=self.max_workers) as pool:
results_list = pool.map(
self.execute_task, batch, *args, **kwargs
)
results.extend(results_list)
return results
except Exception as error:
logger.error(f"Error in batched_run: {error}")
return None
def concurrent_run(self, tasks: List[str], *args, **kwargs):
"""Run tasks concurrently.
Args:
tasks (List[str]): A list of tasks to run.
*args: Additional positional arguments for the task execution.
**kwargs: Additional keyword arguments for the task execution.
Returns:
List[Any]: The results of all executed tasks.
"""
try:
results = []
with ThreadPoolExecutor(
max_workers=self.max_workers
) as executor:
futures = [
executor.submit(
self.execute_task, task, *args, **kwargs
)
for task in tasks
]
for future in as_completed(futures):
result = future.result()
results.append(result)
return results
except Exception as error:
logger.error(f"Error in concurrent_run: {error}")
return None
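# Example usage (illustrative sketch): it assumes the constructor accepts the
# `max_workers` and `agents` arguments described in the class docstring, and
# that `analyst_agent` is an already-configured Agent instance (placeholder name).
#
# workflow = MultiProcessWorkflow(max_workers=4, agents=[analyst_agent])
# in_batches = workflow.batched_run(
#     ["Task A", "Task B", "Task C", "Task D"], batch_size=2
# )
# in_parallel = workflow.concurrent_run(["Task A", "Task B"])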

@ -0,0 +1,276 @@
import asyncio
import pulsar
from pulsar import ConsumerType
from loguru import logger
from swarms import Agent
from typing import List, Dict, Any
import json
class ScalableAsyncAgentSwarm:
"""
A scalable, asynchronous swarm of agents leveraging Apache Pulsar for inter-agent communication.
Provides load balancing, health monitoring, dead letter queues, and centralized logging.
"""
def __init__(
self,
pulsar_url: str,
topic: str,
dlq_topic: str,
agents_config: List[Dict[str, Any]],
):
"""
Initializes the async swarm with agents.
Args:
pulsar_url (str): The URL of the Apache Pulsar broker.
topic (str): The main topic for task distribution.
dlq_topic (str): The Dead Letter Queue topic for failed messages.
agents_config (List[Dict[str, Any]]): List of agent configurations with `name`, `description`, and `model_name`.
"""
self.pulsar_url = pulsar_url
self.topic = topic
self.dlq_topic = dlq_topic
self.agents_config = agents_config
self.client = pulsar.Client(pulsar_url)
self.consumer = self.client.subscribe(
topic,
subscription_name="swarm-task-sub",
consumer_type=ConsumerType.Shared,
)
self.dlq_producer = self.client.create_producer(dlq_topic)
self.response_logger = []
self.agents = [
self.create_agent(config) for config in agents_config
]
self.agent_index = 0
logger.info(
"Swarm initialized with agents: {}",
[agent["name"] for agent in agents_config],
)
def create_agent(
self, agent_config: Dict[str, Any]
) -> Dict[str, Any]:
"""
Creates a new agent configuration with asynchronous capabilities.
Args:
agent_config (Dict[str, Any]): Configuration dictionary with agent details.
Returns:
Dict[str, Any]: A dictionary containing agent metadata and functionality.
"""
agent_name = agent_config["name"]
description = agent_config["description"]
model_name = agent_config.get("model_name", "gpt-4o-mini")
class AsyncAgent:
"""
An asynchronous agent that processes tasks and communicates via Apache Pulsar.
"""
def __init__(
self, name: str, description: str, model_name: str
):
self.name = name
self.description = description
self.agent = Agent(
agent_name=name,
model_name=model_name,
max_loops="auto",
interactive=True,
streaming_on=True,
)
logger.info(
f"Initialized agent '{name}' - {description}"
)
async def process_task(
self, message: str
) -> Dict[str, Any]:
"""
Processes a single task using the agent.
Args:
message (str): The task message.
Returns:
Dict[str, Any]: JSON-formatted response.
"""
try:
logger.info(
f"Agent {self.name} processing task: {message}"
)
response = await asyncio.to_thread(
self.agent.run, message
)
logger.info(f"Agent {self.name} completed task.")
return {
"agent_name": self.name,
"response": response,
}
except Exception as e:
logger.error(
f"Agent {self.name} encountered an error: {e}"
)
return {"agent_name": self.name, "error": str(e)}
return {
"name": agent_name,
"instance": AsyncAgent(
agent_name, description, model_name
),
}
async def distribute_task(self, message: str):
"""
Distributes a task to the next available agent using round-robin.
Args:
message (str): The task message.
"""
agent = self.agents[self.agent_index]
self.agent_index = (self.agent_index + 1) % len(self.agents)
try:
response = await agent["instance"].process_task(message)
self.log_response(response)
except Exception as e:
logger.error(
f"Error processing task by agent {agent['name']}: {e}"
)
self.send_to_dlq(message)
async def monitor_health(self):
"""
Periodically monitors the health of agents.
"""
while True:
logger.info("Performing health check for all agents.")
for agent in self.agents:
logger.info(f"Agent {agent['name']} is online.")
await asyncio.sleep(10)
def send_to_dlq(self, message: str):
"""
Sends a failed message to the Dead Letter Queue (DLQ).
Args:
message (str): The message to send to the DLQ.
"""
try:
self.dlq_producer.send(message.encode("utf-8"))
logger.info("Message sent to Dead Letter Queue.")
except Exception as e:
logger.error(f"Failed to send message to DLQ: {e}")
def log_response(self, response: Dict[str, Any]):
"""
Logs the response to a centralized list for later analysis.
Args:
response (Dict[str, Any]): The agent's response.
"""
self.response_logger.append(response)
logger.info(f"Response logged: {response}")
async def listen_and_distribute(self):
"""
Listens to the main Pulsar topic and distributes tasks to agents.
"""
while True:
msg = self.consumer.receive()
try:
message = msg.data().decode("utf-8")
logger.info(f"Received task: {message}")
await self.distribute_task(message)
self.consumer.acknowledge(msg)
except Exception as e:
logger.error(f"Error processing message: {e}")
self.send_to_dlq(msg.data().decode("utf-8"))
self.consumer.negative_acknowledge(msg)
async def run(self):
"""
Runs the swarm asynchronously with health monitoring and task distribution.
"""
logger.info("Starting the async swarm...")
task_listener = asyncio.create_task(
self.listen_and_distribute()
)
health_monitor = asyncio.create_task(self.monitor_health())
await asyncio.gather(task_listener, health_monitor)
def shutdown(self):
"""
Safely shuts down the swarm and logs all responses.
"""
logger.info("Shutting down the swarm...")
self.client.close()
with open("responses.json", "w") as f:
json.dump(self.response_logger, f, indent=4)
logger.info("Responses saved to 'responses.json'.")
# from scalable_agent_swarm import ScalableAsyncAgentSwarm # Assuming your swarm class is saved here
if __name__ == "__main__":
# Example Configuration
PULSAR_URL = "pulsar://localhost:6650"
TOPIC = "stock-analysis"
DLQ_TOPIC = "stock-analysis-dlq"
# Agents configuration
AGENTS_CONFIG = [
{
"name": "Stock-Analysis-Agent-1",
"description": "Analyzes stock trends.",
"model_name": "gpt-4o-mini",
},
{
"name": "Stock-News-Agent",
"description": "Summarizes stock news.",
"model_name": "gpt-4o-mini",
},
{
"name": "Tech-Trends-Agent",
"description": "Tracks tech sector trends.",
"model_name": "gpt-4o-mini",
},
]
# Tasks to send
TASKS = [
"Analyze the trend for tech stocks in Q4 2024",
"Summarize the latest news on the S&P 500",
"Identify the top-performing sectors in the stock market",
"Provide a forecast for AI-related stocks for 2025",
]
# Initialize and run the swarm
swarm = ScalableAsyncAgentSwarm(
PULSAR_URL, TOPIC, DLQ_TOPIC, AGENTS_CONFIG
)
try:
# Run the swarm in the background
swarm_task = asyncio.create_task(swarm.run())
# Send tasks to the topic
client = pulsar.Client(PULSAR_URL)
producer = client.create_producer(TOPIC)
for task in TASKS:
producer.send(task.encode("utf-8"))
print(f"Sent task: {task}")
producer.close()
client.close()
# Keep the swarm running
asyncio.run(swarm_task)
except KeyboardInterrupt:
swarm.shutdown()

@ -1,39 +1,61 @@
import threading
import asyncio
import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Callable, Dict, List, Optional
from typing import Callable, Dict, List, Literal, Optional
from pydantic import BaseModel, Field
from swarms_memory import BaseVectorDatabase
from swarms.schemas.agent_step_schemas import ManySteps
from swarms.structs.agent import Agent
from swarms.structs.agents_available import showcase_available_agents
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.omni_agent_types import AgentType
from swarms.utils.loguru_logger import logger
from swarms.utils.add_docs_to_agents import handle_input_docs
from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.wrapper_clusterop import (
exec_callable_with_clusterops,
)
logger = initialize_logger(log_folder="rearrange")
# Literal of output types
OutputType = Literal[
"all",
"final",
"list",
"dict",
".json",
".md",
".txt",
".yaml",
".toml",
]
def swarm_id():
return uuid.uuid4().hex
class AgentRearrangeInput(BaseModel):
swarm_id: str
name: str
description: str
flow: str
max_loops: int
swarm_id: Optional[str] = None
name: Optional[str] = None
description: Optional[str] = None
flow: Optional[str] = None
max_loops: Optional[int] = None
time: str = Field(
default_factory=lambda: datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
),
description="The time the agent was created.",
)
def swarm_id():
return uuid.uuid4().hex
output_type: OutputType = Field(default="final")
class AgentRearrangeOutput(BaseModel):
Input: AgentRearrangeInput
outputs: List[ManySteps]
Input: Optional[AgentRearrangeInput] = None
outputs: Optional[List[ManySteps]] = None
time: str = Field(
default_factory=lambda: datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
@ -47,16 +69,38 @@ class AgentRearrange(BaseSwarm):
A class representing a swarm of agents for rearranging tasks.
Attributes:
agents (dict): A dictionary of agents, where the key is the agent's name and the value is the agent object.
flow (str): The flow pattern of the tasks.
id (str): Unique identifier for the swarm
name (str): Name of the swarm
description (str): Description of the swarm's purpose
agents (Dict[str, Agent]): Dictionary mapping agent names to Agent objects
flow (str): The flow pattern defining task execution order
max_loops (int): Maximum number of execution loops
verbose (bool): Whether to enable verbose logging
memory_system (BaseVectorDatabase): Memory system for storing agent interactions
human_in_the_loop (bool): Whether human intervention is enabled
custom_human_in_the_loop (Callable): Custom function for human intervention
return_json (bool): Whether to return output in JSON format
output_type (OutputType): Format of output ("all", "final", "list", or "dict")
swarm_history (dict): History of agent interactions
input_config (AgentRearrangeInput): Input configuration schema
output_schema (AgentRearrangeOutput): Output schema
Methods:
__init__(agents: List[Agent] = None, flow: str = None): Initializes the AgentRearrange object.
add_agent(agent: Agent): Adds an agent to the swarm.
remove_agent(agent_name: str): Removes an agent from the swarm.
add_agents(agents: List[Agent]): Adds multiple agents to the swarm.
validate_flow(): Validates the flow pattern.
run(task): Runs the swarm to rearrange the tasks.
__init__(): Initializes the AgentRearrange object
reliability_checks(): Validates swarm configuration
set_custom_flow(): Sets a custom flow pattern
add_agent(): Adds an agent to the swarm
track_history(): Records agent interaction history
remove_agent(): Removes an agent from the swarm
add_agents(): Adds multiple agents to the swarm
validate_flow(): Validates the flow pattern
run(): Executes the swarm's task processing
astream(): Runs the swarm with streaming output
batch_run(): Processes multiple tasks in batches
abatch_run(): Asynchronously processes multiple tasks in batches
concurrent_run(): Processes multiple tasks concurrently
handle_input_docs(): Adds document content to agent prompts
"""
def __init__(
@ -64,7 +108,7 @@ class AgentRearrange(BaseSwarm):
id: str = swarm_id(),
name: str = "AgentRearrange",
description: str = "A swarm of agents for rearranging tasks.",
agents: List[AgentType] = None,
agents: List[Agent] = None,
flow: str = None,
max_loops: int = 1,
verbose: bool = True,
@ -74,25 +118,26 @@ class AgentRearrange(BaseSwarm):
Callable[[str], str]
] = None,
return_json: bool = False,
output_type: OutputType = "final",
docs: List[str] = None,
doc_folder: str = None,
device: str = "cpu",
device_id: int = 0,
all_cores: bool = False,
all_gpus: bool = True,
no_use_clusterops: bool = True,
*args,
**kwargs,
):
"""
Initializes the AgentRearrange object.
Args:
agents (List[Agent], optional): A list of Agent objects. Defaults to None.
flow (str, optional): The flow pattern of the tasks. Defaults to None.
"""
super(AgentRearrange, self).__init__(
name=name,
description=description,
agents=agents,
agents=agents if agents else [],
*args,
**kwargs,
)
self.id = id
self.agents = {agent.name: agent for agent in agents}
self.agents = {agent.agent_name: agent for agent in agents}
self.flow = flow if flow is not None else ""
self.verbose = verbose
self.max_loops = max_loops if max_loops > 0 else 1
@ -100,55 +145,61 @@ class AgentRearrange(BaseSwarm):
self.human_in_the_loop = human_in_the_loop
self.custom_human_in_the_loop = custom_human_in_the_loop
self.return_json = return_json
self.swarm_history = {
agent.agent_name: [] for agent in agents
}
self.lock = threading.Lock()
self.id = uuid.uuid4().hex if id is None else id
# Run the reliability checks
self.reliability_checks()
# Output schema
self.input_config = AgentRearrangeInput(
swarm_id=self.id,
self.output_type = output_type
self.docs = docs
self.doc_folder = doc_folder
self.device = device
self.device_id = device_id
self.all_cores = all_cores
self.all_gpus = all_gpus
self.no_use_clusterops = no_use_clusterops
def showcase_agents(self):
# Get formatted agent info once
agents_available = showcase_available_agents(
name=self.name,
description=self.description,
flow=self.flow,
max_loops=self.max_loops,
agents=self.agents,
format="Table",
)
# Output schema
self.output_schema = AgentRearrangeOutput(
Input=self.input_config,
outputs=[],
)
return agents_available
def reliability_checks(self):
logger.info("Running reliability checks.")
if self.agents is None:
raise ValueError("No agents found in the swarm.")
def rearrange_prompt_prep(self) -> str:
"""Prepares a formatted prompt describing the swarm configuration.
if self.flow is None:
raise ValueError("No flow found in the swarm.")
Returns:
str: A formatted string containing the swarm's name, description,
flow pattern, and participating agents.
"""
agents_available = self.showcase_agents()
prompt = f"""
===== Swarm Configuration =====
if self.max_loops is None:
raise ValueError("No max_loops found in the swarm.")
Name: {self.name}
Description: {self.description}
logger.info(
"AgentRearrange initialized with agents: {}".format(
list(self.agents.keys())
)
)
===== Execution Flow =====
{self.flow}
# Verbose is True
if self.verbose is True:
logger.add("agent_rearrange.log")
===== Participating Agents =====
{agents_available}
===========================
"""
return prompt
def set_custom_flow(self, flow: str):
self.flow = flow
logger.info(f"Custom flow set: {flow}")
def handle_input_docs(self):
self.agents = handle_input_docs(
agents=self.agents,
docs=self.docs,
doc_folder=self.doc_folder,
)
def add_agent(self, agent: Agent):
"""
Adds an agent to the swarm.
@ -156,8 +207,8 @@ class AgentRearrange(BaseSwarm):
Args:
agent (Agent): The agent to be added.
"""
logger.info(f"Adding agent {agent.name} to the swarm.")
self.agents[agent.name] = agent
logger.info(f"Adding agent {agent.agent_name} to the swarm.")
self.agents[agent.agent_name] = agent
def track_history(
self,
@ -183,7 +234,7 @@ class AgentRearrange(BaseSwarm):
agents (List[Agent]): A list of Agent objects.
"""
for agent in agents:
self.agents[agent.name] = agent
self.agents[agent.agent_name] = agent
def validate_flow(self):
"""
@ -226,10 +277,10 @@ class AgentRearrange(BaseSwarm):
"Duplicate agent names in the flow are not allowed."
)
print("Flow is valid.")
logger.info(f"Flow: {self.flow} is valid.")
return True
def run(
def _run(
self,
task: str = None,
img: str = None,
@ -241,308 +292,406 @@ class AgentRearrange(BaseSwarm):
Runs the swarm to rearrange the tasks.
Args:
task: The initial task to be processed.
task (str, optional): The initial task to be processed. Defaults to None.
img (str, optional): Image input for agents that support it. Defaults to None.
custom_tasks (Dict[str, str], optional): Custom tasks for specific agents. Defaults to None.
output_type (str, optional): Format of the output. Can be:
- "all": String containing all agent responses concatenated
- "final": Only the final agent's response
- "list": List of all agent responses
- "dict": Dict mapping agent names to their responses
Defaults to "final".
*args: Additional positional arguments
**kwargs: Additional keyword arguments
Returns:
str: The final processed task.
Union[str, List[str], Dict[str, str]]: The processed output in the specified format
Raises:
ValueError: If flow validation fails
Exception: For any other errors during execution
"""
try:
if not self.validate_flow():
logger.error("Flow validation failed")
return "Invalid flow configuration."
tasks = self.flow.split("->")
current_task = task
all_responses = []
response_dict = {}
previous_agent = None
# If custom_tasks have the agents name and tasks then combine them
logger.info(
f"Starting task execution with {len(tasks)} steps"
)
# Handle custom tasks
if custom_tasks is not None:
logger.info("Processing custom tasks")
c_agent_name, c_task = next(
iter(custom_tasks.items())
)
# Find the position of the custom agent in the tasks list
position = tasks.index(c_agent_name)
# If there is a previous agent, merge its task with the custom task
if position > 0:
tasks[position - 1] += "->" + c_task
else:
# If there is no previous agent, just insert the custom task
tasks.insert(position, c_task)
# Set the loop counter
loop_count = 0
while loop_count < self.max_loops:
for task in tasks:
logger.info(
f"Starting loop {loop_count + 1}/{self.max_loops}"
)
for task_idx, task in enumerate(tasks):
is_last = task == tasks[-1]
agent_names = [
name.strip() for name in task.split(",")
]
# Prepare prompt with previous agent info
prompt_prefix = ""
if previous_agent and task_idx > 0:
prompt_prefix = f"Previous agent {previous_agent} output: {current_task}\n"
elif task_idx == 0:
prompt_prefix = "Initial task: "
if len(agent_names) > 1:
# Parallel processing
logger.info(
f"Running agents in parallel: {agent_names}"
)
results = []
for agent_name in agent_names:
if agent_name == "H":
# Human in the loop intervention
if (
self.human_in_the_loop
and self.custom_human_in_the_loop
):
current_task = (
self.custom_human_in_the_loop(
current_task
prompt_prefix
+ str(current_task)
)
)
else:
current_task = input(
"Enter your response:"
prompt_prefix
+ "Enter your response: "
)
results.append(current_task)
response_dict[agent_name] = (
current_task
)
else:
agent = self.agents[agent_name]
task_with_context = (
prompt_prefix + str(current_task)
if current_task
else prompt_prefix
)
result = agent.run(
current_task,
img,
is_last,
task=task_with_context,
img=img,
is_last=is_last,
*args,
**kwargs,
)
result = str(result)
results.append(result)
response_dict[agent_name] = result
self.output_schema.outputs.append(
agent.agent_output
)
logger.debug(
f"Agent {agent_name} output: {result}"
)
current_task = "; ".join(results)
all_responses.extend(results)
previous_agent = ",".join(agent_names)
else:
# Sequential processing
logger.info(
f"Running agents sequentially: {agent_names}"
f"Running agent sequentially: {agent_names[0]}"
)
agent_name = agent_names[0]
if agent_name == "H":
# Human-in-the-loop intervention
if (
self.human_in_the_loop
and self.custom_human_in_the_loop
):
current_task = (
self.custom_human_in_the_loop(
current_task
prompt_prefix
+ str(current_task)
)
)
else:
current_task = input(
"Enter the next task: "
prompt_prefix
+ "Enter the next task: "
)
response_dict[agent_name] = current_task
else:
agent = self.agents[agent_name]
task_with_context = (
prompt_prefix + str(current_task)
if current_task
else prompt_prefix
)
current_task = agent.run(
current_task,
img,
is_last,
task=task_with_context,
img=img,
is_last=is_last,
*args,
**kwargs,
)
current_task = str(current_task)
response_dict[agent_name] = current_task
self.output_schema.outputs.append(
agent.agent_output
)
logger.debug(
f"Agent {agent_name} output: {current_task}"
)
all_responses.append(current_task)
previous_agent = agent_name
loop_count += 1
# return current_task
logger.info("Task execution completed")
if self.return_json:
return self.output_schema.model_dump_json(indent=4)
else:
return current_task
# Handle different output types
if self.output_type == "all":
output = " ".join(all_responses)
elif self.output_type == "list":
output = all_responses
elif self.output_type == "dict":
output = response_dict
else: # "final"
output = current_task
return output
except Exception as e:
logger.error(f"An error occurred: {e}")
logger.error(
f"An error occurred: {e} \n {traceback.format_exc()}"
)
return e
async def astream(
def run(
self,
task: str = None,
img: str = None,
custom_tasks: Dict[str, str] = None,
device: str = "cpu",
device_id: int = 2,
all_cores: bool = True,
all_gpus: bool = False,
no_use_clusterops: bool = False,
*args,
**kwargs,
):
"""
Runs the swarm with LangChain's astream_events v1 API enabled.
NOTICE: Be sure to only call this method if you are using LangChain-based models in your swarm.
This is useful for enhancing user experience by providing real-time updates of how each agent
in the swarm is processing the current task.
Execute the agent rearrangement task with specified compute resources.
Args:
task: The initial prompt (aka task) passed to the first agent(s) in the swarm.
task (str, optional): The task to execute. Defaults to None.
img (str, optional): Path to input image if required. Defaults to None.
device (str, optional): Computing device to use ('cpu' or 'gpu'). Defaults to "cpu".
device_id (int, optional): ID of the specific device to use. Defaults to 2.
all_cores (bool, optional): Whether to use all CPU cores. Defaults to True.
all_gpus (bool, optional): Whether to use all available GPUs. Defaults to False.
no_use_clusterops (bool, optional): Whether to use clusterops. Defaults to False.
*args: Additional positional arguments passed to _run().
**kwargs: Additional keyword arguments passed to _run().
Returns:
str: The final output generated.
The result from executing the task through the cluster operations wrapper.
"""
try:
if not self.validate_flow():
return "Invalid flow configuration."
tasks = self.flow.split("->")
current_task = task
# If custom_tasks have the agents name and tasks then combine them
if custom_tasks is not None:
c_agent_name, c_task = next(
iter(custom_tasks.items())
no_use_clusterops = (
no_use_clusterops or self.no_use_clusterops
)
# Find the position of the custom agent in the tasks list
position = tasks.index(c_agent_name)
# If there is a previous agent, merge its task with the custom task
if position > 0:
tasks[position - 1] += "->" + c_task
else:
# If there is no previous agent, just insert the custom task
tasks.insert(position, c_task)
logger.info("TASK:", task)
# Set the loop counter
loop_count = 0
while loop_count < self.max_loops:
for task in tasks:
agent_names = [
name.strip() for name in task.split(",")
]
if len(agent_names) > 1:
# Parallel processing
logger.info(
f"Running agents in parallel: {agent_names}"
)
results = []
for agent_name in agent_names:
if agent_name == "H":
# Human in the loop intervention
if (
self.human_in_the_loop
and self.custom_human_in_the_loop
):
current_task = (
self.custom_human_in_the_loop(
current_task
)
if no_use_clusterops is True:
return self._run(
task=task,
img=img,
*args,
**kwargs,
)
else:
current_task = input(
"Enter your response:"
return exec_callable_with_clusterops(
device=device,
device_id=device_id,
all_cores=all_cores,
all_gpus=all_gpus,
func=self._run,
task=task,
img=img,
*args,
**kwargs,
)
else:
agent = self.agents[agent_name]
result = None
# As the current `swarms` package is using LangChain v0.1 we need to use the v0.1 version of the `astream_events` API
# Below is the link to the `astream_events` spec as outlined in the LangChain v0.1 docs
# https://python.langchain.com/v0.1/docs/expression_language/streaming/#event-reference
# Below is the link to the `astream_events` spec as outlined in the LangChain v0.2 docs
# https://python.langchain.com/v0.2/docs/versions/v0_2/migrating_astream_events/
async for evt in agent.astream_events(
current_task, version="v1"
):
# print(evt) # <- useful when building/debugging
if evt["event"] == "on_llm_end":
result = evt["data"]["output"]
print(agent.name, result)
results.append(result)
current_task = ""
for index, res in enumerate(results):
current_task += (
"# OUTPUT of "
+ agent_names[index]
+ ""
+ res
+ "\n\n"
)
else:
# Sequential processing
logger.info(
f"Running agents sequentially: {agent_names}"
)
def __call__(self, task: str, *args, **kwargs):
"""
Make the class callable by executing the run() method.
agent_name = agent_names[0]
if agent_name == "H":
# Human-in-the-loop intervention
if (
self.human_in_the_loop
and self.custom_human_in_the_loop
):
current_task = (
self.custom_human_in_the_loop(
current_task
)
)
else:
current_task = input(
"Enter the next task: "
)
else:
agent = self.agents[agent_name]
result = None
# As the current `swarms` package is using LangChain v0.1 we need to use the v0.1 version of the `astream_events` API
# Below is the link to the `astream_events` spec as outlined in the LangChain v0.1 docs
# https://python.langchain.com/v0.1/docs/expression_language/streaming/#event-reference
# Below is the link to the `astream_events` spec as outlined in the LangChain v0.2 docs
# https://python.langchain.com/v0.2/docs/versions/v0_2/migrating_astream_events/
async for evt in agent.astream_events(
f"SYSTEM: {agent.system_prompt}\nINPUT:{current_task}",
version="v1",
):
# print(evt) # <- useful when building/debugging
if evt["event"] == "on_llm_end":
result = evt["data"]["output"]
print(
agent.name, "result", result
)
current_task = result
Args:
task (str): The task to execute.
*args: Additional positional arguments passed to run().
**kwargs: Additional keyword arguments passed to run().
loop_count += 1
Returns:
The result from executing run().
"""
return self.run(task=task, *args, **kwargs)
return current_task
except Exception as e:
logger.error(f"An error occurred: {e}")
return e
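    # Usage sketch for the three entry points above (hedged: AgentRearrange and the
    # agent names are illustrative assumptions; astream requires LangChain-based agents):
    #
    #     swarm = AgentRearrange(agents=[researcher, writer], flow="researcher -> writer")
    #
    #     # Synchronous execution, optionally routed through clusterops:
    #     out = swarm.run(task="Summarize the paper", device="cpu", all_cores=True)
    #
    #     # Calling the instance directly is shorthand for run():
    #     out = swarm("Summarize the paper")
    #
    #     # Streaming execution (async):
    #     #     import asyncio
    #     #     out = asyncio.run(swarm.astream(task="Summarize the paper"))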
    def batch_run(
        self,
        tasks: List[str],
        img: Optional[List[str]] = None,
        batch_size: int = 10,
        device: str = "cpu",
        device_id: int = None,
        all_cores: bool = True,
        all_gpus: bool = False,
        *args,
        **kwargs,
    ) -> List[str]:
        """
        Process multiple tasks in batches.

        Args:
            tasks: List of tasks to process
            img: Optional list of images corresponding to tasks
            batch_size: Number of tasks to process per batch
            device: Computing device to use
            device_id: Specific device ID if applicable
            all_cores: Whether to use all CPU cores
            all_gpus: Whether to use all available GPUs

        Returns:
            List of results corresponding to input tasks
        """
        results = []
        for i in range(0, len(tasks), batch_size):
            batch_tasks = tasks[i : i + batch_size]
            batch_imgs = (
                img[i : i + batch_size]
                if img
                else [None] * len(batch_tasks)
            )

            # Process each task in the batch with a direct self.run call
            batch_results = [
                self.run(
                    task=task,
                    img=img_path,
                    device=device,
                    device_id=device_id,
                    all_cores=all_cores,
                    all_gpus=all_gpus,
                    *args,
                    **kwargs,
                )
                for task, img_path in zip(batch_tasks, batch_imgs)
            ]
            results.extend(batch_results)

        return results

    def process_agent_or_swarm(
        self, name: str, task: str, img: str, is_last, *args, **kwargs
    ):
        """
        Processes the agent or sub-swarm based on the given name.

        Args:
            name (str): The name of the agent or sub-swarm to process.
            task (str): The task to be executed.
            img (str): The image to be processed by the agents.
            is_last: Whether this is the last step in the flow.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            str: The result of the last executed task.
        """
        if name.startswith("Human"):
            return self.human_intervention(task)
        elif name in self.sub_swarm:
            return self.run_sub_swarm(
                task, name, img, *args, **kwargs
            )
        else:
            agent = self.agents[name]
            return agent.run(task, img, is_last, *args, **kwargs)

    def human_intervention(self, task: str) -> str:
        if self.human_in_the_loop and self.custom_human_in_the_loop:
            return self.custom_human_in_the_loop(task)
        else:
            return input(
                "Human intervention required. Enter your response: "
            )
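    # Sketch of batch_run over a task list (hypothetical tasks; batch_size only controls
    # how many task/image pairs are grouped per outer-loop iteration, each entry is still
    # a plain self.run call):
    #
    #     tasks = [f"Analyze document {i}" for i in range(25)]
    #     results = swarm.batch_run(tasks, batch_size=10)   # 3 groups: 10 + 10 + 5
    #     assert len(results) == len(tasks)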
    async def abatch_run(
        self,
        tasks: List[str],
        img: Optional[List[str]] = None,
        batch_size: int = 10,
        *args,
        **kwargs,
    ) -> List[str]:
        """
        Asynchronously process multiple tasks in batches.

        Args:
            tasks: List of tasks to process
            img: Optional list of images corresponding to tasks
            batch_size: Number of tasks to process per batch

        Returns:
            List of results corresponding to input tasks
        """
        results = []
        for i in range(0, len(tasks), batch_size):
            batch_tasks = tasks[i : i + batch_size]
            batch_imgs = (
                img[i : i + batch_size]
                if img
                else [None] * len(batch_tasks)
            )

            # Process the batch concurrently using asyncio.gather
            batch_coros = [
                self.astream(task=task, img=img_path, *args, **kwargs)
                for task, img_path in zip(batch_tasks, batch_imgs)
            ]
            batch_results = await asyncio.gather(*batch_coros)
            results.extend(batch_results)

        return results
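    # Sketch of driving abatch_run from synchronous code (assumes no event loop is
    # already running, e.g. a plain script rather than a notebook):
    #
    #     import asyncio
    #     results = asyncio.run(
    #         swarm.abatch_run(tasks=["task one", "task two"], batch_size=2)
    #     )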
    def concurrent_run(
        self,
        tasks: List[str],
        img: Optional[List[str]] = None,
        max_workers: Optional[int] = None,
        device: str = "cpu",
        device_id: int = None,
        all_cores: bool = True,
        all_gpus: bool = False,
        *args,
        **kwargs,
    ) -> List[str]:
        """
        Process multiple tasks concurrently using ThreadPoolExecutor.

        Args:
            tasks: List of tasks to process
            img: Optional list of images corresponding to tasks
            max_workers: Maximum number of worker threads
            device: Computing device to use
            device_id: Specific device ID if applicable
            all_cores: Whether to use all CPU cores
            all_gpus: Whether to use all available GPUs

        Returns:
            List of results corresponding to input tasks
        """
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            imgs = img if img else [None] * len(tasks)
            futures = [
                executor.submit(
                    self.run,
                    task=task,
                    img=img_path,
                    device=device,
                    device_id=device_id,
                    all_cores=all_cores,
                    all_gpus=all_gpus,
                    *args,
                    **kwargs,
                )
                for task, img_path in zip(tasks, imgs)
            ]
            return [future.result() for future in futures]
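    # Sketch of concurrent_run (threads suit I/O-bound agent calls such as remote LLM
    # requests; max_workers=None lets ThreadPoolExecutor choose its default):
    #
    #     results = swarm.concurrent_run(
    #         tasks=["task one", "task two", "task three"],
    #         max_workers=4,
    #     )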
def rearrange(

@ -2,12 +2,14 @@ import random
from swarms.structs.base_swarm import BaseSwarm
from typing import List
from swarms.structs.agent import Agent
from pydantic import BaseModel, Field
from typing import Optional
from datetime import datetime
from swarms.schemas.agent_step_schemas import ManySteps
import tenacity
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger("round-robin")
datetime_stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
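# Minimal sketch of the logger set up above (assumes initialize_logger returns a
# loguru-style logger bound to the "round-robin" namespace):
#
#     logger.info(f"RoundRobinSwarm module loaded at {datetime_stamp}")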

Some files were not shown because too many files have changed in this diff.