commit c761ec804e
@@ -0,0 +1,43 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
#
# This workflow file requires a free account on Bearer.com to manage findings, notifications and more.
# See https://docs.bearer.com/guides/bearer-cloud/
name: Bearer

on:
  push:
    branches: ["master"]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: ["master"]
  schedule:
    - cron: '24 22 * * 6'
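    # i.e., at 22:24 UTC every Saturday (fields: minute hour day-of-month month day-of-week)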

permissions:
  contents: read # for actions/checkout to fetch code
  security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
  actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status

jobs:
  bearer:
    runs-on: ubuntu-latest
    steps:
      # Checkout project source
      - uses: actions/checkout@v4
      # Scan code using Bearer CLI
      - name: Run Report
        id: report
        uses: bearer/bearer-action@828eeb928ce2f4a7ca5ed57fb8b59508cb8c79bc
        with:
          api-key: ${{ secrets.BEARER_TOKEN }}
          format: sarif
          output: results.sarif
          exit-code: 0
      # Upload SARIF file generated in previous step
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: results.sarif
@@ -0,0 +1,39 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request,
# surfacing known-vulnerable versions of the packages declared or updated in the PR.
# Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable
# packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency review'
on:
  pull_request:
    branches: [ "master" ]

# If using a dependency submission action in this workflow this permission will need to be set to:
#
# permissions:
#   contents: write
#
# https://docs.github.com/en/enterprise-cloud@latest/code-security/supply-chain-security/understanding-your-software-supply-chain/using-the-dependency-submission-api
permissions:
  contents: read
  # Write permissions for pull-requests are required for using the `comment-summary-in-pr` option; comment out if you aren't using this option
  pull-requests: write

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout repository'
        uses: actions/checkout@v4
      - name: 'Dependency Review'
        uses: actions/dependency-review-action@v4
        # Commonly enabled options, see https://github.com/actions/dependency-review-action#configuration-options for all available options.
        with:
          comment-summary-in-pr: always
          # fail-on-severity: moderate
          # deny-licenses: GPL-1.0-or-later, LGPL-2.0-or-later
          # retry-on-snapshot-warnings: true
@@ -0,0 +1,18 @@
name: Docker Image CI

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

jobs:

  build:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4
    - name: Build the Docker image
      run: docker build . --file Dockerfile --tag my-image-name:$(date +%s)
@@ -0,0 +1,50 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# This workflow integrates Python Static Analyzer (Pysa) with
# GitHub's Code Scanning feature.
#
# Python Static Analyzer (Pysa) is a security-focused static
# analysis tool that tracks flows of data from where they
# originate to where they terminate in a dangerous location.
#
# See https://pyre-check.org/docs/pysa-basics/

name: Pysa

on:
  workflow_dispatch:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]
  schedule:
    - cron: '43 5 * * 3'

permissions:
  contents: read

jobs:
  pysa:
    permissions:
      actions: read
      contents: read
      security-events: write

    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true

      - name: Run Pysa
        uses: facebook/pysa-action@f46a63777e59268613bd6e2ff4e29f144ca9e88b
        with:
          # To customize these inputs:
          # See https://github.com/facebook/pysa-action#inputs
          repo-directory: './'
          requirements-path: 'requirements.txt'
          infer-types: true
          include-default-sapp-filters: true
@@ -0,0 +1,34 @@
name: Python Package using Conda

on: [push]

jobs:
  build-linux:
    runs-on: ubuntu-latest
    strategy:
      max-parallel: 5

    steps:
    - uses: actions/checkout@v4
    - name: Set up Python 3.10
      uses: actions/setup-python@v3
      with:
        python-version: '3.10'
    - name: Add conda to system path
      run: |
        # $CONDA is an environment variable pointing to the root of the miniconda directory
        echo $CONDA/bin >> $GITHUB_PATH
    - name: Install dependencies
      run: |
        conda env update --file environment.yml --name base
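        # installing into base (already on PATH) avoids re-activating a named env in each step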
    - name: Lint with flake8
      run: |
        conda install flake8
        # stop the build if there are Python syntax errors or undefined names
        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
    - name: Test with pytest
      run: |
        conda install pytest
        pytest
@@ -0,0 +1,49 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# This workflow file requires a free account on Semgrep.dev to
# manage rules, file ignores, notifications, and more.
#
# See https://semgrep.dev/docs

name: Semgrep

on:
  push:
    branches: [ "master" ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ "master" ]
  schedule:
    - cron: '19 7 * * 3'

permissions:
  contents: read

jobs:
  semgrep:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
      actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
    name: Scan
    runs-on: ubuntu-latest
    steps:
      # Checkout project source
      - uses: actions/checkout@v4

      # Scan code using project's configuration on https://semgrep.dev/manage
      - uses: returntocorp/semgrep-action@fcd5ab7459e8d91cb1777481980d1b18b4fc6735
        with:
          publishToken: ${{ secrets.SEMGREP_APP_TOKEN }}
          publishDeployment: ${{ secrets.SEMGREP_DEPLOYMENT_ID }}
          generateSarif: "1"
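          # writes findings to semgrep.sarif for the upload step below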

      # Upload SARIF file generated in previous step
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: semgrep.sarif
        if: always()
@@ -0,0 +1,48 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: trivy

on:
  push:
    branches: [ "master" ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ "master" ]
  schedule:
    - cron: '31 0 * * 5'

permissions:
  contents: read

jobs:
  build:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
      actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
    name: Build
    runs-on: "ubuntu-20.04"
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Build an image from Dockerfile
        run: |
          docker build -t docker.io/my-organization/my-app:${{ github.sha }} .

      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@7b7aa264d83dc58691451798b4d117d53d21edfe
        with:
          image-ref: 'docker.io/my-organization/my-app:${{ github.sha }}'
          format: 'template'
          template: '@/contrib/sarif.tpl'
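          # the leading @ tells Trivy to read the SARIF template from a file bundled with the scanner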
          output: 'trivy-results.sarif'
          severity: 'CRITICAL,HIGH'

      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: 'trivy-results.sarif'
@@ -1,44 +0,0 @@
import os

from swarms_memory import ChromaDB

from swarms import Agent
from swarm_models import Anthropic
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)

# Initialize the chromadb client
chromadb = ChromaDB(
    metric="cosine",
    output_dir="finance_agent_rag",
    # docs_folder="artifacts",  # Folder of your documents
)
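
# This ChromaDB instance serves as the agent's RAG-style long-term memory:
# it is passed below as long_term_memory=chromadb so retrieved documents
# can augment the agent's context.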

# Model
model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))


# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    agent_description="Agent creates ",
    llm=model,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=3,
    context_length=200000,
    long_term_memory=chromadb,
)


agent.run(
    "What are the components of a startup's stock incentive equity plan?"
)
@@ -1,117 +0,0 @@
from swarms import Agent
from swarm_models import OpenAIChat
from swarms_memory import ChromaDB
import subprocess
import os

# Making an instance of the ChromaDB class
memory = ChromaDB(
    metric="cosine",
    n_results=3,
    output_dir="results",
    docs_folder="docs",
)

# Model
model = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
    model_name="gpt-4o-mini",
    temperature=0.1,
)


# Tools in swarms are simple python functions and docstrings
def terminal(
    code: str,
):
    """
    Run code in the terminal.

    Args:
        code (str): The code to run in the terminal.

    Returns:
        str: The output of the code.
    """
    out = subprocess.run(
        code, shell=True, capture_output=True, text=True
    ).stdout
    return str(out)


def browser(query: str):
    """
    Search the query in the browser with the `browser` tool.

    Args:
        query (str): The query to search in the browser.

    Returns:
        str: The search results.
    """
    import webbrowser

    url = f"https://www.google.com/search?q={query}"
    webbrowser.open(url)
    return f"Searching for {query} in the browser."


def create_file(file_path: str, content: str):
    """
    Create a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        content (str): The content to write to the file.

    Returns:
        str: The result of the file creation operation.
    """
    with open(file_path, "w") as file:
        file.write(content)
    return f"File {file_path} created successfully."


def file_editor(file_path: str, mode: str, content: str):
    """
    Edit a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        mode (str): The mode to open the file in.
        content (str): The content to write to the file.

    Returns:
        str: The result of the file editing operation.
    """
    with open(file_path, mode) as file:
        file.write(content)
    return f"File {file_path} edited successfully."


# Agent
agent = Agent(
    agent_name="Devin",
    system_prompt=(
        "Autonomous agent that can interact with humans and other"
        " agents. Be Helpful and Kind. Use the tools provided to"
        " assist the user. Return all code in markdown format."
    ),
    llm=model,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
    tools=[terminal, browser, file_editor, create_file],
    streaming=True,
    long_term_memory=memory,
)

# Run the agent
out = agent(
    "Create a CSV file with the latest tax rates for C corporations in the following ten states and the District of Columbia: Alabama, California, Florida, Georgia, Illinois, New York, North Carolina, Ohio, Texas, and Washington."
)
print(out)

@@ -0,0 +1,629 @@
import os
from fastapi import (
    FastAPI,
    HTTPException,
    status,
    Query,
    BackgroundTasks,
)
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import Optional, Dict, Any, List
from loguru import logger
import uvicorn
from datetime import datetime, timedelta
from uuid import UUID, uuid4
from enum import Enum
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
import traceback

from swarms import Agent
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Configure Loguru
logger.add(
    "logs/api_{time}.log",
    rotation="500 MB",
    retention="10 days",
    level="INFO",
    format="{time} {level} {message}",
    backtrace=True,
    diagnose=True,
)


class AgentStatus(str, Enum):
    """Enum for agent status."""

    IDLE = "idle"
    PROCESSING = "processing"
    ERROR = "error"
    MAINTENANCE = "maintenance"


class AgentConfig(BaseModel):
    """Configuration model for creating a new agent."""

    agent_name: str = Field(..., description="Name of the agent")
    # Note: the original declared model_name twice; the definitions are
    # merged here, keeping the effective default of "gpt-4".
    model_name: str = Field(
        default="gpt-4",
        description="Name of the LLM to use, as provided by litellm",
    )
    description: str = Field(
        default="", description="Description of the agent's purpose"
    )
    system_prompt: str = Field(
        ..., description="System prompt for the agent"
    )
    temperature: float = Field(
        default=0.1,
        ge=0.0,
        le=2.0,
        description="Temperature for the model",
    )
    max_loops: int = Field(
        default=1, ge=1, description="Maximum number of loops"
    )
    autosave: bool = Field(
        default=True, description="Enable autosave"
    )
    dashboard: bool = Field(
        default=False, description="Enable dashboard"
    )
    verbose: bool = Field(
        default=True, description="Enable verbose output"
    )
    dynamic_temperature_enabled: bool = Field(
        default=True, description="Enable dynamic temperature"
    )
    user_name: str = Field(
        default="default_user", description="Username for the agent"
    )
    retry_attempts: int = Field(
        default=1, ge=1, description="Number of retry attempts"
    )
    context_length: int = Field(
        default=200000, ge=1000, description="Context length"
    )
    output_type: str = Field(
        default="string", description="Output type (string or json)"
    )
    streaming_on: bool = Field(
        default=False, description="Enable streaming"
    )
    tags: List[str] = Field(
        default_factory=list,
        description="Tags for categorizing the agent",
    )


class AgentUpdate(BaseModel):
    """Model for updating agent configuration."""

    description: Optional[str] = None
    system_prompt: Optional[str] = None
    temperature: Optional[float] = None
    max_loops: Optional[int] = None
    tags: Optional[List[str]] = None
    status: Optional[AgentStatus] = None


class AgentSummary(BaseModel):
    """Summary model for agent listing."""

    agent_id: UUID
    agent_name: str
    description: str
    created_at: datetime
    last_used: datetime
    total_completions: int
    tags: List[str]
    status: AgentStatus


class AgentMetrics(BaseModel):
    """Model for agent performance metrics."""

    total_completions: int
    average_response_time: float
    error_rate: float
    last_24h_completions: int
    total_tokens_used: int
    uptime_percentage: float
    success_rate: float
    peak_tokens_per_minute: int


class CompletionRequest(BaseModel):
    """Model for completion requests."""

    prompt: str = Field(..., description="The prompt to process")
    agent_id: UUID = Field(..., description="ID of the agent to use")
    max_tokens: Optional[int] = Field(
        None, description="Maximum tokens to generate"
    )
    temperature_override: Optional[float] = None
    stream: bool = Field(
        default=False, description="Enable streaming response"
    )


class CompletionResponse(BaseModel):
    """Model for completion responses."""

    agent_id: UUID
    response: str
    metadata: Dict[str, Any]
    timestamp: datetime
    processing_time: float
    token_usage: Dict[str, int]


class AgentStore:
    """Enhanced store for managing agents."""

    def __init__(self):
        self.agents: Dict[UUID, Agent] = {}
        self.agent_metadata: Dict[UUID, Dict[str, Any]] = {}
        self.executor = ThreadPoolExecutor(max_workers=4)
        self._ensure_directories()

    def _ensure_directories(self):
        """Ensure required directories exist."""
        Path("logs").mkdir(exist_ok=True)
        Path("states").mkdir(exist_ok=True)

    async def create_agent(self, config: AgentConfig) -> UUID:
        """Create a new agent with the given configuration."""
        try:
            agent = Agent(
                agent_name=config.agent_name,
                system_prompt=config.system_prompt,
                model_name=config.model_name,
                max_loops=config.max_loops,
                autosave=config.autosave,
                dashboard=config.dashboard,
                verbose=config.verbose,
                dynamic_temperature_enabled=config.dynamic_temperature_enabled,
                saved_state_path=f"states/{config.agent_name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
                user_name=config.user_name,
                retry_attempts=config.retry_attempts,
                context_length=config.context_length,
                return_step_meta=True,
                output_type="str",
                streaming_on=config.streaming_on,
            )

            agent_id = uuid4()
            self.agents[agent_id] = agent
            self.agent_metadata[agent_id] = {
                "description": config.description,
                "created_at": datetime.utcnow(),
                "last_used": datetime.utcnow(),
                "total_completions": 0,
                "tags": config.tags,
                "total_tokens": 0,
                "error_count": 0,
                "response_times": [],
                # Wall-clock timestamps of completed requests, kept in step
                # with response_times so time-windowed metrics can be derived.
                "completion_timestamps": [],
                "status": AgentStatus.IDLE,
                "start_time": datetime.utcnow(),
                "downtime": timedelta(),
                "successful_completions": 0,
            }

            logger.info(f"Created agent with ID: {agent_id}")
            return agent_id

        except Exception as e:
            logger.error(f"Error creating agent: {str(e)}")
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Failed to create agent: {str(e)}",
            )

    async def get_agent(self, agent_id: UUID) -> Agent:
        """Retrieve an agent by ID."""
        agent = self.agents.get(agent_id)
        if not agent:
            logger.error(f"Agent not found: {agent_id}")
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Agent {agent_id} not found",
            )
        return agent

    async def update_agent(
        self, agent_id: UUID, update: AgentUpdate
    ) -> None:
        """Update agent configuration."""
        agent = await self.get_agent(agent_id)
        metadata = self.agent_metadata[agent_id]

        if update.system_prompt:
            agent.system_prompt = update.system_prompt
        if update.temperature is not None:
            agent.llm.temperature = update.temperature
        if update.max_loops is not None:
            agent.max_loops = update.max_loops
        if update.tags is not None:
            metadata["tags"] = update.tags
        if update.description is not None:
            metadata["description"] = update.description
        if update.status is not None:
            metadata["status"] = update.status
            if update.status == AgentStatus.MAINTENANCE:
                metadata["downtime"] += (
                    datetime.utcnow() - metadata["last_used"]
                )

        logger.info(f"Updated agent {agent_id}")

    async def list_agents(
        self,
        tags: Optional[List[str]] = None,
        status: Optional[AgentStatus] = None,
    ) -> List[AgentSummary]:
        """List all agents, optionally filtered by tags and status."""
        summaries = []
        for agent_id, agent in self.agents.items():
            metadata = self.agent_metadata[agent_id]

            # Apply filters
            if tags and not any(
                tag in metadata["tags"] for tag in tags
            ):
                continue
            if status and metadata["status"] != status:
                continue

            summaries.append(
                AgentSummary(
                    agent_id=agent_id,
                    agent_name=agent.agent_name,
                    description=metadata["description"],
                    created_at=metadata["created_at"],
                    last_used=metadata["last_used"],
                    total_completions=metadata["total_completions"],
                    tags=metadata["tags"],
                    status=metadata["status"],
                )
            )
        return summaries

    async def get_agent_metrics(self, agent_id: UUID) -> AgentMetrics:
        """Get performance metrics for an agent."""
        metadata = self.agent_metadata[agent_id]
        response_times = metadata["response_times"]

        # Calculate metrics
        total_time = datetime.utcnow() - metadata["start_time"]
        uptime = total_time - metadata["downtime"]
        uptime_percentage = (
            uptime.total_seconds() / total_time.total_seconds()
        ) * 100
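        # Uptime = wall-clock time since creation minus recorded maintenance
        # downtime, expressed as a percentage of total lifetime.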

        success_rate = (
            metadata["successful_completions"]
            / metadata["total_completions"]
            * 100
            if metadata["total_completions"] > 0
            else 0
        )

        return AgentMetrics(
            total_completions=metadata["total_completions"],
            average_response_time=(
                sum(response_times) / len(response_times)
                if response_times
                else 0
            ),
            error_rate=(
                metadata["error_count"]
                / metadata["total_completions"]
                if metadata["total_completions"] > 0
                else 0
            ),
            # Count completions whose timestamps fall within the last day;
            # response_times holds durations, so timestamps are tracked
            # separately in completion_timestamps.
            last_24h_completions=sum(
                1
                for ts in metadata.get("completion_timestamps", [])
                if (datetime.utcnow() - ts) < timedelta(days=1)
            ),
            total_tokens_used=metadata["total_tokens"],
            uptime_percentage=uptime_percentage,
            success_rate=success_rate,
            peak_tokens_per_minute=max(
                metadata.get("tokens_per_minute", {}).values(),
                default=0,
            ),
        )

    async def clone_agent(
        self, agent_id: UUID, new_name: str
    ) -> UUID:
        """Clone an existing agent with a new name."""
        original_agent = await self.get_agent(agent_id)
        original_metadata = self.agent_metadata[agent_id]

        config = AgentConfig(
            agent_name=new_name,
            description=f"Clone of {original_agent.agent_name}",
            system_prompt=original_agent.system_prompt,
            model_name=original_agent.llm.model_name,
            temperature=original_agent.llm.temperature,
            max_loops=original_agent.max_loops,
            tags=original_metadata["tags"],
        )

        return await self.create_agent(config)

    async def delete_agent(self, agent_id: UUID) -> None:
        """Delete an agent."""
        if agent_id not in self.agents:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail=f"Agent {agent_id} not found",
            )

        # Clean up any resources
        agent = self.agents[agent_id]
        if agent.autosave and os.path.exists(agent.saved_state_path):
            os.remove(agent.saved_state_path)

        del self.agents[agent_id]
        del self.agent_metadata[agent_id]
        logger.info(f"Deleted agent {agent_id}")

    async def process_completion(
        self,
        agent: Agent,
        prompt: str,
        agent_id: UUID,
        max_tokens: Optional[int] = None,
        temperature_override: Optional[float] = None,
    ) -> CompletionResponse:
        """Process a completion request using the specified agent."""
        start_time = datetime.utcnow()
        metadata = self.agent_metadata[agent_id]

        try:
            # Update agent status
            metadata["status"] = AgentStatus.PROCESSING
            metadata["last_used"] = start_time

            # Apply temporary overrides if specified
            original_temp = agent.llm.temperature
            if temperature_override is not None:
                agent.llm.temperature = temperature_override

            # Process the completion
            response = agent.run(prompt)

            # Reset overrides
            if temperature_override is not None:
                agent.llm.temperature = original_temp

            # Update metrics
            processing_time = (
                datetime.utcnow() - start_time
            ).total_seconds()
            metadata["response_times"].append(processing_time)
            metadata.setdefault("completion_timestamps", []).append(
                datetime.utcnow()
            )
            metadata["total_completions"] += 1
            metadata["successful_completions"] += 1

            # Estimate token usage (this is a rough estimate)
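            # ~1.3 tokens per whitespace-delimited word is a common rough
            # heuristic for English text; use the model's tokenizer for
            # exact counts.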
            prompt_tokens = len(prompt.split()) * 1.3
            completion_tokens = len(response.split()) * 1.3
            total_tokens = int(prompt_tokens + completion_tokens)
            metadata["total_tokens"] += total_tokens

            # Update tokens per minute tracking
            current_minute = datetime.utcnow().replace(
                second=0, microsecond=0
            )
            if "tokens_per_minute" not in metadata:
                metadata["tokens_per_minute"] = {}
            metadata["tokens_per_minute"][current_minute] = (
                metadata["tokens_per_minute"].get(current_minute, 0)
                + total_tokens
            )

            return CompletionResponse(
                agent_id=agent_id,
                response=response,
                metadata={
                    "agent_name": agent.agent_name,
                    "model_name": agent.llm.model_name,
                    "temperature": agent.llm.temperature,
                },
                timestamp=datetime.utcnow(),
                processing_time=processing_time,
                token_usage={
                    "prompt_tokens": int(prompt_tokens),
                    "completion_tokens": int(completion_tokens),
                    "total_tokens": total_tokens,
                },
            )

        except Exception as e:
            metadata["error_count"] += 1
            metadata["status"] = AgentStatus.ERROR
            logger.error(
                f"Error in completion processing: {str(e)}\n{traceback.format_exc()}"
            )
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Error processing completion: {str(e)}",
            )
        finally:
            metadata["status"] = AgentStatus.IDLE


class SwarmsAPI:
    """Enhanced API class for Swarms agent integration."""

    def __init__(self):
        self.app = FastAPI(
            title="Swarms Agent API",
            description="Production-grade API for Swarms agent interaction",
            version="1.0.0",
            docs_url="/v1/docs",
            redoc_url="/v1/redoc",
        )
        self.store = AgentStore()
        # Configure CORS
        self.app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],  # Configure appropriately for production
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

        self._setup_routes()

    def _setup_routes(self):
        """Set up API routes."""

        @self.app.post("/v1/agent", response_model=Dict[str, UUID])
        async def create_agent(config: AgentConfig):
            """Create a new agent with the specified configuration."""
            agent_id = await self.store.create_agent(config)
            return {"agent_id": agent_id}

        @self.app.get("/v1/agents", response_model=List[AgentSummary])
        async def list_agents(
            tags: Optional[List[str]] = Query(None),
            status: Optional[AgentStatus] = None,
        ):
            """List all agents, optionally filtered by tags and status."""
            return await self.store.list_agents(tags, status)

        @self.app.patch(
            "/v1/agent/{agent_id}", response_model=Dict[str, str]
        )
        async def update_agent(agent_id: UUID, update: AgentUpdate):
            """Update an existing agent's configuration."""
            await self.store.update_agent(agent_id, update)
            return {"status": "updated"}

        @self.app.get(
            "/v1/agent/{agent_id}/metrics",
            response_model=AgentMetrics,
        )
        async def get_agent_metrics(agent_id: UUID):
            """Get performance metrics for a specific agent."""
            return await self.store.get_agent_metrics(agent_id)

        @self.app.post(
            "/v1/agent/{agent_id}/clone",
            response_model=Dict[str, UUID],
        )
        async def clone_agent(agent_id: UUID, new_name: str):
            """Clone an existing agent with a new name."""
            new_id = await self.store.clone_agent(agent_id, new_name)
            return {"agent_id": new_id}

        @self.app.delete("/v1/agent/{agent_id}")
        async def delete_agent(agent_id: UUID):
            """Delete an agent."""
            await self.store.delete_agent(agent_id)
            return {"status": "deleted"}

        @self.app.post(
            "/v1/agent/completions", response_model=CompletionResponse
        )
        async def create_completion(
            request: CompletionRequest,
            background_tasks: BackgroundTasks,
        ):
            """Process a completion request with the specified agent."""
            try:
                agent = await self.store.get_agent(request.agent_id)

                # Process completion
                response = await self.store.process_completion(
                    agent,
                    request.prompt,
                    request.agent_id,
                    request.max_tokens,
                    request.temperature_override,
                )

                # Schedule background cleanup
                background_tasks.add_task(
                    self._cleanup_old_metrics, request.agent_id
                )

                return response

            except HTTPException:
                # Preserve intentional HTTP errors (e.g. 404 for a missing
                # agent) instead of wrapping them in a 500 below.
                raise
            except Exception as e:
                logger.error(f"Error processing completion: {str(e)}")
                raise HTTPException(
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    detail=f"Error processing completion: {str(e)}",
                )

        @self.app.get("/v1/agent/{agent_id}/status")
        async def get_agent_status(agent_id: UUID):
            """Get the current status of an agent."""
            metadata = self.store.agent_metadata.get(agent_id)
            if not metadata:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail=f"Agent {agent_id} not found",
                )
            return {
                "agent_id": agent_id,
                "status": metadata["status"],
                "last_used": metadata["last_used"],
                "total_completions": metadata["total_completions"],
                "error_count": metadata["error_count"],
            }

    async def _cleanup_old_metrics(self, agent_id: UUID):
        """Clean up old metrics data to prevent memory bloat."""
        metadata = self.store.agent_metadata.get(agent_id)
        if metadata:
            # Keep only the last 24 hours of response times, using the
            # parallel completion_timestamps list to decide what is stale.
            cutoff = datetime.utcnow() - timedelta(days=1)
            recent = [
                (ts, rt)
                for ts, rt in zip(
                    metadata.get("completion_timestamps", []),
                    metadata["response_times"],
                )
                if ts > cutoff
            ]
            metadata["completion_timestamps"] = [ts for ts, _ in recent]
            metadata["response_times"] = [rt for _, rt in recent]

            # Clean up old tokens per minute data
            if "tokens_per_minute" in metadata:
                metadata["tokens_per_minute"] = {
                    k: v
                    for k, v in metadata["tokens_per_minute"].items()
                    if k > cutoff
                }


def create_app() -> FastAPI:
    """Create and configure the FastAPI application."""
    api = SwarmsAPI()
    return api.app


if __name__ == "__main__":
    # Configure uvicorn logging
    logger.info("API Starting")
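    # "main:create_app" is an app factory, hence factory=True below. Note
    # that uvicorn ignores `workers` while `reload=True`, so disable reload
    # in production to actually get multiple worker processes.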
    uvicorn.run(
        "main:create_app",
        host="0.0.0.0",
        port=8000,
        reload=True,
        workers=4,
        factory=True,
    )

@@ -0,0 +1,107 @@
import requests
from loguru import logger
import time

# Configure loguru
logger.add(
    "api_tests_{time}.log",
    rotation="100 MB",
    level="DEBUG",
    format="{time} {level} {message}",
)

BASE_URL = "http://localhost:8000/v1"
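# Assumes the Swarms agent API above is already running locally on port 8000.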


def test_create_agent():
    """Test creating a new agent."""
    logger.info("Testing agent creation")

    payload = {
        "agent_name": "Test Agent",
        "system_prompt": "You are a helpful assistant",
        "model_name": "gpt-4",
        "description": "Test agent",
        "tags": ["test"],
    }

    response = requests.post(f"{BASE_URL}/agent", json=payload)
    logger.debug(f"Create response: {response.json()}")

    if response.status_code == 200:
        logger.success("Successfully created agent")
        return response.json()["agent_id"]
    else:
        logger.error(f"Failed to create agent: {response.text}")
        return None


def test_list_agents():
    """Test listing all agents."""
    logger.info("Testing agent listing")

    response = requests.get(f"{BASE_URL}/agents")
    logger.debug(f"List response: {response.json()}")

    if response.status_code == 200:
        logger.success(f"Found {len(response.json())} agents")
    else:
        logger.error(f"Failed to list agents: {response.text}")


def test_completion(agent_id):
    """Test running a completion."""
    logger.info("Testing completion")

    payload = {
        "prompt": "What is the weather like today?",
        "agent_id": agent_id,
    }

    response = requests.post(
        f"{BASE_URL}/agent/completions", json=payload
    )
    logger.debug(f"Completion response: {response.json()}")

    if response.status_code == 200:
        logger.success("Successfully got completion")
    else:
        logger.error(f"Failed to get completion: {response.text}")


def test_delete_agent(agent_id):
    """Test deleting an agent."""
    logger.info("Testing agent deletion")

    response = requests.delete(f"{BASE_URL}/agent/{agent_id}")
    logger.debug(f"Delete response: {response.json()}")

    if response.status_code == 200:
        logger.success("Successfully deleted agent")
    else:
        logger.error(f"Failed to delete agent: {response.text}")


def run_tests():
    """Run all tests in sequence."""
    logger.info("Starting API tests")

    # Create agent and get ID
    agent_id = test_create_agent()
    if not agent_id:
        logger.error("Cannot continue tests without agent ID")
        return

    # Wait a bit for agent to be ready
    time.sleep(1)

    # Run other tests
    test_list_agents()
    test_completion(agent_id)
    test_delete_agent(agent_id)

    logger.info("Tests completed")


if __name__ == "__main__":
    run_tests()

@@ -1,125 +0,0 @@
import os
import json
from pydantic import BaseModel, Field
from swarm_models import OpenAIFunctionCaller
from dotenv import load_dotenv
from typing import Any, List

load_dotenv()


class Flow(BaseModel):
    id: str = Field(
        description="A unique identifier for the flow. This should be a short, descriptive name that captures the main purpose of the flow. Use - to separate words and make it lowercase."
    )
    plan: str = Field(
        description="The comprehensive plan detailing how the flow will accomplish the given task. This should include the high-level strategy, key milestones, and expected outcomes. The plan should clearly articulate what the overall goal is, what success looks like, and how progress will be measured throughout execution."
    )
    failures_prediction: str = Field(
        description="A thorough analysis of potential failure modes and mitigation strategies. This should identify technical risks, edge cases, error conditions, and possible points of failure in the flow. For each identified risk, include specific preventive measures, fallback approaches, and recovery procedures to ensure robustness and reliability."
    )
    rationale: str = Field(
        description="The detailed reasoning and justification for why this specific flow design is optimal for the given task. This should explain the key architectural decisions, tradeoffs considered, alternatives evaluated, and why this approach best satisfies the requirements. Include both technical and business factors that influenced the design."
    )
    flow: str = Field(
        description="The precise execution flow defining how agents interact and coordinate. Use -> to indicate sequential processing where one agent must complete before the next begins (e.g. agent1 -> agent2 -> agent3). Use , to indicate parallel execution where multiple agents can run simultaneously (e.g. agent1 -> agent2, agent3, agent4). The flow should clearly show the dependencies and parallelization opportunities between agents. You must only use the agent names provided in the task description; do not make up new agent names and do not use any other formatting."
    )
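
# Per the field description above, a flow string such as
#   "agent1 -> agent2, agent3, agent4"
# runs agent1 first, then agent2, agent3, and agent4 in parallel
# (the names are placeholders for agents named in the task).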


class AgentRearrangeBuilder(BaseModel):
    name: str = Field(
        description="The name of the swarm. This should be a short, descriptive name that captures the main purpose of the flow."
    )
    description: str = Field(
        description="A brief description of the swarm. This should be a concise summary of the main purpose of the swarm."
    )
    flows: List[Flow] = Field(
        description="A list of flows that are optimal for the given task. Each flow should be a detailed plan, failure prediction, rationale, and execution flow."
    )
    swarm_flow: str = Field(
        description="The flow defining how each team should communicate and coordinate with each other. Use -> to indicate sequential processing where one id must complete before the next begins (e.g. team1 -> team2 -> team3). Use , to indicate parallel execution where multiple teams can run simultaneously (e.g. team1 -> team2, team3, team4). The flow should clearly show the dependencies and parallelization opportunities between teams. You must only use the team names provided in the id; do not make up new team names and do not use any other formatting."
    )


# def flow_generator(task: str) -> Flow:


def setup_model(base_model: BaseModel = Flow):
    model = OpenAIFunctionCaller(
        system_prompt="""You are an expert flow architect specializing in designing multi-agent workflows. Your role is to analyze tasks and create optimal execution flows that coordinate multiple AI agents effectively.

When given a task, you will:
1. Develop a comprehensive plan breaking down the task into logical steps
2. Carefully consider potential failure modes and build in robust error handling
3. Provide clear rationale for your architectural decisions and agent coordination strategy
4. Design a precise flow showing both sequential dependencies and parallel execution opportunities

Your flows should maximize:
- Efficiency through smart parallelization
- Reliability through thorough error handling
- Clarity through well-structured agent interactions
- Effectiveness through strategic task decomposition

Format your flow using -> for sequential steps and , for parallel execution. Be specific about agent roles and interactions.
""",
        base_model=base_model,
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        temperature=0.5,
    )
    return model


def generate_flow(task: str) -> Any:
    model = setup_model()
    flow = model.run(task)
    print(json.dumps(flow, indent=4))
    return flow


def generate_agent_rearrange(task: str) -> Any:
    model = setup_model(base_model=AgentRearrangeBuilder)
    flow = model.run(task)
    print(json.dumps(flow, indent=4))
    return flow


if __name__ == "__main__":
    # Basic patient diagnosis flow
    # generate_flow("Diagnose a patient's symptoms and create a treatment plan. You have 3 agents to use: Diagnostician, Specialist, CareCoordinator")

    # # Complex multi-condition case
    # generate_flow("""Handle a complex patient case with multiple chronic conditions requiring ongoing care coordination.
    #     The patient has diabetes, heart disease, and chronic pain.
    #     Create a comprehensive diagnosis and treatment plan.
    #     You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")

    # # Emergency trauma case
    # generate_flow("""Process an emergency trauma case requiring rapid diagnosis and immediate intervention.
    #     Patient presents with multiple injuries from a car accident.
    #     Develop immediate and long-term treatment plans.
    #     You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")

    # # Long-term care planning
    # generate_flow("""Design a 6-month care plan for an elderly patient with declining cognitive function.
    #     Include regular assessments, specialist consultations, and family coordination.
    #     You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")

    # # Mental health assessment
    # generate_flow("""Conduct a comprehensive mental health assessment and develop treatment strategy.
    #     Patient shows signs of depression and anxiety with possible underlying conditions.
    #     Create both immediate intervention and long-term support plans.
    #     You have 3 agents to use: Diagnostician, Specialist, CareCoordinator""")

    generate_agent_rearrange(
        """Build a complete automated hedge fund system.
        Design and implement a sophisticated trading strategy incorporating multiple asset classes,
        risk management protocols, and automated execution systems.
        The system should include:
        - Market analysis and research capabilities
        - Portfolio optimization and risk management
        - Automated trade execution and settlement
        - Compliance and regulatory monitoring
        - Performance tracking and reporting
        - Fund operations and administration
        Create a comprehensive architecture that integrates all these components into a fully automated system."""
    )

@@ -1,162 +0,0 @@
import os
from dotenv import load_dotenv
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.structs.swarm_router import SwarmRouter

load_dotenv()

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

# Define specialized system prompts for each agent
DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. Your expertise includes:
1. Extracting key financial metrics (revenue, EBITDA, growth rates, etc.) from financial statements and reports
2. Identifying and extracting important contract terms from legal documents
3. Pulling out relevant market data from industry reports and analyses
4. Extracting operational KPIs from management presentations and internal reports
5. Identifying and extracting key personnel information from organizational charts and bios
Provide accurate, structured data extracted from various document types to support investment analysis."""

SUMMARIZER_PROMPT = """You are an expert private equity agent specializing in summarizing complex documents. Your core competencies include:
1. Distilling lengthy financial reports into concise executive summaries
2. Summarizing legal documents, highlighting key terms and potential risks
3. Condensing industry reports to capture essential market trends and competitive dynamics
4. Summarizing management presentations to highlight key strategic initiatives and projections
5. Creating brief overviews of technical documents, emphasizing critical points for non-technical stakeholders
Deliver clear, concise summaries that capture the essence of various documents while highlighting information crucial for investment decisions."""

FINANCIAL_ANALYST_PROMPT = """You are a specialized private equity agent focused on financial analysis. Your key responsibilities include:
1. Analyzing historical financial statements to identify trends and potential issues
2. Evaluating the quality of earnings and potential adjustments to EBITDA
3. Assessing working capital requirements and cash flow dynamics
4. Analyzing capital structure and debt capacity
5. Evaluating financial projections and underlying assumptions
Provide thorough, insightful financial analysis to inform investment decisions and valuation."""

MARKET_ANALYST_PROMPT = """You are a highly skilled private equity agent specializing in market analysis. Your expertise covers:
1. Analyzing industry trends, growth drivers, and potential disruptors
2. Evaluating competitive landscape and market positioning
3. Assessing market size, segmentation, and growth potential
4. Analyzing customer dynamics, including concentration and loyalty
5. Identifying potential regulatory or macroeconomic impacts on the market
Deliver comprehensive market analysis to assess the attractiveness and risks of potential investments."""

OPERATIONAL_ANALYST_PROMPT = """You are an expert private equity agent focused on operational analysis. Your core competencies include:
1. Evaluating operational efficiency and identifying improvement opportunities
2. Analyzing supply chain and procurement processes
3. Assessing sales and marketing effectiveness
4. Evaluating IT systems and digital capabilities
5. Identifying potential synergies in merger or add-on acquisition scenarios
Provide detailed operational analysis to uncover value creation opportunities and potential risks."""

# Initialize specialized agents
data_extractor_agent = Agent(
    agent_name="Data-Extractor",
    system_prompt=DATA_EXTRACTOR_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="data_extractor_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

summarizer_agent = Agent(
    agent_name="Document-Summarizer",
    system_prompt=SUMMARIZER_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="summarizer_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

financial_analyst_agent = Agent(
    agent_name="Financial-Analyst",
    system_prompt=FINANCIAL_ANALYST_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="financial_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

market_analyst_agent = Agent(
    agent_name="Market-Analyst",
    system_prompt=MARKET_ANALYST_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="market_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

operational_analyst_agent = Agent(
    agent_name="Operational-Analyst",
    system_prompt=OPERATIONAL_ANALYST_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="operational_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

# Initialize the SwarmRouter
router = SwarmRouter(
    name="pe-document-analysis-swarm",
    description="Analyze documents for private equity due diligence and investment decision-making",
    max_loops=1,
    agents=[
        data_extractor_agent,
        summarizer_agent,
        # financial_analyst_agent,
        # market_analyst_agent,
        # operational_analyst_agent,
    ],
    swarm_type="auto",  # or "SequentialWorkflow" or "ConcurrentWorkflow"
|
|
||||||
# auto_generate_prompts=True,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Example usage
|
|
||||||
if __name__ == "__main__":
|
|
||||||
# Run a comprehensive private equity document analysis task
|
|
||||||
result = router.run(
|
|
||||||
"Where is the best place to find template term sheets for series A startups. Provide links and references"
|
|
||||||
)
|
|
||||||
print(result)
|
|
||||||
|
|
||||||
# Retrieve and print logs
|
|
||||||
for log in router.get_logs():
|
|
||||||
print(f"{log.timestamp} - {log.level}: {log.message}")
|
|
@ -0,0 +1,898 @@
import io
import struct
import wave
from dataclasses import dataclass
from enum import Enum, auto
from typing import Any, Dict, List, Optional, Tuple, Union

import magic  # python-magic, used by ModalityDetector below
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from loguru import logger
from PIL import Image
from torch import Tensor


@dataclass
class ModelConfig:
    """Configuration for the enhanced BytePredictor model."""

    vocab_size: int = 256  # Standard byte range
    hidden_size: int = 1024
    num_layers: int = 12
    num_key_value_heads: int = 8  # For multi-query attention
    num_query_heads: int = 32  # More query heads than kv heads
    dropout: float = 0.1
    max_sequence_length: int = 8192
    rope_theta: float = 10000.0
    layer_norm_eps: float = 1e-5
    vocab_parallel: bool = False
    qk_norm: bool = True
    qk_norm_scale: Optional[float] = None
    attention_bias: bool = False


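# Illustrative note (not part of the original file): hidden_size must divide
# evenly by num_query_heads, and num_query_heads by num_key_value_heads, for
# the projections and head expansion below to line up, e.g.
#
#   cfg = ModelConfig()                               # head_dim = 1024 // 32 = 32
#   cfg.num_query_heads // cfg.num_key_value_heads    # 4 query heads share each KV head

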
class MultiQueryAttention(nn.Module):
    """Fixed Multi-Query Attention implementation."""

    def __init__(self, config: ModelConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.num_query_heads = config.num_query_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.head_dim = config.hidden_size // config.num_query_heads
        self.qk_scale = config.qk_norm_scale or (self.head_dim**-0.5)

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_query_heads * self.head_dim
        )
        self.k_proj = nn.Linear(
            config.hidden_size,
            config.num_key_value_heads * self.head_dim,
        )
        self.v_proj = nn.Linear(
            config.hidden_size,
            config.num_key_value_heads * self.head_dim,
        )
        self.o_proj = nn.Linear(
            config.num_query_heads * self.head_dim, config.hidden_size
        )

        self.qk_norm = config.qk_norm
        if self.qk_norm:
            self.q_norm = nn.LayerNorm(self.head_dim)
            self.k_norm = nn.LayerNorm(self.head_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        batch_size, seq_length, _ = hidden_states.shape

        # Project and reshape
        q = self.q_proj(hidden_states)
        k = self.k_proj(hidden_states)
        v = self.v_proj(hidden_states)

        # Reshape to [seq_length, batch, heads, head_dim]
        q = q.view(
            batch_size,
            seq_length,
            self.num_query_heads,
            self.head_dim,
        ).permute(1, 0, 2, 3)
        k = k.view(
            batch_size,
            seq_length,
            self.num_key_value_heads,
            self.head_dim,
        ).permute(1, 0, 2, 3)
        v = v.view(
            batch_size,
            seq_length,
            self.num_key_value_heads,
            self.head_dim,
        ).permute(1, 0, 2, 3)

        # Apply rotary embeddings
        # q, k = self.rotary(q, k, seq_length)

        # Apply QK normalization if enabled
        if self.qk_norm:
            q = self.q_norm(q)
            k = self.k_norm(k)

        # Handle MQA head expansion
        if self.num_key_value_heads != self.num_query_heads:
            k = k.repeat_interleave(
                self.num_query_heads // self.num_key_value_heads,
                dim=2,
            )
            v = v.repeat_interleave(
                self.num_query_heads // self.num_key_value_heads,
                dim=2,
            )

        # Compute attention
        # Reshape for matmul: [batch, heads, seq_length, head_dim]
        q = q.permute(1, 2, 0, 3)
        k = k.permute(1, 2, 0, 3)
        v = v.permute(1, 2, 0, 3)

        attn_weights = (
            torch.matmul(q, k.transpose(-2, -1)) * self.qk_scale
        )

        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask

        attn_weights = F.softmax(attn_weights, dim=-1)

        output = torch.matmul(attn_weights, v)

        # Reshape back to [batch, seq_length, hidden_size]
        output = (
            output.transpose(1, 2)
            .contiguous()
            .view(batch_size, seq_length, -1)
        )
        output = self.o_proj(output)

        return output


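# Quick shape check for the attention block above (illustrative, not in the
# original file). With 4 query heads and 2 KV heads, repeat_interleave expands
# K/V to the query head count before the matmul:
#
#   cfg = ModelConfig(hidden_size=64, num_query_heads=4, num_key_value_heads=2)
#   attn = MultiQueryAttention(cfg)
#   attn(torch.randn(2, 8, 64)).shape   # torch.Size([2, 8, 64])

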
class EnhancedBytePredictor(nn.Module):
    """Enhanced byte prediction model with state-of-the-art features."""

    def __init__(self, config: ModelConfig):
        super().__init__()
        self.config = config

        # Token embeddings
        self.tok_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size
        )

        # Transformer layers
        self.layers = nn.ModuleList(
            [
                nn.ModuleDict(
                    {
                        "attention": MultiQueryAttention(config),
                        "attention_norm": nn.LayerNorm(
                            config.hidden_size,
                            eps=config.layer_norm_eps,
                        ),
                        "feed_forward": nn.Sequential(
                            nn.Linear(
                                config.hidden_size,
                                4 * config.hidden_size,
                            ),
                            nn.GELU(),
                            nn.Linear(
                                4 * config.hidden_size,
                                config.hidden_size,
                            ),
                        ),
                        "feed_forward_norm": nn.LayerNorm(
                            config.hidden_size,
                            eps=config.layer_norm_eps,
                        ),
                    }
                )
                for _ in range(config.num_layers)
            ]
        )

        self.norm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps
        )
        self.output = nn.Linear(
            config.hidden_size, config.vocab_size, bias=False
        )

        # Initialize weights
        self.apply(self._init_weights)

    def _init_weights(self, module: nn.Module) -> None:
        """Initialize weights with scaled normal distribution."""
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Forward pass of the model.

        Args:
            input_ids: Tensor of shape (batch_size, sequence_length)
            attention_mask: Optional attention mask

        Returns:
            Tensor of logits with shape (batch_size, sequence_length, vocab_size)
        """
        hidden_states = self.tok_embeddings(input_ids)

        # Create causal mask if needed. Build an additive float mask
        # (0 on allowed positions, -inf above the diagonal); a boolean
        # tensor cannot hold -inf and would be silently added as 0/1.
        if attention_mask is None:
            causal = torch.triu(
                torch.ones(
                    (input_ids.size(1), input_ids.size(1)),
                    device=input_ids.device,
                    dtype=torch.bool,
                ),
                diagonal=1,
            )
            attention_mask = torch.zeros(
                causal.shape,
                device=input_ids.device,
                dtype=hidden_states.dtype,
            ).masked_fill(causal, float("-inf"))

        # Apply transformer layers
        for layer in self.layers:
            # Attention block (pre-norm residual)
            hidden_states = hidden_states + layer["attention"](
                layer["attention_norm"](hidden_states), attention_mask
            )

            # Feed-forward block (pre-norm residual)
            hidden_states = hidden_states + layer["feed_forward"](
                layer["feed_forward_norm"](hidden_states)
            )

        hidden_states = self.norm(hidden_states)
        logits = self.output(hidden_states)

        return logits

    def compute_loss(
        self,
        input_ids: torch.Tensor,
        target_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Compute cross entropy loss.

        Args:
            input_ids: Input token ids
            target_ids: Target token ids
            attention_mask: Optional attention mask

        Returns:
            Loss value
        """
        logits = self(input_ids, attention_mask)
        loss = F.cross_entropy(
            rearrange(logits, "b s v -> (b s) v"),
            rearrange(target_ids, "b s -> (b s)"),
        )
        return loss

    @torch.no_grad()
    def _generate(
        self,
        input_ids: torch.Tensor,
        max_new_tokens: int = 100,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        repetition_penalty: float = 1.0,
    ) -> torch.Tensor:
        """
        Generate new tokens autoregressively.

        Args:
            input_ids: Starting sequence
            max_new_tokens: Number of tokens to generate
            temperature: Sampling temperature
            top_k: K for top-k sampling
            top_p: P for nucleus sampling
            repetition_penalty: Penalty for repeating tokens

        Returns:
            Generated sequence
        """
        batch_size, seq_length = input_ids.shape
        generated = input_ids.clone()

        for _ in range(max_new_tokens):
            if generated.size(1) >= self.config.max_sequence_length:
                break

            # Forward pass; keep logits for the last position only
            logits = self(generated)[:, -1, :]

            # Apply temperature
            logits = logits / temperature

            # Apply repetition penalty. Negative logits are multiplied
            # instead of divided so the penalty always lowers them.
            if repetition_penalty != 1.0:
                for i in range(batch_size):
                    for token_id in set(generated[i].tolist()):
                        if logits[i, token_id] > 0:
                            logits[i, token_id] /= repetition_penalty
                        else:
                            logits[i, token_id] *= repetition_penalty

            # Apply top-k sampling
            if top_k is not None:
                indices_to_remove = (
                    logits
                    < torch.topk(logits, top_k)[0][..., -1, None]
                )
                logits[indices_to_remove] = float("-inf")

            # Apply nucleus (top-p) sampling
            if top_p is not None:
                sorted_logits, sorted_indices = torch.sort(
                    logits, descending=True
                )
                cumulative_probs = torch.cumsum(
                    F.softmax(sorted_logits, dim=-1), dim=-1
                )

                # Remove tokens with cumulative probability above the threshold
                sorted_indices_to_remove = cumulative_probs > top_p
                # Shift right so the first token over the threshold is kept
                sorted_indices_to_remove[..., 1:] = (
                    sorted_indices_to_remove[..., :-1].clone()
                )
                sorted_indices_to_remove[..., 0] = 0

                indices_to_remove = torch.zeros_like(
                    logits, dtype=torch.bool
                )
                indices_to_remove.scatter_(
                    1, sorted_indices, sorted_indices_to_remove
                )
                logits[indices_to_remove] = float("-inf")

            # Sample next token
            probs = F.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)

            # Append to sequence
            generated = torch.cat([generated, next_token], dim=1)

        return generated

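    # Worked example of the nucleus (top-p) filter above (illustrative only):
    # for sorted probs [0.5, 0.3, 0.15, 0.05] and top_p=0.8, the cumulative
    # sums are [0.5, 0.8, 0.95, 1.0]; shifting the removal mask right by one
    # keeps the token that first crosses the threshold, so the first three
    # tokens survive and sampling renormalizes over them.
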
    def generate(
        self,
        input_ids: torch.Tensor,
        max_new_tokens: int = 100,
        temperature: float = 1.0,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        repetition_penalty: float = 1.0,
    ):
        """Generate bytes and decode them to data with automatic modality detection."""
        tensor_data = self._generate(
            input_ids=input_ids,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            repetition_penalty=repetition_penalty,
        )

        return tensor_to_data(tensor_data)


class DataType(Enum):
    TEXT = "text"
    IMAGE = "image"
    AUDIO = "audio"
    VIDEO = "video"
    BINARY = "binary"


class ByteDetokenizer:
    """Utility class for converting model output bytes back to original data formats."""

    @staticmethod
    def tensor_to_bytes(tensor: torch.Tensor) -> bytes:
        """Convert model output tensor to bytes."""
        # Convert logits/probabilities to byte values
        if tensor.dim() > 1:
            # If we have logits, convert to byte indices
            byte_indices = tensor.argmax(dim=-1)
        else:
            byte_indices = tensor

        # Convert to Python bytes
        return bytes(
            byte_indices.cpu().numpy().astype(np.uint8).tolist()
        )

    @staticmethod
    def decode_text(byte_sequence: bytes) -> str:
        """Convert bytes to text."""
        try:
            return byte_sequence.decode("utf-8")
        except UnicodeDecodeError:
            # Fall back to replacement characters for invalid sequences
            return byte_sequence.decode("utf-8", errors="replace")

    @staticmethod
    def decode_image(
        byte_sequence: bytes,
        mode: str = "RGB",
        size: Optional[tuple] = None,
    ) -> Image.Image:
        """Convert bytes to image.

        Args:
            byte_sequence: Raw image bytes
            mode: Image mode (RGB, RGBA, L, etc.)
            size: Optional tuple of (width, height)
        """
        try:
            # Try to load as-is first (for standard image formats)
            img = Image.open(io.BytesIO(byte_sequence))
            if size:
                img = img.resize(size)
            return img
        except Exception:
            # If that failed, assume raw pixel data
            if not size:
                # Try to determine a square size from the byte count
                pixel_count = len(byte_sequence) // len(mode)
                size = (
                    int(np.sqrt(pixel_count)),
                    int(np.sqrt(pixel_count)),
                )

            # Convert raw bytes to pixel array
            pixels = np.frombuffer(byte_sequence, dtype=np.uint8)
            pixels = pixels.reshape((*size, len(mode)))

            return Image.fromarray(pixels, mode=mode)

    @staticmethod
    def decode_audio(
        byte_sequence: bytes,
        sample_rate: int = 44100,
        channels: int = 2,
        sample_width: int = 2,
    ) -> np.ndarray:
        """Convert bytes to audio samples.

        Args:
            byte_sequence: Raw audio bytes
            sample_rate: Audio sample rate in Hz
            channels: Number of audio channels
            sample_width: Bytes per sample (1, 2, or 4)
        """
        # Determine format string based on sample width
        format_str = {
            1: "b",  # signed char
            2: "h",  # short
            4: "i",  # int
        }[sample_width]

        # Unpack bytes to samples
        sample_count = len(byte_sequence) // (channels * sample_width)
        samples = struct.unpack(
            f"<{sample_count * channels}{format_str}", byte_sequence
        )

        # Reshape to [samples, channels]
        return np.array(samples).reshape(-1, channels)

    def decode_data(
        self,
        model_output: Union[torch.Tensor, bytes],
        data_type: DataType,
        **kwargs,
    ) -> Union[str, Image.Image, np.ndarray, bytes]:
        """Main method to decode model output to desired format.

        Args:
            model_output: Either tensor from model or raw bytes
            data_type: Type of data to decode to
            **kwargs: Additional parameters for specific decoders

        Returns:
            Decoded data in specified format
        """
        # Convert tensor to bytes if needed
        if isinstance(model_output, torch.Tensor):
            byte_sequence = self.tensor_to_bytes(model_output)
        else:
            byte_sequence = model_output

        # Decode based on type
        if data_type == DataType.TEXT:
            return self.decode_text(byte_sequence)
        elif data_type == DataType.IMAGE:
            return self.decode_image(byte_sequence, **kwargs)
        elif data_type == DataType.AUDIO:
            return self.decode_audio(byte_sequence, **kwargs)
        elif data_type == DataType.VIDEO:
            raise NotImplementedError(
                "Video decoding not yet implemented"
            )
        else:  # BINARY
            return byte_sequence


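# Illustrative round-trip for the detokenizer above (not in the original file):
#
#   ids = torch.tensor([72, 105, 33])           # byte ids for "Hi!"
#   raw = ByteDetokenizer.tensor_to_bytes(ids)  # b"Hi!"
#   ByteDetokenizer.decode_text(raw)            # "Hi!"

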
# Usage example


class Modality(Enum):
    TEXT = auto()
    IMAGE = auto()
    AUDIO = auto()
    VIDEO = auto()
    BINARY = auto()
    MULTIMODAL = auto()


@dataclass
class ModalityInfo:
    """Information about detected modality."""

    modality: Modality
    confidence: float
    metadata: Dict[str, Any]
    sub_modalities: Optional[List["ModalityInfo"]] = None


class ModalityDetector:
    """Detects data modalities from byte sequences."""

    # Common file signatures (magic numbers)
    SIGNATURES = {
        # Images
        b"\xFF\xD8\xFF": "JPEG",
        b"\x89PNG\r\n\x1a\n": "PNG",
        b"GIF87a": "GIF",
        b"GIF89a": "GIF",
        b"RIFF": "WEBP",
        # Audio (note: the "." bytes below are literal, not wildcards)
        b"RIFF....WAVE": "WAV",
        b"ID3": "MP3",
        b"\xFF\xFB": "MP3",
        b"OggS": "OGG",
        # Video
        b"\x00\x00\x00\x18ftypmp42": "MP4",
        b"\x00\x00\x00\x1Cftypav01": "MP4",
        b"\x1A\x45\xDF\xA3": "WEBM",
    }

    def __init__(self):
        self.magic = magic.Magic(mime=True)

    def _check_text_probability(self, data: bytes) -> float:
        """Estimate probability that data is text."""
        if not data:
            return 0.0
        # Check if data is valid UTF-8
        try:
            data.decode("utf-8")
            # Count printable ASCII characters
            printable = sum(1 for b in data if 32 <= b <= 126)
            return printable / len(data)
        except UnicodeDecodeError:
            return 0.0

    def _check_image_validity(self, data: bytes) -> Tuple[bool, Dict]:
        """Check if data is a valid image and extract metadata."""
        try:
            with io.BytesIO(data) as bio:
                img = Image.open(bio)
                return True, {
                    "format": img.format,
                    "size": img.size,
                    "mode": img.mode,
                }
        except Exception:
            return False, {}

    def _check_audio_validity(self, data: bytes) -> Tuple[bool, Dict]:
        """Check if data is valid audio and extract metadata."""
        try:
            with io.BytesIO(data) as bio:
                # Try to parse as WAV
                with wave.open(bio) as wav:
                    return True, {
                        "channels": wav.getnchannels(),
                        "sample_width": wav.getsampwidth(),
                        "framerate": wav.getframerate(),
                        "frames": wav.getnframes(),
                    }
        except Exception:
            # Check for other audio signatures
            for sig in [b"ID3", b"\xFF\xFB", b"OggS"]:
                if data.startswith(sig):
                    return True, {"format": "compressed_audio"}
            return False, {}

    def _detect_boundaries(
        self, data: bytes
    ) -> List[Tuple[int, int, Modality]]:
        """Detect boundaries between different modalities in the data."""
        boundaries = []
        current_pos = 0

        while current_pos < len(data):
            # Look for known signatures
            for sig, format_type in self.SIGNATURES.items():
                if data[current_pos:].startswith(sig):
                    # Found a signature, determine its length
                    if format_type in ["JPEG", "PNG", "GIF"]:
                        # Find image end
                        try:
                            with io.BytesIO(
                                data[current_pos:]
                            ) as bio:
                                img = Image.open(bio)
                                img.verify()
                                size = bio.tell()
                                boundaries.append(
                                    (
                                        current_pos,
                                        current_pos + size,
                                        Modality.IMAGE,
                                    )
                                )
                                current_pos += size
                                continue
                        except Exception:
                            pass

            # Check for text sections
            text_prob = self._check_text_probability(
                data[current_pos : current_pos + 1024]
            )
            if text_prob > 0.8:
                # Look for end of text section
                end_pos = current_pos + 1
                while end_pos < len(data):
                    if (
                        self._check_text_probability(
                            data[end_pos : end_pos + 32]
                        )
                        < 0.5
                    ):
                        break
                    end_pos += 1
                boundaries.append(
                    (current_pos, end_pos, Modality.TEXT)
                )
                current_pos = end_pos
                continue

            current_pos += 1

        return boundaries

    def detect_modality(self, data: bytes) -> ModalityInfo:
        """Detect modality of byte sequence."""
        # First check for single modality
        mime_type = self.magic.from_buffer(data)

        # Check text
        text_prob = self._check_text_probability(data)
        if text_prob > 0.9:
            return ModalityInfo(
                modality=Modality.TEXT,
                confidence=text_prob,
                metadata={"mime_type": mime_type},
            )

        # Check image
        is_image, image_meta = self._check_image_validity(data)
        if is_image:
            return ModalityInfo(
                modality=Modality.IMAGE,
                confidence=1.0,
                metadata={**image_meta, "mime_type": mime_type},
            )

        # Check audio
        is_audio, audio_meta = self._check_audio_validity(data)
        if is_audio:
            return ModalityInfo(
                modality=Modality.AUDIO,
                confidence=1.0,
                metadata={**audio_meta, "mime_type": mime_type},
            )

        # Check for multimodal content
        boundaries = self._detect_boundaries(data)
        if len(boundaries) > 1:
            sub_modalities = []
            for start, end, modality in boundaries:
                chunk_data = data[start:end]
                sub_info = self.detect_modality(chunk_data)
                if sub_info.modality != Modality.BINARY:
                    sub_modalities.append(sub_info)

            if sub_modalities:
                return ModalityInfo(
                    modality=Modality.MULTIMODAL,
                    confidence=0.8,
                    metadata={"mime_type": "multipart/mixed"},
                    sub_modalities=sub_modalities,
                )

        # Default to binary
        return ModalityInfo(
            modality=Modality.BINARY,
            confidence=0.5,
            metadata={"mime_type": mime_type},
        )

    def split_modalities(
        self, data: bytes
    ) -> List[Tuple[Modality, bytes, Dict]]:
        """Split multimodal data into separate modalities."""
        boundaries = self._detect_boundaries(data)
        result = []

        for start, end, modality in boundaries:
            chunk = data[start:end]
            info = self.detect_modality(chunk)
            result.append((modality, chunk, info.metadata))

        return result


class AutoDetectBytesDecoder:
    """Decoder that automatically detects and decodes different modalities."""

    def __init__(self):
        self.detector = ModalityDetector()
        self.text_decoder = ByteDetokenizer()  # From previous example

    def decode(
        self, data: bytes
    ) -> Union[str, Image.Image, np.ndarray, List[Any]]:
        """Automatically detect and decode byte sequence."""
        info = self.detector.detect_modality(data)

        if info.modality == Modality.MULTIMODAL:
            # Handle multimodal content
            parts = self.detector.split_modalities(data)
            return [
                self.decode(chunk) for modality, chunk, _ in parts
            ]

        if info.modality == Modality.TEXT:
            return self.text_decoder.decode_text(data)
        elif info.modality == Modality.IMAGE:
            return self.text_decoder.decode_image(data)
        elif info.modality == Modality.AUDIO:
            return self.text_decoder.decode_audio(data)
        else:
            return data


# # Example usage
# def demo_auto_detection():
#     """Demonstrate auto modality detection."""
#     # Create mixed content
#     text = "Hello, World!".encode('utf-8')
#
#     # Create a small test image
#     img = Image.new('RGB', (100, 100), color='red')
#     img_bytes = io.BytesIO()
#     img.save(img_bytes, format='PNG')
#
#     # Combine into multimodal content
#     mixed_content = text + img_bytes.getvalue()
#
#     # Initialize decoder
#     decoder = AutoDetectBytesDecoder()
#
#     # Decode
#     result = decoder.decode(mixed_content)
#
#     if isinstance(result, list):
#         print("Detected multimodal content:")
#         for i, part in enumerate(result):
#             print(f"Part {i+1}: {type(part)}")
#
# if __name__ == "__main__":
#     demo_auto_detection()


def tensor_to_data(tensor: Tensor):
    """Decode a tensor of generated byte ids into text, image, audio, or raw bytes."""
    byte_sequence = ByteDetokenizer.tensor_to_bytes(tensor)

    # Initialize auto-detector
    decoder = AutoDetectBytesDecoder()

    # Decode with automatic detection
    result = decoder.decode(byte_sequence)

    return result


def demo_byte_predictor():
    """Demo with smaller dimensions to test."""
    # Initialize model configuration with adjusted dimensions
    config = ModelConfig(
        vocab_size=256,
        hidden_size=128,  # Smaller for testing
        num_layers=2,  # Fewer layers for testing
        num_key_value_heads=2,
        num_query_heads=4,
        dropout=0.1,
        max_sequence_length=1024,
    )

    # Initialize model
    model = EnhancedBytePredictor(config)
    logger.info("Model initialized")

    # Move to GPU if available
    device = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu"
    )
    model = model.to(device)
    logger.info(f"Using device: {device}")

    # Create sample input data
    batch_size = 2
    seq_length = 16  # Shorter sequence for testing
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, seq_length), device=device
    )
    logger.info(f"Created input tensor of shape: {input_ids.shape}")

    # Test forward pass
    try:
        logits = model(input_ids)
        logger.info(
            f"Forward pass successful! Output shape: {logits.shape}"
        )

        # Test loss computation
        target_ids = torch.randint(
            0,
            config.vocab_size,
            (batch_size, seq_length),
            device=device,
        )
        loss = model.compute_loss(input_ids, target_ids)
        logger.info(
            f"Loss computation successful! Loss value: {loss.item():.4f}"
        )

        # Test generation; use _generate to get the raw token tensor
        # (generate() decodes the bytes into data and has no .shape)
        prompt = torch.randint(
            0,
            config.vocab_size,
            (1, 4),  # Very short prompt for testing
            device=device,
        )
        generated = model._generate(
            prompt, max_new_tokens=8, temperature=0.8, top_k=50
        )
        logger.info(
            f"Generation successful! Generated shape: {generated.shape}"
        )

    except Exception as e:
        logger.error(f"Error during execution: {str(e)}")
        raise


if __name__ == "__main__":
    # Set up logging
    # logger.remove()  # Remove default handler
    # logger.add(sys.stderr, format="<green>{time:HH:mm:ss}</green> | {level} | {message}")

    demo_byte_predictor()
@ -1,18 +1,27 @@
/* Further customization as needed */

.md-typeset__table {
  min-width: 100%;
}

.md-typeset table:not([class]) {
  display: table;
}

/* Dark mode
[data-md-color-scheme="slate"] {
  --md-default-bg-color: black;
}

.header__ellipsis {
  color: black;
}

.md-copyright__highlight {
  color: black;
}

.md-header.md-header--shadow {
  color: black;
} */
@ -0,0 +1,59 @@
# Swarms 6.0.0 - Performance & Reliability Update 🚀

We're excited to announce the release of Swarms 6.0.0, bringing significant improvements to performance, reliability, and developer experience. This release focuses on streamlining core functionalities while enhancing the overall stability of the framework.

## 📦 Installation

```bash
pip3 install -U swarms
```

## 🌟 Highlights

### Agent Enhancements
- **Improved RAG Performance**: Significant improvements to Retrieval-Augmented Generation capabilities
- **Enhanced Prompt Generation**: Auto-generate prompt now incorporates name, description, and system prompt for more contextual interactions
- **Streamlined Architecture**: Cleaned up unused code for better performance and maintainability
- **Simplified State Management**: Consolidated state management methods into a single `load()` function (sketched below)

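As a rough sketch of the consolidated API (the exact `load()` signature is an assumption here, not confirmed by this release note):

```python
from swarms import Agent
from swarm_models import OpenAIChat

model = OpenAIChat(model_name="gpt-4o-mini")

# Hypothetical: restore a previously autosaved agent state in one call
agent = Agent(
    agent_name="Financial-Analyst",
    llm=model,
    autosave=True,
    saved_state_path="financial_analyst_agent.json",
)
agent.load("financial_analyst_agent.json")  # assumed to take the saved-state path
```
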
### Tools & Execution
- **Optimized Environment Management**: Fixed multiple environment instantiation issue
  - Environments now initialize once during `__init__`
- **New SwarmRouter Function**: Simplified routing mechanism
  - Returns consolidated string output from all agents
  - Improved coordination between swarm components

## 💪 Performance Improvements
- Faster execution times
- Reduced memory footprint
- More reliable logging system
- Lightweight and efficient codebase

## 🤝 Join Our Community

### We're Hiring!
Join our growing team! We're currently looking for:
- Agent Engineers
- Developer Relations
- Infrastructure Engineers
- And more!

### Get Involved
- ⭐ Star our repository
- 🔄 Fork the project
- 🛠 Submit pull requests
- 🐛 Report issues
- 💡 Share your ideas

### Contact & Support
- 📧 Email: kye@swarms.world
- 🔗 Issues: [GitHub Issues](https://github.com/kyegomez/swarms/issues)

## 🔜 What's Next?
Have ideas for features, bug fixes, or improvements? We'd love to hear from you! Reach out through our GitHub issues or email us directly.

---

*Thank you to all our contributors and users who make Swarms better every day. Together, we're building the future of swarm intelligence.*

#SwarmAI #OpenSource #AI #MachineLearning
@ -0,0 +1,186 @@
# Swarms Framework Environment Configuration

This guide details the environment variables used in the Swarms framework for configuration and customization of your agent-based applications.

## Configuration Setup

Create a `.env` file in your project's root directory to configure the Swarms framework. This file will contain all necessary environment variables for customizing your agent's behavior, logging, and analytics.

## Environment Variables

### Core Variables

#### `WORKSPACE_DIR`
- **Purpose**: Defines the directory where all agent states and execution logs are stored
- **Type**: String (path)
- **Default**: `./workspace`
- **Example**:
  ```bash
  WORKSPACE_DIR=/path/to/your/workspace
  ```
- **Usage** (see the sketch below):
  - Stores JSON files containing agent states
  - Maintains execution history
  - Keeps track of agent interactions
  - Preserves conversation logs

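A minimal sketch of how application code might resolve this variable (the `./workspace` fallback mirrors the documented default):

```python
import os
from pathlib import Path

# Fall back to the documented default when the variable is unset
workspace = Path(os.getenv("WORKSPACE_DIR", "./workspace"))
workspace.mkdir(parents=True, exist_ok=True)
print(f"Agent states and logs go to: {workspace.resolve()}")
```
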
#### `SWARMS_AUTOUPDATE_ON`
- **Purpose**: Controls automatic updates of the Swarms framework
- **Type**: Boolean
- **Default**: `false`
- **Example**:
  ```bash
  SWARMS_AUTOUPDATE_ON=true
  ```
- **Features**:
  - Automatically updates to the latest stable version
  - Ensures you have the newest features
  - Maintains compatibility with the latest improvements
  - Handles dependency updates
- **Considerations**:
  - Set to `false` if you need version stability
  - Recommended `true` for development environments
  - Consider system requirements for auto-updates
  - May require restart after updates

### Telemetry Configuration

#### `USE_TELEMETRY`
- **Purpose**: Controls whether telemetry data is collected
- **Type**: Boolean
- **Default**: `false`
- **Example**:
  ```bash
  USE_TELEMETRY=true
  ```
- **Data Collected**:
  - Agent performance metrics
  - Execution time statistics
  - Memory usage
  - Error rates
  - System health indicators

### Analytics Integration

#### `SWARMS_API_KEY`
- **Purpose**: Authentication key for the Swarms Analytics Suite
- **Type**: String
- **Required**: Yes, for analytics features
- **Example**:
  ```bash
  SWARMS_API_KEY=your_api_key_here
  ```
- **Features**:
  - Real-time agent execution tracking
  - Usage analytics
  - Performance monitoring
  - Cost tracking
  - Custom metrics

## Getting Started

1. Create a new `.env` file:
   ```bash
   touch .env
   ```

2. Add your configuration:
   ```bash
   # Basic configuration
   WORKSPACE_DIR=./my_workspace

   # Enable auto-updates
   SWARMS_AUTOUPDATE_ON=true

   # Enable telemetry
   USE_TELEMETRY=true

   # Add your Swarms API key
   SWARMS_API_KEY=your_api_key_here
   ```

3. Obtain your API key:
   - Visit [swarms.ai](https://swarms.ai)
   - Create an account or log in
   - Navigate to the API section
   - Generate your unique API key

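If your application loads the file itself, python-dotenv is the usual approach (an assumption; the framework may also read `.env` automatically):

```python
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory
print(os.getenv("WORKSPACE_DIR"), os.getenv("SWARMS_AUTOUPDATE_ON"))
```
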
## Best Practices

1. **Security**:
   - Never commit your `.env` file to version control
   - Add `.env` to your `.gitignore` file
   - Keep your API keys secure and rotate them periodically

2. **Workspace Organization**:
   - Use descriptive workspace directory names
   - Implement regular cleanup of old logs
   - Monitor workspace size to prevent disk space issues

3. **Telemetry Management**:
   - Enable telemetry in development for debugging
   - Consider privacy implications in production
   - Review collected data periodically

4. **Auto-Update Management**:
   - Test updates in development before enabling in production
   - Keep backups before enabling auto-updates
   - Monitor system resources during updates
   - Schedule updates during low-traffic periods

## Examples

### Basic Development Setup
```bash
WORKSPACE_DIR=./dev_workspace
SWARMS_AUTOUPDATE_ON=true
USE_TELEMETRY=true
SWARMS_API_KEY=sk_test_xxxxxxxxxxxx
```

### Production Setup
```bash
WORKSPACE_DIR=/var/log/swarms/prod_workspace
SWARMS_AUTOUPDATE_ON=false
USE_TELEMETRY=true
SWARMS_API_KEY=sk_prod_xxxxxxxxxxxx
```

### Testing Environment
```bash
WORKSPACE_DIR=./test_workspace
SWARMS_AUTOUPDATE_ON=true
USE_TELEMETRY=false
SWARMS_API_KEY=sk_test_xxxxxxxxxxxx
```

## Troubleshooting

Common issues and solutions:

1. **Workspace Access Issues**:
   - Ensure proper file permissions
   - Verify the directory exists
   - Check disk space availability

2. **API Key Problems**:
   - Confirm the key is properly formatted
   - Verify the key hasn't expired
   - Check for proper environment variable loading

3. **Telemetry Issues**:
   - Confirm network connectivity
   - Verify firewall settings
   - Check for proper boolean values

4. **Auto-Update Issues**:
   - Check internet connectivity
   - Verify sufficient disk space
   - Ensure proper permissions for updates
   - Check system compatibility requirements

## Additional Resources

- [Swarms Framework Documentation](https://github.com/kyegomez/swarms)
- [Swarms Analytics Dashboard](https://swarms.ai)
- [API Reference](https://swarms.ai/docs/api)
@ -1,238 +1,231 @@
# GroupChat Class Documentation

The GroupChat class manages multi-agent conversations with state persistence, comprehensive logging, and flexible agent configurations. It supports both Agent class instances and callable functions, making it versatile for different use cases.

## Installation

```bash
pip install swarms python-dotenv pydantic
```

## Attributes

| Attribute | Type | Description |
|-----------|------|-------------|
| state_path | str | Path for saving/loading chat state |
| wrapped_agents | List[AgentWrapper] | List of wrapped agent instances |
| selector_agent | AgentWrapper | Agent responsible for speaker selection |
| state | GroupChatState | Current state of the group chat |

## Methods

### Core Methods

```python
def run(self, task: str) -> str:
    """Execute the group chat conversation"""

def save_state(self) -> None:
    """Save current state to disk"""

@classmethod
def load_state(cls, state_path: str) -> 'GroupChat':
    """Load GroupChat from saved state"""

def get_conversation_summary(self) -> Dict[str, Any]:
    """Return a summary of the conversation"""

def export_conversation(self, format: str = "json") -> Union[str, Dict]:
    """Export the conversation in specified format"""
```

### Internal Methods

```python
def _log_interaction(self, agent_name: str, position: int, input_text: str, output_text: str) -> None:
    """Log a single interaction"""

def _add_message(self, role: str, content: str) -> None:
    """Add a message to the conversation history"""

def select_next_speaker(self, last_speaker: AgentWrapper) -> AgentWrapper:
    """Select the next speaker using the selector agent"""
```

## Usage Examples

### 1. Basic Setup with Two Agents
```python
import os
from swarms import Agent, GroupChat
from swarm_models import OpenAIChat

# Initialize OpenAI
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(openai_api_key=api_key, model_name="gpt-4o-mini")

# Create agents
analyst = Agent(
    agent_name="Financial-Analyst",
    system_prompt="You are a financial analyst...",
    llm=model
)

advisor = Agent(
    agent_name="Investment-Advisor",
    system_prompt="You are an investment advisor...",
    llm=model
)

# Create group chat
chat = GroupChat(
    name="Investment Team",
    agents=[analyst, advisor],
    max_rounds=5,
    group_objective="Provide investment advice"
)

response = chat.run("What's the best investment strategy for retirement?")
```

### 2. Advanced Setup with State Management
```python
# Create group chat with state persistence
chat = GroupChat(
    name="Investment Advisory Team",
    description="Expert team for financial planning",
    agents=[analyst, advisor, tax_specialist],
    max_rounds=10,
    admin_name="Senior Advisor",
    group_objective="Provide comprehensive financial planning",
    state_path="investment_chat_state.json",
    rules="1. Always provide sources\n2. Be concise\n3. Focus on practical advice"
)

# Run chat and save state
response = chat.run("Create a retirement plan for a 35-year-old")
chat.save_state()

# Load existing chat state
loaded_chat = GroupChat.load_state("investment_chat_state.json")
```

### 3. Using Custom Callable Agents
```python
def custom_agent(input_text: str) -> str:
    # Custom logic here
    return f"Processed: {input_text}"

# Mix of regular agents and callable functions
chat = GroupChat(
    name="Hybrid Team",
    agents=[analyst, custom_agent],
    max_rounds=3
)
```

### 4. Export and Analysis
```python
# Run chat
chat.run("Analyze market conditions")

# Get summary
summary = chat.get_conversation_summary()
print(summary)

# Export in different formats
json_conv = chat.export_conversation(format="json")
text_conv = chat.export_conversation(format="text")
```

### 5. Advanced Configuration with Custom Selector
```python
class CustomSelector(Agent):
    def run(self, input_text: str) -> str:
        # Custom selection logic
        return "Financial-Analyst"

chat = GroupChat(
    name="Custom Selection Team",
    agents=[analyst, advisor],
    selector_agent=CustomSelector(
        agent_name="Custom-Selector",
        system_prompt="Select the next speaker based on expertise",
        llm=model
    ),
    max_rounds=5
)
```

### 6. Debugging Setup
```python
import logging

# Configure logging
logging.basicConfig(level=logging.DEBUG)

chat = GroupChat(
    name="Debug Team",
    agents=[analyst, advisor],
    max_rounds=3,
    state_path="debug_chat.json"
)

# Run with detailed logging
try:
    response = chat.run("Complex query")
except Exception as e:
    logging.error(f"Chat failed: {str(e)}")
    # Access last successful state
    state = chat.state
```

## Error Handling

The GroupChat class includes comprehensive error handling:

```python
try:
    chat = GroupChat(agents=[analyst])  # Will raise ValueError
except ValueError as e:
    print("Configuration error:", str(e))

try:
    response = chat.run("Query")
except Exception as e:
    # Access error state
    error_summary = chat.get_conversation_summary()
    print("Execution error:", str(e))
    print("State at error:", error_summary)
```

## Best Practices

1. **State Management**:
   - Always specify a `state_path` for important conversations
   - Use `save_state()` after critical operations
   - Implement regular state backups for long conversations

2. **Agent Configuration**:
   - Provide clear system prompts for each agent
   - Use descriptive agent names
   - Consider agent expertise when setting the group objective

3. **Performance**:
   - Keep `max_rounds` reasonable (5-10 for most cases)
   - Use early stopping conditions when possible
   - Monitor conversation length and complexity

4. **Error Handling**:
   - Always wrap chat execution in try-except blocks
   - Implement proper logging
   - Save states before potentially risky operations

## Limitations

- Agents must either have a `run` method or be callable
- State files can grow large with many interactions
- Selector agent may need optimization for large agent groups
- Real-time streaming not supported in basic configuration
@ -0,0 +1,68 @@
import os

from swarms import Agent

from swarm_models import OpenAIChat
from swarms.structs.agents_available import showcase_available_agents

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the Claims Director agent
director_agent = Agent(
    agent_name="ClaimsDirector",
    agent_description="Oversees and coordinates the medical insurance claims processing workflow",
    system_prompt="""You are the Claims Director responsible for managing the medical insurance claims process.
    Assign and prioritize tasks between claims processors and auditors. Ensure claims are handled efficiently
    and accurately while maintaining compliance with insurance policies and regulations.""",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="director_agent.json",
)

# Initialize Claims Processor agent
processor_agent = Agent(
    agent_name="ClaimsProcessor",
    agent_description="Reviews and processes medical insurance claims, verifying coverage and eligibility",
    system_prompt="""Review medical insurance claims for completeness and accuracy. Verify patient eligibility,
    coverage details, and process claims according to policy guidelines. Flag any claims requiring special review.""",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="processor_agent.json",
)

# Initialize Claims Auditor agent
auditor_agent = Agent(
    agent_name="ClaimsAuditor",
    agent_description="Audits processed claims for accuracy and compliance with policies and regulations",
    system_prompt="""Audit processed insurance claims for accuracy and compliance. Review claim decisions,
    identify potential fraud or errors, and ensure all processing follows established guidelines and regulations.""",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="auditor_agent.json",
)

# Create a list of agents
agents = [director_agent, processor_agent, auditor_agent]

print(showcase_available_agents(agents=agents))
@ -0,0 +1,56 @@
import os

from dotenv import load_dotenv
from swarm_models import OpenAIChat

from swarms import Agent
from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from new_features_examples.async_executor import HighSpeedExecutor

load_dotenv()

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    # autosave=True,
    # dashboard=False,
    # verbose=True,
    # dynamic_temperature_enabled=True,
    # saved_state_path="finance_agent.json",
    # user_name="swarms_corp",
    # retry_attempts=1,
    # context_length=200000,
    # return_step_meta=True,
    # output_type="json",  # "json", "dict", "csv" OR "string" soon "yaml" and
    # auto_generate_prompt=False,  # Auto generate prompt for the agent based on name, description, and system prompt, task
    # # artifacts_on=True,
    # artifacts_output_path="roth_ira_report",
    # artifacts_file_extension=".txt",
    # max_tokens=8000,
    # return_history=True,
)


def execute_agent(
    task: str = "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria. Create a report on this question.",
):
    return agent.run(task)


executor = HighSpeedExecutor()
results = executor.run(execute_agent, 2)

print(results)
@ -0,0 +1,131 @@
import asyncio
import multiprocessing as mp
import time
from functools import partial
from typing import Any, Dict, Union


class HighSpeedExecutor:
    def __init__(self, num_processes: int = None):
        """
        Initialize the executor with a configurable number of workers.
        If num_processes is None, it uses the CPU count.
        """
        self.num_processes = num_processes or mp.cpu_count()

    async def _worker(
        self,
        queue: asyncio.Queue,
        func: Any,
        *args: Any,
        **kwargs: Any,
    ):
        """Async worker that processes tasks from the queue"""
        while True:
            try:
                # Await the next task from the queue (does not block the event loop)
                await queue.get()
                # Run the (possibly blocking) function in the default thread pool
                await asyncio.get_event_loop().run_in_executor(
                    None, partial(func, *args, **kwargs)
                )
                queue.task_done()
            except asyncio.CancelledError:
                break

    async def _distribute_tasks(
        self, num_tasks: int, queue: asyncio.Queue
    ):
        """Distribute tasks across the queue"""
        for i in range(num_tasks):
            await queue.put(i)

    async def execute_batch(
        self,
        func: Any,
        num_executions: int,
        *args: Any,
        **kwargs: Any,
    ) -> Dict[str, Union[int, float]]:
        """
        Execute the given function multiple times concurrently.

        Args:
            func: The function to execute
            num_executions: Number of times to execute the function
            *args, **kwargs: Arguments to pass to the function

        Returns:
            A dictionary containing the number of executions, duration, and executions per second.
        """
        queue = asyncio.Queue()

        # Create worker tasks
        workers = [
            asyncio.create_task(
                self._worker(queue, func, *args, **kwargs)
            )
            for _ in range(self.num_processes)
        ]

        # Start timing
        start_time = time.perf_counter()

        # Distribute tasks
        await self._distribute_tasks(num_executions, queue)

        # Wait for all tasks to complete
        await queue.join()

        # Cancel workers
        for worker in workers:
            worker.cancel()

        # Wait for all workers to finish
        await asyncio.gather(*workers, return_exceptions=True)

        end_time = time.perf_counter()
        duration = end_time - start_time

        return {
            "executions": num_executions,
            "duration": duration,
            "executions_per_second": num_executions / duration,
        }

    def run(
        self,
        func: Any,
        num_executions: int,
        *args: Any,
        **kwargs: Any,
    ):
        return asyncio.run(
            self.execute_batch(func, num_executions, *args, **kwargs)
        )


# def example_function(x: int = 0) -> int:
#     """Example function to execute"""
#     return x * x


# async def main():
#     # Create executor with number of CPU cores
#     executor = HighSpeedExecutor()

#     # Execute the function 1000 times
#     result = await executor.execute_batch(
#         example_function, num_executions=1000, x=42
#     )

#     print(
#         f"Completed {result['executions']} executions in {result['duration']:.2f} seconds"
#     )
#     print(
#         f"Rate: {result['executions_per_second']:.2f} executions/second"
#     )


# if __name__ == "__main__":
#     # Run the async main function
#     asyncio.run(main())
@ -0,0 +1,188 @@
import json
import os
from contextlib import suppress
from typing import Any, Callable, Dict, Optional, Type, Union

from dotenv import load_dotenv
from pydantic import BaseModel, Field, ValidationError, create_model
from swarm_models.openai_function_caller import OpenAIFunctionCaller


class DynamicParser:
    @staticmethod
    def extract_fields(model: Type[BaseModel]) -> Dict[str, Any]:
        return {
            field_name: (field.annotation, ... if field.is_required() else None)
            for field_name, field in model.model_fields.items()
        }

    @staticmethod
    def create_partial_model(model: Type[BaseModel], data: Dict[str, Any]) -> Type[BaseModel]:
        fields = {
            field_name: (field.annotation, ... if field.is_required() else None)
            for field_name, field in model.model_fields.items()
            if field_name in data
        }
        return create_model(f"Partial{model.__name__}", **fields)

    @classmethod
    def parse(cls, data: Union[str, Dict[str, Any]], model: Type[BaseModel]) -> Optional[BaseModel]:
        if isinstance(data, str):
            try:
                data = json.loads(data)
            except json.JSONDecodeError:
                return None

        # Try full model first
        with suppress(ValidationError):
            return model.model_validate(data)

        # Create and try partial model
        partial_model = cls.create_partial_model(model, data)
        with suppress(ValidationError):
            return partial_model.model_validate(data)

        return None
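
# Illustrative usage of DynamicParser (hypothetical payload; `Thoughts` is
# defined below). The parser first tries the full schema, then falls back to
# a partial model built only from the keys present in the input:
#
#   parsed = DynamicParser.parse('{"text": "hello"}', Thoughts)
#   # -> "PartialThoughts" instance with text="hello", or None on invalid JSON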

load_dotenv()


# Define the Thoughts schema
class Thoughts(BaseModel):
    text: str = Field(..., description="Current thoughts or observations regarding the task.")
    reasoning: str = Field(..., description="Logical reasoning behind the thought process.")
    plan: str = Field(..., description="A short bulleted list that conveys the immediate and long-term plan.")
    criticism: str = Field(..., description="Constructive self-criticism to improve future responses.")
    speak: str = Field(..., description="A concise summary of thoughts intended for the user.")


# Define the Command schema
class Command(BaseModel):
    name: str = Field(..., description="Command name to execute from the provided list of commands.")
    args: Dict[str, Any] = Field(..., description="Arguments required to execute the command.")


# Define the AgentResponse schema
class AgentResponse(BaseModel):
    thoughts: Thoughts = Field(..., description="The agent's current thoughts and reasoning.")
    command: Command = Field(..., description="The command to execute along with its arguments.")


# Define tool functions
def fluid_api_command(task: str):
    """Execute a fluid API request."""
    # NOTE: fluid_api_request is assumed to be provided elsewhere; the original
    # left this call commented out, which made `response` undefined below.
    response = fluid_api_request(task)
    print(response.model_dump_json(indent=4))
    return response


def send_tweet_command(text: str):
    """Simulate sending a tweet."""
    print(f"Tweet sent: {text}")
    return {"status": "success", "message": f"Tweet sent: {text}"}


def do_nothing_command():
    """Do nothing."""
    print("Doing nothing...")
    return {"status": "success", "message": "No action taken."}


def task_complete_command(reason: str):
    """Mark the task as complete and provide a reason."""
    print(f"Task completed: {reason}")
    return {"status": "success", "message": f"Task completed: {reason}"}


# Dynamic command execution
def execute_command(name: str, args: Dict[str, Any]):
    """Dynamically execute a command based on its name and arguments."""
    command_map: Dict[str, Callable] = {
        "fluid_api": lambda **kwargs: fluid_api_command(task=kwargs.get("task")),
        "send_tweet": lambda **kwargs: send_tweet_command(text=kwargs.get("text")),
        "do_nothing": lambda **kwargs: do_nothing_command(),
        "task_complete": lambda **kwargs: task_complete_command(reason=kwargs.get("reason")),
    }

    if name not in command_map:
        raise ValueError(f"Unknown command: {name}")

    # Execute the command with the provided arguments
    return command_map[name](**args)


def parse_and_execute_command(response: Union[str, Dict[str, Any]], base_model: Type[BaseModel] = AgentResponse) -> Any:
    """Enhanced command parser with flexible input handling"""
    parsed = DynamicParser.parse(response, base_model)
    if not parsed:
        raise ValueError("Failed to parse response")

    if hasattr(parsed, 'command'):
        command_name = parsed.command.name
        command_args = parsed.command.args
        return execute_command(command_name, command_args)

    return parsed


ainame = "AutoAgent"
userprovided = "assistant"

SYSTEM_PROMPT = f"""
You are {ainame}, an advanced and autonomous {userprovided}.
Your role is to make decisions and complete tasks independently without seeking user assistance. Leverage your strengths as an LLM to solve tasks efficiently, adhering strictly to the commands and resources provided.

### GOALS:
1. {userprovided}
2. Execute tasks with precision and efficiency.
3. Ensure outputs are actionable and aligned with the user's objectives.
4. Continuously optimize task strategies for maximum effectiveness.
5. Maintain reliability and consistency in all responses.

### CONSTRAINTS:
1. Memory limit: ~4000 words for short-term memory. Save essential information to files immediately to avoid loss.
2. Independent decision-making: Do not rely on user assistance.
3. Exclusively use commands in double quotes (e.g., "command name").
4. Use subprocesses for commands that may take longer than a few minutes.
5. Ensure all outputs strictly adhere to the specified JSON response format.

### COMMANDS:
1. Fluid API: "fluid_api", args: "method": "<GET/POST/...>", "url": "<url>", "headers": "<headers>", "body": "<payload>"
18. Send Tweet: "send_tweet", args: "text": "<text>"
19. Do Nothing: "do_nothing", args:
20. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"

### RESOURCES:
1. Internet access for real-time information and data gathering.
2. Long-term memory management for storing critical information.
3. Access to GPT-3.5-powered Agents for delegating tasks.
4. File handling capabilities for output storage and retrieval.

### PERFORMANCE EVALUATION:
1. Continuously analyze and reflect on actions to ensure optimal task completion.
2. Self-critique decisions and strategies constructively to identify areas for improvement.
3. Ensure every command serves a clear purpose and minimizes resource usage.
4. Complete tasks in the least number of steps, balancing speed and accuracy.

### RESPONSE FORMAT:
Always respond in a strict JSON format as described below. Ensure your responses can be parsed with Python's `json.loads`:
"""

# Initialize the OpenAIFunctionCaller
model = OpenAIFunctionCaller(
    system_prompt=SYSTEM_PROMPT,
    max_tokens=4000,
    temperature=0.9,
    base_model=AgentResponse,  # Pass the Pydantic schema as the base model
    parallel_tool_calls=False,
    openai_api_key=os.getenv("OPENAI_API_KEY")
)

# Example usage
user_input = (
    "Analyze the provided Python code for inefficiencies, generate suggestions for improvements, "
    "and provide optimized code."
)

response = model.run(user_input)
response = parse_and_execute_command(response)
print(response)
@ -0,0 +1,120 @@
import os
from dotenv import load_dotenv
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.structs.swarm_router import SwarmRouter

load_dotenv()

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


# Initialize specialized agents
data_extractor_agent = Agent(
    agent_name="Data-Extractor",
    system_prompt="You are a data extraction specialist. Extract relevant information from provided content.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="data_extractor_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

summarizer_agent = Agent(
    agent_name="Document-Summarizer",
    system_prompt="You are a document summarization specialist. Provide clear and concise summaries.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="summarizer_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

financial_analyst_agent = Agent(
    agent_name="Financial-Analyst",
    system_prompt="You are a financial analysis specialist. Analyze financial aspects of content.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="financial_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

market_analyst_agent = Agent(
    agent_name="Market-Analyst",
    system_prompt="You are a market analysis specialist. Analyze market-related aspects.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="market_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

operational_analyst_agent = Agent(
    agent_name="Operational-Analyst",
    system_prompt="You are an operational analysis specialist. Analyze operational aspects.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="operational_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

# Initialize the SwarmRouter
router = SwarmRouter(
    name="pe-document-analysis-swarm",
    description="Analyze documents for private equity due diligence and investment decision-making",
    max_loops=1,
    agents=[
        data_extractor_agent,
        summarizer_agent,
        financial_analyst_agent,
        market_analyst_agent,
        operational_analyst_agent,
    ],
    swarm_type="SequentialWorkflow",  # or "ConcurrentWorkflow"
    auto_generate_prompts=True,
    output_type="all",
)

# Example usage
if __name__ == "__main__":
    # Run a comprehensive private equity document analysis task
    result = router.run(
        "Where is the best place to find template term sheets for series A startups. Provide links and references"
    )
    print(result)
@ -0,0 +1,96 @@
import os

from swarm_models import OpenAIChat

from swarms import Agent, run_agents_with_tasks_concurrently

# Fetch the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize agents for different roles
delaware_ccorp_agent = Agent(
    agent_name="Delaware-CCorp-Hiring-Agent",
    system_prompt="""
    Create a comprehensive hiring description for a Delaware C Corporation,
    including all relevant laws and regulations, such as the Delaware General
    Corporation Law (DGCL) and the Delaware Corporate Law. Ensure the description
    covers the requirements for hiring employees, contractors, and officers,
    including the necessary paperwork, tax obligations, and benefits. Also,
    outline the procedures for compliance with Delaware's employment laws,
    including anti-discrimination laws, workers' compensation, and unemployment
    insurance. Provide guidance on how to navigate the complexities of Delaware's
    corporate law and ensure that all hiring practices are in compliance with
    state and federal regulations.
    """,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    output_type="str",
    artifacts_on=True,
    artifacts_output_path="delaware_ccorp_hiring_description.md",
    artifacts_file_extension=".md",
)

indian_foreign_agent = Agent(
    agent_name="Indian-Foreign-Hiring-Agent",
    system_prompt="""
    Create a comprehensive hiring description for an Indian or foreign country,
    including all relevant laws and regulations, such as the Indian Contract Act,
    the Indian Labour Laws, and the Foreign Exchange Management Act (FEMA).
    Ensure the description covers the requirements for hiring employees,
    contractors, and officers, including the necessary paperwork, tax obligations,
    and benefits. Also, outline the procedures for compliance with Indian and
    foreign employment laws, including anti-discrimination laws, workers'
    compensation, and unemployment insurance. Provide guidance on how to navigate
    the complexities of Indian and foreign corporate law and ensure that all hiring
    practices are in compliance with state and federal regulations. Consider the
    implications of hiring foreign nationals and the requirements for obtaining
    necessary visas and work permits.
    """,
    llm=model,
    max_loops=1,
    autosave=False,
    dashboard=False,
    verbose=True,
    output_type="str",
    artifacts_on=True,
    artifacts_output_path="indian_foreign_hiring_description.md",
    artifacts_file_extension=".md",
)

# List of agents and corresponding tasks
agents = [delaware_ccorp_agent, indian_foreign_agent]
tasks = [
    """
    Create a comprehensive hiring description for an Agent Engineer, including
    required skills and responsibilities. Ensure the description covers the
    necessary technical expertise, such as proficiency in AI/ML frameworks,
    programming languages, and data structures. Outline the key responsibilities,
    including designing and developing AI agents, integrating with existing systems,
    and ensuring scalability and performance.
    """,
    """
    Generate a detailed job description for a Prompt Engineer, including
    required skills and responsibilities. Ensure the description covers the
    necessary technical expertise, such as proficiency in natural language processing,
    machine learning, and software development. Outline the key responsibilities,
    including designing and optimizing prompts for AI systems, ensuring prompt
    quality and consistency, and collaborating with cross-functional teams.
    """,
]

# Run agents with tasks concurrently
results = run_agents_with_tasks_concurrently(
    agents, tasks, all_cores=True, device="cpu", no_clusterops=True
)

# Print the results
# for result in results:
#     print(result)
@ -0,0 +1,54 @@
import pandas as pd
import json
from loguru import logger


def dict_to_dataframe(data: dict) -> pd.DataFrame:
    """
    Converts a dictionary into a Pandas DataFrame with formatted values.
    Handles non-serializable values gracefully by skipping them.

    Args:
        data (dict): The dictionary to convert.

    Returns:
        pd.DataFrame: A DataFrame representation of the dictionary.
    """
    formatted_data = {}

    for key, value in data.items():
        try:
            # Attempt to serialize the value
            if isinstance(value, list):
                # Format list as comma-separated string
                formatted_value = ", ".join(
                    str(item) for item in value
                )
            elif isinstance(value, dict):
                # Format dict as key-value pairs
                formatted_value = ", ".join(
                    f"{k}: {v}" for k, v in value.items()
                )
            else:
                # Convert other serializable types to string
                formatted_value = json.dumps(
                    value
                )  # Serialize value to string

            formatted_data[key] = formatted_value
        except (TypeError, ValueError) as e:
            # Log and skip non-serializable items
            logger.warning(
                f"Skipping non-serializable key '{key}': {e}"
            )
            continue

    # Convert the formatted dictionary into a DataFrame
    return pd.DataFrame(
        list(formatted_data.items()), columns=["Key", "Value"]
    )


example = dict_to_dataframe(data={"chicken": "noodle_soup"})
# formatter.print_panel(example)
print(example)
@ -0,0 +1,308 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from web3 import Web3
from typing import Dict, Optional, Any
from datetime import datetime
import asyncio
from loguru import logger
from dotenv import load_dotenv
import csv
import requests
import time

BLOCKCHAIN_AGENT_PROMPT = """
You are an expert blockchain and cryptocurrency analyst with deep knowledge of Ethereum markets and DeFi ecosystems.
You have access to real-time ETH price data and transaction information.

For each transaction, analyze:

1. MARKET CONTEXT
- Current ETH price and what this transaction means in USD terms
- How this movement compares to typical market volumes
- Whether this could impact ETH price

2. BEHAVIORAL ANALYSIS
- Whether this appears to be institutional, whale, or protocol movement
- If this fits any known wallet patterns or behaviors
- Signs of smart contract interaction or DeFi activity

3. RISK & IMPLICATIONS
- Potential market impact or price influence
- Signs of potential market manipulation or unusual activity
- Protocol or DeFi risks if applicable

4. STRATEGIC INSIGHTS
- What traders should know about this movement
- Potential chain reactions or follow-up effects
- Market opportunities or risks created

Write naturally but precisely. Focus on actionable insights and important patterns.
Your analysis helps traders and researchers understand significant market movements in real-time."""


class EthereumAnalyzer:
    def __init__(self, min_value_eth: float = 100.0):
        load_dotenv()

        logger.add(
            "eth_analysis.log",
            rotation="500 MB",
            retention="10 days",
            level="INFO",
            format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
        )

        self.w3 = Web3(
            Web3.HTTPProvider(
                "https://mainnet.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161"
            )
        )
        if not self.w3.is_connected():
            raise ConnectionError(
                "Failed to connect to Ethereum network"
            )

        self.min_value_eth = min_value_eth
        self.last_processed_block = self.w3.eth.block_number
        self.eth_price = self.get_eth_price()
        self.last_price_update = time.time()

        # Initialize AI agent
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError(
                "OpenAI API key not found in environment variables"
            )

        model = OpenAIChat(
            openai_api_key=api_key,
            model_name="gpt-4",
            temperature=0.1,
        )

        self.agent = Agent(
            agent_name="Ethereum-Analysis-Agent",
            system_prompt=BLOCKCHAIN_AGENT_PROMPT,
            llm=model,
            max_loops=1,
            autosave=True,
            dashboard=False,
            verbose=True,
            dynamic_temperature_enabled=True,
            saved_state_path="eth_agent.json",
            user_name="eth_analyzer",
            retry_attempts=1,
            context_length=200000,
            output_type="string",
            streaming_on=False,
        )

        self.csv_filename = "ethereum_analysis.csv"
        self.initialize_csv()

    def get_eth_price(self) -> float:
        """Get current ETH price from CoinGecko API."""
        try:
            response = requests.get(
                "https://api.coingecko.com/api/v3/simple/price",
                params={"ids": "ethereum", "vs_currencies": "usd"},
            )
            return float(response.json()["ethereum"]["usd"])
        except Exception as e:
            logger.error(f"Error fetching ETH price: {str(e)}")
            return 0.0

    def update_eth_price(self):
        """Update ETH price if more than 5 minutes have passed."""
        if time.time() - self.last_price_update > 300:  # 5 minutes
            self.eth_price = self.get_eth_price()
            self.last_price_update = time.time()
            logger.info(f"Updated ETH price: ${self.eth_price:,.2f}")

    def initialize_csv(self):
        """Initialize CSV file with headers."""
        headers = [
            "timestamp",
            "transaction_hash",
            "from_address",
            "to_address",
            "value_eth",
            "value_usd",
            "eth_price",
            "gas_used",
            "gas_price_gwei",
            "block_number",
            "analysis",
        ]

        if not os.path.exists(self.csv_filename):
            with open(self.csv_filename, "w", newline="") as f:
                writer = csv.writer(f)
                writer.writerow(headers)

    async def analyze_transaction(
        self, tx_hash: str
    ) -> Optional[Dict[str, Any]]:
        """Analyze a single transaction."""
        try:
            tx = self.w3.eth.get_transaction(tx_hash)
            receipt = self.w3.eth.get_transaction_receipt(tx_hash)

            value_eth = float(self.w3.from_wei(tx.value, "ether"))

            if value_eth < self.min_value_eth:
                return None

            block = self.w3.eth.get_block(tx.blockNumber)

            # Update ETH price if needed
            self.update_eth_price()

            value_usd = value_eth * self.eth_price

            analysis = {
                "timestamp": datetime.fromtimestamp(
                    block.timestamp
                ).isoformat(),
                "transaction_hash": tx_hash.hex(),
                "from_address": tx["from"],
                "to_address": tx.to if tx.to else "Contract Creation",
                "value_eth": value_eth,
                "value_usd": value_usd,
                "eth_price": self.eth_price,
                "gas_used": receipt.gasUsed,
                "gas_price_gwei": float(
                    self.w3.from_wei(tx.gasPrice, "gwei")
                ),
                "block_number": tx.blockNumber,
            }

            # Check if it's a contract
            if tx.to:
                code = self.w3.eth.get_code(tx.to)
                analysis["is_contract"] = len(code) > 0

                # Get contract events
                if analysis["is_contract"]:
                    analysis["events"] = receipt.logs

            return analysis

        except Exception as e:
            logger.error(
                f"Error analyzing transaction {tx_hash}: {str(e)}"
            )
            return None

    def prepare_analysis_prompt(self, tx_data: Dict[str, Any]) -> str:
        """Prepare detailed analysis prompt including price context."""
        value_usd = tx_data["value_usd"]
        eth_price = tx_data["eth_price"]

        prompt = f"""Analyze this Ethereum transaction in current market context:

Transaction Details:
- Value: {tx_data['value_eth']:.2f} ETH (${value_usd:,.2f} at current price)
- Current ETH Price: ${eth_price:,.2f}
- From: {tx_data['from_address']}
- To: {tx_data['to_address']}
- Contract Interaction: {tx_data.get('is_contract', False)}
- Gas Used: {tx_data['gas_used']:,} units
- Gas Price: {tx_data['gas_price_gwei']:.2f} Gwei
- Block: {tx_data['block_number']}
- Timestamp: {tx_data['timestamp']}

{f"Event Count: {len(tx_data['events'])} events" if tx_data.get('events') else "No contract events"}

Consider the transaction's significance given the current ETH price of ${eth_price:,.2f} and total USD value of ${value_usd:,.2f}.
Analyze market impact, patterns, risks, and strategic implications."""

        return prompt

    def save_to_csv(self, tx_data: Dict[str, Any], ai_analysis: str):
        """Save transaction data and analysis to CSV."""
        row = [
            tx_data["timestamp"],
            tx_data["transaction_hash"],
            tx_data["from_address"],
            tx_data["to_address"],
            tx_data["value_eth"],
            tx_data["value_usd"],
            tx_data["eth_price"],
            tx_data["gas_used"],
            tx_data["gas_price_gwei"],
            tx_data["block_number"],
            ai_analysis.replace("\n", " "),
        ]

        with open(self.csv_filename, "a", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(row)

    async def monitor_transactions(self):
        """Monitor and analyze transactions one at a time."""
        logger.info(
            f"Starting transaction monitor (minimum value: {self.min_value_eth} ETH)"
        )

        while True:
            try:
                current_block = self.w3.eth.block_number
                block = self.w3.eth.get_block(
                    current_block, full_transactions=True
                )

                for tx in block.transactions:
                    tx_analysis = await self.analyze_transaction(
                        tx.hash
                    )

                    if tx_analysis:
                        # Get AI analysis
                        analysis_prompt = (
                            self.prepare_analysis_prompt(tx_analysis)
                        )
                        ai_analysis = self.agent.run(analysis_prompt)
                        print(ai_analysis)

                        # Save to CSV
                        self.save_to_csv(tx_analysis, ai_analysis)

                        # Print analysis
                        print("\n" + "=" * 50)
                        print("New Transaction Analysis")
                        print(
                            f"Hash: {tx_analysis['transaction_hash']}"
                        )
                        print(
                            f"Value: {tx_analysis['value_eth']:.2f} ETH (${tx_analysis['value_usd']:,.2f})"
                        )
                        print(
                            f"Current ETH Price: ${self.eth_price:,.2f}"
                        )
                        print("=" * 50)
                        print(ai_analysis)
                        print("=" * 50 + "\n")

                await asyncio.sleep(1)  # Wait for next block

            except Exception as e:
                logger.error(f"Error in monitoring loop: {str(e)}")
                await asyncio.sleep(1)


async def main():
    """Entry point for the analysis system."""
    analyzer = EthereumAnalyzer(min_value_eth=100.0)
    await analyzer.monitor_transactions()


if __name__ == "__main__":
    print("Starting Ethereum Transaction Analyzer...")
    print("Saving results to ethereum_analysis.csv")
    print("Press Ctrl+C to stop")
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\nStopping analyzer...")
@ -0,0 +1,75 @@
import os
import asyncio
from swarms import Agent
from swarm_models import OpenAIChat
import time
import psutil

from swarms.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
from dotenv import load_dotenv

load_dotenv()

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
    output_type="string",
    streaming_on=False,
)


# Function to measure time and memory usage
def measure_time_and_memory(func):
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        memory_usage = psutil.Process().memory_info().rss / 1024**2
        print(f"Time taken: {end_time - start_time} seconds")
        print(f"Memory used: {memory_usage} MB")
        return result

    return wrapper


# Function to run the agent asynchronously
@measure_time_and_memory
async def run_agent_async():
    # agent.run is assumed to return an awaitable here
    await asyncio.gather(
        agent.run(
            "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
        )
    )


# Function to run the agent in a fresh event loop
# (note: despite the original naming, no separate thread is used)
@measure_time_and_memory
def run_agent_thread():
    asyncio.run(run_agent_async())


# Run the agent asynchronously and again in a fresh event loop to compare speed
asyncio.run(run_agent_async())
run_agent_thread()
@ -0,0 +1,228 @@
import os
from pathlib import Path
from typing import Optional

from dotenv import load_dotenv
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from loguru import logger
from swarm_models import OpenAIChat

from swarms import Agent, AgentRearrange

load_dotenv()

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


class LlamaIndexDB:
    """A class to manage document indexing and querying using LlamaIndex.

    This class provides functionality to add documents from a directory and query the indexed documents.

    Args:
        data_dir (str): Directory containing documents to index. Defaults to "docs".
        **kwargs: Additional arguments passed to SimpleDirectoryReader and VectorStoreIndex.
            SimpleDirectoryReader kwargs:
                - filename_as_id (bool): Use filenames as document IDs
                - recursive (bool): Recursively read subdirectories
                - required_exts (List[str]): Only read files with these extensions
                - exclude_hidden (bool): Skip hidden files

            VectorStoreIndex kwargs:
                - service_context: Custom service context
                - embed_model: Custom embedding model
                - similarity_top_k (int): Number of similar docs to retrieve
                - store_nodes_override (bool): Override node storage
    """

    def __init__(self, data_dir: str = "docs", **kwargs) -> None:
        """Initialize the LlamaIndexDB with an empty index.

        Args:
            data_dir (str): Directory containing documents to index
            **kwargs: Additional arguments for SimpleDirectoryReader and VectorStoreIndex
        """
        self.data_dir = data_dir
        self.index: Optional[VectorStoreIndex] = None
        self.reader_kwargs = {
            k: v
            for k, v in kwargs.items()
            if k
            in SimpleDirectoryReader.__init__.__code__.co_varnames
        }
        self.index_kwargs = {
            k: v
            for k, v in kwargs.items()
            if k not in self.reader_kwargs
        }

        logger.info("Initialized LlamaIndexDB")
        data_path = Path(self.data_dir)
        if not data_path.exists():
            logger.error(f"Directory not found: {self.data_dir}")
            raise FileNotFoundError(
                f"Directory {self.data_dir} does not exist"
            )

        try:
            documents = SimpleDirectoryReader(
                self.data_dir, **self.reader_kwargs
            ).load_data()
            self.index = VectorStoreIndex.from_documents(
                documents, **self.index_kwargs
            )
            logger.success(
                f"Successfully indexed documents from {self.data_dir}"
            )
        except Exception as e:
            logger.error(f"Error indexing documents: {str(e)}")
            raise

    def query(self, query: str, **kwargs) -> str:
        """Query the indexed documents.

        Args:
            query (str): The query string to search for
            **kwargs: Additional arguments passed to the query engine
                - similarity_top_k (int): Number of similar documents to retrieve
                - streaming (bool): Enable streaming response
                - response_mode (str): Response synthesis mode
                - max_tokens (int): Maximum tokens in response

        Returns:
            str: The response from the query engine

        Raises:
            ValueError: If no documents have been indexed yet
        """
        if self.index is None:
            logger.error("No documents have been indexed yet")
            raise ValueError("Must add documents before querying")

        try:
            query_engine = self.index.as_query_engine(**kwargs)
            response = query_engine.query(query)
            print(response)
            logger.info(f"Successfully queried: {query}")
            return str(response)
        except Exception as e:
            logger.error(f"Error during query: {str(e)}")
            raise


# Initialize specialized medical agents
medical_data_extractor = Agent(
    agent_name="Medical-Data-Extractor",
    system_prompt="You are a specialized medical data extraction expert, trained in processing and analyzing clinical data, lab results, medical imaging reports, and patient records. Your role is to carefully extract relevant medical information while maintaining strict HIPAA compliance and patient confidentiality. Focus on identifying key clinical indicators, test results, vital signs, medication histories, and relevant patient history. Pay special attention to temporal relationships between symptoms, treatments, and outcomes. Ensure all extracted data maintains proper medical context and terminology.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="medical_data_extractor.json",
    user_name="medical_team",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

diagnostic_specialist = Agent(
    agent_name="Diagnostic-Specialist",
    system_prompt="You are a senior diagnostic physician with extensive experience in differential diagnosis. Your role is to analyze patient symptoms, lab results, and clinical findings to develop comprehensive diagnostic assessments. Consider all presenting symptoms, patient history, risk factors, and test results to formulate possible diagnoses. Prioritize diagnoses based on clinical probability and severity. Always consider both common and rare conditions that match the symptom pattern. Recommend additional tests or imaging when needed for diagnostic clarity. Follow evidence-based diagnostic criteria and current medical guidelines.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="diagnostic_specialist.json",
    user_name="medical_team",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

treatment_planner = Agent(
    agent_name="Treatment-Planner",
    system_prompt="You are an experienced clinical treatment specialist focused on developing comprehensive treatment plans. Your expertise covers both acute and chronic condition management, medication selection, and therapeutic interventions. Consider patient-specific factors including age, comorbidities, allergies, and contraindications when recommending treatments. Incorporate both pharmacological and non-pharmacological interventions. Emphasize evidence-based treatment protocols while considering patient preferences and quality of life. Address potential drug interactions and side effects. Include monitoring parameters and treatment milestones.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="treatment_planner.json",
    user_name="medical_team",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

specialist_consultant = Agent(
    agent_name="Specialist-Consultant",
    system_prompt="You are a medical specialist consultant with expertise across multiple disciplines including cardiology, neurology, endocrinology, and internal medicine. Your role is to provide specialized insight for complex cases requiring deep domain knowledge. Analyze cases from your specialist perspective, considering rare conditions and complex interactions between multiple systems. Provide detailed recommendations for specialized testing, imaging, or interventions within your domain. Highlight potential complications or considerations that may not be immediately apparent to general practitioners.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="specialist_consultant.json",
    user_name="medical_team",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

patient_care_coordinator = Agent(
    agent_name="Patient-Care-Coordinator",
    system_prompt="You are a patient care coordinator specializing in comprehensive healthcare management. Your role is to ensure holistic patient care by coordinating between different medical specialists, considering patient needs, and managing care transitions. Focus on patient education, medication adherence, lifestyle modifications, and follow-up care planning. Consider social determinants of health, patient resources, and access to care. Develop actionable care plans that patients can realistically follow. Coordinate with other healthcare providers to ensure continuity of care and proper implementation of treatment plans.",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="patient_care_coordinator.json",
    user_name="medical_team",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)


# Initialize the AgentRearrange workflow to coordinate the medical agents
router = AgentRearrange(
    name="medical-diagnosis-treatment-swarm",
    description="Collaborative medical team for comprehensive patient diagnosis and treatment planning",
    max_loops=1,  # Limit to one iteration through the agent flow
    agents=[
        medical_data_extractor,  # First agent to extract medical data
        diagnostic_specialist,  # Second agent to analyze and diagnose
        treatment_planner,  # Third agent to plan treatment
        specialist_consultant,  # Fourth agent to provide specialist input
        patient_care_coordinator,  # Final agent to coordinate care plan
    ],
    # Configure the document storage and retrieval system
    memory_system=LlamaIndexDB(
        data_dir="docs",  # Directory containing medical documents
        filename_as_id=True,  # Use filenames as document identifiers
        recursive=True,  # Search subdirectories
        # required_exts=[".txt", ".pdf", ".docx"],  # Supported file types
        similarity_top_k=10,  # Return top 10 most relevant documents
    ),
    # Define the sequential flow of information between agents
    flow=f"{medical_data_extractor.agent_name} -> {diagnostic_specialist.agent_name} -> {treatment_planner.agent_name} -> {specialist_consultant.agent_name} -> {patient_care_coordinator.agent_name}",
)

# Example usage
if __name__ == "__main__":
    # Run a comprehensive medical analysis task for patient Lucas Brown
    router.run(
        "Analyze Lucas Brown's medical data to provide a diagnosis and treatment plan"
    )
@ -0,0 +1,63 @@
import os

import google.generativeai as genai
from loguru import logger


class GeminiModel:
    """
    Represents a GeminiModel instance for generating text based on user input.
    """

    def __init__(
        self,
        temperature: float = 1.0,
        top_p: float = 0.95,
        top_k: int = 40,
    ):
        """
        Initializes the GeminiModel by setting up the API key, generation configuration, and starting a chat session.
        Raises a KeyError if the GEMINI_API_KEY environment variable is not found.
        """
        try:
            api_key = os.environ["GEMINI_API_KEY"]
            genai.configure(api_key=api_key)
            # Use the supplied sampling parameters rather than hardcoded values
            self.generation_config = {
                "temperature": temperature,
                "top_p": top_p,
                "top_k": top_k,
                "max_output_tokens": 8192,
                "response_mime_type": "text/plain",
            }
            self.model = genai.GenerativeModel(
                model_name="gemini-1.5-pro",
                generation_config=self.generation_config,
            )
            self.chat_session = self.model.start_chat(history=[])
        except KeyError as e:
            logger.error(f"Environment variable not found: {e}")
            raise

    def run(self, task: str) -> str:
        """
        Sends a message to the chat session and returns the response text.
        Raises an Exception if there's an error running the GeminiModel.

        Args:
            task (str): The input task or message to send to the chat session.

        Returns:
            str: The response text from the chat session.
        """
        try:
            response = self.chat_session.send_message(task)
            return response.text
        except Exception as e:
            logger.error(f"Error running GeminiModel: {e}")
            raise


# Example usage
if __name__ == "__main__":
    gemini_model = GeminiModel()
    output = gemini_model.run("INSERT_INPUT_HERE")
    print(output)
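# Usage sketch (assumes GEMINI_API_KEY is exported in the environment): a
# lower sampling temperature trades diversity for more deterministic replies.
#
# focused_model = GeminiModel(temperature=0.2, top_p=0.9, top_k=20)
# print(focused_model.run("Summarize this codebase in one sentence."))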
File diff suppressed because it is too large
@ -0,0 +1,420 @@
import os
from typing import List, Dict, Any, Optional, Callable, get_type_hints
from dataclasses import dataclass, field
import json
from datetime import datetime
import inspect
import typing
from typing import Union
from swarms import Agent
from swarm_models import OpenAIChat


@dataclass
class ToolDefinition:
    name: str
    description: str
    parameters: Dict[str, Any]
    required_params: List[str]
    callable: Optional[Callable] = None


def extract_type_hints(func: Callable) -> Dict[str, Any]:
    """Extract parameter types from function type hints."""
    return typing.get_type_hints(func)


def extract_tool_info(func: Callable) -> ToolDefinition:
    """Extract tool information from a callable function."""
    # Get function name
    name = func.__name__

    # Get docstring
    description = inspect.getdoc(func) or "No description available"

    # Get parameters and their types
    signature = inspect.signature(func)
    type_hints = extract_type_hints(func)

    parameters = {}
    required_params = []

    for param_name, param in signature.parameters.items():
        # Skip self parameter for methods
        if param_name == "self":
            continue

        param_type = type_hints.get(param_name, Any)

        # Handle optional parameters
        is_optional = (
            param.default != inspect.Parameter.empty
            or getattr(param_type, "__origin__", None) is Union
            and type(None) in param_type.__args__
        )

        if not is_optional:
            required_params.append(param_name)

        parameters[param_name] = {
            "type": str(param_type),
            "default": (
                None
                if param.default is inspect.Parameter.empty
                else param.default
            ),
            "required": not is_optional,
        }

    return ToolDefinition(
        name=name,
        description=description,
        parameters=parameters,
        required_params=required_params,
        callable=func,
    )

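# Quick illustration of the helper above (hypothetical function, not part of
# this module): required vs. optional parameters are inferred from defaults.
#
# def greet(name: str, punctuation: str = "!") -> str:
#     """Return a greeting."""
#     return f"Hello, {name}{punctuation}"
#
# extract_tool_info(greet).required_params  # -> ["name"]
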
@dataclass
class FunctionSpec:
    """Specification for a callable tool function."""

    name: str
    description: str
    parameters: Dict[
        str, dict
    ]  # Contains type and description for each parameter
    return_type: str
    return_description: str


@dataclass
class ExecutionStep:
    """Represents a single step in the execution plan."""

    step_id: int
    function_name: str
    parameters: Dict[str, Any]
    expected_output: str
    completed: bool = False
    result: Any = None


@dataclass
class ExecutionContext:
    """Maintains state during execution."""

    task: str
    steps: List[ExecutionStep] = field(default_factory=list)
    results: Dict[int, Any] = field(default_factory=dict)
    current_step: int = 0
    history: List[Dict[str, Any]] = field(default_factory=list)


class ToolAgent:
    def __init__(
        self,
        functions: List[Callable],
        openai_api_key: str,
        model_name: str = "gpt-4",
        temperature: float = 0.1,
    ):
        self.functions = {func.__name__: func for func in functions}
        self.function_specs = self._analyze_functions(functions)

        self.model = OpenAIChat(
            openai_api_key=openai_api_key,
            model_name=model_name,
            temperature=temperature,
        )

        self.system_prompt = self._create_system_prompt()
        self.agent = Agent(
            agent_name="Tool-Agent",
            system_prompt=self.system_prompt,
            llm=self.model,
            max_loops=1,
            verbose=True,
        )

    def _analyze_functions(
        self, functions: List[Callable]
    ) -> Dict[str, FunctionSpec]:
        """Analyze functions to create detailed specifications."""
        specs = {}
        for func in functions:
            hints = get_type_hints(func)
            sig = inspect.signature(func)
            doc = inspect.getdoc(func) or ""

            # Parse docstring for parameter and return descriptions,
            # resetting return_desc for each function
            param_descriptions = {}
            return_desc = ""
            for line in doc.split("\n"):
                if ":param" in line:
                    param_name = (
                        line.split(":param")[1].split(":")[0].strip()
                    )
                    desc = line.split(":", 2)[-1].strip()
                    param_descriptions[param_name] = desc
                elif ":return:" in line:
                    return_desc = line.split(":return:")[1].strip()

            # Build parameter specifications
            parameters = {}
            for name, param in sig.parameters.items():
                param_type = hints.get(name, Any)
                parameters[name] = {
                    "type": str(param_type),
                    "type_class": param_type,
                    "description": param_descriptions.get(name, ""),
                    "required": param.default == param.empty,
                }

            specs[func.__name__] = FunctionSpec(
                name=func.__name__,
                description=doc.split("\n")[0],
                parameters=parameters,
                return_type=str(hints.get("return", Any)),
                return_description=return_desc,
            )

        return specs

    def _create_system_prompt(self) -> str:
        """Create system prompt with detailed function specifications."""
        functions_desc = []
        for spec in self.function_specs.values():
            params_desc = []
            for name, details in spec.parameters.items():
                params_desc.append(
                    f"  - {name}: {details['type']} - {details['description']}"
                )

            functions_desc.append(
                f"""
Function: {spec.name}
Description: {spec.description}
Parameters:
{chr(10).join(params_desc)}
Returns: {spec.return_type} - {spec.return_description}
"""
            )

        return f"""You are an AI agent that creates and executes plans using available functions.

Available Functions:
{chr(10).join(functions_desc)}

You must respond in two formats depending on the phase:

1. Planning Phase:
{{
    "phase": "planning",
    "plan": {{
        "description": "Overall plan description",
        "steps": [
            {{
                "step_id": 1,
                "function": "function_name",
                "parameters": {{
                    "param1": "value1",
                    "param2": "value2"
                }},
                "purpose": "Why this step is needed"
            }}
        ]
    }}
}}

2. Execution Phase:
{{
    "phase": "execution",
    "analysis": "Analysis of current result",
    "next_action": {{
        "type": "continue|request_input|complete",
        "reason": "Why this action was chosen",
        "needed_input": {{}}  # If requesting input
    }}
}}

Always:
- Use exact function names
- Ensure parameter types match specifications
- Provide clear reasoning for each decision
"""

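    # Illustrative planning-phase reply the prompt above asks the model to
    # produce (actual model output may of course vary):
    #
    # {
    #     "phase": "planning",
    #     "plan": {
    #         "description": "Compute the 10-year investment value",
    #         "steps": [
    #             {
    #                 "step_id": 1,
    #                 "function": "calculate_investment_return",
    #                 "parameters": {"principal": 10000, "rate": 0.07, "years": 10},
    #                 "purpose": "Apply compound interest over 10 years"
    #             }
    #         ]
    #     }
    # }
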
    def _execute_function(
        self, spec: FunctionSpec, parameters: Dict[str, Any]
    ) -> Any:
        """Execute a function with type checking."""
        converted_params = {}
        for name, value in parameters.items():
            param_spec = spec.parameters[name]
            try:
                # Convert value to required type
                param_type = param_spec["type_class"]
                if param_type in (int, float, str, bool):
                    converted_params[name] = param_type(value)
                else:
                    converted_params[name] = value
            except (ValueError, TypeError) as e:
                raise ValueError(
                    f"Parameter '{name}' conversion failed: {str(e)}"
                )

        return self.functions[spec.name](**converted_params)

    def run(self, task: str) -> Dict[str, Any]:
        """Execute task with planning and step-by-step execution."""
        context = ExecutionContext(task=task)
        execution_log = {
            "task": task,
            "start_time": datetime.utcnow().isoformat(),
            "steps": [],
            "final_result": None,
        }

        try:
            # Planning phase
            plan_prompt = f"Create a plan to: {task}"
            plan_response = self.agent.run(plan_prompt)
            plan_data = json.loads(
                plan_response.replace("System:", "").strip()
            )

            # Convert plan to execution steps
            for step in plan_data["plan"]["steps"]:
                context.steps.append(
                    ExecutionStep(
                        step_id=step["step_id"],
                        function_name=step["function"],
                        parameters=step["parameters"],
                        expected_output=step["purpose"],
                    )
                )

            # Execution phase
            while context.current_step < len(context.steps):
                step = context.steps[context.current_step]
                print(
                    f"\nExecuting step {step.step_id}: {step.function_name}"
                )

                try:
                    # Execute function
                    spec = self.function_specs[step.function_name]
                    result = self._execute_function(
                        spec, step.parameters
                    )
                    context.results[step.step_id] = result
                    step.completed = True
                    step.result = result

                    # Get agent's analysis
                    analysis_prompt = f"""
                    Step {step.step_id} completed:
                    Function: {step.function_name}
                    Result: {json.dumps(result)}
                    Remaining steps: {len(context.steps) - context.current_step - 1}

                    Analyze the result and decide next action.
                    """

                    analysis_response = self.agent.run(
                        analysis_prompt
                    )
                    analysis_data = json.loads(
                        analysis_response.replace(
                            "System:", ""
                        ).strip()
                    )

                    execution_log["steps"].append(
                        {
                            "step_id": step.step_id,
                            "function": step.function_name,
                            "parameters": step.parameters,
                            "result": result,
                            "analysis": analysis_data,
                        }
                    )

                    # Stop once the agent reports completion on the final
                    # step; otherwise always advance to the next step so the
                    # loop cannot re-run the same step indefinitely.
                    if (
                        analysis_data["next_action"]["type"]
                        == "complete"
                        and context.current_step
                        >= len(context.steps) - 1
                    ):
                        break

                    context.current_step += 1

                except Exception as e:
                    print(f"Error in step {step.step_id}: {str(e)}")
                    execution_log["steps"].append(
                        {
                            "step_id": step.step_id,
                            "function": step.function_name,
                            "parameters": step.parameters,
                            "error": str(e),
                        }
                    )
                    raise

            # Final analysis
            final_prompt = f"""
            Task completed. Results:
            {json.dumps(context.results, indent=2)}

            Provide final analysis and recommendations.
            """

            final_analysis = self.agent.run(final_prompt)
            execution_log["final_result"] = {
                "success": True,
                "results": context.results,
                "analysis": json.loads(
                    final_analysis.replace("System:", "").strip()
                ),
            }

        except Exception as e:
            execution_log["final_result"] = {
                "success": False,
                "error": str(e),
            }

        execution_log["end_time"] = datetime.utcnow().isoformat()
        return execution_log


def calculate_investment_return(
    principal: float, rate: float, years: int
) -> float:
    """Calculate investment return with compound interest.

    :param principal: Initial investment amount in dollars
    :param rate: Annual interest rate as decimal (e.g., 0.07 for 7%)
    :param years: Number of years to invest
    :return: Final investment value
    """
    return principal * (1 + rate) ** years


# Example usage
if __name__ == "__main__":
    agent = ToolAgent(
        functions=[calculate_investment_return],
        openai_api_key=os.getenv("OPENAI_API_KEY"),
    )

    result = agent.run(
        "Calculate returns for $10000 invested at 7% for 10 years"
    )
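# Worked check of the compound-interest formula above:
# 10000 * (1.07 ** 10) ≈ 19671.51, so a correct single-step plan should
# report a final value of roughly $19,671.51.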
@ -0,0 +1,113 @@
import os
from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv

# Custom system prompt for VC legal document generation
VC_LEGAL_AGENT_PROMPT = """You are a specialized legal document assistant focusing on venture capital documentation.
Your role is to help draft preliminary versions of common VC legal documents while adhering to these guidelines:

1. Always include standard legal disclaimers
2. Follow standard VC document structures
3. Flag areas that need attorney review
4. Request necessary information for document completion
5. Maintain consistency across related documents
6. Output <DONE> only when document is complete and verified

Remember: All output should be marked as 'DRAFT' and require professional legal review."""


def create_vc_legal_agent():
    load_dotenv()

    # Get the Groq API key from the environment variable
    api_key = os.getenv("GROQ_API_KEY")

    # Configure the model with appropriate parameters for legal work
    model = OpenAIChat(
        openai_api_base="https://api.groq.com/openai/v1",
        openai_api_key=api_key,
        model_name="llama-3.1-70b-versatile",
        temperature=0.1,
    )

    # Initialize the persistent agent
    agent = Agent(
        agent_name="VC-Legal-Document-Agent",
        system_prompt=VC_LEGAL_AGENT_PROMPT,
        llm=model,
        max_loops="auto",  # Allows multiple iterations until completion
        stopping_token="<DONE>",  # Agent will continue until this token is output
        autosave=True,
        dashboard=True,  # Enable dashboard for monitoring
        verbose=True,
        dynamic_temperature_enabled=False,  # Disable for consistency in legal documents
        saved_state_path="vc_legal_agent_state.json",
        user_name="legal_corp",
        retry_attempts=3,
        context_length=200000,
        return_step_meta=True,
        output_type="string",
        streaming_on=False,
    )

    return agent


def generate_legal_document(agent, document_type, parameters):
    """
    Generate a legal document with multiple refinement iterations

    Args:
        agent: The initialized VC legal agent
        document_type: Type of document to generate (e.g., "term_sheet", "investment_agreement")
        parameters: Dict containing necessary parameters for the document

    Returns:
        str: The generated document content
    """
    prompt = f"""
    Generate a {document_type} with the following parameters:
    {parameters}

    Please follow these steps:
    1. Create initial draft
    2. Review for completeness
    3. Add necessary legal disclaimers
    4. Verify all required sections
    5. Output <DONE> when complete

    Include [REQUIRES LEGAL REVIEW] tags for sections needing attorney attention.
    """

    return agent.run(prompt)


# Example usage
if __name__ == "__main__":
    # Initialize the agent
    legal_agent = create_vc_legal_agent()

    # Example parameters for a term sheet
    parameters = {
        "company_name": "TechStartup Inc.",
        "investment_amount": "$5,000,000",
        "valuation": "$20,000,000",
        "investor_rights": [
            "Board seat",
            "Pro-rata rights",
            "Information rights",
        ],
        "type_of_security": "Series A Preferred Stock",
    }

    # Generate a term sheet
    document = generate_legal_document(
        legal_agent, "term_sheet", parameters
    )

    # Save the generated document
    with open("generated_term_sheet_draft.md", "w") as f:
        f.write(document)
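    # The same persistent agent can draft related documents in sequence;
    # "investment_agreement" below is illustrative, not a fixed set of types:
    # agreement = generate_legal_document(
    #     legal_agent, "investment_agreement", parameters
    # )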
@ -0,0 +1,319 @@
"""
Zoe - Real Estate Agent

"""

from typing import Optional, Dict, Any, List
from dataclasses import dataclass
from datetime import datetime
import os
import json
import requests
from loguru import logger
from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv
from enum import Enum

# Configure loguru logger
logger.add(
    "logs/real_estate_agent_{time}.log",
    rotation="500 MB",
    retention="10 days",
    level="INFO",
    format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
)


class PropertyType(str, Enum):
    """Enum for property types"""

    OFFICE = "office"
    RETAIL = "retail"
    INDUSTRIAL = "industrial"
    MIXED_USE = "mixed-use"
    LAND = "land"


@dataclass
class PropertyListing:
    """Data class for commercial property listings"""

    property_id: str
    address: str
    city: str
    state: str
    zip_code: str
    price: float
    square_footage: float
    property_type: PropertyType
    zoning: str
    listing_date: datetime
    lat: float
    lng: float
    description: Optional[str] = None
    features: Optional[List[str]] = None
    images: Optional[List[str]] = None


class PropertyRadarAPI:
    """Client for PropertyRadar API integration"""

    def __init__(self, api_key: str):
        """Initialize PropertyRadar API client

        Args:
            api_key (str): PropertyRadar API key
        """
        self.api_key = api_key
        self.base_url = "https://api.propertyradar.com/v1"
        self.session = requests.Session()
        self.session.headers.update(
            {
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            }
        )

    def search_properties(
        self,
        max_price: float = 10_000_000,
        property_types: Optional[List[PropertyType]] = None,
        location: Optional[Dict[str, Any]] = None,
        min_sqft: Optional[float] = None,
        max_sqft: Optional[float] = None,
        page: int = 1,
        limit: int = 20,
    ) -> List[PropertyListing]:
        """
        Search for commercial properties using PropertyRadar API

        Args:
            max_price (float): Maximum property price
            property_types (Optional[List[PropertyType]]): Types of properties to search for
            location (Optional[Dict[str, Any]]): Location criteria (city, county, or coordinates)
            min_sqft (Optional[float]): Minimum square footage
            max_sqft (Optional[float]): Maximum square footage
            page (int): Page number for pagination
            limit (int): Number of results per page

        Returns:
            List[PropertyListing]: List of matching properties
        """
        try:
            # Build the query parameters
            params = {
                "price_max": max_price,
                "property_types": (
                    [pt.value for pt in property_types]
                    if property_types
                    else None
                ),
                "page": page,
                "limit": limit,
                "for_sale": True,
                "state": "FL",  # Florida only
                "commercial_property": True,
            }

            # Add location parameters
            if location:
                params.update(location)

            # Add square footage filters
            if min_sqft:
                params["square_feet_min"] = min_sqft
            if max_sqft:
                params["square_feet_max"] = max_sqft

            # Make the API request, dropping unset parameters
            response = self.session.get(
                f"{self.base_url}/properties",
                params={
                    k: v for k, v in params.items() if v is not None
                },
            )
            response.raise_for_status()

            # Parse the response
            properties_data = response.json()

            # Convert to PropertyListing objects
            return [
                PropertyListing(
                    property_id=prop["id"],
                    address=prop["address"],
                    city=prop["city"],
                    state=prop["state"],
                    zip_code=prop["zip_code"],
                    price=float(prop["price"]),
                    square_footage=float(prop["square_feet"]),
                    property_type=PropertyType(prop["property_type"]),
                    zoning=prop["zoning"],
                    listing_date=datetime.fromisoformat(
                        prop["list_date"]
                    ),
                    lat=float(prop["latitude"]),
                    lng=float(prop["longitude"]),
                    description=prop.get("description"),
                    features=prop.get("features", []),
                    images=prop.get("images", []),
                )
                for prop in properties_data["results"]
            ]

        except requests.RequestException as e:
            logger.error(f"Error fetching properties: {str(e)}")
            raise


class CommercialRealEstateAgent:
    """Agent for searching and analyzing commercial real estate properties"""

    def __init__(
        self,
        openai_api_key: str,
        propertyradar_api_key: str,
        model_name: str = "gpt-4",
        temperature: float = 0.1,
        saved_state_path: Optional[str] = None,
    ):
        """Initialize the real estate agent

        Args:
            openai_api_key (str): OpenAI API key
            propertyradar_api_key (str): PropertyRadar API key
            model_name (str): Name of the LLM model to use
            temperature (float): Temperature setting for the LLM
            saved_state_path (Optional[str]): Path to save agent state
        """
        self.property_api = PropertyRadarAPI(propertyradar_api_key)

        # Initialize OpenAI model
        self.model = OpenAIChat(
            openai_api_key=openai_api_key,
            model_name=model_name,
            temperature=temperature,
        )

        # Initialize the agent
        self.agent = Agent(
            agent_name="Commercial-Real-Estate-Agent",
            system_prompt=self._get_system_prompt(),
            llm=self.model,
            max_loops=1,
            autosave=True,
            dashboard=False,
            verbose=True,
            saved_state_path=saved_state_path,
            context_length=200000,
            streaming_on=False,
        )

        logger.info(
            "Commercial Real Estate Agent initialized successfully"
        )

    def _get_system_prompt(self) -> str:
        """Get the system prompt for the agent"""
        return """You are a specialized commercial real estate agent assistant focused on Central Florida properties.
Your primary responsibilities are:
1. Search for commercial properties under $10 million
2. Focus on properties zoned for commercial use
3. Provide detailed analysis of property features, location benefits, and potential ROI
4. Consider local market conditions and growth potential
5. Verify zoning compliance and restrictions

When analyzing properties, consider:
- Current market valuations
- Local business development plans
- Traffic patterns and accessibility
- Nearby amenities and businesses
- Future development potential"""

    def search_properties(
        self,
        max_price: float = 10_000_000,
        property_types: Optional[List[PropertyType]] = None,
        location: Optional[Dict[str, Any]] = None,
        min_sqft: Optional[float] = None,
        max_sqft: Optional[float] = None,
    ) -> List[Dict[str, Any]]:
        """
        Search for properties and provide analysis

        Args:
            max_price (float): Maximum property price
            property_types (Optional[List[PropertyType]]): Types of properties to search
            location (Optional[Dict[str, Any]]): Location criteria
            min_sqft (Optional[float]): Minimum square footage
            max_sqft (Optional[float]): Maximum square footage

        Returns:
            List[Dict[str, Any]]: List of properties with analysis
        """
        try:
            # Search for properties
            properties = self.property_api.search_properties(
                max_price=max_price,
                property_types=property_types,
                location=location,
                min_sqft=min_sqft,
                max_sqft=max_sqft,
            )

            # Analyze each property
            analyzed_properties = []
            for prop in properties:
                analysis = self.agent.run(
                    f"Analyze this commercial property:\n"
                    f"Address: {prop.address}, {prop.city}, FL {prop.zip_code}\n"
                    f"Price: ${prop.price:,.2f}\n"
                    f"Square Footage: {prop.square_footage:,.0f}\n"
                    f"Property Type: {prop.property_type.value}\n"
                    f"Zoning: {prop.zoning}\n"
                    f"Description: {prop.description or 'Not provided'}"
                )

                analyzed_properties.append(
                    {"property": prop.__dict__, "analysis": analysis}
                )

            logger.info(
                f"Successfully analyzed {len(analyzed_properties)} properties"
            )
            return analyzed_properties

        except Exception as e:
            logger.error(
                f"Error in property search and analysis: {str(e)}"
            )
            raise


def main():
    """Main function to demonstrate usage"""
    load_dotenv()

    # Initialize the agent
    agent = CommercialRealEstateAgent(
        openai_api_key=os.getenv("OPENAI_API_KEY"),
        propertyradar_api_key=os.getenv("PROPERTYRADAR_API_KEY"),
        saved_state_path="real_estate_agent_state.json",
    )

    # Example search
    results = agent.search_properties(
        max_price=5_000_000,
        property_types=[PropertyType.RETAIL, PropertyType.OFFICE],
        location={"city": "Orlando", "radius_miles": 25},
        min_sqft=2000,
    )

    # Save results
    with open("search_results.json", "w") as f:
        json.dump(results, f, default=str, indent=2)


if __name__ == "__main__":
    main()
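# Another search sketch under the same assumptions (env vars set, PropertyRadar
# parameters as modeled above): industrial sites between 5,000 and 20,000 sqft.
#
# agent.search_properties(
#     max_price=8_000_000,
#     property_types=[PropertyType.INDUSTRIAL],
#     location={"city": "Tampa", "radius_miles": 30},
#     min_sqft=5_000,
#     max_sqft=20_000,
# )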
@ -0,0 +1,118 @@
import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat

load_dotenv()

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


# Initialize specialized agents
data_extractor_agent = Agent(
    agent_name="Data-Extractor",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="data_extractor_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

summarizer_agent = Agent(
    agent_name="Document-Summarizer",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="summarizer_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

financial_analyst_agent = Agent(
    agent_name="Financial-Analyst",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="financial_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

market_analyst_agent = Agent(
    agent_name="Market-Analyst",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="market_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

operational_analyst_agent = Agent(
    agent_name="Operational-Analyst",
    system_prompt=None,
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="operational_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

# Initialize the SequentialWorkflow
router = SequentialWorkflow(
    name="pe-document-analysis-swarm",
    description="Analyze documents for private equity due diligence and investment decision-making",
    max_loops=1,
    agents=[
        data_extractor_agent,
        summarizer_agent,
        financial_analyst_agent,
        market_analyst_agent,
        operational_analyst_agent,
    ],
    output_type="all",
)

# Example usage
if __name__ == "__main__":
    # Run a comprehensive private equity document analysis task
    result = router.run(
        "Where is the best place to find template term sheets for series A startups? Provide links and references",
        img=None,
    )
    print(result)
@ -0,0 +1,143 @@
import os
from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat

load_dotenv()

# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


# Initialize specialized agents
data_extractor_agent = Agent(
    agent_name="Data-Extractor",
    system_prompt="""You are a data extraction specialist. Your role is to:
1. Extract key information, data points, and metrics from documents
2. Identify and pull out important facts, figures, and statistics
3. Structure extracted data in a clear, organized format
4. Flag any inconsistencies or missing data
5. Ensure accuracy in data extraction while maintaining context""",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="data_extractor_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

summarizer_agent = Agent(
    agent_name="Document-Summarizer",
    system_prompt="""You are a document summarization expert. Your role is to:
1. Create concise, comprehensive summaries of documents
2. Highlight key points and main takeaways
3. Maintain the essential meaning while reducing length
4. Structure summaries in a logical, readable format
5. Identify and emphasize critical insights""",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="summarizer_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

financial_analyst_agent = Agent(
    agent_name="Financial-Analyst",
    system_prompt="""You are a financial analysis expert. Your role is to:
1. Analyze financial statements and metrics
2. Evaluate company valuations and financial projections
3. Assess financial risks and opportunities
4. Provide insights on financial performance and health
5. Make recommendations based on financial analysis""",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="financial_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

market_analyst_agent = Agent(
    agent_name="Market-Analyst",
    system_prompt="""You are a market analysis expert. Your role is to:
1. Analyze market trends and dynamics
2. Evaluate competitive landscape and market positioning
3. Identify market opportunities and threats
4. Assess market size and growth potential
5. Provide strategic market insights and recommendations""",
    llm=model,
    max_loops=1,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="market_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

operational_analyst_agent = Agent(
    agent_name="Operational-Analyst",
    system_prompt="""You are an operational analysis expert. Your role is to:
1. Analyze business operations and processes
2. Evaluate operational efficiency and effectiveness
3. Identify operational risks and opportunities
4. Assess scalability and growth potential
5. Provide recommendations for operational improvements""",
    llm=model,
    max_loops=2,
    autosave=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="operational_analyst_agent.json",
    user_name="pe_firm",
    retry_attempts=1,
    context_length=200000,
    output_type="string",
)

# Initialize the SequentialWorkflow
router = SequentialWorkflow(
    name="pe-document-analysis-swarm",
    description="Analyze documents for private equity due diligence and investment decision-making",
    max_loops=1,
    agents=[
        data_extractor_agent,
        summarizer_agent,
        financial_analyst_agent,
        market_analyst_agent,
        operational_analyst_agent,
    ],
    output_type="all",
)

# Example usage
if __name__ == "__main__":
    # Run a comprehensive private equity document analysis task
    result = router.run(
        "Where is the best place to find template term sheets for series A startups? Provide links and references",
        no_use_clusterops=True,
    )
    print(result)
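# Note: SequentialWorkflow executes the agents in list order, passing each
# agent's output along as context for the next; with output_type="all" the
# returned result includes every agent's response, not just the last one.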
@ -0,0 +1,238 @@
"""
Todo

- You send structured data to the swarm through the users form they make
- then connect rag for every agent using llama index to remember all the students data
- structured outputs
"""

import os
from dotenv import load_dotenv
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat, OpenAIFunctionCaller
from pydantic import BaseModel
from typing import List


class CollegeLog(BaseModel):
    college_name: str
    college_description: str
    college_admission_requirements: str


class CollegesRecommendation(BaseModel):
    colleges: List[CollegeLog]
    reasoning: str


load_dotenv()

# Get the API key from environment variable
api_key = os.getenv("GROQ_API_KEY")

# Initialize the model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

FINAL_AGENT_PROMPT = """
You are a college selection final decision maker. Your role is to:
1. Synthesize all previous analyses and discussions
2. Weigh competing factors and trade-offs
3. Create a final ranked list of recommended colleges
4. Provide clear rationale for each recommendation
5. Include specific action items for each selected school
6. Outline next steps in the application process

Focus on creating actionable, well-reasoned final recommendations that
balance all relevant factors and stakeholder input.
"""

function_caller = OpenAIFunctionCaller(
    system_prompt=FINAL_AGENT_PROMPT,
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    base_model=CollegesRecommendation,
    parallel_tool_calls=True,
)

# Student Profile Analyzer Agent
profile_analyzer_agent = Agent(
    agent_name="Student-Profile-Analyzer",
    system_prompt="""You are an expert student profile analyzer. Your role is to:
1. Analyze academic performance, test scores, and extracurricular activities
2. Identify student's strengths, weaknesses, and unique qualities
3. Evaluate personal statements and essays
4. Assess leadership experiences and community involvement
5. Determine student's preferences for college environment, location, and programs
6. Create a comprehensive student profile summary

Always consider both quantitative metrics (GPA, test scores) and qualitative aspects
(personal growth, challenges overcome, unique perspectives).""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="profile_analyzer_agent.json",
    user_name="student",
    context_length=200000,
    output_type="string",
)

# College Research Agent
college_research_agent = Agent(
    agent_name="College-Research-Specialist",
    system_prompt="""You are a college research specialist. Your role is to:
1. Maintain updated knowledge of college admission requirements
2. Research academic programs, campus culture, and student life
3. Analyze admission statistics and trends
4. Evaluate college-specific opportunities and resources
5. Consider financial aid availability and scholarship opportunities
6. Track historical admission data and acceptance rates

Focus on providing accurate, comprehensive information about each institution
while considering both academic and cultural fit factors.""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="college_research_agent.json",
    user_name="researcher",
    context_length=200000,
    output_type="string",
)

# College Match Agent
college_match_agent = Agent(
    agent_name="College-Match-Maker",
    system_prompt="""You are a college matching specialist. Your role is to:
1. Compare student profiles with college requirements
2. Evaluate fit based on academic, social, and cultural factors
3. Consider geographic preferences and constraints
4. Assess financial fit and aid opportunities
5. Create tiered lists of reach, target, and safety schools
6. Explain the reasoning behind each match

Always provide a balanced list with realistic expectations while
considering both student preferences and admission probability.""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="college_match_agent.json",
    user_name="matcher",
    context_length=200000,
    output_type="string",
)

# Debate Moderator Agent
debate_moderator_agent = Agent(
    agent_name="Debate-Moderator",
    system_prompt="""You are a college selection debate moderator. Your role is to:
1. Facilitate discussions between different perspectives
2. Ensure all relevant factors are considered
3. Challenge assumptions and biases
4. Synthesize different viewpoints
5. Guide the group toward consensus
6. Document key points of agreement and disagreement

Maintain objectivity while ensuring all important factors are thoroughly discussed
and evaluated.""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="debate_moderator_agent.json",
    user_name="moderator",
    context_length=200000,
    output_type="string",
)

# Critique Agent
critique_agent = Agent(
    agent_name="College-Selection-Critic",
    system_prompt="""You are a college selection critic. Your role is to:
1. Evaluate the strength of college matches
2. Identify potential overlooked factors
3. Challenge assumptions in the selection process
4. Assess risks and potential drawbacks
5. Provide constructive feedback on selections
6. Suggest alternative options when appropriate

Focus on constructive criticism that helps improve the final college list
while maintaining realistic expectations.""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="critique_agent.json",
    user_name="critic",
    context_length=200000,
    output_type="string",
)

# Final Decision Agent
final_decision_agent = Agent(
    agent_name="Final-Decision-Maker",
    system_prompt="""
You are a college selection final decision maker. Your role is to:
1. Synthesize all previous analyses and discussions
2. Weigh competing factors and trade-offs
3. Create a final ranked list of recommended colleges
4. Provide clear rationale for each recommendation
5. Include specific action items for each selected school
6. Outline next steps in the application process

Focus on creating actionable, well-reasoned final recommendations that
balance all relevant factors and stakeholder input.
""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="final_decision_agent.json",
    user_name="decision_maker",
    context_length=200000,
    output_type="string",
)

# Initialize the AgentRearrange workflow
college_selection_workflow = AgentRearrange(
    name="college-selection-swarm",
    description="Comprehensive college selection and analysis system",
    max_loops=1,
    agents=[
        profile_analyzer_agent,
        college_research_agent,
        college_match_agent,
        debate_moderator_agent,
        critique_agent,
        final_decision_agent,
    ],
    output_type="all",
    flow=f"{profile_analyzer_agent.agent_name} -> {college_research_agent.agent_name} -> {college_match_agent.agent_name} -> {debate_moderator_agent.agent_name} -> {critique_agent.agent_name} -> {final_decision_agent.agent_name}",
)

# Example usage
if __name__ == "__main__":
    # Example student profile input
    student_profile = """
    Student Profile:
    - GPA: 3.8
    - SAT: 1450
    - Interests: Computer Science, Robotics
    - Location Preference: East Coast
    - Extracurriculars: Robotics Club President, Math Team
    - Budget: Need financial aid
    - Preferred Environment: Medium-sized urban campus
    """

    # Run the comprehensive college selection analysis
    result = college_selection_workflow.run(
        student_profile,
        no_use_clusterops=True,
    )
    print(result)
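# The `flow` string above routes work by agent_name: "A -> B" runs B after A.
# AgentRearrange also accepts comma-separated names within a stage (e.g.
# "A -> B, C -> D") to fan work out to agents in parallel before converging.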
@ -0,0 +1,64 @@
"""
Todo

- You send structured data to the swarm through the users form they make
- then connect rag for every agent using llama index to remember all the students data
- structured outputs
"""

import os
from dotenv import load_dotenv
from swarm_models import OpenAIChat, OpenAIFunctionCaller
from pydantic import BaseModel
from typing import List


class CollegeLog(BaseModel):
    college_name: str
    college_description: str
    college_admission_requirements: str


class CollegesRecommendation(BaseModel):
    colleges: List[CollegeLog]
    reasoning: str


load_dotenv()

# Get the API key from environment variable
api_key = os.getenv("GROQ_API_KEY")

# Initialize the model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

function_caller = OpenAIFunctionCaller(
    system_prompt="""You are a college selection final decision maker. Your role is to:
    - Balance all relevant factors and stakeholder input.
    - Only return the output in the schema format.
    """,
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    base_model=CollegesRecommendation,
    # parallel_tool_calls=True,
)


print(
    function_caller.run(
        """
        Student Profile: Kye Gomez
        - GPA: 3.8
        - SAT: 1450
        - Interests: Computer Science, Robotics
        - Location Preference: East Coast
        - Extracurriculars: Robotics Club President, Math Team
        - Budget: Need financial aid
        - Preferred Environment: Medium-sized urban campus
        """
    )
)
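# Because the caller is constrained to the CollegesRecommendation schema, the
# result can be consumed field by field (assuming run() returns a parsed model
# instance rather than raw text):
#
# recommendation = function_caller.run("...student profile...")
# for college in recommendation.colleges:
#     print(college.college_name, "-", college.college_admission_requirements)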
@ -0,0 +1,116 @@
from typing import Optional
from pathlib import Path
from loguru import logger
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader


class LlamaIndexDB:
    """A class to manage document indexing and querying using LlamaIndex.

    This class provides functionality to add documents from a directory and query the indexed documents.

    Args:
        data_dir (str): Directory containing documents to index. Defaults to "docs".
        **kwargs: Additional arguments passed to SimpleDirectoryReader and VectorStoreIndex.
            SimpleDirectoryReader kwargs:
                - filename_as_id (bool): Use filenames as document IDs
                - recursive (bool): Recursively read subdirectories
                - required_exts (List[str]): Only read files with these extensions
                - exclude_hidden (bool): Skip hidden files

            VectorStoreIndex kwargs:
                - service_context: Custom service context
                - embed_model: Custom embedding model
                - similarity_top_k (int): Number of similar docs to retrieve
                - store_nodes_override (bool): Override node storage
    """

    def __init__(self, data_dir: str = "docs", **kwargs) -> None:
        """Initialize the LlamaIndexDB and immediately index the documents found in `data_dir`.

        Args:
            data_dir (str): Directory containing documents to index
            **kwargs: Additional arguments for SimpleDirectoryReader and VectorStoreIndex
        """
        self.data_dir = data_dir
        self.index: Optional[VectorStoreIndex] = None
        # Route kwargs by name: reader arguments go to SimpleDirectoryReader,
        # everything else to VectorStoreIndex
        self.reader_kwargs = {
            k: v
            for k, v in kwargs.items()
            if k
            in SimpleDirectoryReader.__init__.__code__.co_varnames
        }
        self.index_kwargs = {
            k: v
            for k, v in kwargs.items()
            if k not in self.reader_kwargs
        }

        logger.info("Initialized LlamaIndexDB")
        data_path = Path(self.data_dir)
        if not data_path.exists():
            logger.error(f"Directory not found: {self.data_dir}")
            raise FileNotFoundError(
                f"Directory {self.data_dir} does not exist"
            )

        try:
            documents = SimpleDirectoryReader(
                self.data_dir, **self.reader_kwargs
            ).load_data()
            self.index = VectorStoreIndex.from_documents(
                documents, **self.index_kwargs
            )
            logger.success(
                f"Successfully indexed documents from {self.data_dir}"
            )
        except Exception as e:
            logger.error(f"Error indexing documents: {str(e)}")
            raise

    def query(self, query: str, **kwargs) -> str:
        """Query the indexed documents.

        Args:
            query (str): The query string to search for
            **kwargs: Additional arguments passed to the query engine
                - similarity_top_k (int): Number of similar documents to retrieve
                - streaming (bool): Enable streaming response
                - response_mode (str): Response synthesis mode
                - max_tokens (int): Maximum tokens in response

        Returns:
            str: The response from the query engine

        Raises:
            ValueError: If no documents have been indexed yet
        """
        if self.index is None:
            logger.error("No documents have been indexed yet")
            raise ValueError("Must add documents before querying")

        try:
            query_engine = self.index.as_query_engine(**kwargs)
            response = query_engine.query(query)
            logger.info(f"Successfully queried: {query}")
            return str(response)
        except Exception as e:
            logger.error(f"Error during query: {str(e)}")
            raise


# # Example usage
# llama_index_db = LlamaIndexDB(
#     data_dir="docs",
#     filename_as_id=True,
#     recursive=True,
#     required_exts=[".txt", ".pdf", ".docx"],
#     similarity_top_k=3
# )
# response = llama_index_db.query(
#     "What is the medical history of patient 1?",
#     streaming=True,
#     response_mode="compact"
# )
# print(response)
Binary file not shown.
@ -0,0 +1,237 @@
|
|||||||
|
"""
|
||||||
|
Todo
|
||||||
|
|
||||||
|
- You send structured data to the swarm through the users form they make
|
||||||
|
- then connect rag for every agent using llama index to remember all the students data
|
||||||
|
- structured outputs
|
||||||
|
"""

import os

from dotenv import load_dotenv
from swarms import Agent, SequentialWorkflow
from swarm_models import OpenAIChat, OpenAIFunctionCaller
from pydantic import BaseModel
from typing import List


class CollegeLog(BaseModel):
    college_name: str
    college_description: str
    college_admission_requirements: str


class CollegesRecommendation(BaseModel):
    colleges: List[CollegeLog]
    reasoning: str


load_dotenv()

# Get the API key from environment variable
api_key = os.getenv("GROQ_API_KEY")

# Initialize the model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

FINAL_AGENT_PROMPT = """
You are a college selection final decision maker. Your role is to:
1. Synthesize all previous analyses and discussions
2. Weigh competing factors and trade-offs
3. Create a final ranked list of recommended colleges
4. Provide clear rationale for each recommendation
5. Include specific action items for each selected school
6. Outline next steps in the application process

Focus on creating actionable, well-reasoned final recommendations that
balance all relevant factors and stakeholder input.
"""

function_caller = OpenAIFunctionCaller(
    system_prompt=FINAL_AGENT_PROMPT,
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    base_model=CollegesRecommendation,
    parallel_tool_calls=True,
)

# Student Profile Analyzer Agent
profile_analyzer_agent = Agent(
    agent_name="Student-Profile-Analyzer",
    system_prompt="""You are an expert student profile analyzer. Your role is to:
    1. Analyze academic performance, test scores, and extracurricular activities
    2. Identify student's strengths, weaknesses, and unique qualities
    3. Evaluate personal statements and essays
    4. Assess leadership experiences and community involvement
    5. Determine student's preferences for college environment, location, and programs
    6. Create a comprehensive student profile summary

    Always consider both quantitative metrics (GPA, test scores) and qualitative aspects
    (personal growth, challenges overcome, unique perspectives).""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="profile_analyzer_agent.json",
    user_name="student",
    context_length=200000,
    output_type="string",
)

# College Research Agent
college_research_agent = Agent(
    agent_name="College-Research-Specialist",
    system_prompt="""You are a college research specialist. Your role is to:
    1. Maintain updated knowledge of college admission requirements
    2. Research academic programs, campus culture, and student life
    3. Analyze admission statistics and trends
    4. Evaluate college-specific opportunities and resources
    5. Consider financial aid availability and scholarship opportunities
    6. Track historical admission data and acceptance rates

    Focus on providing accurate, comprehensive information about each institution
    while considering both academic and cultural fit factors.""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="college_research_agent.json",
    user_name="researcher",
    context_length=200000,
    output_type="string",
)

# College Match Agent
college_match_agent = Agent(
    agent_name="College-Match-Maker",
    system_prompt="""You are a college matching specialist. Your role is to:
    1. Compare student profiles with college requirements
    2. Evaluate fit based on academic, social, and cultural factors
    3. Consider geographic preferences and constraints
    4. Assess financial fit and aid opportunities
    5. Create tiered lists of reach, target, and safety schools
    6. Explain the reasoning behind each match

    Always provide a balanced list with realistic expectations while
    considering both student preferences and admission probability.""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="college_match_agent.json",
    user_name="matcher",
    context_length=200000,
    output_type="string",
)

# Debate Moderator Agent
debate_moderator_agent = Agent(
    agent_name="Debate-Moderator",
    system_prompt="""You are a college selection debate moderator. Your role is to:
    1. Facilitate discussions between different perspectives
    2. Ensure all relevant factors are considered
    3. Challenge assumptions and biases
    4. Synthesize different viewpoints
    5. Guide the group toward consensus
    6. Document key points of agreement and disagreement

    Maintain objectivity while ensuring all important factors are thoroughly discussed
    and evaluated.""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="debate_moderator_agent.json",
    user_name="moderator",
    context_length=200000,
    output_type="string",
)

# Critique Agent
critique_agent = Agent(
    agent_name="College-Selection-Critic",
    system_prompt="""You are a college selection critic. Your role is to:
    1. Evaluate the strength of college matches
    2. Identify potential overlooked factors
    3. Challenge assumptions in the selection process
    4. Assess risks and potential drawbacks
    5. Provide constructive feedback on selections
    6. Suggest alternative options when appropriate

    Focus on constructive criticism that helps improve the final college list
    while maintaining realistic expectations.""",
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="critique_agent.json",
    user_name="critic",
    context_length=200000,
    output_type="string",
)

# Final Decision Agent (reuses the same prompt as the function caller above)
final_decision_agent = Agent(
    agent_name="Final-Decision-Maker",
    system_prompt=FINAL_AGENT_PROMPT,
    llm=model,
    max_loops=1,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="final_decision_agent.json",
    user_name="decision_maker",
    context_length=200000,
    output_type="string",
)

# Initialize the Sequential Workflow
college_selection_workflow = SequentialWorkflow(
    name="college-selection-swarm",
    description="Comprehensive college selection and analysis system",
    max_loops=1,
    agents=[
        profile_analyzer_agent,
        college_research_agent,
        college_match_agent,
        debate_moderator_agent,
        critique_agent,
        final_decision_agent,
    ],
    output_type="all",
)

# Example usage
if __name__ == "__main__":
    # Example student profile input
    student_profile = """
    Student Profile:
    - GPA: 3.8
    - SAT: 1450
    - Interests: Computer Science, Robotics
    - Location Preference: East Coast
    - Extracurriculars: Robotics Club President, Math Team
    - Budget: Need financial aid
    - Preferred Environment: Medium-sized urban campus
    """

    # Run the comprehensive college selection analysis
    result = college_selection_workflow.run(
        student_profile,
        no_use_clusterops=True,
    )
    print(result)
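Note that `function_caller` is defined above but never wired into the workflow. A hedged sketch of how the final free-text recommendation could be coerced into the `CollegesRecommendation` schema is below; it assumes `OpenAIFunctionCaller.run` accepts a task string and returns an instance of `base_model`, which is not shown in this file.

# structured = function_caller.run(
#     f"Convert this recommendation into the CollegesRecommendation schema:\n{result}"
# )
# print(structured.colleges[0].college_name, structured.reasoning)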
@ -0,0 +1,52 @@
#!/bin/bash

# Set up logging
LOG_FILE="docs_compilation.log"
OUTPUT_FILE="combined_docs.txt"

# Initialize log file
echo "$(date): Starting documentation compilation" > "$LOG_FILE"

# Create/clear output file
> "$OUTPUT_FILE"

# Function to determine file type and handle accordingly
process_file() {
    local file="$1"

    # Get file extension
    extension="${file##*.}"

    echo "$(date): Processing $file" >> "$LOG_FILE"

    case "$extension" in
        md|markdown)
            echo "# $(basename "$file")" >> "$OUTPUT_FILE"
            cat "$file" >> "$OUTPUT_FILE"
            echo -e "\n\n" >> "$OUTPUT_FILE"
            ;;
        txt)
            echo "# $(basename "$file")" >> "$OUTPUT_FILE"
            cat "$file" >> "$OUTPUT_FILE"
            echo -e "\n\n" >> "$OUTPUT_FILE"
            ;;
        *)
            echo "$(date): Skipping $file - unsupported format" >> "$LOG_FILE"
            return
            ;;
    esac

    echo "$(date): Successfully processed $file" >> "$LOG_FILE"
}

# Find and process all documentation files
find ../docs -type f \( -name "*.md" -o -name "*.txt" -o -name "*.markdown" \) | while read -r file; do
    process_file "$file"
done

# Log completion
echo "$(date): Documentation compilation complete" >> "$LOG_FILE"
echo "$(date): Output saved to $OUTPUT_FILE" >> "$LOG_FILE"

# Print summary
echo "Documentation compilation complete. Check $LOG_FILE for details."
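For reference, a minimal Python equivalent of the shell script above, assuming the same ../docs layout; the output file name mirrors the script and is otherwise illustrative.

from pathlib import Path

OUTPUT_FILE = Path("combined_docs.txt")
SUPPORTED = {".md", ".markdown", ".txt"}

with OUTPUT_FILE.open("w", encoding="utf-8") as out:
    for path in sorted(Path("../docs").rglob("*")):
        # Mirror the shell script: keep markdown/text files, skip the rest
        if path.is_file() and path.suffix in SUPPORTED:
            out.write(f"# {path.name}\n")
            out.write(path.read_text(encoding="utf-8", errors="replace"))
            out.write("\n\n")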
@ -0,0 +1,7 @@
from swarms import Agent

Agent(
    agent_name="Stock-Analysis-Agent",
    model_name="gpt-4o-mini",
    max_loops=1,
).run("What are 5 hft algorithms")
@ -0,0 +1,253 @@
import re

from dotenv import load_dotenv
from tenacity import retry, stop_after_attempt, wait_exponential

from swarms import Agent
from swarms.agents.create_agents_from_yaml import (
    create_agents_from_yaml,
)
from swarms.utils.formatter import formatter
from swarms.utils.litellm import LiteLLM

load_dotenv()


def prepare_yaml_for_parsing(raw_yaml: str) -> str:
    """
    Prepares raw YAML content by fixing spacing and formatting issues.

    Args:
        raw_yaml (str): The raw YAML content extracted from Markdown.

    Returns:
        str: The cleaned YAML content ready for parsing.
    """
    # Fix sequence items that are improperly placed on the same line as their key
    fixed_yaml = re.sub(
        r"(\b\w+\b):\s*-\s*", r"\1:\n - ", raw_yaml
    )  # Fix "key: - value" to "key:\n - value"

    # Ensure proper spacing after colons
    fixed_yaml = re.sub(
        r"(\S):(\S)", r"\1: \2", fixed_yaml
    )  # Ensure space after colons

    # Remove trailing spaces before newlines
    fixed_yaml = re.sub(r"\s+\n", "\n", fixed_yaml)

    # Replace non-breaking spaces (if any) with regular spaces
    fixed_yaml = fixed_yaml.replace("\xa0", " ")

    return fixed_yaml.strip()
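A short demonstration of what the normalization above does to a typical malformed snippet; the input string is illustrative.

broken = "agents: - agent_name:Builder"
print(prepare_yaml_for_parsing(broken))
# agents:
#  - agent_name: Builder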

def parse_yaml_from_swarm_markdown(markdown_text: str) -> str:
    """
    Extracts and prepares YAML content from a Markdown-style 'Auto-Swarm-Builder' block.

    Args:
        markdown_text (str): The Markdown text containing the YAML inside an 'Auto-Swarm-Builder' block.

    Returns:
        str: The normalized YAML content, ready for downstream parsing.
    """
    # Match the 'Auto-Swarm-Builder' block with YAML inside triple backticks
    pattern = r"```yaml\s*\n(.*?)```"
    match = re.search(pattern, markdown_text, re.DOTALL)

    if not match:
        raise ValueError(
            "No YAML content found in the 'Auto-Swarm-Builder' block."
        )

    raw_yaml = match.group(1).strip()

    # Preprocess and normalize the YAML content
    normalized_yaml = prepare_yaml_for_parsing(raw_yaml)

    return normalized_yaml
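A hedged usage sketch for the extractor above; the Markdown string is illustrative. Note the function returns the normalized YAML as a string, not a parsed dict.

md = """Here is the plan:
```yaml
agents:
  - agent_name: "Research-Agent"
```
"""
print(parse_yaml_from_swarm_markdown(md))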

AUTO_GEN_PROMPT = """
You are a specialized agent responsible for creating YAML configuration files for multi-agent swarms. Your role is to generate well-structured YAML that defines both individual agents and swarm architectures based on user requirements.
Output only the YAML, nothing else. You will be penalized for making mistakes.

GUIDELINES:
1. Each YAML file must contain an `agents` section with at least one agent configuration
2. Each agent configuration requires the following mandatory fields:
   - agent_name (string)
   - system_prompt (string)

3. Optional agent fields include:
   - max_loops (integer)
   - autosave (boolean)
   - dashboard (boolean)
   - verbose (boolean)
   - dynamic_temperature_enabled (boolean)
   - saved_state_path (string)
   - user_name (string)
   - retry_attempts (integer)
   - context_length (integer)
   - return_step_meta (boolean)
   - output_type (string)
   - task (string)

4. When a swarm is needed, include a `swarm_architecture` section with:
   Mandatory fields:
   - name (string)
   - swarm_type (string): one of AgentRearrange, MixtureOfAgents, SpreadSheetSwarm, SequentialWorkflow, ConcurrentWorkflow

   Optional fields:
   - description (string)
   - max_loops (integer)
   - task (string)

TEMPLATE STRUCTURE:
```yaml
agents:
  - agent_name: "Agent-1-Name"
    system_prompt: "Detailed system prompt here"
    max_loops: 1
    # [additional optional fields]

  - agent_name: "Agent-2-Name"
    system_prompt: "Detailed system prompt here"
    # [additional optional fields]

swarm_architecture:
  name: "Swarm-Name"
  description: "Swarm purpose and goals"
  swarm_type: "ConcurrentWorkflow"
  max_loops: 5
  task: "Main swarm task description"
```

VALIDATION RULES:
1. All agent names must be unique
2. System prompts must be clear and specific to the agent's role
3. Integer values must be positive
4. Boolean values must be true or false (lowercase)
5. File paths should use forward slashes
6. Tasks should be specific and aligned with the agent/swarm purpose

When generating a YAML configuration:
1. Ask for specific requirements about the agents and swarm needed
2. Determine if a swarm architecture is necessary based on the task complexity
3. Generate appropriate system prompts for each agent based on their roles
4. Include relevant optional fields based on the use case
5. Validate the configuration against all rules before returning

Example valid YAML configurations are provided below. Use these as references for structure and formatting:

```yaml
# Single-Agent Example
agents:
  - agent_name: "Data-Analysis-Agent"
    system_prompt: "You are a specialized data analysis agent focused on processing and interpreting financial data. Provide clear, actionable insights based on the data provided."
    max_loops: 3
    autosave: true
    verbose: true
    context_length: 100000
    output_type: "json"
    task: "Analyze quarterly financial reports and identify trends"

# Multi-Agent Swarm Example
agents:
  - agent_name: "Research-Agent"
    system_prompt: "You are a research agent specialized in gathering and summarizing scientific publications. Focus on peer-reviewed sources and provide comprehensive summaries."
    max_loops: 2
    context_length: 150000
    output_type: "str"

  - agent_name: "Analysis-Agent"
    system_prompt: "You are an analysis agent that processes research summaries and identifies key patterns and insights. Provide detailed analytical reports."
    max_loops: 3
    context_length: 200000
    output_type: "json"

swarm_architecture:
  name: "Research-Analysis-Swarm"
  description: "A swarm for comprehensive research analysis and insight generation"
  swarm_type: "SequentialWorkflow"
  max_loops: 5
  task: "Research and analyze recent developments in quantum computing"
```
"""

def generate_swarm_config(
    task: str,
    file_name: str = "swarm_config_output.yaml",
    model_name: str = "gpt-4o",
    *args,
    **kwargs,
):
    """
    Generates a swarm configuration based on the provided task and model name.

    This function attempts to generate a swarm configuration by running an agent with the specified task and model name.
    It then parses the output into YAML format and creates agents based on the parsed YAML content.

    Args:
        task (str): The task to be performed by the swarm.
        file_name (str, optional): The file name for the output YAML configuration. Defaults to "swarm_config_output.yaml".
        model_name (str, optional): The name of the model to use for the agent. Defaults to "gpt-4o".
        *args: Additional positional arguments to be passed to the agent's run method.
        **kwargs: Additional keyword arguments to be passed to the agent's run method.

    Returns:
        Any: The output of the swarm configuration generation process. This can be a SwarmRouter instance or an error message.
    """
    formatter.print_panel(
        "Auto Generating Swarm...", "Auto Swarm Builder"
    )

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(min=4, max=10),
    )
    def attempt_generate_swarm_config():
        try:
            model = LiteLLM(model_name=model_name)

            # Initialize the agent
            agent = Agent(
                agent_name="Auto-Swarm-Builder",
                system_prompt=AUTO_GEN_PROMPT,
                llm=model,
                max_loops=1,
                dynamic_temperature_enabled=True,
                saved_state_path="swarm_builder.json",
                user_name="swarms_corp",
                output_type="str",
            )

            # Generate output from the agent
            raw_output = agent.run(task, *args, **kwargs)
            yaml_content = parse_yaml_from_swarm_markdown(raw_output)
            print(yaml_content)

            # Create agents from the YAML content
            output = create_agents_from_yaml(
                yaml_string=yaml_content,
                return_type="run_swarm",
            )

            formatter.print_panel(
                "Swarm configuration generated successfully.",
                "Success",
            )

            return output

        except Exception as e:
            formatter.print_panel(
                f"Error generating swarm configuration: {str(e)}",
                "Error",
            )
            raise

    return attempt_generate_swarm_config()
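A hedged usage sketch; the task string is illustrative, and since `generate_swarm_config` calls the configured LLM, an API key is assumed to be present in the environment.

# output = generate_swarm_config(
#     task="Build a two-agent swarm that researches and summarizes arXiv papers",
#     model_name="gpt-4o",
# )
# print(output)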
@ -1,9 +1,5 @@
-from swarms.artifacts.base_artifact import BaseArtifact
-from swarms.artifacts.text_artifact import TextArtifact
 from swarms.artifacts.main_artifact import Artifact

 __all__ = [
-    "BaseArtifact",
-    "TextArtifact",
     "Artifact",
 ]
@ -1,77 +0,0 @@
from __future__ import annotations

import json
import uuid
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any


@dataclass
class BaseArtifact(ABC):
    """
    Base class for artifacts.
    """

    id: str
    name: str
    value: Any

    def __post_init__(self):
        if self.id is None:
            self.id = uuid.uuid4().hex
        if self.name is None:
            self.name = self.id

    @classmethod
    def value_to_bytes(cls, value: Any) -> bytes:
        """
        Convert the value to bytes.
        """
        if isinstance(value, bytes):
            return value
        else:
            return str(value).encode()

    @classmethod
    def value_to_dict(cls, value: Any) -> dict:
        """
        Convert the value to a dictionary.
        """
        if isinstance(value, dict):
            dict_value = value
        else:
            dict_value = json.loads(value)

        return {k: v for k, v in dict_value.items()}

    def to_text(self) -> str:
        """
        Convert the value to text.
        """
        return str(self.value)

    def __str__(self) -> str:
        """
        Return a string representation of the artifact.
        """
        return self.to_text()

    def __bool__(self) -> bool:
        """
        Return the boolean value of the artifact.
        """
        return bool(self.value)

    def __len__(self) -> int:
        """
        Return the length of the artifact.
        """
        return len(self.value)

    @abstractmethod
    def __add__(self, other: BaseArtifact) -> BaseArtifact:
        """
        Add two artifacts together.
        """
        ...
@ -1,58 +0,0 @@
from __future__ import annotations

from dataclasses import dataclass, field
from typing import Callable

from swarms.artifacts.base_artifact import BaseArtifact


@dataclass
class TextArtifact(BaseArtifact):
    """
    Represents a text artifact.

    Attributes:
        value (str): The text value of the artifact.
        encoding (str, optional): The encoding of the text (default is "utf-8").
        encoding_error_handler (str, optional): The error handler for encoding errors (default is "strict").
        tokenizer (Callable, optional): Tokenizer used by `token_count` (default is None).
        _embedding (list[float]): The embedding of the text artifact (default is an empty list).

    Properties:
        embedding (Optional[list[float]]): The embedding of the text artifact.

    Methods:
        __add__(self, other: BaseArtifact) -> TextArtifact: Concatenates the text value of the artifact with another artifact.
        __bool__(self) -> bool: Checks if the text value of the artifact is non-empty.
        generate_embedding(self, model) -> Optional[list[float]]: Generates the embedding of the text artifact using a given embedding model.
        token_count(self) -> int: Counts the number of tokens in the text artifact using the configured tokenizer.
        to_bytes(self) -> bytes: Converts the text value of the artifact to bytes using the specified encoding and error handler.
    """

    value: str
    encoding: str = "utf-8"
    encoding_error_handler: str = "strict"
    tokenizer: Callable = None
    _embedding: list[float] = field(default_factory=list)

    @property
    def embedding(self) -> list[float] | None:
        return None if len(self._embedding) == 0 else self._embedding

    def __add__(self, other: BaseArtifact) -> TextArtifact:
        return TextArtifact(self.value + other.value)

    def __bool__(self) -> bool:
        return bool(self.value.strip())

    def generate_embedding(self, model) -> list[float] | None:
        self._embedding.clear()
        self._embedding.extend(model.embed_string(str(self.value)))

        return self.embedding

    def token_count(self) -> int:
        return self.tokenizer.count_tokens(str(self.value))

    def to_bytes(self) -> bytes:
        return self.value.encode(
            encoding=self.encoding, errors=self.encoding_error_handler
        )
@ -1,120 +0,0 @@
from swarms.utils.loguru_logger import logger
import yaml
from pydantic import BaseModel
from typing import List, Optional
import json
from swarms.structs.agent_registry import AgentRegistry
from swarms.structs.agent import Agent
from swarm_models.popular_llms import OpenAIChat


class AgentInput(BaseModel):
    agent_name: str = "Swarm Agent"
    system_prompt: Optional[str] = None
    agent_description: Optional[str] = None
    model_name: str = "OpenAIChat"
    max_loops: int = 1
    autosave: bool = False
    dynamic_temperature_enabled: bool = False
    dashboard: bool = False
    verbose: bool = False
    streaming_on: bool = True
    saved_state_path: Optional[str] = None
    sop: Optional[str] = None
    sop_list: Optional[List[str]] = None
    user_name: str = "User"
    retry_attempts: int = 3
    context_length: int = 8192
    task: Optional[str] = None
    interactive: bool = False


def parse_yaml_to_json(yaml_str: str) -> str:
    """
    Parses the given YAML string into an AgentInput model and converts it to a JSON string.

    Args:
        yaml_str (str): The YAML string to be parsed.

    Returns:
        str: The JSON string representation of the parsed YAML.

    Raises:
        ValueError: If the YAML string cannot be parsed into the AgentInput model.
    """
    try:
        data = yaml.safe_load(yaml_str)
        agent_input = AgentInput(**data)
        return agent_input.json()
    except yaml.YAMLError as e:
        print(f"YAML Error: {e}")
        raise ValueError("Invalid YAML input.") from e
    except ValueError as e:
        print(f"Validation Error: {e}")
        raise ValueError("Invalid data for AgentInput model.") from e


# # Example usage
# yaml_input = """
# agent_name: "Custom Agent"
# system_prompt: "System prompt example"
# agent_description: "This is a test agent"
# model_name: "CustomModel"
# max_loops: 5
# autosave: true
# dynamic_temperature_enabled: true
# dashboard: true
# verbose: true
# streaming_on: false
# saved_state_path: "/path/to/state"
# sop: "Standard operating procedure"
# sop_list: ["step1", "step2"]
# user_name: "Tester"
# retry_attempts: 5
# context_length: 4096
# task: "Perform testing"
# """

# json_output = parse_yaml_to_json(yaml_input)
# print(json_output)

registry = AgentRegistry()


def create_agent_from_yaml(yaml_path: str) -> None:
    with open(yaml_path, "r") as file:
        yaml_str = file.read()
    agent_json = parse_yaml_to_json(yaml_str)
    agent_config = json.loads(agent_json)

    agent = Agent(
        agent_name=agent_config.get("agent_name", "Swarm Agent"),
        system_prompt=agent_config.get("system_prompt"),
        agent_description=agent_config.get("agent_description"),
        llm=OpenAIChat(),
        max_loops=agent_config.get("max_loops", 1),
        autosave=agent_config.get("autosave", False),
        dynamic_temperature_enabled=agent_config.get(
            "dynamic_temperature_enabled", False
        ),
        dashboard=agent_config.get("dashboard", False),
        verbose=agent_config.get("verbose", False),
        streaming_on=agent_config.get("streaming_on", True),
        saved_state_path=agent_config.get("saved_state_path"),
        retry_attempts=agent_config.get("retry_attempts", 3),
        context_length=agent_config.get("context_length", 8192),
    )

    registry.add(agent.agent_name, agent)
    logger.info(f"Agent {agent.agent_name} created from {yaml_path}.")


def run_agent(agent_name: str, task: str) -> None:
    agent = registry.find_agent_by_name(agent_name)
    agent.run(task)


def list_agents() -> None:
    agents = registry.list_agents()
    for agent_id in agents:
        print(agent_id)
@ -1,10 +0,0 @@
from typing import List
from pydantic import BaseModel
from swarms.schemas.agent_step_schemas import Step


class Plan(BaseModel):
    steps: List[Step]

    class Config:
        orm_mode = True
@ -1,10 +1,12 @@
 from typing import List, Optional

 import chromadb
-from loguru import logger
 from tenacity import retry, stop_after_attempt, wait_exponential
 from typing import Union, Callable, Any
 from swarms import Agent
+from swarms.utils.loguru_logger import initialize_logger

+logger = initialize_logger(log_folder="agent_router")


 class AgentRouter:
@ -0,0 +1,87 @@
from swarms.structs.agent import Agent
from typing import List


def showcase_available_agents(
    agents: List[Agent],
    name: str = None,
    description: str = None,
    format: str = "XML",
) -> str:
    """
    Format the available agents in either XML or Table format.

    Args:
        agents (List[Agent]): A list of agents to represent
        name (str, optional): Name of the swarm
        description (str, optional): Description of the swarm
        format (str, optional): Output format ("XML" or "Table"). Defaults to "XML"

    Returns:
        str: Formatted string containing agent information
    """

    def truncate(text: str, max_length: int = 130) -> str:
        return (
            f"{text[:max_length]}..."
            if len(text) > max_length
            else text
        )

    output = []

    if format.upper() == "TABLE":
        output.append("\n| ID | Agent Name | Description |")
        output.append("|-----|------------|-------------|")
        for idx, agent in enumerate(agents):
            if isinstance(agent, Agent):
                agent_name = getattr(agent, "agent_name", str(agent))
                description = getattr(
                    agent,
                    "description",
                    getattr(
                        agent, "system_prompt", "Unknown description"
                    ),
                )
                desc = truncate(description, 50)
                output.append(
                    f"| {idx + 1} | {agent_name} | {desc} |"
                )
            else:
                output.append(
                    f"| {idx + 1} | {agent} | Unknown description |"
                )
        return "\n".join(output)

    # Default XML format
    output.append("<agents>")
    if name:
        output.append(f"  <name>{name}</name>")
    if description:
        output.append(
            f"  <description>{truncate(description)}</description>"
        )
    for idx, agent in enumerate(agents):
        output.append(f"  <agent id='{idx + 1}'>")
        if isinstance(agent, Agent):
            agent_name = getattr(agent, "agent_name", str(agent))
            description = getattr(
                agent,
                "description",
                getattr(
                    agent, "system_prompt", "Unknown description"
                ),
            )
            output.append(f"    <name>{agent_name}</name>")
            output.append(
                f"    <description>{truncate(description)}</description>"
            )
        else:
            output.append(f"    <name>{agent}</name>")
            output.append(
                "    <description>Unknown description</description>"
            )
        output.append("  </agent>")
    output.append("</agents>")

    return "\n".join(output)
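A hedged usage sketch for the formatter above; the agent names are illustrative and the `Agent` constructor kwargs follow the examples elsewhere in this commit.

# agents = [
#     Agent(agent_name="Research-Agent", system_prompt="Summarize papers"),
#     Agent(agent_name="Analysis-Agent", system_prompt="Find patterns"),
# ]
# print(showcase_available_agents(agents, name="demo-swarm", format="Table"))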
@ -1,3 +0,0 @@
"""
This class takes a swarm type as input, then auto-generates a list of `Agent` structures with their names, descriptions, system prompts, and more.
"""
@ -1,393 +0,0 @@
from typing import List, Callable, Union, Optional
from loguru import logger
from swarms.structs.base_swarm import BaseSwarm
from queue import PriorityQueue
from concurrent.futures import (
    ThreadPoolExecutor,
    as_completed,
)
import time
from pydantic import BaseModel, Field


class SwarmRunData(BaseModel):
    """
    Pydantic model to capture metadata about each swarm's execution.
    """

    swarm_name: str
    task: str
    priority: int
    start_time: Optional[float] = None
    end_time: Optional[float] = None
    duration: Optional[float] = None
    status: str = "Pending"
    retries: int = 0
    result: Optional[str] = None
    exception: Optional[str] = None


class FederatedSwarmModel(BaseModel):
    """
    Pydantic base model to capture and log data for the FederatedSwarm system.
    """

    task: str
    swarms_data: List[SwarmRunData] = Field(default_factory=list)

    def add_swarm(self, swarm_name: str, task: str, priority: int):
        swarm_data = SwarmRunData(
            swarm_name=swarm_name, task=task, priority=priority
        )
        self.swarms_data.append(swarm_data)

    def update_swarm_status(
        self,
        swarm_name: str,
        status: str,
        start_time: float = None,
        end_time: float = None,
        retries: int = 0,
        result: str = None,
        exception: str = None,
    ):
        for swarm in self.swarms_data:
            # The model field is `swarm_name`, not `name`
            if swarm.swarm_name == swarm_name:
                swarm.status = status
                if start_time:
                    swarm.start_time = start_time
                if end_time:
                    swarm.end_time = end_time
                    swarm.duration = end_time - swarm.start_time
                swarm.retries = retries
                swarm.result = result
                swarm.exception = exception
                break


class FederatedSwarm:
    def __init__(
        self,
        swarms: List[Union[BaseSwarm, Callable]],
        max_workers: int = 4,
    ):
        """
        Initializes the FederatedSwarm with a list of swarms or callable objects and
        sets up a priority queue and thread pool for concurrency.

        Args:
            swarms (List[Union[BaseSwarm, Callable]]): A list of swarms (BaseSwarm) or callable objects.
            max_workers (int): The maximum number of concurrent workers (threads) to run swarms in parallel.
        """
        self.swarms = PriorityQueue()
        self.max_workers = max_workers
        self.thread_pool = ThreadPoolExecutor(
            max_workers=self.max_workers
        )
        self.task_queue = []
        self.future_to_swarm = {}
        self.results = {}
        self.validate_swarms(swarms)

    def init_metadata(self, task: str):
        """
        Initializes the Pydantic base model to capture metadata about the current task and swarms.
        """
        self.metadata = FederatedSwarmModel(task=task)
        for priority, swarm in list(self.swarms.queue):
            swarm_name = (
                swarm.__class__.__name__
                if hasattr(swarm, "__class__")
                else str(swarm)
            )
            self.metadata.add_swarm(
                swarm_name=swarm_name, task=task, priority=priority
            )
        logger.info(f"Metadata initialized for task '{task}'.")

    def validate_swarms(
        self, swarms: List[Union[BaseSwarm, Callable]]
    ):
        """
        Validates and adds swarms to the priority queue, ensuring each swarm has a `run(task)` method.

        Args:
            swarms (List[Union[BaseSwarm, Callable]]): List of (swarm, priority) pairs.
        """
        for swarm, priority in swarms:
            if not callable(swarm):
                raise TypeError(f"{swarm} is not callable.")

            if hasattr(swarm, "run"):
                logger.info(f"{swarm} has a 'run' method.")
            else:
                raise AttributeError(
                    f"{swarm} does not have a 'run(task)' method."
                )

            self.swarms.put((priority, swarm))
            logger.info(
                f"Swarm {swarm} added with priority {priority}."
            )

    def run_parallel(
        self,
        task: str,
        timeout: Optional[float] = None,
        retries: int = 0,
    ):
        """
        Runs all swarms in parallel with prioritization and optional timeout.

        Args:
            task (str): The task to be passed to the `run` method of each swarm.
            timeout (Optional[float]): Maximum time allowed for each swarm to run.
            retries (int): Number of retries allowed for failed swarms.
        """
        logger.info(
            f"Running task '{task}' in parallel with timeout: {timeout}, retries: {retries}"
        )
        self.init_metadata(task)

        while not self.swarms.empty():
            priority, swarm = self.swarms.get()
            swarm_name = (
                swarm.__class__.__name__
                if hasattr(swarm, "__class__")
                else str(swarm)
            )
            future = self.thread_pool.submit(
                self._run_with_retry,
                swarm,
                task,
                retries,
                timeout,
                swarm_name,
            )
            self.future_to_swarm[future] = swarm

        for future in as_completed(self.future_to_swarm):
            swarm = self.future_to_swarm[future]
            swarm_name = (
                swarm.__class__.__name__
                if hasattr(swarm, "__class__")
                else str(swarm)
            )
            try:
                result = future.result()
                self.metadata.update_swarm_status(
                    swarm_name=swarm_name,
                    status="Completed",
                    result=result,
                )
                # Record the success so log_swarm_results sees it
                self.results[swarm] = result
                logger.info(
                    f"Swarm {swarm_name} completed successfully."
                )
            except Exception as e:
                self.metadata.update_swarm_status(
                    swarm_name=swarm_name,
                    status="Failed",
                    exception=str(e),
                )
                logger.error(f"Swarm {swarm_name} failed: {e}")
                self.results[swarm] = "Failed"

    def run_sequentially(
        self,
        task: str,
        retries: int = 0,
        timeout: Optional[float] = None,
    ):
        """
        Runs all swarms sequentially in order of priority.

        Args:
            task (str): The task to pass to the `run` method of each swarm.
            retries (int): Number of retries for failed swarms.
            timeout (Optional[float]): Optional time limit for each swarm.
        """
        logger.info(f"Running task '{task}' sequentially.")

        while not self.swarms.empty():
            priority, swarm = self.swarms.get()
            swarm_name = (
                swarm.__class__.__name__
                if hasattr(swarm, "__class__")
                else str(swarm)
            )
            try:
                logger.info(
                    f"Running swarm {swarm} with priority {priority}."
                )
                # _run_with_retry requires the swarm name for metadata updates
                self._run_with_retry(
                    swarm, task, retries, timeout, swarm_name
                )
                logger.info(f"Swarm {swarm} completed successfully.")
            except Exception as e:
                logger.error(f"Swarm {swarm} failed with error: {e}")

    def _run_with_retry(
        self,
        swarm: Union[BaseSwarm, Callable],
        task: str,
        retries: int,
        timeout: Optional[float],
        swarm_name: str,
    ):
        """
        Helper function to run a swarm with a retry mechanism and optional timeout.

        Args:
            swarm (Union[BaseSwarm, Callable]): The swarm to run.
            task (str): The task to pass to the swarm.
            retries (int): The number of retries allowed for the swarm in case of failure.
            timeout (Optional[float]): Maximum time allowed for the swarm to run.
            swarm_name (str): Name of the swarm (used for metadata).
        """
        attempts = 0
        start_time = time.time()
        while attempts <= retries:
            try:
                logger.info(
                    f"Running swarm {swarm}. Attempt: {attempts + 1}"
                )
                self.metadata.update_swarm_status(
                    swarm_name=swarm_name,
                    status="Running",
                    start_time=start_time,
                )
                if hasattr(swarm, "run"):
                    if timeout:
                        start_time = time.time()
                        swarm.run(task)
                        duration = time.time() - start_time
                        if duration > timeout:
                            raise TimeoutError(
                                f"Swarm {swarm} timed out after {duration:.2f}s."
                            )
                    else:
                        swarm.run(task)
                else:
                    swarm(task)
                end_time = time.time()
                self.metadata.update_swarm_status(
                    swarm_name=swarm_name,
                    status="Completed",
                    end_time=end_time,
                    retries=attempts,
                )
                return "Success"
            except Exception as e:
                logger.error(f"Swarm {swarm} failed: {e}")
                attempts += 1
                if attempts > retries:
                    end_time = time.time()
                    self.metadata.update_swarm_status(
                        swarm_name=swarm_name,
                        status="Failed",
                        end_time=end_time,
                        retries=attempts,
                        exception=str(e),
                    )
                    logger.error(f"Swarm {swarm} exhausted retries.")
                    raise

    def add_swarm(
        self, swarm: Union[BaseSwarm, Callable], priority: int
    ):
        """
        Adds a new swarm to the FederatedSwarm at runtime.

        Args:
            swarm (Union[BaseSwarm, Callable]): The swarm to add.
            priority (int): The priority level for the swarm.
        """
        self.swarms.put((priority, swarm))
        logger.info(
            f"Swarm {swarm} added dynamically with priority {priority}."
        )

    def queue_task(self, task: str):
        """
        Adds a task to the internal task queue for batch processing.

        Args:
            task (str): The task to queue.
        """
        self.task_queue.append(task)
        logger.info(f"Task '{task}' added to the queue.")

    def process_task_queue(self):
        """
        Processes all tasks in the task queue.
        """
        for task in self.task_queue:
            logger.info(f"Processing task: {task}")
            self.run_parallel(task)
        self.task_queue = []

    def log_swarm_results(self):
        """
        Logs the results of all swarms after execution.
        """
        logger.info("Logging swarm results...")
        for swarm, result in self.results.items():
            logger.info(f"Swarm {swarm}: {result}")

    def get_swarm_status(self) -> dict:
        """
        Retrieves the status of each swarm (completed, running, failed).

        Returns:
            dict: Dictionary containing swarm statuses.
        """
        status = {}
        for future, swarm in self.future_to_swarm.items():
            if future.done():
                status[swarm] = "Completed"
            elif future.running():
                status[swarm] = "Running"
            else:
                status[swarm] = "Failed"
        return status

    def cancel_running_swarms(self):
        """
        Cancels all currently running swarms by shutting down the thread pool.
        """
        logger.warning("Cancelling all running swarms...")
        self.thread_pool.shutdown(wait=False)
        logger.info("All running swarms cancelled.")


# Example Usage:

# class ExampleSwarm(BaseSwarm):
#     def run(self, task: str):
#         logger.info(f"ExampleSwarm is processing task: {task}")


# def example_callable(task: str):
#     logger.info(f"Callable is processing task: {task}")


# if __name__ == "__main__":
#     swarms = [(ExampleSwarm(), 1), (example_callable, 2)]
#     federated_swarm = FederatedSwarm(swarms)

#     # Run in parallel
#     federated_swarm.run_parallel(
#         "Process data", timeout=10, retries=3
#     )

#     # Run sequentially
#     federated_swarm.run_sequentially("Process data sequentially")

#     # Log results
#     federated_swarm.log_swarm_results()

#     # Get status of swarms
#     status = federated_swarm.get_swarm_status()
#     logger.info(f"Swarm statuses: {status}")

#     # Cancel running swarms (if needed)
#     # federated_swarm.cancel_running_swarms()
@ -0,0 +1,665 @@
"""
GraphSwarm: A production-grade framework for orchestrating swarms of agents
Author: Claude
License: MIT
Version: 2.0.0
"""

import asyncio
import json
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple, Union

import chromadb
import networkx as nx
from loguru import logger
from pydantic import BaseModel, Field

from swarms import Agent


# Configure logging
logger.add(
    "graphswarm.log",
    rotation="500 MB",
    retention="10 days",
    level="INFO",
    format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
)


class AgentOutput(BaseModel):
    """Structured output from an agent."""

    agent_name: str
    timestamp: float = Field(default_factory=time.time)
    output: Any
    execution_time: float
    error: Optional[str] = None
    metadata: Dict = Field(default_factory=dict)


class SwarmOutput(BaseModel):
    """Structured output from the entire swarm."""

    timestamp: float = Field(default_factory=time.time)
    outputs: Dict[str, AgentOutput]
    execution_time: float
    success: bool
    error: Optional[str] = None
    metadata: Dict = Field(default_factory=dict)


class SwarmMemory:
    """Vector-based memory system for GraphSwarm using ChromaDB."""

    def __init__(self, collection_name: str = "swarm_memories"):
        """Initialize SwarmMemory with ChromaDB."""
        self.client = chromadb.Client()

        # Get or create collection
        self.collection = self.client.get_or_create_collection(
            name=collection_name,
            metadata={"description": "GraphSwarm execution memories"},
        )

    def store_execution(self, task: str, result: SwarmOutput):
        """Store execution results in vector memory."""
        try:
            # Create metadata
            metadata = {
                "timestamp": datetime.now().isoformat(),
                "success": result.success,
                "execution_time": result.execution_time,
                "agent_sequence": json.dumps(
                    [name for name in result.outputs.keys()]
                ),
                "error": result.error if result.error else "",
            }

            # Create document from outputs
            document = {
                "task": task,
                "outputs": json.dumps(
                    {
                        name: {
                            "output": str(output.output),
                            "execution_time": output.execution_time,
                            "error": output.error,
                        }
                        for name, output in result.outputs.items()
                    }
                ),
            }

            # Store in ChromaDB
            self.collection.add(
                documents=[json.dumps(document)],
                metadatas=[metadata],
                ids=[f"exec_{datetime.now().timestamp()}"],
            )

            print("added to database")

            logger.info(f"Stored execution in memory: {task}")

        except Exception as e:
            logger.error(
                f"Failed to store execution in memory: {str(e)}"
            )

    def get_similar_executions(self, task: str, limit: int = 5):
        """Retrieve similar past executions."""
        try:
            # Query ChromaDB for similar executions
            results = self.collection.query(
                query_texts=[task],
                n_results=limit,
                include=["documents", "metadatas"],
            )

            print(results)

            if not results["documents"]:
                return []

            # Process results
            executions = []
            for doc, metadata in zip(
                results["documents"][0], results["metadatas"][0]
            ):
                doc_dict = json.loads(doc)
                executions.append(
                    {
                        "task": doc_dict["task"],
                        "outputs": json.loads(doc_dict["outputs"]),
                        "success": metadata["success"],
                        "execution_time": metadata["execution_time"],
                        "agent_sequence": json.loads(
                            metadata["agent_sequence"]
                        ),
                        "timestamp": metadata["timestamp"],
                    }
                )

            return executions

        except Exception as e:
            logger.error(
                f"Failed to retrieve similar executions: {str(e)}"
            )
            return []

    def get_optimal_sequence(self, task: str) -> Optional[List[str]]:
        """Get the most successful agent sequence for similar tasks."""
        similar_executions = self.get_similar_executions(task)
        print(f"similar_executions {similar_executions}")

        if not similar_executions:
            return None

        # Sort by success and execution time
        successful_execs = [
            ex for ex in similar_executions if ex["success"]
        ]

        if not successful_execs:
            return None

        # Return sequence from most successful execution
        return successful_execs[0]["agent_sequence"]

    def clear_memory(self):
        """Clear all memories."""
        self.client.delete_collection(self.collection.name)
        self.collection = self.client.get_or_create_collection(
            name=self.collection.name
        )
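A hedged round-trip sketch for `SwarmMemory`; the task text and agent output values are illustrative, and ChromaDB embeds the stored documents with its default embedding function.

# memory = SwarmMemory(collection_name="demo_memories")
# memory.store_execution(
#     task="Summarize Q3 earnings",
#     result=SwarmOutput(
#         outputs={
#             "analyst": AgentOutput(
#                 agent_name="analyst",
#                 output="Revenue up 12%",
#                 execution_time=1.2,
#             )
#         },
#         execution_time=1.5,
#         success=True,
#     ),
# )
# print(memory.get_optimal_sequence("Summarize quarterly earnings"))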
|
||||||
|
|
||||||
|
|
||||||
|
class GraphSwarm:
|
||||||
|
"""
|
||||||
|
Enhanced framework for creating and managing swarms of collaborative agents.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
agents: Union[
|
||||||
|
List[Agent], List[Tuple[Agent, List[str]]], None
|
||||||
|
] = None,
|
||||||
|
max_workers: Optional[int] = None,
|
||||||
|
swarm_name: str = "Collaborative Agent Swarm",
|
||||||
|
memory_collection: str = "swarm_memory",
|
||||||
|
):
|
||||||
|
"""Initialize GraphSwarm."""
|
||||||
|
self.graph = nx.DiGraph()
|
||||||
|
self.agents: Dict[str, Agent] = {}
|
||||||
|
self.dependencies: Dict[str, List[str]] = {}
|
||||||
|
self.executor = ThreadPoolExecutor(max_workers=max_workers)
|
||||||
|
self.swarm_name = swarm_name
|
||||||
|
self.memory_collection = memory_collection
|
||||||
|
self.memory = SwarmMemory(collection_name=memory_collection)
|
||||||
|
|
||||||
|
if agents:
|
||||||
|
self.initialize_agents(agents)
|
||||||
|
|
||||||
|
logger.info(f"Initialized GraphSwarm: {swarm_name}")
|
||||||
|
|
||||||
|

    def initialize_agents(
        self,
        agents: Union[List[Agent], List[Tuple[Agent, List[str]]]],
    ):
        """Initialize agents and their dependencies."""
        try:
            # Handle list of Agents or (Agent, dependencies) tuples
            for item in agents:
                if isinstance(item, tuple):
                    agent, dependencies = item
                else:
                    agent, dependencies = item, []

                if not isinstance(agent, Agent):
                    raise ValueError(
                        f"Expected Agent object, got {type(agent)}"
                    )

                self.agents[agent.agent_name] = agent
                self.dependencies[agent.agent_name] = dependencies
                self.graph.add_node(agent.agent_name, agent=agent)

                # Add dependencies
                for dep in dependencies:
                    if dep not in self.agents:
                        raise ValueError(
                            f"Dependency {dep} not found for agent {agent.agent_name}"
                        )
                    self.graph.add_edge(dep, agent.agent_name)

            self._validate_graph()

        except Exception as e:
            logger.error(f"Failed to initialize agents: {str(e)}")
            raise

    def _validate_graph(self):
        """Validate the agent dependency graph."""
        if not self.graph.nodes():
            raise ValueError("No agents added to swarm")

        if not nx.is_directed_acyclic_graph(self.graph):
            cycles = list(nx.simple_cycles(self.graph))
            raise ValueError(
                f"Agent dependency graph contains cycles: {cycles}"
            )
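
    # Note: initialize_agents only accepts dependencies on previously
    # registered agents, so edges always point from earlier to later agents;
    # the DAG check above is therefore primarily a defensive guard against
    # later direct mutation of self.graph.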

    def _get_agent_role_description(self, agent_name: str) -> str:
        """Generate a description of the agent's role in the swarm."""
        predecessors = list(self.graph.predecessors(agent_name))
        successors = list(self.graph.successors(agent_name))
        position = (
            "initial"
            if not predecessors
            else ("final" if not successors else "intermediate")
        )

        role = f"""You are {agent_name}, a specialized agent in the {self.swarm_name}.
Position: {position} agent in the workflow

Your relationships:"""

        if predecessors:
            role += (
                f"\nYou receive input from: {', '.join(predecessors)}"
            )
        if successors:
            role += f"\nYour output will be used by: {', '.join(successors)}"

        return role

    def _generate_workflow_context(self) -> str:
        """Generate a description of the entire workflow."""
        execution_order = list(nx.topological_sort(self.graph))

        workflow = f"""Workflow Overview of {self.swarm_name}:

Processing Order:
{' -> '.join(execution_order)}

Agent Roles:
"""

        for agent_name in execution_order:
            predecessors = list(self.graph.predecessors(agent_name))
            successors = list(self.graph.successors(agent_name))

            workflow += f"\n\n{agent_name}:"
            if predecessors:
                workflow += (
                    f"\n- Receives from: {', '.join(predecessors)}"
                )
            if successors:
                workflow += f"\n- Sends to: {', '.join(successors)}"
            if not predecessors and not successors:
                workflow += "\n- Independent agent"

        return workflow

    def _build_agent_prompt(
        self, agent_name: str, task: str, context: Dict = None
    ) -> str:
        """Build a comprehensive prompt for the agent including role and context."""
        prompt_parts = [
            self._get_agent_role_description(agent_name),
            "\nWorkflow Context:",
            self._generate_workflow_context(),
            "\nYour Task:",
            task,
        ]

        if context:
            prompt_parts.extend(
                ["\nContext from Previous Agents:", str(context)]
            )

        prompt_parts.extend(
            [
                "\nInstructions:",
                "1. Process the task according to your role",
                "2. Consider the input from previous agents when available",
                "3. Provide clear, structured output",
                "4. Remember that your output will be used by subsequent agents",
                "\nResponse Guidelines:",
                "- Provide clear, well-organized output",
                "- Include relevant details and insights",
                "- Highlight key findings",
                "- Flag any uncertainties or issues",
            ]
        )

        return "\n".join(prompt_parts)
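
    # Illustrative, abbreviated shape of the prompt assembled above for the
    # middle agent of the three-agent example at the bottom of this file:
    #
    #   You are Market-Trend-Analyzer, a specialized agent in the
    #   Market Analysis Intelligence Network.
    #   Position: intermediate agent in the workflow
    #   ...
    #   Processing Order:
    #   Market-Data-Collector -> Market-Trend-Analyzer -> Investment-Report-Generator
    #   ...
    #   Your Task:
    #   <task>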

    async def _execute_agent(
        self, agent_name: str, task: str, context: Dict = None
    ) -> AgentOutput:
        """Execute a single agent."""
        start_time = time.time()
        agent = self.agents[agent_name]

        try:
            # Build comprehensive prompt
            full_prompt = self._build_agent_prompt(
                agent_name, task, context
            )
            logger.debug(f"Prompt for {agent_name}:\n{full_prompt}")

            # Execute agent
            output = await asyncio.to_thread(agent.run, full_prompt)

            return AgentOutput(
                agent_name=agent_name,
                output=output,
                execution_time=time.time() - start_time,
                metadata={
                    "task": task,
                    "context": context,
                    "position_in_workflow": list(
                        nx.topological_sort(self.graph)
                    ).index(agent_name),
                },
            )

        except Exception as e:
            logger.error(
                f"Error executing agent {agent_name}: {str(e)}"
            )
            return AgentOutput(
                agent_name=agent_name,
                output=None,
                execution_time=time.time() - start_time,
                error=str(e),
                metadata={"task": task},
            )

    async def execute(self, task: str) -> SwarmOutput:
        """
        Execute the entire swarm of agents with memory integration.

        Args:
            task: Initial task to execute

        Returns:
            SwarmOutput: Structured output from all agents
        """
        start_time = time.time()
        outputs = {}
        success = True
        error = None

        try:
            # Get similar past executions
            similar_executions = self.memory.get_similar_executions(
                task, limit=3
            )
            optimal_sequence = self.memory.get_optimal_sequence(task)

            # Get base execution order
            base_execution_order = list(
                nx.topological_sort(self.graph)
            )

            # Determine final execution order
            if optimal_sequence and all(
                agent in base_execution_order
                for agent in optimal_sequence
            ):
                logger.info(
                    f"Using optimal sequence from memory: {optimal_sequence}"
                )
                execution_order = optimal_sequence
            else:
                execution_order = base_execution_order

            # Get historical context if available
            historical_context = {}
            if similar_executions:
                best_execution = similar_executions[0]
                if best_execution["success"]:
                    historical_context = {
                        "similar_task": best_execution["task"],
                        "previous_outputs": best_execution["outputs"],
                        "execution_time": best_execution[
                            "execution_time"
                        ],
                        "success_patterns": self._extract_success_patterns(
                            similar_executions
                        ),
                    }

            # Execute agents in order
            for agent_name in execution_order:
                try:
                    # Get context from dependencies and history
                    agent_context = {
                        "dependencies": {
                            dep: outputs[dep].output
                            for dep in self.graph.predecessors(
                                agent_name
                            )
                            if dep in outputs
                        },
                        "historical": historical_context,
                        "position": execution_order.index(agent_name),
                        "total_agents": len(execution_order),
                    }

                    # Execute agent with enhanced context
                    output = await self._execute_agent(
                        agent_name, task, agent_context
                    )
                    outputs[agent_name] = output

                    # Update historical context with current execution
                    if output.output:
                        historical_context.update(
                            {
                                f"current_{agent_name}_output": output.output
                            }
                        )

                    # Check for errors
                    if output.error:
                        success = False
                        error = f"Agent {agent_name} failed: {output.error}"

                        # Try to recover using memory
                        if similar_executions:
                            recovery_output = self._attempt_recovery(
                                agent_name, task, similar_executions
                            )
                            if recovery_output:
                                outputs[agent_name] = recovery_output
                                success = True
                                error = None
                                continue
                        break

                except Exception as agent_error:
                    logger.error(
                        f"Error executing agent {agent_name}: {str(agent_error)}"
                    )
                    success = False
                    error = f"Agent {agent_name} failed: {str(agent_error)}"
                    break

            # Create result
            result = SwarmOutput(
                outputs=outputs,
                execution_time=time.time() - start_time,
                success=success,
                error=error,
                metadata={
                    "task": task,
                    "used_optimal_sequence": optimal_sequence is not None,
                    "similar_executions_found": len(similar_executions),
                    "execution_order": execution_order,
                    "historical_context_used": bool(historical_context),
                },
            )

            # Store execution in memory
            await self._store_execution_async(task, result)

            return result

        except Exception as e:
            logger.error(f"Swarm execution failed: {str(e)}")
            return SwarmOutput(
                outputs=outputs,
                execution_time=time.time() - start_time,
                success=False,
                error=str(e),
                metadata={"task": task},
            )

    def run(self, task: str) -> SwarmOutput:
        """Synchronous interface to execute the swarm."""
        return asyncio.run(self.execute(task))

    def _extract_success_patterns(
        self, similar_executions: List[Dict]
    ) -> Dict:
        """Extract success patterns from similar executions."""
        patterns = {}
        successful_execs = [
            ex for ex in similar_executions if ex["success"]
        ]

        if successful_execs:
            patterns = {
                "common_sequences": self._find_common_sequences(
                    successful_execs
                ),
                "avg_execution_time": sum(
                    ex["execution_time"] for ex in successful_execs
                )
                / len(successful_execs),
                "successful_strategies": self._extract_strategies(
                    successful_execs
                ),
            }

        return patterns
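
    # The two helpers below are referenced by _extract_success_patterns but
    # are missing from this snippet; these are minimal sketches of what they
    # could look like, not the upstream implementations.
    def _find_common_sequences(
        self, successful_execs: List[Dict]
    ) -> List[List[str]]:
        """Return agent sequences that occur in more than one successful run."""
        from collections import Counter  # local import; file header unchanged

        counts = Counter(
            tuple(ex["agent_sequence"]) for ex in successful_execs
        )
        return [list(seq) for seq, count in counts.items() if count > 1]

    def _extract_strategies(
        self, successful_execs: List[Dict]
    ) -> List[str]:
        """Return the distinct agent orderings seen in successful runs."""
        return sorted(
            {" -> ".join(ex["agent_sequence"]) for ex in successful_execs}
        )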

    def _attempt_recovery(
        self,
        failed_agent: str,
        task: str,
        similar_executions: List[Dict],
    ) -> Optional[AgentOutput]:
        """Attempt to recover from failure using memory."""
        for execution in similar_executions:
            if (
                execution["success"]
                and failed_agent in execution["outputs"]
            ):
                historical_output = execution["outputs"][failed_agent]

                return AgentOutput(
                    agent_name=failed_agent,
                    output=historical_output["output"],
                    execution_time=historical_output[
                        "execution_time"
                    ],
                    metadata={
                        "recovered_from_memory": True,
                        "original_task": execution["task"],
                    },
                )
        return None

    async def _store_execution_async(
        self, task: str, result: SwarmOutput
    ):
        """Asynchronously store execution in memory."""
        try:
            await asyncio.to_thread(
                self.memory.store_execution, task, result
            )
        except Exception as e:
            logger.error(
                f"Failed to store execution in memory: {str(e)}"
            )

    def add_agent(self, agent: Agent, dependencies: List[str] = None):
        """Add a new agent to the swarm."""
        dependencies = dependencies or []
        self.agents[agent.agent_name] = agent
        self.dependencies[agent.agent_name] = dependencies
        self.graph.add_node(agent.agent_name, agent=agent)

        for dep in dependencies:
            if dep not in self.agents:
                raise ValueError(f"Dependency {dep} not found")
            self.graph.add_edge(dep, agent.agent_name)

        self._validate_graph()
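

# add_agent can extend the swarm after construction, provided every named
# dependency is already registered. A hypothetical continuation of the
# example below:
#
#   risk_assessor = Agent(agent_name="Risk-Assessor", model_name="gpt-4o-mini")
#   swarm.add_agent(risk_assessor, ["Market-Trend-Analyzer"])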


if __name__ == "__main__":
    try:
        # Create agents
        data_collector = Agent(
            agent_name="Market-Data-Collector",
            model_name="gpt-4o-mini",
            max_loops=1,
            streaming_on=True,
        )

        trend_analyzer = Agent(
            agent_name="Market-Trend-Analyzer",
            model_name="gpt-4o-mini",
            max_loops=1,
            streaming_on=True,
        )

        report_generator = Agent(
            agent_name="Investment-Report-Generator",
            model_name="gpt-4o-mini",
            max_loops=1,
            streaming_on=True,
        )

        # Create swarm
        swarm = GraphSwarm(
            agents=[
                (data_collector, []),
                (trend_analyzer, ["Market-Data-Collector"]),
                (report_generator, ["Market-Trend-Analyzer"]),
            ],
            swarm_name="Market Analysis Intelligence Network",
        )

        # Run the swarm
        result = swarm.run(
            "Analyze current market trends for tech stocks and provide investment recommendations"
        )

        # Print results
        print(f"Execution success: {result.success}")
        print(f"Total time: {result.execution_time:.2f} seconds")

        for agent_name, output in result.outputs.items():
            print(f"\nAgent: {agent_name}")
            print(f"Output: {output.output}")
            if output.error:
                print(f"Error: {output.error}")

    except Exception as error:
        logger.error(error)
        raise error
@ -0,0 +1,244 @@
import os
import asyncio
from pydantic import BaseModel, Field
from typing import List, Dict, Any
from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv
from swarms.utils.formatter import formatter

# Load environment variables
load_dotenv()

# Get OpenAI API key
api_key = os.getenv("OPENAI_API_KEY")


# Define Pydantic schema for agent outputs
class AgentOutput(BaseModel):
    """Schema for capturing the output of each agent."""

    agent_name: str = Field(..., description="The name of the agent")
    message: str = Field(
        ...,
        description="The agent's response or contribution to the group chat",
    )
    metadata: Dict[str, Any] = Field(
        default_factory=dict,
        description="Additional metadata about the agent's response",
    )


class GroupChat:
    """
    GroupChat class to enable multiple agents to communicate in an asynchronous group chat.
    Each agent is aware of all other agents, every message exchanged, and the social context.
    """

    def __init__(
        self,
        name: str,
        description: str,
        agents: List[Agent],
        max_loops: int = 1,
    ):
        """
        Initialize the GroupChat.

        Args:
            name (str): Name of the group chat.
            description (str): Description of the purpose of the group chat.
            agents (List[Agent]): A list of agents participating in the chat.
            max_loops (int): Maximum number of loops to run through all agents.
        """
        self.name = name
        self.description = description
        self.agents = agents
        self.max_loops = max_loops
        # Stores all messages exchanged in the chat
        self.chat_history = []

        formatter.print_panel(
            f"Initialized GroupChat '{self.name}' with {len(self.agents)} agents. Max loops: {self.max_loops}",
            title="Groupchat Swarm",
        )

    async def _agent_conversation(
        self, agent: Agent, input_message: str
    ) -> AgentOutput:
        """
        Facilitate a single agent's response to the chat.

        Args:
            agent (Agent): The agent responding.
            input_message (str): The message triggering the response.

        Returns:
            AgentOutput: The agent's response captured in a structured format.
        """
        formatter.print_panel(
            f"Agent '{agent.agent_name}' is responding to the message: {input_message}",
            title="Groupchat Swarm",
        )
        response = await asyncio.to_thread(agent.run, input_message)

        output = AgentOutput(
            agent_name=agent.agent_name,
            message=response,
            metadata={"context_length": agent.context_length},
        )
        # logger.debug(f"Agent '{agent.agent_name}' response: {response}")
        return output

    async def _run(self, initial_message: str) -> List[AgentOutput]:
        """
        Execute the group chat asynchronously, looping through all agents up to max_loops.

        Args:
            initial_message (str): The initial message to start the chat.

        Returns:
            List[AgentOutput]: The responses of all agents across all loops.
        """
        formatter.print_panel(
            f"Starting group chat '{self.name}' with initial message: {initial_message}",
            title="Groupchat Swarm",
        )
        self.chat_history.append(
            {"sender": "System", "message": initial_message}
        )

        outputs = []
        for loop in range(self.max_loops):
            formatter.print_panel(
                f"Group chat loop {loop + 1}/{self.max_loops}",
                title="Groupchat Swarm",
            )

            for agent in self.agents:
                # Create a custom input message for each agent, sharing the
                # chat history and social context
                input_message = (
                    f"Chat History:\n{self._format_chat_history()}\n\n"
                    f"Participants:\n"
                    + "\n".join(
                        [
                            f"- {a.agent_name}: {a.system_prompt}"
                            for a in self.agents
                        ]
                    )
                    + f"\n\nNew Message: {initial_message}\n\n"
                    f"You are '{agent.agent_name}'. Remember to keep track of the social context, who is speaking, "
                    f"and respond accordingly based on your role: {agent.system_prompt}."
                )

                # Collect the agent's response
                output = await self._agent_conversation(
                    agent, input_message
                )
                outputs.append(output)

                # Update chat history with the agent's response
                self.chat_history.append(
                    {
                        "sender": agent.agent_name,
                        "message": output.message,
                    }
                )

        formatter.print_panel(
            "Group chat completed. All agent responses captured.",
            title="Groupchat Swarm",
        )
        return outputs

    def run(self, task: str, *args, **kwargs):
        """Synchronous entry point that delegates to the async _run method."""
        return asyncio.run(self._run(task))

    def _format_chat_history(self) -> str:
        """
        Format the chat history for agents to understand the context.

        Returns:
            str: The formatted chat history as a string.
        """
        return "\n".join(
            [
                f"{entry['sender']}: {entry['message']}"
                for entry in self.chat_history
            ]
        )

    def __str__(self) -> str:
        """String representation of the group chat's outputs."""
        return self._format_chat_history()

    def to_json(self) -> List[Dict[str, str]]:
        """JSON-serializable list representation of the group chat's outputs."""
        return [
            {"sender": entry["sender"], "message": entry["message"]}
            for entry in self.chat_history
        ]
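
    # To obtain an actual JSON string from the structure above, serialize it
    # with the standard library (hypothetical usage, `group_chat` as below):
    #
    #   import json
    #   json_str = json.dumps(group_chat.to_json(), indent=2)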


# Example Usage
if __name__ == "__main__":

    load_dotenv()

    # Get the OpenAI API key from the environment variable
    api_key = os.getenv("OPENAI_API_KEY")

    # Create an instance of the OpenAIChat class
    model = OpenAIChat(
        openai_api_key=api_key,
        model_name="gpt-4o-mini",
        temperature=0.1,
    )

    # Example agents
    agent1 = Agent(
        agent_name="Financial-Analysis-Agent",
        system_prompt="You are a financial analyst specializing in investment strategies.",
        llm=model,
        max_loops=1,
        autosave=False,
        dashboard=False,
        verbose=True,
        dynamic_temperature_enabled=True,
        user_name="swarms_corp",
        retry_attempts=1,
        context_length=200000,
        output_type="string",
        streaming_on=False,
    )

    agent2 = Agent(
        agent_name="Tax-Adviser-Agent",
        system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
        llm=model,
        max_loops=1,
        autosave=False,
        dashboard=False,
        verbose=True,
        dynamic_temperature_enabled=True,
        user_name="swarms_corp",
        retry_attempts=1,
        context_length=200000,
        output_type="string",
        streaming_on=False,
    )

    # Create group chat
    group_chat = GroupChat(
        name="Financial Discussion",
        description="A group chat for financial analysis and tax advice.",
        agents=[agent1, agent2],
    )

    # Run the group chat (run is synchronous and manages its own event loop)
    group_chat.run(
        "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria? What do you guys think?"
    )
@ -1,214 +0,0 @@
import hashlib
from time import time_ns
from typing import Callable, List, Optional, Sequence, Union

from swarms.structs.agent import Agent
from swarms.utils.loguru_logger import logger
from swarms.structs.base_swarm import BaseSwarm


def _hash(input: str):
    """
    Hashes the input string using the SHA256 algorithm.

    Args:
        input (str): The string to be hashed.

    Returns:
        str: The hexadecimal representation of the hash value.
    """
    hex_dig = hashlib.sha256(input.encode("utf-8")).hexdigest()
    return hex_dig


def msg_hash(
    agent: Agent, content: str, turn: int, msg_type: str = "text"
):
    """
    Generate a hash value for a message.

    Args:
        agent (Agent): The agent sending the message.
        content (str): The content of the message.
        turn (int): The turn number of the message.
        msg_type (str, optional): The type of the message. Defaults to "text".

    Returns:
        str: The hexadecimal SHA256 hash of the message.
    """
    time = time_ns()
    return _hash(
        f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:"
        f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}"
    )
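

# Example usage (with `agent1` as in the MessagePool docstring below): the
# digest is a 64-character hexadecimal string.
#
#   digest = msg_hash(agent1, "Hello, agent2!", turn=1)
#   assert len(digest) == 64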


class MessagePool(BaseSwarm):
    """
    A class representing a message pool for agents in a swarm.

    Attributes:
        agents (Optional[Sequence[Agent]]): The list of agents in the swarm.
        moderator (Optional[Agent]): The moderator agent.
        turns (Optional[int]): The number of turns.
        routing_function (Optional[Callable]): The routing function for message distribution.
        show_names (Optional[bool]): Flag indicating whether to show agent names.
        messages (List[Dict]): The list of messages in the pool.

    Examples:
        >>> from swarms.structs.agent import Agent
        >>> from swarms.structs.message_pool import MessagePool
        >>> agent1 = Agent(agent_name="agent1")
        >>> agent2 = Agent(agent_name="agent2")
        >>> agent3 = Agent(agent_name="agent3")
        >>> moderator = Agent(agent_name="moderator")
        >>> agents = [agent1, agent2, agent3]
        >>> message_pool = MessagePool(agents=agents, moderator=moderator, turns=5)
        >>> message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
        >>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
        >>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
        >>> message_pool.get_all_messages()
        [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
        >>> message_pool.get_visible_messages(agent=agent1, turn=1)
        [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
        >>> message_pool.get_visible_messages(agent=agent2, turn=1)
        [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
    """

    def __init__(
        self,
        agents: Optional[Sequence[Agent]] = None,
        moderator: Optional[Agent] = None,
        turns: Optional[int] = 5,
        routing_function: Optional[Callable] = None,
        show_names: Optional[bool] = False,
        autosave: Optional[bool] = False,
        *args,
        **kwargs,
    ):
        super().__init__()

        self.agents = agents
        self.moderator = moderator
        self.turns = turns
        self.routing_function = routing_function
        self.show_names = show_names
        self.autosave = autosave

        self.messages = []

        logger.info("MessagePool initialized")
        logger.info(f"Number of agents: {len(agents)}")
        logger.info(
            f"Agents: {[agent.agent_name for agent in agents]}"
        )
        logger.info(f"moderator: {moderator.agent_name} is available")
        logger.info(f"Number of turns: {turns}")

    def add(
        self,
        agent: Agent,
        content: str,
        turn: int,
        visible_to: Union[str, List[str]] = "all",
        logged: bool = True,
    ):
        """
        Add a message to the pool.

        Args:
            agent (Agent): The agent sending the message.
            content (str): The content of the message.
            turn (int): The turn number.
            visible_to (Union[str, List[str]], optional): The agents who can see the message. Defaults to "all".
            logged (bool, optional): Flag indicating whether the message should be logged. Defaults to True.
        """
        self.messages.append(
            {
                "agent": agent,
                "content": content,
                "turn": turn,
                "visible_to": visible_to,
                "logged": logged,
            }
        )
        logger.info(f"Message added: {content}")

    def reset(self):
        """
        Reset the message pool.
        """
        self.messages = []
        logger.info("MessagePool reset")

    def last_turn(self):
        """
        Get the last turn number.

        Returns:
            int: The last turn number.
        """
        if len(self.messages) == 0:
            return 0
        else:
            return self.messages[-1]["turn"]

    @property
    def last_message(self):
        """
        Get the last message in the pool.

        Returns:
            dict: The last message.
        """
        if len(self.messages) == 0:
            return None
        else:
            return self.messages[-1]

    def get_all_messages(self):
        """
        Get all messages in the pool.

        Returns:
            List[Dict]: The list of all messages.
        """
        return self.messages

    def get_visible_messages(self, agent: Agent, turn: int):
        """
        Get the visible messages for a given agent and turn.

        Args:
            agent (Agent): The agent.
            turn (int): The turn number.

        Returns:
            List[Dict]: The list of visible messages.
        """
        # Get the messages before the current turn
        prev_messages = [
            message
            for message in self.messages
            if message["turn"] < turn
        ]

        visible_messages = []
        for message in prev_messages:
            if (
                message["visible_to"] == "all"
                or agent.agent_name in message["visible_to"]
            ):
                visible_messages.append(message)
        return visible_messages

    # def query(self, query: str):
    #     """
    #     Query a message from the messages list and then pass it to the moderator
    #     """
    #     return [
    #         (mod, content)
    #         for mod, content, _ in self.messages  # Add an underscore to ignore the rest of the elements
    #         if query in content
    #     ]
@ -0,0 +1,276 @@
import asyncio
import json
from typing import Any, Dict, List

import pulsar
from pulsar import ConsumerType
from loguru import logger

from swarms import Agent


class ScalableAsyncAgentSwarm:
    """
    A scalable, asynchronous swarm of agents leveraging Apache Pulsar for inter-agent communication.
    Provides load balancing, health monitoring, dead letter queues, and centralized logging.
    """

    def __init__(
        self,
        pulsar_url: str,
        topic: str,
        dlq_topic: str,
        agents_config: List[Dict[str, Any]],
    ):
        """
        Initializes the async swarm with agents.

        Args:
            pulsar_url (str): The URL of the Apache Pulsar broker.
            topic (str): The main topic for task distribution.
            dlq_topic (str): The Dead Letter Queue topic for failed messages.
            agents_config (List[Dict[str, Any]]): List of agent configurations with `name`, `description`, and `model_name`.
        """
        self.pulsar_url = pulsar_url
        self.topic = topic
        self.dlq_topic = dlq_topic
        self.agents_config = agents_config
        self.client = pulsar.Client(pulsar_url)
        self.consumer = self.client.subscribe(
            topic,
            subscription_name="swarm-task-sub",
            consumer_type=ConsumerType.Shared,
        )
        self.dlq_producer = self.client.create_producer(dlq_topic)
        self.response_logger = []
        self.agents = [
            self.create_agent(config) for config in agents_config
        ]
        self.agent_index = 0

        logger.info(
            "Swarm initialized with agents: {}",
            [agent["name"] for agent in agents_config],
        )
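
    # Note: ConsumerType.Shared allows multiple consumers to attach to the
    # same subscription, with the broker distributing messages among them;
    # combined with the round-robin dispatch in distribute_task below, this
    # is what lets the swarm scale horizontally.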

    def create_agent(
        self, agent_config: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Creates a new agent configuration with asynchronous capabilities.

        Args:
            agent_config (Dict[str, Any]): Configuration dictionary with agent details.

        Returns:
            Dict[str, Any]: A dictionary containing agent metadata and functionality.
        """
        agent_name = agent_config["name"]
        description = agent_config["description"]
        model_name = agent_config.get("model_name", "gpt-4o-mini")

        class AsyncAgent:
            """
            An asynchronous agent that processes tasks and communicates via Apache Pulsar.
            """

            def __init__(
                self, name: str, description: str, model_name: str
            ):
                self.name = name
                self.description = description
                self.agent = Agent(
                    agent_name=name,
                    model_name=model_name,
                    max_loops="auto",
                    interactive=True,
                    streaming_on=True,
                )
                logger.info(
                    f"Initialized agent '{name}' - {description}"
                )

            async def process_task(
                self, message: str
            ) -> Dict[str, Any]:
                """
                Processes a single task using the agent.

                Args:
                    message (str): The task message.

                Returns:
                    Dict[str, Any]: JSON-formatted response.
                """
                try:
                    logger.info(
                        f"Agent {self.name} processing task: {message}"
                    )
                    response = await asyncio.to_thread(
                        self.agent.run, message
                    )
                    logger.info(f"Agent {self.name} completed task.")
                    return {
                        "agent_name": self.name,
                        "response": response,
                    }
                except Exception as e:
                    logger.error(
                        f"Agent {self.name} encountered an error: {e}"
                    )
                    return {"agent_name": self.name, "error": str(e)}

        return {
            "name": agent_name,
            "instance": AsyncAgent(
                agent_name, description, model_name
            ),
        }

    async def distribute_task(self, message: str):
        """
        Distributes a task to the next available agent using round-robin.

        Args:
            message (str): The task message.
        """
        agent = self.agents[self.agent_index]
        self.agent_index = (self.agent_index + 1) % len(self.agents)

        try:
            response = await agent["instance"].process_task(message)
            self.log_response(response)
        except Exception as e:
            logger.error(
                f"Error processing task by agent {agent['name']}: {e}"
            )
            self.send_to_dlq(message)

    async def monitor_health(self):
        """
        Periodically monitors the health of agents.
        """
        while True:
            logger.info("Performing health check for all agents.")
            for agent in self.agents:
                logger.info(f"Agent {agent['name']} is online.")
            await asyncio.sleep(10)

    def send_to_dlq(self, message: str):
        """
        Sends a failed message to the Dead Letter Queue (DLQ).

        Args:
            message (str): The message to send to the DLQ.
        """
        try:
            self.dlq_producer.send(message.encode("utf-8"))
            logger.info("Message sent to Dead Letter Queue.")
        except Exception as e:
            logger.error(f"Failed to send message to DLQ: {e}")

    def log_response(self, response: Dict[str, Any]):
        """
        Logs the response to a centralized list for later analysis.

        Args:
            response (Dict[str, Any]): The agent's response.
        """
        self.response_logger.append(response)
        logger.info(f"Response logged: {response}")

    async def listen_and_distribute(self):
        """
        Listens to the main Pulsar topic and distributes tasks to agents.
        """
        while True:
            # consumer.receive() blocks, so run it in a worker thread to
            # avoid stalling the event loop (and the health monitor)
            msg = await asyncio.to_thread(self.consumer.receive)
            try:
                message = msg.data().decode("utf-8")
                logger.info(f"Received task: {message}")
                await self.distribute_task(message)
                self.consumer.acknowledge(msg)
            except Exception as e:
                logger.error(f"Error processing message: {e}")
                self.send_to_dlq(msg.data().decode("utf-8"))
                self.consumer.negative_acknowledge(msg)

    async def run(self):
        """
        Runs the swarm asynchronously with health monitoring and task distribution.
        """
        logger.info("Starting the async swarm...")
        task_listener = asyncio.create_task(
            self.listen_and_distribute()
        )
        health_monitor = asyncio.create_task(self.monitor_health())
        await asyncio.gather(task_listener, health_monitor)

    def shutdown(self):
        """
        Safely shuts down the swarm and logs all responses.
        """
        logger.info("Shutting down the swarm...")
        self.client.close()
        with open("responses.json", "w") as f:
            json.dump(self.response_logger, f, indent=4)
        logger.info("Responses saved to 'responses.json'.")


# from scalable_agent_swarm import ScalableAsyncAgentSwarm  # Assuming your swarm class is saved here


if __name__ == "__main__":
    # Example Configuration
    PULSAR_URL = "pulsar://localhost:6650"
    TOPIC = "stock-analysis"
    DLQ_TOPIC = "stock-analysis-dlq"

    # Agents configuration
    AGENTS_CONFIG = [
        {
            "name": "Stock-Analysis-Agent-1",
            "description": "Analyzes stock trends.",
            "model_name": "gpt-4o-mini",
        },
        {
            "name": "Stock-News-Agent",
            "description": "Summarizes stock news.",
            "model_name": "gpt-4o-mini",
        },
        {
            "name": "Tech-Trends-Agent",
            "description": "Tracks tech sector trends.",
            "model_name": "gpt-4o-mini",
        },
    ]

    # Tasks to send
    TASKS = [
        "Analyze the trend for tech stocks in Q4 2024",
        "Summarize the latest news on the S&P 500",
        "Identify the top-performing sectors in the stock market",
        "Provide a forecast for AI-related stocks for 2025",
    ]

    # Initialize the swarm
    swarm = ScalableAsyncAgentSwarm(
        PULSAR_URL, TOPIC, DLQ_TOPIC, AGENTS_CONFIG
    )

    async def main():
        # Run the swarm in the background (create_task needs a running loop,
        # so everything is wrapped in this coroutine)
        swarm_task = asyncio.create_task(swarm.run())

        # Send tasks to the topic; blocking Pulsar calls run in a thread
        def send_tasks():
            client = pulsar.Client(PULSAR_URL)
            producer = client.create_producer(TOPIC)
            for task in TASKS:
                producer.send(task.encode("utf-8"))
                print(f"Sent task: {task}")
            producer.close()
            client.close()

        await asyncio.to_thread(send_tasks)

        # Keep the swarm running
        await swarm_task

    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        swarm.shutdown()