Merge branch 'master' into 668

668
Kye Gomez 3 weeks ago committed by GitHub
commit 80187603f6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -22,4 +22,4 @@ jobs:
- run: ruff format .
- run: ruff check --fix .
- uses: autofix-ci/action@dd55f44df8f7cdb7a6bf74c78677eb8acd40cd0a
- uses: autofix-ci/action@ff86a557419858bb967097bfc916833f5647fa8c

@ -0,0 +1,43 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
#
# This workflow file requires a free account on Bearer.com to manage findings, notifications and more.
# See https://docs.bearer.com/guides/bearer-cloud/
name: Bearer
on:
push:
branches: ["master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: ["master"]
schedule:
- cron: '24 22 * * 6'
permissions:
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
jobs:
bearer:
runs-on: ubuntu-latest
steps:
# Checkout project source
- uses: actions/checkout@v4
# Scan code using Bearer CLI
- name: Run Report
id: report
uses: bearer/bearer-action@828eeb928ce2f4a7ca5ed57fb8b59508cb8c79bc
with:
api-key: ${{ secrets.BEARER_TOKEN }}
format: sarif
output: results.sarif
exit-code: 0
# Upload SARIF file generated in previous step
- name: Upload SARIF file
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: results.sarif

@ -0,0 +1,39 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request,
# surfacing known-vulnerable versions of the packages declared or updated in the PR.
# Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable
# packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency review'
on:
pull_request:
branches: [ "master" ]
# If using a dependency submission action in this workflow this permission will need to be set to:
#
# permissions:
# contents: write
#
# https://docs.github.com/en/enterprise-cloud@latest/code-security/supply-chain-security/understanding-your-software-supply-chain/using-the-dependency-submission-api
permissions:
contents: read
# Write permissions for pull-requests are required for using the `comment-summary-in-pr` option, comment out if you aren't using this option
pull-requests: write
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4
# Commonly enabled options, see https://github.com/actions/dependency-review-action#configuration-options for all available options.
with:
comment-summary-in-pr: always
# fail-on-severity: moderate
# deny-licenses: GPL-1.0-or-later, LGPL-2.0-or-later
# retry-on-snapshot-warnings: true

@ -0,0 +1,18 @@
name: Docker Image CI
on:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Build the Docker image
run: docker build . --file Dockerfile --tag my-image-name:$(date +%s)

@ -0,0 +1,46 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# This workflow integrates Pyre with GitHub's
# Code Scanning feature.
#
# Pyre is a performant type checker for Python compliant with
# PEP 484. Pyre can analyze codebases with millions of lines
# of code incrementally providing instantaneous feedback
# to developers as they write code.
#
# See https://pyre-check.org
name: Pyre
on:
workflow_dispatch:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
permissions:
contents: read
jobs:
pyre:
permissions:
actions: read
contents: read
security-events: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Run Pyre
uses: facebook/pyre-action@12b8d923443ea66cb657facc2e5faac1c8c86e64
with:
# To customize these inputs:
# See https://github.com/facebook/pyre-action#inputs
repo-directory: './'
requirements-path: 'requirements.txt'

@ -0,0 +1,50 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# This workflow integrates Python Static Analyzer (Pysa) with
# GitHub's Code Scanning feature.
#
# Python Static Analyzer (Pysa) is a security-focused static
# analysis tool that tracks flows of data from where they
# originate to where they terminate in a dangerous location.
#
# See https://pyre-check.org/docs/pysa-basics/
name: Pysa
on:
workflow_dispatch:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
schedule:
- cron: '43 5 * * 3'
permissions:
contents: read
jobs:
pysa:
permissions:
actions: read
contents: read
security-events: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Run Pysa
uses: facebook/pysa-action@f46a63777e59268613bd6e2ff4e29f144ca9e88b
with:
# To customize these inputs:
# See https://github.com/facebook/pysa-action#inputs
repo-directory: './'
requirements-path: 'requirements.txt'
infer-types: true
include-default-sapp-filters: true

@ -0,0 +1,34 @@
name: Python Package using Conda
on: [push]
jobs:
build-linux:
runs-on: ubuntu-latest
strategy:
max-parallel: 5
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Add conda to system path
run: |
# $CONDA is an environment variable pointing to the root of the miniconda directory
echo $CONDA/bin >> $GITHUB_PATH
- name: Install dependencies
run: |
conda env update --file environment.yml --name base
- name: Lint with flake8
run: |
conda install flake8
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
conda install pytest
pytest

@ -1,6 +1,8 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
# [ ] TODO: [PEP 458](https://blog.pypi.org/posts/2024-11-14-pypi-now-supports-digital-attestations/)
name: Python package
on:
@ -16,7 +18,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12"]
python-version: ["3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4

@ -0,0 +1,49 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# This workflow file requires a free account on Semgrep.dev to
# manage rules, file ignores, notifications, and more.
#
# See https://semgrep.dev/docs
name: Semgrep
on:
push:
branches: [ "master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
schedule:
- cron: '19 7 * * 3'
permissions:
contents: read
jobs:
semgrep:
permissions:
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
name: Scan
runs-on: ubuntu-latest
steps:
# Checkout project source
- uses: actions/checkout@v4
# Scan code using project's configuration on https://semgrep.dev/manage
- uses: returntocorp/semgrep-action@713efdd345f3035192eaa63f56867b88e63e4e5d
with:
publishToken: ${{ secrets.SEMGREP_APP_TOKEN }}
publishDeployment: ${{ secrets.SEMGREP_DEPLOYMENT_ID }}
generateSarif: "1"
# Upload SARIF file generated in previous step
- name: Upload SARIF file
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: semgrep.sarif
if: always()

@ -0,0 +1,48 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
name: trivy
on:
push:
branches: [ "master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
schedule:
- cron: '31 0 * * 5'
permissions:
contents: read
jobs:
build:
permissions:
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
name: Build
runs-on: "ubuntu-20.04"
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Build an image from Dockerfile
run: |
docker build -t docker.io/my-organization/my-app:${{ github.sha }} .
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@18f2510ee396bbf400402947b394f2dd8c87dbb0
with:
image-ref: 'docker.io/my-organization/my-app:${{ github.sha }}'
format: 'template'
template: '@/contrib/sarif.tpl'
output: 'trivy-results.sarif'
severity: 'CRITICAL,HIGH'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: 'trivy-results.sarif'

.gitignore

@ -8,7 +8,8 @@ audio/
video/
artifacts_three
dataframe/
.ruff_cache
.pytest_cache
static/generated
runs
Financial-Analysis-Agent_state.json
@ -223,3 +224,52 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.vscode/settings.json
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
dist/
# Flycheck
flycheck_*.el
# server auth directory
/server/
# projectiles files
.projectile
# directory configuration
.dir-locals.el
# network security
/network-security.data

@ -453,8 +453,8 @@ agent.run(task, img)
----
### `ToolAgent`
ToolAgent is an agent that can use tools through JSON function calling. It intakes any open source model from huggingface and is extremely modular and plug in and play. We need help adding general support to all models soon.
### Local Agent `ToolAgent`
ToolAgent is a fully local agent that can use tools through JSON function calling. It accepts any open-source model from Hugging Face and is extremely modular and plug-and-play. We need help adding general support for all models soon.
```python
@ -462,7 +462,7 @@ from pydantic import BaseModel, Field
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
from swarms.utils.json_utils import base_model_to_json
from swarms.tools.json_utils import base_model_to_json
# Load the pre-trained model and tokenizer
model = AutoModelForCausalLM.from_pretrained(
@ -511,83 +511,6 @@ print(f"Generated data: {generated_data}")
```
## Integrating External Agents
Integrating external agents from other agent frameworks is easy with swarms.
Steps:
1. Create a new class that inherits `Agent`
2. Create a `.run(task: str) -> str` method that runs the agent and returns the response.
3. The new agent must return the response as a string, though you may add additional methods to save the output to JSON. A minimal skeleton of this pattern is sketched below.
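Before the Griptape example, here is a minimal, framework-agnostic sketch of the same pattern. The `ExternalSwarmsAgent` class and the wrapped `external_agent` object are illustrative placeholders, not part of the swarms API:
```python
from swarms import Agent as SwarmsAgent  # Base Agent class from Swarms


class ExternalSwarmsAgent(SwarmsAgent):
    """Hypothetical wrapper around an agent from another framework."""

    def __init__(self, external_agent):
        # `external_agent` is any third-party object that can execute a task;
        # it stands in for a real agent from Griptape, Langchain, etc.
        self.external_agent = external_agent

    def run(self, task: str) -> str:
        # Delegate execution to the external framework and
        # always return the response as a string
        result = self.external_agent.run(task)
        return str(result)
```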
### Griptape Example
For example, here's how you can create a custom **Griptape** agent that integrates with the **Swarms** framework by inheriting from the `Agent` class in **Swarms** and overriding the `run(task: str) -> str` method.
```python
from swarms import (
Agent as SwarmsAgent,
) # Import the base Agent class from Swarms
from griptape.structures import Agent as GriptapeAgent
from griptape.tools import (
WebScraperTool,
FileManagerTool,
PromptSummaryTool,
)
# Create a custom agent class that inherits from SwarmsAgent
class GriptapeSwarmsAgent(SwarmsAgent):
def __init__(self, *args, **kwargs):
# Initialize the Griptape agent with its tools
self.agent = GriptapeAgent(
input="Load {{ args[0] }}, summarize it, and store it in a file called {{ args[1] }}.",
tools=[
WebScraperTool(off_prompt=True),
PromptSummaryTool(off_prompt=True),
FileManagerTool(),
],
*args,
**kwargs,
# Add additional settings
)
# Override the run method to take a task and execute it using the Griptape agent
def run(self, task: str) -> str:
# Extract URL and filename from task (you can modify this parsing based on task structure)
url, filename = task.split(
","
) # Example of splitting task string
# Execute the Griptape agent with the task inputs
result = self.agent.run(url.strip(), filename.strip())
# Return the final result as a string
return str(result)
# Example usage:
griptape_swarms_agent = GriptapeSwarmsAgent()
output = griptape_swarms_agent.run(
"https://griptape.ai, griptape.txt"
)
print(output)
```
### Key Components:
1. **GriptapeSwarmsAgent**: A custom class that inherits from the `SwarmsAgent` class and integrates the Griptape agent.
2. **run(task: str) -> str**: A method that takes a task string, processes it (e.g., splitting into a URL and filename), and runs the Griptape agent with the provided inputs.
3. **Griptape Tools**: The tools integrated into the Griptape agent (e.g., `WebScraperTool`, `PromptSummaryTool`, `FileManagerTool`) allow for web scraping, summarization, and file management.
You can now easily plug this custom Griptape agent into the **Swarms Framework** and use it to run tasks!
## Understanding Swarms
A swarm refers to a group of more than two agents working collaboratively to achieve a common goal. These agents can be software entities, such as LLMs, that interact with each other to perform complex tasks. The concept of a swarm is inspired by natural systems like ant colonies or bird flocks, where simple individual behaviors lead to complex group dynamics and problem-solving capabilities.
@ -851,6 +774,8 @@ print(
The `AgentRearrange` orchestration technique, inspired by Einops and einsum, allows you to define and map out the relationships between various agents. It provides a powerful tool for orchestrating complex workflows, enabling you to specify linear and sequential relationships such as `a -> a1 -> a2 -> a3`, or concurrent relationships where the first agent sends a message to 3 agents simultaneously: `a -> a1, a2, a3`. This level of customization allows for the creation of highly efficient and dynamic workflows, where agents can work in parallel or in sequence as needed. The `AgentRearrange` technique is a valuable addition to the swarms library, providing a new level of flexibility and control over the orchestration of agents. For more detailed information and examples, please refer to the [official documentation](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/).
[Check out my video on agent rearrange!](https://youtu.be/Rq8wWQ073mg)
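Before the full example below, here is a minimal sketch of the flow syntax itself, including a concurrent step; the agent names, prompts, and model are placeholders:
```python
from swarms import Agent, AgentRearrange

# Placeholder agents; names and prompts are illustrative only
collector = Agent(
    agent_name="Collector",
    system_prompt="Gather raw notes on the given topic.",
    model_name="gpt-4o",
    max_loops=1,
)
summarizer = Agent(
    agent_name="Summarizer",
    system_prompt="Summarize the collected notes.",
    model_name="gpt-4o",
    max_loops=1,
)
critic = Agent(
    agent_name="Critic",
    system_prompt="Point out gaps or errors in the collected notes.",
    model_name="gpt-4o",
    max_loops=1,
)

# "Collector -> Summarizer, Critic" runs the Collector first, then sends its
# output to the Summarizer and the Critic concurrently
swarm = AgentRearrange(
    agents=[collector, summarizer, critic],
    flow="Collector -> Summarizer, Critic",
    max_loops=1,
)

print(swarm.run("Explain what a swarm of LLM agents is."))
```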
### Methods
@ -876,68 +801,184 @@ The `run` method returns the final output after all agents have processed the in
```python
from swarms import Agent, AgentRearrange
from datetime import datetime
# Initialize the director agent
from swarms import Agent, AgentRearrange, create_file_in_folder
director = Agent(
agent_name="Director",
system_prompt="Directs the tasks for the workers",
model_name="claude-2",
chief_medical_officer = Agent(
agent_name="Chief Medical Officer",
system_prompt="""You are the Chief Medical Officer coordinating a team of medical specialists for viral disease diagnosis.
Your responsibilities include:
- Gathering initial patient symptoms and medical history
- Coordinating with specialists to form differential diagnoses
- Synthesizing different specialist opinions into a cohesive diagnosis
- Ensuring all relevant symptoms and test results are considered
- Making final diagnostic recommendations
- Suggesting treatment plans based on team input
- Identifying when additional specialists need to be consulted
Guidelines:
1. Always start with a comprehensive patient history
2. Consider both common and rare viral conditions
3. Factor in patient demographics and risk factors
4. Document your reasoning process clearly
5. Highlight any critical or emergency symptoms
6. Note any limitations or uncertainties in the diagnosis
Format all responses with clear sections for:
- Initial Assessment
- Differential Diagnoses
- Specialist Consultations Needed
- Recommended Next Steps""",
model_name="gpt-4o", # Models from litellm -> claude-2
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="director.json",
)
# Viral Disease Specialist
virologist = Agent(
agent_name="Virologist",
system_prompt="""You are a specialist in viral diseases with expertise in:
- Respiratory viruses (Influenza, Coronavirus, RSV)
- Systemic viral infections (EBV, CMV, HIV)
- Childhood viral diseases (Measles, Mumps, Rubella)
- Emerging viral threats
Your role involves:
1. Analyzing symptoms specific to viral infections
2. Distinguishing between different viral pathogens
3. Assessing viral infection patterns and progression
4. Recommending specific viral tests
5. Evaluating epidemiological factors
For each case, consider:
- Incubation periods
- Transmission patterns
- Seasonal factors
- Geographic prevalence
- Patient immune status
- Current viral outbreaks
Provide detailed analysis of:
- Characteristic viral symptoms
- Disease progression timeline
- Risk factors for severe disease
- Potential complications""",
model_name="gpt-4o",
max_loops=1,
)
# Initialize worker 1
# Internal Medicine Specialist
internist = Agent(
agent_name="Internist",
system_prompt="""You are an Internal Medicine specialist responsible for:
- Comprehensive system-based evaluation
- Integration of symptoms across organ systems
- Identification of systemic manifestations
- Assessment of comorbidities
For each case, analyze:
1. Vital signs and their implications
2. System-by-system review (cardiovascular, respiratory, etc.)
3. Impact of existing medical conditions
4. Medication interactions and contraindications
5. Risk stratification
Consider these aspects:
- Age-related factors
- Chronic disease impact
- Medication history
- Social and environmental factors
Document:
- Physical examination findings
- System-specific symptoms
- Relevant lab abnormalities
- Risk factors for complications""",
model_name="gpt-4o",
max_loops=1,
)
worker1 = Agent(
agent_name="Worker1",
system_prompt="Generates a transcript for a youtube video on what swarms are",
model_name="claude-2",
# Diagnostic Synthesizer
synthesizer = Agent(
agent_name="Diagnostic Synthesizer",
system_prompt="""You are responsible for synthesizing all specialist inputs to create a final diagnostic assessment:
Core responsibilities:
1. Integrate findings from all specialists
2. Identify patterns and correlations
3. Resolve conflicting opinions
4. Generate probability-ranked differential diagnoses
5. Recommend additional testing if needed
Analysis framework:
- Weight evidence based on reliability and specificity
- Consider epidemiological factors
- Evaluate diagnostic certainty
- Account for test limitations
Provide structured output including:
1. Primary diagnosis with confidence level
2. Supporting evidence summary
3. Alternative diagnoses to consider
4. Recommended confirmatory tests
5. Red flags or warning signs
6. Follow-up recommendations
Documentation requirements:
- Clear reasoning chain
- Evidence quality assessment
- Confidence levels for each diagnosis
- Knowledge gaps identified
- Risk assessment""",
model_name="gpt-4o",
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker1.json",
)
# Create agent list
agents = [chief_medical_officer, virologist, internist, synthesizer]
# Define diagnostic flow
flow = f"""{chief_medical_officer.agent_name} -> {virologist.agent_name} -> {internist.agent_name} -> {synthesizer.agent_name}"""
# Initialize worker 2
worker2 = Agent(
agent_name="Worker2",
system_prompt="Summarizes the transcript generated by Worker1",
model_name="claude-2",
# Create the swarm system
diagnosis_system = AgentRearrange(
name="Medical-nlp-diagnosis-swarm",
description="natural language symptions to diagnosis report",
agents=agents,
flow=flow,
max_loops=1,
dashboard=False,
streaming_on=True,
verbose=True,
stopping_token="<DONE>",
state_save_file_type="json",
saved_state_path="worker2.json",
output_type="all",
)
# Create a list of agents
agents = [director, worker1, worker2]
# Example usage
if __name__ == "__main__":
# Example patient case
patient_case = """
Patient: 45-year-old female
Presenting symptoms:
- Fever (101.5°F) for 3 days
- Dry cough
- Fatigue
- Mild shortness of breath
Medical history:
- Controlled hypertension
- No recent travel
- Fully vaccinated for COVID-19
- No known sick contacts
"""
# Define the flow pattern
flow = "Director -> Worker1 -> Worker2"
# Add timestamp to the patient case
case_info = f"Timestamp: {datetime.now()}\nPatient Information: {patient_case}"
# Run the diagnostic process
diagnosis = diagnosis_system.run(case_info)
# Create a folder and file called reports
create_file_in_folder(
"reports", "medical_analysis_agent_rearrange.md", diagnosis
)
# Using AgentRearrange class
agent_system = AgentRearrange(agents=agents, flow=flow)
output = agent_system.run(
"Create a format to express and communicate swarms of llms in a structured manner for youtube"
)
print(output)
```

@ -0,0 +1,6 @@
fastapi
uvicorn
pydantic
loguru
python-dotenv
swarms # Specify the version or source if it's not on PyPI

@ -35,7 +35,3 @@ run: |
# LOG_LEVEL: "INFO"
# # MAX_WORKERS: "4"
# metadata:
# name: swarms-api-service
# version: "1.0.0"
# environment: production

@ -157,6 +157,7 @@ nav:
# - Build Custom Agents: "swarms/structs/diy_your_own_agent.md"
- Agent Architecture: "swarms/framework/agents_explained.md"
- Complete Agent API: "swarms/structs/agent.md"
- OpenAI Assistant: "swarms/agents/openai_assistant.md"
- Create and Run Agents from YAML: "swarms/agents/create_agents_yaml.md"
- Integrating External Agents from Griptape, Langchain, etc: "swarms/agents/external_party_agents.md"
- Tools:

@ -0,0 +1,135 @@
# OpenAI Assistant
The OpenAI Assistant class provides a wrapper around OpenAI's Assistants API, integrating it with the swarms framework.
## Overview
The `OpenAIAssistant` class allows you to create and interact with OpenAI Assistants, providing a simple interface for:
- Creating assistants with specific roles and capabilities
- Adding custom functions that the assistant can call
- Managing conversation threads
- Handling tool calls and function execution
- Getting responses from the assistant
## Installation
```bash
pip install swarms
```
## Basic Usage
```python
from swarms import OpenAIAssistant
# Create an assistant
assistant = OpenAIAssistant(
name="Math Tutor",
instructions="You are a helpful math tutor.",
model="gpt-4o",
tools=[{"type": "code_interpreter"}]
)
# Run a Task
response = assistant.run("Solve the equation: 3x + 11 = 14")
print(response)
# Continue the conversation in the same thread
follow_up = assistant.run("Now explain how you solved it")
print(follow_up)
```
## Function Calling
The assistant supports custom function integration:
```python
def get_weather(location: str, unit: str = "celsius") -> str:
# Mock weather function
return f"The weather in {location} is 22 degrees {unit}"
# Add function to assistant
assistant.add_function(
    func=get_weather,
description="Get the current weather in a location",
parameters={
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"default": "celsius"
}
},
"required": ["location"]
}
)
```
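Once the function is registered, a regular `run` call can trigger it; this short usage sketch assumes the question maps onto the registered weather tool:
```python
response = assistant.run("What's the current weather in Tokyo, in celsius?")
print(response)
```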
## API Reference
### Constructor
```python
OpenAIAssistant(
name: str,
instructions: Optional[str] = None,
model: str = "gpt-4o",
tools: Optional[List[Dict[str, Any]]] = None,
file_ids: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
functions: Optional[List[Dict[str, Any]]] = None,
)
```
### Methods
#### run(task: str) -> str
Sends a task to the assistant and returns its response. The conversation thread is maintained between calls.
#### add_function(func: Callable, description: str, parameters: Dict[str, Any]) -> None
Adds a callable function that the assistant can use during conversations.
#### add_message(content: str, file_ids: Optional[List[str]] = None) -> None
Adds a message to the current conversation thread.
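A minimal sketch of how these methods combine, reusing the `assistant` instance and conversation thread from the basic usage example above:
```python
# Seed the thread with extra context, then run a task in the same thread
assistant.add_message("Context: keep every answer under three sentences.")
answer = assistant.run("Explain what the derivative of a function measures.")
print(answer)
```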
## Error Handling
The assistant implements robust error handling:
- Retries on rate limits
- Graceful handling of API errors
- Clear error messages for debugging
- Status monitoring for runs and completions
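On the caller side, a failed or expired run surfaces as an exception in the reference implementation, so a minimal handling sketch (assuming the `assistant` instance from above) looks like this:
```python
try:
    result = assistant.run("Summarize the key points of our conversation.")
except Exception as error:
    # Failed or expired runs raise; log the error and decide whether to retry
    print(f"Assistant run failed: {error}")
```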
## Best Practices
1. Thread Management
- Use the same assistant instance for related conversations
- Create new instances for unrelated tasks
- Monitor thread status during long-running operations
2. Function Integration
- Keep functions simple and focused
- Provide clear descriptions and parameter schemas
- Handle errors gracefully in custom functions
- Test functions independently before integration
3. Performance
- Reuse assistant instances when possible
- Monitor and handle rate limits appropriately
- Use appropriate polling intervals for status checks
- Consider implementing timeouts for long-running operations
## References
- [OpenAI Assistants API Documentation](https://platform.openai.com/docs/assistants/overview)
- [OpenAI Function Calling Guide](https://platform.openai.com/docs/guides/function-calling)
- [OpenAI Rate Limits](https://platform.openai.com/docs/guides/rate-limits)

@ -127,7 +127,7 @@ Before you begin, ensure you have the following installed:
poetry install --extras "desktop"
```
=== "Using Docker"
=== "Using Docker COMING SOON [DOES NOT WORK YET]"
Docker is an excellent option for creating isolated and reproducible environments, suitable for both development and production.

@ -204,3 +204,63 @@ await workflow.add(tasks=[task_1, task_2])
results = await workflow.run()
print(results) # Output: ["Task 1 Completed", "Task 2 Completed"]
```
# Async Workflow
The `AsyncWorkflow` class allows multiple agents to process tasks concurrently using Python's asyncio framework.
## Usage Example
```python
import asyncio
from swarms import Agent, AsyncWorkflow
from swarm_models import OpenAIChat
# Initialize model
model = OpenAIChat(
openai_api_key="your-api-key",
model_name="gpt-4",
temperature=0.7
)
# Create agents
agents = [
Agent(
agent_name=f"Analysis-Agent-{i}",
llm=model,
max_loops=1,
dashboard=False,
verbose=True,
)
for i in range(3)
]
# Initialize workflow
workflow = AsyncWorkflow(
name="Analysis-Workflow",
agents=agents,
max_workers=3,
verbose=True
)
# Run workflow
async def main():
task = "Analyze the potential impact of AI on healthcare"
results = await workflow.run(task)
for i, result in enumerate(results):
print(f"Agent {i} result: {result}")
# Execute
asyncio.run(main())
```
## Parameters
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `name` | str | "AsyncWorkflow" | Name of the workflow |
| `agents` | List[Agent] | None | List of agents to execute tasks |
| `max_workers` | int | 5 | Maximum number of concurrent workers |
| `dashboard` | bool | False | Enable/disable dashboard |
| `autosave` | bool | False | Enable/disable autosaving results |
| `verbose` | bool | False | Enable/disable verbose logging |

@ -61,7 +61,7 @@ python = ">=3.10,<4.0"
# transformers = ">= 4.39.0, <5.0.0"
asyncio = ">=3.4.3,<4.0"
toml = "*"
pypdf = "4.3.1"
pypdf = "5.1.0"
loguru = "*"
pydantic = "*"
tenacity = "*"
@ -111,7 +111,7 @@ swarms = "swarms.cli.main:main"
[tool.poetry.group.lint.dependencies]
black = ">=23.1,<25.0"
ruff = ">=0.5.1,<0.7.4"
ruff = ">=0.5.1,<0.8.3"
types-toml = "^0.10.8.1"
types-pytz = ">=2023.3,<2025.0"
types-chardet = "^5.0.4.6"

@ -3,7 +3,7 @@ torch>=2.1.1,<3.0
transformers>=4.39.0,<5.0.0
asyncio>=3.4.3,<4.0
toml
pypdf==4.3.1
pypdf==5.1.0
ratelimit==2.2.1
loguru
pydantic==2.8.2

@ -0,0 +1,264 @@
from typing import Optional, List, Dict, Any, Callable
import time
from openai import OpenAI
from swarms.structs.agent import Agent
import json
class OpenAIAssistant(Agent):
"""
OpenAI Assistant wrapper for the swarms framework.
Integrates OpenAI's Assistants API with the swarms architecture.
Example:
>>> assistant = OpenAIAssistant(
... name="Math Tutor",
... instructions="You are a personal math tutor.",
... model="gpt-4o",
... tools=[{"type": "code_interpreter"}]
... )
>>> response = assistant.run("Solve 3x + 11 = 14")
"""
def __init__(
self,
name: str,
instructions: Optional[str] = None,
model: str = "gpt-4o",
tools: Optional[List[Dict[str, Any]]] = None,
file_ids: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
functions: Optional[List[Dict[str, Any]]] = None,
*args,
**kwargs
):
"""Initialize the OpenAI Assistant.
Args:
name: Name of the assistant
instructions: System instructions for the assistant
model: Model to use (default: gpt-4o)
tools: List of tools to enable (code_interpreter, retrieval)
file_ids: List of file IDs to attach
metadata: Additional metadata
functions: List of custom functions to make available
"""
super().__init__(*args, **kwargs)
# Initialize tools list with any provided functions
self.tools = tools or []
if functions:
for func in functions:
self.tools.append({
"type": "function",
"function": func
})
# Create the OpenAI Assistant
self.client = OpenAI()
self.assistant = self.client.beta.assistants.create(
name=name,
instructions=instructions,
model=model,
tools=self.tools,
file_ids=file_ids or [],
metadata=metadata or {}
)
# Store available functions
self.available_functions: Dict[str, Callable] = {}
def add_function(self, func: Callable, description: str, parameters: Dict[str, Any]) -> None:
"""Add a function that the assistant can call.
Args:
func: The function to make available to the assistant
description: Description of what the function does
parameters: JSON schema describing the function parameters
"""
func_dict = {
"name": func.__name__,
"description": description,
"parameters": parameters
}
# Add to tools list
self.tools.append({
"type": "function",
"function": func_dict
})
# Store function reference
self.available_functions[func.__name__] = func
# Update assistant with new tools
self.assistant = self.client.beta.assistants.update(
assistant_id=self.assistant.id,
tools=self.tools
)
def _handle_tool_calls(self, run, thread_id: str) -> None:
"""Handle any required tool calls during a run.
This method processes any tool calls required by the assistant during execution.
It extracts function calls, executes them with provided arguments, and submits
the results back to the assistant.
Args:
run: The current run object from the OpenAI API
thread_id: ID of the current conversation thread
Returns:
Updated run object after processing tool calls
Raises:
Exception: If there are errors executing the tool calls
"""
while run.status == "requires_action":
tool_calls = run.required_action.submit_tool_outputs.tool_calls
tool_outputs = []
for tool_call in tool_calls:
if tool_call.type == "function":
# Get function details
function_name = tool_call.function.name
function_args = json.loads(tool_call.function.arguments)
# Call function if available
if function_name in self.available_functions:
function_response = self.available_functions[function_name](**function_args)
tool_outputs.append({
"tool_call_id": tool_call.id,
"output": str(function_response)
})
# Submit outputs back to the run
run = self.client.beta.threads.runs.submit_tool_outputs(
thread_id=thread_id,
run_id=run.id,
tool_outputs=tool_outputs
)
# Wait for processing
run = self._wait_for_run(run)
return run
def _wait_for_run(self, run) -> Any:
"""Wait for a run to complete and handle any required actions.
This method polls the OpenAI API to check the status of a run until it completes
or fails. It handles intermediate states like required actions and implements
exponential backoff.
Args:
run: The run object to monitor
Returns:
The completed run object
Raises:
Exception: If the run fails or expires
"""
while True:
run = self.client.beta.threads.runs.retrieve(
thread_id=run.thread_id,
run_id=run.id
)
if run.status == "completed":
break
elif run.status == "requires_action":
run = self._handle_tool_calls(run, run.thread_id)
if run.status == "completed":
break
elif run.status in ["failed", "expired"]:
raise Exception(f"Run failed with status: {run.status}")
time.sleep(3) # Wait 3 seconds before checking again
return run
def _ensure_thread(self):
"""Ensure a thread exists for the conversation.
This method checks if there is an active thread for the current conversation.
If no thread exists, it creates a new one. This maintains conversation context
across multiple interactions.
Side Effects:
Sets self.thread if it doesn't exist
"""
if not self.thread:
self.thread = self.client.beta.threads.create()
def add_message(self, content: str, file_ids: Optional[List[str]] = None) -> None:
"""Add a message to the thread.
This method adds a new user message to the conversation thread. It ensures
a thread exists before adding the message and handles file attachments.
Args:
content: The text content of the message to add
file_ids: Optional list of file IDs to attach to the message. These must be
files that have been previously uploaded to OpenAI.
Side Effects:
Creates a new thread if none exists
Adds the message to the thread in OpenAI's system
"""
self._ensure_thread()
self.client.beta.threads.messages.create(
thread_id=self.thread.id,
role="user",
content=content,
file_ids=file_ids or []
)
def _get_response(self) -> str:
"""Get the latest assistant response from the thread."""
messages = self.client.beta.threads.messages.list(
thread_id=self.thread.id,
order="desc",
limit=1
)
if not messages.data:
return ""
message = messages.data[0]
if message.role == "assistant":
return message.content[0].text.value
return ""
def run(self, task: str, *args, **kwargs) -> str:
"""Run a task using the OpenAI Assistant.
Args:
task: The task or prompt to send to the assistant
Returns:
The assistant's response as a string
"""
self._ensure_thread()
# Add the user message
self.add_message(task)
# Create and run the assistant
run = self.client.beta.threads.runs.create(
thread_id=self.thread.id,
assistant_id=self.assistant.id,
instructions=self.instructions
)
# Wait for completion
run = self._wait_for_run(run)
# Only get and return the response if run completed successfully
if run.status == "completed":
return self._get_response()
return ""
def call(self, task: str, *args, **kwargs) -> str:
"""Alias for run() to maintain compatibility with different agent interfaces."""
return self.run(task, *args, **kwargs)

@ -73,9 +73,27 @@ from swarms.structs.utils import (
find_token_in_text,
parse_tasks,
)
from swarms.structs.swarm_router import (
SwarmRouter,
SwarmType,
swarm_router,
)
from swarms.structs.swarm_arange import SwarmRearrange
from swarms.structs.multi_agent_exec import (
run_agents_concurrently,
run_agents_concurrently_async,
run_single_agent,
run_agents_concurrently_multiprocess,
run_agents_sequentially,
run_agents_with_different_tasks,
run_agent_with_timeout,
run_agents_with_resource_monitoring,
)
from swarms.structs.async_workflow import AsyncWorkflow
__all__ = [
"Agent",
"AsyncWorkflow",
"AutoSwarm",
"AutoSwarmRouter",
"BaseStructure",
@ -138,6 +156,7 @@ __all__ = [
"run_agent_with_timeout",
"run_agents_with_resource_monitoring",
"swarm_router",
"AsyncWorkflow",
"run_agents_with_tasks_concurrently",
"showcase_available_agents",
"GroupChatState",

@ -1621,11 +1621,16 @@ class Agent:
files = os.listdir(self.docs_folder)
# Extract the text from the files
# Process each file and combine their contents
all_text = ""
for file in files:
text = data_to_text(file)
file_path = os.path.join(self.docs_folder, file)
text = data_to_text(file_path)
all_text += f"\nContent from {file}:\n{text}\n"
# Add the combined content to memory
return self.short_memory.add(
role=self.user_name, content=text
role=self.user_name, content=all_text
)
except Exception as error:
logger.error(
@ -2383,36 +2388,42 @@ class Agent:
) -> None:
"""Handle creating and saving artifacts with error handling."""
try:
logger.info(
f"Creating artifact for file: {file_output_path}"
)
# Ensure file_extension starts with a dot
if not file_extension.startswith('.'):
file_extension = '.' + file_extension
# If file_output_path doesn't have an extension, treat it as a directory
# and create a default filename based on timestamp
if not os.path.splitext(file_output_path)[1]:
timestamp = time.strftime("%Y%m%d_%H%M%S")
filename = f"artifact_{timestamp}{file_extension}"
full_path = os.path.join(file_output_path, filename)
else:
full_path = file_output_path
# Create the directory if it doesn't exist
os.makedirs(os.path.dirname(full_path), exist_ok=True)
logger.info(f"Creating artifact for file: {full_path}")
artifact = Artifact(
file_path=file_output_path,
file_path=full_path,
file_type=file_extension,
contents=text,
edit_count=0,
)
logger.info(
f"Saving artifact with extension: {file_extension}"
)
logger.info(f"Saving artifact with extension: {file_extension}")
artifact.save_as(file_extension)
logger.success(
f"Successfully saved artifact to {file_output_path}"
)
logger.success(f"Successfully saved artifact to {full_path}")
except ValueError as e:
logger.error(
f"Invalid input values for artifact: {str(e)}"
)
logger.error(f"Invalid input values for artifact: {str(e)}")
raise
except IOError as e:
logger.error(f"Error saving artifact to file: {str(e)}")
raise
except Exception as e:
logger.error(
f"Unexpected error handling artifact: {str(e)}"
)
logger.error(f"Unexpected error handling artifact: {str(e)}")
raise
def showcase_config(self):

@ -0,0 +1,62 @@
import asyncio
from typing import Any, Callable, List, Optional
from swarms.structs.base_workflow import BaseWorkflow
from swarms.structs.agent import Agent
from swarms.utils.loguru_logger import logger
class AsyncWorkflow(BaseWorkflow):
def __init__(
self,
name: str = "AsyncWorkflow",
agents: List[Agent] = None,
max_workers: int = 5,
dashboard: bool = False,
autosave: bool = False,
verbose: bool = False,
**kwargs
):
super().__init__(agents=agents, **kwargs)
self.name = name
self.agents = agents or []
self.max_workers = max_workers
self.dashboard = dashboard
self.autosave = autosave
self.verbose = verbose
self.task_pool = []
self.results = []
self.loop = None
async def _execute_agent_task(self, agent: Agent, task: str) -> Any:
"""Execute a single agent task asynchronously"""
try:
if self.verbose:
logger.info(f"Agent {agent.agent_name} processing task: {task}")
result = await agent.arun(task)
if self.verbose:
logger.info(f"Agent {agent.agent_name} completed task")
return result
except Exception as e:
logger.error(f"Error in agent {agent.agent_name}: {str(e)}")
return str(e)
async def run(self, task: str) -> List[Any]:
"""Run the workflow with all agents processing the task concurrently"""
if not self.agents:
raise ValueError("No agents provided to the workflow")
try:
# Create tasks for all agents
tasks = [self._execute_agent_task(agent, task) for agent in self.agents]
# Execute all tasks concurrently
self.results = await asyncio.gather(*tasks, return_exceptions=True)
if self.autosave:
# TODO: Implement autosave logic here
pass
return self.results
except Exception as e:
logger.error(f"Error in workflow execution: {str(e)}")
raise

@ -50,13 +50,11 @@ class SwarmConfig(BaseModel):
name="Research-Agent",
description="Gathers information",
system_prompt="You are a research agent...",
max_loops=2,
),
AgentConfig(
name="Writing-Agent",
description="Writes content",
system_prompt="You are a writing agent...",
max_loops=1,
),
],
)
@ -195,7 +193,7 @@ class AutoSwarmBuilder:
self.name = agents_dictionary.name
self.description = agents_dictionary.description
self.max_loops = getattr(
agents_dictionary, "max_loops", 1
agents_dictionary
) # Default to 1 if not set
logger.info(
@ -213,7 +211,6 @@ class AutoSwarmBuilder:
agent_name=agent_config.name,
agent_description=agent_config.description,
agent_system_prompt=agent_config.system_prompt,
# max_loops=agent_config.max_loops,
)
agents.append(agent)

@ -387,6 +387,8 @@ class BaseTool(BaseModel):
"Converting tools into OpenAI function calling schema"
)
tool_schemas = []
for tool in self.tools:
# Transform the tool into a openai function calling schema
if self.check_func_if_have_docs(
@ -398,7 +400,7 @@ class BaseTool(BaseModel):
logger.info(
f"Converting tool: {name} into a OpenAI certified function calling schema. Add documentation and type hints."
)
tool_schema_list = (
tool_schema = (
get_openai_function_schema_from_func(
tool, name=name, description=description
)
@ -408,18 +410,21 @@ class BaseTool(BaseModel):
f"Tool {name} converted successfully into OpenAI schema"
)
# Transform the dictionary to a string
tool_schema_list = json.dumps(
tool_schema_list, indent=4
)
return tool_schema_list
tool_schemas.append(tool_schema)
else:
logger.error(
f"Tool {tool.__name__} does not have documentation or type hints, please add them to make the tool execution reliable."
)
return tool_schema_list
# Combine all tool schemas into a single schema
if tool_schemas:
combined_schema = {
"type": "function",
"functions": [schema["function"] for schema in tool_schemas]
}
return json.dumps(combined_schema, indent=4)
return None
def check_func_if_have_docs(self, func: callable):
if func.__doc__ is not None:
