Merge pull request #388 from Wyatt-Stanke/fix-ci-2

[WIP] Get CI Passing V2
Eternal Reclaimer committed 11 months ago (via GitHub)
commit ff29a6b816

@ -0,0 +1,11 @@
[flake8]
max-line-length = 127
extend-ignore = E203
per-file-ignores =
# Most of this is just long strings
./swarms/prompts/**.py: E501 W293 W291
./swarms/__init__.py: F401
exclude =
./playground
./tests
./scripts
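
The same rules can be exercised programmatically for a quick local check. A minimal sketch, assuming flake8 is installed (flake8.api.legacy is flake8's documented programmatic entry point; the option names mirror the config above):

```python
# Minimal sketch, assuming flake8 is installed and this runs from the repo root.
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide(
    max_line_length=127,
    extend_ignore=["E203"],
    exclude=["./playground", "./tests", "./scripts"],
)
report = style_guide.check_files(["swarms/"])
print(report.total_errors)  # 0 when the tree is clean under these rules
```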

@ -1,6 +1,5 @@
---
# These are supported funding model platforms
github: [kyegomez]
# patreon: # Replace with a single Patreon username
# open_collective: # Replace with a single Open Collective username

.github/action.yml

@ -1,37 +0,0 @@
name: "Init Environment"
description: "Initialize environment for tests"
runs:
using: "composite"
steps:
- name: Checkout actions
uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install and configure Poetry
uses: snok/install-poetry@v1
with:
virtualenvs-create: true
virtualenvs-in-project: true
installer-parallel: true
- name: Load cached venv
id: cached-poetry-dependencies
uses: actions/cache@v3
with:
path: .venv
key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
- name: Install dependencies
if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
run: poetry install --no-interaction --no-root --with test --with dev --all-extras
shell: bash
- name: Activate venv
run: |
source .venv/bin/activate
echo PATH=$PATH >> $GITHUB_ENV
shell: bash

@ -1,13 +1,11 @@
---
# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "pip"
directory: "/"
schedule:

@ -1,14 +1,52 @@
---
# this is a config file for the github action labeler
# Add 'label1' to any changes within 'example' folder or any subfolders
example_change:
- example/**
# Add 'label2' to any file changes within 'example2' folder
example2_change: example2/*
# Add label3 to any change to .txt files within the entire repository.
# Quotation marks are required for the leading asterisk
text_files:
- '**/*.txt'
documentation:
- changed-files:
- any-glob-to-any-file: ["docs/**", "*.md"]
tests:
- changed-files:
- any-glob-to-any-file: "tests/**"
agents:
- changed-files:
- any-glob-to-any-file: "swarms/agents/**"
artifacts:
- changed-files:
- any-glob-to-any-file: "swarms/artifacts/**"
chunkers:
- changed-files:
- any-glob-to-any-file: "swarms/chunkers/**"
cli:
- changed-files:
- any-glob-to-any-file: "swarms/cli/**"
loaders:
- changed-files:
- any-glob-to-any-file: "swarms/loaders/**"
memory:
- changed-files:
- any-glob-to-any-file: "swarms/memory/**"
models:
- changed-files:
- any-glob-to-any-file: "swarms/models/**"
prompts:
- changed-files:
- any-glob-to-any-file: "swarms/prompts/**"
structs:
- changed-files:
- any-glob-to-any-file: "swarms/structs/**"
telemetry:
- changed-files:
- any-glob-to-any-file: "swarms/telemetry/**"
tokenizers:
- changed-files:
- any-glob-to-any-file: "swarms/tokenizers/**"
tools:
- changed-files:
- any-glob-to-any-file: "swarms/tools/**"
utils:
- changed-files:
- any-glob-to-any-file: "swarms/utils/**"
workers:
- changed-files:
- any-glob-to-any-file: "swarms/workers/**"
rust:
- changed-files:
- any-glob-to-any-file: "**/*.rs"
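
As a rough sanity check of the globs above, Python's fnmatch can approximate the matching. The labeler action uses minimatch-style globs, so treat this only as an approximation:

```python
# Approximation of the labeler's glob matching with fnmatch; minimatch
# semantics differ in edge cases (e.g. '**' across path separators).
from fnmatch import fnmatch

changed_files = ["swarms/agents/base.py", "docs/index.md", "core/lib.rs"]
rules = {"agents": "swarms/agents/**", "documentation": "docs/**", "rust": "**/*.rs"}

for label, pattern in rules.items():
    if any(fnmatch(path, pattern) for path in changed_files):
        print(f"would apply label: {label}")
```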

@ -0,0 +1,44 @@
---
name: "Setup"
description: Setup the environment for the project
inputs:
python-version:
description: "Python version to use"
required: false
default: "3.10"
runs:
using: "composite"
steps:
- name: Free up disk space
run: |
sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
shell: bash
- name: Set up Python ${{ inputs.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ inputs.python-version }}
- name: Install and configure Poetry
uses: snok/install-poetry@v1
with:
installer-parallel: true
- name: Cache Poetry cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-cache-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ env.POETRY_VERSION }}
- name: Cache Packages
uses: actions/cache@v4
with:
path: ~/.local
key: poetry-local-${{ runner.os }}-${{ steps.setup_python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}-${{ hashFiles('.github/workflows/*.yml') }}
- name: Install dependencies
run: POETRY_VIRTUALENVS_CREATE=false poetry install
shell: bash

@ -1,5 +1,5 @@
---
name: release
on:
pull_request:
types:
@ -7,26 +7,18 @@ on:
branches:
- master
paths:
- 'pyproject.toml'
- "pyproject.toml"
env:
POETRY_VERSION: "1.4.2"
jobs:
if_release:
if: |
${{ github.event.pull_request.merged == true }}
&& ${{ contains(github.event.pull_request.labels.*.name, 'release') }}
${{ github.event.pull_request.merged == true }}
&& ${{ contains(github.event.pull_request.labels.*.name, 'release') }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python 3.9
uses: actions/setup-python@v5
with:
python-version: "3.9"
cache: "poetry"
- uses: ./.github/library/setup
- name: Build project for distribution
run: poetry build
- name: Check Version
@ -45,5 +37,5 @@ jobs:
- name: Publish to PyPI
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |
poetry publish
run: |-
poetry publish

@ -1,3 +1,4 @@
---
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
@ -10,39 +11,38 @@
# https://github.com/codacy/codacy-analysis-cli-action.
# For more information on Codacy Analysis CLI in general, see
# https://github.com/codacy/codacy-analysis-cli.
name: Codacy Security Scan
on:
push:
branches: [ "master" ]
branches: ["master"]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
branches: ["master"]
schedule:
- cron: '18 23 * * 4'
permissions:
contents: read
jobs:
codacy-security-scan:
permissions:
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
# for actions/checkout to fetch code
contents: read
# for github/codeql-action/upload-sarif to upload SARIF results
security-events: write
# only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
actions: read
name: Codacy Security Scan
runs-on: ubuntu-latest
steps:
# Checkout the repository to the GitHub Actions runner
- name: Checkout code
uses: actions/checkout@v4
# Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis
- name: Run Codacy Analysis CLI
uses: codacy/codacy-analysis-cli-action@33d455949345bddfdb845fba76b57b70cc83754b
with:
# Check https://github.com/codacy/codacy-analysis-cli#project-token to get your project token from your Codacy repository
# Check https://github.com/codacy/codacy-analysis-cli#project-token
# to get your project token from your Codacy repository
# You can also omit the token and run the tools that support default configurations
project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
verbose: true
@ -53,7 +53,6 @@ jobs:
# Force 0 exit code to allow SARIF file generation
# This will handover control about PR rejection to the GitHub side
max-allowed-issues: 2147483647
# Upload the SARIF file generated in the previous step
- name: Upload SARIF results file
uses: github/codeql-action/upload-sarif@v3

@ -1,3 +1,4 @@
---
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
@ -10,16 +11,14 @@
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ "master" ]
branches: ["master"]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "master" ]
branches: ["master"]
schedule:
- cron: '33 12 * * 5'
jobs:
analyze:
name: Analyze
@ -34,49 +33,42 @@ jobs:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'python' ]
language: ['python']
# CodeQL supports [ 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' ]
# Use only 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
# For more details on CodeQL's query packs, refer to:
# https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
uses: github/codeql-action/autobuild@v3
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v3
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

@ -1,5 +1,5 @@
---
name: Docs WorkAgent
on:
push:
branches:
@ -11,10 +11,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: 3.x
- uses: ./.github/library/setup
- run: pip install mkdocs-material
- run: pip install mkdocs-glightbox
- run: pip install "mkdocstrings[python]"
- run: mkdocs gh-deploy --force
- run: mkdocs gh-deploy --force

@ -1,22 +1,20 @@
---
# This workflow will triage pull requests and apply a label based on the
# paths that are modified in the pull request.
#
# To use this workflow, you will need to set up a .github/labeler.yml
# file with configuration. For more information, see:
# https://github.com/actions/labeler
name: Labeler
on: [pull_request_target]
jobs:
label:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- uses: actions/labeler@v5
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
- uses: actions/labeler@v5
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
sync-labels: true

@ -1,27 +1,42 @@
---
name: Lint
on: [push, pull_request] # yamllint disable-line rule:truthy
on: [push, pull_request]
jobs:
yaml-lint:
runs-on: ubuntu-latest
steps:
- name: Check out source repository
uses: actions/checkout@v4
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: yaml Lint
uses: ibiqlik/action-yamllint@v3
with:
file_or_dir: ${{ github.workspace }}
flake8-lint:
runs-on: ubuntu-latest
name: flake8 Lint
steps:
- name: Check out source repository
uses: actions/checkout@v4
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: Set up Python environment
uses: py-actions/flake8@v2
with:
args: --verbose
ruff-lint:
runs-on: ubuntu-latest
name: ruff Lint
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- uses: chartboost/ruff-action@v1
pylint-lint:
runs-on: ubuntu-latest
name: pylint Lint
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: Install dependencies
run: |
pip install pylint
- name: Analysing the code with pylint
run: |
pylint $(git ls-files '*.py')

@ -1,15 +0,0 @@
name: "PR Labeler"
on:
pull_request_target:
types: ["opened", "reopened", "ready_for_review"]
jobs:
triage:
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5
if: ${{ github.event.pull_request.draft == false }}

@ -1,30 +0,0 @@
name: Pull Request Checks
on:
pull_request:
branches:
- master
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.x
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install swarms
pip install pytest
- name: Run tests and checks
run: |
pytest
pylint swarms

@ -1,3 +1,4 @@
---
name: readthedocs/actions
on:
pull_request_target:
@ -5,14 +6,12 @@ on:
- opened
paths:
- "docs/**"
permissions:
pull-requests: write
jobs:
pull-request-links:
runs-on: ubuntu-latest
steps:
- uses: readthedocs/actions/preview@v1
with:
project-slug: swarms
project-slug: swarms

@ -1,23 +0,0 @@
name: Pylint
on: [push]
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install pylint
- name: Analysing the code with pylint
run: |
pylint $(git ls-files '*.py')

@ -1,39 +0,0 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: Python application
on:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
permissions:
contents: read
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install flake8 pytest swarms
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
pytest

@ -1,41 +0,0 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: Python package
on:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
jobs:
build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: ["3.7", "3.9", "3.10", "3.11"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade swarms
python -m pip install flake8 pytest swarms
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
pytest

@ -1,27 +1,16 @@
---
name: Upload Python Package
on: # yamllint disable-line rule:truthy
on:
release:
types: [published]
permissions:
contents: read
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install build
- uses: ./.github/library/setup
- name: Build package
run: python -m build
- name: Publish package

@ -1,27 +1,49 @@
---
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale
name: Mark stale issues and pull requests
on:
schedule:
- cron: '26 12 * * *'
# Scheduled to run at 1:30 UTC every day
- cron: "30 1 * * *"
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'Stale issue message'
stale-pr-message: 'Stale pull request message'
stale-issue-label: 'no-issue-activity'
stale-pr-label: 'no-pr-activity'
- uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-issue-stale: 14
days-before-issue-close: 14
stale-issue-label: "status:stale"
close-issue-reason: not_planned
any-of-labels: "status:awaiting user response,status:more data needed"
stale-issue-message: >
Marking this issue as stale since it has been open for 14 days with no activity. This issue
will be closed if no further activity occurs.
close-issue-message: >
This issue was closed because it has been inactive for 28 days. Please post a new issue if
you need further assistance. Thanks!
days-before-pr-stale: 14
days-before-pr-close: 14
stale-pr-label: "status:stale"
stale-pr-message: >
Marking this pull request as stale since it has been open for 14 days with no activity. This
PR will be closed if no further activity occurs.
close-pr-message: >
This pull request was closed because it has been inactive for 28 days. Please open a new
pull request if you need further assistance. Thanks!
# Label that can be assigned to issues to exclude them from being marked as stale
exempt-issue-labels: "override-stale"
# Label that can be assigned to PRs to exclude them from being marked as stale
exempt-pr-labels: "override-stale"

@ -1,49 +0,0 @@
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale
name: Mark stale issues and pull requests
on:
schedule:
# Scheduled to run at 1:30 UTC every day
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-issue-stale: 14
days-before-issue-close: 14
stale-issue-label: "status:stale"
close-issue-reason: not_planned
any-of-labels: "status:awaiting user response,status:more data needed"
stale-issue-message: >
Marking this issue as stale since it has been open for 14 days with no activity.
This issue will be closed if no further activity occurs.
close-issue-message: >
This issue was closed because it has been inactive for 28 days.
Please post a new issue if you need further assistance. Thanks!
days-before-pr-stale: 14
days-before-pr-close: 14
stale-pr-label: "status:stale"
stale-pr-message: >
Marking this pull request as stale since it has been open for 14 days with no activity.
This PR will be closed if no further activity occurs.
close-pr-message: >
This pull request was closed because it has been inactive for 28 days.
Please open a new pull request if you need further assistance. Thanks!
# Label that can be assigned to issues to exclude them from being marked as stale
exempt-issue-labels: 'override-stale'
# Label that can be assigned to PRs to exclude them from being marked as stale
exempt-pr-labels: "override-stale"

@ -1,110 +1,16 @@
name: test
on:
push:
branches: [master]
pull_request:
workflow_dispatch:
env:
POETRY_VERSION: "1.4.2"
---
name: build
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
test_type:
- "core"
- "extended"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: "snok/install-poetry@v1"
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.4.2"
cache-key: ${{ matrix.test_type }}
install-command: |
if [ "${{ matrix.test_type }}" == "core" ]; then
echo "Running core tests, installing dependencies with poetry..."
poetry install
else
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing
fi
- name: Run ${{matrix.test_type}} tests
run: |
if [ "${{ matrix.test_type }}" == "core" ]; then
make test
else
make extended_tests
fi
shell: bash
name: Python ${{ matrix.python-version }} ${{ matrix.test_type }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.4.2"
cache-key: ${{ matrix.test_type }}
install-command: |
if [ "${{ matrix.test_type }}" == "core" ]; then
echo "Running core tests, installing dependencies with poetry..."
poetry install
else
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing
fi
- name: Run ${{matrix.test_type}} tests
run: |
if [ "${{ matrix.test_type }}" == "core" ]; then
make test
else
make extended_tests
fi
shell: bash
build:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
test_type:
- "core"
- "extended"
name: Python ${{ matrix.python-version }} ${{ matrix.test_type }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.4.2"
cache-key: ${{ matrix.test_type }}
install-command: |
if [ "${{ matrix.test_type }}" == "core" ]; then
echo "Running core tests, installing dependencies with poetry..."
poetry install
else
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing
fi
- name: Run ${{matrix.test_type}} tests
- uses: ./.github/library/setup
- name: Install dependencies
run: |
if [ "${{ matrix.test_type }}" == "core" ]; then
make test
else
make extended_tests
fi
shell: bash
pip install pytest
- name: Run Python unit tests
run: pytest
- name: Verify that the Docker image for the action builds
run: docker build . --file Dockerfile

@ -1,81 +1,28 @@
---
# Notebook-related checks
name: Presubmit checks
on:
# Relevant PRs
pull_request:
paths:
- "swarms/**"
- "tests/**"
# Allow manual runs
workflow_dispatch:
on: [push, pull_request]
jobs:
test3_11:
name: Test Py3.11
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Run tests
run: |
python --version
pip install .[dev]
python -m pytest
test3_10:
name: Test Py3.10
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Run tests
run: |
python --version
pip install -q .[dev]
python -m pytest
test3_9:
name: Test Py3.9
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- name: Run tests
run: |
python --version
pip install .[dev]
python -m pytest
pytype3_10:
name: pytype 3.10
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Run pytype
run: |
python --version
pip install .[dev]
pip install -q gspread ipython
pytype
# Disabled until google/pytype/issues/151 is resolved
# pytype3_10:
# name: pytype 3.10
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v4
# - uses: ./.github/library/setup
# - name: Install pytype
# run: pip install -q pytype
# - name: Run pytype
# run: pytype ./swarms
format:
name: Check format with black
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Check format
run: |
python --version
pip install -q .
pip install -q black
black . --check
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: Check format
run: |
python --version
pip install -q .
pip install -q black
black . --check

@ -1,27 +0,0 @@
name: Unit Tests
on:
push:
branches:
- master
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install pytest
- name: Run unit tests
run: pytest

@ -1,36 +0,0 @@
name: build
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.9'
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install pytest
pip install swarms
- name: Run Python unit tests
run: pytest
- name: Verify that the Docker image for the action builds
run: docker build . --file Dockerfile
- name: Verify integration test results
run: pytest

@ -1,11 +1,10 @@
---
name: Welcome WorkAgent
on:
issues:
types: [opened]
pull_request_target:
types: [opened]
jobs:
build:
name: 👋 Welcome
@ -15,5 +14,10 @@ jobs:
- uses: actions/first-interaction@v1.3.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
issue-message: "Hello there, thank you for opening an Issue ! 🙏🏻 The team was notified and they will get back to you asap."
pr-message: "Hello there, thank you for opening an PR ! 🙏🏻 The team was notified and they will get back to you asap."
issue-message: >
Hello there, thank you for opening an Issue ! 🙏🏻 The team was notified and they will get back
to you asap.
pr-message: >-
Hello there, thank you for opening an PR ! 🙏🏻 The team was notified and they will get back
to you asap.

@ -1,18 +1,19 @@
---
repos:
- repo: https://github.com/ambv/black
rev: 22.3.0
hooks:
- id: black
- id: black
- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: 'v0.0.255'
hooks:
- id: ruff
args: [--unsafe-fixes]
args: ['--unsafe-fixes']
- repo: https://github.com/nbQA-dev/nbQA
rev: 1.6.3
hooks:
- id: nbqa-black
additional_dependencies: [ipython==8.12, black]
- id: nbqa-ruff
args: ["--ignore=I001"]
additional_dependencies: [ipython==8.12, ruff]
- id: nbqa-black
additional_dependencies: [ipython==8.12, black]
- id: nbqa-ruff
args: ["--ignore=I001"]
additional_dependencies: [ipython==8.12, ruff]

@ -1,13 +1,11 @@
---
version: 2
build:
os: ubuntu-22.04
tools:
python: "3.11"
mkdocs:
configuration: mkdocs.yml
python:
install:
- requirements: requirements.txt
install:
- requirements: requirements.txt

@ -0,0 +1,6 @@
---
formatter:
type: basic
include_document_start: true
max_line_length: 100
pad_line_comments: 2

@ -0,0 +1,14 @@
---
extends: default
rules:
line-length:
max: 127
truthy:
# GitHub Actions
check-keys: false
ignore:
# GH Actions
- lib
- .venv

@ -30,6 +30,7 @@ This guide is intended for developers and contributors to the `swarms` project w
## Prerequisites
Before you begin, ensure you have:
- A GitHub account
- Git installed on your machine
- Basic command-line proficiency
@ -115,14 +116,13 @@ docker run -it --rm swarms-dev
- Introduce Docker Compose and its role in simplifying multi-container setups.
- Create a `docker-compose.yml` file for the `swarms` project.
## Dockerfile
Creating a Dockerfile for deploying the `swarms` framework to the cloud involves setting up the necessary environment to run your Python application, ensuring all dependencies are installed, and configuring the container to execute the desired tasks. Here's an example Dockerfile that sets up such an environment:
```Dockerfile
# Use an official Python runtime as a parent image
FROM python:3.9-slim
FROM python:3.10-slim
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
@ -194,4 +194,4 @@ For deploying to the cloud, you'll need to push your Docker image to a container
- Setting up networking, storage, and other cloud resources.
- Monitoring, logging, and potentially scaling your containers.
Remember to secure sensitive data, use tagged releases for your images, and follow best practices for operating in the cloud.
Remember to secure sensitive data, use tagged releases for your images, and follow best practices for operating in the cloud.

@ -19,7 +19,7 @@ python -m pip install --upgrade revChatGPT
```
**Supported Python Versions:**
- Minimum: Python3.9
- Minimum: Python3.10
- Recommended: Python3.11+
### Initial Setup and Configuration <a name="initial-setup"></a>

@ -16,7 +16,7 @@ Swarms provides you with all the building blocks you need to build reliable, pro
## 💻 Install
You can install `swarms` with pip in a
[**Python>=3.8**](https://www.python.org/) environment.
[**Python>=3.10**](https://www.python.org/) environment.
!!! example "pip install (recommended)"

@ -1,6 +1,6 @@
from swarms import Agent, OpenAIChat
## Initialize the workflow
# Initialize the workflow
agent = Agent(
llm=OpenAIChat(),
max_loops=1,

@ -1,3 +1,4 @@
---
site_name: Swarms Docs
plugins:
- glightbox
@ -16,15 +17,15 @@ extra:
- icon: fontawesome/brands/python
link: https://pypi.org/project/Swarms/
theme:
name: material
custom_dir: docs/overrides
logo: assets/img/SwarmsLogoIcon.png
palette:
# Palette toggle for light mode
name: material
custom_dir: docs/overrides
logo: assets/img/SwarmsLogoIcon.png
palette:
# Palette toggle for light mode
- scheme: default
primary: black
toggle:
icon: material/brightness-7
icon: material/brightness-7
name: Switch to dark mode
# Palette toggle for dark mode
- scheme: slate
@ -32,14 +33,14 @@ theme:
toggle:
icon: material/brightness-4
name: Switch to light mode
features:
- content.code.copy
- content.code.annotate
- navigation.tabs
- navigation.sections
- navigation.expand
- navigation.top
- announce.dismiss
features:
- content.code.copy
- content.code.annotate
- navigation.tabs
- navigation.sections
- navigation.expand
- navigation.top
- announce.dismiss
markdown_extensions:
- pymdownx.highlight:
anchor_linenums: true
@ -55,127 +56,126 @@ markdown_extensions:
- def_list
- footnotes
nav:
- Home:
- Overview: "index.md"
- Contributing: "contributing.md"
- Limitations of Individual Agents: "limits_of_individual_agents.md"
- Swarms:
- Overview: "swarms/index.md"
- swarms.agents:
- Agents:
- WorkerAgent: "swarms/agents/workeragent.md"
- OmniAgent: "swarms/agents/omni_agent.md"
- AbstractAgent: "swarms/agents/abstractagent.md"
- ToolAgent: "swarms/agents/toolagent.md"
- swarms.models:
- Language:
- BaseLLM: "swarms/models/base_llm.md"
- Overview: "swarms/models/index.md"
- HuggingFaceLLM: "swarms/models/huggingface.md"
- Anthropic: "swarms/models/anthropic.md"
- OpenAI: "swarms/models/openai.md"
- Zephyr: "swarms/models/zephyr.md"
- BioGPT: "swarms/models/biogpt.md"
- vLLM: "swarms/models/vllm.md"
- MPT7B: "swarms/models/mpt.md"
- Mistral: "swarms/models/mistral.md"
- Mixtral: "swarms/models/mixtral.md"
- MultiModal:
- BaseMultiModalModel: "swarms/models/base_multimodal_model.md"
- Fuyu: "swarms/models/fuyu.md"
- Vilt: "swarms/models/vilt.md"
- Idefics: "swarms/models/idefics.md"
- BingChat: "swarms/models/bingchat.md"
- Kosmos: "swarms/models/kosmos.md"
- Nougat: "swarms/models/nougat.md"
- Dalle3: "swarms/models/dalle3.md"
- GPT4V: "swarms/models/gpt4v.md"
- LayoutLMDocumentQA: "swarms/models/layoutlm_document_qa.md"
- DistilWhisperModel: "swarms/models/distilled_whisperx.md"
- ElevenLabsText2SpeechTool: "swarms/models/elevenlabs.md"
- OpenAITTS: "swarms/models/openai_tts.md"
- Gemini: "swarms/models/gemini.md"
- ZeroscopeTTV: "swarms/models/zeroscope.md"
- swarms.structs:
- Foundational Structures:
- agent: "swarms/structs/agent.md"
- basestructure: "swarms/structs/basestructure.md"
- artifactupload: "swarms/structs/artifactupload.md"
- taskinput: "swarms/structs/taskinput.md"
- stepinput: "swarms/structs/stepinput.md"
- artifact: "swarms/structs/artifact.md"
- task: "swarms/structs/task.md"
- Task Queue Base: "swarms/structs/taskqueuebase.md"
- Workflows:
- recursiveworkflow: "swarms/structs/recursiveworkflow.md"
- concurrentworkflow: "swarms/structs/concurrentworkflow.md"
- nonlinearworkflow: "swarms/structs/nonlinearworkflow.md"
- sequential_workflow: "swarms/structs/sequential_workflow.md"
- workflow: "swarms/structs/workflow.md"
- baseworkflow: "swarms/structs/baseworkflow.md"
- Multi Agent Architectures:
- conversation: "swarms/structs/conversation.md"
- groupchat: "swarms/structs/groupchat.md"
- swarmnetwork: "swarms/structs/swarmnetwork.md"
- groupchatmanager: "swarms/structs/groupchatmanager.md"
- MajorityVoting: "swarms/structs/majorityvoting.md"
- swarms.tokenizers:
- Language:
- Tokenizer: "swarms/tokenizers/tokenizer.md"
- CohereTokenizer: "swarms/tokenizers/coheretokenizer.md"
- BaseTokenizer: "swarms/tokenizers/basetokenizer.md"
- HuggingfaceTokenizer: "swarms/tokenizers/huggingfacetokenizer.md"
- SentencepieceTokenizer: "swarms/tokenizers/sentencepiecetokenizer.md"
- AnthropicTokenizer: "swarms/tokenizers/anthropictokenizer.md"
- OpenaiTokenizer: "swarms/tokenizers/openaitokenizer.md"
- swarms.memory:
- Vector Databases:
- Weaviate: "swarms/memory/weaviate.md"
- PineconeDB: "swarms/memory/pinecone.md"
- PGVectorStore: "swarms/memory/pg.md"
- ShortTermMemory: "swarms/memory/short_term_memory.md"
- swarms.utils:
- Misc:
- pdf_to_text: "swarms/utils/pdf_to_text.md"
- load_model_torch: "swarms/utils/load_model_torch.md"
- metrics_decorator: "swarms/utils/metrics_decorator.md"
- prep_torch_inference: "swarms/utils/prep_torch_inference.md"
- find_image_path: "swarms/utils/find_image_path.md"
- print_class_parameters: "swarms/utils/print_class_parameters.md"
- extract_code_from_markdown: "swarms/utils/extract_code_from_markdown.md"
- check_device: "swarms/utils/check_device.md"
- display_markdown_message: "swarms/utils/display_markdown_message.md"
- phoenix_tracer: "swarms/utils/phoenix_tracer.md"
- limit_tokens_from_string: "swarms/utils/limit_tokens_from_string.md"
- math_eval: "swarms/utils/math_eval.md"
- Guides:
- Overview: "examples/index.md"
- Agents:
- Agent: "examples/flow.md"
- OmniAgent: "examples/omni_agent.md"
- Swarms:
- SequentialWorkflow: "examples/reliable_autonomous_agents.md"
- 20+ Autonomous Agent Blogs: "examples/ideas.md"
- Applications:
- CustomerSupport:
- Overview: "applications/customer_support.md"
- Marketing:
- Overview: "applications/marketing_agencies.md"
- Corporate:
- FAQ: "corporate/faq.md"
- Purpose: "corporate/purpose.md"
- Roadmap: "corporate/roadmap.md"
- Weaknesses: "corporate/failures.md"
- Design: "corporate/design.md"
- Flywheel: "corporate/flywheel.md"
- Bounties: "corporate/bounties.md"
- Metric: "corporate/metric.md"
- Distribution: "corporate/distribution.md"
- Research: "corporate/research.md"
- Demos: "corporate/demos.md"
- Architecture: "corporate/architecture.md"
- Checklist: "corporate/checklist.md"
- Hiring: "corporate/hiring.md"
- SwarmCloud: "corporate/swarm_cloud.md"
- SwarmMemo: "corporate/swarm_memo.md"
- Data Room: "corporate/data_room.md"
- Home:
- Overview: "index.md"
- Contributing: "contributing.md"
- Swarms:
- Overview: "swarms/index.md"
- swarms.agents:
- Agents:
- WorkerAgent: "swarms/agents/workeragent.md"
- OmniAgent: "swarms/agents/omni_agent.md"
- AbstractAgent: "swarms/agents/abstractagent.md"
- ToolAgent: "swarms/agents/toolagent.md"
- swarms.models:
- Language:
- BaseLLM: "swarms/models/base_llm.md"
- Overview: "swarms/models/index.md"
- HuggingFaceLLM: "swarms/models/huggingface.md"
- Anthropic: "swarms/models/anthropic.md"
- OpenAI: "swarms/models/openai.md"
- Zephyr: "swarms/models/zephyr.md"
- BioGPT: "swarms/models/biogpt.md"
- vLLM: "swarms/models/vllm.md"
- MPT7B: "swarms/models/mpt.md"
- Mistral: "swarms/models/mistral.md"
- Mixtral: "swarms/models/mixtral.md"
- MultiModal:
- BaseMultiModalModel: "swarms/models/base_multimodal_model.md"
- Fuyu: "swarms/models/fuyu.md"
- Vilt: "swarms/models/vilt.md"
- Idefics: "swarms/models/idefics.md"
- BingChat: "swarms/models/bingchat.md"
- Kosmos: "swarms/models/kosmos.md"
- Nougat: "swarms/models/nougat.md"
- Dalle3: "swarms/models/dalle3.md"
- GPT4V: "swarms/models/gpt4v.md"
- LayoutLMDocumentQA: "swarms/models/layoutlm_document_qa.md"
- DistilWhisperModel: "swarms/models/distilled_whisperx.md"
- ElevenLabsText2SpeechTool: "swarms/models/elevenlabs.md"
- OpenAITTS: "swarms/models/openai_tts.md"
- Gemini: "swarms/models/gemini.md"
- ZeroscopeTTV: "swarms/models/zeroscope.md"
- swarms.structs:
- Foundational Structures:
- agent: "swarms/structs/agent.md"
- basestructure: "swarms/structs/basestructure.md"
- artifactupload: "swarms/structs/artifactupload.md"
- taskinput: "swarms/structs/taskinput.md"
- stepinput: "swarms/structs/stepinput.md"
- artifact: "swarms/structs/artifact.md"
- task: "swarms/structs/task.md"
- Task Queue Base: "swarms/structs/taskqueuebase.md"
- Workflows:
- recursiveworkflow: "swarms/structs/recursiveworkflow.md"
- concurrentworkflow: "swarms/structs/concurrentworkflow.md"
- nonlinearworkflow: "swarms/structs/nonlinearworkflow.md"
- sequential_workflow: "swarms/structs/sequential_workflow.md"
- workflow: "swarms/structs/workflow.md"
- baseworkflow: "swarms/structs/baseworkflow.md"
- Multi Agent Architectures:
- conversation: "swarms/structs/conversation.md"
- groupchat: "swarms/structs/groupchat.md"
- swarmnetwork: "swarms/structs/swarmnetwork.md"
- groupchatmanager: "swarms/structs/groupchatmanager.md"
- MajorityVoting: "swarms/structs/majorityvoting.md"
- swarms.tokenizers:
- Language:
- Tokenizer: "swarms/tokenizers/tokenizer.md"
- CohereTokenizer: "swarms/tokenizers/coheretokenizer.md"
- BaseTokenizer: "swarms/tokenizers/basetokenizer.md"
- HuggingfaceTokenizer: "swarms/tokenizers/huggingfacetokenizer.md"
- SentencepieceTokenizer: "swarms/tokenizers/sentencepiecetokenizer.md"
- AnthropicTokenizer: "swarms/tokenizers/anthropictokenizer.md"
- OpenaiTokenizer: "swarms/tokenizers/openaitokenizer.md"
- swarms.memory:
- Vector Databases:
- Weaviate: "swarms/memory/weaviate.md"
- PineconeDB: "swarms/memory/pinecone.md"
- PGVectorStore: "swarms/memory/pg.md"
- ShortTermMemory: "swarms/memory/short_term_memory.md"
- swarms.utils:
- Misc:
- pdf_to_text: "swarms/utils/pdf_to_text.md"
- load_model_torch: "swarms/utils/load_model_torch.md"
- metrics_decorator: "swarms/utils/metrics_decorator.md"
- prep_torch_inference: "swarms/utils/prep_torch_inference.md"
- find_image_path: "swarms/utils/find_image_path.md"
- print_class_parameters: "swarms/utils/print_class_parameters.md"
- extract_code_from_markdown: "swarms/utils/extract_code_from_markdown.md"
- check_device: "swarms/utils/check_device.md"
- display_markdown_message: "swarms/utils/display_markdown_message.md"
- phoenix_tracer: "swarms/utils/phoenix_tracer.md"
- limit_tokens_from_string: "swarms/utils/limit_tokens_from_string.md"
- math_eval: "swarms/utils/math_eval.md"
- Guides:
- Overview: "examples/index.md"
- Agents:
- Agent: "examples/flow.md"
- OmniAgent: "examples/omni_agent.md"
- Swarms:
- SequentialWorkflow: "examples/reliable_autonomous_agents.md"
- 20+ Autonomous Agent Blogs: "examples/ideas.md"
- Applications:
- CustomerSupport:
- Overview: "applications/customer_support.md"
- Marketing:
- Overview: "applications/marketing_agencies.md"
- Corporate:
- FAQ: "corporate/faq.md"
- Purpose: "corporate/purpose.md"
- Roadmap: "corporate/roadmap.md"
- Weaknesses: "corporate/failures.md"
- Design: "corporate/design.md"
- Flywheel: "corporate/flywheel.md"
- Bounties: "corporate/bounties.md"
- Metric: "corporate/metric.md"
- Distribution: "corporate/distribution.md"
- Research: "corporate/research.md"
- Demos: "corporate/demos.md"
- Architecture: "corporate/architecture.md"
- Checklist: "corporate/checklist.md"
- Hiring: "corporate/hiring.md"
- SwarmCloud: "corporate/swarm_cloud.md"
- SwarmMemo: "corporate/swarm_memo.md"
- Data Room: "corporate/data_room.md"

@ -2,10 +2,12 @@ import pandas as pd
from swarms import dataframe_to_text
# # Example usage:
df = pd.DataFrame({
'A': [1, 2, 3],
'B': [4, 5, 6],
'C': [7, 8, 9],
})
df = pd.DataFrame(
{
"A": [1, 2, 3],
"B": [4, 5, 6],
"C": [7, 8, 9],
}
)
print(dataframe_to_text(df))

poetry.lock (generated)

File diff suppressed because it is too large.

@ -10,30 +10,37 @@ description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
homepage = "https://github.com/kyegomez/swarms"
documentation = "https://swarms.apac.ai"
readme = "README.md" # Assuming you have a README.md
documentation = "https://swarms.apac.ai"
readme = "README.md"
repository = "https://github.com/kyegomez/swarms"
keywords = ["artificial intelligence", "deep learning", "optimizers", "Prompt Engineering", "swarms", "agents"]
keywords = [
"artificial intelligence",
"deep learning",
"optimizers",
"Prompt Engineering",
"swarms",
"agents",
]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.10"
"Programming Language :: Python :: 3.10",
]
[tool.poetry.dependencies]
python = "^3.6.1"
python = ">=3.10,<3.12"
torch = "2.1.1"
transformers = "4.37.1"
openai = "0.28.0"
transformers = "4.38.1"
openai = "1.3.0"
langchain = "0.0.333"
asyncio = "3.4.3"
einops = "0.7.0"
google-generativeai = "0.3.1"
langchain-experimental = "0.0.10"
tensorflow = "*"
tensorflow = "^2.15.0"
weaviate-client = "3.25.3"
opencv-python-headless = "4.8.1.78"
faiss-cpu = "1.7.4"
@ -55,7 +62,7 @@ cohere = "4.24"
huggingface-hub = "*"
pydantic = "1.10.12"
tenacity = "8.2.2"
Pillow = "9.4.0"
Pillow = "10.2.0"
chromadb = "*"
termcolor = "2.2.0"
black = "23.3.0"
@ -86,9 +93,12 @@ types-chardet = "^5.0.4.6"
mypy-protobuf = "^3.0.0"
[tool.poetry.group.test.dependencies]
pytest = "^8.0.2"
[tool.autopep8]
max_line_length = 70
ignore = "E501,W6" # or ["E501", "W6"]
ignore = "E501,W6" # or ["E501", "W6"]
in-place = true
recursive = true
aggressive = 3
@ -103,4 +113,4 @@ preview = true
[tool.poetry.scripts]
swarms = 'swarms.cli._cli:main'
swarms = 'swarms.cli._cli:main'

@ -0,0 +1,2 @@
[pytest]
testpaths = tests

@ -0,0 +1,10 @@
exclude = ["./playground", "./tests", "./scripts"]
line-length = 127
[lint]
ignore = ["E203"]
select = ["E", "F", "W"]
[lint.per-file-ignores]
"./swarms/prompts/**.py" = ["E501", "W291", "W293"]
"./swarms/__init__.py" = ["F401"]

@ -23,9 +23,12 @@ class TextArtifact(BaseArtifact):
Methods:
__add__(self, other: BaseArtifact) -> TextArtifact: Concatenates the text value of the artifact with another artifact.
__bool__(self) -> bool: Checks if the text value of the artifact is non-empty.
generate_embedding(self, driver: BaseEmbeddingModel) -> Optional[list[float]]: Generates the embedding of the text artifact using a given embedding model.
token_count(self, tokenizer: BaseTokenizer) -> int: Counts the number of tokens in the text artifact using a given tokenizer.
to_bytes(self) -> bytes: Converts the text value of the artifact to bytes using the specified encoding and error handler.
generate_embedding(self, driver: BaseEmbeddingModel) -> Optional[list[float]]:
Generates the embedding of the text artifact using a given embedding model.
token_count(self, tokenizer: BaseTokenizer) -> int:
Counts the number of tokens in the text artifact using a given tokenizer.
to_bytes(self) -> bytes:
Converts the text value of the artifact to bytes using the specified encoding and error handler.
"""
value: str
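
A hedged usage sketch of the interface documented above; the import path and keyword constructor are assumptions drawn from the docstring, not confirmed by this diff:

```python
# Sketch only: the import path and constructor are assumed.
from swarms.artifacts.text_artifact import TextArtifact  # path assumed

artifact = TextArtifact(value="hello ") + TextArtifact(value="world")  # __add__ concatenates
if artifact:                    # __bool__: True when the text is non-empty
    print(artifact.to_bytes())  # bytes under the artifact's encoding settings
```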

@ -7,7 +7,8 @@ class InternalMemoryBase(ABC):
"""Abstract base class for internal memory of agents in the swarm."""
def __init__(self, n_entries):
"""Initialize the internal memory. In the current architecture the memory always consists of a set of soltuions or evaluations.
"""Initialize the internal memory.
In the current architecture the memory always consists of a set of soltuions or evaluations.
During the operation, the agent should retrivie best solutions from it's internal memory based on the score.
Moreover, the project is designed around LLMs for the proof of concepts, so we treat all entry content as a string.
@ -28,7 +29,8 @@ class InternalMemoryBase(ABC):
class DictInternalMemory(InternalMemoryBase):
def __init__(self, n_entries: int):
"""
Initialize the internal memory. In the current architecture the memory always consists of a set of solutions or evaluations.
Initialize the internal memory.
In the current architecture the memory always consists of a set of solutions or evaluations.
Simple key-value store for now.
Args:
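
The behavior this docstring describes, a bounded store that keeps the highest-scoring solutions, can be sketched independently of the library; the following is a hypothetical stand-in, not the swarms implementation:

```python
# Hypothetical stand-in for a score-keyed internal memory: keeps at most
# n_entries solutions and always evicts the lowest-scoring one.
import heapq
import uuid

class TopNMemory:
    def __init__(self, n_entries: int) -> None:
        self.n_entries = n_entries
        self._heap: list[tuple[float, str, str]] = []  # (score, tiebreak key, content)

    def add(self, score: float, content: str) -> None:
        heapq.heappush(self._heap, (score, str(uuid.uuid4()), content))
        if len(self._heap) > self.n_entries:
            heapq.heappop(self._heap)  # drop the lowest-scoring entry

    def best(self, k: int) -> list[str]:
        return [content for _, _, content in heapq.nlargest(k, self._heap)]

memory = TopNMemory(n_entries=2)
for score, solution in [(0.2, "draft"), (0.9, "refined"), (0.7, "alternative")]:
    memory.add(score, solution)
print(memory.best(2))  # ['refined', 'alternative']
```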

@ -16,13 +16,15 @@ class DictSharedMemory:
Methods:
__init__(self, file_loc: str = None) -> None: Initializes the shared memory.
add_entry(self, score: float, agent_id: str, agent_cycle: int, entry: Any) -> bool: Adds an entry to the internal memory.
add_entry(self, score: float, agent_id: str, agent_cycle: int, entry: Any) -> bool:
Adds an entry to the internal memory.
get_top_n(self, n: int) -> None: Gets the top n entries from the internal memory.
write_to_file(self, data: Dict[str, Dict[str, Any]]) -> bool: Writes the internal memory to a file.
"""
def __init__(self, file_loc: str = None) -> None:
"""Initialize the shared memory. In the current architecture the memory always consists of a set of soltuions or evaluations.
"""Initialize the shared memory.
In the current architecture the memory always consists of a set of soltuions or evaluations.
Moreover, the project is designed around LLMs for the proof of concepts, so we treat all entry content as a string.
"""
if file_loc is not None:
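
For write_to_file, a minimal stand-in shows the likely shape of the persisted data (nested dicts keyed by entry id, an assumption this diff does not confirm):

```python
# Hypothetical stand-in for write_to_file; the nested-dict layout is assumed.
import json
from pathlib import Path
from typing import Any, Dict

def write_to_file(file_loc: str, data: Dict[str, Dict[str, Any]]) -> bool:
    Path(file_loc).write_text(json.dumps(data, indent=2))
    return True

write_to_file("memory.json", {"entry-1": {"score": 0.9, "agent_id": "a1", "content": "..."}})
```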

@ -153,7 +153,8 @@ class LangchainChromaVectorMemory:
query (str): The query to search for.
k (int): The number of results to return.
type (str): The type of search to perform: "cos" or "mmr".
distance_threshold (float): The similarity threshold to use for the search. Results with distance > similarity_threshold will be dropped.
distance_threshold (float):
The similarity threshold to use for the search. Results with distance > similarity_threshold will be dropped.
Returns:
list[str]: A list of the top k results.
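
The thresholding rule spelled out in that docstring (results whose distance exceeds the threshold are dropped) reduces to a simple filter; a self-contained sketch:

```python
# Self-contained illustration of the distance_threshold rule described above.
def filter_by_distance(results: list[tuple[str, float]], threshold: float) -> list[str]:
    return [text for text, distance in results if distance <= threshold]

hits = [("doc-a", 0.12), ("doc-b", 0.41), ("doc-c", 0.72)]
print(filter_by_distance(hits, threshold=0.5))  # ['doc-a', 'doc-b']
```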

@ -24,13 +24,31 @@ class PineconeDB(AbstractVectorDatabase):
index (pinecone.Index, optional): The Pinecone index to use. Defaults to None.
Methods:
upsert_vector(vector: list[float], vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, **kwargs) -> str:
upsert_vector(
vector: list[float],
vector_id: Optional[str] = None,
namespace: Optional[str] = None,
meta: Optional[dict] = None,
**kwargs
) -> str:
Upserts a vector into the index.
load_entry(vector_id: str, namespace: Optional[str] = None) -> Optional[BaseVectorStore.Entry]:
load_entry(
vector_id: str,
namespace: Optional[str] = None
) -> Optional[BaseVectorStore.Entry]:
Loads a single vector from the index.
load_entries(namespace: Optional[str] = None) -> list[BaseVectorStore.Entry]:
load_entries(
namespace: Optional[str] = None
) -> list[BaseVectorStore.Entry]:
Loads all vectors from the index.
query(query: str, count: Optional[int] = None, namespace: Optional[str] = None, include_vectors: bool = False, include_metadata=True, **kwargs) -> list[BaseVectorStore.QueryResult]:
query(
query: str,
count: Optional[int] = None,
namespace: Optional[str] = None,
include_vectors: bool = False,
include_metadata=True,
**kwargs
) -> list[BaseVectorStore.QueryResult]:
Queries the index for vectors similar to the given query string.
create_index(name: str, **kwargs) -> None:
Creates a new index.
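
Taken together, the reformatted signatures suggest a call pattern like the following sketch, which assumes an import path and constructor arguments this diff does not show:

```python
# Sketch based only on the signatures above; the import path, constructor
# arguments, and credential handling are assumptions.
from swarms.memory import PineconeDB  # path assumed

db = PineconeDB(api_key="PINECONE_API_KEY", index_name="swarms-demo")  # args assumed
vec_id = db.upsert_vector([0.1, 0.2, 0.3], namespace="notes", meta={"src": "demo"})
entry = db.load_entry(vec_id, namespace="notes")
results = db.query("similar text", count=3, namespace="notes", include_metadata=True)
```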

@ -12,8 +12,10 @@ from swarms.models.clipq import CLIPQ # noqa: E402
# from swarms.models.whisperx_model import WhisperX # noqa: E402
# from swarms.models.kosmos_two import Kosmos # noqa: E402
# from swarms.models.cog_agent import CogAgent # noqa: E402
## Function calling models
from swarms.models.fire_function import FireFunctionCaller
# Function calling models
from swarms.models.fire_function import (
FireFunctionCaller,
)
from swarms.models.fuyu import Fuyu # noqa: E402
from swarms.models.gemini import Gemini # noqa: E402
from swarms.models.gigabind import Gigabind # noqa: E402
@ -50,7 +52,7 @@ from swarms.models.timm import TimmModel # noqa: E402
# ) # noqa: E402
from swarms.models.together import TogetherLLM # noqa: E402
############## Types
# Types
from swarms.models.types import ( # noqa: E402
AudioModality,
ImageModality,

@ -46,7 +46,7 @@ class AzureOpenAI(BaseOpenAI):
deployment_name: str | None = Field(
default=None, alias="azure_deployment"
)
"""A model deployment.
"""A model deployment.
If given, sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
@ -62,7 +62,7 @@ class AzureOpenAI(BaseOpenAI):
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
""" # noqa: E501
azure_ad_token_provider: Callable[[], str] | None = None
@ -73,7 +73,7 @@ class AzureOpenAI(BaseOpenAI):
openai_api_type: str = ""
"""Legacy, for openai<1.0.0 support."""
validate_base_url: bool = True
"""For backwards compatibility. If legacy val openai_api_base is passed in, try to
"""For backwards compatibility. If legacy val openai_api_base is passed in, try to
infer if it is a base_url or azure_endpoint and update accordingly.
"""

@ -40,14 +40,30 @@ class BaseMultiModalModel:
Examples:
>>> from swarms.models.base_multimodal_model import BaseMultiModalModel
>>> model = BaseMultiModalModel()
>>> link = "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"
>>> model.run("Generate a summary of this text")
>>> model.run("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")
>>> model.run("Generate a summary of this text", link)
>>> model.run_batch(["Generate a summary of this text", "Generate a summary of this text"])
>>> model.run_batch([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
>>> model.run_batch_async(["Generate a summary of this text", "Generate a summary of this text"])
>>> model.run_batch_async([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
>>> model.run_batch_async_with_retries(["Generate a summary of this text", "Generate a summary of this text"])
>>> model.run_batch_async_with_retries([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
>>> model.run_batch([
("Generate a summary of this text", link),
("Generate a summary of this text", link)
])
>>> model.run_batch_async([
"Generate a summary of this text",
"Generate a summary of this text"
])
>>> model.run_batch_async([
("Generate a summary of this text", link),
("Generate a summary of this text", link)
])
>>> model.run_batch_async_with_retries([
"Generate a summary of this text",
"Generate a summary of this text"
])
>>> model.run_batch_async_with_retries([
("Generate a summary of this text", link),
("Generate a summary of this text", link)
])
>>> model.generate_summary("Generate a summary of this text")
>>> model.set_temperature(0.5)
>>> model.set_max_tokens(500)
@ -348,9 +364,9 @@ class BaseMultiModalModel:
_type_: _description_
"""
META_PROMPT = """
For any labels or markings on an image that you reference in your response, please
enclose them in square brackets ([]) and list them explicitly. Do not use ranges; for
example, instead of '1 - 4', list as '[1], [2], [3], [4]'. These labels could be
For any labels or markings on an image that you reference in your response, please
enclose them in square brackets ([]) and list them explicitly. Do not use ranges; for
example, instead of '1 - 4', list as '[1], [2], [3], [4]'. These labels could be
numbers or letters and typically correspond to specific segments or parts of the image.
"""
return META_PROMPT
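
The rule this prompt enforces, explicit bracketed labels rather than ranges, is easy to illustrate:

```python
# Illustration of the labeling rule in META_PROMPT above: a range such as
# '1 - 4' expands into explicit bracketed labels.
def expand_labels(start: int, end: int) -> str:
    return ", ".join(f"[{i}]" for i in range(start, end + 1))

print(expand_labels(1, 4))  # [1], [2], [3], [4]
```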

@ -2,7 +2,8 @@ r"""
BioGPT
Pre-trained language models have attracted increasing attention in the biomedical domain,
inspired by their great success in the general natural language domain.
Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants),
Among the two main branches of pre-trained language models in the general language domain,
i.e. BERT (and its variants) and GPT (and its variants),
the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT.
While they have achieved great success on a variety of discriminative downstream biomedical tasks,
the lack of generation ability constrains their application scope.
@ -24,7 +25,6 @@ advantage of BioGPT on biomedical literature to generate fluent descriptions for
number = {6},
year = {2022},
month = {09},
abstract = "{Pre-trained language models have attracted increasing attention in the biomedical domain, inspired by their great success in the general natural language domain. Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants), the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT. While they have achieved great success on a variety of discriminative downstream biomedical tasks, the lack of generation ability constrains their application scope. In this paper, we propose BioGPT, a domain-specific generative Transformer language model pre-trained on large-scale biomedical literature. We evaluate BioGPT on six biomedical natural language processing tasks and demonstrate that our model outperforms previous models on most tasks. Especially, we get 44.98\%, 38.42\% and 40.76\% F1 score on BC5CDR, KD-DTI and DDI end-to-end relation extraction tasks, respectively, and 78.2\% accuracy on PubMedQA, creating a new record. Our case study on text generation further demonstrates the advantage of BioGPT on biomedical literature to generate fluent descriptions for biomedical terms.}",
issn = {1477-4054},
doi = {10.1093/bib/bbac409},
url = {https://doi.org/10.1093/bib/bbac409},

@ -161,7 +161,8 @@ class CogVLMMultiModal(BaseMultiModalModel):
Methods:
run: Generates a response using the CogVLM model.
generate_stream_cogvlm: Generates a stream of responses using the CogVLM model in inference mode.
process_history_and_images: Processes history messages to extract text, identify the last user query, and convert base64 encoded image URLs to PIL images.
process_history_and_images: Processes history messages to extract text, identify the last user query,
and convert base64 encoded image URLs to PIL images.
Example:
>>> model = CogVLMMultiModal()
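A minimal sketch of the base64-to-PIL conversion the docstring describes; the helper name is hypothetical, and only the standard library plus Pillow are assumed:
import base64
import io

from PIL import Image

def data_url_to_pil(image_url: str) -> Image.Image:
    # Split off the "data:image/png;base64," header, then decode the payload
    _, encoded = image_url.split(",", 1)
    return Image.open(io.BytesIO(base64.b64decode(encoded)))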

@ -257,7 +257,7 @@ class Dalle3:
"""Print the Dalle3 dashboard"""
print(
colored(
f"""Dalle3 Dashboard:
f"""Dalle3 Dashboard:
--------------------
Model: {self.model}
@ -271,8 +271,8 @@ class Dalle3:
Save Folder: {self.save_folder}
Image Format: {self.image_format}
--------------------
""",
"green",
)

@ -37,7 +37,8 @@ class ElevenLabsText2SpeechTool(BaseTool):
Defaults to ElevenLabsModel.MULTI_LINGUAL.
name (str): The name of the tool. Defaults to "eleven_labs_text2speech".
description (str): The description of the tool.
Defaults to "A wrapper around Eleven Labs Text2Speech. Useful for when you need to convert text to speech. It supports multiple languages, including English, German, Polish, Spanish, Italian, French, Portuguese, and Hindi."
Defaults to "A wrapper around Eleven Labs Text2Speech. Useful for when you need to convert text to speech.
It supports multiple languages, including English, German, Polish, Spanish, Italian, French, Portuguese, and Hindi."
Usage:

@ -132,13 +132,13 @@ class Gemini(BaseMultiModalModel):
system_prompt (str, optional): _description_. Defaults to None.
"""
PROMPT = f"""
{self.system_prompt}
######
{task}
"""
return PROMPT

@ -11,7 +11,8 @@ from swarms.utils.supervision_visualizer import MarkVisualizer
class GPT4VSAM(BaseMultiModalModel):
"""
GPT4VSAM class represents a multi-modal model that combines the capabilities of GPT-4 and SegmentAnythingMarkGenerator.
It takes an instance of BaseMultiModalModel (vlm) and a device as input and provides methods for loading images and making predictions.
It takes an instance of BaseMultiModalModel (vlm)
and a device as input and provides methods for loading images and making predictions.
Args:
vlm (BaseMultiModalModel): An instance of BaseMultiModalModel representing the visual language model.

@ -203,8 +203,9 @@ class GPT4VisionAPI(BaseMultiModalModel):
"""
PROMPT = f"""
These are frames from a video that I want to upload. Generate a compelling description that I can upload along with the video:
These are frames from a video that I want to upload.
Generate a compelling description that I can upload along with the video:
{frames}
"""
return PROMPT

@ -63,7 +63,8 @@ class Idefics(BaseMultiModalModel):
response = model.chat(user_input)
print(response)
user_input = "User: And who is that? https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
user_input = "User: And who is that? \
https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
response = model.chat(user_input)
print(response)

@ -26,7 +26,8 @@ class MedicalSAM:
Methods:
__post_init__(): Initializes the MedicalSAM object.
download_model_weights(model_path: str): Downloads the model weights from the specified URL and saves them to the given file path.
download_model_weights(model_path: str):
Downloads the model weights from the specified URL and saves them to the given file path.
preprocess(img): Preprocesses the input image.
run(img, box): Runs the semantic segmentation on the input image within the specified bounding box.

@ -1,6 +1,7 @@
from typing import Any, Dict, List, Optional, Union
import openai
from openai import OpenAI
import requests
from pydantic import BaseModel, validator
from tenacity import (
@ -147,6 +148,7 @@ class OpenAIFunctionCaller:
self.user = user
self.messages = messages if messages is not None else []
self.timeout_sec = timeout_sec
self.client = OpenAI(api_key=self.openai_api_key)
def add_message(self, role: str, content: str):
self.messages.append({"role": role, "content": content})
@ -163,7 +165,7 @@ class OpenAIFunctionCaller:
):
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + openai.api_key,
"Authorization": "Bearer " + self.openai_api_key,
}
json_data = {"model": self.model, "messages": messages}
if tools is not None:
@ -235,7 +237,7 @@ class OpenAIFunctionCaller:
)
def call(self, task: str, *args, **kwargs) -> Dict:
return openai.Completion.create(
return self.client.completions.create(
model=self.model,  # openai>=1.0 takes model=, the legacy engine= kwarg is gone
prompt=task,
max_tokens=self.max_tokens,
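For reference, a minimal sketch of the openai>=1.0 completions call this hunk migrates to; the API key and model name are illustrative, not taken from this diff:
from openai import OpenAI

client = OpenAI(api_key="sk-...")  # hypothetical key
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",  # v1 clients take model=, not engine=
    prompt="Write a one-line summary of BioGPT.",
    max_tokens=64,
)
print(response.choices[0].text)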

@ -37,9 +37,6 @@ from tenacity import (
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
from importlib.metadata import version
from packaging.version import parse
@ -177,11 +174,11 @@ def _create_retry_decorator(
import openai
errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
openai.APITimeoutError,
openai.APIError,
openai.APIConnectionError,
openai.RateLimitError,
openai.InternalServerError,
]
return create_base_retry_decorator(
error_types=errors,
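For reference, a minimal retry sketch built directly on tenacity, assuming the openai>=1.0 exception names above; the model name and function are illustrative only:
import openai
from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

@retry(
    retry=retry_if_exception_type(
        (openai.APITimeoutError, openai.APIConnectionError, openai.RateLimitError)
    ),
    stop=stop_after_attempt(6),
    wait=wait_exponential(multiplier=1, min=4, max=10),
)
def complete(client, prompt: str) -> str:
    # Retries transient failures with exponential backoff, then re-raises
    response = client.completions.create(
        model="gpt-3.5-turbo-instruct",  # hypothetical model choice
        prompt=prompt,
    )
    return response.choices[0].text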
@ -352,7 +349,13 @@ class BaseOpenAI(BaseLLM):
try:
import openai
values["client"] = openai.Completion
values["client"] = openai.OpenAI(
api_key=values["openai_api_key"],
base_url=values["openai_api_base"] or None,  # the v1 client takes base_url=
organization=values["openai_organization"] or None,
# TODO: Reenable this when openai package supports proxy
# proxy=values["openai_proxy"] or None,
)
except ImportError:
raise ImportError(
"Could not import openai python package. "
@ -645,9 +648,11 @@ class BaseOpenAI(BaseLLM):
"organization": self.openai_organization,
}
if self.openai_proxy:
import openai
pass
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
# TODO: The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the
# client, e.g. 'OpenAI(proxy={"http": self.openai_proxy, "https": self.openai_proxy})'
# openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
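A minimal sketch of what the TODO above points at: with openai>=1.0 the proxy must be supplied at client construction via an httpx client. The proxy URL is a placeholder; depending on the installed httpx version the kwarg is proxies (older releases) or proxy/mounts (newer ones):
import httpx
from openai import OpenAI

client = OpenAI(
    api_key="sk-...",  # hypothetical
    http_client=httpx.Client(
        # Mirrors the commented-out openai.proxy mapping above
        proxies={"http://": "http://proxy:8080", "https://": "http://proxy:8080"}
    ),
)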
@property
@ -933,44 +938,36 @@ class OpenAIChat(BaseLLM):
@root_validator()
def validate_environment(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
# openai_api_key = get_from_dict_or_env(
# values, "openai_api_key", "OPENAI_API_KEY"
# )
# openai_api_base = get_from_dict_or_env(
# values,
# "openai_api_base",
# "OPENAI_API_BASE",
# default="",
# )
# openai_proxy = get_from_dict_or_env(
# values,
# "openai_proxy",
# "OPENAI_PROXY",
# default="",
# )
# openai_organization = get_from_dict_or_env(
# values,
# "openai_organization",
# "OPENAI_ORGANIZATION",
# default="",
# )
try:
import openai
openai.api_key = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base
if openai_organization:
openai.organization = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
values["client"] = openai.OpenAI
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is"

@ -27,7 +27,8 @@ class SAM:
processor (SamProcessor): The processor for the SAM model.
Methods:
run(task=None, img=None, *args, **kwargs): Runs the SAM model on the given image and returns the segmentation scores and masks.
run(task=None, img=None, *args, **kwargs):
Runs the SAM model on the given image and returns the segmentation scores and masks.
process_img(img: str = None, *args, **kwargs): Processes the input image and returns the processed image.
"""

@ -2,18 +2,29 @@
SpeechT5 (TTS task)
SpeechT5 model fine-tuned for speech synthesis (text-to-speech) on LibriTTS.
This model was introduced in SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
This model was introduced in SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing by
Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu,
Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
SpeechT5 was first released in this repository, original weights. The license used is MIT.
Model Description
Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models, we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific (speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on the output of the decoder.
Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models,
we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text
representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific
(speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network
models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on
the output of the decoder.
Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation,
hoping to improve the modeling capability for both speech and text. To align the textual and speech information into
this unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text
states with latent units as the interface between encoder and decoder.
Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing
tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement,
and speaker identification.
Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation, hoping to improve the modeling capability for both speech and text. To align the textual and speech information into this unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text states with latent units as the interface between encoder and decoder.
Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and speaker identification.
Developed by: Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
Shared by [optional]: Matthijs Hollemans
Model type: text-to-speech
Language(s) (NLP): [More Information Needed]
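A minimal synthesis sketch for this checkpoint, assuming the transformers SpeechT5 classes and the published microsoft/speecht5_tts and microsoft/speecht5_hifigan weights; real speaker embeddings are usually 512-dim x-vectors, so the zero vector below is a placeholder only:
import torch
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, this is a test.", return_tensors="pt")
speaker_embeddings = torch.zeros(1, 512)  # placeholder; use real x-vectors in practice
speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)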

@ -49,7 +49,7 @@ class SSD1B:
max_time_seconds: int = 60
save_folder: str = "images"
image_format: str = "png"
device: str = "cuda"
device: str = "cuda" if torch.cuda.is_available() else "cpu"
dashboard: bool = False
cache = TTLCache(maxsize=100, ttl=3600)
pipe = StableDiffusionXLPipeline.from_pretrained(
@ -171,7 +171,7 @@ class SSD1B:
"""Print the SSD1B dashboard"""
print(
colored(
f"""SSD1B Dashboard:
f"""SSD1B Dashboard:
--------------------
Model: {self.model}
@ -185,8 +185,8 @@ class SSD1B:
Save Folder: {self.save_folder}
Image Format: {self.image_format}
--------------------
""",
"green",
)

@ -34,7 +34,7 @@ commands: {
"""
########### FEW SHOT EXAMPLES ################
# FEW SHOT EXAMPLES #
SCENARIOS = """
commands: {
"tools": {

@ -62,6 +62,6 @@ def worker_tools_sop_promp(name: str, memory: str, time=time):
[{memory}]
Human: Determine which next command to use, and respond using the format specified above:
""".format(name=name, time=time, memory=memory)
""".format(name=name, time=time, memory=memory) # noqa: F521
return str(out)
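The noqa: F521 above flags a .format() call on a template that appears to be an f-string; a short sketch of the pitfall, with illustrative names only:
name = "agent"
template = f"Hello {name}"     # interpolated immediately -> "Hello agent"
template.format(name="other")  # no placeholders left; still "Hello agent"

template = "Hello {name}"      # a plain string keeps the placeholder
template.format(name="other")  # -> "Hello other"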

@ -447,7 +447,7 @@ class Agent:
Name: {self.agent_name}
Description: {self.agent_description}
Standard Operating Procedure: {self.sop}
System Prompt: {self.system_prompt}
System Prompt: {self.system_prompt}
Task: {task}
Max Loops: {self.max_loops}
Stopping Condition: {self.stopping_condition}
@ -778,7 +778,7 @@ class Agent:
Follow this standard operating procedure (SOP) to complete tasks:
{self.sop}
{history}
"""
return agent_history_prompt
@ -786,7 +786,7 @@ class Agent:
system_prompt = self.system_prompt
agent_history_prompt = f"""
System : {system_prompt}
{history}
"""
return agent_history_prompt

@ -98,7 +98,8 @@ class AsyncWorkflow:
# if self.dashboard:
# self.display()
# Add a stopping condition to stop the workflow, if provided but stopping_condition takes in a parameter s for string
# Add a stopping condition to stop the workflow, if provided;
# note that stopping_condition takes a single string parameter s
if self.stopping_condition:
if self.stopping_condition(self.results):
break

@ -86,10 +86,6 @@ class AbstractSwarm(ABC):
def step(self):
"""Step the swarm"""
# @abstractmethod
def add_agent(self, agent: "Agent"):
"""Add a agent to the swarm"""
# @abstractmethod
def remove_agent(self, agent: "Agent"):
"""Remove a agent from the swarm"""

@ -195,7 +195,16 @@ class BaseWorkflow(BaseStructure):
>>> workflow.add("Create a report on these metrics", llm)
>>> workflow.delete_task("What's the weather in miami")
>>> workflow.tasks
[Task(description='Create a report on these metrics', agent=Agent(llm=OpenAIChat(openai_api_key=''), max_loops=1, dashboard=False), args=[], kwargs={}, result=None, history=[])]
[
Task(
description='Create a report on these metrics',
agent=Agent(llm=OpenAIChat(openai_api_key=''), max_loops=1, dashboard=False),
args=[],
kwargs={},
result=None,
history=[]
)
]
"""
try:
for task in self.tasks:

@ -15,7 +15,8 @@ class ConcurrentWorkflow(BaseStructure):
Args:
max_workers (int): The maximum number of workers to use for the ThreadPoolExecutor.
autosave (bool): Whether to save the state of the workflow to a file. Default is False.
saved_state_filepath (str): The filepath to save the state of the workflow to. Default is "runs/concurrent_workflow.json".
saved_state_filepath (str):
The filepath to save the state of the workflow to. Default is "runs/concurrent_workflow.json".
print_results (bool): Whether to print the results of each task. Default is False.
return_results (bool): Whether to return the results of each task. Default is False.
use_processes (bool): Whether to use processes instead of threads. Default is False.
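A minimal sketch of the executor pattern this class wraps, using only the standard library; the task function is a stand-in for handing work to an agent:
from concurrent.futures import ThreadPoolExecutor

def run_task(task: str) -> str:
    # Stand-in for dispatching the task to an agent
    return f"done: {task}"

with ThreadPoolExecutor(max_workers=4) as executor:
    results = list(executor.map(run_task, ["task-a", "task-b", "task-c"]))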

@ -19,7 +19,8 @@ class DebatePlayer(Agent):
Args:
model_name(str): model name
name (str): name of this player
temperature (float): higher values make the output more random, while lower values make it more focused and deterministic
temperature (float):
higher values make the output more random, while lower values make it more focused and deterministic
openai_api_key (str): As the parameter name suggests
sleep_time (float): sleep because of rate limits
"""
@ -31,7 +32,8 @@ class Debate:
Args:
model_name (str): openai model name
temperature (float): higher values make the output more random, while lower values make it more focused and deterministic
temperature (float):
higher values make the output more random, while lower values make it more focused and deterministic
num_players (int): num of players
save_file_dir (str): dir path to json file
openai_api_key (str): As the parameter name suggests
@ -359,6 +361,13 @@ class Debate:
# with open(prompts_path, 'w') as file:
# json.dump(config, file, ensure_ascii=False, indent=4)
# debate = Debate(save_file_dir=save_file_dir, num_players=3, openai_api_key=openai_api_key, prompts_path=prompts_path, temperature=0, sleep_time=0)
# debate = Debate(
# save_file_dir=save_file_dir,
# num_players=3,
# openai_api_key=openai_api_key,
# prompts_path=prompts_path,
# temperature=0,
# sleep_time=0
# )
# debate.run()
# debate.save_file_to_json(id)

@ -17,7 +17,8 @@ class GraphWorkflow(BaseStructure):
connect(from_node, to_node): Connects two nodes in the graph.
set_entry_point(node_name): Sets the entry point node for the workflow.
add_edge(from_node, to_node): Adds an edge between two nodes in the graph.
add_conditional_edges(from_node, condition, edge_dict): Adds conditional edges from a node to multiple nodes based on a condition.
add_conditional_edges(from_node, condition, edge_dict):
Adds conditional edges from a node to multiple nodes based on a condition.
run(): Runs the workflow and returns the graph.
Examples:
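A hypothetical usage sketch based only on the method signatures listed above; the node names and the condition/edge_dict semantics are inferred, not confirmed by this diff:
workflow = GraphWorkflow()
workflow.set_entry_point("fetch")
workflow.add_edge("fetch", "parse")
workflow.add_conditional_edges(
    "parse",
    condition=lambda output: output is not None,
    edge_dict={True: "store", False: "retry"},
)
graph = workflow.run()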

@ -51,23 +51,23 @@ class LongContextSwarmLeader:
- prompt (str): The formatted string containing the agent metadata.
"""
prompt = f"""
You need to recruit a team of members to solve a
task. Select the appropriate member based on the
task description:
# Task Description
{task}
# Members
Your output must follow this JSON schema below in markdown format:
{{
"agent_id": "string",
"agent_name": "string",
"agent_description": "string"
}}
"""
for agent in self.agents:
prompt += (
@ -83,7 +83,7 @@ class LongContextSwarmLeader:
You are the leader of a team of {len(self.agents)}
members. Your team will need to collaborate to
solve a task. The rule is:
1. Only you know the task description and task
objective; the other members do not.
2. But they will receive different documents that
@ -95,13 +95,13 @@ class LongContextSwarmLeader:
explicitly include the task objective.
4. Finally, you need to complete the task based on
the query results they return.
# Task Description:
{task_description}
# Task Objective:
{task}
# Generate Instruction for Members:
Now, you need to generate an instruction for all
team members. You can ask them to answer a
@ -110,7 +110,7 @@ class LongContextSwarmLeader:
Your output must follow the JSON
format: {{"type": "instruction", "content":
"your_instruction_content"}}
"""
return prompt

@ -129,7 +129,8 @@ class MajorityVoting:
multithreaded (bool, optional): Whether to run the agents using multithreading. Defaults to False.
multiprocess (bool, optional): Whether to run the agents using multiprocessing. Defaults to False.
asynchronous (bool, optional): Whether to run the agents asynchronously. Defaults to False.
output_parser (callable, optional): A callable function to parse the output of the majority voting system. Defaults to None.
output_parser (callable, optional): A callable function to parse the output
of the majority voting system. Defaults to None.
Examples:
>>> from swarms.structs.agent import Agent

@ -3,7 +3,7 @@ from time import time_ns
from typing import Callable, List, Optional, Sequence, Union
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.base_swarm import AbstractSwarm
from swarms.utils.loguru_logger import logger
@ -43,7 +43,7 @@ def msg_hash(
)
class MessagePool(BaseSwarm):
class MessagePool(AbstractSwarm):
"""
A class representing a message pool for agents in a swarm.
@ -68,11 +68,17 @@ class MessagePool(BaseSwarm):
>>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
>>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
>>> message_pool.get_all_messages()
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent1, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent2, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
"""
def __init__(

@ -1,7 +1,7 @@
import importlib.util
import sys
import pkg_resources
from importlib.metadata import version as pkg_version
import requests
from packaging import version
@ -35,7 +35,7 @@ def check_for_update():
latest_version = response.json()["info"]["version"]
# Get the current version using pkg_resources
current_version = pkg_resources.get_distribution("swarms").version
current_version = pkg_version("swarms")
return version.parse(latest_version) > version.parse(
current_version
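A minimal sketch of the same migration pattern, assuming swarms is installed: pkg_resources.get_distribution("swarms").version becomes importlib.metadata.version("swarms"):
from importlib.metadata import version as pkg_version

from packaging import version

def is_update_available(latest: str) -> bool:
    return version.parse(latest) > version.parse(pkg_version("swarms"))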

@ -1,8 +1,8 @@
import platform
import subprocess
import pkg_resources
import psutil
import importlib.metadata as metadata
import toml
@ -31,9 +31,7 @@ def get_swarms_verison():
)
except Exception as e:
swarms_verison_cmd = str(e)
swarms_verison_pkg = pkg_resources.get_distribution(
"swarms"
).version
swarms_verison_pkg = metadata.version("swarms")
swarms_verison = swarms_verison_cmd, swarms_verison_pkg
return swarms_verison
@ -67,7 +65,7 @@ def get_package_mismatches(file_path="pyproject.toml"):
dependencies.update(dev_dependencies)
installed_packages = {
pkg.key: pkg.version for pkg in pkg_resources.working_set
pkg.metadata["Name"].lower(): pkg.version for pkg in metadata.distributions()
}
mismatches = []
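Note that importlib.metadata Distribution objects have no pkg_resources-style .key attribute; the canonical name lives in the distribution metadata. A minimal standalone sketch:
import importlib.metadata as metadata

installed = {
    dist.metadata["Name"].lower(): dist.version
    for dist in metadata.distributions()
}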

@ -12,7 +12,8 @@ def scrape_tool_func_docs(fn: Callable) -> str:
fn (Callable): The function to scrape.
Returns:
str: A string containing the function's name, documentation string, and a list of its parameters. Each parameter is represented as a line containing the parameter's name, default value, and annotation.
str: A string containing the function's name, documentation string, and a list of its parameters.
Each parameter is represented as a line containing the parameter's name, default value, and annotation.
"""
try:
# If the function is a tool, get the original function

@ -85,7 +85,7 @@ def tools_prompt_prep(docs: str = None, scenarios: str = SCENARIOS):
You will be provided with a list of APIs. These APIs will have a
description and a list of parameters and return types for each tool. Your
task involves creating varied, complex, and detailed user scenarios
that require to call API calls. You must select what api to call based on
that require API calls. You must select which API to call based on
the context of the task and the scenario.
For instance, given the APIs: SearchHotels, BookHotel, CancelBooking,
@ -116,14 +116,14 @@ def tools_prompt_prep(docs: str = None, scenarios: str = SCENARIOS):
different combination of APIs for each scenario. All APIs must be used in
at least one scenario. You can only use the APIs provided in the APIs
section.
Note that API calls are not explicitly mentioned and their uses are
included in parentheses. This behaviour should be mimicked in your
response.
Output the tool usage in a strict json format with the function name and input to
Output the tool usage in a strict json format with the function name and input to
the function. For example, Deliver your response in this format:
{scenarios}

@ -83,7 +83,7 @@ class TestResult:
prompt = f"""
This function has been executed for {self.visit_times} times. Last execution:
1.Status: {self.runtime_status.name}
2.Input:
2.Input:
{self.input_data}
3.Output:
@ -108,7 +108,7 @@ class Action:
def to_json(self):
try:
tool_output = json.loads(self.tool_output)
except:
except json.JSONDecodeError:
tool_output = self.tool_output
return {
"thought": self.thought,

@ -18,7 +18,8 @@ def load_model_torch(
model_path (str): Path to the saved model file.
device (torch.device): Device to move the model to.
model (nn.Module): The model architecture, if the model file only contains the state dictionary.
strict (bool): Whether to strictly enforce that the keys in the state dictionary match the keys returned by the model's `state_dict()` function.
strict (bool): Whether to strictly enforce that the keys in the state dictionary match the keys returned by the model's
`state_dict()` function.
map_location (callable): A function to remap the storage locations of the loaded model.
*args: Additional arguments to pass to `torch.load`.
**kwargs: Additional keyword arguments to pass to `torch.load`.
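A minimal sketch of the loading flow the docstring describes, using only the documented torch API; the nn.Linear module and weights path are hypothetical stand-ins for the real architecture and checkpoint:
import torch
import torch.nn as nn

model = nn.Linear(10, 2)  # hypothetical architecture
state_dict = torch.load("weights.pt", map_location=torch.device("cpu"))
model.load_state_dict(state_dict, strict=True)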

@ -14,7 +14,7 @@ def dataframe_to_text(
Returns:
str: The string representation of the DataFrame.
Example:
>>> df = pd.DataFrame({
... 'A': [1, 2, 3],

@ -1,7 +1,7 @@
# TESTING
# -==================
# Use an official Python runtime as a parent image
FROM python:3.9-slim
FROM python:3.10-slim
# Set environment variables to make Python output unbuffered and disable the PIP cache
ENV PYTHONDONTWRITEBYTECODE 1

@ -6,7 +6,9 @@ from swarms.structs.agent import Agent
from swarms.structs.groupchat import GroupChat, GroupChatManager
llm = OpenAIChat()
llm2 = Anthropic()
# llm2 = Anthropic()
# TODO: Mock anthropic class
llm2 = OpenAIChat()
# Mock the OpenAI class for testing

@ -1,73 +0,0 @@
# JSON
# Contents of test_json.py, which must be placed in the `tests/` directory.
import json
import pytest
from swarms.tokenizers import JSON
# Fixture for reusable JSON schema file paths
@pytest.fixture
def valid_schema_path(tmp_path):
d = tmp_path / "sub"
d.mkdir()
p = d / "schema.json"
p.write_text(
'{"type": "object", "properties": {"name": {"type":'
' "string"}}}'
)
return str(p)
@pytest.fixture
def invalid_schema_path(tmp_path):
d = tmp_path / "sub"
d.mkdir()
p = d / "invalid_schema.json"
p.write_text("this is not a valid JSON")
return str(p)
# This test class must be subclassed as JSON class is abstract
class TestableJSON(JSON):
def validate(self, data):
# A real validation implementation would go here for testing
pass
# Basic tests
def test_initialize_json(valid_schema_path):
json_obj = TestableJSON(valid_schema_path)
assert json_obj.schema_path == valid_schema_path
assert "name" in json_obj.schema["properties"]
def test_load_schema_failure(invalid_schema_path):
with pytest.raises(json.JSONDecodeError):
TestableJSON(invalid_schema_path)
# Mocking tests
def test_validate_calls_method(monkeypatch):
# Mock the validate method to check that it is being called
pass
# Exception tests
def test_initialize_with_nonexistent_schema():
with pytest.raises(FileNotFoundError):
TestableJSON("nonexistent_path.json")
# Tests on different Python versions if applicable
# ...
# Grouping tests marked as slow if they perform I/O operations
@pytest.mark.slow
def test_loading_large_schema():
# Test with a large json file
pass

@ -15,7 +15,7 @@ custom_config = {
"verbose": True,
}
huggingface_llm = HuggingfaceLLM(
model_id="NousResearch/Nous-Hermes-2-Vision-Alpha",
model_id="liuhaotian/llava-v1.6-mistral-7b",
**custom_config,
)
mixtral = Mixtral(load_in_4bit=True, use_flash_attention_2=True)
