Revert "[WIP] Get CI Passing V2"

revert-388-fix-ci-2
Eternal Reclaimer 11 months ago committed by GitHub
parent ff29a6b816
commit 415a5cf644

@ -1,11 +0,0 @@
[flake8]
max-line-length = 127
extend-ignore = E203
per-file-ignores =
# Most of this is just long strings
./swarms/prompts/**.py: E501 W293 W291
./swarms/__init__.py: F401
exclude =
./playground
./tests
./scripts

@ -1,5 +1,6 @@
---
# These are supported funding model platforms
github: [kyegomez]
# patreon: # Replace with a single Patreon username
# open_collective: # Replace with a single Open Collective username

.github/action.yml (vendored, +37 lines)

@ -0,0 +1,37 @@
name: "Init Environment"
description: "Initialize environment for tests"
runs:
using: "composite"
steps:
- name: Checkout actions
uses: actions/checkout@v3
- name: Set up Python ${{ matrix.python-version }}
id: setup-python  # required so the cache key below can resolve steps.setup-python.outputs.python-version
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install and configure Poetry
uses: snok/install-poetry@v1
with:
virtualenvs-create: true
virtualenvs-in-project: true
installer-parallel: true
- name: Load cached venv
id: cached-poetry-dependencies
uses: actions/cache@v3
with:
path: .venv
key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
- name: Install dependencies
if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
run: poetry install --no-interaction --no-root --with test --with dev --all-extras
shell: bash
- name: Activate venv
run: |
source .venv/bin/activate
echo PATH=$PATH >> $GITHUB_ENV
shell: bash
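
Since this composite action lives at .github/action.yml, a caller workflow references the directory containing the action file after checking out the repository. A minimal caller sketch, assuming a matrix that supplies the python-version the action reads (the job and step names here are illustrative, not part of this commit):

name: tests
on: [push]
jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.10"]  # the action reads matrix.python-version
    steps:
      - uses: actions/checkout@v4
      - name: Init Environment
        uses: ./.github  # resolves to .github/action.yml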

@ -1,11 +1,13 @@
---
# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "pip"
directory: "/"
schedule:

@ -1,52 +1,14 @@
---
documentation:
- changed-files:
- any-glob-to-any-file: ["docs/**", "*.md"]
tests:
- changed-files:
- any-glob-to-any-file: "tests/**"
agents:
- changed-files:
- any-glob-to-any-file: "swarms/agents/**"
artifacts:
- changed-files:
- any-glob-to-any-file: "swarms/artifacts/**"
chunkers:
- changed-files:
- any-glob-to-any-file: "swarms/chunkers/**"
cli:
- changed-files:
- any-glob-to-any-file: "swarms/cli/**"
loaders:
- changed-files:
- any-glob-to-any-file: "swarms/loaders/**"
memory:
- changed-files:
- any-glob-to-any-file: "swarms/memory/**"
models:
- changed-files:
- any-glob-to-any-file: "swarms/models/**"
prompts:
- changed-files:
- any-glob-to-any-file: "swarms/prompts/**"
structs:
- changed-files:
- any-glob-to-any-file: "swarms/structs/**"
telemetry:
- changed-files:
- any-glob-to-any-file: "swarms/telemetry/**"
tokenizers:
- changed-files:
- any-glob-to-any-file: "swarms/tokenizers/**"
tools:
- changed-files:
- any-glob-to-any-file: "swarms/tools/**"
utils:
- changed-files:
- any-glob-to-any-file: "swarms/utils/**"
workers:
- changed-files:
- any-glob-to-any-file: "swarms/workers/**"
rust:
- changed-files:
- any-glob-to-any-file: "**/*.rs"
# this is a config file for the github action labeler
# Add 'label1' to any changes within 'example' folder or any subfolders
example_change:
- example/**
# Add 'label2' to any file changes within 'example2' folder
example2_change: example2/*
# Add label3 to any change to .txt files within the entire repository.
# Quotation marks are required for the leading asterisk
text_files:
- '**/*.txt'

@ -1,44 +0,0 @@
---
name: "Setup"
description: Setup the environment for the project
inputs:
python-version:
description: "Python version to use"
required: false
default: "3.10"
runs:
using: "composite"
steps:
- name: Free up disk space
run: |
sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL
sudo docker image prune --all --force
shell: bash
- name: Set up Python ${{ inputs.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ inputs.python-version }}
- name: Install and configure Poetry
uses: snok/install-poetry@v1
with:
installer-parallel: true
- name: Cache Poetry cache
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry
key: poetry-cache
-${{ runner.os }}
-${{ steps.setup_python.outputs.python-version }}
-${{ env.POETRY_VERSION }}
- name: Cache Packages
uses: actions/cache@v4
with:
path: ~/.local
key: poetry-local
-${{ runner.os }}
-${{ steps.setup_python.outputs.python-version }}
-${{ hashFiles('**/poetry.lock')}}
-${{ hashFiles('.github/workflows/*.yml') }}
- name: Install dependencies
run: POETRY_VIRTUALENVS_CREATE=false poetry install
shell: bash

@ -1,5 +1,5 @@
---
name: release
on:
pull_request:
types:
@ -7,9 +7,11 @@ on:
branches:
- master
paths:
- "pyproject.toml"
- 'pyproject.toml'
env:
POETRY_VERSION: "1.4.2"
jobs:
if_release:
if: |
@ -18,7 +20,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: Install poetry
run: pipx install poetry==$POETRY_VERSION
- name: Set up Python 3.9
uses: actions/setup-python@v5
with:
python-version: "3.9"
cache: "poetry"
- name: Build project for distribution
run: poetry build
- name: Check Version
@ -37,5 +45,5 @@ jobs:
- name: Publish to PyPI
env:
POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
run: |-
run: |
poetry publish

@ -1,4 +1,3 @@
---
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
@ -11,38 +10,39 @@
# https://github.com/codacy/codacy-analysis-cli-action.
# For more information on Codacy Analysis CLI in general, see
# https://github.com/codacy/codacy-analysis-cli.
name: Codacy Security Scan
on:
push:
branches: ["master"]
branches: [ "master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: ["master"]
branches: [ "master" ]
schedule:
- cron: '18 23 * * 4'
permissions:
contents: read
jobs:
codacy-security-scan:
permissions:
# for actions/checkout to fetch code
contents: read
# for github/codeql-action/upload-sarif to upload SARIF results
security-events: write
# only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
actions: read
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
name: Codacy Security Scan
runs-on: ubuntu-latest
steps:
# Checkout the repository to the GitHub Actions runner
- name: Checkout code
uses: actions/checkout@v4
# Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis
- name: Run Codacy Analysis CLI
uses: codacy/codacy-analysis-cli-action@33d455949345bddfdb845fba76b57b70cc83754b
with:
# Check https://github.com/codacy/codacy-analysis-cli#project-token
# to get your project token from your Codacy repository
# Check https://github.com/codacy/codacy-analysis-cli#project-token to get your project token from your Codacy repository
# You can also omit the token and run the tools that support default configurations
project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
verbose: true
@ -53,6 +53,7 @@ jobs:
# Force 0 exit code to allow SARIF file generation
# This will handover control about PR rejection to the GitHub side
max-allowed-issues: 2147483647
# Upload the SARIF file generated in the previous step
- name: Upload SARIF results file
uses: github/codeql-action/upload-sarif@v3

@ -1,4 +1,3 @@
---
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
@ -11,14 +10,16 @@
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: ["master"]
branches: [ "master" ]
pull_request:
# The branches below must be a subset of the branches above
branches: ["master"]
branches: [ "master" ]
schedule:
- cron: '33 12 * * 5'
jobs:
analyze:
name: Analyze
@ -33,17 +34,20 @@ jobs:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: ['python']
language: [ 'python' ]
# CodeQL supports [ 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' ]
# Use only 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
@ -52,23 +56,27 @@ jobs:
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
# For more details on CodeQL's query packs, refer to:
# https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
uses: github/codeql-action/autobuild@v3
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

@ -1,5 +1,5 @@
---
name: Docs WorkAgent
on:
push:
branches:
@ -11,7 +11,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- uses: actions/setup-python@v5
with:
python-version: 3.x
- run: pip install mkdocs-material
- run: pip install mkdocs-glightbox
- run: pip install "mkdocstrings[python]"

@ -1,20 +1,22 @@
---
# This workflow will triage pull requests and apply a label based on the
# paths that are modified in the pull request.
#
# To use this workflow, you will need to set up a .github/labeler.yml
# file with configuration. For more information, see:
# https://github.com/actions/labeler
name: Labeler
on: [pull_request_target]
jobs:
label:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- uses: actions/labeler@v5
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
sync-labels: true

@ -1,42 +1,27 @@
---
name: Lint
on: [push, pull_request]
on: [push, pull_request] # yamllint disable-line rule:truthy
jobs:
yaml-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: Check out source repository
uses: actions/checkout@v4
- name: yaml Lint
uses: ibiqlik/action-yamllint@v3
with:
file_or_dir: ${{ github.workspace }}
flake8-lint:
runs-on: ubuntu-latest
name: flake8 Lint
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: Check out source repository
uses: actions/checkout@v4
- name: Set up Python environment
uses: py-actions/flake8@v2
with:
args: --verbose
ruff-lint:
runs-on: ubuntu-latest
name: ruff Lint
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- uses: chartboost/ruff-action@v1
pylint-lint:
runs-on: ubuntu-latest
name: pylint Lint
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: Install dependencies
run: |
pip install pylint
- name: Analysing the code with pylint
run: |
pylint $(git ls-files '*.py')

@ -0,0 +1,15 @@
name: "PR Labeler"
on:
pull_request_target:
types: ["opened", "reopened", "ready_for_review"]
jobs:
triage:
permissions:
contents: read
pull-requests: write
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v5
if: ${{ github.event.pull_request.draft == false }}

@ -0,0 +1,30 @@
name: Pull Request Checks
on:
pull_request:
branches:
- master
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.x
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install swarms
pip install pytest
- name: Run tests and checks
run: |
pytest
pylint swarms

@ -1,4 +1,3 @@
---
name: readthedocs/actions
on:
pull_request_target:
@ -6,8 +5,10 @@ on:
- opened
paths:
- "docs/**"
permissions:
pull-requests: write
jobs:
pull-request-links:
runs-on: ubuntu-latest

@ -0,0 +1,23 @@
name: Pylint
on: [push]
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install pylint
- name: Analysing the code with pylint
run: |
pylint $(git ls-files '*.py')

@ -0,0 +1,39 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: Python application
on:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
permissions:
contents: read
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install flake8 pytest swarms
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
pytest

@ -0,0 +1,41 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
name: Python package
on:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
jobs:
build:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: ["3.7", "3.9", "3.10", "3.11"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade swarms
python -m pip install flake8 pytest swarms
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with flake8
run: |
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
pytest

@ -1,16 +1,27 @@
---
name: Upload Python Package
on:
on: # yamllint disable-line rule:truthy
release:
types: [published]
permissions:
contents: read
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.x'
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install build
- name: Build package
run: python -m build
- name: Publish package

@ -1,49 +1,27 @@
---
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale
name: Mark stale issues and pull requests
on:
schedule:
# Scheduled to run at 1:30 UTC every day
- cron: "30 1 * * *"
- cron: '26 12 * * *'
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-issue-stale: 14
days-before-issue-close: 14
stale-issue-label: "status:stale"
close-issue-reason: not_planned
any-of-labels: "status:awaiting user response,status:more data needed"
stale-issue-message: >
Marking this issue as stale since it has been open for 14 days with no activity. This issue
will be closed if no further activity occurs.
close-issue-message: >
This issue was closed because it has been inactive for 28 days. Please post a new issue if
you need further assistance. Thanks!
days-before-pr-stale: 14
days-before-pr-close: 14
stale-pr-label: "status:stale"
stale-pr-message: >
Marking this pull request as stale since it has been open for 14 days with no activity. This
PR will be closed if no further activity occurs.
close-pr-message: >
This pull request was closed because it has been inactive for 28 days. Please open a new
pull request if you need further assistance. Thanks!
# Label that can be assigned to issues to exclude them from being marked as stale
exempt-issue-labels: "override-stale"
# Label that can be assigned to PRs to exclude them from being marked as stale
exempt-pr-labels: "override-stale"
stale-issue-message: 'Stale issue message'
stale-pr-message: 'Stale pull request message'
stale-issue-label: 'no-issue-activity'
stale-pr-label: 'no-pr-activity'

@ -0,0 +1,49 @@
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale
name: Mark stale issues and pull requests
on:
schedule:
# Scheduled to run at 1:30 UTC every day
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-issue-stale: 14
days-before-issue-close: 14
stale-issue-label: "status:stale"
close-issue-reason: not_planned
any-of-labels: "status:awaiting user response,status:more data needed"
stale-issue-message: >
Marking this issue as stale since it has been open for 14 days with no activity.
This issue will be closed if no further activity occurs.
close-issue-message: >
This issue was closed because it has been inactive for 28 days.
Please post a new issue if you need further assistance. Thanks!
days-before-pr-stale: 14
days-before-pr-close: 14
stale-pr-label: "status:stale"
stale-pr-message: >
Marking this pull request as stale since it has been open for 14 days with no activity.
This PR will be closed if no further activity occurs.
close-pr-message: >
This pull request was closed because it has been inactive for 28 days.
Please open a new pull request if you need further assistance. Thanks!
# Label that can be assigned to issues to exclude them from being marked as stale
exempt-issue-labels: 'override-stale'
# Label that can be assigned to PRs to exclude them from being marked as stale
exempt-pr-labels: "override-stale"

@ -1,16 +1,110 @@
---
name: build
on: [push, pull_request]
name: test
on:
push:
branches: [master]
pull_request:
workflow_dispatch:
env:
POETRY_VERSION: "1.4.2"
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
test_type:
- "core"
- "extended"
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: "snok/install-poetry@v1"
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.4.2"
cache-key: ${{ matrix.test_type }}
install-command:
if [ "${{ matrix.test_type }}" == "core" ]; then
echo "Running core tests, installing dependencies with poetry..."
poetry install
else
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing
fi
- name: Run ${{matrix.test_type}} tests
run: |
if [ "${{ matrix.test_type }}" == "core" ]; then
make test
else
make extended_tests
fi
shell: bash
name: Python ${{ matrix.python-version }} ${{ matrix.test_type }}
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.4.2"
cache-key: ${{ matrix.test_type }}
install-command: |
if [ "${{ matrix.test_type }}" == "core" ]; then
echo "Running core tests, installing dependencies with poetry..."
poetry install
else
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing
fi
- name: Run ${{matrix.test_type}} tests
run: |
if [ "${{ matrix.test_type }}" == "core" ]; then
make test
else
make extended_tests
fi
shell: bash
build:
runs-on: ubuntu-latest
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
test_type:
- "core"
- "extended"
name: Python ${{ matrix.python-version }} ${{ matrix.test_type }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- name: Install dependencies
- name: Set up Python ${{ matrix.python-version }}
uses: "./.github/actions/poetry_setup"
with:
python-version: ${{ matrix.python-version }}
poetry-version: "1.4.2"
cache-key: ${{ matrix.test_type }}
install-command: |
if [ "${{ matrix.test_type }}" == "core" ]; then
echo "Running core tests, installing dependencies with poetry..."
poetry install
else
echo "Running extended tests, installing dependencies with poetry..."
poetry install -E extended_testing
fi
- name: Run ${{matrix.test_type}} tests
run: |
pip install pytest
- name: Run Python unit tests
run: pytest
- name: Verify that the Docker image for the action builds
run: docker build . --file Dockerfile
if [ "${{ matrix.test_type }}" == "core" ]; then
make test
else
make extended_tests
fi
shell: bash

@ -1,25 +1,78 @@
---
# Notebook-related checks
name: Presubmit checks
on: [push, pull_request]
on:
# Relevant PRs
pull_request:
paths:
- "swarms/**"
- "tests/**"
# Allow manual runs
workflow_dispatch:
jobs:
# Disabled until google/pytype/issues/151 is resolved
# pytype3_10:
# name: pytype 3.10
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v4
# - uses: ./.github/library/setup
# - name: Install pytype
# run: pip install -q pytype
# - name: Run pytype
# run: pytype ./swarms
test3_11:
name: Test Py3.11
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Run tests
run: |
python --version
pip install .[dev]
python -m pytest
test3_10:
name: Test Py3.10
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Run tests
run: |
python --version
pip install -q .[dev]
python -m pytest
test3_9:
name: Test Py3.9
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- name: Run tests
run: |
python --version
pip install .[dev]
python -m pytest
pytype3_10:
name: pytype 3.10
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Run pytype
run: |
python --version
pip install .[dev]
pip install -q gspread ipython
pytype
format:
name: Check format with black
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: ./.github/library/setup
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Check format
run: |
python --version

@ -0,0 +1,27 @@
name: Unit Tests
on:
push:
branches:
- master
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.10"
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install pytest
- name: Run unit tests
run: pytest

@ -0,0 +1,36 @@
name: build
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: '3.9'
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install pytest
pip install swarms
- name: Run Python unit tests
run: pytest
- name: Verify that the Docker image for the action builds
run: docker build . --file Dockerfile
- name: Verify integration test results
run: pytest

@ -1,10 +1,11 @@
---
name: Welcome WorkAgent
on:
issues:
types: [opened]
pull_request_target:
types: [opened]
jobs:
build:
name: 👋 Welcome
@ -14,10 +15,5 @@ jobs:
- uses: actions/first-interaction@v1.3.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
issue-message: >
Hello there, thank you for opening an Issue! 🙏🏻 The team was notified and they will get back
to you asap.
pr-message: >-
Hello there, thank you for opening a PR! 🙏🏻 The team was notified and they will get back
to you asap.
issue-message: "Hello there, thank you for opening an Issue! 🙏🏻 The team was notified and they will get back to you asap."
pr-message: "Hello there, thank you for opening a PR! 🙏🏻 The team was notified and they will get back to you asap."

@ -1,4 +1,3 @@
---
repos:
- repo: https://github.com/ambv/black
rev: 22.3.0
@ -8,7 +7,7 @@ repos:
rev: 'v0.0.255'
hooks:
- id: ruff
args: ['--unsafe-fixes']
args: [--unsafe-fixes]
- repo: https://github.com/nbQA-dev/nbQA
rev: 1.6.3
hooks:

@ -1,11 +1,13 @@
---
version: 2
build:
os: ubuntu-22.04
tools:
python: "3.11"
mkdocs:
configuration: mkdocs.yml
python:
install:
- requirements: requirements.txt

@ -1,6 +0,0 @@
---
formatter:
type: basic
include_document_start: true
max_line_length: 100
pad_line_comments: 2

@ -1,14 +0,0 @@
---
extends: default
rules:
line-length:
max: 127
truthy:
# GitHub Actions
check-keys: false
ignore:
# GH Actions
- lib
- .venv

@ -30,7 +30,6 @@ This guide is intended for developers and contributors to the `swarms` project w
## Prerequisites
Before you begin, ensure you have:
- A GitHub account
- Git installed on your machine
- Basic command-line proficiency
@ -116,13 +115,14 @@ docker run -it --rm swarms-dev
- Introduce Docker Compose and its role in simplifying multi-container setups.
- Create a `docker-compose.yml` file for the `swarms` project.
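
A minimal docker-compose.yml sketch for that section might look as follows; the service name reuses the swarms-dev tag from the docker run example above, while the build context, volume mount, and command are assumptions rather than repository content:

services:
  swarms-dev:
    build:
      context: .
      dockerfile: Dockerfile
    volumes:
      - .:/app  # mount the source tree for live editing
    command: bash  # placeholder; swap in your dev entrypoint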
## Dockerfile
Creating a Dockerfile for deploying the `swarms` framework to the cloud involves setting up the necessary environment to run your Python application, ensuring all dependencies are installed, and configuring the container to execute the desired tasks. Here's an example Dockerfile that sets up such an environment:
```Dockerfile
# Use an official Python runtime as a parent image
FROM python:3.10-slim
FROM python:3.9-slim
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
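
The hunk cuts the example off after the first ENV instruction. A typical continuation for such a Dockerfile is sketched below; requirements.txt is referenced elsewhere in this repository, but the default command is a hypothetical placeholder:

ENV PYTHONUNBUFFERED 1

# Install dependencies first so this layer is cached across source changes
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy the application code and define a default command
COPY . .
CMD ["python", "example.py"]  # hypothetical entrypoint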

@ -19,7 +19,7 @@ python -m pip install --upgrade revChatGPT
```
**Supported Python Versions:**
- Minimum: Python3.10
- Minimum: Python3.9
- Recommended: Python3.11+
### Initial Setup and Configuration <a name="initial-setup"></a>

@ -16,7 +16,7 @@ Swarms provides you with all the building blocks you need to build reliable, pro
## 💻 Install
You can install `swarms` with pip in a
[**Python>=3.10**](https://www.python.org/) environment.
[**Python>=3.8**](https://www.python.org/) environment.
!!! example "pip install (recommended)"

@ -1,6 +1,6 @@
from swarms import Agent, OpenAIChat
# Initialize the workflow
## Initialize the workflow
agent = Agent(
llm=OpenAIChat(),
max_loops=1,

@ -1,4 +1,3 @@
---
site_name: Swarms Docs
plugins:
- glightbox
@ -56,10 +55,11 @@ markdown_extensions:
- def_list
- footnotes
nav:
- Home:
- Home:
- Overview: "index.md"
- Contributing: "contributing.md"
- Swarms:
- Limitations of Individual Agents: "limits_of_individual_agents.md"
- Swarms:
- Overview: "swarms/index.md"
- swarms.agents:
- Agents:
@ -148,7 +148,7 @@ nav:
- phoenix_tracer: "swarms/utils/phoenix_tracer.md"
- limit_tokens_from_string: "swarms/utils/limit_tokens_from_string.md"
- math_eval: "swarms/utils/math_eval.md"
- Guides:
- Guides:
- Overview: "examples/index.md"
- Agents:
- Agent: "examples/flow.md"
@ -156,12 +156,12 @@ nav:
- Swarms:
- SequentialWorkflow: "examples/reliable_autonomous_agents.md"
- 20+ Autonomous Agent Blogs: "examples/ideas.md"
- Applications:
- Applications:
- CustomerSupport:
- Overview: "applications/customer_support.md"
- Marketing:
- Overview: "applications/marketing_agencies.md"
- Corporate:
- Corporate:
- FAQ: "corporate/faq.md"
- Purpose: "corporate/purpose.md"
- Roadmap: "corporate/roadmap.md"

@ -2,12 +2,10 @@ import pandas as pd
from swarms import dataframe_to_text
# # Example usage:
df = pd.DataFrame(
{
"A": [1, 2, 3],
"B": [4, 5, 6],
"C": [7, 8, 9],
}
)
df = pd.DataFrame({
'A': [1, 2, 3],
'B': [4, 5, 6],
'C': [7, 8, 9],
})
print(dataframe_to_text(df))

poetry.lock (generated, 6085 lines): file diff suppressed because it is too large.

@ -11,36 +11,29 @@ license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
homepage = "https://github.com/kyegomez/swarms"
documentation = "https://swarms.apac.ai"
readme = "README.md"
readme = "README.md" # Assuming you have a README.md
repository = "https://github.com/kyegomez/swarms"
keywords = [
"artificial intelligence",
"deep learning",
"optimizers",
"Prompt Engineering",
"swarms",
"agents",
]
keywords = ["artificial intelligence", "deep learning", "optimizers", "Prompt Engineering", "swarms", "agents"]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.10"
]
[tool.poetry.dependencies]
python = ">=3.10,<3.12"
python = "^3.6.1"
torch = "2.1.1"
transformers = "4.38.1"
openai = "1.3.0"
transformers = "4.37.1"
openai = "0.28.0"
langchain = "0.0.333"
asyncio = "3.4.3"
einops = "0.7.0"
google-generativeai = "0.3.1"
langchain-experimental = "0.0.10"
tensorflow = "^2.15.0"
tensorflow = "*"
weaviate-client = "3.25.3"
opencv-python-headless = "4.8.1.78"
faiss-cpu = "1.7.4"
@ -62,7 +55,7 @@ cohere = "4.24"
huggingface-hub = "*"
pydantic = "1.10.12"
tenacity = "8.2.2"
Pillow = "10.2.0"
Pillow = "9.4.0"
chromadb = "*"
termcolor = "2.2.0"
black = "23.3.0"
@ -93,9 +86,6 @@ types-chardet = "^5.0.4.6"
mypy-protobuf = "^3.0.0"
[tool.poetry.group.test.dependencies]
pytest = "^8.0.2"
[tool.autopep8]
max_line_length = 70
ignore = "E501,W6" # or ["E501", "W6"]

@ -1,2 +0,0 @@
[pytest]
testpaths = tests

@ -1,10 +0,0 @@
exclude = ["./playground", "./tests", "./scripts"]
line-length = 127
[lint]
ignore = ["E203"]
select = ["E", "F", "W"]
[lint.per-file-ignores]
"./swarms/prompts/**.py" = ["E501", "W291", "W293"]
"./swarms/__init__.py" = ["F401"]

@ -23,12 +23,9 @@ class TextArtifact(BaseArtifact):
Methods:
__add__(self, other: BaseArtifact) -> TextArtifact: Concatenates the text value of the artifact with another artifact.
__bool__(self) -> bool: Checks if the text value of the artifact is non-empty.
generate_embedding(self, driver: BaseEmbeddingModel) -> Optional[list[float]]:
Generates the embedding of the text artifact using a given embedding model.
token_count(self, tokenizer: BaseTokenizer) -> int:
Counts the number of tokens in the text artifact using a given tokenizer.
to_bytes(self) -> bytes:
Converts the text value of the artifact to bytes using the specified encoding and error handler.
generate_embedding(self, driver: BaseEmbeddingModel) -> Optional[list[float]]: Generates the embedding of the text artifact using a given embedding model.
token_count(self, tokenizer: BaseTokenizer) -> int: Counts the number of tokens in the text artifact using a given tokenizer.
to_bytes(self) -> bytes: Converts the text value of the artifact to bytes using the specified encoding and error handler.
"""
value: str
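
A hypothetical usage sketch of the methods listed in this docstring; the import path and constructor shape are assumptions inferred from the value: str field above, not verified against the module:

from swarms.artifacts.text_artifact import TextArtifact  # assumed import path

a = TextArtifact(value="Hello, ")
b = TextArtifact(value="world")
combined = a + b            # __add__ concatenates the text values
print(bool(combined))       # __bool__ is True for non-empty text
print(combined.to_bytes())  # encoded with the artifact's encoding settings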

@ -7,8 +7,7 @@ class InternalMemoryBase(ABC):
"""Abstract base class for internal memory of agents in the swarm."""
def __init__(self, n_entries):
"""Initialize the internal memory.
In the current architecture the memory always consists of a set of solutions or evaluations.
"""Initialize the internal memory. In the current architecture the memory always consists of a set of solutions or evaluations.
During the operation, the agent should retrieve the best solutions from its internal memory based on the score.
Moreover, the project is designed around LLMs for the proof of concepts, so we treat all entry content as a string.
@ -29,8 +28,7 @@ class InternalMemoryBase(ABC):
class DictInternalMemory(InternalMemoryBase):
def __init__(self, n_entries: int):
"""
Initialize the internal memory.
In the current architecture the memory always consists of a set of solutions or evaluations.
Initialize the internal memory. In the current architecture the memory always consists of a set of solutions or evaluations.
Simple key-value store for now.
Args:

@ -16,15 +16,13 @@ class DictSharedMemory:
Methods:
__init__(self, file_loc: str = None) -> None: Initializes the shared memory.
add_entry(self, score: float, agent_id: str, agent_cycle: int, entry: Any) -> bool:
Adds an entry to the internal memory.
add_entry(self, score: float, agent_id: str, agent_cycle: int, entry: Any) -> bool: Adds an entry to the internal memory.
get_top_n(self, n: int) -> None: Gets the top n entries from the internal memory.
write_to_file(self, data: Dict[str, Dict[str, Any]]) -> bool: Writes the internal memory to a file.
"""
def __init__(self, file_loc: str = None) -> None:
"""Initialize the shared memory.
In the current architecture the memory always consists of a set of solutions or evaluations.
"""Initialize the shared memory. In the current architecture the memory always consists of a set of solutions or evaluations.
Moreover, the project is designed around LLMs for the proof of concepts, so we treat all entry content as a string.
"""
if file_loc is not None:

@ -153,8 +153,7 @@ class LangchainChromaVectorMemory:
query (str): The query to search for.
k (int): The number of results to return.
type (str): The type of search to perform: "cos" or "mmr".
distance_threshold (float):
The similarity threshold to use for the search. Results with distance > similarity_threshold will be dropped.
distance_threshold (float): The similarity threshold to use for the search. Results with distance > similarity_threshold will be dropped.
Returns:
list[str]: A list of the top k results.

@ -24,31 +24,13 @@ class PineconeDB(AbstractVectorDatabase):
index (pinecone.Index, optional): The Pinecone index to use. Defaults to None.
Methods:
upsert_vector(
vector: list[float],
vector_id: Optional[str] = None,
namespace: Optional[str] = None,
meta: Optional[dict] = None,
**kwargs
) -> str:
upsert_vector(vector: list[float], vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, **kwargs) -> str:
Upserts a vector into the index.
load_entry(
vector_id: str,
namespace: Optional[str] = None
) -> Optional[BaseVectorStore.Entry]:
load_entry(vector_id: str, namespace: Optional[str] = None) -> Optional[BaseVectorStore.Entry]:
Loads a single vector from the index.
load_entries(
namespace: Optional[str] = None
) -> list[BaseVectorStore.Entry]:
load_entries(namespace: Optional[str] = None) -> list[BaseVectorStore.Entry]:
Loads all vectors from the index.
query(
query: str,
count: Optional[int] = None,
namespace: Optional[str] = None,
include_vectors: bool = False,
include_metadata=True,
**kwargs
) -> list[BaseVectorStore.QueryResult]:
query(query: str, count: Optional[int] = None, namespace: Optional[str] = None, include_vectors: bool = False, include_metadata=True, **kwargs) -> list[BaseVectorStore.QueryResult]:
Queries the index for vectors similar to the given query string.
create_index(name: str, **kwargs) -> None:
Creates a new index.
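
A hypothetical usage sketch assembled from the method signatures documented above; the import path and constructor arguments are assumptions, since this hunk does not show them:

from swarms.memory import PineconeDB  # assumed import path

db = PineconeDB(api_key="...", index_name="example")  # assumed constructor
vector_id = db.upsert_vector([0.1, 0.2, 0.3], vector_id="doc-1", namespace="demo")
entry = db.load_entry(vector_id, namespace="demo")
results = db.query("similar documents", count=5, namespace="demo")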

@ -12,10 +12,8 @@ from swarms.models.clipq import CLIPQ # noqa: E402
# from swarms.models.whisperx_model import WhisperX # noqa: E402
# from swarms.models.kosmos_two import Kosmos # noqa: E402
# from swarms.models.cog_agent import CogAgent # noqa: E402
# Function calling models
from swarms.models.fire_function import (
FireFunctionCaller,
)
## Function calling models
from swarms.models.fire_function import FireFunctionCaller
from swarms.models.fuyu import Fuyu # noqa: E402
from swarms.models.gemini import Gemini # noqa: E402
from swarms.models.gigabind import Gigabind # noqa: E402
@ -52,7 +50,7 @@ from swarms.models.timm import TimmModel # noqa: E402
# ) # noqa: E402
from swarms.models.together import TogetherLLM # noqa: E402
# Types
############## Types
from swarms.models.types import ( # noqa: E402
AudioModality,
ImageModality,

@ -40,30 +40,14 @@ class BaseMultiModalModel:
Examples:
>>> from swarms.models.base_multimodal_model import BaseMultiModalModel
>>> model = BaseMultiModalModel()
>>> link = "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"
>>> model.run("Generate a summary of this text")
>>> model.run("Generate a summary of this text", link)
>>> model.run("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")
>>> model.run_batch(["Generate a summary of this text", "Generate a summary of this text"])
>>> model.run_batch([
("Generate a summary of this text", link),
("Generate a summary of this text", link)
])
>>> model.run_batch_async([
"Generate a summary of this text",
"Generate a summary of this text"
])
>>> model.run_batch_async([
("Generate a summary of this text", link),
("Generate a summary of this text", link)
])
>>> model.run_batch_async_with_retries([
"Generate a summary of this text",
"Generate a summary of this text"
])
>>> model.run_batch_async_with_retries([
("Generate a summary of this text", link),
("Generate a summary of this text", link)
])
>>> model.run_batch([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
>>> model.run_batch_async(["Generate a summary of this text", "Generate a summary of this text"])
>>> model.run_batch_async([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
>>> model.run_batch_async_with_retries(["Generate a summary of this text", "Generate a summary of this text"])
>>> model.run_batch_async_with_retries([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
>>> model.generate_summary("Generate a summary of this text")
>>> model.set_temperature(0.5)
>>> model.set_max_tokens(500)

@ -2,8 +2,7 @@ r"""
BioGPT
Pre-trained language models have attracted increasing attention in the biomedical domain,
inspired by their great success in the general natural language domain.
Among the two main branches of pre-trained language models in the general language domain,
i.e. BERT (and its variants) and GPT (and its variants),
Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants),
the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT.
While they have achieved great success on a variety of discriminative downstream biomedical tasks,
the lack of generation ability constrains their application scope.
@ -25,6 +24,7 @@ advantage of BioGPT on biomedical literature to generate fluent descriptions for
number = {6},
year = {2022},
month = {09},
abstract = "{Pre-trained language models have attracted increasing attention in the biomedical domain, inspired by their great success in the general natural language domain. Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants), the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT. While they have achieved great success on a variety of discriminative downstream biomedical tasks, the lack of generation ability constrains their application scope. In this paper, we propose BioGPT, a domain-specific generative Transformer language model pre-trained on large-scale biomedical literature. We evaluate BioGPT on six biomedical natural language processing tasks and demonstrate that our model outperforms previous models on most tasks. Especially, we get 44.98\%, 38.42\% and 40.76\% F1 score on BC5CDR, KD-DTI and DDI end-to-end relation extraction tasks, respectively, and 78.2\% accuracy on PubMedQA, creating a new record. Our case study on text generation further demonstrates the advantage of BioGPT on biomedical literature to generate fluent descriptions for biomedical terms.}",
issn = {1477-4054},
doi = {10.1093/bib/bbac409},
url = {https://doi.org/10.1093/bib/bbac409},

@ -161,8 +161,7 @@ class CogVLMMultiModal(BaseMultiModalModel):
Methods:
run: Generates a response using the CogVLM model.
generate_stream_cogvlm: Generates a stream of responses using the CogVLM model in inference mode.
process_history_and_images: Processes history messages to extract text, identify the last user query,
and convert base64 encoded image URLs to PIL images.
process_history_and_images: Processes history messages to extract text, identify the last user query, and convert base64 encoded image URLs to PIL images.
Example:
>>> model = CogVLMMultiModal()

@ -37,8 +37,7 @@ class ElevenLabsText2SpeechTool(BaseTool):
Defaults to ElevenLabsModel.MULTI_LINGUAL.
name (str): The name of the tool. Defaults to "eleven_labs_text2speech".
description (str): The description of the tool.
Defaults to "A wrapper around Eleven Labs Text2Speech. Useful for when you need to convert text to speech.
It supports multiple languages, including English, German, Polish, Spanish, Italian, French, Portuguese, and Hindi."
Defaults to "A wrapper around Eleven Labs Text2Speech. Useful for when you need to convert text to speech. It supports multiple languages, including English, German, Polish, Spanish, Italian, French, Portuguese, and Hindi."
Usage:

@ -11,8 +11,7 @@ from swarms.utils.supervision_visualizer import MarkVisualizer
class GPT4VSAM(BaseMultiModalModel):
"""
GPT4VSAM class represents a multi-modal model that combines the capabilities of GPT-4 and SegmentAnythingMarkGenerator.
It takes an instance of BaseMultiModalModel (vlm)
and a device as input and provides methods for loading images and making predictions.
It takes an instance of BaseMultiModalModel (vlm) and a device as input and provides methods for loading images and making predictions.
Args:
vlm (BaseMultiModalModel): An instance of BaseMultiModalModel representing the visual language model.

@ -203,8 +203,7 @@ class GPT4VisionAPI(BaseMultiModalModel):
"""
PROMPT = f"""
These are frames from a video that I want to upload.
Generate a compelling description that I can upload along with the video:
These are frames from a video that I want to upload. Generate a compelling description that I can upload along with the video:
{frames}
"""

@ -63,8 +63,7 @@ class Idefics(BaseMultiModalModel):
response = model.chat(user_input)
print(response)
user_input = "User: And who is that? \
https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
user_input = "User: And who is that? https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
response = model.chat(user_input)
print(response)

@ -26,8 +26,7 @@ class MedicalSAM:
Methods:
__post_init__(): Initializes the MedicalSAM object.
download_model_weights(model_path: str):
Downloads the model weights from the specified URL and saves them to the given file path.
download_model_weights(model_path: str): Downloads the model weights from the specified URL and saves them to the given file path.
preprocess(img): Preprocesses the input image.
run(img, box): Runs the semantic segmentation on the input image within the specified bounding box.

@ -1,7 +1,6 @@
from typing import Any, Dict, List, Optional, Union
from openai import OpenAI
import openai
import requests
from pydantic import BaseModel, validator
from tenacity import (
@ -148,7 +147,6 @@ class OpenAIFunctionCaller:
self.user = user
self.messages = messages if messages is not None else []
self.timeout_sec = timeout_sec
self.client = OpenAI(api_key=self.openai_api_key)
def add_message(self, role: str, content: str):
self.messages.append({"role": role, "content": content})
@ -165,7 +163,7 @@ class OpenAIFunctionCaller:
):
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.openai_api_key,
"Authorization": "Bearer " + openai.api_key,
}
json_data = {"model": self.model, "messages": messages}
if tools is not None:
@ -237,7 +235,7 @@ class OpenAIFunctionCaller:
)
def call(self, task: str, *args, **kwargs) -> Dict:
return self.client.completions.create(
return openai.Completion.create(
engine=self.model,
prompt=task,
max_tokens=self.max_tokens,

@ -37,6 +37,9 @@ from tenacity import (
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
from importlib.metadata import version
from packaging.version import parse
@ -174,11 +177,11 @@ def _create_retry_decorator(
import openai
errors = [
openai.Timeout,
openai.APIError,
openai.APIConnectionError,
openai.RateLimitError,
openai.ServiceUnavailableError,
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors,
@ -349,13 +352,7 @@ class BaseOpenAI(BaseLLM):
try:
import openai
values["client"] = openai.OpenAI(
api_key=values["openai_api_key"],
api_base=values["openai_api_base"] or None,
organization=values["openai_organization"] or None,
# TODO: Reenable this when openai package supports proxy
# proxy=values["openai_proxy"] or None,
)
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
@ -648,11 +645,9 @@ class BaseOpenAI(BaseLLM):
"organization": self.openai_organization,
}
if self.openai_proxy:
pass
import openai
# TODO: The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the
# client, e.g. 'OpenAI(proxy={"http": self.openai_proxy, "https": self.openai_proxy})'
# openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
@ -938,36 +933,44 @@ class OpenAIChat(BaseLLM):
@root_validator()
def validate_environment(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
# openai_api_key = get_from_dict_or_env(
# values, "openai_api_key", "OPENAI_API_KEY"
# )
# openai_api_base = get_from_dict_or_env(
# values,
# "openai_api_base",
# "OPENAI_API_BASE",
# default="",
# )
# openai_proxy = get_from_dict_or_env(
# values,
# "openai_proxy",
# "OPENAI_PROXY",
# default="",
# )
# openai_organization = get_from_dict_or_env(
# values,
# "openai_organization",
# "OPENAI_ORGANIZATION",
# default="",
# )
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base
if openai_organization:
openai.organization = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.OpenAI
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is"

@ -27,8 +27,7 @@ class SAM:
processor (SamProcessor): The processor for the SAM model.
Methods:
run(task=None, img=None, *args, **kwargs):
Runs the SAM model on the given image and returns the segmentation scores and masks.
run(task=None, img=None, *args, **kwargs): Runs the SAM model on the given image and returns the segmentation scores and masks.
process_img(img: str = None, *args, **kwargs): Processes the input image and returns the processed image.
"""

@ -2,29 +2,18 @@
SpeechT5 (TTS task)
SpeechT5 model fine-tuned for speech synthesis (text-to-speech) on LibriTTS.
This model was introduced in SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing by
Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu,
Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
This model was introduced in SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
SpeechT5 was first released in this repository, original weights. The license used is MIT.
Model Description
Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models,
we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text
representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific
(speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network
models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on
the output of the decoder.
Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation,
hoping to improve the modeling capability for both speech and text. To align the textual and speech information into
this unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text
states with latent units as the interface between encoder and decoder.
Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing
tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement,
and speaker identification.
Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models, we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific (speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on the output of the decoder.
Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation, hoping to improve the modeling capability for both speech and text. To align the textual and speech information into this unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text states with latent units as the interface between encoder and decoder.
Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and speaker identification.
Developed by: Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
Shared by [optional]: Matthijs Hollemans
Model type: text-to-speech
Language(s) (NLP): [More Information Needed]

@ -49,7 +49,7 @@ class SSD1B:
max_time_seconds: int = 60
save_folder: str = "images"
image_format: str = "png"
device: str = "cuda" if torch.cuda.is_available() else "cpu"
device: str = "cuda"
dashboard: bool = False
cache = TTLCache(maxsize=100, ttl=3600)
pipe = StableDiffusionXLPipeline.from_pretrained(

@ -34,7 +34,7 @@ commands: {
"""
# FEW SHOT EXAMPLES #
########### FEW SHOT EXAMPLES ################
SCENARIOS = """
commands: {
"tools": {

@ -62,6 +62,6 @@ def worker_tools_sop_promp(name: str, memory: str, time=time):
[{memory}]
Human: Determine which next command to use, and respond using the format specified above:
""".format(name=name, time=time, memory=memory) # noqa: F521
""".format(name=name, time=time, memory=memory)
return str(out)

@ -98,8 +98,7 @@ class AsyncWorkflow:
# if self.dashboard:
# self.display()
# Add a stopping condition to stop the workflow,
# if provided but stopping_condition takes in a parameter s for string
# Add a stopping condition to stop the workflow, if provided but stopping_condition takes in a parameter s for string
if self.stopping_condition:
if self.stopping_condition(self.results):
break

@ -86,6 +86,10 @@ class AbstractSwarm(ABC):
def step(self):
"""Step the swarm"""
# @abstractmethod
def add_agent(self, agent: "Agent"):
"""Add a agent to the swarm"""
# @abstractmethod
def remove_agent(self, agent: "Agent"):
"""Remove a agent from the swarm"""

@ -195,16 +195,7 @@ class BaseWorkflow(BaseStructure):
>>> workflow.add("Create a report on these metrics", llm)
>>> workflow.delete_task("What's the weather in miami")
>>> workflow.tasks
[
Task(
description='Create a report on these metrics',
agent=Agent(llm=OpenAIChat(openai_api_key=''), max_loops=1, dashboard=False),
args=[],
kwargs={},
result=None,
history=[]
)
]
[Task(description='Create a report on these metrics', agent=Agent(llm=OpenAIChat(openai_api_key=''), max_loops=1, dashboard=False), args=[], kwargs={}, result=None, history=[])]
"""
try:
for task in self.tasks:

@ -15,8 +15,7 @@ class ConcurrentWorkflow(BaseStructure):
Args:
max_workers (int): The maximum number of workers to use for the ThreadPoolExecutor.
autosave (bool): Whether to save the state of the workflow to a file. Default is False.
saved_state_filepath (str):
The filepath to save the state of the workflow to. Default is "runs/concurrent_workflow.json".
saved_state_filepath (str): The filepath to save the state of the workflow to. Default is "runs/concurrent_workflow.json".
print_results (bool): Whether to print the results of each task. Default is False.
return_results (bool): Whether to return the results of each task. Default is False.
use_processes (bool): Whether to use processes instead of threads. Default is False.

@ -19,8 +19,7 @@ class DebatePlayer(Agent):
Args:
model_name(str): model name
name (str): name of this player
temperature (float):
higher values make the output more random, while lower values make it more focused and deterministic
temperature (float): higher values make the output more random, while lower values make it more focused and deterministic
openai_api_key (str): As the parameter name suggests
sleep_time (float): sleep because of rate limits
"""
@ -32,8 +31,7 @@ class Debate:
Args:
model_name (str): openai model name
temperature (float):
higher values make the output more random, while lower values make it more focused and deterministic
temperature (float): higher values make the output more random, while lower values make it more focused and deterministic
num_players (int): num of players
save_file_dir (str): dir path to json file
openai_api_key (str): OpenAI API key used to authenticate requests
@ -361,13 +359,6 @@ class Debate:
# with open(prompts_path, 'w') as file:
# json.dump(config, file, ensure_ascii=False, indent=4)
# debate = Debate(
# save_file_dir=save_file_dir,
# num_players=3,
# openai_api_key=openai_api_key,
# prompts_path=prompts_path,
# temperature=0,
# sleep_time=0
# )
# debate = Debate(save_file_dir=save_file_dir, num_players=3, openai_api_key=openai_api_key, prompts_path=prompts_path, temperature=0, sleep_time=0)
# debate.run()
# debate.save_file_to_json(id)

@ -17,8 +17,7 @@ class GraphWorkflow(BaseStructure):
connect(from_node, to_node): Connects two nodes in the graph.
set_entry_point(node_name): Sets the entry point node for the workflow.
add_edge(from_node, to_node): Adds an edge between two nodes in the graph.
add_conditional_edges(from_node, condition, edge_dict):
Adds conditional edges from a node to multiple nodes based on a condition.
add_conditional_edges(from_node, condition, edge_dict): Adds conditional edges from a node to multiple nodes based on a condition.
run(): Runs the workflow and returns the graph.
Examples:

@ -129,8 +129,7 @@ class MajorityVoting:
multithreaded (bool, optional): Whether to run the agents using multithreading. Defaults to False.
multiprocess (bool, optional): Whether to run the agents using multiprocessing. Defaults to False.
asynchronous (bool, optional): Whether to run the agents asynchronously. Defaults to False.
output_parser (callable, optional): A callable function to parse the output
of the majority voting system. Defaults to None.
output_parser (callable, optional): A callable function to parse the output of the majority voting system. Defaults to None.
Examples:
>>> from swarms.structs.agent import Agent

@ -3,7 +3,7 @@ from time import time_ns
from typing import Callable, List, Optional, Sequence, Union
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import AbstractSwarm
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.loguru_logger import logger
@ -43,7 +43,7 @@ def msg_hash(
)
class MessagePool(AbstractSwarm):
class MessagePool(BaseSwarm):
"""
A class representing a message pool for agents in a swarm.
@ -68,17 +68,11 @@ class MessagePool(AbstractSwarm):
>>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
>>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
>>> message_pool.get_all_messages()
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent1, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent2, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent2, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True},
{'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
"""
def __init__(

@ -1,7 +1,7 @@
import importlib.util
import sys
from importlib.metadata import version as pkg_version
import pkg_resources
import requests
from packaging import version
@ -35,7 +35,7 @@ def check_for_update():
latest_version = response.json()["info"]["version"]
# Get the current version using pkg_resources
current_version = pkg_version("swarms")
current_version = pkg_resources.get_distribution("swarms").version
return version.parse(latest_version) > version.parse(
current_version
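The two lines swapped in this hunk fetch the same installed version through different APIs: pkg_resources is the legacy setuptools interface (deprecated upstream), while importlib.metadata has been in the standard library since Python 3.8. Side by side:
from importlib.metadata import version as pkg_version
current = pkg_version("swarms")  # stdlib, Python 3.8+

import pkg_resources  # setuptools; deprecated upstream
current = pkg_resources.get_distribution("swarms").version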

@ -1,8 +1,8 @@
import platform
import subprocess
import pkg_resources
import psutil
import importlib.metadata as metadata
import toml
@ -31,7 +31,9 @@ def get_swarms_verison():
)
except Exception as e:
swarms_verison_cmd = str(e)
swarms_verison_pkg = metadata.version("swarms")
swarms_verison_pkg = pkg_resources.get_distribution(
"swarms"
).version
swarms_verison = swarms_verison_cmd, swarms_verison_pkg
return swarms_verison
@ -65,7 +67,7 @@ def get_package_mismatches(file_path="pyproject.toml"):
dependencies.update(dev_dependencies)
installed_packages = {
pkg.key: pkg.version for pkg in metadata.distributions()
pkg.key: pkg.version for pkg in pkg_resources.working_set
}
mismatches = []
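Note that pkg.key is a pkg_resources idiom; importlib.metadata distributions expose the project name via their metadata instead, so the stdlib equivalent is closer to this sketch:
import importlib.metadata as metadata

installed = {
    dist.metadata["Name"].lower(): dist.version  # .lower() approximates pkg_resources' .key
    for dist in metadata.distributions()
}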

@ -12,8 +12,7 @@ def scrape_tool_func_docs(fn: Callable) -> str:
fn (Callable): The function to scrape.
Returns:
str: A string containing the function's name, documentation string, and a list of its parameters.
Each parameter is represented as a line containing the parameter's name, default value, and annotation.
str: A string containing the function's name, documentation string, and a list of its parameters. Each parameter is represented as a line containing the parameter's name, default value, and annotation.
"""
try:
# If the function is a tool, get the original function
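A generic sketch of that kind of introspection using the standard library's inspect module; illustrative, not the repository's exact implementation:
import inspect
from typing import Callable

def describe(fn: Callable) -> str:
    sig = inspect.signature(fn)
    params = "\n".join(
        f"  {name}: default={p.default!r}, annotation={p.annotation!r}"
        for name, p in sig.parameters.items()
    )
    return f"{fn.__name__}\n{inspect.getdoc(fn) or ''}\nParameters:\n{params}"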

@ -108,7 +108,7 @@ class Action:
def to_json(self):
try:
tool_output = json.loads(self.tool_output)
except json.JSONDecodeError:
except:
tool_output = self.tool_output
return {
"thought": self.thought,

@ -18,8 +18,7 @@ def load_model_torch(
model_path (str): Path to the saved model file.
device (torch.device): Device to move the model to.
model (nn.Module): The model architecture, if the model file only contains the state dictionary.
strict (bool): Whether to strictly enforce that the keys in the state dictionary match the keys returned by the model's
`state_dict()` function.
strict (bool): Whether to strictly enforce that the keys in the state dictionary match the keys returned by the model's `state_dict()` function.
map_location (callable): A function to remap the storage locations of the loaded model.
*args: Additional arguments to pass to `torch.load`.
**kwargs: Additional keyword arguments to pass to `torch.load`.
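A sketch of the underlying torch calls such a helper typically wraps; the Linear model stands in for a real architecture and the checkpoint path is hypothetical:
import torch
import torch.nn as nn

model = nn.Linear(4, 2)  # hypothetical architecture matching the checkpoint
state_dict = torch.load("model.pt", map_location="cpu")
model.load_state_dict(state_dict, strict=True)  # strict=True: key sets must match exactly
model.to(torch.device("cpu"))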

@ -1,7 +1,7 @@
# TESTING
# ==================
# Use an official Python runtime as a parent image
FROM python:3.10-slim
FROM python:3.9-slim
# Set environment variables to make Python output unbuffered and disable the PIP cache
ENV PYTHONDONTWRITEBYTECODE 1

@ -6,9 +6,7 @@ from swarms.structs.agent import Agent
from swarms.structs.groupchat import GroupChat, GroupChatManager
llm = OpenAIChat()
# llm2 = Anthropic()
# TODO: Mock anthropic class
llm2 = OpenAIChat()
llm2 = Anthropic()
# Mock the OpenAI class for testing

@ -0,0 +1,73 @@
# JSON
# Contents of test_json.py, which must be placed in the `tests/` directory.
import json
import pytest
from swarms.tokenizers import JSON
# Fixture for reusable JSON schema file paths
@pytest.fixture
def valid_schema_path(tmp_path):
d = tmp_path / "sub"
d.mkdir()
p = d / "schema.json"
p.write_text(
'{"type": "object", "properties": {"name": {"type":'
' "string"}}}'
)
return str(p)
@pytest.fixture
def invalid_schema_path(tmp_path):
d = tmp_path / "sub"
d.mkdir()
p = d / "invalid_schema.json"
p.write_text("this is not a valid JSON")
return str(p)
# The JSON class is abstract, so it must be subclassed for testing
class TestableJSON(JSON):
def validate(self, data):
# A real validation implementation would go here for testing
pass
# Basic tests
def test_initialize_json(valid_schema_path):
json_obj = TestableJSON(valid_schema_path)
assert json_obj.schema_path == valid_schema_path
assert "name" in json_obj.schema["properties"]
def test_load_schema_failure(invalid_schema_path):
with pytest.raises(json.JSONDecodeError):
TestableJSON(invalid_schema_path)
# Mocking tests
def test_validate_calls_method(monkeypatch):
# Mock the validate method to check that it is being called
pass
# Exception tests
def test_initialize_with_nonexistent_schema():
with pytest.raises(FileNotFoundError):
TestableJSON("nonexistent_path.json")
# Tests on different Python versions if applicable
# ...
# Grouping tests marked as slow if they perform I/O operations
@pytest.mark.slow
def test_loading_large_schema():
# Test with a large json file
pass
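A concrete validate the testable subclass could use, assuming the third-party jsonschema package and the schema attribute the tests above already rely on:
import jsonschema

class ValidatingJSON(JSON):
    def validate(self, data):
        # Raises jsonschema.ValidationError when data does not match self.schema.
        jsonschema.validate(instance=data, schema=self.schema)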

@ -15,7 +15,7 @@ custom_config = {
"verbose": True,
}
huggingface_llm = HuggingfaceLLM(
model_id="liuhaotian/llava-v1.6-mistral-7b",
model_id="NousResearch/Nous-Hermes-2-Vision-Alpha",
**custom_config,
)
mixtral = Mixtral(load_in_4bit=True, use_flash_attention_2=True)
