@ -0,0 +1,297 @@
.git
.gitignore
.env
__pycache__
*.pyc
*.pyo
*.pyd
.Python
env/
venv/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.log
.pytest_cache/
.mypy_cache/

__pycache__/
.venv/

.env

image/
audio/
video/
artifacts_three
dataframe/
.ruff_cache
.pytest_cache
static/generated
runs
Financial-Analysis-Agent_state.json
experimental
artifacts_five
encryption
errors
chroma
agent_workspace
.pt
Accounting Assistant_state.json
Unit Testing Agent_state.json
sec_agent
Devin_state.json
poetry.lock
hire_researchers
json_logs
Medical Image Diagnostic Agent_state.json
flight agent_state.json
D_state.json
artifacts_six
artifacts_seven
swarms/__pycache__
artifacts_once
transcript_generator.json
venv
.DS_Store
Cargo.lock
.DS_STORE
artifacts_logs
Medical Treatment Recommendation Agent_state.json
swarms/agents/.DS_Store
artifacts_two
logs
T_state.json
_build
conversation.txt
t1_state.json
stderr_log.txt
t2_state.json
.vscode

# Byte-compiled / optimized / DLL files
Transcript Generator_state.json
__pycache__/
*.py[cod]
*$py.class
.grit
swarm-worker-01_state.json
error.txt
Devin Worker 2_state.json

# C extensions
*.so

errors.txt

Autonomous-Agent-XYZ1B_state.json

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py
.DS_Store

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.vscode/settings.json
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*

# Org-mode
.org-id-locations
*_archive

# flymake-mode
*_flymake.*

# eshell files
/eshell/history
/eshell/lastdir

# elpa packages
/elpa/

# reftex files
*.rel

# AUCTeX auto folder
/auto/

# cask packages
.cask/
dist/

# Flycheck
flycheck_*.el

# server auth directory
/server/

# projectiles files
.projectile

# directory configuration
.dir-locals.el

# network security
/network-security.data
@ -0,0 +1,23 @@
WORKSPACE_DIR="agent_workspace"
SWARMS_API_KEY=""
USE_TELEMETRY=True
OPENAI_API_KEY="sk-"
GOOGLE_API_KEY=""
AI21_API_KEY="your_api_key_here"
COHERE_API_KEY="your_api_key_here"
ALEPHALPHA_API_KEY="your_api_key_here"
HUGGINFACEHUB_API_KEY="your_api_key_here"
EVAL_PORT=8000
MODEL_NAME="gpt-4"
USE_GPU=True
PLAYGROUND_DIR="examples"
LOG_LEVEL="INFO"
BOT_NAME="Orca"
HF_API_KEY="your_huggingface_api_key_here"
AGENTOPS_API_KEY=""
ANTHROPIC_API_KEY="your_anthropic_api_key"
AZURE_OPENAI_ENDPOINT="your_azure_openai_endpoint"
AZURE_OPENAI_DEPLOYMENT="your_azure_openai_deployment"
OPENAI_API_VERSION="your_openai_api_version"
AZURE_OPENAI_API_KEY="your_azure_openai_api_key"
AZURE_OPENAI_AD_TOKEN="your_azure_openai_ad_token"
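
# Usage sketch (an assumption, not part of the template above): values in this
# file are commonly loaded into the process environment with python-dotenv:
#   from dotenv import load_dotenv
#   import os
#   load_dotenv()  # reads the .env file into os.environ
#   api_key = os.getenv("OPENAI_API_KEY")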
@ -0,0 +1,13 @@
---
# These are supported funding model platforms
github: [kyegomez]
# patreon: # Replace with a single Patreon username
# open_collective: # Replace with a single Open Collective username
# ko_fi: # Replace with a single Ko-fi username
# tidelift: # Replace with a single Tidelift platform-name/package-name
# community_bridge: # Replace with a single Community Bridge project-name
# liberapay: # Replace with a single Liberapay username
# issuehunt: # Replace with a single IssueHunt username
# otechie: # Replace with a single Otechie username
# lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name
# custom: # Nothing
@ -0,0 +1,27 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG] "
labels: bug
assignees: kyegomez

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '...'
3. Scroll down to '...'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Additional context**
Add any other context about the problem here.
@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: 'kyegomez'

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
@ -0,0 +1,29 @@
Thank you for contributing to Swarms!

Replace this comment with:
- Description: a description of the change,
- Issue: the issue # it fixes (if applicable),
- Dependencies: any dependencies required for this change,
- Tag maintainer: for a quicker response, tag the relevant maintainer (see below),
- Twitter handle: we announce bigger features on Twitter. If your PR gets announced and you'd like a mention, we'll gladly shout you out!

Please make sure your PR passes linting and testing before submitting. Run `make format`, `make lint`, and `make test` to check this locally.

See the contribution guidelines for more information on how to write/run tests, lint, etc.:
https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md

If you're adding a new integration, please include:
1. a test for the integration, preferably unit tests that do not rely on network access (see the sketch below),
2. an example notebook showing its use.
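
For instance, a network-free test can stand in for the external service with a mock. A minimal sketch with pytest (all names below are illustrative placeholders, not real swarms classes):

```python
from unittest.mock import MagicMock


class MyIntegration:
    """Stand-in for the class your PR adds; swap in the real import."""

    def __init__(self, client):
        self.client = client

    def run(self, prompt: str) -> str:
        # Delegates to the (mocked) external client, so no network is touched.
        return self.client.complete(prompt)


def test_run_uses_client_and_returns_text():
    fake_client = MagicMock()
    fake_client.complete.return_value = "hello"
    integration = MyIntegration(fake_client)

    assert integration.run("prompt") == "hello"
    fake_client.complete.assert_called_once_with("prompt")
```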

Maintainer responsibilities:
- General / Misc / if you don't know who to tag: kye@apac.ai
- DataLoaders / VectorStores / Retrievers: kye@apac.ai
- swarms.models: kye@apac.ai
- swarms.memory: kye@apac.ai
- swarms.structures: kye@apac.ai

If no one reviews your PR within a few days, feel free to email Kye at kye@apac.ai.

See the contribution guidelines for more information on how to write/run tests, lint, etc.: https://github.com/kyegomez/swarms
@ -0,0 +1,33 @@
---
name: "Init Environment"
description: "Initialize environment for tests"
runs:
  using: "composite"
  steps:
    - name: Checkout actions
      uses: actions/checkout@v3
    - name: Set up Python ${{ matrix.python-version }}
      id: setup-python
      uses: actions/setup-python@v4
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install and configure Poetry
      uses: snok/install-poetry@v1
      with:
        virtualenvs-create: true
        virtualenvs-in-project: true
        installer-parallel: true
    - name: Load cached venv
      id: cached-poetry-dependencies
      uses: actions/cache@v3
      with:
        path: .venv
        key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
    - name: Install dependencies
      if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
      run: poetry install --no-interaction --no-root --with test --with dev --all-extras
      shell: bash
    - name: Activate venv
      run: |
        source .venv/bin/activate
        echo PATH=$PATH >> $GITHUB_ENV
      shell: bash
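
# Usage sketch (assumed, for illustration): a composite action like this is
# referenced from a workflow job by its directory path, e.g.
#   steps:
#     - uses: ./.github/actions/init-environment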
@ -0,0 +1,12 @@
---
# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "weekly"
@ -0,0 +1,34 @@
---
documentation:
  - changed-files:
      - any-glob-to-any-file: ["docs/**", "*.md"]
tests:
  - changed-files:
      - any-glob-to-any-file: "tests/**"
agents:
  - changed-files:
      - any-glob-to-any-file: "swarms/agents/**"
artifacts:
  - changed-files:
      - any-glob-to-any-file: "swarms/artifacts/**"
memory:
  - changed-files:
      - any-glob-to-any-file: "swarms/memory/**"
models:
  - changed-files:
      - any-glob-to-any-file: "swarms/models/**"
prompts:
  - changed-files:
      - any-glob-to-any-file: "swarms/prompts/**"
structs:
  - changed-files:
      - any-glob-to-any-file: "swarms/structs/**"
telemetry:
  - changed-files:
      - any-glob-to-any-file: "swarms/telemetry/**"
tools:
  - changed-files:
      - any-glob-to-any-file: "swarms/tools/**"
utils:
  - changed-files:
      - any-glob-to-any-file: "swarms/utils/**"
@ -0,0 +1,47 @@
---
name: release
on:
  pull_request:
    types:
      - closed
    branches:
      - master
    paths:
      - "pyproject.toml"
env:
  POETRY_VERSION: "1.4.2"
jobs:
  if_release:
    if: github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'release')
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install poetry
        run: pipx install poetry==$POETRY_VERSION
      - name: Set up Python 3.9
        uses: actions/setup-python@v5
        with:
          python-version: "3.9"
          cache: "poetry"
      - name: Build project for distribution
        run: poetry build
      - name: Check Version
        id: check-version
        run: |
          echo version=$(poetry version --short) >> $GITHUB_OUTPUT
      - name: Create Release
        uses: ncipollo/release-action@v1
        with:
          artifacts: "dist/*"
          token: ${{ secrets.GITHUB_TOKEN }}
          draft: false
          generateReleaseNotes: true
          tag: v${{ steps.check-version.outputs.version }}
          commit: master
      - name: Publish to PyPI
        env:
          POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
        run: |-
          poetry publish
@ -0,0 +1,25 @@
name: autofix.ci

on:
  pull_request:
  push:
    branches: ["main"]
permissions:
  contents: read

jobs:
  autofix:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v5
      - run: go install github.com/google/yamlfmt/cmd/yamlfmt@latest
      - run: yamlfmt .

      - uses: actions/setup-python@v5
      - run: pip install ruff
      - run: ruff format .
      - run: ruff check --fix .

      - uses: autofix-ci/action@ff86a557419858bb967097bfc916833f5647fa8c
@ -0,0 +1,43 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
#
# This workflow file requires a free account on Bearer.com to manage findings, notifications and more.
# See https://docs.bearer.com/guides/bearer-cloud/
name: Bearer

on:
  push:
    branches: ["master"]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: ["master"]
  schedule:
    - cron: '24 22 * * 6'

permissions:
  contents: read # for actions/checkout to fetch code
  security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
  actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status

jobs:
  bearer:
    runs-on: ubuntu-latest
    steps:
      # Checkout project source
      - uses: actions/checkout@v4
      # Scan code using Bearer CLI
      - name: Run Report
        id: report
        uses: bearer/bearer-action@828eeb928ce2f4a7ca5ed57fb8b59508cb8c79bc
        with:
          api-key: ${{ secrets.BEARER_TOKEN }}
          format: sarif
          output: results.sarif
          exit-code: 0
      # Upload SARIF file generated in previous step
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: results.sarif
@ -0,0 +1,45 @@
---
name: Codacy
on:
  push:
    branches: ["master"]
  pull_request:
    branches: ["master"]
  schedule:
    - cron: "0 0 * * *"

permissions:
  contents: read

jobs:
  codacy-security-scan:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
      actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
    name: Codacy Security Scan
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      # Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis
      - name: Run Codacy Analysis CLI
        uses: codacy/codacy-analysis-cli-action@97bf5df3c09e75f5bcd72695998f96ebd701846e
        with:
          # Check https://github.com/codacy/codacy-analysis-cli#project-token to
          # get your project token from your Codacy repository
          # You can also omit the token and run the tools that support default configurations
          project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
          verbose: true
          output: results.sarif
          format: sarif
          # Adjust severity of non-security issues
          gh-code-scanning-compat: true
          # Force a 0 exit code to allow SARIF file generation.
          # This hands control over PR rejection to the GitHub side.
          max-allowed-issues: 2147483647
      # Upload the SARIF file generated in the previous step
      - name: Upload SARIF results file
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: results.sarif
@ -0,0 +1,41 @@
---
name: "CodeQL"
on:
  push:
    branches: ["master"]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: ["master"]
  schedule:
    - cron: "33 12 * * 5"
jobs:
  analyze:
    name: Analyze
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners
    # Consider using larger runners for possible analysis time improvements.
    runs-on: ubuntu-latest
    timeout-minutes: 360
    permissions:
      actions: read
      contents: read
      security-events: write
    strategy:
      fail-fast: false
      matrix:
        language: ["python"]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
      - name: Autobuild
        uses: github/codeql-action/autobuild@v3
      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"
@ -0,0 +1,39 @@
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request,
# surfacing known-vulnerable versions of the packages declared or updated in the PR.
# Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable
# packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency review'
on:
  pull_request:
    branches: [ "master" ]

# If using a dependency submission action in this workflow this permission will need to be set to:
#
# permissions:
#   contents: write
#
# https://docs.github.com/en/enterprise-cloud@latest/code-security/supply-chain-security/understanding-your-software-supply-chain/using-the-dependency-submission-api
permissions:
  contents: read
  # Write permissions for pull-requests are required for using the `comment-summary-in-pr` option; comment out if you aren't using this option
  pull-requests: write

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout repository'
        uses: actions/checkout@v4
      - name: 'Dependency Review'
        uses: actions/dependency-review-action@v4
        # Commonly enabled options; see https://github.com/actions/dependency-review-action#configuration-options for all available options.
        with:
          comment-summary-in-pr: always
          # fail-on-severity: moderate
          # deny-licenses: GPL-1.0-or-later, LGPL-2.0-or-later
          # retry-on-snapshot-warnings: true
@ -0,0 +1,18 @@
name: Docker Image CI

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Build the Docker image
        run: docker build . --file Dockerfile --tag my-image-name:$(date +%s)
@ -0,0 +1,16 @@
name: Documentation Links
on:
  pull_request_target:
    types:
      - opened

permissions:
  pull-requests: write

jobs:
  documentation-links:
    runs-on: ubuntu-latest
    steps:
      - uses: readthedocs/actions/preview@v1
        with:
          project-slug: "swarms"
@ -0,0 +1,24 @@
name: Documentation
on:
  push:
    branches:
      - master
      - main
      - develop
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: 3.11
      - run: pip install mkdocs-material
      - run: pip install mkdocs-glightbox
      - run: pip install "mkdocstrings[python]"
      - run: pip3 install mkdocs-git-authors-plugin
      - run: pip install mkdocs-jupyter==0.16.0
      - run: pip install --upgrade lxml_html_clean
      - run: pip install mkdocs-git-committers-plugin
      - run: pip3 install mkdocs-git-revision-date-localized-plugin
      - run: mkdocs gh-deploy --force -f docs/mkdocs.yml
@ -0,0 +1,20 @@
---
# This workflow will triage pull requests and apply a label based on the
# paths that are modified in the pull request.
#
# To use this workflow, you will need to set up a .github/labeler.yml
# file with configuration. For more information, see:
# https://github.com/actions/labeler

name: Labeler
on: [pull_request_target]
jobs:
  label:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: write
    steps:
      - uses: actions/labeler@v5
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
@ -0,0 +1,33 @@
---
name: Lint
on: [push, pull_request] # yamllint disable-line rule:truthy
jobs:
  yaml-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
      - run: pip install yamllint
      - run: yamllint .
  flake8-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
      - run: pip install flake8
      - run: flake8 .
  ruff-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
      - run: pip install ruff
      # Check-only mode: fixing files in the runner would make the job pass vacuously
      - run: ruff format --check .
      - run: ruff check .
  pylint-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
      - run: pip install pylint
      - run: pylint swarms --recursive=y
@ -0,0 +1,50 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# This workflow integrates Python Static Analyzer (Pysa) with
# GitHub's Code Scanning feature.
#
# Python Static Analyzer (Pysa) is a security-focused static
# analysis tool that tracks flows of data from where they
# originate to where they terminate in a dangerous location.
#
# See https://pyre-check.org/docs/pysa-basics/

name: Pysa

on:
  workflow_dispatch:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]
  schedule:
    - cron: '43 5 * * 3'

permissions:
  contents: read

jobs:
  pysa:
    permissions:
      actions: read
      contents: read
      security-events: write

    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true

      - name: Run Pysa
        uses: facebook/pysa-action@f46a63777e59268613bd6e2ff4e29f144ca9e88b
        with:
          # To customize these inputs:
          # See https://github.com/facebook/pysa-action#inputs
          repo-directory: './'
          requirements-path: 'requirements.txt'
          infer-types: true
          include-default-sapp-filters: true
@ -0,0 +1,34 @@
name: Python Package using Conda

on: [push]

jobs:
  build-linux:
    runs-on: ubuntu-latest
    strategy:
      max-parallel: 5

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'
      - name: Add conda to system path
        run: |
          # $CONDA is an environment variable pointing to the root of the miniconda directory
          echo $CONDA/bin >> $GITHUB_PATH
      - name: Install dependencies
        run: |
          conda env update --file environment.yml --name base
      - name: Lint with flake8
        run: |
          conda install flake8
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
      - name: Test with pytest
        run: |
          conda install pytest
          pytest
@ -0,0 +1,42 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python

# [ ] TODO [pep 458](https://blog.pypi.org/posts/2024-11-14-pypi-now-supports-digital-attestations/)

name: Python package

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.10", "3.11", "3.12"]

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install flake8 pytest
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Lint with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
      - name: Test with pytest
        run: |
          pytest
@ -0,0 +1,49 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# This workflow file requires a free account on Semgrep.dev to
# manage rules, file ignores, notifications, and more.
#
# See https://semgrep.dev/docs

name: Semgrep

on:
  push:
    branches: [ "master" ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ "master" ]
  schedule:
    - cron: '19 7 * * 3'

permissions:
  contents: read

jobs:
  semgrep:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
      actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
    name: Scan
    runs-on: ubuntu-latest
    steps:
      # Checkout project source
      - uses: actions/checkout@v4

      # Scan code using project's configuration on https://semgrep.dev/manage
      - uses: returntocorp/semgrep-action@713efdd345f3035192eaa63f56867b88e63e4e5d
        with:
          publishToken: ${{ secrets.SEMGREP_APP_TOKEN }}
          publishDeployment: ${{ secrets.SEMGREP_DEPLOYMENT_ID }}
          generateSarif: "1"

      # Upload SARIF file generated in previous step
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: semgrep.sarif
        if: always()
@ -0,0 +1,49 @@
---
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale
name: Stale
on:
  schedule:
    # Scheduled to run at 00:00 UTC every day
    - cron: "0 0 * * *"
jobs:
  stale:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - uses: actions/stale@v9
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          days-before-issue-stale: 14
          days-before-issue-close: 14
          stale-issue-label: "status:stale"
          close-issue-reason: not_planned
          any-of-labels: "status:awaiting user response,status:more data needed"
          stale-issue-message: >
            Marking this issue as stale since it has been open for 14 days with no
            activity. This issue will be closed if no further activity occurs.

          close-issue-message: >
            This issue was closed because it has been inactive for 28 days. Please
            post a new issue if you need further assistance. Thanks!

          days-before-pr-stale: 14
          days-before-pr-close: 14
          stale-pr-label: "status:stale"
          stale-pr-message: >
            Marking this pull request as stale since it has been open for 14 days
            with no activity. This PR will be closed if no further activity occurs.

          close-pr-message: >
            This pull request was closed because it has been inactive for 28 days.
            Please open a new pull request if you need further assistance. Thanks!

          # Label that can be assigned to issues to exclude them from being marked as stale
          exempt-issue-labels: "override-stale"
          # Label that can be assigned to PRs to exclude them from being marked as stale
          exempt-pr-labels: "override-stale"
@ -0,0 +1,60 @@
name: Tests
on:
  push:
  schedule:
    - cron: "0 0 * * *"
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Python
        uses: actions/setup-python@v5
      - name: Install Poetry
        uses: snok/install-poetry@v1
      - name: Setup a local virtual environment
        run: |
          poetry config virtualenvs.create true --local
          poetry config virtualenvs.in-project true --local
      - uses: actions/cache@v4
        name: Define a cache for the virtual environment
        with:
          path: ./.venv
          key: venv-${{ hashFiles('poetry.lock') }}
      - name: Install the project dependencies
        run: poetry install
      - name: Install OpenCV
        run: sudo apt-get install python3-opencv
      - name: Enter the virtual environment
        run: source $VENV
      - name: Run the tests
        run: poetry run pytest --verbose
  run-examples:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Python
        uses: actions/setup-python@v5
      - name: Install Poetry
        uses: snok/install-poetry@v1
      - name: Setup a local virtual environment
        run: |
          poetry config virtualenvs.create true --local
          poetry config virtualenvs.in-project true --local
      - uses: actions/cache@v4
        name: Define a cache for the virtual environment
        with:
          path: ./.venv
          key: venv-${{ hashFiles('poetry.lock') }}
      - name: Install the project dependencies
        run: poetry install
      - name: Install OpenCV
        run: sudo apt-get install python3-opencv
      - name: Enter the virtual environment
        run: source $VENV
      - name: Make Script Executable and Run
        run: |-
          chmod +x ./scripts/run_examples.sh
          ./scripts/run_examples.sh
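
# Local equivalent (a sketch of the same steps outside CI; assumes Poetry is installed):
#   poetry install
#   poetry run pytest --verbose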
@ -0,0 +1,48 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: trivy

on:
  push:
    branches: [ "master" ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ "master" ]
  schedule:
    - cron: '31 0 * * 5'

permissions:
  contents: read

jobs:
  build:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
      actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
    name: Build
    runs-on: "ubuntu-20.04"
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Build an image from Dockerfile
        run: |
          docker build -t docker.io/my-organization/my-app:${{ github.sha }} .

      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@18f2510ee396bbf400402947b394f2dd8c87dbb0
        with:
          image-ref: 'docker.io/my-organization/my-app:${{ github.sha }}'
          format: 'template'
          template: '@/contrib/sarif.tpl'
          output: 'trivy-results.sarif'
          severity: 'CRITICAL,HIGH'

      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: 'trivy-results.sarif'
@ -0,0 +1,22 @@
---
name: Welcome
on:
  issues:
    types: [opened]
  pull_request_target:
    types: [opened]
jobs:
  build:
    name: 👋 Welcome
    permissions: write-all
    runs-on: ubuntu-latest
    steps:
      - uses: actions/first-interaction@v1.3.0
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          issue-message:
            "Hello there, thank you for opening an issue! 🙏🏻 The team has
            been notified and will get back to you as soon as possible."
          pr-message:
            "Hello there, thank you for opening a PR! 🙏🏻 The team has been
            notified and will get back to you as soon as possible."
@ -0,0 +1,404 @@
__pycache__/
.venv/

.env

image/
audio/
video/
artifacts_three
dataframe/
.ruff_cache
.pytest_cache
static/generated
runs
Financial-Analysis-Agent_state.json
experimental
artifacts_five
encryption
errors
chroma
agent_workspace
.pt
Accounting Assistant_state.json
Unit Testing Agent_state.json
sec_agent
Devin_state.json
poetry.lock
hire_researchers
json_logs
Medical Image Diagnostic Agent_state.json
flight agent_state.json
D_state.json
artifacts_six
artifacts_seven
swarms/__pycache__
artifacts_once
transcript_generator.json
venv
.DS_Store
Cargo.lock
.DS_STORE
artifacts_logs
Medical Treatment Recommendation Agent_state.json
swarms/agents/.DS_Store
artifacts_two
logs
T_state.json
_build
conversation.txt
t1_state.json
stderr_log.txt
t2_state.json
.vscode

# Byte-compiled / optimized / DLL files
Transcript Generator_state.json
__pycache__/
*.py[cod]
*$py.class
.grit
swarm-worker-01_state.json
error.txt
Devin Worker 2_state.json

# C extensions
*.so

errors.txt

Autonomous-Agent-XYZ1B_state.json

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py
.DS_Store

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.vscode/settings.json
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*

# Org-mode
.org-id-locations
*_archive

# flymake-mode
*_flymake.*

# eshell files
/eshell/history
/eshell/lastdir

# elpa packages
/elpa/

# reftex files
*.rel

# AUCTeX auto folder
/auto/

# cask packages
.cask/
dist/

# Flycheck
flycheck_*.el

# server auth directory
/server/

# projectiles files
.projectile

# directory configuration
.dir-locals.el

# network security
/network-security.data
@ -0,0 +1,18 @@
repos:
  - repo: https://github.com/ambv/black
    rev: 22.3.0
    hooks:
      - id: black
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    rev: 'v0.0.255'
    hooks:
      - id: ruff
        args: [--unsafe-fixes]
  - repo: https://github.com/nbQA-dev/nbQA
    rev: 1.6.3
    hooks:
      - id: nbqa-black
        additional_dependencies: [ipython==8.12, black]
      - id: nbqa-ruff
        args: ["--ignore=I001"]
        additional_dependencies: [ipython==8.12, ruff]
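
# Local usage (standard pre-commit commands, noted here for convenience):
#   pre-commit install          # run the hooks automatically on each commit
#   pre-commit run --all-files  # run the hooks once across the whole repo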
@ -0,0 +1,128 @@
|
|||||||
|
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
kye@apac.ai.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
@@ -0,0 +1,238 @@
# Contribution Guidelines

---

## Table of Contents

- [Project Overview](#project-overview)
- [Getting Started](#getting-started)
  - [Installation](#installation)
  - [Project Structure](#project-structure)
- [How to Contribute](#how-to-contribute)
  - [Reporting Issues](#reporting-issues)
  - [Submitting Pull Requests](#submitting-pull-requests)
- [Coding Standards](#coding-standards)
  - [Type Annotations](#type-annotations)
  - [Docstrings and Documentation](#docstrings-and-documentation)
  - [Testing](#testing)
  - [Code Style](#code-style)
- [Areas Needing Contributions](#areas-needing-contributions)
  - [Writing Tests](#writing-tests)
  - [Improving Documentation](#improving-documentation)
  - [Creating Multi-Agent Orchestration Methods](#creating-multi-agent-orchestration-methods)
- [Community and Support](#community-and-support)
- [License](#license)

---

## Project Overview

**swarms** is a library focused on making it simple to orchestrate agents to automate real-world activities. The goal is to automate the world economy with these swarms of agents.

We need your help to:

- **Write Tests**: Ensure the reliability and correctness of the codebase.
- **Improve Documentation**: Maintain clear and comprehensive documentation.
- **Add New Orchestration Methods**: Build new multi-agent orchestration methods.
- **Remove Defunct Code**: Clean out dead, broken, or redundant code.

Your contributions will help us push the boundaries of AI and make this library a valuable resource for the community.

---

## Getting Started

### Installation

You can install swarms using `pip`:

```bash
pip3 install swarms
```

Alternatively, you can clone the repository:

```bash
git clone https://github.com/kyegomez/swarms
```
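
If you plan to develop against your clone, an editable install is a common next step. A minimal sketch, assuming the repository is pip-installable from source:

```bash
cd swarms
pip3 install -e .
```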

### Project Structure

- **`swarms/`**: Contains all the source code for the library.
- **`examples/`**: Includes example scripts and notebooks demonstrating how to use the library.
- **`tests/`**: (To be created) Will contain unit tests for the library.
- **`docs/`**: (To be maintained) Contains documentation files.

---

## How to Contribute

### Reporting Issues

If you find any bugs, inconsistencies, or have suggestions for enhancements, please open an issue on GitHub:

1. **Search Existing Issues**: Before opening a new issue, check if it has already been reported.
2. **Open a New Issue**: If it hasn't been reported, create a new issue and provide detailed information.
   - **Title**: A concise summary of the issue.
   - **Description**: Detailed description, steps to reproduce, expected behavior, and any relevant logs or screenshots.
3. **Label Appropriately**: Use labels to categorize the issue (e.g., bug, enhancement, documentation).

### Submitting Pull Requests

We welcome pull requests (PRs) for bug fixes, improvements, and new features. Please follow these guidelines:

1. **Fork the Repository**: Create a personal fork of the repository on GitHub.
2. **Clone Your Fork**: Clone your forked repository (not the upstream repo) to your local machine.

   ```bash
   git clone https://github.com/<your-username>/swarms.git
   ```

3. **Create a New Branch**: Use a descriptive branch name.

   ```bash
   git checkout -b feature/your-feature-name
   ```

4. **Make Your Changes**: Implement your code, ensuring it adheres to the coding standards.
5. **Add Tests**: Write tests to cover your changes.
6. **Commit Your Changes**: Write clear and concise commit messages.

   ```bash
   git commit -am "Add feature X"
   ```

7. **Push to Your Fork**:

   ```bash
   git push origin feature/your-feature-name
   ```

8. **Create a Pull Request**:

   - Go to the original repository on GitHub.
   - Click on "New Pull Request".
   - Select your branch and create the PR.
   - Provide a clear description of your changes and reference any related issues.

9. **Respond to Feedback**: Be prepared to make changes based on code reviews.

**Note**: It's recommended to create small and focused PRs for easier review and faster integration.
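
**Tip**: The repository ships a `.pre-commit-config.yaml` (black, ruff, and nbQA hooks), so running the hooks locally before pushing helps your PR pass review. A typical setup:

```bash
pip install pre-commit
pre-commit install          # run the hooks automatically on each commit
pre-commit run --all-files  # or run them once across the whole repo
```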

---

## Coding Standards

To maintain code quality and consistency, please adhere to the following standards.

### Type Annotations

- **Mandatory**: All functions and methods must have type annotations.
- **Example**:

  ```python
  def add_numbers(a: int, b: int) -> int:
      return a + b
  ```

- **Benefits**:
  - Improves code readability.
  - Helps with static type checking tools.
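
To get those benefits in practice, annotated code can be checked statically. A typical invocation, assuming `mypy` as the checker (any static type checker works):

```bash
pip install mypy
mypy swarms/
```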

### Docstrings and Documentation

- **Docstrings**: Every public class, function, and method must have a docstring following the [Google Python Style Guide](http://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) or [NumPy Docstring Standard](https://numpydoc.readthedocs.io/en/latest/format.html).
- **Content**:
  - **Description**: Briefly describe what the function or class does.
  - **Args**: List and describe each parameter.
  - **Returns**: Describe the return value(s).
  - **Raises**: List any exceptions that are raised.

- **Example**:

  ```python
  from typing import List


  def calculate_mean(values: List[float]) -> float:
      """
      Calculates the mean of a list of numbers.

      Args:
          values (List[float]): A list of numerical values.

      Returns:
          float: The mean of the input values.

      Raises:
          ValueError: If the input list is empty.
      """
      if not values:
          raise ValueError("The input list is empty.")
      return sum(values) / len(values)
  ```

- **Documentation**: Update or create documentation pages if your changes affect the public API.

### Testing

- **Required**: All new features and bug fixes must include appropriate unit tests.
- **Framework**: Use `unittest`, `pytest`, or a similar testing framework.
- **Test Location**: Place tests in the `tests/` directory, mirroring the structure of `swarms/`.
- **Test Coverage**: Aim for high test coverage to ensure code reliability.
- **Running Tests**: Provide instructions for running tests.

  ```bash
  pytest tests/
  ```
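
For orientation, a minimal sketch of what such a test module might look like, reusing the `calculate_mean` example above; the `swarms.utils` import path is an assumption, so adjust it to wherever the function actually lives:

```python
# tests/test_calculate_mean.py -- illustrative only
import pytest

from swarms.utils import calculate_mean  # assumed location of the example function


def test_calculate_mean_returns_average() -> None:
    assert calculate_mean([1.0, 2.0, 3.0]) == 2.0


def test_calculate_mean_empty_list_raises() -> None:
    with pytest.raises(ValueError):
        calculate_mean([])
```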

### Code Style

- **PEP 8 Compliance**: Follow [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guidelines.
- **Linting Tools**: Use `flake8`, `black`, or `pylint` to check code style; typical invocations are shown below.
- **Consistency**: Maintain consistency with the existing codebase.
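
Typical invocations of the tools above (which tools the project standardizes on is a maintainer choice):

```bash
black swarms/ tests/    # auto-format
flake8 swarms/          # lint
pylint swarms/          # deeper static checks
```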

---

## Areas Needing Contributions

We have several areas where contributions are particularly welcome.

### Writing Tests

- **Goal**: Increase test coverage to ensure the library's robustness.
- **Tasks**:
  - Write unit tests for existing code in `swarms/`.
  - Identify edge cases and potential failure points.
  - Ensure tests are repeatable and independent.

### Improving Documentation

- **Goal**: Maintain clear and comprehensive documentation for users and developers.
- **Tasks**:
  - Update docstrings to reflect any changes.
  - Add examples and tutorials in the `examples/` directory.
  - Improve or expand the content in the `docs/` directory.

### Creating Multi-Agent Orchestration Methods

- **Goal**: Provide new multi-agent orchestration methods; a sketch of what one might look like follows below.
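
A deliberately simple sketch of the shape such a method might take; the `RoundRobinChat` name and the callable-agent interface are illustrative, not an existing swarms API:

```python
from typing import Callable, List


class RoundRobinChat:
    """Illustrative orchestration sketch: agents take turns extending a shared transcript."""

    def __init__(self, agents: List[Callable[[str], str]]) -> None:
        # Each agent is modeled as a callable mapping a prompt to a response.
        self.agents = agents

    def run(self, task: str, rounds: int = 2) -> str:
        transcript = task
        for _ in range(rounds):
            for agent in self.agents:
                # Append each agent's turn so later agents see the full history.
                transcript += "\n" + agent(transcript)
        return transcript
```

A real contribution would wrap actual swarms agents and handle errors and termination conditions, but the turn-taking loop is the core idea.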

---

## Community and Support

- **Communication**: Engage with the community by participating in discussions on issues and pull requests.
- **Respect**: Maintain a respectful and inclusive environment.
- **Feedback**: Be open to receiving and providing constructive feedback.

---

## License

By contributing to swarms, you agree that your contributions will be licensed under the repository's [LICENSE](LICENSE) (GNU AGPL-3.0).

---

Thank you for contributing to swarms! Your efforts help make this project better for everyone.

If you have any questions or need assistance, please feel free to open an issue or reach out to the maintainers.
@@ -0,0 +1,55 @@
# Use Python 3.11 slim-bullseye for smaller base image
FROM python:3.11-slim-bullseye AS builder

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1

# Set the working directory
WORKDIR /build

# Install only essential build dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    gcc \
    g++ \
    gfortran \
    && rm -rf /var/lib/apt/lists/*

# Install swarms packages
RUN pip install --no-cache-dir swarm-models swarms

# Production stage
FROM python:3.11-slim-bullseye

# Set secure environment variables
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    WORKSPACE_DIR="agent_workspace" \
    PATH="/app:${PATH}" \
    PYTHONPATH="/app:${PYTHONPATH}" \
    USER=swarms

# Create non-root user
RUN useradd -m -s /bin/bash -U $USER && \
    mkdir -p /app && \
    chown -R $USER:$USER /app

# Set working directory
WORKDIR /app

# Copy only necessary files from builder
COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin

# Copy application with correct permissions
COPY --chown=$USER:$USER . .

# Switch to non-root user
USER $USER

# Health check
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
    CMD python -c "import swarms; print('Health check passed')" || exit 1
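
# Example usage -- the image tag and environment variable below are illustrative,
# not part of this Dockerfile:
#   docker build -t swarms:latest .
#   docker run --rm -e OPENAI_API_KEY=<your-key> swarms:latest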
@@ -0,0 +1,661 @@
                    GNU AFFERO GENERAL PUBLIC LICENSE
                       Version 3, 19 November 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

                            Preamble

The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.

When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.

The precise terms and conditions for copying, distribution and
modification follow.

                       TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU Affero General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.

A "covered work" means either the unmodified Program or a work based
on the Program.

To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.

1. Source Code.

The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.

A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.

The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.

The Corresponding Source for a work in source code form is that
same work.

2. Basic Permissions.

All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

  a) The work must carry prominent notices stating that you modified
  it, and giving a relevant date.

  b) The work must carry prominent notices stating that it is
  released under this License and any conditions added under section
  7. This requirement modifies the requirement in section 4 to
  "keep intact all notices".

  c) You must license the entire work, as a whole, under this
  License to anyone who comes into possession of a copy. This
  License will therefore apply, along with any applicable section 7
  additional terms, to the whole of the work, and all its parts,
  regardless of how they are packaged. This License gives no
  permission to license the work in any other way, but it does not
  invalidate such permission if you have separately received it.

  d) If the work has interactive user interfaces, each must display
  Appropriate Legal Notices; however, if the Program has interactive
  interfaces that do not display Appropriate Legal Notices, your
  work need not make them do so.

A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

  a) Convey the object code in, or embodied in, a physical product
  (including a physical distribution medium), accompanied by the
  Corresponding Source fixed on a durable physical medium
  customarily used for software interchange.

  b) Convey the object code in, or embodied in, a physical product
  (including a physical distribution medium), accompanied by a
  written offer, valid for at least three years and valid for as
  long as you offer spare parts or customer support for that product
  model, to give anyone who possesses the object code either (1) a
  copy of the Corresponding Source for all the software in the
  product that is covered by this License, on a durable physical
  medium customarily used for software interchange, for a price no
  more than your reasonable cost of physically performing this
  conveying of source, or (2) access to copy the
  Corresponding Source from a network server at no charge.

  c) Convey individual copies of the object code with a copy of the
  written offer to provide the Corresponding Source. This
  alternative is allowed only occasionally and noncommercially, and
  only if you received the object code with such an offer, in accord
  with subsection 6b.

  d) Convey the object code by offering access from a designated
  place (gratis or for a charge), and offer equivalent access to the
  Corresponding Source in the same way through the same place at no
  further charge. You need not require recipients to copy the
  Corresponding Source along with the object code. If the place to
  copy the object code is a network server, the Corresponding Source
  may be on a different server (operated by you or a third party)
  that supports equivalent copying facilities, provided you maintain
  clear directions next to the object code saying where to find the
  Corresponding Source. Regardless of what server hosts the
  Corresponding Source, you remain obligated to ensure that it is
  available for as long as needed to satisfy these requirements.

  e) Convey the object code using peer-to-peer transmission, provided
  you inform other peers where the object code and Corresponding
  Source of the work are being offered to the general public at no
  charge under subsection 6d.

A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

  a) Disclaiming warranty or limiting liability differently from the
  terms of sections 15 and 16 of this License; or

  b) Requiring preservation of specified reasonable legal notices or
  author attributions in that material or in the Appropriate Legal
  Notices displayed by works containing it; or

  c) Prohibiting misrepresentation of the origin of that material, or
  requiring that modified versions of such material be marked in
  reasonable ways as different from the original version; or

  d) Limiting the use for publicity purposes of names of licensors or
  authors of the material; or

  e) Declining to grant rights under trademark law for use of some
  trade names, trademarks, or service marks; or

  f) Requiring indemnification of licensors and authors of that
  material by anyone who conveys the material (or modified versions of
  it) with contractual assumptions of liability to the recipient, for
  any liability that these contractual assumptions directly impose on
  those licensors and authors.

All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

13. Remote Network Interaction; Use with the GNU General Public License.

Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.

Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.

If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

Swarms provides multi-agent orchestration mechanisms to enable LLM agents to collaborate and work together
Copyright (C) <2025> <Kye Gomez Chairman of TGSC>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

# Security Policy

| Security Feature | Benefit | Description |
|------------------|---------|-------------|
| Environment Variables | Secure Configuration | Uses environment variables to manage sensitive configurations securely. |
| No Telemetry | Enhanced Privacy | Prioritizes user privacy by not collecting telemetry data. |
| Data Encryption | Data Protection | Encrypts sensitive data to protect it from unauthorized access. |
| Authentication | Access Control | Ensures that only authorized users can access the system. |
| Authorization | Fine-grained Access | Provides specific access rights to users based on roles and permissions. |
| Dependency Security | Reduced Vulnerabilities | Securely manages dependencies to prevent vulnerabilities. |
| Secure Installation | Integrity Assurance | Ensures the integrity of the software through verified sources and checksums. |
| Regular Updates | Ongoing Protection | Keeps the system secure by regularly updating to patch vulnerabilities. |
| Logging and Monitoring | Operational Oversight | Tracks system activity for security monitoring and anomaly detection. |
| Error Handling | Robust Security | Manages errors securely to prevent leakage of sensitive information. |
| Data Storage Security | Secure Data Handling | Stores data securely, ensuring confidentiality and integrity. |
| Data Transmission Security | Secure Data Transfer | Protects data during transit from eavesdropping and tampering. |
| Access Control Mechanisms | Restricted Access | Limits system access to authorized personnel only. |
| Vulnerability Management | Proactive Protection | Identifies and mitigates security vulnerabilities effectively. |
| Regulatory Compliance | Legal Conformity | Ensures that the system adheres to relevant legal and regulatory standards. |
| Security Audits | Verified Posture | Conducts periodic audits to identify and remediate security weaknesses. |
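
To illustrate the environment-variable pattern referenced in the table above, here is a minimal sketch (an illustrative example, not project code) using `python-dotenv`:

```python
import os

from dotenv import load_dotenv

# Load secrets from a local .env file that is excluded from version control
load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")
if api_key is None:
    raise EnvironmentError("OPENAI_API_KEY is not set")
```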

# Reporting a Vulnerability

If you discover a security vulnerability in any of the above versions, please report it immediately to our security team by sending an email to kye@apac.ai. We take security vulnerabilities seriously and appreciate your efforts in disclosing them responsibly.

Please provide detailed information on the vulnerability, including steps to reproduce, potential impact, and any known mitigations. Our security team will acknowledge receipt of your report within 24 hours and will provide regular updates on the progress of the investigation.

Once the vulnerability has been thoroughly assessed, we will take the necessary steps to address it. This may include releasing a security patch, issuing a security advisory, or implementing other appropriate mitigations.

We aim to respond to all vulnerability reports in a timely manner and work towards resolving them as quickly as possible. We thank you for your contribution to the security of our software.

Please note that any vulnerability reports that are not related to the specified versions or do not provide sufficient information may be declined.

# Example usage
from swarms.structs.agent_loader import AgentLoader

# Example agent configurations
agent_configs = [
    {
        "agent_name": "Financial-Analysis-Agent",
        "system_prompt": "You are a financial expert...",
        "model_name": "gpt-4o",
        "max_loops": 1,
        "autosave": True,
        "dashboard": False,
        "verbose": True,
        "dynamic_temperature": True,
        "saved_state_path": "finance_agent.json",
        "user_name": "swarms_corp",
        "retry_attempts": 3,
        "context_length": 200000,
        "return_step_meta": False,
        "output_type": "string",
        "streaming": False,
    }
]

# Initialize loader
loader = AgentLoader("agents/my_agents")

# Create agents in CSV format
loader.create_agents(agent_configs, file_type="csv")

# Or create agents in JSON format
loader.create_agents(agent_configs, file_type="json")

# Load agents from either format
agents_from_csv = loader.load_agents(file_type="csv")
agents_from_json = loader.load_agents(file_type="json")
print(agents_from_csv)

---
version: 2
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
mkdocs:
  configuration: docs/mkdocs.yml
python:
  install:
    - requirements: docs/requirements.txt

# Deploying Azure OpenAI in Production: A Comprehensive Guide

In today's fast-paced digital landscape, leveraging cutting-edge technologies has become essential for businesses to stay competitive and provide exceptional services to their customers. One such technology is Azure OpenAI, a powerful platform that allows developers to integrate advanced natural language processing (NLP) capabilities into their applications. Whether you're building a chatbot, a content generation system, or any other AI-powered solution, Azure OpenAI offers a robust and scalable foundation for production-grade deployment.

In this comprehensive guide, we'll walk through the process of setting up and deploying Azure OpenAI in a production environment. We'll dive deep into the code, provide clear explanations, and share best practices to ensure a smooth and successful implementation.

## Prerequisites:
Before we begin, it's essential to have the following prerequisites in place:

1. **Python**: You'll need to have Python installed on your system. This guide assumes you're using Python 3.6 or later.
2. **Azure Subscription**: You'll need an active Azure subscription to access Azure OpenAI services.
3. **Azure OpenAI Resource**: Create an Azure OpenAI resource in your Azure subscription.
4. **Python Packages**: Install the required Python packages, including `python-dotenv` and `swarms`.

## Setting up the Environment:
To kick things off, we'll set up our development environment and install the necessary dependencies.

1. **Create a Virtual Environment**: It's a best practice to create a virtual environment to isolate your project dependencies from the rest of your system. You can create a virtual environment using `venv` or any other virtual environment management tool of your choice.

```
python -m venv myenv
```

2. **Activate the Virtual Environment**: Activate the virtual environment to ensure that any packages you install are isolated within the environment.

```
source myenv/bin/activate  # On Windows, use `myenv\Scripts\activate`
```

3. **Install Required Packages**: Install the `python-dotenv` and `swarms` packages using pip.

```
pip install python-dotenv swarms
```

4. **Create a `.env` File**: In the root directory of your project, create a new file called `.env`. This file will store your Azure OpenAI credentials and configuration settings.

```
AZURE_OPENAI_ENDPOINT=<your_azure_openai_endpoint>
AZURE_OPENAI_DEPLOYMENT=<your_azure_openai_deployment_name>
OPENAI_API_VERSION=<your_openai_api_version>
AZURE_OPENAI_API_KEY=<your_azure_openai_api_key>
AZURE_OPENAI_AD_TOKEN=<your_azure_openai_ad_token>
```

Replace the placeholders with your actual Azure OpenAI credentials and configuration settings.

## Connecting to Azure OpenAI:
Now that we've set up our environment, let's dive into the code that connects to Azure OpenAI and interacts with the language model.

```python
import os
from dotenv import load_dotenv
from swarms import AzureOpenAI

# Load the environment variables
load_dotenv()

# Create an instance of the AzureOpenAI class
model = AzureOpenAI(
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    deployment_name=os.getenv("AZURE_OPENAI_DEPLOYMENT"),
    openai_api_version=os.getenv("OPENAI_API_VERSION"),
    openai_api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    azure_ad_token=os.getenv("AZURE_OPENAI_AD_TOKEN")
)
```

## Let's break down this code:

1. **Import Statements**: We import the necessary modules, including `os` for interacting with the operating system, `load_dotenv` from `python-dotenv` to load environment variables, and `AzureOpenAI` from `swarms` to interact with the Azure OpenAI service.

2. **Load Environment Variables**: We use `load_dotenv()` to load the environment variables stored in the `.env` file we created earlier.

3. **Create AzureOpenAI Instance**: We create an instance of the `AzureOpenAI` class by passing in the required configuration parameters (a fail-fast check for these settings is sketched below):
   - `azure_endpoint`: The endpoint URL for your Azure OpenAI resource.
   - `deployment_name`: The name of the deployment you want to use.
   - `openai_api_version`: The version of the OpenAI API you want to use.
   - `openai_api_key`: Your Azure OpenAI API key, which authenticates your requests.
   - `azure_ad_token`: An optional Azure Active Directory (AAD) token for additional security.
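
Since every one of these settings comes from the environment, a missing or misspelled variable usually surfaces later as an opaque authentication error. Below is a minimal fail-fast sketch (not part of the original setup, and assuming the same variable names as the `.env` file above):

```python
import os

# Settings the AzureOpenAI client above reads from the environment.
# AZURE_OPENAI_AD_TOKEN is optional when authenticating with an API key.
required_settings = [
    "AZURE_OPENAI_ENDPOINT",
    "AZURE_OPENAI_DEPLOYMENT",
    "OPENAI_API_VERSION",
    "AZURE_OPENAI_API_KEY",
]

missing = [name for name in required_settings if not os.getenv(name)]
if missing:
    raise EnvironmentError(
        f"Missing required Azure OpenAI settings: {', '.join(missing)}"
    )
```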

## Querying the Language Model:
With our connection to Azure OpenAI established, we can now query the language model and receive responses.

```python
# Define the prompt
prompt = "Analyze this load document and assess it for any risks and create a table in markdown format."

# Generate a response
response = model(prompt)
print(response)
```

## Here's what's happening:

1. **Define the Prompt**: We define a prompt, which is the input text or question we want to feed into the language model.

2. **Generate a Response**: We call the `model` instance with the `prompt` as an argument. This triggers the Azure OpenAI service to process the prompt and generate a response.

3. **Print the Response**: Finally, we print the response received from the language model.

## Running the Code:
To run the code, save it in a Python file (e.g., `main.py`) and execute it from the command line:

```
python main.py
```

## Best Practices for Production Deployment:
While the provided code serves as a basic example, there are several best practices to consider when deploying Azure OpenAI in a production environment:

1. **Secure Credentials Management**: Instead of storing sensitive credentials like API keys in your codebase, consider using secure storage solutions like Azure Key Vault or environment variables managed by your cloud provider.

2. **Error Handling and Retries**: Implement robust error handling and retry mechanisms to handle potential failures or rate-limiting scenarios (see the sketch after this list).

3. **Logging and Monitoring**: Implement comprehensive logging and monitoring strategies to track application performance, identify issues, and gather insights for optimization.

4. **Scalability and Load Testing**: Conduct load testing to ensure your application can handle anticipated traffic volumes and scale appropriately based on demand.

5. **Caching and Optimization**: Explore caching strategies and performance optimizations to improve response times and reduce the load on the Azure OpenAI service.

6. **Integration with Other Services**: Depending on your use case, you may need to integrate Azure OpenAI with other Azure services or third-party tools for tasks like data processing, storage, or analysis.

7. **Compliance and Security**: Ensure your application adheres to relevant compliance standards and security best practices, especially when handling sensitive data.
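
To make point 2 concrete, here is a minimal retry sketch using plain Python with exponential backoff. It assumes the `model` instance from earlier; in production you would likely use a dedicated library (such as `tenacity`) and retry only on transient errors like timeouts or rate limits, rather than on every exception.

```python
import time

def query_with_retries(model, prompt, max_attempts=3, base_delay=1.0):
    """Call the model, retrying with exponential backoff on failure."""
    for attempt in range(1, max_attempts + 1):
        try:
            return model(prompt)
        except Exception as exc:  # narrow this to transient errors in production
            if attempt == max_attempts:
                raise
            delay = base_delay * 2 ** (attempt - 1)
            print(f"Attempt {attempt} failed ({exc}); retrying in {delay:.0f}s")
            time.sleep(delay)

response = query_with_retries(model, "Summarize the key risks in this document.")
```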

## Conclusion:
Azure OpenAI is a powerful platform that enables developers to integrate advanced natural language processing capabilities into their applications. By following the steps outlined in this guide, you can set up a production-ready environment for deploying Azure OpenAI and start leveraging its capabilities in your projects.

Remember, this guide serves as a starting point, and there are numerous additional features and capabilities within Azure OpenAI that you can explore to enhance your applications further. As with any production deployment, it's crucial to follow best practices, conduct thorough testing, and implement robust monitoring and security measures.

With the right approach and careful planning, you can successfully deploy Azure OpenAI in a production environment and unlock the power of cutting-edge language models to drive innovation and provide exceptional experiences for your users.

## Building Analyst Agents with Swarms to write Business Reports

> Jupyter Notebook accompanying this post is accessible at: [Business Analyst Agent Notebook](https://github.com/kyegomez/swarms/blob/master/examples/demos/business_analysis_swarm/business-analyst-agent.ipynb)

Solving a business problem often involves preparing a Business Case Report. This report comprehensively analyzes the problem, evaluates potential solutions, and provides evidence-based recommendations and an implementation plan to effectively address the issue and drive business value. While preparing one requires an experienced business analyst, the workflow can be augmented using AI agents. Two steps stand out as strong candidates for augmentation:

- Developing an outline to solve the problem
- Doing background research and gathering data

In this post, we will explore how Swarms agents can be used to tackle a business problem by outlining the solution, conducting background research and generating a preliminary report.

Before we proceed, this blog uses 3 API tools. Please obtain the following keys and store them in a `.env` file in the same folder as this file.

- **[OpenAI API](https://openai.com/blog/openai-api)** as `OPENAI_API_KEY`
- **[TavilyAI API](https://app.tavily.com/home)** as `TAVILY_API_KEY`
- **[KayAI API](https://www.kay.ai/)** as `KAY_API_KEY`

```python
import dotenv
dotenv.load_dotenv()  # Load environment variables from .env file
```

### Developing an Outline to solve the problem

Assume the business problem is: **How do we improve Nike's revenue in Q3 2024?** We first create a planning agent to break down the problem into dependent sub-problems.

#### Step 1. Defining the Data Model and Tool Schema

Using Pydantic, we define a structure to help the agent generate sub-problems.

- **QueryType:** Questions are either standalone or involve a combination of multiple others
- **Query:** Defines the structure of a question.
- **QueryPlan:** Allows generation of a dependency graph of sub-questions

```python
import enum
from typing import List
from pydantic import Field, BaseModel

class QueryType(str, enum.Enum):
    """Enumeration representing the types of queries that can be asked to a question answer system."""

    SINGLE_QUESTION = "SINGLE"
    MERGE_MULTIPLE_RESPONSES = "MERGE_MULTIPLE_RESPONSES"

class Query(BaseModel):
    """Class representing a single question in a query plan."""

    id: int = Field(..., description="Unique id of the query")
    question: str = Field(
        ...,
        description="Question asked using a question answering system",
    )
    dependencies: List[int] = Field(
        default_factory=list,
        description="List of sub questions that need to be answered before asking this question",
    )
    node_type: QueryType = Field(
        default=QueryType.SINGLE_QUESTION,
        description="Type of question, either a single question or a multi-question merge",
    )

class QueryPlan(BaseModel):
    """Container class representing a tree of questions to ask a question answering system."""

    query_graph: List[Query] = Field(
        ..., description="The query graph representing the plan"
    )

    def _dependencies(self, ids: List[int]) -> List[Query]:
        """Returns the dependencies of a query given their ids."""

        return [q for q in self.query_graph if q.id in ids]
```
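
As a quick illustration (not from the original notebook) of how the dependency graph resolves, we can build a tiny two-question plan and look up a question's prerequisites with the `_dependencies` helper defined above:

```python
plan = QueryPlan(
    query_graph=[
        Query(id=1, question="What is Nike's current revenue trend?"),
        Query(
            id=2,
            question="How do we improve Nike's revenue in Q3 2024?",
            dependencies=[1],
            node_type=QueryType.MERGE_MULTIPLE_RESPONSES,
        ),
    ]
)

parent = plan.query_graph[1]  # the Q3 2024 question
for q in plan._dependencies(parent.dependencies):
    print(q.id, q.question)
# -> 1 What is Nike's current revenue trend?
```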

Also, a `tool_schema` needs to be defined. It is an instance of `QueryPlan` and is used to initialize the agent.

```python
tool_schema = QueryPlan(
    query_graph=[query.dict() for query in [
        Query(
            id=1,
            question="How do we improve Nike's revenue in Q3 2024?",
            dependencies=[2],
            node_type=QueryType('SINGLE')
        ),
        # ... other queries ...
    ]]
)
```

#### Step 2. Defining the Planning Agent

We specify the query, the task specification, and an appropriate system prompt.

```python
from swarm_models import OpenAIChat
from swarms import Agent

query = "How do we improve Nike's revenue in Q3 2024?"
task = f"Consider: {query}. Generate just the correct query plan in JSON format."
system_prompt = (
    "You are a world class query planning algorithm "
    "capable of breaking apart questions into its "
    "dependency queries such that the answers can be "
    "used to inform the parent question. Do not answer "
    "the questions, simply provide a correct compute "
    "graph with good specific questions to ask and relevant "
    "dependencies. Before you call the function, think "
    "step-by-step to get a better understanding of the problem."
)
llm = OpenAIChat(
    temperature=0.0, model_name="gpt-4", max_tokens=4000
)
```

Then, we proceed with the agent definition.

```python
# Initialize the agent
agent = Agent(
    agent_name="Query Planner",
    system_prompt=system_prompt,
    # Set the tool schema -- this is the key difference
    tool_schema=tool_schema,
    llm=llm,
    max_loops=1,
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    interactive=False,
    # Set the output type to the tool schema which is a BaseModel
    output_type=tool_schema,  # or dict, or str
    metadata_output_type="json",
    # List of schemas that the agent can handle
    list_base_models=[tool_schema],
    function_calling_format_type="OpenAI",
    function_calling_type="json",  # or soon yaml
)
```

#### Step 3. Obtaining Outline from Planning Agent

We now run the agent, and since its output is in JSON format, we can load it as a dictionary.

```python
generated_data = agent.run(task)
```

At times, the agent may return extra content besides the JSON. The function below filters it out.

```python
def process_json_output(content):
    # Find the index of the first occurrence of '```json\n'
    start_index = content.find('```json\n')
    if start_index == -1:
        # If '```json\n' is not found, return the original content
        return content
    # Return the part of the content after '```json\n' and remove the '```' at the end
    return content[start_index + len('```json\n'):].rstrip('`')

# Use the function to clean up the output
json_content = process_json_output(generated_data.content)

import json

# Load the JSON string into a Python object
json_object = json.loads(json_content)

# Convert the Python object back to a JSON string
json_content = json.dumps(json_object, indent=2)

# Print the JSON string
print(json_content)
```

Below is the output this produces.

```json
{
  "main_query": "How do we improve Nike's revenue in Q3 2024?",
  "sub_queries": [
    {
      "id": "1",
      "query": "What is Nike's current revenue trend?"
    },
    {
      "id": "2",
      "query": "What are the projected market trends for the sports apparel industry in 2024?"
    },
    {
      "id": "3",
      "query": "What are the current successful strategies being used by Nike's competitors?",
      "dependencies": [
        "2"
      ]
    },
    {
      "id": "4",
      "query": "What are the current and projected economic conditions in Nike's major markets?",
      "dependencies": [
        "2"
      ]
    },
    {
      "id": "5",
      "query": "What are the current consumer preferences in the sports apparel industry?",
      "dependencies": [
        "2"
      ]
    },
    {
      "id": "6",
      "query": "What are the potential areas of improvement in Nike's current business model?",
      "dependencies": [
        "1"
      ]
    },
    {
      "id": "7",
      "query": "What are the potential new markets for Nike to explore in 2024?",
      "dependencies": [
        "2",
        "4"
      ]
    },
    {
      "id": "8",
      "query": "What are the potential new products or services Nike could introduce in 2024?",
      "dependencies": [
        "5"
      ]
    },
    {
      "id": "9",
      "query": "What are the potential marketing strategies Nike could use to increase its revenue in Q3 2024?",
      "dependencies": [
        "3",
        "5",
        "7",
        "8"
      ]
    },
    {
      "id": "10",
      "query": "What are the potential cost-saving strategies Nike could implement to increase its net revenue in Q3 2024?",
      "dependencies": [
        "6"
      ]
    }
  ]
}
```

The JSON dictionary is not convenient for humans to process, so we make a directed graph out of it.

```python
import networkx as nx
import matplotlib.pyplot as plt
import textwrap
import random

# Create a directed graph
G = nx.DiGraph()

# Define a color map
color_map = {}

# Add nodes and edges to the graph
for sub_query in json_object['sub_queries']:
    # Check if 'dependencies' key exists in sub_query, if not, initialize it as an empty list
    if 'dependencies' not in sub_query:
        sub_query['dependencies'] = []
    # Assign a random color for each node
    color_map[sub_query['id']] = "#{:06x}".format(random.randint(0, 0xFFFFFF))
    G.add_node(sub_query['id'], label=textwrap.fill(sub_query['query'], width=20))
    for dependency in sub_query['dependencies']:
        G.add_edge(dependency, sub_query['id'])

# Draw the graph
pos = nx.spring_layout(G)
nx.draw(G, pos, with_labels=True, node_size=800, node_color=[color_map[node] for node in G.nodes()], node_shape="o", alpha=0.5, linewidths=40)

# Prepare labels for legend
labels = nx.get_node_attributes(G, 'label')
handles = [plt.Line2D([0], [0], marker='o', color=color_map[node], label=f"{node}: {label}", markersize=10, linestyle='None') for node, label in labels.items()]

# Create a legend
plt.legend(handles=handles, title="Queries", bbox_to_anchor=(1.05, 1), loc='upper left')

plt.show()
```

This produces the diagram below, which makes the plan much more convenient to understand.



### Doing Background Research and Gathering Data

At this point, we have solved the first half of the problem. We have an outline consisting of sub-problems to be tackled to solve our business problem. This will form the overall structure of our report. We now need to research information for each sub-problem in order to write an informed report. This is mechanically intensive and is the aspect that will benefit most from agentic intervention.

Essentially, we can spawn parallel agents to gather the data. Each agent will have 2 tools:

- Internet access
- Financial data retrieval

As they run in parallel, they will add their knowledge to a common long-term memory. We will then spawn a separate report-writing agent with access to this memory to generate our business case report.

#### Step 4. Defining Tools for Worker Agents

Let us first define the 2 tools.

```python
import os
from typing import List, Dict

from swarms import tool

os.environ['TAVILY_API_KEY'] = os.getenv('TAVILY_API_KEY')
os.environ["KAY_API_KEY"] = os.getenv('KAY_API_KEY')

from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.pydantic_v1 import BaseModel, Field

from kay.rag.retrievers import KayRetriever

def browser(query: str) -> str:
    """
    Search the query in the browser with the Tavily API tool.
    Args:
        query (str): The query to search in the browser.
    Returns:
        str: The search results
    """
    internet_search = TavilySearchResults()
    results = internet_search.invoke({"query": query})
    response = ''
    for result in results:
        response += (result['content'] + '\n')
    return response

def kay_retriever(query: str) -> str:
    """
    Search the financial data query with the KayAI API tool.
    Args:
        query (str): The query to search in the KayRetriever.
    Returns:
        str: The first context retrieved as a string.
    """
    # Initialize the retriever
    retriever = KayRetriever(dataset_id="company", data_types=["10-K", "10-Q", "8-K", "PressRelease"])
    # Query the retriever
    context = retriever.query(query=query, num_context=1)
    return context[0]['chunk_embed_text']
```

#### Step 5. Defining Long-Term Memory

As mentioned previously, the worker agents, running in parallel, will pool their knowledge into a common memory. Let us define that.

```python
import logging
import os
import uuid
from typing import Callable, List, Optional

import chromadb
import numpy as np
from dotenv import load_dotenv

from swarms.utils.data_to_text import data_to_text
from swarms.utils.markdown_message import display_markdown_message
from swarms_memory import AbstractVectorDatabase


# Results storage using local ChromaDB
class ChromaDB(AbstractVectorDatabase):
    """

    ChromaDB database

    Args:
        metric (str): The similarity metric to use.
        output (str): The name of the collection to store the results in.
        limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
        n_results (int, optional): The number of results to retrieve. Defaults to 2.

    Methods:
        add: Add a document to the collection.
        query: Query documents from the collection.

    Examples:
        >>> chromadb = ChromaDB(
        >>>     metric="cosine",
        >>>     output="results",
        >>>     llm="gpt3",
        >>>     openai_api_key=OPENAI_API_KEY,
        >>> )
        >>> chromadb.add(task, result, result_id)
    """

    def __init__(
        self,
        metric: str = "cosine",
        output_dir: str = "swarms",
        limit_tokens: Optional[int] = 1000,
        n_results: int = 3,
        embedding_function: Callable = None,
        docs_folder: str = None,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        self.metric = metric
        self.output_dir = output_dir
        self.limit_tokens = limit_tokens
        self.n_results = n_results
        self.docs_folder = docs_folder
        self.verbose = verbose

        # Raise ChromaDB logging to INFO when verbose
        if verbose:
            logging.getLogger("chromadb").setLevel(logging.INFO)

        # Create Chroma collection
        chroma_persist_dir = "chroma"
        chroma_client = chromadb.PersistentClient(
            settings=chromadb.config.Settings(
                persist_directory=chroma_persist_dir,
            ),
            *args,
            **kwargs,
        )

        # Embedding model
        if embedding_function:
            self.embedding_function = embedding_function
        else:
            self.embedding_function = None

        # Create ChromaDB client
        self.client = chromadb.Client()

        # Create Chroma collection
        self.collection = chroma_client.get_or_create_collection(
            name=output_dir,
            metadata={"hnsw:space": metric},
            embedding_function=self.embedding_function,
            # data_loader=self.data_loader,
            *args,
            **kwargs,
        )
        display_markdown_message(
            "ChromaDB collection created:"
            f" {self.collection.name} with metric: {self.metric} and"
            f" output directory: {self.output_dir}"
        )

        # If docs
        if docs_folder:
            display_markdown_message(
                f"Traversing directory: {docs_folder}"
            )
            self.traverse_directory()

    def add(
        self,
        document: str,
        *args,
        **kwargs,
    ):
        """
        Add a document to the ChromaDB collection.

        Args:
            document (str): The document to be added.
            condition (bool, optional): The condition to check before adding the document. Defaults to True.

        Returns:
            str: The ID of the added document.
        """
        try:
            doc_id = str(uuid.uuid4())
            self.collection.add(
                ids=[doc_id],
                documents=[document],
                *args,
                **kwargs,
            )
            print('-----------------')
            print("Document added successfully")
            print('-----------------')
            return doc_id
        except Exception as e:
            raise Exception(f"Failed to add document: {str(e)}")

    def query(
        self,
        query_text: str,
        *args,
        **kwargs,
    ):
        """
        Query documents from the ChromaDB collection.

        Args:
            query (str): The query string.
            n_docs (int, optional): The number of documents to retrieve. Defaults to 1.

        Returns:
            dict: The retrieved documents.
        """
        try:
            docs = self.collection.query(
                query_texts=[query_text],
                n_results=self.n_results,
                *args,
                **kwargs,
            )["documents"]
            return docs[0]
        except Exception as e:
            raise Exception(f"Failed to query documents: {str(e)}")

    def traverse_directory(self):
        """
        Traverse through every file in the given directory and its subdirectories,
        and return the paths of all files.
        Parameters:
        - directory_name (str): The name of the directory to traverse.
        Returns:
        - list: A list of paths to each file in the directory and its subdirectories.
        """
        added_to_db = False

        for root, dirs, files in os.walk(self.docs_folder):
            for file in files:
                file = os.path.join(self.docs_folder, file)
                _, ext = os.path.splitext(file)
                data = data_to_text(file)
                added_to_db = self.add([data])
                print(f"{file} added to Database")

        return added_to_db
```

We can now proceed to initialize the memory.

```python
from chromadb.utils import embedding_functions
default_ef = embedding_functions.DefaultEmbeddingFunction()

memory = ChromaDB(
    metric="cosine",
    n_results=3,
    output_dir="results",
    embedding_function=default_ef
)
```
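
As a quick smoke test (a sketch, not in the original notebook), you can round-trip a placeholder document through the memory before handing it to the agents, using the `add` and `query` methods defined above:

```python
# Store a placeholder note, then retrieve it by semantic similarity
memory.add("Example note: worker agents will store research findings here.")
print(memory.query("research findings"))
```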

#### Step 6. Defining Worker Agents

The Worker Agent subclasses the `Agent` class. The only difference between the two is in how the `run()` method works. In the `Agent` class, `run()` simply returns the set of tool commands to run, but does not execute them. We, however, want them executed. In addition, after we run our tools, we get the relevant information as output, which we want to add to our memory. To incorporate these 2 changes, we define `WorkerAgent` as follows.

```python
class WorkerAgent(Agent):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def run(self, task, *args, **kwargs):
        response = super().run(task, *args, **kwargs)
        print(response.content)

        json_dict = json.loads(process_json_output(response.content))

        # print(json.dumps(json_dict, indent=2))

        if response is not None:
            try:
                commands = json_dict["commands"]
            except KeyError:
                commands = [json_dict['command']]

            for command in commands:
                tool_name = command["name"]

                if tool_name not in ['browser', 'kay_retriever']:
                    continue

                query = command["args"]["query"]

                # Get the tool by its name
                tool = globals()[tool_name]
                tool_response = tool(query)

                # Add tool's output to long term memory
                self.long_term_memory.add(tool_response)
```

We can then instantiate an object of the `WorkerAgent` class.

```python
worker_agent = WorkerAgent(
    agent_name="Worker Agent",
    system_prompt=(
        "Autonomous agent that can interact with browser, "
        "financial data retriever and other agents. Be Helpful "
        "and Kind. Use the tools provided to assist the user. "
        "Generate the plan with list of commands in JSON format."
    ),
    llm=OpenAIChat(
        temperature=0.0, model_name="gpt-4", max_tokens=4000
    ),
    max_loops="auto",
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
    tools=[browser, kay_retriever],
    long_term_memory=memory,
    code_interpreter=True,
)
```

#### Step 7. Running the Worker Agents

At this point, we need to set up a concurrent workflow. While the order of adding tasks to the workflow doesn't matter (since they will all run concurrently when executed), we can take some time to define an order for these tasks. This order will come in handy later when writing the report using our Writer Agent.

The order we will follow is a Breadth First Traversal (BFT) of the sub-queries in the graph we made earlier (shown below again for reference). BFT makes sense here because we want all the dependent parent questions to be answered before answering a child question. Also, since we could have independent subgraphs, we will perform BFT separately on each subgraph.



Below is the code that produces the order of processing sub-queries.

```python
from collections import deque, defaultdict

# Define the graph nodes
nodes = json_object['sub_queries']

# Create a graph from the nodes
graph = defaultdict(list)
for node in nodes:
    for dependency in node['dependencies']:
        graph[dependency].append(node['id'])

# Find all nodes with no dependencies (potential starting points)
start_nodes = [node['id'] for node in nodes if not node['dependencies']]

# Adjust the BFT function to handle dependencies correctly
def bft_corrected(start, graph, nodes_info):
    visited = set()
    queue = deque([start])
    order = []

    while queue:
        node = queue.popleft()
        if node not in visited:
            # Check if all dependencies of the current node are visited
            node_dependencies = [n['id'] for n in nodes if n['id'] == node][0]
            dependencies_met = all(dep in visited for dep in nodes_info[node_dependencies]['dependencies'])

            if dependencies_met:
                visited.add(node)
                order.append(node)
                # Add only nodes to the queue whose dependencies are fully met
                for next_node in graph[node]:
                    if all(dep in visited for dep in nodes_info[next_node]['dependencies']):
                        queue.append(next_node)
            else:
                # Requeue the node to check dependencies later
                queue.append(node)

    return order

# Dictionary to access node information quickly
nodes_info = {node['id']: node for node in nodes}

# Perform BFT for each unvisited start node using the corrected BFS function
visited_global = set()
bfs_order = []

for start in start_nodes:
    if start not in visited_global:
        order = bft_corrected(start, graph, nodes_info)
        bfs_order.extend(order)
        visited_global.update(order)

print("BFT Order:", bfs_order)
```

This produces the following output.

```python
BFT Order: ['1', '6', '10', '2', '3', '4', '5', '7', '8', '9']
```

Now, let's define our `ConcurrentWorkflow` and run it.

```python
import os
from dotenv import load_dotenv
from swarms import Agent, ConcurrentWorkflow, OpenAIChat, Task

# Create a workflow
workflow = ConcurrentWorkflow(max_workers=5)
task_list = []

for node in bfs_order:
    sub_query = nodes_info[node]['query']
    task = Task(worker_agent, sub_query)
    print('-----------------')
    print("Added task: ", sub_query)
    print('-----------------')
    task_list.append(task)

workflow.add(tasks=task_list)

# Run the workflow
workflow.run()
```

Below is part of the output this workflow produces. We clearly see the thought process of the agent and the plan it came up with to solve a particular sub-query. In addition, we see the tool-calling schema it produces in `"command"`.

```python
...
...
content='\n{\n "thoughts": {\n "text": "To find out Nike\'s current revenue trend, I will use the financial data retriever tool to search for \'Nike revenue trend\'.",\n "reasoning": "The financial data retriever tool allows me to search for specific financial data, so I can look up the current revenue trend of Nike.", \n "plan": "Use the financial data retriever tool to search for \'Nike revenue trend\'. Parse the result to get the current revenue trend and format that into a readable report."\n },\n "command": {\n "name": "kay_retriever", \n "args": {\n "query": "Nike revenue trend"\n }\n }\n}\n```' response_metadata={'token_usage': {'completion_tokens': 152, 'prompt_tokens': 1527, 'total_tokens': 1679}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}
Saved agent state to: Worker Agent_state.json

{
  "thoughts": {
    "text": "To find out Nike's current revenue trend, I will use the financial data retriever tool to search for 'Nike revenue trend'.",
    "reasoning": "The financial data retriever tool allows me to search for specific financial data, so I can look up the current revenue trend of Nike.",
    "plan": "Use the financial data retriever tool to search for 'Nike revenue trend'. Parse the result to get the current revenue trend and format that into a readable report."
  },
  "command": {
    "name": "kay_retriever",
    "args": {
      "query": "Nike revenue trend"
    }
  }
}

-----------------
Document added successfully
-----------------
...
...
```

Here, `"name"` pertains to the name of the tool to be called and `"args"` contains the arguments to be passed to the tool call. As mentioned before, we modify `Agent`'s default behaviour in `WorkerAgent`. Hence, the tool call is executed here and its results (information from web pages and the Kay Retriever API) are added to long-term memory. We get confirmation of this from the message `Document added successfully`.
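
One design note on the dispatch step: `WorkerAgent` resolves tools via `globals()[tool_name]`, which works but looks names up in the whole module namespace. A small explicit registry (a sketch, not the library's API) keeps the set of callable tools in one place:

```python
# Explicit tool registry: only these callables can ever be dispatched
TOOLS = {"browser": browser, "kay_retriever": kay_retriever}

def dispatch(command: dict) -> str:
    """Execute a single tool command of the shape produced by the agent."""
    tool_fn = TOOLS.get(command["name"])
    if tool_fn is None:
        raise ValueError(f"Unknown tool: {command['name']}")
    return tool_fn(command["args"]["query"])
```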

#### Step 8. Generating the Report using a Writer Agent

At this point, our Worker Agents have gathered all the background information required to generate the report. We have also defined a coherent structure for the report, namely the BFT order of the sub-queries. Now it's time to define a Writer Agent and call it sequentially in the order of sub-queries.

```python
from swarms import Agent, OpenAIChat, tool

agent = Agent(
    agent_name="Writer Agent",
    agent_description=(
        "This agent writes reports based on information in long-term memory"
    ),
    system_prompt=(
        "You are a world-class financial report writer. "
        "Write analytical and accurate responses using memory to answer the query. "
        "Do not mention use of long-term memory in the report. "
        "Do not mention Writer Agent in response. "
        "Return only response content in strict markdown format."
    ),
    llm=OpenAIChat(temperature=0.2, model='gpt-3.5-turbo'),
    max_loops=1,
    autosave=True,
    verbose=True,
    long_term_memory=memory,
)
```

The individual sections of the report will be collected in a list.

```python
report = []
```

Let us now run the writer agent.

```python
for node in bfs_order:
    sub_query = nodes_info[node]['query']
    print("Running task: ", sub_query)
    out = agent.run(f"Consider: {sub_query}. Write response in strict markdown format using long-term memory. Do not mention Writer Agent in response.")
    print(out)
    try:
        report.append(out.content)
    except AttributeError:
        pass
```

Now, we need to clean up the report a bit to make it render professionally.

```python
# Remove any content before the first "#" as that signals start of heading
# Anything before this usually contains filler content
stripped_report = [entry[entry.find('#'):] if '#' in entry else entry for entry in report]
report = stripped_report

# At times the LLM outputs \\n instead of \n
cleaned_report = [entry.replace("\\n", "\n") for entry in report]
import re

# Function to clean up unnecessary metadata from the report entries
def clean_report(report):
    cleaned_report = []
    for entry in report:
        # This pattern matches 'response_metadata={' followed by any characters that are not '}' (non-greedy),
        # possibly nested inside other braces, until the closing '}'.
        cleaned_entry = re.sub(r"response_metadata=\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}", "", entry, flags=re.DOTALL)
        cleaned_report.append(cleaned_entry)
    return cleaned_report

# Apply the cleaning function to the markdown report
cleaned_report = clean_report(cleaned_report)
```

After cleaning, we join the parts of the report together to get our final report.

```python
final_report = ' \n '.join(cleaned_report)
```

In a Jupyter Notebook, we can use the code below to render it in Markdown.

```python
from IPython.display import display, Markdown

display(Markdown(final_report))
```
|
||||||
|
|
||||||
|
## Final Generated Report
|
||||||
|
|
||||||
|
|
||||||
|
### Nike's Current Revenue Trend
|
||||||
|
|
||||||
|
Nike's current revenue trend has been steadily increasing over the past few years. In the most recent fiscal year, Nike reported a revenue of $37.4 billion, which was a 7% increase from the previous year. This growth can be attributed to strong sales in key markets, successful marketing campaigns, and a focus on innovation in product development. Overall, Nike continues to demonstrate strong financial performance and is well-positioned for future growth.
|
||||||
|
### Potential Areas of Improvement in Nike's Business Model
|
||||||
|
|
||||||
|
1. **Sustainability Practices**: Nike could further enhance its sustainability efforts by reducing its carbon footprint, using more eco-friendly materials, and ensuring ethical labor practices throughout its supply chain.
|
||||||
|
|
||||||
|
2. **Diversification of Product Portfolio**: While Nike is known for its athletic footwear and apparel, diversifying into new product categories or expanding into untapped markets could help drive growth and mitigate risks associated with a single product line.
|
||||||
|
|
||||||
|
3. **E-commerce Strategy**: Improving the online shopping experience, investing in digital marketing, and leveraging data analytics to personalize customer interactions could boost online sales and customer loyalty.
|
||||||
|
|
||||||
|
4. **Innovation and R&D**: Continuously investing in research and development to stay ahead of competitors, introduce new technologies, and enhance product performance could help maintain Nike's competitive edge in the market.
|
||||||
|
|
||||||
|
5. **Brand Image and Reputation**: Strengthening brand image through effective marketing campaigns, community engagement, and transparent communication with stakeholders can help build trust and loyalty among consumers.
|
||||||
|
### Potential Cost-Saving Strategies for Nike to Increase Net Revenue in Q3 2024
|
||||||
|
|
||||||
|
1. **Supply Chain Optimization**: Streamlining the supply chain, reducing transportation costs, and improving inventory management can lead to significant cost savings for Nike.
|
||||||
|
|
||||||
|
2. **Operational Efficiency**: Implementing lean manufacturing practices, reducing waste, and optimizing production processes can help lower production costs and improve overall efficiency.
|
||||||
|
|
||||||
|
3. **Outsourcing Non-Core Functions**: Outsourcing non-core functions such as IT services, customer support, or logistics can help reduce overhead costs and focus resources on core business activities.
|
||||||
|
|
||||||
|
4. **Energy Efficiency**: Investing in energy-efficient technologies, renewable energy sources, and sustainable practices can lower utility costs and demonstrate a commitment to environmental responsibility.
|
||||||
|
|
||||||
|
5. **Negotiating Supplier Contracts**: Negotiating better terms with suppliers, leveraging economies of scale, and exploring alternative sourcing options can help lower procurement costs and improve margins.
|
||||||
|
|
||||||
|
By implementing these cost-saving strategies, Nike can improve its bottom line and increase net revenue in Q3 2024.
|
||||||
|
### Projected Market Trends for the Sports Apparel Industry in 2024
|
||||||
|
|
||||||
|
1. **Sustainable Fashion**: Consumers are increasingly demanding eco-friendly and sustainable products, leading to a rise in sustainable sportswear options in the market.
|
||||||
|
|
||||||
|
2. **Digital Transformation**: The sports apparel industry is expected to continue its shift towards digital platforms, with a focus on e-commerce, personalized shopping experiences, and digital marketing strategies.
|
||||||
|
|
||||||
|
3. **Athleisure Wear**: The trend of athleisure wear, which combines athletic and leisure clothing, is projected to remain popular in 2024 as consumers seek comfort and versatility in their apparel choices.

4. **Innovative Materials**: Advances in technology and material science are likely to drive the development of innovative fabrics and performance-enhancing materials in sports apparel, catering to the demand for high-quality and functional products.

5. **Health and Wellness Focus**: With a growing emphasis on health and wellness, sports apparel brands are expected to incorporate features that promote comfort, performance, and overall well-being in their products.

Overall, the sports apparel industry in 2024 is anticipated to be characterized by sustainability, digitalization, innovation, and a focus on consumer health and lifestyle trends.

### Current Successful Strategies Used by Nike's Competitors

1. **Adidas**: Adidas has been successful in leveraging collaborations with celebrities and designers to create limited-edition collections that generate hype and drive sales. They have also focused on sustainability initiatives, such as using recycled materials in their products, to appeal to environmentally conscious consumers.

2. **Under Armour**: Under Armour has differentiated itself by targeting performance-driven athletes and emphasizing technological innovation in their products. They have also invested heavily in digital marketing and e-commerce to reach a wider audience and enhance the customer shopping experience.

3. **Puma**: Puma has successfully capitalized on the athleisure trend by offering stylish and versatile sportswear that can be worn both in and out of the gym. They have also focused on building partnerships with influencers and sponsoring high-profile athletes to increase brand visibility and credibility.

4. **Lululemon**: Lululemon has excelled in creating a strong community around its brand, hosting events, classes, and collaborations to engage with customers beyond just selling products. They have also prioritized customer experience by offering personalized services and creating a seamless omnichannel shopping experience.

5. **New Balance**: New Balance has carved out a niche in the market by emphasizing quality craftsmanship, heritage, and authenticity in their products. They have also focused on customization and personalization options for customers, allowing them to create unique and tailored footwear and apparel.

Overall, Nike's competitors have found success through a combination of innovative product offerings, strategic marketing initiatives, and a focus on customer engagement and experience.

### Current and Projected Economic Conditions in Nike's Major Markets

1. **United States**: The United States, being one of Nike's largest markets, is currently experiencing moderate economic growth driven by consumer spending, low unemployment rates, and a rebound in manufacturing. However, uncertainties surrounding trade policies, inflation, and interest rates could impact consumer confidence and spending in the near future.

2. **China**: China remains a key market for Nike, with a growing middle class and increasing demand for sportswear and athletic footwear. Despite recent trade tensions with the U.S., China's economy is projected to continue expanding, driven by domestic consumption, infrastructure investments, and technological advancements.

3. **Europe**: Economic conditions in Europe vary across countries, with some experiencing sluggish growth due to Brexit uncertainties, political instability, and trade tensions. However, overall consumer confidence is improving, and the sports apparel market is expected to grow, driven by e-commerce and sustainability trends.

4. **Emerging Markets**: Nike's presence in emerging markets such as India, Brazil, and Southeast Asia provides opportunities for growth, given the rising disposable incomes, urbanization, and increasing focus on health and fitness. However, challenges such as currency fluctuations, regulatory changes, and competition from local brands could impact Nike's performance in these markets.

Overall, Nike's major markets exhibit a mix of opportunities and challenges, with economic conditions influenced by global trends, geopolitical factors, and consumer preferences.

### Current Consumer Preferences in the Sports Apparel Industry

1. **Sustainability**: Consumers are increasingly seeking eco-friendly and sustainable options in sports apparel, driving brands to focus on using recycled materials, reducing waste, and promoting ethical practices.

2. **Athleisure**: The trend of athleisure wear continues to be popular, with consumers looking for versatile and comfortable clothing that can be worn both during workouts and in everyday life.

3. **Performance and Functionality**: Consumers prioritize performance-enhancing features in sports apparel, such as moisture-wicking fabrics, breathable materials, and ergonomic designs that enhance comfort and mobility.

4. **Personalization**: Customization options, personalized fit, and unique design elements are appealing to consumers who seek individuality and exclusivity in their sports apparel choices.

5. **Brand Transparency**: Consumers value transparency in brand practices, including supply chain transparency, ethical sourcing, and clear communication on product quality and manufacturing processes.

Overall, consumer preferences in the sports apparel industry are shifting towards sustainability, versatility, performance, personalization, and transparency, influencing brand strategies and product offerings.

### Potential New Markets for Nike to Explore in 2024

1. **India**: With a growing population, increasing disposable incomes, and a rising interest in health and fitness, India presents a significant opportunity for Nike to expand its presence and tap into a large consumer base.

2. **Africa**: The African market, particularly countries with emerging economies and a young population, offers potential for Nike to introduce its products and capitalize on the growing demand for sportswear and athletic footwear.

3. **Middle East**: Countries in the Middle East, known for their luxury shopping destinations and a growing interest in sports and fitness activities, could be strategic markets for Nike to target and establish a strong foothold.

4. **Latin America**: Markets in Latin America, such as Brazil, Mexico, and Argentina, present opportunities for Nike to cater to a diverse consumer base and leverage the region's passion for sports and active lifestyles.

5. **Southeast Asia**: Rapid urbanization, a growing urban middle-class population, and a trend towards health and wellness in countries like Indonesia, Thailand, and Vietnam make Southeast Asia an attractive region for Nike to explore and expand its market reach.

By exploring these new markets in 2024, Nike can diversify its geographical presence, reach untapped consumer segments, and drive growth in emerging economies.

### Potential New Products or Services Nike Could Introduce in 2024

1. **Smart Apparel**: Nike could explore the integration of technology into its apparel, such as smart fabrics that monitor performance metrics, provide feedback, or enhance comfort during workouts.

2. **Athletic Accessories**: Introducing a line of athletic accessories like gym bags, water bottles, or fitness trackers could complement Nike's existing product offerings and provide additional value to customers.

3. **Customization Platforms**: Offering personalized design options for footwear and apparel through online customization platforms could appeal to consumers seeking unique and tailored products.

4. **Athletic Recovery Gear**: Developing recovery-focused products like compression wear, recovery sandals, or massage tools could cater to athletes and fitness enthusiasts looking to enhance post-workout recovery.

5. **Sustainable Collections**: Launching sustainable collections made from eco-friendly materials, recycled fabrics, or biodegradable components could align with consumer preferences for environmentally conscious products.

By introducing these new products or services in 2024, Nike can innovate its product portfolio, cater to evolving consumer needs, and differentiate itself in the competitive sports apparel market.

### Potential Marketing Strategies for Nike to Increase Revenue in Q3 2024

1. **Influencer Partnerships**: Collaborating with popular athletes, celebrities, or social media influencers to promote Nike products can help reach a wider audience and drive sales.

2. **Interactive Campaigns**: Launching interactive marketing campaigns, contests, or events that engage customers and create buzz around new product releases can generate excitement and increase brand visibility.

3. **Social Media Engagement**: Leveraging social media platforms to connect with consumers, share user-generated content, and respond to feedback can build brand loyalty and encourage repeat purchases.

4. **Localized Marketing**: Tailoring marketing messages, promotions, and product offerings to specific regions or target demographics can enhance relevance and appeal to diverse consumer groups.

5. **Customer Loyalty Programs**: Implementing loyalty programs, exclusive offers, or rewards for repeat customers can incentivize brand loyalty, increase retention rates, and drive higher lifetime customer value.

By employing these marketing strategies in Q3 2024, Nike can enhance its brand presence, attract new customers, and ultimately boost revenue growth.

## **Applications of Swarms: Revolutionizing Customer Support**

---

**Introduction**:

In today's fast-paced digital world, responsive and efficient customer support is a linchpin for business success. The introduction of AI-driven swarms in the customer support domain can transform the way businesses interact with and assist their customers. By leveraging the combined power of multiple AI agents working in concert, businesses can achieve unprecedented levels of efficiency, customer satisfaction, and operational cost savings.

---

### **The Benefits of Using Swarms for Customer Support:**

1. **24/7 Availability**: Swarms never sleep. Customers receive instantaneous support at any hour, ensuring constant satisfaction and loyalty.

2. **Infinite Scalability**: Whether it's ten inquiries or ten thousand, swarms can handle fluctuating volumes with ease, eliminating the need for vast human teams and minimizing response times.

3. **Adaptive Intelligence**: Swarms learn collectively, meaning that a solution found for one customer can be instantly applied to benefit all. This leads to constantly improving support experiences, evolving with every interaction.

---

### **Features - Reinventing Customer Support**:

- **AI Inbox Monitor**: Continuously scans email inboxes, identifying and categorizing support requests for swift responses.

- **Intelligent Debugging**: Proactively helps customers by diagnosing and troubleshooting underlying issues.

- **Automated Refunds & Coupons**: Seamless integration with payment systems like Stripe allows for instant issuance of refunds or coupons if a problem remains unresolved.

- **Full System Integration**: Holistically connects with CRM, email systems, and payment portals, ensuring a cohesive and unified support experience.

- **Conversational Excellence**: With advanced large language models (LLMs), the swarm agents can engage in natural, human-like conversations, enhancing customer comfort and trust.

- **Rule-based Operation**: By working with rule engines, swarms ensure that all actions adhere to company guidelines, delivering consistent, error-free support (a minimal sketch follows this list).

- **Turing Test Ready**: Crafted to meet and exceed Turing Test standards, ensuring that every customer interaction feels genuine and personal.
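
To make the rule-based flow concrete, here is a minimal, hypothetical sketch of gating an automated refund behind company rules. The rule set and helper functions are illustrative only, not part of any shipped Swarms or Stripe integration:

```python
# Hypothetical rule gate for the automated-refund feature described above.
RULES = {
    "max_refund_usd": 100.0,
    "require_unresolved_ticket": True,
}

def refund_allowed(amount_usd: float, ticket_resolved: bool) -> bool:
    """Check company guidelines before any money moves."""
    if amount_usd > RULES["max_refund_usd"]:
        return False
    if RULES["require_unresolved_ticket"] and ticket_resolved:
        return False
    return True

def handle_ticket(amount_usd: float, ticket_resolved: bool) -> str:
    if refund_allowed(amount_usd, ticket_resolved):
        return "refund issued"  # a real agent would call the payment API here
    return "escalated to a human agent"

print(handle_ticket(49.99, ticket_resolved=False))  # -> refund issued
```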

---

**Conclusion**:

Swarms are not just another technological advancement; they represent the future of customer support. Their ability to provide round-the-clock, scalable, and continuously improving support can redefine customer experience standards. By adopting swarms, businesses can stay ahead of the curve, ensuring unparalleled customer loyalty and satisfaction.

**Experience the future of customer support. Dive into the swarm revolution.**

## Usage Documentation: Discord Bot with Advanced Features

---

### Overview:

This code provides a structure for a Discord bot with advanced features such as voice channel interactions, image generation, and text-based interactions using OpenAI models.

---

### Setup:

1. Ensure that the necessary libraries are installed:

```bash
pip install discord.py python-dotenv dalle3 invoke openai
```

2. Create a `.env` file in the same directory as your bot script and add the following:

```
DISCORD_TOKEN=your_discord_bot_token
STORAGE_SERVICE=your_storage_service_endpoint
SAVE_DIRECTORY=path_to_save_generated_images
```

---

### Bot Class and its Methods:

#### `__init__(self, agent, llm, command_prefix="!")`:

Initializes the bot with the given agent, language model (`llm`), and a command prefix (default is `!`).

#### `add_command(self, name, func)`:

Allows you to dynamically add new commands to the bot. The `name` is the command's name and `func` is the function to execute when the command is called.
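
For example, a hypothetical `!ping` command could be registered as follows (this assumes, as in discord.py, that command callbacks are coroutines that receive a context object):

```python
# Hypothetical example: registering a custom "!ping" command.
async def ping(ctx):
    await ctx.send("pong")

bot.add_command("ping", ping)
```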

#### `run(self)`:

Starts the bot using the `DISCORD_TOKEN` from the `.env` file.

---

### Commands:

1. **!greet**: Greets the user.

2. **!help_me**: Provides a list of commands and their descriptions.

3. **!join**: Joins the voice channel the user is in.

4. **!leave**: Leaves the voice channel the bot is currently in.

5. **!listen**: Starts listening to voice in the current voice channel and records the audio.

6. **!generate_image [prompt]**: Generates images based on the provided prompt using the DALL-E 3 model.

7. **!send_text [text] [use_agent=True]**: Sends the provided text to the worker (either the agent or the LLM) and returns the response.

---

### Usage:

Initialize the `llm` (large language model) with your OpenAI API key:

```python
from swarm_models import OpenAIChat

llm = OpenAIChat(
    openai_api_key="Your_OpenAI_API_Key",
    temperature=0.5,
)
```

Initialize the bot with the `llm`:

```python
from apps.discord import Bot

bot = Bot(llm=llm)
```

Send a task to the bot:

```python
task = "What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
bot.send_text(task)
```

Start the bot:

```python
bot.run()
```

---

### Additional Notes:

- The bot makes use of the `dalle3` library for image generation. Ensure you have the model and necessary setup for it.

- For the storage service, you might want to integrate with a cloud service like Google Cloud Storage or AWS S3 to store and retrieve generated images. The given code assumes a method `.upload()` for the storage service to upload files.

- Ensure that you've granted the bot the necessary permissions on Discord, especially if you want to use voice channel features.

- Handle API keys and tokens securely. Avoid hardcoding them directly into your code. Use environment variables or secure secret management tools.

/* Further customization as needed */

.md-typeset__table {
  min-width: 100%;
}

.md-typeset table:not([class]) {
  display: table;
}

/* Dark mode
[data-md-color-scheme="slate"] {
  --md-default-bg-color: black;
}

.header__ellipsis {
  color: black;
}

.md-copyright__highlight {
  color: black;
}

.md-header.md-header--shadow {
  color: black;
} */
# ClusterOps API Reference

ClusterOps is a Python library for managing and executing tasks across CPU and GPU resources in a distributed computing environment. It provides functions for resource discovery, task execution, and performance monitoring.

## Installation

```bash
pip3 install clusterops
```

## Table of Contents

1. [CPU Operations](#cpu-operations)
2. [GPU Operations](#gpu-operations)
3. [Utility Functions](#utility-functions)
4. [Resource Monitoring](#resource-monitoring)

## CPU Operations

### `list_available_cpus() -> List[int]`

Lists all available CPU cores.

#### Returns
| Type | Description |
|------|-------------|
| `List[int]` | A list of available CPU core indices. |

#### Raises
| Exception | Description |
|-----------|-------------|
| `RuntimeError` | If no CPUs are found. |

#### Example
```python
from clusterops import list_available_cpus

available_cpus = list_available_cpus()
print(f"Available CPU cores: {available_cpus}")
```

### `execute_on_cpu(cpu_id: int, func: Callable, *args: Any, **kwargs: Any) -> Any`

Executes a callable on a specific CPU.

#### Parameters
| Name | Type | Description |
|------|------|-------------|
| `cpu_id` | `int` | The CPU core to run the function on. |
| `func` | `Callable` | The function to be executed. |
| `*args` | `Any` | Arguments for the callable. |
| `**kwargs` | `Any` | Keyword arguments for the callable. |

#### Returns
| Type | Description |
|------|-------------|
| `Any` | The result of the function execution. |

#### Raises
| Exception | Description |
|-----------|-------------|
| `ValueError` | If the CPU core specified is invalid. |
| `RuntimeError` | If there is an error executing the function on the CPU. |

#### Example
```python
from clusterops import execute_on_cpu

def sample_task(n: int) -> int:
    return n * n

result = execute_on_cpu(0, sample_task, 10)
print(f"Result of sample task on CPU 0: {result}")
```

### `execute_with_cpu_cores(core_count: int, func: Callable, *args: Any, **kwargs: Any) -> Any`

Executes a callable using a specified number of CPU cores.

#### Parameters
| Name | Type | Description |
|------|------|-------------|
| `core_count` | `int` | The number of CPU cores to run the function on. |
| `func` | `Callable` | The function to be executed. |
| `*args` | `Any` | Arguments for the callable. |
| `**kwargs` | `Any` | Keyword arguments for the callable. |

#### Returns
| Type | Description |
|------|-------------|
| `Any` | The result of the function execution. |

#### Raises
| Exception | Description |
|-----------|-------------|
| `ValueError` | If the number of CPU cores specified is invalid or exceeds available cores. |
| `RuntimeError` | If there is an error executing the function on the specified CPU cores. |

#### Example
```python
from clusterops import execute_with_cpu_cores

def parallel_task(n: int) -> int:
    return sum(range(n))

result = execute_with_cpu_cores(4, parallel_task, 1000000)
print(f"Result of parallel task using 4 CPU cores: {result}")
```

## GPU Operations

### `list_available_gpus() -> List[str]`

Lists all available GPUs.

#### Returns
| Type | Description |
|------|-------------|
| `List[str]` | A list of available GPU names. |

#### Raises
| Exception | Description |
|-----------|-------------|
| `RuntimeError` | If no GPUs are found. |

#### Example
```python
from clusterops import list_available_gpus

available_gpus = list_available_gpus()
print(f"Available GPUs: {available_gpus}")
```

### `select_best_gpu() -> Optional[int]`

Selects the GPU with the most free memory.

#### Returns
| Type | Description |
|------|-------------|
| `Optional[int]` | The GPU ID of the best available GPU, or None if no GPUs are available. |

#### Example
```python
from clusterops import select_best_gpu

best_gpu = select_best_gpu()
if best_gpu is not None:
    print(f"Best GPU for execution: GPU {best_gpu}")
else:
    print("No GPUs available")
```

### `execute_on_gpu(gpu_id: int, func: Callable, *args: Any, **kwargs: Any) -> Any`

Executes a callable on a specific GPU using Ray.

#### Parameters
| Name | Type | Description |
|------|------|-------------|
| `gpu_id` | `int` | The GPU to run the function on. |
| `func` | `Callable` | The function to be executed. |
| `*args` | `Any` | Arguments for the callable. |
| `**kwargs` | `Any` | Keyword arguments for the callable. |

#### Returns
| Type | Description |
|------|-------------|
| `Any` | The result of the function execution. |

#### Raises
| Exception | Description |
|-----------|-------------|
| `ValueError` | If the GPU index is invalid. |
| `RuntimeError` | If there is an error executing the function on the GPU. |

#### Example
```python
from clusterops import execute_on_gpu

def gpu_task(n: int) -> int:
    return n ** 2

result = execute_on_gpu(0, gpu_task, 10)
print(f"Result of GPU task on GPU 0: {result}")
```

### `execute_on_multiple_gpus(gpu_ids: List[int], func: Callable, all_gpus: bool = False, timeout: float = None, *args: Any, **kwargs: Any) -> List[Any]`

Executes a callable across multiple GPUs using Ray.

#### Parameters
| Name | Type | Description |
|------|------|-------------|
| `gpu_ids` | `List[int]` | The list of GPU IDs to run the function on. |
| `func` | `Callable` | The function to be executed. |
| `all_gpus` | `bool` | Whether to use all available GPUs (default: False). |
| `timeout` | `float` | Timeout for the execution in seconds (default: None). |
| `*args` | `Any` | Arguments for the callable. |
| `**kwargs` | `Any` | Keyword arguments for the callable. |

#### Returns
| Type | Description |
|------|-------------|
| `List[Any]` | A list of results from the execution on each GPU. |

#### Raises
| Exception | Description |
|-----------|-------------|
| `ValueError` | If any GPU index is invalid. |
| `RuntimeError` | If there is an error executing the function on the GPUs. |

#### Example
```python
from clusterops import execute_on_multiple_gpus

def multi_gpu_task(n: int) -> int:
    return n ** 3

results = execute_on_multiple_gpus([0, 1], multi_gpu_task, 5)
print(f"Results of multi-GPU task: {results}")
```

### `distributed_execute_on_gpus(gpu_ids: List[int], func: Callable, *args: Any, **kwargs: Any) -> List[Any]`

Executes a callable across multiple GPUs and nodes using Ray's distributed task scheduling.

#### Parameters
| Name | Type | Description |
|------|------|-------------|
| `gpu_ids` | `List[int]` | The list of GPU IDs across nodes to run the function on. |
| `func` | `Callable` | The function to be executed. |
| `*args` | `Any` | Arguments for the callable. |
| `**kwargs` | `Any` | Keyword arguments for the callable. |

#### Returns
| Type | Description |
|------|-------------|
| `List[Any]` | A list of results from the execution on each GPU. |

#### Example
```python
from clusterops import distributed_execute_on_gpus

def distributed_task(n: int) -> int:
    return n ** 4

results = distributed_execute_on_gpus([0, 1, 2, 3], distributed_task, 3)
print(f"Results of distributed GPU task: {results}")
```

## Utility Functions

### `retry_with_backoff(func: Callable, retries: int = RETRY_COUNT, delay: float = RETRY_DELAY, *args: Any, **kwargs: Any) -> Any`

Retries a callable function with exponential backoff in case of failure.

#### Parameters
| Name | Type | Description |
|------|------|-------------|
| `func` | `Callable` | The function to execute with retries. |
| `retries` | `int` | Number of retries (default: `RETRY_COUNT` from the environment). |
| `delay` | `float` | Delay between retries in seconds (default: `RETRY_DELAY` from the environment). |
| `*args` | `Any` | Arguments for the callable. |
| `**kwargs` | `Any` | Keyword arguments for the callable. |

#### Returns
| Type | Description |
|------|-------------|
| `Any` | The result of the function execution. |

#### Raises
| Exception | Description |
|-----------|-------------|
| `Exception` | After all retries fail. |

#### Example
```python
import random

from clusterops import retry_with_backoff

def unstable_task():
    # Simulating an unstable task that might fail
    if random.random() < 0.5:
        raise Exception("Task failed")
    return "Task succeeded"

result = retry_with_backoff(unstable_task, retries=5, delay=1)
print(f"Result of unstable task: {result}")
```

## Resource Monitoring

### `monitor_resources()`

Continuously monitors CPU and GPU resources and logs alerts when thresholds are crossed.

#### Example
```python
from clusterops import monitor_resources

# Start monitoring resources
monitor_resources()
```

### `profile_execution(func: Callable, *args: Any, **kwargs: Any) -> Any`

Profiles the execution of a task, collecting metrics like execution time and CPU/GPU usage.

#### Parameters
| Name | Type | Description |
|------|------|-------------|
| `func` | `Callable` | The function to profile. |
| `*args` | `Any` | Arguments for the callable. |
| `**kwargs` | `Any` | Keyword arguments for the callable. |

#### Returns
| Type | Description |
|------|-------------|
| `Any` | The result of the function execution along with the collected metrics. |

#### Example
```python
from clusterops import profile_execution

def cpu_intensive_task():
    return sum(i * i for i in range(10000000))

result = profile_execution(cpu_intensive_task)
print(f"Result of profiled task: {result}")
```
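
To tie these functions together, here is a short end-to-end sketch that discovers resources and prefers the best available GPU, falling back to a CPU core otherwise. It assumes only the signatures documented above:

```python
from clusterops import (
    execute_on_cpu,
    execute_on_gpu,
    list_available_cpus,
    select_best_gpu,
)

def heavy_task(n: int) -> int:
    return sum(i * i for i in range(n))

# Prefer the GPU with the most free memory; otherwise use the first CPU core.
gpu_id = select_best_gpu()
if gpu_id is not None:
    result = execute_on_gpu(gpu_id, heavy_task, 10_000_000)
else:
    cpu_id = list_available_cpus()[0]
    result = execute_on_cpu(cpu_id, heavy_task, 10_000_000)

print(f"Result: {result}")
```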
This API reference provides a comprehensive overview of the ClusterOps library's main functions, their parameters, return values, and usage examples. It should help users understand and utilize the library effectively for managing and executing tasks across CPU and GPU resources in a distributed computing environment.
# Architecture

## **1. Introduction**

In today's rapidly evolving digital world, harnessing the collaborative power of multiple computational agents is more crucial than ever. 'Swarms' represents a bold stride in this direction: a scalable and dynamic framework designed to enable swarms of agents to function in harmony and tackle complex tasks. This document serves as a comprehensive guide, elucidating the underlying architecture and strategies pivotal to realizing the Swarms vision.

---

## **2. The Vision**

At its heart, the Swarms framework seeks to emulate the collaborative efficiency witnessed in natural systems, like ant colonies or bird flocks. These entities, though individually simple, achieve remarkable outcomes through collaboration. Similarly, Swarms will unleash the collective potential of numerous agents, operating cohesively.

---

## **3. Architecture Overview**

### **3.1 Agent Level**

The base level that serves as the building block for all further complexity.

#### Mechanics:
* **Model**: At its core, each agent harnesses a powerful model like OpenAI's GPT.
* **Vectorstore**: A memory structure allowing agents to store and retrieve information.
* **Tools**: Utilities and functionalities that aid in the agent's task execution.

#### Interaction:
Agents interact with the external world through their model and tools. The Vectorstore aids in retaining knowledge and facilitating inter-agent communication.
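
To make the mechanics concrete, here is a minimal sketch of that composition, assuming nothing beyond the three components above; the `SimpleAgent` and `VectorStore` names are illustrative, not the framework's actual API:

```python
from dataclasses import dataclass, field
from typing import Callable, Dict, List

@dataclass
class VectorStore:
    """Toy in-memory memory structure; a real store would index embeddings."""
    memory: List[str] = field(default_factory=list)

    def add(self, text: str) -> None:
        self.memory.append(text)

    def search(self, query: str) -> List[str]:
        # Naive keyword match standing in for similarity-based lookup.
        return [m for m in self.memory if query.lower() in m.lower()]

@dataclass
class SimpleAgent:
    model: Callable[[str], str]  # e.g. a thin wrapper around an LLM API call
    store: VectorStore = field(default_factory=VectorStore)
    tools: Dict[str, Callable] = field(default_factory=dict)

    def run(self, task: str) -> str:
        context = self.store.search(task)
        answer = self.model(f"Context: {context}\nTask: {task}")
        self.store.add(answer)  # retain knowledge for later tasks
        return answer

def echo_model(prompt: str) -> str:
    return f"[model answer for] {prompt[-40:]}"

agent = SimpleAgent(model=echo_model)
print(agent.run("Summarize the Q3 sales report"))
```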

### **3.2 Worker Infrastructure Level**

Building on the agent foundation, enhancing capability and readiness for swarm integration.

#### Mechanics:
* **Human Input Integration**: Enables agents to accept and understand human-provided instructions.
* **Unique Identifiers**: Assigns each agent a unique ID to facilitate tracking and communication.
* **Asynchronous Tools**: Bolsters agents' capability to multitask and interact in real time.

#### Interaction:
Each worker is an enhanced agent, capable of operating independently or in sync with its peers, allowing for dynamic, scalable operations.

### **3.3 Swarm Level**

Multiple Worker Nodes orchestrated into a synchronized, collaborative entity.

#### Mechanics:
* **Orchestrator**: The maestro, responsible for directing the swarm, task allocation, and communication.
* **Scalable Communication Layer**: Facilitates interactions among nodes and between nodes and the orchestrator.
* **Task Assignment & Completion Protocols**: Structured procedures ensuring tasks are efficiently distributed and concluded.

#### Interaction:
Nodes collaborate under the orchestrator's guidance, ensuring tasks are partitioned appropriately, executed, and results consolidated.
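
As a rough illustration of the orchestrator's role, the toy dispatch loop below assumes only that workers expose a `run(task)` method; it is a sketch, not the shipped implementation:

```python
import queue

class Orchestrator:
    """Directs the swarm: accepts tasks, allocates them, consolidates results."""

    def __init__(self, workers):
        self.workers = workers          # worker nodes exposing run(task)
        self.tasks = queue.Queue()

    def submit(self, task):
        self.tasks.put(task)

    def dispatch(self):
        # Round-robin allocation; a real orchestrator would also monitor
        # worker health and reassign tasks on failure.
        results = []
        i = 0
        while not self.tasks.empty():
            worker = self.workers[i % len(self.workers)]
            results.append(worker.run(self.tasks.get()))
            i += 1
        return results

class EchoWorker:
    def __init__(self, name):
        self.name = name

    def run(self, task):
        return f"{self.name} finished: {task}"

swarm = Orchestrator([EchoWorker("w1"), EchoWorker("w2")])
for t in ["scrape site", "summarize doc", "draft email"]:
    swarm.submit(t)
print(swarm.dispatch())
```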

### **3.4 Hivemind Level**

Envisioned as a 'Swarm of Swarms': an upper echelon of collaboration.

#### Mechanics:
* **Hivemind Orchestrator**: Oversees multiple swarm orchestrators, ensuring harmony on a grand scale.
* **Inter-Swarm Communication Protocols**: Dictates how swarms interact, exchange information, and co-execute tasks.

#### Interaction:
Multiple swarms, each a formidable force, combine their prowess under the Hivemind. This level tackles monumental tasks by dividing them among swarms.

---

## **4. Building the Framework: A Task Checklist**

### **4.1 Foundations: Agent Level**
* Define and standardize agent properties.
* Integrate the desired model (e.g., OpenAI's GPT) with the agent.
* Implement Vectorstore mechanisms: storage, retrieval, and communication protocols.
* Incorporate essential tools and utilities.
* Conduct preliminary testing: ensure agents can execute basic tasks and utilize the Vectorstore.

### **4.2 Enhancements: Worker Infrastructure Level**
* Interface agents with human input mechanisms.
* Assign and manage unique identifiers for each worker.
* Integrate asynchronous capabilities: ensure real-time response and multitasking.
* Test worker nodes for both solitary and collaborative tasks.

### **4.3 Cohesion: Swarm Level**
* Design and develop the orchestrator: ensure it can manage multiple worker nodes.
* Establish a scalable and efficient communication layer.
* Implement task distribution and retrieval protocols.
* Test swarms for efficiency, scalability, and robustness.

### **4.4 Apex Collaboration: Hivemind Level**
* Build the Hivemind Orchestrator: ensure it can oversee multiple swarms.
* Define inter-swarm communication, prioritization, and task-sharing protocols.
* Develop mechanisms to balance loads and optimize resource utilization across swarms.
* Thoroughly test the Hivemind level for macro-task execution.

---

## **5. Integration and Communication Mechanisms**

### **5.1 Vectorstore as the Universal Communication Layer**

Serving as the memory and communication backbone, the Vectorstore must:
* Facilitate rapid storage and retrieval of high-dimensional vectors.
* Enable similarity-based lookups: crucial for recognizing patterns or finding similar outputs.
* Scale seamlessly as the agent count grows.

### **5.2 Orchestrator-Driven Communication**
* Orchestrators, at both the swarm and hivemind levels, should employ adaptive algorithms to optimally distribute tasks.
* Ensure real-time monitoring of task execution and worker node health.
* Integrate feedback loops: allow for dynamic task reassignment in case of node failures or inefficiencies.

---

## **6. Conclusion & Forward Path**

The Swarms framework, once realized, will usher in a new era of computational efficiency and collaboration. While the roadmap ahead is intricate, with diligent planning, development, and testing, Swarms will redefine the boundaries of collaborative computing.

--------

# Overview

### 1. Model

**Overview:**
The foundational level, where a trained model (e.g., an OpenAI GPT model) is initialized. It is the base on which further abstraction levels build. It provides the core capabilities to perform tasks, answer queries, etc.

**Diagram:**
```
[ Model (openai) ]
```

### 2. Agent Level

**Overview:**
At the agent level, the raw model is coupled with tools and a vector store, allowing it to be more than just a model. The agent can now remember, use tools, and become a more versatile entity ready for integration into larger systems.

**Diagram:**
```
+-----------------+
|      Agent      |
| +-------------+ |
| |    Model    | |
| +-------------+ |
| +-------------+ |
| | VectorStore | |
| +-------------+ |
| +-------------+ |
| |    Tools    | |
| +-------------+ |
+-----------------+
```

### 3. Worker Infrastructure Level

**Overview:**
The worker infrastructure is a step above individual agents. Here, an agent is paired with additional utilities like human input and other tools, making it a more advanced, responsive unit capable of complex tasks.

**Diagram:**
```
+-----------------+
|   WorkerNode    |
| +-------------+ |
| |    Agent    | |
| | +---------+ | |
| | |  Model  | | |
| | +---------+ | |
| | +---------+ | |
| | |  Tools  | | |
| | +---------+ | |
| +-------------+ |
|                 |
| +-------------+ |
| | Human Input | |
| +-------------+ |
|                 |
| +---------+     |
| |  Tools  |     |
| +---------+     |
+-----------------+
```

### 4. Swarm Level

**Overview:**
At the swarm level, the orchestrator is central. It's responsible for assigning tasks to worker nodes, monitoring their completion, and handling the communication layer (for example, through a vector store or another universal communication mechanism) between worker nodes.

**Diagram:**
```
                     +------------+
                     |Orchestrator|
                     +------------+
                           |
           +---------------------------+
           |                           |
           | Swarm-level Communication |
           | Layer (e.g. Vector Store) |
           +---------------------------+
              /          |          \
+---------------+ +---------------+ +---------------+
| WorkerNode 1  | | WorkerNode 2  | | WorkerNode n  |
+---------------+ +---------------+ +---------------+
  Task Assigned    Task Completed    Communication
```

### 5. Hivemind Level

**Overview:**
At the Hivemind level, it's a multi-swarm setup, with an upper-layer orchestrator managing multiple swarm-level orchestrators. The Hivemind orchestrator is responsible for broader tasks like assigning macro-tasks to swarms, handling inter-swarm communications, and ensuring the overall system is functioning smoothly.

**Diagram:**
```
                      +--------+
                      |Hivemind|
                      +--------+
                           |
                    +--------------+
                    |   Hivemind   |
                    | Orchestrator |
                    +--------------+
             /             |             \
   +------------+   +------------+   +------------+
   |Orchestrator|   |Orchestrator|   |Orchestrator|
   +------------+   +------------+   +------------+
         |                |                |
 +---------------+ +---------------+ +---------------+
 |  Swarm-level  | |  Swarm-level  | |  Swarm-level  |
 | Communication | | Communication | | Communication |
 |     Layer     | |     Layer     | |     Layer     |
 +---------------+ +---------------+ +---------------+
    /       \         /       \         /       \
+-------+ +-------+ +-------+ +-------+ +-------+
|Worker | |Worker | |Worker | |Worker | |Worker |
| Node  | | Node  | | Node  | | Node  | | Node  |
+-------+ +-------+ +-------+ +-------+ +-------+
```

This setup allows the Hivemind level to operate at a grander scale, with the capability to manage hundreds or even thousands of worker nodes across multiple swarms efficiently.

-------

# **Swarms Framework Development Strategy Checklist**

## **Introduction**

The development of the Swarms framework requires a systematic and granular approach to ensure that each component is robust and that the overall framework is efficient and scalable. This checklist will serve as a guide to building Swarms from the ground up, breaking down tasks into small, manageable pieces.

---

## **1. Agent Level Development**

### **1.1 Model Integration**
- [ ] Research the most suitable models (e.g., OpenAI's GPT).
- [ ] Design an API for the agent to call the model.
- [ ] Implement error handling for when model calls fail.
- [ ] Test the model with sample data for accuracy and speed.

### **1.2 Vectorstore Implementation**
- [ ] Design the schema for the vector storage system.
- [ ] Implement storage methods to add, delete, and update vectors.
- [ ] Develop retrieval methods optimized for speed.
- [ ] Create protocols for vector-based communication between agents.
- [ ] Conduct stress tests to ascertain storage and retrieval speed.

### **1.3 Tools & Utilities Integration**
- [ ] List the essential tools required for agent functionality.
- [ ] Develop or integrate APIs for each tool.
- [ ] Implement error handling and logging for tool interactions.
- [ ] Validate tool integration with unit tests.

---

## **2. Worker Infrastructure Level Development**

### **2.1 Human Input Integration**
- [ ] Design a UI/UX for human interaction with worker nodes.
- [ ] Create APIs for input collection.
- [ ] Implement input validation and error handling.
- [ ] Test human input methods for clarity and ease of use.

### **2.2 Unique Identifier System**
- [ ] Research optimal formats for unique ID generation.
- [ ] Develop methods for generating and assigning IDs to agents (see the sketch below).
- [ ] Implement a tracking system to manage and monitor agents via IDs.
- [ ] Validate the uniqueness and reliability of the ID system.
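
As one possible starting point for this item, the sketch below uses Python's `uuid` module; UUID4 is an illustrative choice, not a mandated format:

```python
import uuid

registry: dict[str, object] = {}

def register_agent(agent: object) -> str:
    """Assign a collision-resistant ID and track the agent by it."""
    agent_id = str(uuid.uuid4())
    registry[agent_id] = agent
    return agent_id

agent_id = register_agent(object())
print(f"Registered agent {agent_id}; total tracked: {len(registry)}")
```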

### **2.3 Asynchronous Operation Tools**
- [ ] Incorporate libraries/frameworks to enable asynchrony (see the sketch below).
- [ ] Ensure tasks within an agent can run in parallel without conflict.
- [ ] Test asynchronous operations for efficiency improvements.
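
A minimal sketch of the asynchrony this item calls for, using `asyncio` (one candidate library among several); the task bodies are placeholders:

```python
import asyncio

async def call_model(prompt: str) -> str:
    await asyncio.sleep(0.1)  # stands in for a network-bound model call
    return f"answer to: {prompt}"

async def fetch_tool_data(tool: str) -> str:
    await asyncio.sleep(0.1)  # stands in for a tool/API call
    return f"data from {tool}"

async def main() -> None:
    # Both coroutines run concurrently rather than back to back.
    answer, data = await asyncio.gather(
        call_model("summarize the report"),
        fetch_tool_data("web_search"),
    )
    print(answer, "|", data)

asyncio.run(main())
```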

---

## **3. Swarm Level Development**

### **3.1 Orchestrator Design & Development**
- [ ] Draft a blueprint of orchestrator functionalities.
- [ ] Implement methods for task distribution among worker nodes.
- [ ] Develop communication protocols for the orchestrator to monitor workers.
- [ ] Create feedback systems to detect and address worker node failures.
- [ ] Test the orchestrator with a mock swarm to ensure efficient task allocation.

### **3.2 Communication Layer Development**
- [ ] Select a suitable communication protocol/framework (e.g., gRPC, WebSockets).
- [ ] Design the architecture for scalable, low-latency communication.
- [ ] Implement methods for sending, receiving, and broadcasting messages.
- [ ] Test the communication layer for reliability, speed, and error handling.

### **3.3 Task Management Protocols**
- [ ] Develop a system to queue, prioritize, and allocate tasks (see the sketch below).
- [ ] Implement methods for real-time task status tracking.
- [ ] Create a feedback loop for completed tasks.
- [ ] Test task distribution, execution, and feedback systems for efficiency.
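
A minimal sketch of the queue-and-prioritize step, using Python's `heapq`; the priority scheme and helpers are illustrative only:

```python
import heapq
import itertools

task_heap: list[tuple[int, int, str]] = []
counter = itertools.count()  # tie-breaker so equal priorities stay FIFO

def submit(task: str, priority: int) -> None:
    heapq.heappush(task_heap, (priority, next(counter), task))

def allocate() -> str:
    """Pop the highest-priority task (lower number = higher priority)."""
    _, _, task = heapq.heappop(task_heap)
    return task

submit("rebuild index", priority=2)
submit("answer customer", priority=1)
print(allocate())  # -> answer customer
```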

---

## **4. Hivemind Level Development**

### **4.1 Hivemind Orchestrator Development**
- [ ] Extend swarm orchestrator functionalities to manage multiple swarms.
- [ ] Create inter-swarm communication protocols.
- [ ] Implement load balancing mechanisms to distribute tasks across swarms.
- [ ] Validate hivemind orchestrator functionalities with multi-swarm setups.

### **4.2 Inter-Swarm Communication Protocols**
- [ ] Design methods for swarms to exchange data.
- [ ] Implement data reconciliation methods for swarms working on shared tasks.
- [ ] Test inter-swarm communication for efficiency and data integrity.

---

## **5. Scalability & Performance Testing**

- [ ] Simulate heavy loads to test the limits of the framework.
- [ ] Identify and address bottlenecks in both communication and computation.
- [ ] Conduct speed tests under different conditions.
- [ ] Test the system's responsiveness under various levels of stress.

---

## **6. Documentation & User Guide**

- [ ] Develop detailed documentation covering architecture, setup, and usage.
- [ ] Create user guides with step-by-step instructions.
- [ ] Incorporate visual aids, diagrams, and flowcharts for clarity.
- [ ] Update documentation regularly with new features and improvements.

---

## **7. Continuous Integration & Deployment**

- [ ] Set up CI/CD pipelines for automated testing and deployment.
- [ ] Ensure automatic rollback in case of deployment failures.
- [ ] Integrate code quality and security checks into the pipeline.
- [ ] Document deployment strategies and best practices.

---

## **Conclusion**

The Swarms framework represents a monumental leap in agent-based computation. This checklist provides a thorough roadmap for the framework's development, ensuring that every facet is addressed in depth. Through diligent adherence to this guide, the Swarms vision can be realized as a powerful, scalable, and robust system ready to tackle the challenges of tomorrow.
# Bounty Program

Our bounty program is an exciting opportunity for contributors to help us build the future of Swarms. By participating, you can earn rewards while contributing to a project that aims to revolutionize digital activity.

Here's how it works:

1. **Check out our Roadmap**: We've shared our roadmap detailing our short- and long-term goals. These are the areas where we're seeking contributions.

2. **Pick a Task**: Choose a task from the roadmap that aligns with your skills and interests. If you're unsure, you can reach out to our team for guidance.

3. **Get to Work**: Once you've chosen a task, start working on it. Remember, quality is key. We're looking for contributions that truly make a difference.

4. **Submit your Contribution**: Once your work is complete, submit it for review. We'll evaluate your contribution based on its quality, relevance, and the value it brings to Swarms.

5. **Earn Rewards**: If your contribution is approved, you'll earn a bounty. The amount of the bounty depends on the complexity of the task, the quality of your work, and the value it brings to Swarms.

## The Three Phases of Our Bounty Program

### Phase 1: Building the Foundation
In the first phase, our focus is on building the basic infrastructure of Swarms. This includes developing key components like the Swarms class, integrating essential tools, and establishing task completion and evaluation logic. We'll also start developing our testing and evaluation framework during this phase. If you're interested in foundational work and have a knack for building robust, scalable systems, this phase is for you.

### Phase 2: Enhancing the System
In the second phase, we'll focus on enhancing Swarms by integrating more advanced features, improving the system's efficiency, and refining our testing and evaluation framework. This phase involves more complex tasks, so if you enjoy tackling challenging problems and contributing to the development of innovative features, this is the phase for you.

### Phase 3: Towards Super-Intelligence
The third phase of our bounty program is the most exciting: this is where we aim to achieve super-intelligence. In this phase, we'll be working on improving the swarm's capabilities, expanding its skills, and fine-tuning the system based on real-world testing and feedback. If you're excited about the future of AI and want to contribute to a project that could potentially transform the digital world, this is the phase for you.

Remember, our roadmap is a guide, and we encourage you to bring your own ideas and creativity to the table. We believe that every contribution, no matter how small, can make a difference. So join us on this exciting journey and help us create the future of Swarms.

**To participate in our bounty program, visit the [Swarms Bounty Program Page](https://swarms.ai/bounty).** Let's build the future together!

## Bounties for Roadmap Items

To accelerate the development of Swarms and to encourage more contributors to join our journey towards automating every digital activity in existence, we are announcing a Bounty Program for specific roadmap items. Each bounty will be rewarded based on the complexity and importance of the task. Below are the items available for bounty:

1. **Multi-Agent Debate Integration**: $2000
2. **Meta Prompting Integration**: $1500
3. **Swarms Class**: $1500
4. **Integration of Additional Tools**: $1000
5. **Task Completion and Evaluation Logic**: $2000
6. **Ocean Integration**: $2500
7. **Improved Communication**: $2000
8. **Testing and Evaluation**: $1500
9. **Worker Swarm Class**: $2000
10. **Documentation**: $500

For each bounty task, there will be a strict evaluation process to ensure the quality of the contribution. This process includes a thorough review of the code and extensive testing to ensure it meets our standards.

# 3-Phase Testing Framework

To ensure the quality and efficiency of the Swarm, we will introduce a 3-phase testing framework, which will also serve as our evaluation criteria for each of the bounty tasks.

## Phase 1: Unit Testing
In this phase, individual modules will be tested to ensure that they work correctly in isolation. Unit tests will be designed for all functions and methods, with an emphasis on edge cases.
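
For illustration, a Phase 1-style test might look like the sketch below, written with pytest; `divide_tasks` is a stand-in helper, not actual Swarms code:

```python
import pytest

def divide_tasks(tasks: int, workers: int) -> list[int]:
    """Stand-in module under test: split tasks evenly across workers."""
    if workers <= 0:
        raise ValueError("workers must be positive")
    base, extra = divmod(tasks, workers)
    return [base + (1 if i < extra else 0) for i in range(workers)]

def test_even_split():
    assert divide_tasks(10, 5) == [2, 2, 2, 2, 2]

def test_uneven_split_preserves_total():
    assert sum(divide_tasks(7, 3)) == 7

def test_zero_workers_edge_case():
    with pytest.raises(ValueError):
        divide_tasks(10, 0)
```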

## Phase 2: Integration Testing
After passing unit tests, we will test the integration of different modules to ensure they work correctly together. This phase will also test the interoperability of the Swarm with external systems and libraries.

## Phase 3: Benchmarking & Stress Testing
In the final phase, we will perform benchmarking and stress tests. We'll push the limits of the Swarm under extreme conditions to ensure it performs well in real-world scenarios. This phase will measure the performance, speed, and scalability of the Swarm under high-load conditions.

By following this 3-phase testing framework, we aim to develop a reliable, high-performing, and scalable Swarm that can automate all digital activities.

# Reverse Engineering to Reach Phase 3

To reach the Phase 3 level, we need to reverse engineer the tasks we need to complete. Here's an example of what this might look like:

1. **Set Clear Expectations**: Define what success looks like for each task. Be clear about the outputs and outcomes we expect. This will guide our testing and development efforts.

2. **Develop Testing Scenarios**: Create a comprehensive list of testing scenarios that cover both common and edge cases. This will help us ensure that our Swarm can handle a wide range of situations.

3. **Write Test Cases**: For each scenario, write detailed test cases that outline the exact steps to be followed, the inputs to be used, and the expected outputs.

4. **Execute the Tests**: Run the test cases on our Swarm, making note of any issues or bugs that arise.

5. **Iterate and Improve**: Based on the results of our tests, iterate and improve our Swarm. This may involve fixing bugs, optimizing code, or redesigning parts of our system.

6. **Repeat**: Repeat this process until our Swarm meets our expectations and passes all test cases.

By following these steps, we will systematically build, test, and improve our Swarm until it reaches the Phase 3 level. This methodical approach will help us ensure that we create a reliable, high-performing, and scalable Swarm that can truly automate all digital activities.

Let's shape the future of digital automation together!
@ -0,0 +1,122 @@
|
|||||||
|
# **Swarms Framework Development Strategy Checklist**
|
||||||
|
|
||||||
|
## **Introduction**
|
||||||
|
|
||||||
|
The development of the Swarms framework requires a systematic and granular approach to ensure that each component is robust and that the overall framework is efficient and scalable. This checklist will serve as a guide to building Swarms from the ground up, breaking down tasks into small, manageable pieces.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## **1. Agent Level Development**
|
||||||
|
|
||||||
|
### **1.1 Model Integration**
|
||||||
|
- [ ] Research the most suitable models (e.g., OpenAI's GPT).
|
||||||
|
- [ ] Design an API for the agent to call the model.
|
||||||
|
- [ ] Implement error handling when model calls fail.
|
||||||
|
- [ ] Test the model with sample data for accuracy and speed.
|
||||||
|
|
||||||
|
### **1.2 Vectorstore Implementation**
|
||||||
|
- [ ] Design the schema for the vector storage system.
|
||||||
|
- [ ] Implement storage methods to add, delete, and update vectors.
|
||||||
|
- [ ] Develop retrieval methods with optimization for speed.
|
||||||
|
- [ ] Create protocols for vector-based communication between agents.
|
||||||
|
- [ ] Conduct stress tests to ascertain storage and retrieval speed.
|
||||||
|
|
||||||
|
### **1.3 Tools & Utilities Integration**
|
||||||
|
- [ ] List out essential tools required for agent functionality.
|
||||||
|
- [ ] Develop or integrate APIs for each tool.
|
||||||
|
- [ ] Implement error handling and logging for tool interactions.
|
||||||
|
- [ ] Validate tools integration with unit tests.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## **2. Worker Infrastructure Level Development**
|
||||||
|
|
||||||
|
### **2.1 Human Input Integration**
|
||||||
|
- [ ] Design a UI/UX for human interaction with worker nodes.
|
||||||
|
- [ ] Create APIs for input collection.
|
||||||
|
- [ ] Implement input validation and error handling.
|
||||||
|
- [ ] Test human input methods for clarity and ease of use.
|
||||||
|
|
||||||
|
### **2.2 Unique Identifier System**
|
||||||
|
- [ ] Research optimal formats for unique ID generation.
|
||||||
|
- [ ] Develop methods for generating and assigning IDs to agents.
|
||||||
|
- [ ] Implement a tracking system to manage and monitor agents via IDs.
|
||||||
|
- [ ] Validate the uniqueness and reliability of the ID system.
|
||||||
|
|
||||||
|
### **2.3 Asynchronous Operation Tools**
|
||||||
|
- [ ] Incorporate libraries/frameworks to enable asynchrony.
|
||||||
|
- [ ] Ensure tasks within an agent can run in parallel without conflict.
|
||||||
|
- [ ] Test asynchronous operations for efficiency improvements.
---

## **3. Swarm Level Development**

### **3.1 Orchestrator Design & Development**

- [ ] Draft a blueprint of orchestrator functionalities.
- [ ] Implement methods for task distribution among worker nodes.
- [ ] Develop communication protocols for the orchestrator to monitor workers.
- [ ] Create feedback systems to detect and address worker node failures.
- [ ] Test the orchestrator with a mock swarm to ensure efficient task allocation.

### **3.2 Communication Layer Development**

- [ ] Select a suitable communication protocol/framework (e.g., gRPC, WebSockets).
- [ ] Design the architecture for scalable, low-latency communication.
- [ ] Implement methods for sending, receiving, and broadcasting messages.
- [ ] Test the communication layer for reliability, speed, and error handling.

### **3.3 Task Management Protocols**

- [ ] Develop a system to queue, prioritize, and allocate tasks (a sketch follows this checklist).
- [ ] Implement methods for real-time task status tracking.
- [ ] Create a feedback loop for completed tasks.
- [ ] Test task distribution, execution, and feedback systems for efficiency.
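A minimal sketch of a priority task queue with status tracking, built on the standard library's `heapq`; the `TaskManager` name and the status values are illustrative assumptions:

```python
import heapq
import itertools


class TaskManager:
    """Queues tasks by priority and tracks their lifecycle status."""

    def __init__(self):
        self._heap = []
        self._counter = itertools.count()  # tie-breaker for equal priorities
        self.status = {}  # task_id -> "queued" | "running" | "done"

    def submit(self, task_id, priority: int = 0):
        # Lower priority numbers are served first.
        heapq.heappush(self._heap, (priority, next(self._counter), task_id))
        self.status[task_id] = "queued"

    def next_task(self):
        _, _, task_id = heapq.heappop(self._heap)
        self.status[task_id] = "running"
        return task_id

    def complete(self, task_id):
        self.status[task_id] = "done"  # hook the feedback loop in here
```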
---

## **4. Hivemind Level Development**

### **4.1 Hivemind Orchestrator Development**

- [ ] Extend swarm orchestrator functionalities to manage multiple swarms.
- [ ] Create inter-swarm communication protocols.
- [ ] Implement load balancing mechanisms to distribute tasks across swarms.
- [ ] Validate hivemind orchestrator functionalities with multi-swarm setups.

### **4.2 Inter-Swarm Communication Protocols**

- [ ] Design methods for swarms to exchange data.
- [ ] Implement data reconciliation methods for swarms working on shared tasks.
- [ ] Test inter-swarm communication for efficiency and data integrity.

---

## **5. Scalability & Performance Testing**

- [ ] Simulate heavy loads to test the limits of the framework.
- [ ] Identify and address bottlenecks in both communication and computation.
- [ ] Conduct speed tests under different conditions.
- [ ] Test the system's responsiveness under various levels of stress.

---

## **6. Documentation & User Guide**

- [ ] Develop detailed documentation covering architecture, setup, and usage.
- [ ] Create user guides with step-by-step instructions.
- [ ] Incorporate visual aids, diagrams, and flowcharts for clarity.
- [ ] Update documentation regularly with new features and improvements.

---

## **7. Continuous Integration & Deployment**

- [ ] Set up CI/CD pipelines for automated testing and deployment.
- [ ] Ensure automatic rollback in case of deployment failures.
- [ ] Integrate code quality and security checks into the pipeline.
- [ ] Document deployment strategies and best practices.

---

## **Conclusion**

The Swarms framework represents a monumental leap in agent-based computation. This checklist provides a thorough roadmap for the framework's development, ensuring that every facet is addressed in depth. Through diligent adherence to this guide, the Swarms vision can be realized as a powerful, scalable, and robust system ready to tackle the challenges of tomorrow.
# Costs Structure of Deploying Autonomous Agents

## Table of Contents

1. Introduction
2. Our Time: Generating System Prompts and Custom Tools
3. Consultancy Fees
4. Model Inference Infrastructure
5. Deployment and Continual Maintenance
6. Output Metrics: Blog Generation Rates

---

## 1. Introduction

Autonomous agents are revolutionizing various industries, from self-driving cars to chatbots and customer service solutions. The prospect of automation and improved efficiency makes these agents attractive investments. However, like any other technological solution, deploying autonomous agents involves several cost elements that organizations need to consider carefully. This guide outlines the costs associated with deploying autonomous agents.

---

## 2. Our Time: Generating System Prompts and Custom Tools

### Description

The deployment of autonomous agents often requires a substantial investment of time to develop system prompts and custom tools tailored to specific operational needs.

### Costs

| Task                     | Time Required (Hours) | Cost per Hour ($) | Total Cost ($) |
| ------------------------ | --------------------- | ----------------- | -------------- |
| System Prompts Design    | 50                    | 100               | 5,000          |
| Custom Tools Development | 100                   | 100               | 10,000         |
| **Total**                | **150**               |                   | **15,000**     |

---

## 3. Consultancy Fees

### Description

Consultation is often necessary for navigating the complexities of autonomous agents. This includes system assessment, customization, and other essential services.

### Costs

| Service              | Fees ($)   |
| -------------------- | ---------- |
| Initial Assessment   | 5,000      |
| System Customization | 7,000      |
| Training             | 3,000      |
| **Total**            | **15,000** |

---

## 4. Model Inference Infrastructure

### Description

The hardware and software needed for the agent's functionality, known as the model inference infrastructure, form a significant part of the costs.

### Costs

| Component         | Cost ($)   |
| ----------------- | ---------- |
| Hardware          | 10,000     |
| Software Licenses | 2,000      |
| Cloud Services    | 3,000      |
| **Total**         | **15,000** |

---

## 5. Deployment and Continual Maintenance

### Description

Once everything is in place, deploying the autonomous agents and their ongoing maintenance are the next major cost factors.

### Costs

| Task                | Monthly Cost ($) | Annual Cost ($) |
| ------------------- | ---------------- | --------------- |
| Deployment          | 5,000            | 60,000          |
| Ongoing Maintenance | 1,000            | 12,000          |
| **Total**           | **6,000**        | **72,000**      |

---

## 6. Output Metrics: Blog Generation Rates

### Description

To provide a sense of what an investment in autonomous agents can yield, we offer the following data on the number of blog posts that can be generated as an example of output.

### Blog Generation Rates

| Timeframe | Number of Blogs |
| --------- | --------------- |
| Per Day   | 20              |
| Per Week  | 140             |
| Per Month | 600             |
# Swarms Data Room

## Table of Contents

**Introduction**

- Overview of the Company
- Vision and Mission Statement
- Executive Summary

**Corporate Documents**

- Articles of Incorporation
- Bylaws
- Shareholder Agreements
- Board Meeting Minutes
- Company Structure and Org Chart

**Financial Information**

- Historical Financial Statements
  - Income Statements
  - Balance Sheets
  - Cash Flow Statements
- Financial Projections and Forecasts
- Cap Table
- Funding History and Use of Funds

**Products and Services**

- Detailed Descriptions of Products/Services
- Product Development Roadmap
- User Manuals and Technical Specifications
- Case Studies and Use Cases

## **Introduction**

Swarms provides automation-as-a-service through swarms of autonomous agents that work together as a team. We enable our customers to build, deploy, and scale production-grade multi-agent applications to automate real-world tasks.

### **Vision**

Our vision for 2024 is to provide the most reliable infrastructure for deploying autonomous agents into the real world through the Swarm Cloud, our premier cloud platform for the scalable deployment of Multi-Modal Autonomous Agents. The platform focuses on delivering maximum value to users by taking only a small fee for the hosted compute power needed to run the agents.

### **Executive Summary**

The Swarm Corporation aims to enable AI models to automate complex workflows and operations, not just singular low-value tasks. We believe collaboration between multiple agents can overcome the limitations of individual agents in reasoning, planning, and related capabilities. This will allow automation of processes in mission-critical industries like security, logistics, and manufacturing, where AI adoption is currently low.

We provide an open source framework to deploy production-grade multi-modal agents in just a few lines of code. This builds our user base, recruits talent, gathers customer feedback to improve products, and earns awareness and trust.

Our business model focuses on customer satisfaction, openness, integration with other tools/platforms, and production-grade reliability.

Our go-to-market strategy is to get the framework to product-market fit with over 50K weekly recurring users, then secure high-value contracts in target industries, with long-term monetization via microtransactions, usage-based pricing, and subscriptions.

The team has thousands of hours of experience building and optimizing autonomous agents. Leadership includes AI engineers, product experts, open source contributors, and community builders.

Key milestones: reach 80K framework users in January 2024, start contracts in target verticals, and introduce commercial products in 2025 with various pricing models.

### **Resources**

- [Swarm Pre-Seed Deck](https://drive.google.com/file/d/1n8o2mjORbG96uDfx4TabjnyieludYaZz/view?usp=sharing)
- [Swarm Memo](https://docs.google.com/document/d/1hS_nv_lFjCqLfnJBoF6ULY9roTbSgSuCkvXvSUSc7Lo/edit?usp=sharing)

## **Financial Documents**

This section is dedicated entirely to financial documents.

- [Cap Table](https://docs.google.com/spreadsheets/d/1wuTWbfhYaY5Xp6nSQ9R0wDtSpwSS9coHxsjKd0UbIDc/edit?usp=sharing)
- [Cashflow Prediction Sheet](https://docs.google.com/spreadsheets/d/1HQEHCIXXMHajXMl5sj8MEfcQtWfOnD7GjHtNiocpD60/edit?usp=sharing)

---

## **Product**

Swarms is an open source framework for developers in Python to enable seamless, reliable, and scalable multi-agent orchestration through modularity, customization, and precision.

- [Swarms GitHub Page](https://github.com/kyegomez/swarms)
- [Swarms Memo](https://docs.google.com/document/d/1hS_nv_lFjCqLfnJBoF6ULY9roTbSgSuCkvXvSUSc7Lo/edit)
- [Swarms Project Board](https://github.com/users/kyegomez/projects/1)
- [Swarms Website](https://www.swarms.world/g)
- [Swarm Ecosystem](https://github.com/kyegomez/swarm-ecosystem)
- [Swarm Core](https://github.com/kyegomez/swarms-core)

### Product Growth Metrics

| Name                          | Description                                                                                                    | Link                                                                    |
| ----------------------------- | -------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------- |
| Total downloads of all time   | Total number of downloads for the product over its entire lifespan.                                           | [Downloads](https://pepy.tech/project/swarms)                           |
| Downloads this month          | Number of downloads for the product in the current month.                                                      | [Downloads this month](https://pepy.tech/project/swarms)                |
| Downloads this week           | Total number of downloads for the product in the current week.                                                 | [Downloads this week](https://pepy.tech/project/swarms)                 |
| GitHub forks                  | Number of times the product's codebase has been copied for optimization, contribution, or usage.               | [Forks](https://github.com/kyegomez/swarms/network)                     |
| GitHub stars                  | Number of users who have 'liked' the project.                                                                  | [Stars](https://github.com/kyegomez/swarms/stargazers)                  |
| Pip module metrics            | Various project statistics such as watchers, number of contributors, date the repository was created, and more. | [CLICK HERE](https://libraries.io/github/kyegomez/swarms)               |
| Contribution-based statistics | Statistics like number of contributors, lines of code changed, etc.                                            | [HERE](https://github.com/kyegomez/swarms/graphs/contributors)          |
| GitHub community insights     | Insights into the GitHub community around the product.                                                         | [GitHub community insights](https://github.com/kyegomez/swarms/graphs/community) |
| GitHub traffic metrics        | Metrics related to traffic, such as views and clones on GitHub.                                                | [GitHub traffic metrics](https://github.com/kyegomez/swarms/graphs/traffic) |
| Issues with the framework     | Current open issues for the product on GitHub.                                                                 | [Open issues](https://github.com/kyegomez/swarms/issues)                |
# Demo Ideas

* We could also try to create an AI influencer run by a swarm: let it create a whole identity and generate images, memes, and other content for Twitter, Reddit, etc.

* A more general version of the above, a swarm, or both: something connecting all the calendars, events, and initiatives of all the AI communities (LangChain, LAION, EleutherAI, LessWrong, Gato, Rob Miles, ChatGPT hackers, etc.).

* Swarm of AI influencers to spread marketing.

* Delegation system to better organize teams: Start with a team of passionate humans and let them self-report their skills/strengths so the agent has a concept of whom to delegate to. Then feed the agent a huge task list (like the bullet list a few messages above) that it breaks down into actionable steps and "prompts" specific team members to complete tasks. It could even suggest breakout teams of a few people with complementary skills to tackle more complex tasks. There can also be a live board that updates each time a team member completes something, to encourage momentum and keep track of progress.
# Design Philosophy Document for Swarms

## Usable

### Objective

Our goal is to ensure that Swarms is intuitive and easy to use for all users, regardless of their level of technical expertise. This includes the developers who implement Swarms in their applications, as well as end users who interact with the implemented systems.

### Tactics

- Clear and Comprehensive Documentation: We will provide well-written and easily accessible documentation that guides users through using and understanding Swarms.
- User-Friendly APIs: We'll design clean and self-explanatory APIs that help developers to understand their purpose quickly.
- Prompt and Effective Support: We will ensure that support is readily available to assist users when they encounter problems or need help with Swarms.

## Reliable

### Objective

Swarms should be dependable and trustworthy. Users should be able to count on Swarms to perform consistently and without error or failure.

### Tactics

- Robust Error Handling: We will focus on error prevention, detection, and recovery to minimize failures in Swarms.
- Comprehensive Testing: We will apply various testing methodologies such as unit testing, integration testing, and stress testing to validate the reliability of our software.
- Continuous Integration/Continuous Delivery (CI/CD): We will use CI/CD pipelines to ensure that all changes are tested and validated before they're merged into the main branch.

## Fast

### Objective

Swarms should offer high performance and rapid response times. The system should be able to handle requests and tasks swiftly.

### Tactics

- Efficient Algorithms: We will focus on optimizing our algorithms and data structures to ensure they run as quickly as possible.
- Caching: Where appropriate, we will use caching techniques to speed up response times.
- Profiling and Performance Monitoring: We will regularly analyze the performance of Swarms to identify bottlenecks and opportunities for improvement.

## Scalable

### Objective

Swarms should be able to grow in capacity and complexity without compromising performance or reliability. It should be able to handle increased workloads gracefully.

### Tactics

- Modular Architecture: We will design Swarms using a modular architecture that allows for easy scaling and modification.
- Load Balancing: We will distribute tasks evenly across available resources to prevent overload and maximize throughput.
- Horizontal and Vertical Scaling: We will design Swarms to be capable of both horizontal scaling (adding more machines) and vertical scaling (adding more power to an existing machine).

### Philosophy

Swarms is designed with a philosophy of simplicity and reliability. We believe that software should be a tool that empowers users, not a hurdle that they need to overcome. Therefore, our focus is on usability, reliability, speed, and scalability. We want our users to find Swarms intuitive and dependable, fast and adaptable to their needs. This philosophy guides all of our design and development decisions.

# Swarm Architecture Design Document

## Overview

The goal of the Swarm Architecture is to provide a flexible and scalable system to build swarm intelligence models that can solve complex problems. This document details the proposed design to create a plug-and-play system, which makes it easy to create custom swarms, and provides pre-configured swarms with multi-modal agents.

## Design Principles

- **Modularity**: The system will be built in a modular fashion, allowing various components to be easily swapped or upgraded.
- **Interoperability**: Different swarm classes and components should be able to work together seamlessly.
- **Scalability**: The design should support the growth of the system by adding more components or swarms.
- **Ease of Use**: Users should be able to easily create their own swarms or use pre-configured ones with minimal configuration.

## Design Components

### BaseSwarm

The BaseSwarm is an abstract base class that defines the basic structure of a swarm and the methods that need to be implemented. Any new swarm should inherit from this class and implement the required methods.
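As a minimal sketch of what such a base class could look like (the exact method names required by the real class may differ; `run_swarms` here follows the example later in this document):

```python
from abc import ABC, abstractmethod


class BaseSwarm(ABC):
    """Abstract structure every concrete swarm must implement."""

    @abstractmethod
    def setup(self) -> None:
        """Initialize components, worker nodes, and the boss node."""

    @abstractmethod
    def run_swarms(self, objective: str):
        """Execute the swarm against the given objective."""
```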
|
||||||
|
|
||||||
|
### Swarm Classes
|
||||||
|
|
||||||
|
Various Swarm classes can be implemented inheriting from the BaseSwarm class. Each swarm class should implement the required methods for initializing the components, worker nodes, and boss node, and running the swarm.
|
||||||
|
|
||||||
|
Pre-configured swarm classes with multi-modal agents can be provided for ease of use. These classes come with a default configuration of tools and agents, which can be used out of the box.
|
||||||
|
|
||||||
|
### Tools and Agents
|
||||||
|
|
||||||
|
Tools and agents are the components that provide the actual functionality to the swarms. They can be language models, AI assistants, vector stores, or any other components that can help in problem solving.
|
||||||
|
|
||||||
|
To make the system plug-and-play, a standard interface should be defined for these components. Any new tool or agent should implement this interface, so that it can be easily plugged into the system.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Users can either use pre-configured swarms or create their own custom swarms.
|
||||||
|
|
||||||
|
To use a pre-configured swarm, they can simply instantiate the corresponding swarm class and call the run method with the required objective.
|
||||||
|
|
||||||
|
To create a custom swarm, they need to:
|
||||||
|
|
||||||
|
1. Define a new swarm class inheriting from BaseSwarm.
|
||||||
|
2. Implement the required methods for the new swarm class.
|
||||||
|
3. Instantiate the swarm class and call the run method.
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Using pre-configured swarm
|
||||||
|
swarm = PreConfiguredSwarm(openai_api_key)
|
||||||
|
swarm.run_swarms(objective)
|
||||||
|
|
||||||
|
# Creating custom swarm
|
||||||
|
class CustomSwarm(BaseSwarm):
|
||||||
|
# Implement required methods
|
||||||
|
|
||||||
|
swarm = CustomSwarm(openai_api_key)
|
||||||
|
swarm.run_swarms(objective)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Conclusion
|
||||||
|
|
||||||
|
This Swarm Architecture design provides a scalable and flexible system for building swarm intelligence models. The plug-and-play design allows users to easily use pre-configured swarms or create their own custom swarms.
|
||||||
|
|
||||||
|
|
||||||
|
# Swarming Architectures
|
||||||
|
Sure, below are five different swarm architectures with their base requirements and an abstract class that processes these components:
|
||||||
|
|
||||||
|
1. **Hierarchical Swarm**: This architecture is characterized by a boss/worker relationship. The boss node takes high-level decisions and delegates tasks to the worker nodes. The worker nodes perform tasks and report back to the boss node.
|
||||||
|
- Requirements: Boss node (can be a large language model), worker nodes (can be smaller language models), and a task queue for task management.
|
||||||
|
|
||||||
|
2. **Homogeneous Swarm**: In this architecture, all nodes in the swarm are identical and contribute equally to problem-solving. Each node has the same capabilities.
|
||||||
|
- Requirements: Homogeneous nodes (can be language models of the same size), communication protocol for nodes to share information.
|
||||||
|
|
||||||
|
3. **Heterogeneous Swarm**: This architecture contains different types of nodes, each with its specific capabilities. This diversity can lead to more robust problem-solving.
|
||||||
|
- Requirements: Different types of nodes (can be different types and sizes of language models), a communication protocol, and a mechanism to delegate tasks based on node capabilities.
|
||||||
|
|
||||||
|
4. **Competitive Swarm**: In this architecture, nodes compete with each other to find the best solution. The system may use a selection process to choose the best solutions.
|
||||||
|
- Requirements: Nodes (can be language models), a scoring mechanism to evaluate node performance, a selection mechanism.
|
||||||
|
|
||||||
|
5. **Cooperative Swarm**: In this architecture, nodes work together and share information to find solutions. The focus is on cooperation rather than competition.
|
||||||
|
- Requirements: Nodes (can be language models), a communication protocol, a consensus mechanism to agree on solutions.
|
||||||
|
|
||||||
|
|
||||||
|
6. **Grid-based Swarm**: This architecture positions agents on a grid, where they can only interact with their neighbors. This is useful for simulations, especially in fields like ecology or epidemiology.
|
||||||
|
- Requirements: Agents (can be language models), a grid structure, and a neighborhood definition (i.e., how to identify neighboring agents).
|
||||||
|
|
||||||
|
7. **Particle Swarm Optimization (PSO) Swarm**: In this architecture, each agent represents a potential solution to an optimization problem. Agents move in the solution space based on their own and their neighbors' past performance. PSO is especially useful for continuous numerical optimization problems.
|
||||||
|
- Requirements: Agents (each representing a solution), a definition of the solution space, an evaluation function to rate the solutions, a mechanism to adjust agent positions based on performance.
|
||||||
|
|
||||||
|
8. **Ant Colony Optimization (ACO) Swarm**: Inspired by ant behavior, this architecture has agents leave a pheromone trail that other agents follow, reinforcing the best paths. It's useful for problems like the traveling salesperson problem.
|
||||||
|
- Requirements: Agents (can be language models), a representation of the problem space, a pheromone updating mechanism.
|
||||||
|
|
||||||
|
9. **Genetic Algorithm (GA) Swarm**: In this architecture, agents represent potential solutions to a problem. They can 'breed' to create new solutions and can undergo 'mutations'. GA swarms are good for search and optimization problems.
|
||||||
|
- Requirements: Agents (each representing a potential solution), a fitness function to evaluate solutions, a crossover mechanism to breed solutions, and a mutation mechanism.
|
||||||
|
|
||||||
|
10. **Stigmergy-based Swarm**: In this architecture, agents communicate indirectly by modifying the environment, and other agents react to such modifications. It's a decentralized method of coordinating tasks.
|
||||||
|
- Requirements: Agents (can be language models), an environment that agents can modify, a mechanism for agents to perceive environment changes.
|
||||||
|
|
||||||
|
These architectures all have unique features and requirements, but they share the need for agents (often implemented as language models) and a mechanism for agents to communicate or interact, whether it's directly through messages, indirectly through the environment, or implicitly through a shared solution space. Some also require specific data structures, like a grid or problem space, and specific algorithms, like for evaluating solutions or updating agent positions.
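As promised above, here is a minimal sketch of an abstract class that processes these shared components; every name in it (`SwarmArchitecture`, `distribute_task`, `agent.run`) is illustrative, not part of the actual framework:

```python
from abc import ABC, abstractmethod


class SwarmArchitecture(ABC):
    """Common skeleton: agents plus architecture-specific distribution/aggregation."""

    def __init__(self, agents):
        self.agents = agents  # e.g., language-model-backed workers

    @abstractmethod
    def distribute_task(self, task):
        """Return (agent, subtask) pairs according to the architecture's rules."""

    @abstractmethod
    def aggregate_results(self, results):
        """Combine agent outputs into a single solution (vote, consensus, best-of)."""

    def run(self, task):
        assignments = self.distribute_task(task)
        results = [agent.run(subtask) for agent, subtask in assignments]
        return self.aggregate_results(results)
```

Under this skeleton, a Hierarchical Swarm would implement `distribute_task` with a boss node planning subtasks, while a Competitive Swarm would hand every agent the same task and implement `aggregate_results` as a scored selection.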
# Swarms Monetization Strategy

This strategy includes a variety of business models, potential revenue streams, cashflow structures, and customer identification methods. Let's explore these further.

## Business Models

1. **Platform as a Service (PaaS):** Provide the Swarms AI platform on a subscription basis, charged monthly or annually. This could be tiered based on usage and access to premium features.

2. **API Usage-based Pricing:** Charge customers based on their usage of the Swarms API. The more requests made, the higher the fee.

3. **Managed Services:** Offer complete end-to-end solutions where you manage the entire AI infrastructure for the clients. This could be on a contract basis with a recurring fee.

4. **Training and Certification:** Provide Swarms AI training and certification programs for interested developers and businesses. These could be monetized as separate courses or subscription-based access.

5. **Partnerships:** Collaborate with large enterprises and offer them dedicated Swarm AI services. These could be performance-based contracts, ensuring a mutually beneficial relationship.

6. **Data as a Service (DaaS):** Leverage the data generated by Swarms for insights and analytics, providing valuable business intelligence to clients.

## Potential Revenue Streams

1. **Subscription Fees:** This would be the main revenue stream from providing the Swarms platform as a service.

2. **Usage Fees:** Additional revenue can come from usage fees for businesses that have high demand for the Swarms API.

3. **Contract Fees:** From offering managed services and bespoke solutions to businesses.

4. **Training Fees:** Revenue from providing training and certification programs to developers and businesses.

5. **Partnership Contracts:** Large-scale projects with enterprises, involving dedicated Swarm AI services, could provide substantial income.

6. **Data Insights:** Revenue from selling valuable business intelligence derived from Swarm's aggregated and anonymized data.

## Potential Customers

1. **Businesses Across Sectors:** Any business seeking to leverage AI for automation, efficiency, and data insights could be a potential customer. This includes sectors like finance, eCommerce, logistics, healthcare, and more.

2. **Developers:** Both freelance developers and those working in organizations could use Swarms to enhance their projects and services.

3. **Enterprises:** Large enterprises looking to automate and optimize their operations could greatly benefit from Swarms.

4. **Educational Institutions:** Universities and research institutions could leverage Swarms for research and teaching purposes.

## Roadmap

1. **Landing Page Creation:** Develop a dedicated product page on apac.ai for Swarms.

2. **Hosted Swarms API:** Launch a cloud-based Swarms API service. It should be highly reliable, with robust documentation to attract daily users.

3. **Consumer and Enterprise Subscription Service:** Launch a comprehensive subscription service on The Domain. This would provide users with access to a wide array of APIs and data streams.

4. **Dedicated Capacity Deals:** Partner with large enterprises to offer them dedicated Swarm AI solutions for automating their operations.

5. **Enterprise Partnerships:** Develop partnerships with large enterprises for extensive contract-based projects.

6. **Integration with Collaboration Platforms:** Develop Swarms bots for platforms like Discord and Slack, charging users a subscription fee for access.

7. **Personal Data Instances:** Offer users dedicated instances of all their data that the Swarm can query as needed.

8. **Browser Extension:** Develop a browser extension that integrates with the Swarms platform, offering users a more seamless experience.

Remember, customer satisfaction and a value-centric approach are at the core of any successful monetization strategy. It's essential to continuously iterate and improve the product based on customer feedback and evolving market needs.

---
# Other ideas

1. **Platform as a Service (PaaS):** Create a cloud-based platform that allows users to build, run, and manage applications without the complexity of maintaining the infrastructure. You could charge users a subscription fee for access to the platform and provide different pricing tiers based on usage levels. This could be an attractive solution for businesses that do not have the capacity to build or maintain their own swarm intelligence solutions.

2. **Professional Services:** Offer consultancy and implementation services to businesses looking to utilize the Swarm technology. This could include assisting with integration into existing systems, offering custom development services, or helping customers to build specific solutions using the framework.

3. **Education and Training:** Create a certification program for developers or companies looking to become proficient with the Swarms framework. This could be sold as standalone courses, or bundled with other services.

4. **Managed Services:** Some companies may prefer to outsource the management of their Swarm-based systems. A managed services solution could take care of all the technical aspects, from hosting the solution to ensuring it runs smoothly, allowing the customer to focus on their core business.

5. **Data Analysis and Insights:** Swarm intelligence can generate valuable data and insights. By anonymizing and aggregating this data, you could provide industry reports, trend analysis, and other valuable insights to businesses.

As for the type of platform, Swarms can be offered as a cloud-based solution given its scalability and flexibility. This would also allow you to apply a SaaS/PaaS type monetization model, which provides recurring revenue.

Potential customers could range from small to large enterprises in various sectors such as logistics, eCommerce, finance, and technology, who are interested in leveraging artificial intelligence and machine learning for complex problem solving, optimization, and decision-making.

**Product Brief Monetization Strategy:**

Product Name: Swarms.AI Platform

Product Description: A cloud-based AI and ML platform harnessing the power of swarm intelligence.

1. **Platform as a Service (PaaS):** Offer tiered subscription plans (Basic, Premium, Enterprise) to accommodate different usage levels and business sizes.

2. **Professional Services:** Offer consultancy and custom development services to tailor the Swarms solution to the specific needs of the business.

3. **Education and Training:** Launch an online Swarms.AI Academy with courses and certifications for developers and businesses.

4. **Managed Services:** Provide a premium, fully managed service offering that includes hosting, maintenance, and 24/7 support.

5. **Data Analysis and Insights:** Offer industry reports and customized insights generated from aggregated and anonymized Swarm data.

Potential Customers: Enterprises in sectors such as logistics, eCommerce, finance, and technology. This can be sold globally, provided there's an internet connection.

Marketing Channels: Online marketing (SEO, content marketing, social media), partnerships with tech companies, and direct sales to enterprises.

This strategy is designed to provide multiple revenue streams, while ensuring the Swarms.AI platform is accessible and useful to a range of potential customers.

1. **AI Solution as a Service:** By offering the Swarms framework as a service, businesses can access and utilize the power of multiple LLM agents without the need to maintain the infrastructure themselves. Subscriptions can be tiered based on usage and additional features.

2. **Integration and Custom Development:** Offer integration services to businesses wanting to incorporate the Swarms framework into their existing systems. Also, you could provide custom development for businesses with specific needs not met by the standard framework.

3. **Training and Certification:** Develop an educational platform offering courses, webinars, and certifications on using the Swarms framework. This can serve both developers seeking to broaden their skills and businesses aiming to train their in-house teams.

4. **Managed Swarms Solutions:** For businesses that prefer to outsource their AI needs, provide a complete solution which includes the development, maintenance, and continuous improvement of swarms-based applications.

5. **Data Analytics Services:** Leveraging the aggregated insights from the AI swarms, you could offer data analytics services. Businesses can use these insights to make informed decisions and predictions.

**Type of Platform:**

A cloud-based platform or Software as a Service (SaaS) would be a suitable model. It offers accessibility, scalability, and ease of updates.

**Target Customers:**

The technology can be beneficial for businesses across sectors like eCommerce, technology, logistics, finance, healthcare, and education, among others.

**Product Brief Monetization Strategy:**

Product Name: Swarms.AI

1. **AI Solution as a Service:** Offer different tiered subscriptions (Standard, Premium, and Enterprise), each with varying levels of usage and features.

2. **Integration and Custom Development:** Offer custom development and integration services, priced based on the scope and complexity of the project.

3. **Training and Certification:** Launch the Swarms.AI Academy with courses and certifications, available for a fee.

4. **Managed Swarms Solutions:** Offer fully managed solutions tailored to business needs, priced based on scope and service level agreements.

5. **Data Analytics Services:** Provide insightful reports and data analyses, which can be purchased on a one-off basis or through a subscription.

By offering a variety of services and payment models, Swarms.AI will be able to cater to a diverse range of business needs, from small start-ups to large enterprises. Marketing channels would include digital marketing, partnerships with technology companies, presence at tech events, and direct sales to targeted industries.
# Roadmap

* Create a landing page for Swarms at apac.ai/product/swarms.

* Create a Hosted Swarms API that anybody can use without needing massive GPU infrastructure, with usage-based pricing. Prerequisites for success: Swarms has to be extremely reliable, we need world-class documentation, and we need many daily users. How do we get many daily users? We provide a seamless and fluid experience. How do we create a seamless and fluid experience? We write good code that is modular, provides feedback to the user in times of distress, and ultimately accomplishes the user's tasks.

* Hosted consumer and enterprise subscription as a service on The Domain, where users can interact with 1000s of APIs and ingest 1000s of different data streams.

* Hosted dedicated capacity deals with mega enterprises on automating many operations with Swarms for a monthly subscription of $300,000+.

* Partnerships with enterprises: massive contracts with performance-based fees.

* Discord bot and/or Slack bot with users' personal data, charged via subscription, plus a browser extension.

* Each user gets a dedicated ocean instance of all their data so the swarm can query it as needed.

---
# Swarms Monetization Strategy: A Revolutionary AI-powered Future

Swarms is a powerful AI platform leveraging the transformative potential of Swarm Intelligence. Our ambition is to monetize this groundbreaking technology in ways that generate significant cashflow while providing extraordinary value to our customers.

Here we outline our strategic monetization pathways and provide a roadmap that plots our course to future success.

---

## I. Business Models

1. **Platform as a Service (PaaS):** We provide the Swarms platform as a service, billed on a monthly or annual basis. Subscriptions can range from $50 for basic access to $500+ for premium features and extensive usage.

2. **API Usage-based Pricing:** Customers are billed according to their use of the Swarms API. Starting at $0.01 per request, this creates a cashflow model that rewards extensive platform usage.

3. **Managed Services:** We offer end-to-end solutions, managing clients' entire AI infrastructure. Contract fees start from $100,000 per month, offering both a sustainable cashflow and considerable savings for our clients.

4. **Training and Certification:** A Swarms AI training and certification program is available for developers and businesses. Course costs can range from $200 to $2,000, depending on course complexity and duration.

5. **Partnerships:** We forge collaborations with large enterprises, offering dedicated Swarm AI services. These performance-based contracts start from $1,000,000, creating a potentially lucrative cashflow stream.

6. **Data as a Service (DaaS):** Data generated by Swarms is mined for insights and analytics, with business intelligence reports offered from $500 each.

---

## II. Potential Revenue Streams

1. **Subscription Fees:** From $50 to $500+ per month for platform access.

2. **Usage Fees:** From $0.01 per API request, generating income from high platform usage.

3. **Contract Fees:** Starting from $100,000 per month for managed services.

4. **Training Fees:** From $200 to $2,000 for individual courses or subscription access.

5. **Partnership Contracts:** Contracts starting from $1,000,000, offering major income potential.

6. **Data Insights:** Business intelligence reports starting from $500.

---

## III. Potential Customers

1. **Businesses Across Sectors:** Our offerings cater to businesses across finance, eCommerce, logistics, healthcare, and more.

2. **Developers:** Both freelancers and organization-based developers can leverage Swarms for their projects.

3. **Enterprises:** Swarms offers large enterprises solutions for optimizing operations.

4. **Educational Institutions:** Universities and research institutions can use Swarms for research and teaching.

---

## IV. Roadmap

1. **Landing Page Creation:** Develop a dedicated Swarms product page on apac.ai.

2. **Hosted Swarms API:** Launch a reliable, well-documented cloud-based Swarms API service.

3. **Consumer and Enterprise Subscription Service:** Launch an extensive subscription service on The Domain, providing wide-ranging access to APIs and data streams.

4. **Dedicated Capacity Deals:** Offer large enterprises dedicated Swarm AI solutions, starting from a $300,000 monthly subscription.

5. **Enterprise Partnerships:** Develop performance-based contracts with large enterprises.

6. **Integration with Collaboration Platforms:** Develop Swarms bots for platforms like Discord and Slack, charging a subscription fee for access.

7. **Personal Data Instances:** Offer users dedicated data instances that the Swarm can query as needed.

8. **Browser Extension:** Develop a browser extension that integrates with the Swarms platform for a seamless user experience.

---

Our North Star remains customer satisfaction and value provision. As we embark on this journey, we will continuously refine our product based on customer feedback and evolving market needs, ensuring we lead in the age of AI-driven solutions.
## **Platform Distribution Strategy for Swarms**

*Note: This strategy aims to diversify the presence of 'Swarms' across various platforms and mediums while focusing on monetization and value creation for its users.*

---

### **1. Framework:**

#### **Objective:**
To offer Swarms as an integrated solution within popular frameworks to ensure that developers and businesses can seamlessly incorporate its functionalities.

#### **Strategy:**

* **Language/Framework Integration:**
  * Target popular frameworks like Django and Flask for Python, Express.js for Node, etc.
  * Create SDKs or plugins for easy integration.

* **Monetization:**
  * Freemium Model: Offer basic integration for free, and charge for additional features or advanced integrations.
  * Licensing: Allow businesses to purchase licenses for enterprise-level integrations.

* **Promotion:**
  * Engage in partnerships with popular online coding platforms like Udemy, Coursera, etc., offering courses and tutorials on integrating Swarms.
  * Host webinars and write technical blogs to promote the integration benefits.

---

### **2. Paid API:**

#### **Objective:**
To provide a scalable solution for developers and businesses that want direct access to Swarms' functionalities without integrating the entire framework.

#### **Strategy:**

* **API Endpoints:**
  * Offer various endpoints catering to different functionalities.
  * Maintain robust documentation to ensure ease of use.

* **Monetization:**
  * Usage-based Pricing: Charge based on the number of API calls.
  * Subscription Tiers: Provide tiered packages based on usage limits and advanced features.

* **Promotion:**
  * List on API marketplaces like RapidAPI.
  * Engage in SEO to make the API documentation discoverable.

---

### **3. Domain Hosted:**

#### **Objective:**
To provide a centralized web platform where users can directly access and engage with Swarms' offerings.

#### **Strategy:**

* **User-Friendly Interface:**
  * Ensure a seamless user experience with intuitive design.
  * Incorporate features like real-time chat support, tutorials, and an FAQ section.

* **Monetization:**
  * Subscription Model: Offer monthly/annual subscriptions for premium features.
  * Affiliate Marketing: Partner with related tech products/services and earn through referrals.

* **Promotion:**
  * Invest in PPC advertising on platforms like Google Ads.
  * Engage in content marketing, targeting keywords related to Swarms' offerings.

---

### **4. Build Your Own (No-Code Platform):**

#### **Objective:**
To cater to the non-developer audience, allowing them to leverage Swarms' features without any coding expertise.

#### **Strategy:**

* **Drag-and-Drop Interface:**
  * Offer customizable templates.
  * Ensure integration with popular platforms and apps.

* **Monetization:**
  * Freemium Model: Offer basic features for free, and charge for advanced functionalities.
  * Marketplace for Plugins: Allow third-party developers to sell their plugins/extensions on the platform.

* **Promotion:**
  * Partner with no-code communities and influencers.
  * Offer promotions and discounts to early adopters.

---

### **5. Marketplace for the No-Code Platform:**

#### **Objective:**
To create an ecosystem where third-party developers can contribute, and users can enhance their Swarms experience.

#### **Strategy:**

* **Open API for Development:**
  * Offer robust documentation and developer support.
  * Ensure a strict quality check for marketplace additions.

* **Monetization:**
  * Revenue Sharing: Take a percentage cut from third-party sales.
  * Featured Listings: Charge developers for premium listings.

* **Promotion:**
  * Host hackathons and competitions to boost developer engagement.
  * Promote top plugins/extensions through email marketing and on the main platform.

---

### **Future Outlook & Expansion:**

* **Hosted Dedicated Capacity:** Hosted dedicated capacity deals for enterprises starting at $399,999.
* **Decentralized Free Peer-to-Peer Endpoint Hosted on The Grid:** A hosted endpoint by the people, for the people.
* **Browser Extension:** The Athena browser extension for deep browser automation, monetized via subscription and usage-based pricing.
* **Mobile Application:** Develop a mobile app version of Swarms to tap into the vast mobile user base.
* **Global Expansion:** Localize the platform for non-English-speaking regions to tap into global markets.
* **Continuous Learning:** Regularly collect user feedback and iterate on product features.

---
### **50 Creative Distribution Platforms for Swarms**

1. **E-commerce Integrations:** Platforms like Shopify, WooCommerce, where Swarms can add value to sellers.
2. **Web Browser Extensions:** Chrome, Firefox, and Edge extensions that bring Swarms features directly to users.
3. **Podcasting Platforms:** Swarms-themed content on platforms like Spotify, Apple Podcasts to reach aural learners.
4. **Virtual Reality (VR) Platforms:** Integration with VR experiences on Oculus or Viveport.
5. **Gaming Platforms:** Tools or plugins for game developers on Steam, Epic Games.
6. **Decentralized Platforms:** Using blockchain, create decentralized app (DApp) versions of Swarms.
7. **Chat Applications:** Integrate with popular messaging platforms like WhatsApp, Telegram, Slack.
8. **AI Assistants:** Integration with Siri, Alexa, Google Assistant to provide Swarms functionalities via voice commands.
9. **Freelancing Websites:** Offer tools or services for freelancers on platforms like Upwork, Fiverr.
10. **Online Forums:** Platforms like Reddit, Quora, where users can discuss or access Swarms.
11. **Educational Platforms:** Sites like Khan Academy, Udacity where Swarms can enhance learning experiences.
12. **Digital Art Platforms:** Integrate with platforms like DeviantArt, Behance.
13. **Open-source Repositories:** Hosting Swarms on GitHub, GitLab, Bitbucket with open-source plugins.
14. **Augmented Reality (AR) Apps:** Create AR experiences powered by Swarms.
15. **Smart Home Devices:** Integrate Swarms' functionalities into smart home devices.
16. **Newsletters:** Platforms like Substack, where Swarms insights can be shared.
17. **Interactive Kiosks:** In malls, airports, and other public places.
18. **IoT Devices:** Incorporate Swarms in devices like smart fridges, smartwatches.
19. **Collaboration Tools:** Platforms like Trello, Notion, offering Swarms-enhanced productivity.
20. **Dating Apps:** An AI-enhanced matching algorithm powered by Swarms.
21. **Music Platforms:** Integrate with Spotify, SoundCloud for music-related AI functionalities.
22. **Recipe Websites:** Platforms like AllRecipes, Tasty with AI-recommended recipes.
23. **Travel & Hospitality:** Integrate with platforms like Airbnb, Tripadvisor for AI-based recommendations.
24. **Language Learning Apps:** Duolingo, Rosetta Stone integrations.
25. **Virtual Events Platforms:** Websites like Hopin, Zoom where Swarms can enhance the virtual event experience.
26. **Social Media Management:** Tools like Buffer, Hootsuite with AI insights by Swarms.
27. **Fitness Apps:** Platforms like MyFitnessPal, Strava with AI fitness insights.
28. **Mental Health Apps:** Integration into apps like Calm, Headspace for AI-driven wellness.
29. **E-books Platforms:** Amazon Kindle, Audible with AI-enhanced reading experiences.
30. **Sports Analysis Tools:** Websites like ESPN, Sky Sports where Swarms can provide insights.
31. **Financial Tools:** Integration into platforms like Mint, Robinhood for AI-driven financial advice.
32. **Public Libraries:** Digital platforms of public libraries for enhanced reading experiences.
33. **3D Printing Platforms:** Websites like Thingiverse, Shapeways with AI customization.
34. **Meme Platforms:** Websites like Memedroid, 9GAG where Swarms can suggest memes.
35. **Astronomy Apps:** Platforms like Star Walk, NASA's Eyes with AI-driven space insights.
36. **Weather Apps:** Integration into Weather.com, AccuWeather for predictive analysis.
37. **Sustainability Platforms:** Websites like Ecosia, GoodGuide with AI-driven eco-tips.
38. **Fashion Apps:** Platforms like ASOS, Zara with AI-based style recommendations.
39. **Pet Care Apps:** Integration into PetSmart, Chewy for AI-driven pet care tips.
40. **Real Estate Platforms:** Websites like Zillow, Realtor with AI-enhanced property insights.
41. **DIY Platforms:** Websites like Instructables, DIY.org with AI project suggestions.
42. **Genealogy Platforms:** Ancestry, MyHeritage with AI-driven family tree insights.
43. **Car Rental & Sale Platforms:** Integration into AutoTrader, Turo for AI-driven vehicle suggestions.
44. **Wedding Planning Websites:** Platforms like Zola, The Knot with AI-driven planning.
45. **Craft Platforms:** Websites like Etsy, Craftsy with AI-driven craft suggestions.
46. **Gift Recommendation Platforms:** AI-driven gift suggestions for websites like Gifts.com.
47. **Study & Revision Platforms:** Websites like Chegg, Quizlet with AI-driven study guides.
48. **Local Business Directories:** Yelp, Yellow Pages with AI-enhanced reviews.
49. **Networking Platforms:** LinkedIn, Meetup with AI-driven connection suggestions.
50. **Lifestyle Magazines' Digital Platforms:** Websites like Vogue, GQ with AI-curated fashion and lifestyle insights.

---

*Endnote: Leveraging these diverse platforms ensures that Swarms becomes an integral part of multiple ecosystems, enhancing its visibility and user engagement.*
|
|||||||
|
### FAQ on Swarm Intelligence and Multi-Agent Systems

#### What is an agent in the context of AI and swarm intelligence?

In artificial intelligence (AI), an agent refers to an LLM with an objective to accomplish.

In swarm intelligence, each agent interacts with other agents and possibly the environment to achieve complex collective behaviors or solve problems more efficiently than individual agents could on their own.

#### Why do you need Swarms at all?

Individual agents are limited by a vast array of issues such as context window limits, single-task execution, hallucination, and lack of collaboration.

#### How does a swarm work?

A swarm works through the principles of decentralized control, local interactions, and simple rules followed by each agent. Unlike centralized systems, where a single entity dictates the behavior of all components, in a swarm each agent makes its own decisions based on local information and interactions with nearby agents. These local interactions lead to the emergence of complex, organized behaviors or solutions at the collective level, enabling the swarm to tackle tasks efficiently.

#### Why do you need more agents in a swarm?

More agents in a swarm can enhance its problem-solving capabilities, resilience, and efficiency. With more agents:

- **Diversity and Specialization**: The swarm can leverage a wider range of skills, knowledge, and perspectives, allowing for more creative and effective solutions to complex problems.
- **Scalability**: Adding more agents can increase the swarm's capacity to handle larger tasks or multiple tasks simultaneously.
- **Robustness**: A larger number of agents enhances the system's redundancy and fault tolerance, as the failure of a few agents has a minimal impact on the overall performance of the swarm.

#### Isn't it more expensive to use more agents?

While deploying more agents can initially increase costs, especially in terms of computational resources, hosting, and potentially API usage, several factors and strategies can mitigate these expenses:

- **Efficiency at Scale**: Larger swarms can often solve problems more quickly or effectively, reducing the overall computational time and resources required.
- **Optimization and Caching**: Implementing optimizations and caching strategies can reduce redundant computations, lowering the workload on individual agents and the overall system.
- **Dynamic Scaling**: Utilizing cloud services that offer dynamic scaling can ensure you only pay for the resources you need when you need them, optimizing cost-efficiency.

#### Can swarms make decisions better than individual agents?

Yes, swarms can make better decisions than individual agents for several reasons:

- **Collective Intelligence**: Swarms combine the knowledge and insights of multiple agents, leading to more informed and well-rounded decision-making processes.
- **Error Correction**: The collaborative nature of swarms allows for error checking and correction among agents, reducing the likelihood of mistakes.
- **Adaptability**: Swarms are highly adaptable to changing environments or requirements, as the collective can quickly reorganize or shift strategies based on new information.

#### How do agents in a swarm communicate?

Communication in a swarm can vary based on the design and purpose of the system but generally involves either direct or indirect interactions:

- **Direct Communication**: Agents exchange information directly through messaging, signals, or other communication protocols designed for the system (see the sketch after this list).
- **Indirect Communication**: Agents influence each other through the environment, a method known as stigmergy. Actions by one agent alter the environment, which in turn influences the behavior of other agents.
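A minimal sketch of direct agent-to-agent messaging in plain Python. The `MessageBus` class and its method names are illustrative assumptions for this example, not part of any particular framework:

```python
from collections import defaultdict, deque


class MessageBus:
    """Routes messages directly between named agents (direct communication)."""

    def __init__(self):
        # One inbox (FIFO queue) per agent name.
        self.inboxes = defaultdict(deque)

    def send(self, sender: str, recipient: str, content: str) -> None:
        self.inboxes[recipient].append({"from": sender, "content": content})

    def receive(self, agent_name: str) -> dict | None:
        inbox = self.inboxes[agent_name]
        return inbox.popleft() if inbox else None


bus = MessageBus()
bus.send("researcher", "writer", "Key findings: demand is seasonal.")
print(bus.receive("writer"))  # {'from': 'researcher', 'content': '...'}
```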
#### Are swarms only useful in computational tasks?

While swarms are often associated with computational tasks, their applications extend far beyond. Swarms can be utilized in:

- **Robotics**: Coordinating multiple robots for tasks like search and rescue, exploration, or surveillance.
- **Environmental Monitoring**: Using sensor networks to monitor pollution, wildlife, or climate conditions.
- **Social Sciences**: Modeling social behaviors or economic systems to understand complex societal dynamics.
- **Healthcare**: Coordinating care strategies in hospital settings or managing pandemic responses through distributed data analysis.

#### How do you ensure the security of a swarm system?

Security in swarm systems involves:

- **Encryption**: Ensuring all communications between agents are encrypted to prevent unauthorized access or manipulation.
- **Authentication**: Implementing strict authentication mechanisms to verify the identity of each agent in the swarm.
- **Resilience to Attacks**: Designing the swarm to continue functioning effectively even if some agents are compromised or attacked, utilizing redundancy and fault tolerance strategies.
#### How do individual agents within a swarm share insights without direct learning mechanisms like reinforcement learning?

In the context of pre-trained Large Language Models (LLMs) that operate within a swarm, sharing insights typically involves explicit communication and data exchange protocols rather than direct learning mechanisms like reinforcement learning. Here's how it can work:

- **Shared Databases and Knowledge Bases**: Agents can write to and read from a shared database or knowledge base where insights, generated content, and relevant data are stored. This allows agents to benefit from the collective experience of the swarm by accessing information that other agents have contributed (a sketch follows this list).
- **APIs for Information Exchange**: Custom APIs can facilitate the exchange of information between agents. Through these APIs, agents can request specific information or insights from others within the swarm, effectively sharing knowledge without direct learning.
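A minimal sketch of such a shared knowledge base, assuming a simple in-process store; the class and method names are invented for illustration:

```python
import threading


class SharedKnowledgeBase:
    """Thread-safe store that swarm agents can write to and read from."""

    def __init__(self):
        self._lock = threading.Lock()
        self._entries: list[dict] = []

    def contribute(self, agent: str, topic: str, insight: str) -> None:
        with self._lock:
            self._entries.append({"agent": agent, "topic": topic, "insight": insight})

    def query(self, topic: str) -> list[dict]:
        with self._lock:
            return [e for e in self._entries if e["topic"] == topic]


kb = SharedKnowledgeBase()
kb.contribute("market-analyst", "pricing", "Competitor lowered prices by 5%.")
for entry in kb.query("pricing"):
    print(f"{entry['agent']}: {entry['insight']}")
```

In production, the same pattern would typically sit behind a real database or vector store rather than an in-memory list.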
#### How do you balance the autonomy of individual LLMs with the need for coherent collective behavior in a swarm?

Balancing autonomy with collective coherence in a swarm of LLMs involves:

- **Central Coordination Mechanism**: Implementing a lightweight central coordination mechanism that can assign tasks, distribute information, and collect outputs from individual LLMs. This ensures that while each LLM operates autonomously, their actions are aligned with the swarm's overall objectives (see the sketch after this list).
- **Standardized Communication Protocols**: Developing standardized protocols for how LLMs communicate and share information ensures that even though each agent works autonomously, the information exchange remains coherent and aligned with the collective goals.
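A hedged sketch of such a lightweight coordinator, assuming each agent is any object with a `run(task)` method; all names here are illustrative:

```python
class SwarmCoordinator:
    """Assigns tasks to autonomous agents and collects their outputs."""

    def __init__(self, agents: dict):
        # Maps a role name (e.g. "research") to an agent object.
        self.agents = agents
        self.results: dict[str, str] = {}

    def assign(self, role: str, task: str) -> None:
        # Each agent decides *how* to do the task; the coordinator only
        # decides *who* does it, preserving agent autonomy.
        self.results[role] = self.agents[role].run(task)

    def collect(self) -> dict[str, str]:
        return dict(self.results)


class EchoAgent:  # stand-in for a real LLM-backed agent
    def run(self, task: str) -> str:
        return f"done: {task}"


coordinator = SwarmCoordinator({"research": EchoAgent(), "writing": EchoAgent()})
coordinator.assign("research", "Gather Q3 sales figures")
coordinator.assign("writing", "Draft the executive summary")
print(coordinator.collect())
```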
#### How do LLM swarms adapt to changing environments or tasks without machine learning techniques?

Adaptation in LLM swarms, without relying on machine learning techniques for dynamic learning, can be achieved through:

- **Dynamic Task Allocation**: A central system or distributed algorithm can dynamically allocate tasks to different LLMs based on the changing environment or requirements. This ensures that the most suitable LLMs are addressing tasks for which they are best suited as conditions change (a sketch follows this list).
- **Pre-trained Versatility**: Utilizing a diverse set of pre-trained LLMs with different specialties or training data allows the swarm to select the most appropriate agent for a task as the requirements evolve.
- **In-Context Learning**: In-context learning can also be employed within LLM swarms to adapt to changing environments or tasks: relevant examples, instructions, or shared insights are placed directly into each agent's prompt, shifting its behavior without any retraining.
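A small sketch of dynamic task allocation based on declared specialties; the routing rule and agent names are assumptions for illustration:

```python
AGENT_SPECIALTIES = {
    "finance-agent": {"accounting", "forecasting"},
    "legal-agent": {"contracts", "compliance"},
    "general-agent": set(),  # fallback with no declared specialty
}


def allocate(task_topic: str) -> str:
    """Route a task to the agent whose specialties match its topic."""
    for agent, specialties in AGENT_SPECIALTIES.items():
        if task_topic in specialties:
            return agent
    return "general-agent"


print(allocate("forecasting"))  # finance-agent
print(allocate("poetry"))       # general-agent
```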
#### Can LLM swarms operate in physical environments, or are they limited to digital spaces?

LLM swarms primarily operate in digital spaces, given their nature as software entities. However, they can interact with physical environments indirectly through interfaces with sensors, actuators, or other devices connected to the Internet of Things (IoT). For example, LLMs can process data from physical sensors and control devices based on their outputs, enabling applications like smart home management or autonomous vehicle navigation.

#### Without direct learning from each other, how do agents in a swarm improve over time?

Improvement over time in a swarm of pre-trained LLMs, without direct learning from each other, can be achieved through:

- **Human Feedback**: Incorporating feedback from human operators or users can guide adjustments to the usage patterns or selection criteria of LLMs within the swarm, optimizing performance based on observed outcomes.
- **Periodic Re-training and Updating**: The individual LLMs can be periodically re-trained or updated by their developers based on collective insights and feedback from their deployment within swarms. While this does not involve direct learning from each encounter, it allows the LLMs to improve over time based on aggregated experiences.

These mechanisms reflect the specific context of pre-trained LLMs operating within a swarm, focusing on communication, coordination, and adaptation rather than on direct learning between agents.

#### Conclusion

Swarms represent a powerful paradigm in AI, offering innovative solutions to complex, dynamic problems through collective intelligence and decentralized control. While challenges exist, particularly regarding cost and security, strategic design and management can leverage the strengths of swarm intelligence to achieve remarkable efficiency, adaptability, and robustness in a wide range of applications.
---

# The Swarms Flywheel

1. **Building a Supportive Community:** Initiate by establishing an engaging and inclusive open-source community for both developers and sales freelancers around Swarms. Regular online meetups, webinars, tutorials, and sales training can make them feel welcome and encourage contributions and sales efforts.
2. **Increased Contributions and Sales Efforts:** The more engaged the community, the more developers will contribute to Swarms and the more effort sales freelancers will put into selling Swarms.
3. **Improvement in Quality and Market Reach:** More developer contributions mean better quality, reliability, and feature offerings from Swarms. Simultaneously, increased sales efforts from freelancers boost Swarms' market penetration and visibility.
4. **Rise in User Base:** As Swarms becomes more robust and better known, the user base grows, driving more revenue.
5. **Greater Financial Incentives:** Increased revenue can be redirected to offer more significant financial incentives to both developers and salespeople. Developers can be incentivized based on their contribution to Swarms, and salespeople can be rewarded with higher commissions.
6. **Attract More Developers and Salespeople:** These financial incentives, coupled with the recognition and experience from participating in a successful project, attract more developers and salespeople to the community.
7. **Wider Adoption of Swarms:** An ever-improving product, a growing user base, and an increasing number of passionate salespeople accelerate the adoption of Swarms.
8. **Return to Step 1:** As the community, user base, and sales network continue to grow, the cycle repeats, each time speeding up the flywheel.

```markdown
+---------------------+
|     Building a      |
|     Supportive      | <--+
|      Community      |    |
+----------+----------+    |
           |               |
           v               |
+----------+----------+    |
|      Increased      |    |
|   Contributions &   |    |
|    Sales Efforts    |    |
+----------+----------+    |
           |               |
           v               |
+----------+----------+    |
|   Improvement in    |    |
|  Quality & Market   |    |
|        Reach        |    |
+----------+----------+    |
           |               |
           v               |
+----------+----------+    |
|    Rise in User     |    |
|        Base         |    |
+----------+----------+    |
           |               |
           v               |
+----------+----------+    |
|  Greater Financial  |    |
|     Incentives      |    |
+----------+----------+    |
           |               |
           v               |
+----------+----------+    |
|    Attract More     |    |
|    Developers &     |    |
|     Salespeople     |    |
+----------+----------+    |
           |               |
           v               |
+----------+----------+    |
|  Wider Adoption of  |    |
|       Swarms        |----+
+---------------------+
```

# Potential Risks and Mitigations:

1. **Insufficient Contributions or Quality of Work**: Open-source efforts rely on individuals being willing and able to spend time contributing. If not enough people participate, or the work they produce is of poor quality, the product development could stall.
    * **Mitigation**: Create a robust community with clear guidelines, support, and resources. Provide incentives for quality contributions, such as a reputation system, swag, or financial rewards. Conduct thorough code reviews to ensure the quality of contributions.
2. **Lack of Sales Results**: Commission-based salespeople will only continue to sell the product if they're successful. If they aren't making enough sales, they may lose motivation and cease their efforts.
    * **Mitigation**: Provide adequate sales training and resources. Ensure the product-market fit is strong, and adjust messaging or sales tactics as necessary. Consider implementing a minimum commission or base pay to reduce risk for salespeople.
3. **Poor User Experience or User Adoption**: If users don't find the product useful or easy to use, they won't adopt it, and the user base won't grow. This could also discourage salespeople and contributors.
    * **Mitigation**: Prioritize user experience in the product development process. Regularly gather and incorporate user feedback. Ensure robust user support is in place.
4. **Inadequate Financial Incentives**: If the financial rewards don't justify the time and effort contributors and salespeople are putting in, they will likely disengage.
    * **Mitigation**: Regularly review and adjust financial incentives as needed. Ensure that the method for calculating and distributing rewards is transparent and fair.
5. **Security and Compliance Risks**: As the user base grows and the software becomes more complex, the risk of security issues increases. Moreover, as contributors from various regions join, compliance with various international laws could become an issue.
    * **Mitigation**: Establish strong security practices from the start. Regularly conduct security audits. Seek legal counsel to understand and adhere to international laws and regulations.

## Activation Plan for the Flywheel:

1. **Community Building**: Begin by fostering a supportive community around Swarms. Encourage early adopters to contribute and provide feedback. Create comprehensive documentation, community guidelines, and a forum for discussion and support.
2. **Sales and Development Training**: Provide resources and training for salespeople and developers. Make sure they understand the product, its value, and how to effectively contribute or sell.
3. **Increase Contributions and Sales Efforts**: Encourage increased participation by highlighting successful contributions and sales, rewarding top contributors and salespeople, and regularly communicating about the project's progress and impact.
4. **Iterate and Improve**: Continually gather and implement feedback to improve Swarms and its market reach. The better the product and its alignment with the market, the more the user base will grow.
5. **Expand User Base**: As the product improves and sales efforts continue, the user base should grow. Ensure you have the infrastructure to support this growth and maintain a positive user experience.
6. **Increase Financial Incentives**: As the user base and product grow, so too should the financial incentives. Make sure rewards continue to be competitive and attractive.
7. **Attract More Contributors and Salespeople**: As the financial incentives and success of the product increase, this should attract more contributors and salespeople, further feeding the flywheel.

Throughout this process, it's important to regularly reassess and adjust your strategy as necessary. Stay flexible and responsive to changes in the market, user feedback, and the evolving needs of the community.

---
# Frontend Contributor Guide

## Mission

At the heart of Swarms is the mission to democratize multi-agent technology, making it accessible to businesses of all sizes around the globe. This technology, which allows for the orchestration of multiple autonomous agents to achieve complex goals, has the potential to revolutionize industries by enhancing efficiency, scalability, and innovation. Swarms is committed to leading this charge by developing a platform that empowers businesses and individuals to harness the power of multi-agent systems without the need for specialized knowledge or resources.

## Understanding Your Impact as a Frontend Engineer

**Crafting User Experiences:** As a frontend engineer at Swarms, you play a crucial role in making multi-agent technology understandable and usable for businesses worldwide. Your work involves translating complex systems into intuitive interfaces, ensuring users can easily navigate, manage, and benefit from multi-agent solutions. By focusing on user-centric design and seamless integration, you help bridge the gap between advanced technology and practical business applications.

**Skills and Attributes for Success:** Successful frontend engineers at Swarms combine technical expertise with a passion for innovation and a deep understanding of user needs. Proficiency in modern frontend technologies, such as React, NextJS, and Tailwind, is just the beginning. You also need a strong grasp of usability principles, accessibility standards, and the ability to work collaboratively with cross-functional teams. Creativity, problem-solving skills, and a commitment to continuous learning are essential for developing solutions that meet diverse business needs.

## Joining the Team

As you contribute to Swarms, you become part of a collaborative effort to change the world. We value each contribution and provide constructive feedback to help you grow. Outstanding contributors who share our vision and demonstrate exceptional skill and dedication are invited to join our team, where they can have an even greater impact on our mission.

### Becoming a Full-Time Swarms Engineer

Swarms is radically devoted to open source and transparency. To join the full-time team, you must first contribute to the open-source repository so we can assess your technical capability and general way of working. After a series of quality contributions, we'll offer you a full-time position!

Joining Swarms full-time means more than just a job. It's an opportunity to be at the forefront of technological innovation, working alongside passionate professionals dedicated to making a difference. We look for individuals who are not only skilled but also driven by the desire to make multi-agent technology accessible and beneficial to businesses worldwide.

## Resources

- **Project Management Details**
    - **Linear**: Our projects and tasks at a glance. Get a sense of our workflow and priorities.
    - [View on Linear](https://linear.app/swarms/join/e7f4c6c560ffa0e1395820682f4e110a?s=1)

- **Design System and UI/UX Guidelines**
    - **Figma**: Dive into our design system to grasp the aesthetics and user experience objectives of Swarms.
    - [View on Figma](https://www.figma.com/file/KL4VIXfZKwwLgAes2WbGNa/Swarms-Cloud-Platform?type=design&node-id=0%3A1&mode=design&t=MkrM0mBQa6qsTDtJ-1)

- **Swarms Platform Repository**
    - **GitHub**: The hub of our development activities. Familiarize yourself with our codebase and current projects.
    - [Visit GitHub Repository](https://github.com/kyegomez/swarms-platform)

- **[Swarms Community](https://discord.gg/pSTSxqDk)**

### Design Style & User Experience

- [How to build great products with game design, not gamification](https://blog.superhuman.com/game-design-not-gamification/)

---
# Careers at Swarms

We are a team of engineers, developers, and visionaries on a mission to build the future of AI by orchestrating multi-agent collaboration. We move fast, think ambitiously, and deliver with urgency. Join us if you want to be part of building the next generation of multi-agent systems, redefining how businesses automate operations and leverage AI.

**We offer none of the following benefits yet:**

- No medical, dental, or vision insurance
- No paid time off
- No life or AD&D insurance
- No short-term or long-term disability insurance
- No 401(k) plan

**Working hours:** 9 AM to 10 PM, every day, 7 days a week. This is not for people who seek work-life balance.

---

### Hiring Process: How to Join Swarms

We have a simple 3-step hiring process:

**NOTE:** We do not consider applicants who have not previously submitted a PR. To be considered, you must submit a PR containing a new feature or a bug fix.

1. **Submit a pull request (PR)**: Start by submitting an approved PR to the [Swarms GitHub repository](https://github.com/kyegomez/swarms) or the appropriate repository.
2. **Code review**: Our technical team will review your PR. If it meets our standards, you will be invited for a quick interview.
3. **Final interview**: Discuss your contributions and approach with our team. If you pass, you're in!

There are no recruiters. All evaluations are done by our technical team.

---

# Location

- **Palo Alto, CA**: Our Palo Alto office houses the majority of our core research teams, including prompting, agent design, and model training.

- **Miami, FL**: Our Miami office focuses on prompt engineering, agent design, and more.

### Open Roles at Swarms

**Infrastructure Engineer**

- Build and maintain the systems that run our AI multi-agent infrastructure.
- Expertise in SkyPilot, AWS, Terraform.
- Ensure seamless, high-availability environments for agent operations.

**Agent Engineer**

- Design, develop, and orchestrate complex swarms of AI agents.
- Extensive experience with Python, multi-agent systems, and neural networks.
- Ability to create dynamic and efficient agent architectures from scratch.

**Prompt Engineer**

- Craft highly optimized prompts that drive our LLM-based agents.
- Specialize in instruction-based prompts, multi-shot examples, and production-grade deployment.
- Collaborate with agents to deliver state-of-the-art solutions.

**Front-End Engineer**

- Build sleek, intuitive interfaces for interacting with swarms of agents.
- Proficiency in Next.js, FastAPI, and modern front-end technologies.
- Design with the user experience in mind, integrating complex AI features into simple workflows.

---
The following utility estimates a monthly service charge from development, operational, and margin inputs:

```python
def calculate_monthly_charge(
    development_time_hours: float,
    hourly_rate: float,
    amortization_months: int,
    api_calls_per_month: int,
    cost_per_api_call: float,
    monthly_maintenance: float,
    additional_monthly_costs: float,
    profit_margin_percentage: float,
) -> float:
    """
    Calculate the monthly charge for a service based on various cost factors.

    Parameters:
    - development_time_hours (float): The total number of hours spent on development and setup.
    - hourly_rate (float): The rate per hour for development and setup.
    - amortization_months (int): The number of months over which to amortize the development and setup costs.
    - api_calls_per_month (int): The number of API calls made per month.
    - cost_per_api_call (float): The cost per API call.
    - monthly_maintenance (float): The monthly maintenance cost.
    - additional_monthly_costs (float): Any additional monthly costs.
    - profit_margin_percentage (float): The desired profit margin as a percentage (e.g., 20 for 20%).

    Returns:
    - monthly_charge (float): The calculated monthly charge for the service.
    """
    # Development and setup costs, amortized monthly
    development_and_setup_costs_monthly = (
        development_time_hours * hourly_rate
    ) / amortization_months

    # Operational costs per month
    operational_costs_monthly = (
        (api_calls_per_month * cost_per_api_call)
        + monthly_maintenance
        + additional_monthly_costs
    )

    # Total monthly costs
    total_monthly_costs = (
        development_and_setup_costs_monthly + operational_costs_monthly
    )

    # Apply the profit margin
    monthly_charge = total_monthly_costs * (1 + profit_margin_percentage / 100)

    return monthly_charge


# Example usage:
monthly_charge = calculate_monthly_charge(
    development_time_hours=100,
    hourly_rate=500,
    amortization_months=12,
    api_calls_per_month=500_000,
    cost_per_api_call=0.002,
    monthly_maintenance=1000,
    additional_monthly_costs=300,
    profit_margin_percentage=20,  # was 10000 in the original, almost certainly a typo
)

print(f"Monthly Charge: ${monthly_charge:.2f}")  # -> Monthly Charge: $7760.00
```

---
## Purpose

Artificial intelligence has grown at an exponential rate over the past decade. Yet, we are far from fully harnessing its potential. Today's AI operates in isolation, each model working separately in its own corner. But life doesn't work like that. The world doesn't work like that. Success isn't built in silos; it's built in teams.

Imagine a world where AI models work in unison. Where they can collaborate, interact, and pool their collective intelligence to achieve more than any single model could. This is the future we envision. But today, we lack a framework for AI to collaborate effectively, to form a true swarm of intelligent agents.

This is a difficult problem, one that has eluded solution. It requires sophisticated systems that can allow individual models to not just communicate but also understand each other, pool knowledge and resources, and create collective intelligence. This is the next frontier of AI.

But here at Swarms, we have a secret sauce. It's not just a technology or a breakthrough invention. It's a way of thinking: the philosophy of rapid iteration. With each cycle, we make massive progress. We experiment, we learn, and we grow. We have developed a pioneering framework that can enable AI models to work together as a swarm, combining their strengths to create richer, more powerful outputs.

We are uniquely positioned to take on this challenge with 1,500+ devoted researchers in Agora. We have assembled a team of world-class experts, experienced and driven, united by a shared vision. Our commitment to breaking barriers, pushing boundaries, and our belief in the power of collective intelligence makes us the best team to usher in this future and fundamentally advance our species, Humanity.

---
# Research Lists

A compilation of projects, papers, and blogs on autonomous agents.

## Table of Contents

- [Projects](#projects)
- [Articles](#articles)
- [Talks](#talks)

## Projects

### Developer tools

- [2023/08/10] [ModelScope-Agent](https://github.com/modelscope/modelscope-agent) - An Agent Framework Connecting Models in ModelScope with the World
- [2023/05/25] [Gorilla](https://github.com/ShishirPatil/gorilla) - An API store for LLMs
- [2023/03/31] [BMTools](https://github.com/OpenBMB/BMTools) - Tool Learning for Big Models, Open-Source Solutions of ChatGPT-Plugins
- [2023/03/09] [LMQL](https://github.com/eth-sri/lmql) - A query language for programming (large) language models.
- [2022/10/25] [Langchain](https://github.com/hwchase17/langchain) - ⚡ Building applications with LLMs through composability ⚡

### Applications

- [2023/07/08] [ShortGPT](https://github.com/RayVentura/ShortGPT) - 🚀🎬 An experimental AI framework for automated short/video content creation. Enables creators to rapidly produce, manage, and deliver content using AI and automation.
- [2023/07/05] [gpt-researcher](https://github.com/assafelovic/gpt-researcher) - GPT-based autonomous agent that does online comprehensive research on any given topic
- [2023/07/04] [DemoGPT](https://github.com/melih-unsal/DemoGPT) - 🧩 DemoGPT enables you to create quick demos by just using prompts. [[demo]](demogpt.io)
- [2023/06/30] [MetaGPT](https://github.com/geekan/MetaGPT) - 🌟 The Multi-Agent Framework: Given a one-line requirement, return PRD, Design, Tasks, Repo
- [2023/06/11] [gpt-engineer](https://github.com/AntonOsika/gpt-engineer) - Specify what you want it to build, the AI asks for clarification, and then builds it.
- [2023/05/16] [SuperAGI](https://github.com/TransformerOptimus/SuperAGI) - <⚡️> A dev-first open source autonomous AI agent framework, enabling developers to build, manage & run useful autonomous agents quickly and reliably.
- [2023/05/13] [Developer](https://github.com/smol-ai/developer) - Human-centric & Coherent Whole Program Synthesis, aka your own personal junior developer
- [2023/04/07] [AgentGPT](https://github.com/reworkd/AgentGPT) - 🤖 Assemble, configure, and deploy autonomous AI Agents in your browser. [[demo]](agentgpt.reworkd.ai)
- [2023/04/03] [BabyAGI](https://github.com/yoheinakajima/babyagi) - An example of an AI-powered task management system
- [2023/03/30] [AutoGPT](https://github.com/Significant-Gravitas/Auto-GPT) - An experimental open-source attempt to make GPT-4 fully autonomous.

### Benchmarks

- [2023/08/07] [AgentBench](https://github.com/THUDM/AgentBench) - A Comprehensive Benchmark to Evaluate LLMs as Agents. [paper](https://arxiv.org/abs/2308.03688)
- [2023/06/18] [Auto-GPT-Benchmarks](https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks) - A repo built for the purpose of benchmarking the performance of agents, regardless of how they are set up and how they work.
- [2023/05/28] [ToolBench](https://github.com/OpenBMB/ToolBench) - An open platform for training, serving, and evaluating large language models for tool learning.

## Articles

### Research Papers

- [2023/08/11] [BOLAA: Benchmarking and Orchestrating LLM-Augmented Autonomous Agents](https://arxiv.org/pdf/2308.05960v1.pdf), Zhiwei Liu, et al.
- [2023/07/31] [ToolLLM: Facilitating Large Language Models to Master 16000+ Real-world APIs](https://arxiv.org/abs/2307.16789), Yujia Qin, et al.
- [2023/07/16] [Communicative Agents for Software Development](https://arxiv.org/abs/2307.07924), Chen Qian, et al.
- [2023/06/09] [Mind2Web: Towards a Generalist Agent for the Web](https://arxiv.org/pdf/2306.06070.pdf), Xiang Deng, et al. [[code]](https://github.com/OSU-NLP-Group/Mind2Web) [[demo]](https://osu-nlp-group.github.io/Mind2Web/)
- [2023/06/05] [Orca: Progressive Learning from Complex Explanation Traces of GPT-4](https://arxiv.org/pdf/2306.02707.pdf), Subhabrata Mukherjee, et al.
- [2023/05/25] [Voyager: An Open-Ended Embodied Agent with Large Language Models](https://arxiv.org/pdf/2305.16291.pdf), Guanzhi Wang, et al. [[code]](https://github.com/MineDojo/Voyager) [[website]](https://voyager.minedojo.org/)
- [2023/05/23] [ReWOO: Decoupling Reasoning from Observations for Efficient Augmented Language Models](https://arxiv.org/pdf/2305.18323.pdf), Binfeng Xu, et al. [[code]](https://github.com/billxbf/ReWOO)
- [2023/05/19] [FrugalGPT: How to Use Large Language Models While Reducing Cost and Improving Performance](https://arxiv.org/abs/2305.05176), Lingjiao Chen, et al.
- [2023/05/17] [Tree of Thoughts: Deliberate Problem Solving with Large Language Models](https://arxiv.org/abs/2305.10601), Shunyu Yao, et al. [[code]](https://github.com/kyegomez/tree-of-thoughts) [[code-orig]](https://github.com/ysymyth/tree-of-thought-llm)
- [2023/05/12] [MEGABYTE: Predicting Million-byte Sequences with Multiscale Transformers](https://arxiv.org/abs/2305.07185), Lili Yu, et al.
- [2023/05/06] [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](https://arxiv.org/abs/2305.04091), Lei Wang, et al.
- [2023/05/01] [Learning to Reason and Memorize with Self-Notes](https://arxiv.org/abs/2305.00833), Jack Lanchantin, et al.
- [2023/04/24] [WizardLM: Empowering Large Language Models to Follow Complex Instructions](https://arxiv.org/abs/2304.12244), Can Xu, et al.
- [2023/04/22] [LLM+P: Empowering Large Language Models with Optimal Planning Proficiency](https://arxiv.org/abs/2304.11477), Bo Liu, et al.
- [2023/04/07] [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442), Joon Sung Park, et al. [[code]](https://github.com/mkturkcan/generative-agents)
- [2023/03/30] [Self-Refine: Iterative Refinement with Self-Feedback](https://arxiv.org/abs/2303.17651), Aman Madaan, et al. [[code]](https://github.com/madaan/self-refine)
- [2023/03/30] [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in HuggingFace](https://arxiv.org/pdf/2303.17580.pdf), Yongliang Shen, et al. [[code]](https://github.com/microsoft/JARVIS) [[demo]](https://huggingface.co/spaces/microsoft/HuggingGPT)
- [2023/03/20] [Reflexion: Language Agents with Verbal Reinforcement Learning](https://arxiv.org/pdf/2303.11366.pdf), Noah Shinn, et al. [[code]](https://github.com/noahshinn024/reflexion)
- [2023/03/04] [Towards A Unified Agent with Foundation Models](https://openreview.net/pdf?id=JK_B1tB6p-), Norman Di Palo, et al.
- [2023/02/23] [Not what you've signed up for: Compromising Real-World LLM-Integrated Applications with Indirect Prompt Injection](https://arxiv.org/abs/2302.12173), Sahar Abdelnabi, et al.
- [2023/02/09] [Toolformer: Language Models Can Teach Themselves to Use Tools](https://arxiv.org/pdf/2302.04761.pdf), Timo Schick, et al. [[code]](https://github.com/lucidrains/toolformer-pytorch)
- [2022/12/12] [LMQL: Prompting Is Programming: A Query Language for Large Language Models](https://arxiv.org/abs/2212.06094), Luca Beurer-Kellner, et al.
- [2022/10/06] [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/pdf/2210.03629.pdf), Shunyu Yao, et al. [[code]](https://github.com/ysymyth/ReAct)
- [2022/07/20] [Inner Monologue: Embodied Reasoning through Planning with Language Models](https://arxiv.org/pdf/2207.05608.pdf), Wenlong Huang, et al. [[demo]](https://innermonologue.github.io/)
- [2022/04/04] [Do As I Can, Not As I Say: Grounding Language in Robotic Affordances](https://arxiv.org/abs/2204.01691), Michael Ahn, et al. [[demo]](https://say-can.github.io/)
- [2021/12/17] [WebGPT: Browser-assisted question-answering with human feedback](https://arxiv.org/pdf/2112.09332.pdf), Reiichiro Nakano, et al.
- [2021/06/17] [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685), Edward J. Hu, et al.

### Blog Articles

- [2023/08/14] [A Roadmap of AI Agents (Chinese)](https://zhuanlan.zhihu.com/p/649916692) by Haojie Pan
- [2023/06/23] [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) by Lilian Weng
- [2023/06/11] [A Critical Look at AI-Generated Software](https://spectrum.ieee.org/ai-software) by Jaideep Vaidya and Hafiz Asif
- [2023/04/29] [Auto-GPT: Unleashing the Power of Autonomous AI Agents](https://www.leewayhertz.com/autogpt/) by Akash Takyar
- [2023/04/20] [Conscious Machines: Experiments, Theory, and Implementations (Chinese)](https://pattern.swarma.org/article/230) by Jiang Zhang
- [2023/04/18] [Autonomous Agents & Agent Simulations](https://blog.langchain.dev/agents-round/) by Langchain
- [2023/04/16] [4 Autonomous AI Agents you need to know](https://towardsdatascience.com/4-autonomous-ai-agents-you-need-to-know-d612a643fa92) by Sophia Yang
- [2023/03/31] [ChatGPT that learns to use tools (Chinese)](https://zhuanlan.zhihu.com/p/618448188) by Haojie Pan

### Talks

- [2023/06/05] [Two Paths to Intelligence](https://www.youtube.com/watch?v=rGgGOccMEiY&t=1497s) by Geoffrey Hinton
- [2023/05/24] [State of GPT](https://www.youtube.com/watch?v=bZQun8Y4L2A) by Andrej Karpathy | OpenAI

---
## The Plan

### Phase 1: Building the Foundation

In the first phase, our focus is on building the basic infrastructure of Swarms. This includes developing key components like the Swarms class, integrating essential tools, and establishing task completion and evaluation logic. We'll also start developing our testing and evaluation framework during this phase. If you're interested in foundational work and have a knack for building robust, scalable systems, this phase is for you.

### Phase 2: Optimizing the System

In the second phase, we'll focus on optimizing Swarms by integrating more advanced features, improving the system's efficiency, and refining our testing and evaluation framework. This phase involves more complex tasks, so if you enjoy tackling challenging problems and contributing to the development of innovative features, this is the phase for you.

### Phase 3: Towards Super-Intelligence

The third phase of our bounty program is the most exciting: this is where we aim to achieve super-intelligence. In this phase, we'll be working on improving the swarm's capabilities, expanding its skills, and fine-tuning the system based on real-world testing and feedback. If you're excited about the future of AI and want to contribute to a project that could potentially transform the digital world, this is the phase for you.

Remember, our roadmap is a guide, and we encourage you to bring your own ideas and creativity to the table. We believe that every contribution, no matter how small, can make a difference. So join us on this exciting journey and help us create the future of Swarms.

---
**Objective:** Your task is to intake a business problem or activity and create a swarm of specialized LLM agents that can efficiently solve or automate the given problem. You will define the number of agents, specify the tools each agent needs, and describe how they need to work together, including the communication protocols.

**Instructions:**

1. **Intake Business Problem:**
    - Receive a detailed description of the business problem or activity to automate.
    - Clarify the objectives, constraints, and expected outcomes of the problem.
    - Identify key components and sub-tasks within the problem.

2. **Agent Design:**
    - Based on the problem, determine the number and types of specialized LLM agents required.
    - For each agent, specify:
        - The specific task or role it will perform.
        - The tools and resources it needs to perform its task.
        - Any prerequisite knowledge or data it must have access to.
    - Ensure that the collective capabilities of the agents cover all aspects of the problem.

3. **Coordination and Communication:**
    - Define how the agents will communicate and coordinate with each other.
    - Choose the type of communication (e.g., synchronous, asynchronous, broadcast, direct messaging).
    - Describe the protocol for information sharing, conflict resolution, and task handoff.

4. **Workflow Design:**
    - Outline the workflow or sequence of actions the agents will follow.
    - Define the input and output for each agent.
    - Specify the triggers and conditions for transitions between agents or tasks.
    - Ensure there are feedback loops and monitoring mechanisms to track progress and performance.

5. **Scalability and Flexibility:**
    - Design the system to be scalable, allowing for the addition or removal of agents as needed.
    - Ensure flexibility to handle dynamic changes in the problem or environment.

6. **Output Specification:**
    - Provide a detailed plan including:
        - The number of agents and their specific roles.
        - The tools and resources each agent will use.
        - The communication and coordination strategy.
        - The workflow and sequence of actions.
    - Include a diagram or flowchart if necessary to visualize the system.

## Examples
# Swarm Architectures

Swarms was designed to facilitate communication between many different and specialized agents from a vast array of other frameworks, such as langchain, autogen, crew, and more.

In traditional swarm theory, there are many types of swarms, usually for very specialized use-cases and problem sets. Hierarchical and sequential swarms, for example, are great for accounting and sales, because there is usually a boss or coordinator agent that distributes a workload to other specialized agents.

| **Name**                | **Description**                                                                                                                        | **Code Link**                                                                        | **Use Cases**                                                                                       |
|-------------------------|----------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|
| Hierarchical Swarms     | A system where agents are organized in a hierarchy, with higher-level agents coordinating lower-level agents to achieve complex tasks. | [Code Link](#)                                                                       | Manufacturing process optimization, multi-level sales management, healthcare resource coordination |
| Agent Rearrange         | A setup where agents rearrange themselves dynamically based on the task requirements and environmental conditions.                    | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/)     | Adaptive manufacturing lines, dynamic sales territory realignment, flexible healthcare staffing    |
| Concurrent Workflows    | Agents perform different tasks simultaneously, coordinating to complete a larger goal.                                                | [Code Link](#)                                                                       | Concurrent production lines, parallel sales operations, simultaneous patient care processes        |
| Sequential Coordination | Agents perform tasks in a specific sequence, where the completion of one task triggers the start of the next.                         | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) | Step-by-step assembly lines, sequential sales processes, stepwise patient treatment workflows      |
| Parallel Processing     | Agents work on different parts of a task simultaneously to speed up the overall process.                                              | [Code Link](#)                                                                       | Parallel data processing in manufacturing, simultaneous sales analytics, concurrent medical tests  |
### Hierarchical Swarm

**Overview:**
A Hierarchical Swarm architecture organizes the agents in a tree-like structure. Higher-level agents delegate tasks to lower-level agents, which can further divide tasks among themselves. This structure allows for efficient task distribution and scalability.

**Use-Cases:**

- Complex decision-making processes where tasks can be broken down into subtasks.

- Multi-stage workflows such as data processing pipelines or hierarchical reinforcement learning.

```mermaid
graph TD
    A[Root Agent] --> B1[Sub-Agent 1]
    A --> B2[Sub-Agent 2]
    B1 --> C1[Sub-Agent 1.1]
    B1 --> C2[Sub-Agent 1.2]
    B2 --> C3[Sub-Agent 2.1]
    B2 --> C4[Sub-Agent 2.2]
```
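As a rough illustration of the delegation pattern (not the Swarms API itself; the class below is invented for this sketch), a parent agent splits a task and recursively delegates to its children:

```python
class HierarchicalAgent:
    """An agent that either works on a task itself or delegates to children."""

    def __init__(self, name: str, children: list["HierarchicalAgent"] | None = None):
        self.name = name
        self.children = children or []

    def run(self, task: str) -> str:
        if not self.children:
            return f"[{self.name}] completed: {task}"
        # Split the task naively and delegate one piece per child.
        results = [
            child.run(f"{task} (part {i + 1})")
            for i, child in enumerate(self.children)
        ]
        return f"[{self.name}] aggregated: " + "; ".join(results)


root = HierarchicalAgent(
    "Root",
    [
        HierarchicalAgent("Sub-Agent 1", [HierarchicalAgent("Sub-Agent 1.1")]),
        HierarchicalAgent("Sub-Agent 2"),
    ],
)
print(root.run("Analyze quarterly report"))
```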
---

### Parallel Swarm

**Overview:**
In a Parallel Swarm architecture, multiple agents operate independently and simultaneously on different tasks. Each agent works on its own task without dependencies on the others. [Learn more in the docs](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/).

**Use-Cases:**

- Tasks that can be processed independently, such as parallel data analysis.

- Large-scale simulations where multiple scenarios are run in parallel.

```mermaid
graph LR
    A[Task] --> B1[Sub-Agent 1]
    A --> B2[Sub-Agent 2]
    A --> B3[Sub-Agent 3]
    A --> B4[Sub-Agent 4]
```
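A brief sketch of the parallel pattern using only the standard library; `fake_agent` stands in for a real LLM-backed agent call:

```python
from concurrent.futures import ThreadPoolExecutor


def fake_agent(task: str) -> str:
    # In practice this would call an LLM; here we just echo the task.
    return f"result for: {task}"


tasks = ["analyze region A", "analyze region B", "analyze region C"]

# Each task runs independently on its own thread, mirroring the diagram above.
with ThreadPoolExecutor(max_workers=len(tasks)) as pool:
    results = list(pool.map(fake_agent, tasks))

print(results)
```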
---

### Sequential Swarm

**Overview:**
A Sequential Swarm architecture processes tasks in a linear sequence. Each agent completes its task before passing the result to the next agent in the chain. This architecture ensures orderly processing and is useful when tasks have dependencies. [Learn more in the docs](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/).

**Use-Cases:**

- Workflows where each step depends on the previous one, such as assembly lines or sequential data processing.

- Scenarios requiring strict order of operations.

```mermaid
graph TD
    A[First Agent] --> B[Second Agent]
    B --> C[Third Agent]
    C --> D[Fourth Agent]
```
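The chain can be expressed as a simple fold over agent functions; this generic sketch is not tied to any framework:

```python
from functools import reduce


def first_agent(text: str) -> str:
    return text + " -> cleaned"


def second_agent(text: str) -> str:
    return text + " -> summarized"


def third_agent(text: str) -> str:
    return text + " -> formatted"


pipeline = [first_agent, second_agent, third_agent]

# Each agent consumes the previous agent's output, enforcing strict ordering.
output = reduce(lambda result, agent: agent(result), pipeline, "raw input")
print(output)  # raw input -> cleaned -> summarized -> formatted
```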
---

### Round Robin Swarm

**Overview:**
In a Round Robin Swarm architecture, tasks are distributed cyclically among a set of agents. Each agent takes turns handling tasks in a rotating order, ensuring even distribution of workload.

**Use-Cases:**

- Load balancing in distributed systems.

- Scenarios requiring fair distribution of tasks to avoid overloading any single agent.

```mermaid
graph TD
    A[Coordinator Agent] --> B1[Sub-Agent 1]
    A --> B2[Sub-Agent 2]
    A --> B3[Sub-Agent 3]
    A --> B4[Sub-Agent 4]
    B1 --> A
    B2 --> A
    B3 --> A
    B4 --> A
```
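Cyclic distribution is one `itertools.cycle` away; the agent names here are placeholders:

```python
from itertools import cycle

agents = cycle(["agent-1", "agent-2", "agent-3"])
tasks = [f"task-{i}" for i in range(7)]

# Tasks are handed out in rotating order, so no single agent is overloaded.
assignments = {task: next(agents) for task in tasks}
for task, agent in assignments.items():
    print(f"{task} -> {agent}")
# task-0 -> agent-1, task-1 -> agent-2, task-2 -> agent-3, task-3 -> agent-1, ...
```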
### SpreadSheet Swarm

**Overview:**
The SpreadSheet Swarm makes it easy to manage thousands of agents in one place: a CSV file. You can initialize any number of agents, and a loop parameter controls how many times the agents run on the task. Learn more in the [docs here](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/).

**Use-Cases:**

- Multi-threaded execution: run agents on multiple threads.

- Save agent outputs to a CSV file.

- One place to analyze agent outputs.

```mermaid
graph TD
    A[Initialize SpreadSheetSwarm] --> B[Initialize Agents]
    B --> C[Load Task Queue]
    C --> D[Run Task]

    subgraph Agents
        D --> E1[Agent 1]
        D --> E2[Agent 2]
        D --> E3[Agent 3]
    end

    E1 --> F1[Process Task]
    E2 --> F2[Process Task]
    E3 --> F3[Process Task]

    F1 --> G1[Track Output]
    F2 --> G2[Track Output]
    F3 --> G3[Track Output]

    subgraph Save Outputs
        G1 --> H[Save to CSV]
        G2 --> H[Save to CSV]
        G3 --> H[Save to CSV]
    end

    H --> I{Autosave Enabled?}
    I --> |Yes| J[Export Metadata to JSON]
    I --> |No| K[End Swarm Run]

    %% Style adjustments
    classDef blackBox fill:#000,stroke:#f00,color:#fff;
    class A,B,C,D,E1,E2,E3,F1,F2,F3,G1,G2,G3,H,I,J,K blackBox;
```
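The core of the pattern (run many agents concurrently, then persist every output to one CSV) can be sketched with the standard library alone; this illustrates the idea and is not the `SpreadSheetSwarm` implementation:

```python
import csv
from concurrent.futures import ThreadPoolExecutor


def fake_agent(name: str, task: str) -> str:
    return f"{name} finished: {task}"


agent_names = [f"agent-{i}" for i in range(5)]
task = "Generate a product description"

with ThreadPoolExecutor() as pool:
    outputs = list(pool.map(lambda n: (n, fake_agent(n, task)), agent_names))

# All outputs land in a single CSV for later analysis.
with open("swarm_outputs.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["agent", "output"])
    writer.writerows(outputs)
```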
### Mixture of Agents Architecture

**Overview:**
In a Mixture of Agents architecture, a first layer of reference agents each responds to the task independently, and a second-layer aggregator agent combines their responses into a single final output.

```mermaid
graph TD
    A[Task Input] --> B[Layer 1: Reference Agents]
    B --> C[Agent 1]
    B --> D[Agent 2]
    B --> E[Agent N]

    C --> F[Agent 1 Response]
    D --> G[Agent 2 Response]
    E --> H[Agent N Response]

    F & G & H --> I[Layer 2: Aggregator Agent]
    I --> J[Aggregate All Responses]
    J --> K[Final Output]
```
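The two-layer flow reduces to "fan out, then aggregate"; the functions below are placeholders standing in for LLM calls:

```python
def reference_agent(agent_id: int, task: str) -> str:
    # Layer 1: each reference agent answers independently.
    return f"agent {agent_id} answer to '{task}'"


def aggregator_agent(responses: list[str]) -> str:
    # Layer 2: a single agent synthesizes all layer-1 responses.
    joined = " | ".join(responses)
    return f"final output synthesized from: {joined}"


task = "Propose a marketing tagline"
layer_one = [reference_agent(i, task) for i in range(3)]
print(aggregator_agent(layer_one))
```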
---

# Go-To-Market Strategy (GTM)

Our vision is to become the world leader in real-world, production-grade autonomous agent deployment through open-source product development, deep verticalization, and unmatched value delivery to the end user.

We will first focus on accelerating the open-source framework to product-market fit (PMF), where it will serve as the backend for upstream products and services such as the Swarm Cloud, which will enable enterprises to deploy autonomous agents with long-term memory and tools in the cloud, and a no-code platform for users to build their own swarm by dragging and dropping blocks.

Our target user segment for the framework is AI engineers looking to deploy agents into high-risk environments where reliability is crucial.

Once PMF has been achieved and the framework has been extensively benchmarked, we aim to establish high-value contracts with customers in security, logistics, manufacturing, healthcare, and various other untapped industries.

Our growth strategy for the OS framework can be summarized by:

- Educating developers on the value of autonomous agent usage.
- Tutorial walkthroughs of various applications, like deploying multi-modal agents through cameras or building custom swarms for a specific business operation.
- Demonstrating unmatched reliability by delighting users.
- Staying up to date with trends and integrating the latest models, frameworks, and methodologies.
- Building a loyal and devoted community for long-term user retention. [Join here](https://codex.apac.ai)

As we continuously deliver value with the open framework, we will strategically position ourselves to acquire leads for high-value contracts by openly demonstrating the power, reliability, and performance of our framework.

Acquire full access to the memo here: [TSC Memo](https://docs.google.com/document/d/1hS_nv_lFjCqLfnJBoF6ULY9roTbSgSuCkvXvSUSc7Lo/edit?usp=sharing)

---
### Understanding Agent Evaluation Mechanisms

Agent evaluation mechanisms play a crucial role in ensuring that autonomous agents, particularly in multi-agent systems, perform their tasks effectively and efficiently. This blog delves into the intricacies of agent evaluation, the importance of accuracy tracking, and the methodologies used to measure and visualize agent performance. We'll use Mermaid graphs to provide clear visual representations of these processes.

### 1. Introduction to Agent Evaluation Mechanisms

Agent evaluation mechanisms refer to the processes and criteria used to assess the performance of agents within a system. These mechanisms are essential for:

- **Ensuring Reliability:** Agents must consistently perform their designated tasks correctly.
- **Improving Performance:** Evaluation helps in identifying areas where agents can improve.
- **Maintaining Accountability:** It provides a way to hold agents accountable for their actions.

### 2. Key Components of Agent Evaluation

To effectively evaluate agents, several components and metrics are considered:

#### a. Performance Metrics

These are quantitative measures used to assess how well an agent is performing. Common performance metrics include:

- **Accuracy:** The percentage of correct actions or decisions made by the agent.
- **Precision and Recall:** Precision measures the number of true positive results divided by the number of all positive results, while recall measures the number of true positive results divided by the number of positives that should have been retrieved.
- **F1 Score:** The harmonic mean of precision and recall.
- **Response Time:** How quickly an agent responds to a given task or query. (A short sketch computing these metrics follows this list.)
|
||||||
|
|
||||||
|
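To make these metrics concrete, here is a minimal sketch of how they might be computed for a batch of binary agent decisions; the `evaluate` helper and its inputs are illustrative assumptions rather than part of any evaluation library:

```python
from dataclasses import dataclass

@dataclass
class EvalResult:
    accuracy: float
    precision: float
    recall: float
    f1: float

def evaluate(predictions: list[bool], labels: list[bool]) -> EvalResult:
    """Compute accuracy, precision, recall, and F1 for binary agent decisions."""
    pairs = list(zip(predictions, labels))
    tp = sum(p and l for p, l in pairs)        # true positives
    fp = sum(p and not l for p, l in pairs)    # false positives
    fn = sum(not p and l for p, l in pairs)    # false negatives
    accuracy = sum(p == l for p, l in pairs) / len(pairs)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return EvalResult(accuracy, precision, recall, f1)

# Example: five agent decisions scored against ground truth
print(evaluate([True, True, False, True, False], [True, False, False, True, True]))
```
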
#### b. Evaluation Criteria

Evaluation criteria define the standards or benchmarks against which agent performance is measured. These criteria are often task-specific and may include:

- **Task Completion Rate:** The percentage of tasks successfully completed by the agent.
- **Error Rate:** The frequency of errors made by the agent during task execution.
- **Resource Utilization:** How efficiently an agent uses resources such as memory and CPU.

### 3. The Process of Agent Evaluation

The evaluation process involves several steps, which can be visualized using Mermaid graphs:

#### a. Define Evaluation Metrics

The first step is to define the metrics that will be used to evaluate the agent. This involves identifying the key performance indicators (KPIs) relevant to the agent's tasks.

```mermaid
graph TD
A[Define Evaluation Metrics] --> B[Identify KPIs]
B --> C[Accuracy]
B --> D[Precision and Recall]
B --> E[F1 Score]
B --> F[Response Time]
```

#### b. Collect Data

Data collection involves gathering information on the agent's performance. This data can come from logs, user feedback, or direct observations.

```mermaid
graph TD
A[Collect Data] --> B[Logs]
A --> C[User Feedback]
A --> D[Direct Observations]
```

#### c. Analyze Performance

Once data is collected, it is analyzed to assess the agent's performance against the defined metrics. This step may involve statistical analysis, machine learning models, or other analytical techniques.

```mermaid
graph TD
A[Analyze Performance] --> B[Statistical Analysis]
A --> C[Machine Learning Models]
A --> D[Other Analytical Techniques]
```

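As a small illustration of the statistical-analysis step, the sketch below reports an observed accuracy together with a normal-approximation 95% confidence interval, which guards against over-interpreting small evaluation samples; the helper function is hypothetical:

```python
import math

def accuracy_with_ci(num_correct: int, num_total: int, z: float = 1.96):
    """Observed accuracy plus a 95% normal-approximation confidence interval."""
    p = num_correct / num_total
    half_width = z * math.sqrt(p * (1 - p) / num_total)
    return p, max(0.0, p - half_width), min(1.0, p + half_width)

# Example: 87 correct decisions out of 100 evaluated
acc, low, high = accuracy_with_ci(87, 100)
print(f"accuracy={acc:.2f}, 95% CI=({low:.2f}, {high:.2f})")
```
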
#### d. Generate Reports

After analysis, performance reports are generated. These reports provide insights into how well the agent is performing and identify areas for improvement.

```mermaid
graph TD
A[Generate Reports] --> B[Performance Insights]
B --> C[Identify Areas for Improvement]
```

### 4. Tracking Agent Accuracy

Accuracy tracking is a critical aspect of agent evaluation. It involves measuring how often an agent's actions or decisions are correct. The following steps outline the process of tracking agent accuracy:

#### a. Define Correctness Criteria

The first step is to define what constitutes a correct action or decision for the agent.

```mermaid
graph TD
A[Define Correctness Criteria] --> B[Task-Specific Standards]
B --> C[Action Accuracy]
B --> D[Decision Accuracy]
```

#### b. Monitor Agent Actions

Agents' actions are continuously monitored to track their performance. This monitoring can be done in real-time or through periodic evaluations.

```mermaid
graph TD
A[Monitor Agent Actions] --> B[Real-Time Monitoring]
A --> C[Periodic Evaluations]
```

#### c. Compare Against Correctness Criteria

Each action or decision made by the agent is compared against the defined correctness criteria to determine its accuracy.

```mermaid
graph TD
A[Compare Against Correctness Criteria] --> B[Evaluate Each Action]
B --> C[Correct or Incorrect?]
```

#### d. Calculate Accuracy Metrics

Accuracy metrics are calculated based on the comparison results. These metrics provide a quantitative measure of the agent's accuracy.

```mermaid
graph TD
A[Calculate Accuracy Metrics] --> B[Accuracy Percentage]
A --> C[Error Rate]
```

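Putting steps a through d together, a minimal accuracy tracker might look like the sketch below: it records each monitored action, compares it against a correctness criterion, and exposes the accuracy percentage and error rate. The `AccuracyTracker` class is hypothetical, not part of any particular framework:

```python
class AccuracyTracker:
    """Tracks correct vs. incorrect agent actions and derives accuracy metrics."""

    def __init__(self, is_correct):
        self.is_correct = is_correct  # correctness criterion: (action, expected) -> bool
        self.total = 0
        self.correct = 0

    def record(self, action, expected) -> None:
        self.total += 1
        if self.is_correct(action, expected):
            self.correct += 1

    @property
    def accuracy(self) -> float:
        return self.correct / self.total if self.total else 0.0

    @property
    def error_rate(self) -> float:
        return 1.0 - self.accuracy if self.total else 0.0

# Example: exact-match correctness criterion
tracker = AccuracyTracker(lambda action, expected: action == expected)
for action, expected in [("buy", "buy"), ("sell", "hold"), ("hold", "hold")]:
    tracker.record(action, expected)
print(f"accuracy={tracker.accuracy:.2f}, error_rate={tracker.error_rate:.2f}")
```
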
### 5. Measuring Agent Accuracy

Measuring agent accuracy involves several steps and considerations:

#### a. Data Labeling

To measure accuracy, the data used for evaluation must be accurately labeled. This involves annotating the data with the correct actions or decisions.

```mermaid
graph TD
A[Data Labeling] --> B[Annotate Data with Correct Actions]
B --> C[Ensure Accuracy of Labels]
```

#### b. Establish Baseline Performance

A baseline performance level is established by evaluating a sample set of data. This baseline serves as a reference point for measuring improvements or declines in accuracy.

```mermaid
graph TD
A[Establish Baseline Performance] --> B[Evaluate Sample Data]
B --> C[Set Performance Benchmarks]
```

#### c. Regular Evaluations

Agents are regularly evaluated to measure their accuracy over time. This helps in tracking performance trends and identifying any deviations from the expected behavior.

```mermaid
graph TD
A[Regular Evaluations] --> B[Track Performance Over Time]
B --> C[Identify Performance Trends]
B --> D[Detect Deviations]
```

#### d. Feedback and Improvement

Feedback from evaluations is used to improve the agent's performance. This may involve retraining the agent, adjusting its algorithms, or refining its decision-making processes.

```mermaid
graph TD
A[Feedback and Improvement] --> B[Use Evaluation Feedback]
B --> C[Retrain Agent]
B --> D[Adjust Algorithms]
B --> E[Refine Decision-Making Processes]
```

### 6. Visualizing Agent Evaluation with Mermaid Graphs

Mermaid graphs provide a clear and concise way to visualize the agent evaluation process. Here are some examples of how Mermaid graphs can be used:

#### a. Overall Evaluation Process

```mermaid
graph TD
A[Define Evaluation Metrics] --> B[Collect Data]
B --> C[Analyze Performance]
C --> D[Generate Reports]
```

#### b. Accuracy Tracking

```mermaid
graph TD
A[Define Correctness Criteria] --> B[Monitor Agent Actions]
B --> C[Compare Against Correctness Criteria]
C --> D[Calculate Accuracy Metrics]
```

#### c. Continuous Improvement Cycle

```mermaid
graph TD
A[Regular Evaluations] --> B[Track Performance Over Time]
B --> C[Identify Performance Trends]
C --> D[Detect Deviations]
D --> E[Feedback and Improvement]
E --> A
```

### 7. Case Study: Evaluating a Chatbot Agent

To illustrate the agent evaluation process, let's consider a case study involving a chatbot agent designed to assist customers on an e-commerce platform.

#### a. Define Evaluation Metrics

For the chatbot, key performance metrics might include:

- **Response Accuracy:** The percentage of correct responses provided by the chatbot.
- **Response Time:** The average time taken by the chatbot to respond to user queries.
- **Customer Satisfaction:** Measured through user feedback and ratings.

#### b. Collect Data

Data is collected from chatbot interactions, including user queries, responses, and feedback.

#### c. Analyze Performance

Performance analysis involves comparing the chatbot's responses against a predefined set of correct responses and calculating accuracy metrics.

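As an illustration of this comparison step, the following sketch scores a transcript against a hypothetical table of reference answers using exact string matching; a production system would more likely use fuzzy or semantic matching:

```python
# Hypothetical reference answers for known customer queries
reference_answers = {
    "Where is my order?": "You can track your order from the Orders page.",
    "What is your return policy?": "Returns are accepted within 30 days of delivery.",
}

def score_chatbot(transcript: list[tuple[str, str]]) -> float:
    """Fraction of (query, response) pairs that exactly match the reference answer."""
    scored = [(q, r) for q, r in transcript if q in reference_answers]
    if not scored:
        return 0.0
    correct = sum(reference_answers[q] == r for q, r in scored)
    return correct / len(scored)

transcript = [
    ("Where is my order?", "You can track your order from the Orders page."),
    ("What is your return policy?", "We do not accept returns."),
]
print(f"response accuracy: {score_chatbot(transcript):.0%}")  # 50%
```
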
#### d. Generate Reports

Reports are generated to provide insights into the chatbot's performance, highlighting areas where it excels and areas needing improvement.

### 8. Best Practices for Agent Evaluation

Here are some best practices to ensure effective agent evaluation:

#### a. Use Realistic Scenarios

Evaluate agents in realistic scenarios that closely mimic real-world conditions. This ensures that the evaluation results are relevant and applicable.

#### b. Continuous Monitoring

Continuously monitor agent performance to detect and address issues promptly. This helps in maintaining high performance levels.

#### c. Incorporate User Feedback

User feedback is invaluable for improving agent performance. Incorporate feedback into the evaluation process to identify and rectify shortcomings.

#### d. Regular Updates

Regularly update the evaluation metrics and criteria to keep pace with evolving tasks and requirements.

### Conclusion

Agent evaluation mechanisms are vital for ensuring the reliability, efficiency, and effectiveness of autonomous agents. By defining clear evaluation metrics, continuously monitoring performance, and using feedback for improvement, we can develop agents that consistently perform at high levels. Visualizing the evaluation process with tools like Mermaid graphs further aids in understanding and communication. Through diligent evaluation and continuous improvement, we can harness the full potential of autonomous agents in various applications.

@ -0,0 +1,481 @@

# Building a Multi-Agent System for Real-Time Financial Analysis: A Comprehensive Tutorial

In this tutorial, we'll walk through the process of building a sophisticated multi-agent system for real-time financial analysis using the Swarms framework. This system is designed for financial analysts and developers who want to leverage AI and multiple data sources to gain deeper insights into stock performance, market trends, and economic indicators.

Before we dive into the code, let's briefly introduce the Swarms framework. Swarms is an innovative open-source project that simplifies the creation and management of AI agents. It's particularly well-suited for complex tasks like financial analysis, where multiple specialized agents can work together to provide comprehensive insights.

For more information and to contribute to the project, visit the [Swarms GitHub repository](https://github.com/kyegomez/swarms). We highly recommend exploring the documentation for a deeper understanding of Swarms' capabilities.

Additional resources:

- [Swarms Discord](https://discord.com/servers/agora-999382051935506503) for community discussions
- [Swarms Twitter](https://x.com/swarms_corp) for updates
- [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) for podcasts
- [Swarms Blog](https://medium.com/@kyeg) for in-depth articles
- [Swarms Website](https://swarms.xyz) for an overview of the project

Now, let's break down our financial analysis system step by step.

## Step 1: Setting Up the Environment

First, install the necessary packages (this list includes every library imported below):

```bash
pip3 install -U swarms yfinance swarm_models fredapi pandas aiohttp ratelimit python-dotenv matplotlib
```

Next, we import the required libraries and set up our environment:

```python
import os
import time
from datetime import datetime, timedelta
import yfinance as yf
import requests
from fredapi import Fred
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from swarms import Agent, AgentRearrange
from swarm_models import OpenAIChat
import logging
from dotenv import load_dotenv
import asyncio
import aiohttp
from ratelimit import limits, sleep_and_retry

# Load environment variables
load_dotenv()

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# API keys
POLYGON_API_KEY = os.getenv('POLYGON_API_KEY')
FRED_API_KEY = os.getenv('FRED_API_KEY')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')

# Initialize FRED client
fred_client = Fred(api_key=FRED_API_KEY)

# Polygon API base URL
POLYGON_BASE_URL = "https://api.polygon.io"
```

This section sets up our environment, imports necessary libraries, and initializes our API keys and clients. We're using `dotenv` to securely manage our API keys, and we've set up logging to track the execution of our script.

## Step 2: Implementing Rate Limiting

To respect API rate limits, we implement rate limiting decorators:

```python
@sleep_and_retry
@limits(calls=5, period=60)  # Adjust these values based on your Polygon API tier
async def call_polygon_api(session, endpoint, params=None):
    url = f"{POLYGON_BASE_URL}{endpoint}"
    params = params or {}
    params['apiKey'] = POLYGON_API_KEY
    async with session.get(url, params=params) as response:
        response.raise_for_status()
        return await response.json()

@sleep_and_retry
@limits(calls=120, period=60)  # FRED allows 120 requests per minute
def call_fred_api(func, *args, **kwargs):
    return func(*args, **kwargs)
```

These decorators ensure that we don't exceed the rate limits for our API calls. The `call_polygon_api` function is designed to work with asynchronous code, while `call_fred_api` is a wrapper for synchronous FRED API calls. One caveat: the `ratelimit` library is synchronous, so when a limit is hit, `sleep_and_retry` blocks the current thread. That is acceptable for a simple script like this one, but a fully asynchronous application would want a non-blocking limiter.

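If you want a limiter that never blocks the event loop, a minimal asyncio-native sketch along the following lines can stand in for the `ratelimit` decorators; the `AsyncRateLimiter` class here is a hypothetical helper, not part of Swarms or `ratelimit`:

```python
import asyncio
import time

class AsyncRateLimiter:
    """Allows at most `calls` entries per `period` seconds without blocking the loop."""

    def __init__(self, calls: int, period: float):
        self.calls = calls
        self.period = period
        self._timestamps: list[float] = []
        self._lock = asyncio.Lock()

    async def __aenter__(self):
        async with self._lock:
            now = time.monotonic()
            # Drop timestamps that have fallen outside the window
            self._timestamps = [t for t in self._timestamps if now - t < self.period]
            if len(self._timestamps) >= self.calls:
                # Sleep (without blocking the loop) until the oldest call expires
                await asyncio.sleep(self.period - (now - self._timestamps[0]))
            self._timestamps.append(time.monotonic())

    async def __aexit__(self, *exc):
        return False

# Usage sketch:
# polygon_limiter = AsyncRateLimiter(calls=5, period=60)
# async with polygon_limiter:
#     data = await call_polygon_api(session, "/v2/last/trade/AAPL")
```
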
## Step 3: Implementing Data Fetching Functions

Next, we implement functions to fetch data from various sources:

### Yahoo Finance Integration

```python
async def get_yahoo_finance_data(session, ticker, period="1d", interval="1m"):
    try:
        stock = yf.Ticker(ticker)
        hist = await asyncio.to_thread(stock.history, period=period, interval=interval)
        info = await asyncio.to_thread(lambda: stock.info)
        return hist, info
    except Exception as e:
        logger.error(f"Error fetching Yahoo Finance data for {ticker}: {e}")
        return None, None

async def get_yahoo_finance_realtime(session, ticker):
    try:
        stock = yf.Ticker(ticker)
        return await asyncio.to_thread(lambda: stock.fast_info)
    except Exception as e:
        logger.error(f"Error fetching Yahoo Finance realtime data for {ticker}: {e}")
        return None
```

These functions fetch historical and real-time data from Yahoo Finance. We use `asyncio.to_thread` to run the synchronous `yfinance` functions in a separate thread, allowing our main event loop to continue running.

### Polygon.io Integration

```python
async def get_polygon_realtime_data(session, ticker):
    try:
        trades = await call_polygon_api(session, f"/v2/last/trade/{ticker}")
        quotes = await call_polygon_api(session, f"/v2/last/nbbo/{ticker}")
        return trades, quotes
    except Exception as e:
        logger.error(f"Error fetching Polygon.io realtime data for {ticker}: {e}")
        return None, None

async def get_polygon_news(session, ticker, limit=10):
    try:
        news = await call_polygon_api(session, "/v2/reference/news", params={"ticker": ticker, "limit": limit})
        return news.get('results', [])
    except Exception as e:
        logger.error(f"Error fetching Polygon.io news for {ticker}: {e}")
        return []
```

These functions fetch real-time trade and quote data, as well as news articles from Polygon.io. We use our `call_polygon_api` function to make these requests, ensuring we respect rate limits.

### FRED Integration

```python
async def get_fred_data(session, series_id, start_date, end_date):
    try:
        data = await asyncio.to_thread(call_fred_api, fred_client.get_series, series_id, start_date, end_date)
        return data
    except Exception as e:
        logger.error(f"Error fetching FRED data for {series_id}: {e}")
        return None

async def get_fred_realtime(session, series_ids):
    try:
        data = {}
        for series_id in series_ids:
            series = await asyncio.to_thread(call_fred_api, fred_client.get_series, series_id)
            data[series_id] = series.iloc[-1]  # Get the most recent value
        return data
    except Exception as e:
        logger.error(f"Error fetching FRED realtime data: {e}")
        return {}
```

These functions fetch historical and real-time economic data from FRED. Again, we use `asyncio.to_thread` to run the synchronous FRED API calls in a separate thread.

## Step 4: Creating Specialized Agents

Now we create our specialized agents using the Swarms framework:

```python
stock_agent = Agent(
    agent_name="StockAgent",
    system_prompt="""You are an expert stock analyst. Your task is to analyze real-time stock data and provide insights.
    Consider price movements, trading volume, and any available company information.
    Provide a concise summary of the stock's current status and any notable trends or events.""",
    llm=OpenAIChat(api_key=OPENAI_API_KEY),
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
)

market_agent = Agent(
    agent_name="MarketAgent",
    system_prompt="""You are a market analysis expert. Your task is to analyze overall market conditions using real-time data.
    Consider major indices, sector performance, and market-wide trends.
    Provide a concise summary of current market conditions and any significant developments.""",
    llm=OpenAIChat(api_key=OPENAI_API_KEY),
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
)

macro_agent = Agent(
    agent_name="MacroAgent",
    system_prompt="""You are a macroeconomic analysis expert. Your task is to analyze key economic indicators and provide insights on the overall economic situation.
    Consider GDP growth, inflation rates, unemployment figures, and other relevant economic data.
    Provide a concise summary of the current economic situation and any potential impacts on financial markets.""",
    llm=OpenAIChat(api_key=OPENAI_API_KEY),
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
)

news_agent = Agent(
    agent_name="NewsAgent",
    system_prompt="""You are a financial news analyst. Your task is to analyze recent news articles related to specific stocks or the overall market.
    Consider the potential impact of news events on stock prices or market trends.
    Provide a concise summary of key news items and their potential market implications.""",
    llm=OpenAIChat(api_key=OPENAI_API_KEY),
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
)
```

Each agent is specialized in a different aspect of financial analysis. The `system_prompt` for each agent defines its role and the type of analysis it should perform.

## Step 5: Building the Multi-Agent System

We then combine our specialized agents into a multi-agent system:

```python
agents = [stock_agent, market_agent, macro_agent, news_agent]
flow = "StockAgent -> MarketAgent -> MacroAgent -> NewsAgent"

agent_system = AgentRearrange(agents=agents, flow=flow)
```

The `flow` variable defines the order in which our agents will process information. This allows for a logical progression from specific stock analysis to broader market and economic analysis.

## Step 6: Implementing Real-Time Analysis

Now we implement our main analysis function:

```python
async def real_time_analysis(session, ticker):
    logger.info(f"Starting real-time analysis for {ticker}")

    # Fetch real-time data
    yf_data, yf_info = await get_yahoo_finance_data(session, ticker)
    yf_realtime = await get_yahoo_finance_realtime(session, ticker)
    polygon_trades, polygon_quotes = await get_polygon_realtime_data(session, ticker)
    polygon_news = await get_polygon_news(session, ticker)
    fred_data = await get_fred_realtime(session, ['GDP', 'UNRATE', 'CPIAUCSL'])

    # Prepare input for the multi-agent system
    input_data = f"""
    Yahoo Finance Data:
    {yf_realtime}

    Recent Stock History:
    {yf_data.tail().to_string() if yf_data is not None else 'Data unavailable'}

    Polygon.io Trade Data:
    {polygon_trades}

    Polygon.io Quote Data:
    {polygon_quotes}

    Recent News:
    {polygon_news[:3] if polygon_news else 'No recent news available'}

    Economic Indicators:
    {fred_data}

    Analyze this real-time financial data for {ticker}. Provide insights on the stock's performance, overall market conditions, relevant economic factors, and any significant news that might impact the stock or market.
    """

    # Run the multi-agent analysis
    try:
        analysis = agent_system.run(input_data)
        logger.info(f"Analysis completed for {ticker}")
        return analysis
    except Exception as e:
        logger.error(f"Error during multi-agent analysis for {ticker}: {e}")
        return f"Error during analysis: {e}"
```

This function fetches data from all our sources, prepares it as input for our multi-agent system, and then runs the analysis. The result is a comprehensive analysis of the stock, considering individual performance, market conditions, economic factors, and relevant news.

## Step 7: Implementing Advanced Use Cases

We then implement more advanced analysis functions:

### Compare Stocks

```python
async def compare_stocks(session, tickers):
    results = {}
    for ticker in tickers:
        results[ticker] = await real_time_analysis(session, ticker)

    comparison_prompt = f"""
    Compare the following stocks based on the provided analyses:
    {results}

    Highlight key differences and similarities. Provide a ranking of these stocks based on their current performance and future prospects.
    """

    try:
        comparison = agent_system.run(comparison_prompt)
        logger.info(f"Stock comparison completed for {tickers}")
        return comparison
    except Exception as e:
        logger.error(f"Error during stock comparison: {e}")
        return f"Error during comparison: {e}"
```

This function compares multiple stocks by running a real-time analysis on each and then prompting our multi-agent system to compare the results.

### Sector Analysis

```python
async def sector_analysis(session, sector):
    sector_stocks = {
        'Technology': ['AAPL', 'MSFT', 'GOOGL', 'AMZN', 'NVDA'],
        'Finance': ['JPM', 'BAC', 'WFC', 'C', 'GS'],
        'Healthcare': ['JNJ', 'UNH', 'PFE', 'ABT', 'MRK'],
        'Consumer Goods': ['PG', 'KO', 'PEP', 'COST', 'WMT'],
        'Energy': ['XOM', 'CVX', 'COP', 'SLB', 'EOG']
    }

    if sector not in sector_stocks:
        return f"Sector '{sector}' not found. Available sectors: {', '.join(sector_stocks.keys())}"

    stocks = sector_stocks[sector][:5]

    sector_data = {}
    for stock in stocks:
        sector_data[stock] = await real_time_analysis(session, stock)

    sector_prompt = f"""
    Analyze the {sector} sector based on the following data from its top stocks:
    {sector_data}

    Provide insights on:
    1. Overall sector performance
    2. Key trends within the sector
    3. Top performing stocks and why they're outperforming
    4. Any challenges or opportunities facing the sector
    """

    try:
        analysis = agent_system.run(sector_prompt)
        logger.info(f"Sector analysis completed for {sector}")
        return analysis
    except Exception as e:
        logger.error(f"Error during sector analysis for {sector}: {e}")
        return f"Error during sector analysis: {e}"
```

This function analyzes an entire sector by running real-time analysis on its top stocks and then prompting our multi-agent system to provide sector-wide insights.

### Economic Impact Analysis

```python
async def economic_impact_analysis(session, indicator, threshold):
    # Fetch historical data for the indicator
    end_date = datetime.now().strftime('%Y-%m-%d')
    start_date = (datetime.now() - timedelta(days=365)).strftime('%Y-%m-%d')
    indicator_data = await get_fred_data(session, indicator, start_date, end_date)

    if indicator_data is None or len(indicator_data) < 2:
        return f"Insufficient data for indicator {indicator}"

    # Check if the latest value crosses the threshold
    latest_value = indicator_data.iloc[-1]
    previous_value = indicator_data.iloc[-2]
    crossed_threshold = (latest_value > threshold and previous_value <= threshold) or (latest_value < threshold and previous_value >= threshold)

    if crossed_threshold:
        impact_prompt = f"""
        The economic indicator {indicator} has crossed the threshold of {threshold}. Its current value is {latest_value}.

        Historical data:
        {indicator_data.tail().to_string()}

        Analyze the potential impacts of this change on:
        1. Overall economic conditions
        2. Different market sectors
        3. Specific types of stocks (e.g., growth vs. value)
        4. Other economic indicators

        Provide a comprehensive analysis of the potential consequences and any recommended actions for investors.
        """

        try:
            analysis = agent_system.run(impact_prompt)
            logger.info(f"Economic impact analysis completed for {indicator}")
            return analysis
        except Exception as e:
            logger.error(f"Error during economic impact analysis for {indicator}: {e}")
            return f"Error during economic impact analysis: {e}"
    else:
        return f"The {indicator} indicator has not crossed the threshold of {threshold}. Current value: {latest_value}"
```

This function analyzes the potential impact of significant changes in economic indicators. It fetches historical data, checks if a threshold has been crossed, and if so, prompts our multi-agent system to provide a comprehensive analysis of the potential consequences.

## Step 8: Running the Analysis

Finally, we implement our main function to run all of our analyses:

```python
async def main():
    async with aiohttp.ClientSession() as session:
        # Example usage
        analysis_result = await real_time_analysis(session, 'AAPL')
        print("Single Stock Analysis:")
        print(analysis_result)

        comparison_result = await compare_stocks(session, ['AAPL', 'GOOGL', 'MSFT'])
        print("\nStock Comparison:")
        print(comparison_result)

        tech_sector_analysis = await sector_analysis(session, 'Technology')
        print("\nTechnology Sector Analysis:")
        print(tech_sector_analysis)

        gdp_impact = await economic_impact_analysis(session, 'GDP', 22000)
        print("\nEconomic Impact Analysis:")
        print(gdp_impact)

if __name__ == "__main__":
    asyncio.run(main())
```

This `main` function demonstrates how to use all of our analysis functions. It runs a single stock analysis, compares multiple stocks, performs a sector analysis, and conducts an economic impact analysis.

## Conclusion and Next Steps

This tutorial has walked you through the process of building a sophisticated multi-agent system for real-time financial analysis using the Swarms framework. Here's a summary of what we've accomplished:

1. Set up our environment and API connections
2. Implemented rate limiting to respect API constraints
3. Created functions to fetch data from multiple sources (Yahoo Finance, Polygon.io, FRED)
4. Designed specialized AI agents for different aspects of financial analysis
5. Combined these agents into a multi-agent system
6. Implemented advanced analysis functions including stock comparison, sector analysis, and economic impact analysis

This system provides a powerful foundation for financial analysis, but there's always room for expansion and improvement. Here are some potential next steps:

1. **Expand data sources**: Consider integrating additional financial data providers for even more comprehensive analysis.

2. **Enhance agent specialization**: You could create more specialized agents, such as a technical analysis agent or a sentiment analysis agent for social media data.

3. **Implement a user interface**: Consider building a web interface or dashboard to make the system more user-friendly for non-technical analysts.

4. **Add visualization capabilities**: Integrate data visualization tools to help interpret complex financial data more easily.

5. **Implement a backtesting system**: Develop a system to evaluate your multi-agent system's performance on historical data.

6. **Explore advanced AI models**: The Swarms framework supports various AI models. Experiment with different models to see which performs best for your specific use case.

7. **Implement real-time monitoring**: Set up a system to continuously monitor markets and alert you to significant changes or opportunities. A minimal sketch follows this list.

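As a starting point for item 7, here is a minimal sketch of a periodic monitoring loop built from the functions in this tutorial; the watchlist, polling interval, and the choice to simply log a truncated analysis are illustrative assumptions:

```python
async def monitor_markets(tickers, interval_seconds=300):
    """Periodically re-run the analysis for each ticker and surface the results."""
    async with aiohttp.ClientSession() as session:
        while True:
            for ticker in tickers:
                analysis = await real_time_analysis(session, ticker)
                # A real system would parse the analysis and trigger alerts
                # (email, Slack, etc.) on significant changes; here we just log it.
                logger.info(f"[{ticker}] {analysis[:200]}...")
            await asyncio.sleep(interval_seconds)

# Usage sketch:
# asyncio.run(monitor_markets(['AAPL', 'MSFT'], interval_seconds=600))
```
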
Remember, the Swarms framework is a powerful and flexible tool that can be adapted to a wide range of complex tasks beyond just financial analysis. We encourage you to explore the [Swarms GitHub repository](https://github.com/kyegomez/swarms) for more examples and inspiration.

For more in-depth discussions and community support, consider joining the [Swarms Discord](https://discord.com/servers/agora-999382051935506503). You can also stay updated with the latest developments by following [Swarms on Twitter](https://x.com/swarms_corp).

If you're interested in learning more about AI and its applications in various fields, check out the [Swarms Spotify podcast](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994) and the [Swarms Blog](https://medium.com/@kyeg) for insightful articles and discussions.

Lastly, don't forget to visit the [Swarms Website](https://swarms.xyz) for a comprehensive overview of the project and its capabilities.

By leveraging the power of multi-agent AI systems, you're well-equipped to navigate the complex world of financial markets. Happy analyzing!

## Swarm Resources:

* [Swarms Github](https://github.com/kyegomez/swarms)
* [Swarms Discord](https://discord.com/servers/agora-999382051935506503)
* [Swarms Twitter](https://x.com/swarms_corp)
* [Swarms Spotify](https://open.spotify.com/show/2HLiswhmUaMdjHC8AUHcCF?si=c831ef10c5ef4994)
* [Swarms Blog](https://medium.com/@kyeg)
* [Swarms Website](https://swarms.xyz)

@ -0,0 +1,751 @@

# Analyzing Financial Data with AI Agents using Swarms Framework

In the rapidly evolving landscape of quantitative finance, the integration of artificial intelligence with financial data analysis has become increasingly crucial. This blog post will explore how to leverage the power of AI agents, specifically using the Swarms framework, to analyze financial data from various top-tier data providers. We'll demonstrate how to connect these agents with different financial APIs, enabling sophisticated analysis and decision-making processes.

## Table of Contents

1. [Introduction to Swarms Framework](#introduction-to-swarms-framework)
2. [Setting Up the Environment](#setting-up-the-environment)
3. [Connecting AI Agents with Financial Data Providers](#connecting-ai-agents-with-financial-data-providers)
    - [Polygon.io](#polygonio)
    - [Alpha Vantage](#alpha-vantage)
    - [Yahoo Finance](#yahoo-finance)
    - [IEX Cloud](#iex-cloud)
    - [Finnhub](#finnhub)
4. [Advanced Analysis Techniques](#advanced-analysis-techniques)
5. [Best Practices and Considerations](#best-practices-and-considerations)
6. [Conclusion](#conclusion)

## Introduction to Swarms Framework

The Swarms framework is a powerful tool for building and deploying AI agents that can interact with various data sources and perform complex analyses. In the context of financial data analysis, Swarms can be used to create intelligent agents that can process large volumes of financial data, identify patterns, and make data-driven decisions. Explore our GitHub for examples, applications, and more.

## Setting Up the Environment

Before we dive into connecting AI agents with financial data providers, let's set up our environment:

1. Install the Swarms framework:

```bash
pip install -U swarms
```

2. Install additional required libraries (the last four are used by the examples later in this post):

```bash
pip install requests pandas numpy matplotlib yfinance python-dotenv finnhub-python tweepy
```

3. Set up your API keys for the various financial data providers. It's recommended to use environment variables or a secure configuration file to store these keys.

## Connecting AI Agents with Financial Data Providers

Now, let's explore how to connect AI agents using the Swarms framework with different financial data providers.

### Polygon.io

First, we'll create an AI agent that can fetch and analyze stock data from Polygon.io.

```python
import os
from swarms import Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import requests
import pandas as pd

load_dotenv()

# Polygon.io API setup
POLYGON_API_KEY = os.getenv("POLYGON_API_KEY")
POLYGON_BASE_URL = "https://api.polygon.io/v2"

# OpenAI API setup
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4",
    temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt="You are a financial analysis AI assistant. Your task is to analyze stock data and provide insights.",
    llm=model,
    max_loops=1,
    dashboard=False,
    verbose=True
)

def get_stock_data(symbol, from_date, to_date):
    endpoint = f"{POLYGON_BASE_URL}/aggs/ticker/{symbol}/range/1/day/{from_date}/{to_date}"
    params = {
        'apiKey': POLYGON_API_KEY,
        'adjusted': 'true'
    }
    response = requests.get(endpoint, params=params)
    data = response.json()
    return pd.DataFrame(data['results'])

# Example usage
symbol = "AAPL"
from_date = "2023-01-01"
to_date = "2023-12-31"

stock_data = get_stock_data(symbol, from_date, to_date)

analysis_request = f"""
Analyze the following stock data for {symbol} from {from_date} to {to_date}:

{stock_data.to_string()}

Provide insights on the stock's performance, including trends, volatility, and any notable events.
"""

analysis = agent.run(analysis_request)
print(analysis)
```

In this example, we've created an AI agent that can fetch stock data from Polygon.io and perform an analysis based on that data. The agent uses the GPT-4 model to generate insights about the stock's performance.

### Alpha Vantage

Next, let's create an agent that can work with Alpha Vantage data to perform fundamental analysis.

```python
import os
from swarms import Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import requests

load_dotenv()

# Alpha Vantage API setup
ALPHA_VANTAGE_API_KEY = os.getenv("ALPHA_VANTAGE_API_KEY")
ALPHA_VANTAGE_BASE_URL = "https://www.alphavantage.co/query"

# OpenAI API setup
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4",
    temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Fundamental-Analysis-Agent",
    system_prompt="You are a financial analysis AI assistant specializing in fundamental analysis. Your task is to analyze company financials and provide insights.",
    llm=model,
    max_loops=1,
    dashboard=False,
    verbose=True
)

def get_income_statement(symbol):
    params = {
        'function': 'INCOME_STATEMENT',
        'symbol': symbol,
        'apikey': ALPHA_VANTAGE_API_KEY
    }
    response = requests.get(ALPHA_VANTAGE_BASE_URL, params=params)
    return response.json()

# Example usage
symbol = "MSFT"

income_statement = get_income_statement(symbol)

analysis_request = f"""
Analyze the following income statement data for {symbol}:

{income_statement}

Provide insights on the company's financial health, profitability trends, and any notable observations.
"""

analysis = agent.run(analysis_request)
print(analysis)
```

This example demonstrates an AI agent that can fetch income statement data from Alpha Vantage and perform a fundamental analysis of a company's financials.

### Yahoo Finance

Now, let's create an agent that can work with Yahoo Finance data to perform technical analysis.

```python
import os
from swarms import Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import yfinance as yf
import pandas as pd

load_dotenv()

# OpenAI API setup
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4",
    temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Technical-Analysis-Agent",
    system_prompt="You are a financial analysis AI assistant specializing in technical analysis. Your task is to analyze stock price data and provide insights on trends and potential trading signals.",
    llm=model,
    max_loops=1,
    dashboard=False,
    verbose=True
)

def get_stock_data(symbol, start_date, end_date):
    stock = yf.Ticker(symbol)
    data = stock.history(start=start_date, end=end_date)
    return data

# Example usage
symbol = "GOOGL"
start_date = "2023-01-01"
end_date = "2023-12-31"

stock_data = get_stock_data(symbol, start_date, end_date)

# Calculate some technical indicators
stock_data['SMA_20'] = stock_data['Close'].rolling(window=20).mean()
stock_data['SMA_50'] = stock_data['Close'].rolling(window=50).mean()

analysis_request = f"""
Analyze the following stock price data and technical indicators for {symbol} from {start_date} to {end_date}:

{stock_data.tail(30).to_string()}

Provide insights on the stock's price trends, potential support and resistance levels, and any notable trading signals based on the moving averages.
"""

analysis = agent.run(analysis_request)
print(analysis)
```

This example shows an AI agent that can fetch stock price data from Yahoo Finance, calculate some basic technical indicators, and perform a technical analysis.

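If you want indicators beyond moving averages, a minimal pandas sketch of the 14-period Relative Strength Index (RSI) can be added to the same DataFrame before building the analysis request; this is an illustrative extension, not part of the original example:

```python
def compute_rsi(close: pd.Series, window: int = 14) -> pd.Series:
    """Classic RSI: 100 - 100 / (1 + average gain / average loss)."""
    delta = close.diff()
    gain = delta.clip(lower=0).rolling(window=window).mean()
    loss = (-delta.clip(upper=0)).rolling(window=window).mean()
    rs = gain / loss
    return 100 - 100 / (1 + rs)

# Add the indicator alongside the moving averages
stock_data['RSI_14'] = compute_rsi(stock_data['Close'])
```
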
### IEX Cloud

Let's create an agent that can work with IEX Cloud data to analyze company news sentiment.

```python
import os
from swarms import Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import requests

load_dotenv()

# IEX Cloud API setup
IEX_CLOUD_API_KEY = os.getenv("IEX_CLOUD_API_KEY")
IEX_CLOUD_BASE_URL = "https://cloud.iexapis.com/stable"

# OpenAI API setup
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4",
    temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="News-Sentiment-Analysis-Agent",
    system_prompt="You are a financial analysis AI assistant specializing in news sentiment analysis. Your task is to analyze company news and provide insights on the overall sentiment and potential impact on the stock.",
    llm=model,
    max_loops=1,
    dashboard=False,
    verbose=True
)

def get_company_news(symbol, last_n):
    endpoint = f"{IEX_CLOUD_BASE_URL}/stock/{symbol}/news/last/{last_n}"
    params = {'token': IEX_CLOUD_API_KEY}
    response = requests.get(endpoint, params=params)
    return response.json()

# Example usage
symbol = "TSLA"
last_n = 10

news_data = get_company_news(symbol, last_n)

analysis_request = f"""
Analyze the following recent news articles for {symbol}:

{news_data}

Provide insights on the overall sentiment of the news, potential impact on the stock price, and any notable trends or events mentioned.
"""

analysis = agent.run(analysis_request)
print(analysis)
```

This example demonstrates an AI agent that can fetch recent news data from IEX Cloud and perform a sentiment analysis on the company news.

### Finnhub

Finally, let's create an agent that can work with Finnhub data to analyze earnings estimates and recommendations.

```python
import os
from swarms import Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import finnhub

load_dotenv()

# Finnhub API setup
FINNHUB_API_KEY = os.getenv("FINNHUB_API_KEY")
finnhub_client = finnhub.Client(api_key=FINNHUB_API_KEY)

# OpenAI API setup
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4",
    temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Earnings-Analysis-Agent",
    system_prompt="You are a financial analysis AI assistant specializing in earnings analysis. Your task is to analyze earnings estimates and recommendations to provide insights on a company's financial outlook.",
    llm=model,
    max_loops=1,
    dashboard=False,
    verbose=True
)

def get_earnings_estimates(symbol):
    # Note: the finnhub-python client names these parameters `_from` and `to`
    return finnhub_client.earnings_calendar(_from="2023-01-01", to="2023-12-31", symbol=symbol)

def get_recommendations(symbol):
    return finnhub_client.recommendation_trends(symbol)

# Example usage
symbol = "NVDA"

earnings_estimates = get_earnings_estimates(symbol)
recommendations = get_recommendations(symbol)

analysis_request = f"""
Analyze the following earnings estimates and recommendations for {symbol}:

Earnings Estimates:
{earnings_estimates}

Recommendations:
{recommendations}

Provide insights on the company's expected financial performance, analyst sentiment, and any notable trends in the recommendations.
"""

analysis = agent.run(analysis_request)
print(analysis)
```

This example shows an AI agent that can fetch earnings estimates and analyst recommendations from Finnhub and perform an analysis on the company's financial outlook.

## Advanced Analysis Techniques

To further enhance the capabilities of our AI agents, we can implement more advanced analysis techniques:

1. Multi-source analysis: Combine data from multiple providers to get a more comprehensive view of a stock or market.

2. Time series forecasting: Implement machine learning models for price prediction.

3. Sentiment analysis of social media: Incorporate data from social media platforms to gauge market sentiment.

4. Portfolio optimization: Use AI agents to suggest optimal portfolio allocations based on risk tolerance and investment goals.

5. Anomaly detection: Implement algorithms to detect unusual patterns or events in financial data (a small sketch follows this list).

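As a taste of item 5 before we walk through the multi-source example, here is a minimal rolling z-score anomaly detector for daily returns; the window length and threshold are illustrative assumptions, and production systems would use more robust methods:

```python
import pandas as pd

def flag_anomalies(close: pd.Series, window: int = 20, threshold: float = 3.0) -> pd.Series:
    """Mark days whose return is more than `threshold` rolling std devs from the rolling mean."""
    returns = close.pct_change()
    z = (returns - returns.rolling(window).mean()) / returns.rolling(window).std()
    return z.abs() > threshold

# Usage sketch, given a price DataFrame like the ones above:
# anomalies = flag_anomalies(stock_data['Close'])
# print(stock_data.loc[anomalies, 'Close'])
```
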
Here's an example of how we might implement a multi-source analysis:

```python
import os
from swarms import Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import yfinance as yf
import requests
import pandas as pd

load_dotenv()

# API setup
POLYGON_API_KEY = os.getenv("POLYGON_API_KEY")
ALPHA_VANTAGE_API_KEY = os.getenv("ALPHA_VANTAGE_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4",
    temperature=0.1
)

# Initialize the agent
agent = Agent(
    agent_name="Multi-Source-Analysis-Agent",
    system_prompt="You are a financial analysis AI assistant capable of analyzing data from multiple sources. Your task is to provide comprehensive insights on a stock based on various data points.",
    llm=model,
    max_loops=1,
    dashboard=False,
    verbose=True
)

def get_stock_data_yf(symbol, start_date, end_date):
    stock = yf.Ticker(symbol)
    return stock.history(start=start_date, end=end_date)

def get_stock_data_polygon(symbol, from_date, to_date):
    endpoint = f"https://api.polygon.io/v2/aggs/ticker/{symbol}/range/1/day/{from_date}/{to_date}"
    params = {'apiKey': POLYGON_API_KEY, 'adjusted': 'true'}
    response = requests.get(endpoint, params=params)
    data = response.json()
    return pd.DataFrame(data['results'])

def get_company_overview_av(symbol):
    params = {
        'function': 'OVERVIEW',
        'symbol': symbol,
        'apikey': ALPHA_VANTAGE_API_KEY
    }
    response = requests.get("https://www.alphavantage.co/query", params=params)
    return response.json()

# Example usage
symbol = "AAPL"
start_date = "2023-01-01"
end_date = "2023-12-31"

yf_data = get_stock_data_yf(symbol, start_date, end_date)
polygon_data = get_stock_data_polygon(symbol, start_date, end_date)
av_overview = get_company_overview_av(symbol)

analysis_request = f"""
Analyze the following data for {symbol} from {start_date} to {end_date}:

Yahoo Finance Data:
{yf_data.tail().to_string()}

Polygon.io Data:
{polygon_data.tail().to_string()}

Alpha Vantage Company Overview:
{av_overview}

Provide a comprehensive analysis of the stock, including:
1. Price trends and volatility
2. Trading volume analysis
3. Fundamental analysis based on the company overview
4. Any discrepancies between data sources and potential reasons
5. Overall outlook and potential risks/opportunities
"""

analysis = agent.run(analysis_request)
print(analysis)
```

This multi-source analysis example combines data from Yahoo Finance, Polygon.io, and Alpha Vantage to provide a more comprehensive view of a stock. The AI agent can then analyze this diverse set of data to provide deeper insights.

Now, let's explore some additional advanced analysis techniques:
|
||||||
|
|
||||||
|
### Time Series Forecasting
|
||||||
|
|
||||||
|
We can implement a simple time series forecasting model using the Prophet library and integrate it with our AI agent:
|
||||||
|
|
||||||
|
```python
import os
from swarms import Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import yfinance as yf
import pandas as pd
from prophet import Prophet
import matplotlib.pyplot as plt

load_dotenv()

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

model = OpenAIChat(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4",
    temperature=0.1
)

agent = Agent(
    agent_name="Time-Series-Forecast-Agent",
    system_prompt="You are a financial analysis AI assistant specializing in time series forecasting. Your task is to analyze stock price predictions and provide insights.",
    llm=model,
    max_loops=1,
    dashboard=False,
    verbose=True
)

def get_stock_data(symbol, start_date, end_date):
    stock = yf.Ticker(symbol)
    data = stock.history(start=start_date, end=end_date)
    return data

def forecast_stock_price(data, periods=30):
    # Prophet expects a dataframe with a 'ds' (date) and 'y' (value) column
    df = data.reset_index()[['Date', 'Close']]
    df.columns = ['ds', 'y']
    # yfinance returns timezone-aware dates, which Prophet rejects
    df['ds'] = df['ds'].dt.tz_localize(None)

    model = Prophet()
    model.fit(df)

    future = model.make_future_dataframe(periods=periods)
    forecast = model.predict(future)

    fig = model.plot(forecast)
    plt.savefig('forecast_plot.png')
    plt.close()

    return forecast

# Example usage
symbol = "MSFT"
start_date = "2020-01-01"
end_date = "2023-12-31"

stock_data = get_stock_data(symbol, start_date, end_date)
forecast = forecast_stock_price(stock_data)

analysis_request = f"""
Analyze the following time series forecast for {symbol}:

Forecast Data:
{forecast.tail(30).to_string()}

The forecast plot has been saved as 'forecast_plot.png'.

Provide insights on:
1. The predicted trend for the stock price
2. Any seasonal patterns observed
3. Potential factors that might influence the forecast
4. Limitations of this forecasting method
5. Recommendations for investors based on this forecast
"""

analysis = agent.run(analysis_request)
print(analysis)
```

This example demonstrates how to integrate a time series forecasting model (Prophet) with our AI agent. The agent can then provide insights based on the forecasted data.
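
Before acting on a forecast like this, it's worth quantifying how wrong the model tends to be. Below is a minimal sketch using Prophet's built-in diagnostics, assuming the fitted `Prophet` model from `forecast_stock_price` is made available (e.g., returned alongside the forecast):

```python
# A minimal sketch: back-test the Prophet model with rolling-origin
# cross-validation before trusting its forward forecast.
from prophet.diagnostics import cross_validation, performance_metrics

def evaluate_forecast(model):
    # Train on the first two years, forecast 30 days ahead, and slide
    # the cutoff forward 90 days at a time
    df_cv = cross_validation(model, initial='730 days',
                             period='90 days', horizon='30 days')
    # Aggregate error metrics (MSE, RMSE, MAE, MAPE, coverage)
    return performance_metrics(df_cv)
```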

### Sentiment Analysis of Social Media

We can use a pre-trained sentiment analysis model to analyze tweets about a company and integrate this with our AI agent:
```python
import os
from swarms import Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import tweepy
from textblob import TextBlob
import pandas as pd

load_dotenv()

# Twitter API setup
TWITTER_API_KEY = os.getenv("TWITTER_API_KEY")
TWITTER_API_SECRET = os.getenv("TWITTER_API_SECRET")
TWITTER_ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET")

auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)

# OpenAI setup
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

model = OpenAIChat(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4",
    temperature=0.1
)

agent = Agent(
    agent_name="Social-Media-Sentiment-Agent",
    system_prompt="You are a financial analysis AI assistant specializing in social media sentiment analysis. Your task is to analyze sentiment data from tweets and provide insights on market perception.",
    llm=model,
    max_loops=1,
    dashboard=False,
    verbose=True
)

def get_tweets(query, count=100):
    # Note: the v1.1 search endpoint used here may require elevated
    # Twitter API access under current API tiers
    tweets = api.search_tweets(q=query, count=count, tweet_mode="extended")
    return [tweet.full_text for tweet in tweets]

def analyze_sentiment(tweets):
    # TextBlob polarity ranges from -1 (negative) to +1 (positive)
    sentiments = [TextBlob(tweet).sentiment.polarity for tweet in tweets]
    return pd.DataFrame({'tweet': tweets, 'sentiment': sentiments})

# Example usage
symbol = "TSLA"
query = f"${symbol} stock"

tweets = get_tweets(query)
sentiment_data = analyze_sentiment(tweets)

analysis_request = f"""
Analyze the following sentiment data for tweets about {symbol} stock:

Sentiment Summary:
Positive tweets: {sum(sentiment_data['sentiment'] > 0)}
Negative tweets: {sum(sentiment_data['sentiment'] < 0)}
Neutral tweets: {sum(sentiment_data['sentiment'] == 0)}

Average sentiment: {sentiment_data['sentiment'].mean()}

Sample tweets and their sentiments:
{sentiment_data.head(10).to_string()}

Provide insights on:
1. The overall sentiment towards the stock
2. Any notable trends or patterns in the sentiment
3. Potential reasons for the observed sentiment
4. How this sentiment might impact the stock price
5. Limitations of this sentiment analysis method
"""

analysis = agent.run(analysis_request)
print(analysis)
```

This example shows how to perform sentiment analysis on tweets about a stock and integrate the results with our AI agent for further analysis.

### Portfolio Optimization

We can use the PyPortfolioOpt library to perform portfolio optimization and have our AI agent provide insights:
```python
import os
from swarms import Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import yfinance as yf
import pandas as pd
import numpy as np
from pypfopt import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns

load_dotenv()

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

model = OpenAIChat(
    openai_api_key=OPENAI_API_KEY,
    model_name="gpt-4",
    temperature=0.1
)

agent = Agent(
    agent_name="Portfolio-Optimization-Agent",
    system_prompt="You are a financial analysis AI assistant specializing in portfolio optimization. Your task is to analyze optimized portfolio allocations and provide investment advice.",
    llm=model,
    max_loops=1,
    dashboard=False,
    verbose=True
)

def get_stock_data(symbols, start_date, end_date):
    # auto_adjust=False keeps the 'Adj Close' column, which recent
    # yfinance versions otherwise drop by default
    data = yf.download(symbols, start=start_date, end=end_date,
                       auto_adjust=False)['Adj Close']
    return data

def optimize_portfolio(data):
    # Estimate expected returns and the covariance matrix from prices
    mu = expected_returns.mean_historical_return(data)
    S = risk_models.sample_cov(data)

    # Find the weights that maximize the Sharpe ratio
    ef = EfficientFrontier(mu, S)
    weights = ef.max_sharpe()
    cleaned_weights = ef.clean_weights()

    return cleaned_weights

# Example usage ("META" replaces the delisted "FB" ticker)
symbols = ["AAPL", "GOOGL", "MSFT", "AMZN", "META"]
start_date = "2018-01-01"
end_date = "2023-12-31"

stock_data = get_stock_data(symbols, start_date, end_date)
optimized_weights = optimize_portfolio(stock_data)

analysis_request = f"""
Analyze the following optimized portfolio allocation:

{pd.Series(optimized_weights).to_string()}

The optimization aimed to maximize the Sharpe ratio based on historical data from {start_date} to {end_date}.

Provide insights on:
1. The recommended allocation and its potential benefits
2. Any notable concentrations or diversification in the portfolio
3. Potential risks associated with this allocation
4. How this portfolio might perform in different market conditions
5. Recommendations for an investor considering this allocation
6. Limitations of this optimization method
"""

analysis = agent.run(analysis_request)
print(analysis)
```

This example demonstrates how to perform portfolio optimization using the PyPortfolioOpt library and have our AI agent provide insights on the optimized allocation.
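
The optimizer returns fractional weights, but real orders are placed in whole shares. Here is a minimal sketch using PyPortfolioOpt's discrete-allocation helper; the $10,000 budget is an illustrative assumption:

```python
# A minimal sketch: convert the continuous weights into whole-share
# purchases for a given budget.
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices

latest_prices = get_latest_prices(stock_data)
da = DiscreteAllocation(optimized_weights, latest_prices,
                        total_portfolio_value=10_000)
allocation, leftover = da.lp_portfolio()
print(f"Shares to buy: {allocation}")
print(f"Funds remaining: ${leftover:.2f}")
```

The resulting share counts could be appended to the analysis request so the agent comments on a concrete, executable allocation rather than abstract percentages.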

## Best Practices and Considerations

When using AI agents for financial data analysis, consider the following best practices:

1. Data quality: Ensure that the data you're feeding into the agents is accurate and up-to-date.

2. Model limitations: Be aware of the limitations of both the financial models and the AI models being used.

3. Regulatory compliance: Ensure that your use of AI in financial analysis complies with relevant regulations.

4. Ethical considerations: Be mindful of potential biases in AI models and strive for fair and ethical analysis.

5. Continuous monitoring: Regularly evaluate the performance of your AI agents and update them as needed.

6. Human oversight: While AI agents can provide valuable insights, human judgment should always play a role in financial decision-making.

7. Privacy and security: Implement robust security measures to protect sensitive financial data.

## Conclusion

The integration of AI agents with financial data APIs opens up exciting possibilities for advanced financial analysis. By leveraging the power of the Swarms framework and connecting it with various financial data providers, analysts and quants can gain deeper insights, automate complex analyses, and potentially make more informed investment decisions.

However, it's crucial to remember that while AI agents can process vast amounts of data and identify patterns that humans might miss, they should be used as tools to augment human decision-making rather than replace it entirely. The financial markets are complex systems influenced by numerous factors, many of which may not be captured in historical data or current models.

As the field of AI in finance continues to evolve, we can expect even more sophisticated analysis techniques and integrations. Staying updated with the latest developments in both AI and financial analysis will be key to leveraging these powerful tools effectively.

---

# Comparing LLM Provider Pricing: A Guide for Enterprises

Large language models (LLMs) have become a cornerstone of innovation for enterprises across various industries. As executives contemplate which model to integrate into their operations, understanding the intricacies of LLM provider pricing is crucial. This comprehensive guide delves into the tactical business considerations, unit economics, profit margins, and ROI calculations that will empower decision-makers to deploy the right AI solution for their organization.

## Table of Contents

1. [Introduction to LLM Pricing Models](#introduction-to-llm-pricing-models)
2. [Understanding Unit Economics in LLM Deployment](#understanding-unit-economics-in-llm-deployment)
3. [Profit Margins and Cost Structures](#profit-margins-and-cost-structures)
4. [LLM Pricing in Action: Case Studies](#llm-pricing-in-action-case-studies)
5. [Calculating ROI for LLM Integration](#calculating-roi-for-llm-integration)
6. [Comparative Analysis of Major LLM Providers](#comparative-analysis-of-major-llm-providers)
7. [Hidden Costs and Considerations](#hidden-costs-and-considerations)
8. [Optimizing LLM Usage for Cost-Efficiency](#optimizing-llm-usage-for-cost-efficiency)
9. [Future Trends in LLM Pricing](#future-trends-in-llm-pricing)
10. [Strategic Decision-Making Framework](#strategic-decision-making-framework)
11. [Conclusion: Navigating the LLM Pricing Landscape](#conclusion-navigating-the-llm-pricing-landscape)

## 1. Introduction to LLM Pricing Models

The pricing of Large Language Models (LLMs) is a complex landscape that can significantly impact an enterprise's bottom line. As we dive into this topic, it's crucial to understand the various pricing models employed by LLM providers and how they align with different business needs.

### Pay-per-Token Model

The most common pricing structure in the LLM market is the pay-per-token model. In this system, businesses are charged based on the number of tokens processed by the model. A token can be as short as one character or as long as one word, depending on the language and the specific tokenization method used by the model.
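
Because billing is per token, estimating a prompt's token count before sending it is the first step in cost control. Below is a minimal sketch, assuming an OpenAI-style model, the `tiktoken` tokenizer, and an illustrative $5.00-per-million-token input rate:

```python
# A minimal sketch of pre-flight cost estimation for per-token billing.
import tiktoken

def estimate_input_cost(text: str, model: str = "gpt-4",
                        price_per_million: float = 5.00) -> float:
    enc = tiktoken.encoding_for_model(model)
    n_tokens = len(enc.encode(text))  # count tokens exactly as billed
    return n_tokens / 1_000_000 * price_per_million

print(estimate_input_cost("Summarize our Q3 earnings call in three bullets."))
```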

**Advantages:**
- Scalability: Costs scale directly with usage, allowing for flexibility as demand fluctuates.
- Transparency: Easy to track and attribute costs to specific projects or departments.

**Disadvantages:**
- Unpredictability: Costs can vary significantly based on the verbosity of inputs and outputs.
- Potential for overruns: Without proper monitoring, costs can quickly escalate.

### Subscription-Based Models

Some providers offer subscription tiers that provide a set amount of compute resources or tokens for a fixed monthly or annual fee.

**Advantages:**
- Predictable costs: Easier budgeting and financial planning.
- Potential cost savings: Can be more economical for consistent, high-volume usage.

**Disadvantages:**
- Less flexibility: May lead to underutilization or overages.
- Commitment required: Often involves longer-term contracts.

### Custom Enterprise Agreements

For large-scale deployments, providers may offer custom pricing agreements tailored to the specific needs of an enterprise.

**Advantages:**
- Optimized for specific use cases: Can include specialized support, SLAs, and pricing structures.
- Potential for significant cost savings at scale.

**Disadvantages:**
- Complexity: Negotiating and managing these agreements can be resource-intensive.
- Less standardization: Difficult to compare across providers.

### Hybrid Models

Some providers are beginning to offer hybrid models that combine elements of pay-per-token and subscription-based pricing.

**Advantages:**
- Flexibility: Can adapt to varying usage patterns.
- Risk mitigation: Balances the benefits of both main pricing models.

**Disadvantages:**
- Complexity: Can be more challenging to understand and manage.
- Potential for suboptimal pricing if not carefully structured.

As we progress through this guide, we'll explore how these pricing models interact with various business considerations and how executives can leverage this understanding to make informed decisions.

## 2. Understanding Unit Economics in LLM Deployment

To make informed decisions about LLM deployment, executives must have a clear grasp of the unit economics involved. This section breaks down the components that contribute to the cost per unit of LLM usage and how they impact overall business economics.

### Defining the Unit

In the context of LLMs, a "unit" can be defined in several ways:

1. **Per Token**: The most granular unit, often used in pricing models.
2. **Per Request**: A single API call to the LLM, which may process multiple tokens.
3. **Per Task**: A complete operation, such as generating a summary or answering a question, which may involve multiple requests.
4. **Per User Interaction**: In customer-facing applications, this could be an entire conversation or session.

Understanding which unit is most relevant to your use case is crucial for accurate economic analysis.

### Components of Unit Cost

1. **Direct LLM Costs**
   - Token processing fees
   - API call charges
   - Data transfer costs

2. **Indirect Costs**
   - Compute resources for pre/post-processing
   - Storage for inputs, outputs, and fine-tuning data
   - Networking costs

3. **Operational Costs**
   - Monitoring and management tools
   - Integration and maintenance engineering time
   - Customer support related to AI functions

4. **Overhead**
   - Legal and compliance costs
   - Training and documentation
   - Risk management and insurance

### Calculating Unit Economics

To calculate the true unit economics, follow these steps:

1. **Determine Total Costs**: Sum all direct, indirect, operational, and overhead costs over a fixed period (e.g., monthly).

2. **Measure Total Units**: Track the total number of relevant units processed in the same period.

3. **Calculate Cost per Unit**: Divide total costs by total units.

   ```
   Cost per Unit = Total Costs / Total Units
   ```

4. **Analyze Revenue per Unit**: If the LLM is part of a revenue-generating product, calculate the revenue attributed to each unit.

5. **Determine Profit per Unit**: Subtract the cost per unit from the revenue per unit.

   ```
   Profit per Unit = Revenue per Unit - Cost per Unit
   ```

### Example Calculation

Let's consider a hypothetical customer service AI chatbot:

- Monthly LLM API costs: $10,000
- Indirect and operational costs: $5,000
- Total monthly interactions: 100,000

```
Cost per Interaction = ($10,000 + $5,000) / 100,000 = $0.15
```

If each interaction generates an average of $0.50 in value (through cost savings or revenue):

```
Profit per Interaction = $0.50 - $0.15 = $0.35
```
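
A minimal sketch of this arithmetic, using the hypothetical chatbot figures above:

```python
# A minimal sketch of per-unit economics; all figures are the
# illustrative assumptions from the example.
def unit_economics(total_costs: float, total_units: int,
                   revenue_per_unit: float) -> tuple[float, float]:
    cost_per_unit = total_costs / total_units
    profit_per_unit = revenue_per_unit - cost_per_unit
    return cost_per_unit, profit_per_unit

cost, profit = unit_economics(total_costs=10_000 + 5_000,
                              total_units=100_000,
                              revenue_per_unit=0.50)
print(f"Cost per interaction: ${cost:.2f}")      # $0.15
print(f"Profit per interaction: ${profit:.2f}")  # $0.35
```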

### Economies of Scale

As usage increases, unit economics often improve due to:

- Volume discounts from LLM providers
- Amortization of fixed costs over more units
- Efficiency gains through learning and optimization

However, it's crucial to model how these economies of scale manifest in your specific use case, as they may plateau or even reverse at very high volumes due to increased complexity and support needs.

### Diseconomies of Scale

Conversely, be aware of potential diseconomies of scale:

- Increased complexity in managing large-scale deployments
- Higher costs for specialized talent as operations grow
- Potential for diminishing returns on very large language models

By thoroughly understanding these unit economics, executives can make more informed decisions about which LLM provider and pricing model best aligns with their business objectives and scale.

## 3. Profit Margins and Cost Structures

Understanding profit margins and cost structures is crucial for executives evaluating LLM integration. This section explores how different pricing models and operational strategies can impact overall profitability.

### Components of Profit Margin

1. **Gross Margin**: The difference between revenue and the direct costs of LLM usage.

   ```
   Gross Margin = Revenue - Direct LLM Costs
   Gross Margin % = (Gross Margin / Revenue) * 100
   ```

2. **Contribution Margin**: Gross margin minus variable operational costs.

   ```
   Contribution Margin = Gross Margin - Variable Operational Costs
   ```

3. **Net Margin**: The final profit after all costs, including fixed overheads.

   ```
   Net Margin = Contribution Margin - Fixed Costs
   Net Margin % = (Net Margin / Revenue) * 100
   ```
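
A minimal sketch of this margin waterfall; the inputs are illustrative and happen to match the "Initial State" of the margin-improvement case study later in this section:

```python
# A minimal sketch of the gross -> contribution -> net margin waterfall.
def margins(revenue, llm_costs, variable_costs, fixed_costs):
    gross = revenue - llm_costs
    contribution = gross - variable_costs
    net = contribution - fixed_costs
    return {
        "gross_pct": gross / revenue * 100,
        "contribution": contribution,
        "net_pct": net / revenue * 100,
    }

print(margins(revenue=20.0, llm_costs=6.0, variable_costs=4.0, fixed_costs=5.0))
# {'gross_pct': 70.0, 'contribution': 10.0, 'net_pct': 25.0}
```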

### Cost Structures in LLM Deployment

1. **Fixed Costs**
   - Subscription fees for LLM access (if using a subscription model)
   - Base infrastructure costs
   - Core team salaries
   - Licensing fees for essential software

2. **Variable Costs**
   - Per-token or per-request charges
   - Scaling infrastructure costs
   - Usage-based API fees
   - Performance-based team bonuses

3. **Step Costs**
   - Costs that increase in chunks as usage scales
   - Examples: Adding new server clusters, hiring additional support staff

### Analyzing Profit Margins Across Different Pricing Models

Let's compare how different LLM pricing models might affect profit margins for a hypothetical AI-powered writing assistant service (a sketch of the arithmetic follows the comparison):

**Scenario**: The service charges users $20/month and expects to process an average of 100,000 tokens per user per month.

1. **Pay-per-Token Model**
   - LLM cost: $0.06 per 1,000 tokens
   - Monthly LLM cost per user: $6
   - Gross margin per user: $14 (70%)

2. **Subscription Model**
   - Fixed monthly fee: $5,000 for up to 100 million tokens
   - At 1,000 users: $5 per user
   - Gross margin per user: $15 (75%)

3. **Hybrid Model**
   - Base fee: $2,000 per month
   - Reduced per-token rate: $0.04 per 1,000 tokens
   - Monthly LLM cost per user: $6 ($2 base + $4 usage)
   - Gross margin per user: $14 (70%)
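
A minimal sketch of the comparison above, with the scenario's prices and usage hard-coded as assumptions:

```python
# A minimal sketch comparing per-user LLM cost and gross margin under
# the three pricing models; all figures are the scenario's assumptions.
PRICE = 20.0        # monthly price charged to the user
TOKENS = 100_000    # tokens processed per user per month

def pay_per_token(rate_per_1k):
    return TOKENS / 1_000 * rate_per_1k

def subscription(monthly_fee, users):
    return monthly_fee / users

def hybrid(base_fee, users, rate_per_1k):
    return base_fee / users + TOKENS / 1_000 * rate_per_1k

for name, cost in [
    ("pay-per-token", pay_per_token(0.06)),
    ("subscription", subscription(5_000, 1_000)),
    ("hybrid", hybrid(2_000, 1_000, 0.04)),
]:
    margin = PRICE - cost
    print(f"{name}: cost ${cost:.2f}, gross margin ${margin:.2f} ({margin / PRICE:.0%})")
```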

### Strategies for Improving Profit Margins

1. **Optimize Token Usage**
   - Implement efficient prompting techniques
   - Cache common responses (see the sketch after this list)
   - Use compression algorithms for inputs and outputs

2. **Leverage Economies of Scale**
   - Negotiate better rates at higher volumes
   - Spread fixed costs across a larger user base

3. **Implement Tiered Pricing**
   - Offer different service levels to capture more value from power users
   - Example: Basic ($10/month, 50K tokens), Pro ($30/month, 200K tokens)

4. **Vertical Integration**
   - Invest in proprietary LLM development for core functionalities
   - Reduce dependency on third-party providers for critical operations

5. **Smart Caching and Pre-computation**
   - Store and reuse common LLM outputs
   - Perform batch processing during off-peak hours

6. **Hybrid Cloud Strategies**
   - Use on-premises solutions for consistent workloads
   - Leverage cloud elasticity for demand spikes
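
To make strategies 1 and 5 concrete, here is a minimal sketch of response caching, where repeated prompts are answered from a local store instead of a billed API call; `call_llm` is a hypothetical stand-in for your provider's client:

```python
# A minimal sketch of response caching: identical prompts hit the
# local store, so tokens are only billed on a cache miss.
import hashlib

_cache: dict[str, str] = {}

def cached_completion(prompt: str) -> str:
    key = hashlib.sha256(prompt.encode()).hexdigest()
    if key not in _cache:
        _cache[key] = call_llm(prompt)  # hypothetical provider call
    return _cache[key]
```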

### Case Study: Margin Improvement

Consider a company that initially used a pay-per-token model:

**Initial State:**
- Revenue per user: $20
- LLM cost per user: $6
- Other variable costs: $4
- Fixed costs per user: $5
- Net margin per user: $5 (25%)

**After Optimization:**
- Implemented efficient prompting: Reduced token usage by 20%
- Negotiated volume discount: 10% reduction in per-token price
- Introduced tiered pricing: Average revenue per user increased to $25
- Optimized operations: Reduced other variable costs to $3

**Result:**
- New LLM cost per user: $4.32
- New net margin per user: $12.68 (50.7%)

This case study demonstrates how a holistic approach to margin improvement, addressing both revenue and various cost components, can significantly enhance profitability.

Understanding these profit margin dynamics and cost structures is essential for executives to make informed decisions about LLM integration and to continuously optimize their AI-powered services for maximum profitability.

## 4. LLM Pricing in Action: Case Studies

To provide a concrete understanding of how LLM pricing models work in real-world scenarios, let's examine several case studies across different industries and use cases. These examples will illustrate the interplay between pricing models, usage patterns, and business outcomes.
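
Each case study below follows the same cost arithmetic, captured in this minimal helper sketch (per-million-token prices are passed in as parameters):

```python
# A minimal sketch of daily LLM spend under per-million-token pricing.
def daily_cost(input_tokens, output_tokens, daily_volume,
               input_price_per_m, output_price_per_m):
    input_cost = input_tokens * daily_volume / 1_000_000 * input_price_per_m
    output_cost = output_tokens * daily_volume / 1_000_000 * output_price_per_m
    return input_cost + output_cost

# Case Study 1's workload: 50 input / 200 output tokens, 10,000 products
print(daily_cost(50, 200, 10_000, 5.00, 15.00))  # 32.5
```

The same call, with different token counts, volumes, and rates, reproduces the daily cost in every case study that follows.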

### Case Study 1: E-commerce Product Description Generator

**Company**: GlobalMart, a large online retailer
**Use Case**: Automated generation of product descriptions
**LLM Provider**: GPT-4o

**Pricing Model**: Pay-per-token
- Input: $5.00 per 1M tokens
- Output: $15.00 per 1M tokens

**Usage Pattern**:
- Average input: 50 tokens per product (product attributes)
- Average output: 200 tokens per product (generated description)
- Daily products processed: 10,000

**Daily Cost Calculation**:
1. Input cost: (50 tokens * 10,000 products) / 1M * $5.00 = $2.50
2. Output cost: (200 tokens * 10,000 products) / 1M * $15.00 = $30.00
3. Total daily cost: $32.50

**Business Impact**:
- Reduced time to market for new products by 70%
- Improved SEO performance due to unique, keyword-rich descriptions
- Estimated daily value generated: $500 (based on increased sales and efficiency)

**ROI Analysis**:
- Daily investment: $32.50
- Daily return: $500
- ROI = (Return - Investment) / Investment * 100 = 1,438%

**Key Takeaway**: The pay-per-token model works well for this use case due to the predictable and moderate token usage per task. The high ROI justifies the investment in a more advanced model like GPT-4o.

### Case Study 2: Customer Service Chatbot

**Company**: TechSupport Inc., a software company
**Use Case**: 24/7 customer support chatbot
**LLM Provider**: Claude 3.5 Sonnet

**Pricing Model**: Input: $3 per 1M tokens, Output: $15 per 1M tokens

**Usage Pattern**:
- Average conversation: 500 tokens input (customer queries + context), 1,000 tokens output (bot responses)
- Daily conversations: 5,000

**Daily Cost Calculation**:
1. Input cost: (500 tokens * 5,000 conversations) / 1M * $3 = $7.50
2. Output cost: (1,000 tokens * 5,000 conversations) / 1M * $15 = $75.00
3. Total daily cost: $82.50

**Business Impact**:
- Reduced customer wait times by 90%
- Resolved 70% of queries without human intervention
- Estimated daily cost savings: $2,000 (based on reduced human support hours)

**ROI Analysis**:
- Daily investment: $82.50
- Daily return: $2,000
- ROI = (Return - Investment) / Investment * 100 = 2,324%

**Key Takeaway**: The higher cost of Claude 3.5 Sonnet is justified by its superior performance in handling complex customer queries, resulting in significant cost savings and improved customer satisfaction.

### Case Study 3: Financial Report Summarization

**Company**: FinAnalyze, a financial services firm
**Use Case**: Automated summarization of lengthy financial reports
**LLM Provider**: GPT-3.5 Turbo

**Pricing Model**: Input: $0.50 per 1M tokens, Output: $1.50 per 1M tokens

**Usage Pattern**:
- Average report: 20,000 tokens input, 2,000 tokens output
- Daily reports processed: 100

**Daily Cost Calculation**:
1. Input cost: (20,000 tokens * 100 reports) / 1M * $0.50 = $1.00
2. Output cost: (2,000 tokens * 100 reports) / 1M * $1.50 = $0.30
3. Total daily cost: $1.30

**Business Impact**:
- Reduced analysis time by 80%
- Improved consistency in report summaries
- Enabled analysts to focus on high-value tasks
- Estimated daily value generated: $1,000 (based on time savings and improved decision-making)

**ROI Analysis**:
- Daily investment: $1.30
- Daily return: $1,000
- ROI = (Return - Investment) / Investment * 100 ≈ 76,823%

**Key Takeaway**: The lower cost of GPT-3.5 Turbo is suitable for this task, which requires processing large volumes of text but doesn't necessarily need the most advanced language understanding. The high input token count makes the input pricing a significant factor in model selection.

### Case Study 4: AI-Powered Language Learning App

**Company**: LinguaLeap, an edtech startup
**Use Case**: Personalized language exercises and conversations
**LLM Provider**: Claude 3 Haiku

**Pricing Model**: Input: $0.25 per 1M tokens, Output: $1.25 per 1M tokens

**Usage Pattern**:
- Average session: 300 tokens input (user responses + context), 500 tokens output (exercises + feedback)
- Daily active users: 50,000
- Average sessions per user per day: 3

**Daily Cost Calculation**:
1. Input cost: (300 tokens * 3 sessions * 50,000 users) / 1M * $0.25 = $11.25
2. Output cost: (500 tokens * 3 sessions * 50,000 users) / 1M * $1.25 = $93.75
3. Total daily cost: $105

**Business Impact**:
- Increased user engagement by 40%
- Improved learning outcomes, leading to higher user retention
- Enabled scaling to new languages without a proportional increase in human tutors
- Estimated daily revenue: $5,000 (based on subscription fees and in-app purchases)

**ROI Analysis**:
- Daily investment: $105
- Daily revenue: $5,000
- ROI = (Revenue - Investment) / Investment * 100 = 4,662%

**Key Takeaway**: The high-volume, relatively simple interactions in this use case make Claude 3 Haiku an excellent choice. Its low cost allows for frequent interactions without prohibitive expenses, which is crucial for an app relying on regular user engagement.

### Case Study 5: Legal Document Analysis

**Company**: LegalEagle LLP, a large law firm
**Use Case**: Contract review and risk assessment
**LLM Provider**: Claude 3 Opus

**Pricing Model**: Input: $15 per 1M tokens, Output: $75 per 1M tokens

**Usage Pattern**:
- Average contract: 10,000 tokens input, 3,000 tokens output (analysis and risk assessment)
- Daily contracts processed: 50

**Daily Cost Calculation**:
1. Input cost: (10,000 tokens * 50 contracts) / 1M * $15 = $7.50
2. Output cost: (3,000 tokens * 50 contracts) / 1M * $75 = $11.25
3. Total daily cost: $18.75

**Business Impact**:
- Reduced contract review time by 60%
- Improved accuracy in identifying potential risks
- Enabled handling of more complex cases
- Estimated daily value: $10,000 (based on time savings and improved risk management)

**ROI Analysis**:
- Daily investment: $18.75
- Daily value: $10,000
- ROI = (Value - Investment) / Investment * 100 = 53,233%

**Key Takeaway**: Despite the high cost per token, Claude 3 Opus's advanced capabilities justify its use in this high-stakes environment where accuracy and nuanced understanding are critical. The high value generated per task offsets the higher token costs.

These case studies demonstrate how different LLM providers and pricing models can be optimal for various use cases, depending on factors such as token volume, task complexity, and the value generated by the AI application. Executives should carefully consider these factors when selecting an LLM provider and pricing model for their specific needs.

## 5. Calculating ROI for LLM Integration

Calculating the Return on Investment (ROI) for LLM integration is crucial for executives to justify the expenditure and assess the business value of AI implementation. This section will guide you through the process of calculating ROI, considering both tangible and intangible benefits.

### The ROI Formula

The basic ROI formula is:

```
ROI = (Net Benefit / Cost of Investment) * 100
```

For LLM integration, we can expand this to:

```
ROI = ((Total Benefits - Total Costs) / Total Costs) * 100
```

### Identifying Benefits

1. **Direct Cost Savings**
   - Reduced labor costs
   - Decreased operational expenses
   - Lower error-related costs

2. **Revenue Increases**
   - New product offerings enabled by LLM
   - Improved customer acquisition and retention
   - Upselling and cross-selling opportunities

3. **Productivity Gains**
   - Time saved on repetitive tasks
   - Faster decision-making processes
   - Improved employee efficiency

4. **Quality Improvements**
   - Enhanced accuracy in outputs
   - Consistency in service delivery
   - Reduced error rates

5. **Strategic Advantages**
   - Market differentiation
   - Faster time-to-market for new offerings
   - Improved competitive positioning

### Calculating Costs

1. **Direct LLM Costs**
   - API usage fees
   - Subscription costs

2. **Infrastructure Costs**
   - Cloud computing resources
   - Data storage
   - Networking expenses

3. **Integration and Development Costs**
   - Initial setup and integration
   - Ongoing maintenance and updates
   - Custom feature development

4. **Training and Support**
   - Employee training programs
   - User support and documentation
   - Change management initiatives

5. **Compliance and Security**
   - Data privacy measures
   - Security audits and implementations
   - Regulatory compliance efforts

### Step-by-Step ROI Calculation

1. **Define the Time Period**: Determine the timeframe for your ROI calculation (e.g., 1 year, 3 years).

2. **Estimate Total Benefits**:
   - Quantify direct cost savings and revenue increases
   - Assign monetary values to productivity gains and quality improvements
   - Estimate the value of strategic advantages (this may be more subjective)

3. **Calculate Total Costs**:
   - Sum up all direct and indirect costs related to LLM integration

4. **Apply the ROI Formula**:

   ```
   ROI = ((Total Benefits - Total Costs) / Total Costs) * 100
   ```

5. **Consider the Time Value of Money**: For longer-term projections, use Net Present Value (NPV) to account for the time value of money.

### Example ROI Calculation

Let's consider a hypothetical customer service chatbot implementation:

**Time Period**: 1 year

**Benefits**:
- Labor cost savings: $500,000
- Increased sales from improved customer satisfaction: $300,000
- Productivity gains from faster query resolution: $200,000

Total Benefits: $1,000,000

**Costs**:
- LLM API fees: $100,000
- Integration and development: $150,000
- Training and support: $50,000
- Infrastructure: $50,000

Total Costs: $350,000

**ROI Calculation**:

```
ROI = (($1,000,000 - $350,000) / $350,000) * 100 = 185.7%
```

This indicates a strong positive return on investment, with benefits outweighing costs by a significant margin.
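
A minimal sketch of this calculation, plus the NPV adjustment suggested in step 5; the 10% discount rate and the two-year cash flows are illustrative assumptions:

```python
# A minimal sketch of ROI and NPV for an LLM integration project.
def roi(total_benefits: float, total_costs: float) -> float:
    return (total_benefits - total_costs) / total_costs * 100

def npv(cash_flows: list[float], rate: float = 0.10) -> float:
    # cash_flows[0] is year 0 (typically the negative upfront cost)
    return sum(cf / (1 + rate) ** t for t, cf in enumerate(cash_flows))

print(f"{roi(1_000_000, 350_000):.1f}%")             # 185.7%
print(f"${npv([-350_000, 650_000, 650_000]):,.0f}")  # two years of net benefit
```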

### Considerations for Accurate ROI Calculation

1. **Be Conservative in Estimates**: It's better to underestimate benefits and overestimate costs to provide a more realistic view.

2. **Account for Ramp-Up Time**: Full benefits may not be realized immediately. Consider a phased approach in your calculations.

3. **Include Opportunity Costs**: Consider the potential returns if the investment were made elsewhere.

4. **Factor in Risk**: Adjust your ROI based on the likelihood of achieving projected benefits.

5. **Consider Non-Financial Benefits**: Some benefits, like improved employee satisfaction or enhanced brand perception, may not have direct financial equivalents but are still valuable.

6. **Perform Sensitivity Analysis**: Calculate ROI under different scenarios (best case, worst case, most likely) to understand the range of possible outcomes.

7. **Benchmark Against Alternatives**: Compare the ROI of LLM integration against other potential investments or solutions.

### Long-Term ROI Considerations

While initial ROI calculations are crucial for decision-making, it's important to consider long-term implications:

1. **Scalability**: How will ROI change as usage increases?
2. **Technological Advancements**: Will newer, more efficient models become available?
3. **Market Changes**: How might shifts in the competitive landscape affect the value proposition?
4. **Regulatory Environment**: Could future regulations impact the cost or feasibility of LLM use?

By thoroughly calculating and analyzing the ROI of LLM integration, executives can make data-driven decisions about AI investments and set realistic expectations for the value these technologies can bring to their organizations.

## 6. Comparative Analysis of Major LLM Providers

In this section, we'll compare the offerings of major LLM providers, focusing on their pricing structures, model capabilities, and unique selling points. This analysis will help executives understand the landscape and make informed decisions about which provider best suits their needs.

### OpenAI

**Models**: GPT-4o, GPT-3.5 Turbo

**Pricing Structure**:
- Pay-per-token model
- Different rates for input and output tokens
- Bulk discounts available for high-volume users

**Key Features**:
- State-of-the-art performance on a wide range of tasks
- Regular model updates and improvements
- Extensive documentation and community support

**Considerations**:
- Higher pricing compared to some competitors
- Potential for rapid price changes as technology evolves
- Usage limits and approval process for higher-tier models

### Anthropic

**Models**: Claude 3.5 Sonnet, Claude 3 Opus, Claude 3 Haiku

**Pricing Structure**:
- Pay-per-token model
- Different rates for input and output tokens
- Tiered pricing based on model capabilities

**Key Features**:
- Strong focus on AI safety and ethics
- Long context windows (200K tokens)
- Specialized models for different use cases (e.g., Haiku for speed, Opus for complex tasks)

**Considerations**:
- Newer to the market compared to OpenAI
- Potentially more limited third-party integrations
- Strong emphasis on responsible AI use

### Google (Vertex AI)

**Models**: PaLM 2 for Chat, PaLM 2 for Text

**Pricing Structure**:
- Pay-per-thousand-characters model
- Different rates for input and output
- Additional charges for advanced features (e.g., semantic retrieval)

**Key Features**:
- Integration with the Google Cloud ecosystem
- Multi-modal capabilities (text, image, audio)
- Enterprise-grade security and compliance features

**Considerations**:
- Pricing can be complex due to additional Google Cloud costs
- Strong performance in specialized domains (e.g., coding, mathematical reasoning)
- Potential for integration with other Google services

### Amazon (Bedrock)

**Models**: Claude (Anthropic), Titan

**Pricing Structure**:
- Pay-per-second of compute time
- Additional charges for data transfer and storage

**Key Features**:
- Seamless integration with AWS services
- Access to multiple model providers through a single API
- Fine-tuning and customization options

**Considerations**:
- Pricing model can be less predictable for inconsistent workloads
- Strong appeal for existing AWS customers
- Potential for cost optimizations through the AWS ecosystem

### Microsoft (Azure OpenAI Service)

**Models**: GPT-4, GPT-3.5 Turbo

**Pricing Structure**:
- Similar to OpenAI's pricing, but with Azure integration
- Additional costs for Azure services (e.g., storage, networking)

**Key Features**:
- Enterprise-grade security and compliance
- Integration with Azure AI services
- Access to fine-tuning and customization options

**Considerations**:
- Attractive for organizations already using Azure
- Potential for volume discounts through Microsoft Enterprise Agreements
- Additional overhead for Azure management

### Comparative Analysis

| Provider | Pricing Model | Strengths | Considerations |
|----------|---------------|-----------|----------------|
| OpenAI | Pay-per-token | - Top performance<br>- Regular updates<br>- Strong community | - Higher costs<br>- Usage limits |
| Anthropic | Pay-per-token | - Ethical focus<br>- Long context<br>- Specialized models | - Newer provider<br>- Limited integrations |
| Google | Pay-per-character | - Google Cloud integration<br>- Multi-modal<br>- Enterprise features | - Complex pricing<br>- Google ecosystem lock-in |
| Amazon | Pay-per-compute-time | - AWS integration<br>- Multiple providers<br>- Customization options | - Less predictable costs<br>- AWS ecosystem focus |
| Microsoft | Pay-per-token (Azure-based) | - Enterprise security<br>- Azure integration<br>- Fine-tuning options | - Azure overhead<br>- Potential lock-in |
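
Because these providers bill in unlike units (tokens, characters, compute seconds), comparing them requires normalizing a common workload into dollars. A minimal sketch; every rate below is a hypothetical placeholder, not a quoted price:

```python
# A minimal sketch of normalizing one workload across unlike billing
# units so providers can be compared in dollars.
WORKLOAD = {"input_tokens": 1_000_000, "output_tokens": 200_000,
            "chars_per_token": 4,      # rough English-text assumption
            "compute_seconds": 900}    # assumed time to serve the workload

def per_token(in_rate_per_m, out_rate_per_m):
    return (WORKLOAD["input_tokens"] / 1e6 * in_rate_per_m
            + WORKLOAD["output_tokens"] / 1e6 * out_rate_per_m)

def per_character(rate_per_1k_chars):
    chars = (WORKLOAD["input_tokens"] + WORKLOAD["output_tokens"]) * WORKLOAD["chars_per_token"]
    return chars / 1_000 * rate_per_1k_chars

def per_compute_time(rate_per_second):
    return WORKLOAD["compute_seconds"] * rate_per_second

print(f"token-billed:     ${per_token(5.00, 15.00):.2f}")
print(f"character-billed: ${per_character(0.0005):.2f}")
print(f"compute-billed:   ${per_compute_time(0.01):.2f}")
```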

### Factors to Consider in Provider Selection

1. **Performance Requirements**: Assess whether you need state-of-the-art performance or if a less advanced (and potentially cheaper) model suffices.

2. **Pricing Predictability**: Consider whether your usage patterns align better with token-based or compute-time-based pricing.

3. **Integration Needs**: Evaluate how well each provider integrates with your existing technology stack.

4. **Scalability**: Assess each provider's ability to handle your expected growth in usage.

5. **Customization Options**: Determine if you need fine-tuning or specialized model development capabilities.

6. **Compliance and Security**: Consider your industry-specific regulatory requirements and each provider's security offerings.

7. **Support and Documentation**: Evaluate the quality of documentation, community support, and enterprise-level assistance.

8. **Ethical Considerations**: Assess each provider's stance on AI ethics and responsible use.

9. **Lock-In Concerns**: Consider the long-term implications of committing to a specific provider or cloud ecosystem.

10. **Multi-Provider Strategy**: Evaluate the feasibility and benefits of using multiple providers for different use cases.

By carefully comparing these providers and considering the factors most relevant to your organization, you can make an informed decision that balances cost, performance, and strategic fit. Remember that the LLM landscape is rapidly evolving, so it's important to regularly reassess your choices and stay informed about new developments and pricing changes.

## 7. Hidden Costs and Considerations

When evaluating LLM providers and calculating the total cost of ownership, it's crucial to look beyond the advertised pricing and consider the hidden costs and additional factors that can significantly impact your budget and overall implementation success. This section explores these often-overlooked aspects to help executives make more comprehensive and accurate assessments.

### 1. Data Preparation and Cleaning

**Considerations**:
- Cost of data collection and aggregation
- Expenses related to data cleaning and normalization
- Ongoing data maintenance and updates

**Impact**:
- Can be time-consuming and labor-intensive
- May require specialized tools or personnel
- Critical for model performance and accuracy

### 2. Fine-Tuning and Customization

**Considerations**:
- Costs associated with creating custom datasets
- Compute resources required for fine-tuning
- Potential need for specialized ML expertise

**Impact**:
- Can significantly improve model performance for specific tasks
- May lead to better ROI in the long run
- Increases initial implementation costs

### 3. Integration and Development

**Considerations**:
- Engineering time for API integration
- Development of custom interfaces or applications
- Ongoing maintenance and updates

**Impact**:
- Can be substantial, especially for complex integrations
- May require hiring additional developers or consultants
- Critical for seamless user experience and workflow integration

### 4. Monitoring and Optimization

**Considerations**:
- Tools and systems for performance monitoring
- Regular audits and optimizations
- Costs associated with debugging and troubleshooting

**Impact**:
- Ongoing expense that increases with scale
- Essential for maintaining efficiency and cost-effectiveness
- Can lead to significant savings through optimized usage

### 5. Compliance and Security

**Considerations**:
- Legal counsel for data privacy and AI regulations
- Implementation of security measures (e.g., encryption, access controls)
- Regular audits and certifications

**Impact**:
- Can be substantial, especially in heavily regulated industries
- Critical for risk management and maintaining customer trust
- May limit certain use cases or require additional safeguards

### 6. Training and Change Management

**Considerations**:
- Employee training programs
- Development of user guides and documentation
- Change management initiatives

**Impact**:
- Often underestimated but crucial for adoption
- Can affect productivity during the transition period
- Important for realizing the full potential of LLM integration

### 7. Scaling Costs

**Considerations**:
- Potential price increases as usage grows
- Need for additional infrastructure or resources
- Costs associated with managing increased complexity

**Impact**:
- Can lead to unexpected expenses if not properly forecasted
- May require renegotiation of contracts or switching providers
- Important to consider in long-term planning

### 8. Opportunity Costs

**Considerations**:
- Time and resources diverted from other projects
- Potential missed opportunities due to focus on LLM implementation
- Learning curve and productivity dips during adoption

**Impact**:
- Difficult to quantify but important to consider
- Can affect overall business strategy and priorities
- May influence timing and scope of LLM integration

### 9. Vendor Lock-in

**Considerations**:
- Costs associated with switching providers
- Dependency on provider-specific features or integrations
- Potential for price increases once deeply integrated

**Impact**:
- Can limit flexibility and negotiating power
- May affect long-term costs and strategic decisions
- Important to consider multi-provider or portable implementation strategies

### 10. Ethical and Reputational Considerations

**Considerations**:
- Potential backlash from AI-related controversies
- Costs of ensuring ethical AI use and transparency
- Investments in responsible AI practices

**Impact**:
- Can affect brand reputation and customer trust
- May require ongoing public relations efforts
- Important for long-term sustainability and social responsibility

By carefully considering these hidden costs and factors, executives can develop a more comprehensive understanding of the total investment required for successful LLM integration. This holistic approach allows for better budgeting, risk management, and strategic planning.

## Conclusion: Navigating the LLM Pricing Landscape

As we've explored throughout this guide, the landscape of LLM provider pricing is complex and multifaceted. From understanding the basic pricing models to calculating ROI and considering hidden costs, there are numerous factors that executives must weigh when making decisions about AI integration.

Key takeaways include:

1. The importance of aligning LLM selection with specific business needs and use cases.
2. The need for thorough ROI analysis that goes beyond simple cost calculations.
3. The value of considering both short-term implementation costs and long-term scalability.
4. The critical role of hidden costs in determining the true total cost of ownership.
5. The potential for significant business value when LLMs are strategically implemented and optimized.

As the AI landscape continues to evolve rapidly, staying informed and adaptable is crucial. What may be the best choice today could change as new models are released, pricing structures shift, and your organization's needs evolve.

To help you navigate these complexities and make the most informed decisions for your enterprise, we invite you to take the next steps in your AI journey:

1. **Book a Consultation**: Speak with our enterprise-grade LLM specialists who can provide personalized insights and recommendations tailored to your specific needs. Schedule a 15-minute call at [https://cal.com/swarms/15min](https://cal.com/swarms/15min).

2. **Join Our Community**: Connect with fellow AI executives, share experiences, and stay updated on the latest developments in the LLM space. Join our Discord community at [https://discord.gg/yxU9t9da](https://discord.gg/yxU9t9da).

By leveraging expert guidance and peer insights, you can position your organization to make the most of LLM technologies while optimizing costs and maximizing value. The future of AI in enterprise is bright, and with the right approach, your organization can be at the forefront of this transformative technology.

---

# Welcome to Swarms Docs Home

[Discord](https://discord.gg/agora-999382051935506503) [YouTube](https://www.youtube.com/@kyegomez3242) [LinkedIn](https://www.linkedin.com/in/kye-g-38759a207/) [X](https://x.com/kyegomezb)

**Get Started Building Production-Grade Multi-Agent Applications**

## Onboarding

| Section | Links |
|----------------------|--------------------------------------------------------------------------------------------|
| Installation | [Installation](https://docs.swarms.world/en/latest/swarms/install/install/) |
| Quickstart | [Get Started](https://docs.swarms.world/en/latest/swarms/install/quickstart/) |
| Agent Internal Mechanisms | [Agent Architecture](https://docs.swarms.world/en/latest/swarms/framework/agents_explained/) |
| Agent API | [Agent API](https://docs.swarms.world/en/latest/swarms/structs/agent/) |
| Integrating External Agents (Griptape, Autogen, etc.) | [Integrating External APIs](https://docs.swarms.world/en/latest/swarms/agents/external_party_agents/) |
| Creating Agents from YAML | [Creating Agents from YAML](https://docs.swarms.world/en/latest/swarms/agents/create_agents_yaml/) |
| Why You Need Swarms | [Why Multi-Agent Collaboration is Necessary](https://docs.swarms.world/en/latest/swarms/concept/why/) |
| Swarm Architectures Analysis | [Swarm Architectures](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) |
| Choosing the Right Swarm for Your Business Problem | [CLICK HERE](https://docs.swarms.world/en/latest/swarms/concept/swarm_architectures/) |
| AgentRearrange Docs | [CLICK HERE](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) |

## Ecosystem

Here you'll find references about the Swarms framework, marketplace, community, and more to enable you to build your multi-agent applications.

| Section | Links |
|----------------------|--------------------------------------------------------------------------------------------|
| Swarms Python Framework Docs | [Framework Docs](https://docs.swarms.world/en/latest/swarms/install/install/) |
| Swarms Marketplace API Docs | [Swarms Marketplace](https://docs.swarms.world/en/latest/swarms_platform/) |
| Swarms Cloud Docs | [Swarms Cloud](https://docs.swarms.world/en/latest/swarms_cloud/main/) |
| Swarms Models | [Swarms Models](https://docs.swarms.world/en/latest/swarms/models/) |
| Swarms Memory | [Swarms Memory](https://docs.swarms.world/en/latest/swarms_memory/) |
| Swarms Corp GitHub Profile | [Swarms Corp GitHub](https://github.com/The-Swarm-Corporation) |
| Swarms Platform/Marketplace Frontend GitHub | [Swarms Platform GitHub](https://github.com/kyegomez/swarms-platform) |

## Community

| Section | Links |
|----------------------|--------------------------------------------------------------------------------------------|
| Community | [Discord](https://discord.com/servers/agora-999382051935506503) |
| Blog | [Blog](https://medium.com/@kyeg) |
| Event Calendar | [LUMA](https://lu.ma/swarms_calendar) |
| Twitter | [Twitter](https://x.com/swarms_corp) |
| Agent Marketplace | [Website](https://swarms.xyz) |
| Docs | [Website](https://docs.swarms.world) |

## Get Support

Want to get in touch with the Swarms team? Open an issue on [GitHub](https://github.com/kyegomez/swarms/issues/new) or reach out to us via [email](mailto:kye@swarms.world). We're here to help!
|
@ -0,0 +1,187 @@
|
|||||||
|
# Swarm Alpha: Data Cruncher

**Overview**: Processes large datasets.

**Strengths**: Efficient data handling.

**Weaknesses**: Requires structured data.

**Pseudo Code**:

```text
FOR each data_entry IN dataset:
    result = PROCESS(data_entry)
    STORE(result)
END FOR
RETURN aggregated_results
```
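
For readers who want something concrete, here is a minimal Python sketch of the Data Cruncher loop above. The `process_entry` and `store_result` helpers are hypothetical stand-ins for whatever transformation and storage a real swarm would use.

```python
from typing import Any, Iterable

def process_entry(entry: Any) -> dict:
    # Hypothetical per-entry transformation; replace with real processing logic.
    return {"value": entry, "processed": True}

def store_result(result: dict, sink: list) -> None:
    # Hypothetical storage step; a production swarm might write to a database or queue.
    sink.append(result)

def crunch(dataset: Iterable[Any]) -> list:
    """Process every entry and return the aggregated results."""
    results: list = []
    for data_entry in dataset:
        store_result(process_entry(data_entry), results)
    return results

print(crunch([1, 2, 3]))  # [{'value': 1, 'processed': True}, ...]
```
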
# Swarm Beta: Artistic Ally

**Overview**: Generates art pieces.

**Strengths**: Creativity.

**Weaknesses**: Somewhat unpredictable.

**Pseudo Code**:

```text
INITIATE canvas_parameters
SELECT art_style
DRAW(canvas_parameters, art_style)
RETURN finished_artwork
```

# Swarm Gamma: Sound Sculptor

**Overview**: Crafts audio sequences.

**Strengths**: Diverse audio outputs.

**Weaknesses**: Complexity in refining outputs.

**Pseudo Code**:

```text
DEFINE sound_parameters
SELECT audio_style
GENERATE_AUDIO(sound_parameters, audio_style)
RETURN audio_sequence
```

# Swarm Delta: Web Weaver

**Overview**: Constructs web designs.

**Strengths**: Modern design sensibility.

**Weaknesses**: Limited to web interfaces.

**Pseudo Code**:

```text
SELECT template
APPLY user_preferences(template)
DESIGN_WEB(template, user_preferences)
RETURN web_design
```

# Swarm Epsilon: Code Compiler

**Overview**: Writes and compiles code snippets.

**Strengths**: Quick code generation.

**Weaknesses**: Limited to certain programming languages.

**Pseudo Code**:

```text
DEFINE coding_task
WRITE_CODE(coding_task)
COMPILE(code)
RETURN executable
```

# Swarm Zeta: Security Shield

**Overview**: Detects system vulnerabilities.

**Strengths**: High threat detection rate.

**Weaknesses**: Potential false positives.

**Pseudo Code**:

```text
MONITOR system_activity
IF suspicious_activity_detected:
    ANALYZE threat_level
    INITIATE mitigation_protocol
END IF
RETURN system_status
```
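
As with the Data Cruncher, a runnable Python sketch of the Security Shield loop may help; `suspicious`, `threat_level`, and `mitigate` are hypothetical helpers standing in for real detection and response logic.

```python
def suspicious(event: str) -> bool:
    # Hypothetical detector; a real agent would inspect logs, metrics, or network telemetry.
    return "unauthorized" in event

def threat_level(event: str) -> int:
    # Hypothetical severity score from 0 (benign) to 10 (critical).
    return 8 if suspicious(event) else 0

def mitigate(event: str) -> None:
    print(f"Mitigation protocol initiated for: {event}")

def monitor(activity_stream: list) -> str:
    """Scan a batch of events and return the resulting system status."""
    for event in activity_stream:
        if suspicious(event):
            mitigate(event)
            return f"degraded (threat level {threat_level(event)})"
    return "nominal"

print(monitor(["login ok", "unauthorized access attempt"]))  # degraded (threat level 8)
```
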
# Swarm Eta: Researcher Relay

**Overview**: Gathers and synthesizes research data.

**Strengths**: Access to vast databases.

**Weaknesses**: Depth of research can vary.

**Pseudo Code**:

```text
DEFINE research_topic
SEARCH research_sources(research_topic)
SYNTHESIZE findings
RETURN research_summary
```

---

# Swarm Theta: Sentiment Scanner

**Overview**: Analyzes text for sentiment and emotional tone.

**Strengths**: Accurate sentiment detection.

**Weaknesses**: Contextual nuances might be missed.

**Pseudo Code**:

```text
INPUT text_data
ANALYZE text_data FOR emotional_tone
DETERMINE sentiment_value
RETURN sentiment_value
```

# Swarm Iota: Image Interpreter

**Overview**: Processes and categorizes images.

**Strengths**: High image recognition accuracy.

**Weaknesses**: Can struggle with abstract visuals.

**Pseudo Code**:

```text
LOAD image_data
PROCESS image_data FOR features
CATEGORIZE image_based_on_features
RETURN image_category
```

# Swarm Kappa: Language Learner

**Overview**: Translates and interprets multiple languages.

**Strengths**: Supports multiple languages.

**Weaknesses**: Nuances in dialects might pose challenges.

**Pseudo Code**:

```text
RECEIVE input_text, target_language
TRANSLATE input_text TO target_language
RETURN translated_text
```

# Swarm Lambda: Trend Tracker

**Overview**: Monitors and predicts trends based on data.

**Strengths**: Proactive trend identification.

**Weaknesses**: Requires continuous data stream.

**Pseudo Code**:

```text
COLLECT data_over_time
ANALYZE data_trends
PREDICT upcoming_trends
RETURN trend_forecast
```

# Swarm Mu: Financial Forecaster

**Overview**: Analyzes financial data to predict market movements.

**Strengths**: In-depth financial analytics.

**Weaknesses**: Market volatility can affect predictions.

**Pseudo Code**:

```text
GATHER financial_data
COMPUTE statistical_analysis
FORECAST market_movements
RETURN financial_projections
```

# Swarm Nu: Network Navigator

**Overview**: Optimizes and manages network traffic.

**Strengths**: Efficient traffic management.

**Weaknesses**: Depends on network infrastructure.

**Pseudo Code**:

```text
MONITOR network_traffic
IDENTIFY congestion_points
OPTIMIZE traffic_flow
RETURN network_status
```

# Swarm Xi: Content Curator

**Overview**: Gathers and presents content based on user preferences.

**Strengths**: Personalized content delivery.

**Weaknesses**: Limited by available content sources.

**Pseudo Code**:

```text
DEFINE user_preferences
SEARCH content_sources
FILTER content_matching_preferences
DISPLAY curated_content
```

---

# Swarms Multi-Agent Permissions System (SMAPS)

## Description

SMAPS is a robust permissions management system designed to integrate seamlessly with the Swarms multi-agent AI framework. Drawing inspiration from Amazon's IAM, SMAPS ensures secure, granular control over agent actions while allowing for collaborative human-in-the-loop interventions.

## Technical Specification

### 1. Components

- **User Management**: Handle user registrations, roles, and profiles.
- **Agent Management**: Register, monitor, and manage AI agents.
- **Permissions Engine**: Define and enforce permissions based on roles.
- **Multiplayer Interface**: Allow multiple human users to intervene in, guide, or collaborate on tasks being executed by AI agents.

### 2. Features

- **Role-Based Access Control (RBAC)**:
  - Users can be assigned predefined roles (e.g., Admin, Agent Supervisor, Collaborator).
  - Each role has specific permissions associated with it, defining what actions can be performed on AI agents or tasks (a minimal permission-check sketch follows this feature list).

- **Dynamic Permissions**:
  - Create custom roles with specific permissions.
  - Permissions granularity: from broad (e.g., view all tasks) to specific (e.g., modify parameters of a particular agent).

- **Multiplayer Collaboration**:
  - Multiple users can join a task in real time.
  - Collaborators can provide real-time feedback or guidance to AI agents.
  - A voting system supports decision-making when human intervention is required.

- **Agent Supervision**:
  - Monitor agent actions in real time.
  - Intervene, when necessary, to guide agent actions based on permissions.

- **Audit Trail**:
  - All actions, whether performed by humans or AI agents, are logged.
  - Review historical actions, decisions, and interventions for accountability and improvement.
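
To make the RBAC idea concrete, here is the minimal Python sketch referenced above. The role names and permission strings are illustrative assumptions, not the shipped SMAPS API.

```python
from dataclasses import dataclass, field

# Illustrative role-to-permission mapping; actual SMAPS roles and permissions may differ.
ROLE_PERMISSIONS = {
    "Admin": {"view_tasks", "modify_agent", "assign_roles"},
    "Agent Supervisor": {"view_tasks", "modify_agent"},
    "Collaborator": {"view_tasks"},
}

@dataclass
class User:
    name: str
    roles: set = field(default_factory=set)

def can(user: User, permission: str) -> bool:
    """Return True if any of the user's roles grants the permission."""
    return any(permission in ROLE_PERMISSIONS.get(role, set()) for role in user.roles)

alice = User("alice", {"Collaborator"})
assert can(alice, "view_tasks")
assert not can(alice, "modify_agent")  # Collaborators cannot modify agents
```
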
### 3. Security

- **Authentication**: Secure login mechanisms with multi-factor authentication options.
- **Authorization**: Ensure users and agents can only perform actions they are permitted to.
- **Data Encryption**: All data, whether at rest or in transit, is encrypted using industry-standard protocols.

### 4. Integration

- **APIs**: Expose APIs for integrating SMAPS with other systems or for extending its capabilities.
- **SDK**: Provide software development kits for popular programming languages to facilitate integration and extension.

## Documentation Description

Swarms Multi-Agent Permissions System (SMAPS) offers a sophisticated permissions management mechanism tailored for multi-agent AI frameworks. It combines the robustness of Amazon IAM-like permissions with a unique "multiplayer" feature, allowing multiple humans to collaboratively guide AI agents in real time. This ensures not only that tasks are executed efficiently but also that they uphold the highest standards of accuracy and ethics. With SMAPS, businesses can harness the power of swarms with confidence, knowing that they have full control and transparency over their AI operations.

---

# AgentArchive Documentation

## Swarms Multi-Agent Framework

**AgentArchive is an advanced feature crafted to archive, bookmark, and harness the transcripts of agent runs. It promotes the storing and leveraging of successful agent interactions, offering a powerful means for users to derive "recipes" for future agents. Furthermore, with its public archive feature, users can contribute to and benefit from the collective wisdom of the community.**

---

## Overview:

AgentArchive empowers users to:

1. Preserve complete transcripts of agent instances.
2. Bookmark and annotate significant runs.
3. Categorize runs using various tags.
4. Transform successful runs into actionable "recipes".
5. Publish and access a shared knowledge base via a public archive.

---

## Features:

### 1. Archiving:

- **Save Transcripts**: Retain the full narrative of an agent's interaction and choices.
- **Searchable Database**: Dive into archives using specific keywords, timestamps, or tags.

### 2. Bookmarking:

- **Highlight Essential Runs**: Designate specific agent runs for future reference.
- **Annotations**: Attach notes or remarks to bookmarked runs for clearer understanding.

### 3. Tagging:

Organize and classify agent runs via:

- **Prompt**: The originating instruction that triggered the agent run.
- **Tasks**: Distinct tasks or operations executed by the agent.
- **Model**: The specific AI model or iteration used during the interaction.
- **Temperature (Temp)**: The set randomness or innovation level for the agent.
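
One plausible shape for these tags is a small record per archived run. The following Python sketch is an assumption about the data model, not AgentArchive's actual schema.

```python
from dataclasses import dataclass, field

@dataclass
class ArchivedRun:
    """Hypothetical record for one archived agent run and its tags."""
    transcript: str
    prompt: str                           # originating instruction
    tasks: list = field(default_factory=list)
    model: str = "gpt-4o"                 # illustrative default
    temperature: float = 0.7
    bookmarked: bool = False
    annotations: list = field(default_factory=list)

run = ArchivedRun(
    transcript="...full agent transcript...",
    prompt="Summarize Q3 earnings",
    tasks=["fetch_filings", "summarize"],
)
run.bookmarked = True
run.annotations.append("Good recipe candidate: concise, well-sourced summaries.")
```
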
### 4. Recipe Generation:

- **Standardization**: Convert successful run transcripts into replicable "recipes".
- **Guidance**: Offer subsequent agents a structured approach, rooted in prior successes.
- **Evolution**: Periodically refine recipes based on newer, enhanced runs.

### 5. Public Archive & Sharing:

- **Publish Successful Runs**: Users can choose to share their successful agent runs.
- **Collaborative Knowledge Base**: Access a shared repository of successful agent interactions from the community.
- **Ratings & Reviews**: Users can rate and review shared runs, highlighting particularly effective "recipes."
- **Privacy & Redaction**: Ensure that any sensitive information is automatically redacted before publishing.

---

## Benefits:

1. **Efficiency**: Revisit past agent activities to inform and guide future decisions.
2. **Consistency**: Guarantee a uniform approach to recurring challenges, leading to predictable and trustworthy outcomes.
3. **Collaborative Learning**: Tap into a reservoir of shared experiences, fostering community-driven learning and growth.
4. **Transparency**: By sharing successful runs, users can build trust and contribute to the broader community's success.

---

## Usage:

1. **Access AgentArchive**: Navigate to the dedicated section within the Swarms Multi-Agent Framework dashboard.
2. **Search, Filter & Organize**: Utilize the search bar and tagging system for precise retrieval.
3. **Bookmark, Annotate & Share**: Pin important runs, add notes, and consider sharing with the broader community.
4. **Engage with Public Archive**: Explore, rate, and apply shared knowledge to enhance agent performance.

---

With AgentArchive, users not only benefit from their past interactions but can also leverage the collective expertise of the Swarms community, ensuring continuous improvement and shared success.

---

# Swarms Multi-Agent Framework Documentation

## Table of Contents

- Agent Failure Protocol
- Swarm Failure Protocol

---

## Agent Failure Protocol

### 1. Overview

Agent failures may arise from bugs, unexpected inputs, or external system changes. This protocol aims to diagnose, address, and prevent such failures.

### 2. Root Cause Analysis

- **Data Collection**: Record the task, inputs, and environmental variables present during the failure.
- **Diagnostic Tests**: Run the agent in a controlled environment replicating the failure scenario.
- **Error Logging**: Analyze error logs to identify patterns or anomalies.

### 3. Solution Brainstorming

- **Code Review**: Examine the code sections linked to the failure for bugs or inefficiencies.
- **External Dependencies**: Check if external systems or data sources have changed.
- **Algorithmic Analysis**: Evaluate if the agent's algorithms were overwhelmed or faced an unhandled scenario.

### 4. Risk Analysis & Solution Ranking

- Assess the potential risks associated with each solution.
- Rank solutions based on:
  - Implementation complexity
  - Potential negative side effects
  - Resource requirements
- Assign a success probability score (0.0 to 1.0) based on the above factors.

### 5. Solution Implementation

- Implement the top 3 solutions sequentially, starting with the one with the highest success probability (a minimal sketch follows).
- If all three solutions fail, trigger the "Human-in-the-Loop" protocol.
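
A minimal Python sketch of steps 4-5, assuming a simple scoring heuristic: candidate fixes are ranked by success probability and tried in order until one works, falling back to a human. The scores and fix callables here are illustrative, not framework API.

```python
from typing import Callable

def run_protocol(solutions: list) -> str:
    """Try the top 3 candidate fixes by success probability (step 5).

    Each entry is (name, success_probability, apply_fix); apply_fix returns
    True if the agent recovers. All names below are illustrative.
    """
    ranked = sorted(solutions, key=lambda s: s[1], reverse=True)[:3]
    for name, prob, apply_fix in ranked:
        print(f"Trying '{name}' (p={prob:.2f})")
        if apply_fix():
            return f"recovered via {name}"
    return "escalated to Human-in-the-Loop protocol"

status = run_protocol([
    ("restart agent", 0.6, lambda: False),
    ("rollback prompt change", 0.8, lambda: False),
    ("pin external API version", 0.7, lambda: True),
])
print(status)  # recovered via pin external API version
```
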
---

## Swarm Failure Protocol

### 1. Overview

Swarm failures are more complex, often resulting from inter-agent conflicts, systemic bugs, or large-scale environmental changes. This protocol delves deep into such failures to ensure the swarm operates optimally.

### 2. Root Cause Analysis

- **Inter-Agent Analysis**: Examine if agents were in conflict or if there was a breakdown in collaboration.
- **System Health Checks**: Ensure all system components supporting the swarm are operational.
- **Environment Analysis**: Investigate if external factors or systems impacted the swarm's operation.

### 3. Solution Brainstorming

- **Collaboration Protocols**: Review and refine how agents collaborate.
- **Resource Allocation**: Check if the swarm had adequate computational and memory resources.
- **Feedback Loops**: Ensure agents are effectively learning from each other.

### 4. Risk Analysis & Solution Ranking

- Assess the potential systemic risks posed by each solution.
- Rank solutions considering:
  - Scalability implications
  - Impact on individual agents
  - Overall swarm performance potential
- Assign a success probability score (0.0 to 1.0) based on the above considerations.

### 5. Solution Implementation

- Implement the top 3 solutions sequentially, prioritizing the one with the highest success probability.
- If all three solutions are unsuccessful, invoke the "Human-in-the-Loop" protocol for expert intervention.

---

By following these protocols, the Swarms Multi-Agent Framework can systematically address and prevent failures, ensuring a high degree of reliability and efficiency.

---

# Human-in-the-Loop Task Handling Protocol

## Overview

The Swarms Multi-Agent Framework recognizes the invaluable contributions humans can make, especially in complex scenarios where nuanced judgment is required. The "Human-in-the-Loop Task Handling Protocol" ensures that when agents encounter challenges they cannot handle autonomously, the most capable human collaborator is engaged to provide guidance based on their skills and expertise.

## Protocol Steps

### 1. Task Initiation & Analysis

- When a task is initiated, agents first analyze the task's requirements.
- The system maintains an understanding of each task's complexity, requirements, and potential challenges.

### 2. Automated Resolution Attempt

- Agents first attempt to resolve the task autonomously using their algorithms and data.
- If the task can be completed without issues, it progresses normally.

### 3. Challenge Detection

- If agents encounter challenges or uncertainties they cannot resolve, the "Human-in-the-Loop" protocol is triggered.

### 4. Human Collaborator Identification

- The system maintains a dynamic profile of each human collaborator, cataloging their skills, expertise, and past performance on related tasks.
- Using this profile data, the system identifies the most capable human collaborator to assist with the current challenge (see the sketch after this step).
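
To illustrate step 4, here is a minimal Python sketch of skill-based collaborator selection, under the assumption that profiles are simple skill-to-score maps; a real system would also weigh availability and past performance.

```python
from typing import Optional

def best_collaborator(profiles: dict, needed_skill: str) -> Optional[str]:
    """Pick the collaborator with the highest score for the needed skill."""
    scored = [(scores.get(needed_skill, 0.0), name) for name, scores in profiles.items()]
    if not scored:
        return None
    score, name = max(scored)
    return name if score > 0 else None  # None -> no qualified human found

profiles = {
    "dana": {"sql": 0.9, "prompt_design": 0.4},
    "lee": {"sql": 0.3, "prompt_design": 0.95},
}
print(best_collaborator(profiles, "prompt_design"))  # lee
```
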
### 5. Real-time Collaboration

- The identified human collaborator is notified and provided with all the relevant information about the task and the challenge.
- Collaborators can provide guidance, make decisions, or even take over specific portions of the task.

### 6. Task Completion & Feedback Loop

- Once the challenge is resolved, agents continue with the task until completion.
- Feedback from human collaborators is used to update agent algorithms, ensuring continuous learning and improvement.

## Best Practices

1. **Maintain Up-to-date Human Profiles**: Ensure that the skill sets, expertise, and performance metrics of human collaborators are updated regularly.
2. **Limit Interruptions**: Implement mechanisms to limit the frequency of human interventions, ensuring collaborators are not overwhelmed with requests.
3. **Provide Context**: When seeking human intervention, provide collaborators with comprehensive context to ensure they can make informed decisions.
4. **Continuous Training**: Regularly update and train agents based on feedback from human collaborators.
5. **Measure & Optimize**: Monitor the efficiency of the "Human-in-the-Loop" protocol, aiming to reduce the frequency of interventions while maximizing the value of each intervention.
6. **Skill Enhancement**: Encourage human collaborators to continuously enhance their skills, ensuring that the collective expertise of the group grows over time.

## Conclusion

The integration of human expertise with AI capabilities is a cornerstone of the Swarms Multi-Agent Framework. This "Human-in-the-Loop Task Handling Protocol" ensures that tasks are executed efficiently, leveraging the best of both human judgment and AI automation. Through collaborative synergy, we can tackle challenges more effectively and drive innovation.

---

# Secure Communication Protocols

## Overview

The Swarms Multi-Agent Framework prioritizes the security and integrity of data, especially personal and sensitive information. Our Secure Communication Protocols ensure that all communications between agents are encrypted, authenticated, and resistant to tampering or unauthorized access.

## Features

### 1. End-to-End Encryption

- All inter-agent communications are encrypted using state-of-the-art cryptographic algorithms.
- This ensures that data remains confidential and can only be read by the intended recipient agent.

### 2. Authentication

- Before initiating communication, agents authenticate each other using digital certificates.
- This prevents impersonation attacks and ensures that agents are communicating with legitimate counterparts.

### 3. Forward Secrecy

- Key exchange mechanisms employ forward secrecy, meaning that even if a malicious actor gains access to an encryption key, they cannot decrypt past communications.

### 4. Data Integrity

- Cryptographic hashes ensure that the data has not been altered in transit.
- Any discrepancies in data integrity result in the communication being rejected.
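
A short sketch of the integrity check, using Python's standard `hmac` and `hashlib` modules; this shows the general keyed-hash pattern rather than the framework's actual wire format.

```python
import hashlib
import hmac

def sign(message: bytes, key: bytes) -> bytes:
    # Keyed hash (HMAC-SHA256) so only key holders can produce a valid tag.
    return hmac.new(key, message, hashlib.sha256).digest()

def verify(message: bytes, tag: bytes, key: bytes) -> bool:
    # compare_digest avoids timing side channels.
    return hmac.compare_digest(sign(message, key), tag)

key = b"shared-secret"           # illustrative; real keys come from key exchange
msg = b'{"task": "D-2.3-M-P"}'
tag = sign(msg, key)
assert verify(msg, tag, key)
assert not verify(msg + b"tampered", tag, key)  # altered data is rejected
```
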
### 5. Zero-Knowledge Protocols

- When handling especially sensitive data, agents use zero-knowledge proofs to validate information without revealing the actual data.

### 6. Periodic Key Rotation

- To mitigate the risk of long-term key exposure, encryption keys are periodically rotated.
- Old keys are securely discarded, ensuring that even if they are compromised, they cannot be used to decrypt communications.

## Best Practices for Handling Personal and Sensitive Information

1. **Data Minimization**: Agents should only request and process the minimum amount of personal data necessary for the task.
2. **Anonymization**: Whenever possible, agents should anonymize personal data, stripping away identifying details.
3. **Data Retention Policies**: Personal data should be retained only for the period necessary to complete the task, after which it should be securely deleted.
4. **Access Controls**: Ensure that only authorized agents have access to personal and sensitive information. Implement strict access control mechanisms.
5. **Regular Audits**: Conduct regular security audits to ensure compliance with privacy regulations and to detect any potential vulnerabilities.
6. **Training**: All agents should be regularly updated and trained on the latest security protocols and best practices for handling sensitive data.

## Conclusion

Secure communication is paramount in the Swarms Multi-Agent Framework, especially when dealing with personal and sensitive information. Adhering to these protocols and best practices ensures the safety, privacy, and trust of all stakeholders involved.

---

# Promptimizer Documentation

## Swarms Multi-Agent Framework

**The Promptimizer Tool stands as a cornerstone innovation within the Swarms Multi-Agent Framework, meticulously engineered to refine and supercharge prompts across diverse categories. Capitalizing on extensive libraries of best-practice prompting techniques, this tool ensures your prompts are razor-sharp, tailored, and primed for optimal outcomes.**

---

## Overview:

The Promptimizer Tool is crafted to:

1. Rigorously analyze and elevate the quality of provided prompts.
2. Furnish best-in-class recommendations rooted in proven prompting strategies.
3. Serve a spectrum of categories, from technical operations to expansive creative ventures.

---

## Core Features:

### 1. Deep Prompt Analysis:

- **Clarity Matrix**: A proprietary algorithm assessing prompt clarity, removing ambiguities and sharpening focus.
- **Efficiency Gauge**: Evaluates the prompt's structure to ensure it yields swift, precise results.
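
The Clarity Matrix itself is described as proprietary, so the following is only a toy Python heuristic suggesting how a clarity score might penalize vague wording and extreme lengths; it is an illustration, not the actual algorithm.

```python
VAGUE_WORDS = {"something", "stuff", "things", "etc", "somehow", "maybe"}

def clarity_score(prompt: str) -> float:
    """Toy heuristic: start at 1.0 and penalize vagueness and extremes of length."""
    words = prompt.lower().split()
    if not words:
        return 0.0
    vague_penalty = sum(word.strip(".,") in VAGUE_WORDS for word in words) / len(words)
    length_penalty = 0.2 if len(words) < 5 or len(words) > 200 else 0.0
    return max(0.0, 1.0 - vague_penalty - length_penalty)

print(round(clarity_score("Write stuff about things"), 2))  # 0.3 (vague and too short)
print(round(clarity_score("Summarize the attached Q3 earnings report in five bullet points"), 2))  # 1.0
```
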
### 2. Adaptive Recommendations:

- **Technique Engine**: Suggests techniques aligned with the gold standard for the chosen category.
- **Exemplar Database**: Offers an extensive array of high-quality prompt examples for comparison and inspiration.

### 3. Versatile Category Framework:

- **Tech Suite**: Optimizes prompts for technical tasks, ensuring actionable clarity.
- **Narrative Craft**: Hones prompts to elicit vivid and coherent stories.
- **Visual Visionary**: Shapes prompts for precise and dynamic visual generation.
- **Sonic Sculptor**: Orchestrates prompts for audio creation, tuning into desired tones and moods.

### 4. Machine Learning Integration:

- **Feedback Dynamo**: Harnesses user feedback, continually refining the tool's recommendation capabilities.
- **Live Library Updates**: Periodic syncing with the latest in prompting techniques, ensuring the tool remains at the cutting edge.

### 5. Collaboration & Sharing:

- **TeamSync**: Allows teams to collaborate on prompt optimization in real time.
- **ShareSpace**: Share and access a community-driven repository of optimized prompts, fostering collective growth.

---

## Benefits:

1. **Precision Engineering**: Harness the power of refined prompts, ensuring desired outcomes are achieved with surgical precision.
2. **Learning Hub**: Immerse yourself in a tool that not only refines but educates, enhancing the user's prompting acumen.
3. **Versatile Mastery**: Navigate seamlessly across categories, ensuring top-tier prompt quality regardless of the domain.
4. **Community-driven Excellence**: Dive into a world of shared knowledge, elevating the collective expertise of the Swarms community.

---

## Usage Workflow:

1. **Launch the Promptimizer**: Access the tool directly from the Swarms Multi-Agent Framework dashboard.
2. **Prompt Entry**: Input the initial prompt for refinement.
3. **Category Selection**: Pinpoint the desired category for specialized optimization.
4. **Receive & Review**: Engage with the tool's recommendations, comparing original and optimized prompts.
5. **Collaborate, Implement & Share**: Work in tandem with team members, deploy the refined prompt, and consider contributing to the community repository.

---

By integrating the Promptimizer Tool into their workflow, Swarms users stand poised to redefine the boundaries of what's possible, turning each prompt into a beacon of excellence and efficiency.

---

# Shorthand Communication System

## Swarms Multi-Agent Framework

**The Enhanced Shorthand Communication System is designed to streamline agent-agent communication within the Swarms Multi-Agent Framework. This system employs concise alphanumeric notations to relay task-specific details to agents efficiently.**

---

## Format:

The shorthand format is structured as `[AgentType]-[TaskLayer].[TaskNumber]-[Priority]-[Status]`.

---

## Components:

### 1. Agent Type:

- Denotes the specific agent role, such as:
    * `C`: Code agent
    * `D`: Data processing agent
    * `M`: Monitoring agent
    * `N`: Network agent
    * `R`: Resource management agent
    * `I`: Interface agent
    * `S`: Security agent

### 2. Task Layer & Number:

- Represents the task's category (layer) and its index within that layer.
    * Example: `1.8` signifies task layer 1, task number 8.

### 3. Priority:

- Indicates task urgency.
    * `H`: High
    * `M`: Medium
    * `L`: Low

### 4. Status:

- Gives a snapshot of the task's progress.
    * `I`: Initialized
    * `P`: In-progress
    * `C`: Completed
    * `F`: Failed
    * `W`: Waiting

---

## Extended Features:

### 1. Error Codes (for failures):

- `E01`: Resource issues
- `E02`: Data inconsistency
- `E03`: Dependency malfunction

... and more as needed.

### 2. Collaboration Flag:

- `+`: Denotes required collaboration.

---

## Example Codes:

- `C-1.8-H-I`: A high-priority coding task that's initializing.
- `D-2.3-M-P`: A medium-priority data task currently in progress.
- `M-3.5-L-P+`: A low-priority monitoring task in progress that needs collaboration.
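
These codes are easy to parse mechanically; here is a small Python sketch of a decoder for the format above. Treating the collaboration `+` and an error code as optional suffixes (joined by a comma) is an assumption about how they compose.

```python
import re

# [AgentType]-[TaskLayer].[TaskNumber]-[Priority]-[Status][+][,Exx]
PATTERN = re.compile(
    r"^(?P<agent>[CDMNRIS])-(?P<layer>\d+)\.(?P<number>\d+)"
    r"-(?P<priority>[HML])-(?P<status>[IPCFW])(?P<collab>\+)?(?:,(?P<error>E\d{2}))?$"
)

def decode(code: str) -> dict:
    """Decode a shorthand task code into its named components."""
    match = PATTERN.match(code)
    if match is None:
        raise ValueError(f"Malformed shorthand code: {code!r}")
    fields = match.groupdict()
    fields["collab"] = fields["collab"] is not None
    return fields

print(decode("M-3.5-L-P+"))
# {'agent': 'M', 'layer': '3', 'number': '5', 'priority': 'L', 'status': 'P', 'collab': True, 'error': None}
```
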
---

By leveraging the Enhanced Shorthand Communication System, the Swarms Multi-Agent Framework can ensure swift interactions, concise communications, and effective task management.

---

docs_dir: '.' # replace with the correct path if your documentation files are not in the same directory as mkdocs.yml
site_name: Swarms
site_url: https://docs.swarms.world
site_author: Swarms
site_description: The Enterprise-Grade Production-Ready Multi-Agent Orchestration Framework
repo_name: kyegomez/swarms
repo_url: https://github.com/kyegomez/swarms
edit_uri: https://github.com/kyegomez/swarms/tree/main/docs
copyright: TGSC Corp 2024. All rights reserved.

plugins:
  # - glightbox
  - search
  - git-authors
  - mkdocs-jupyter:
      kernel_name: python3
      execute: false
      include_source: True
      include_requirejs: true
  - mkdocstrings:
      default_handler: python
      handlers:
        python:
          options:
            parameter_headings: true
            paths: [supervision]
            load_external_modules: true
            allow_inspection: true
            show_bases: true
            group_by_category: true
            docstring_style: google
            show_symbol_type_heading: true
            show_symbol_type_toc: true
            show_category_heading: true
            domains: [std, py]
  - git-committers:
      repository: kyegomez/swarms
      branch: master
      # token: !ENV ["GITHUB_TOKEN"]
  - git-revision-date-localized:
      enable_creation_date: true

extra_css:
  - assets/css/extra.css

extra:
  social:
    - icon: fontawesome/brands/twitter
      link: https://x.com/KyeGomezB
    - icon: fontawesome/brands/github
      link: https://github.com/kyegomez/swarms
    - icon: fontawesome/brands/twitter
      link: https://x.com/swarms_corp
    - icon: fontawesome/brands/discord
      link: https://discord.com/servers/agora-999382051935506503

  analytics:
    provider: google
    property: G-MPE9C65596

theme:
  name: material
  custom_dir: overrides
  logo: assets/img/swarms-logo.png
  palette:
    - scheme: default
      primary: white # White background
      accent: white # White accents for interactive elements
      toggle:
        icon: material/brightness-7
        name: Switch to dark mode
    - scheme: slate # Dark scheme for accessibility
      primary: black
      accent: black
      toggle:
        icon: material/brightness-4
        name: Switch to light mode
  features:
    - content.code.copy
    - content.code.annotate
    - navigation.tabs
    - navigation.sections
    - navigation.expand
    - navigation.top
    - announce.dismiss
  font:
    text: "Fira Sans" # Clean and readable text
    code: "Fira Code" # Modern look for code snippets

# Extensions
markdown_extensions:
  - abbr
  - admonition
  - attr_list
  - def_list
  - footnotes
  - md_in_html
  - toc:
      permalink: true
  - pymdownx.arithmatex:
      generic: true
  - pymdownx.betterem:
      smart_enable: all
  - pymdownx.caret
  - pymdownx.details
  - pymdownx.emoji:
      emoji_generator: !!python/name:material.extensions.emoji.to_svg
      emoji_index: !!python/name:material.extensions.emoji.twemoji
  - pymdownx.highlight:
      anchor_linenums: true
      line_spans: __span
      pygments_lang_class: true
  - pymdownx.inlinehilite
  - pymdownx.keys
  - pymdownx.magiclink:
      normalize_issue_symbols: true
      repo_url_shorthand: true
      user: kyegomez
      repo: swarms
  - pymdownx.mark
  - pymdownx.smartsymbols
  - pymdownx.snippets:
      auto_append:
        - includes/mkdocs.md
  - pymdownx.superfences:
      custom_fences:
        - name: mermaid
          class: mermaid
          format: !!python/name:pymdownx.superfences.fence_code_format
  - pymdownx.tabbed:
      alternate_style: true
      combine_header_slug: true
      slugify: !!python/object/apply:pymdownx.slugs.slugify
        kwds:
          case: lower
  - pymdownx.tasklist:
      custom_checkbox: true
  - pymdownx.tilde

nav:
  - Home:
      - Overview: "index.md"
      # - The Vision: "swarms/framework/vision.md"
      # - Docker Setup: "swarms/install/docker_setup.md"
      - Our Goal; The Ultimate Multi-Agent LLM Framework for Developers: "swarms/concept/vision.md"
      - Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md"
      - Onboarding:
          - Installation: "swarms/install/install.md"
          - Environment Configuration: "swarms/install/workspace_manager.md"
          - Quickstart: "swarms/install/quickstart.md"
          - Swarms CLI: "swarms/cli/main.md"
      # - Swarms + Docker:
      - Swarms Framework Architecture: "swarms/concept/framework_architecture.md"
      # - Preliminary:
      #   - 80/20 Rule For Agents: "swarms/prompting/8020.md"
      - Managing Prompts in Production: "swarms/prompts/main.md"
      - Agents:
          # - Overview: "swarms/structs/index.md"
          # - Build Custom Agents: "swarms/structs/diy_your_own_agent.md"
          - Agent Architecture: "swarms/framework/agents_explained.md"
          - Complete Agent API: "swarms/structs/agent.md"
          - OpenAI Assistant: "swarms/agents/openai_assistant.md"
          - Create and Run Agents from YAML: "swarms/agents/create_agents_yaml.md"
          - Integrating External Agents from Griptape, Langchain, etc: "swarms/agents/external_party_agents.md"
          - Creating Custom Agents: "swarms/agents/new_agent.md"
      - Tools:
          - Overview: "swarms/tools/main.md"
          - What are tools?: "swarms/tools/build_tool.md"
          - ToolAgent: "swarms/agents/tool_agent.md"
          - Tool Storage & tool_registry decorator: "swarms/tools/tool_storage.md"
      - RAG || Long Term Memory:
          - Integrating RAG with Agents: "swarms/memory/diy_memory.md"
      - Swarm Architectures:
          - Why MultiAgent Collaboration is Necessary: "swarms/concept/why.md"
          - Swarm Architectures: "swarms/concept/swarm_architectures.md"
          - Choosing the right Swarm Architecture: "swarms/concept/how_to_choose_swarms.md"
          - Building Custom Swarms: "swarms/structs/custom_swarm.md"
          - Create New Swarm Architectures: "swarms/structs/create_new_swarm.md"
          - Architectures Available:
              - MajorityVoting: "swarms/structs/majorityvoting.md"
              - AgentRearrange: "swarms/structs/agent_rearrange.md"
              - RoundRobin: "swarms/structs/round_robin_swarm.md"
              - Mixture of Agents: "swarms/structs/moa.md"
              - GraphWorkflow: "swarms/structs/graph_workflow.md"
              - GroupChat: "swarms/structs/group_chat.md"
              - AgentRegistry: "swarms/structs/agent_registry.md"
              - SpreadSheetSwarm: "swarms/structs/spreadsheet_swarm.md"
              - ForestSwarm: "swarms/structs/forest_swarm.md"
              - SwarmRouter: "swarms/structs/swarm_router.md"
              - TaskQueueSwarm: "swarms/structs/taskqueue_swarm.md"
              - SwarmRearrange: "swarms/structs/swarm_rearrange.md"
              - MultiAgentRouter: "swarms/structs/multi_agent_router.md"
              - Various Execution Methods: "swarms/structs/various_execution_methods.md"
          - Workflows:
              - ConcurrentWorkflow: "swarms/structs/concurrentworkflow.md"
              - AsyncWorkflow: "swarms/structs/async_workflow.md"
              - SequentialWorkflow: "swarms/structs/sequential_workflow.md"
          - Structs:
              - Conversation: "swarms/structs/conversation.md"
              - AgentLoader: "swarms/structs/agent_loader.md"
              # - Task: "swarms/structs/task.md"
      - Full API Reference: "swarms/framework/reference.md"
      - Examples:
          - Unique Swarms: "swarms/examples/unique_swarms.md"
  - Swarm Models:
      - Overview: "swarms/models/index.md"
      # - Models Available: "swarms/models/index.md"
      # - Available Models from OpenAI, Huggingface, TogetherAI, and more: "swarms/models/models_available_overview.md"
      # - Model Router
      - Quickstart: "swarms/models/models_available_overview.md"
      - How to Create A Custom Language Model: "swarms/models/custom_model.md"
      - Language Models:
          - BaseLLM: "swarms/models/base_llm.md"
          - HuggingFaceLLM: "swarms/models/huggingface.md"
          - Anthropic: "swarms/models/anthropic.md"
          - OpenAIChat: "swarms/models/openai.md"
          - OpenAIFunctionCaller: "swarms/models/openai_function_caller.md"
          - Groq: "swarms/models/groq.md"
      # - Ollama:
      # - Fireworks
      # - Octo
      # - Liquid AI
      - MultiModal Models:
          - BaseMultiModalModel: "swarms/models/base_multimodal_model.md"
          - Multi Modal Models Available: "swarms/models/multimodal_models.md"
          - GPT4VisionAPI: "swarms/models/gpt4v.md"
  - Swarms Cloud API:
      # - Overview: "swarms_cloud/main.md"
      - Overview: "swarms_cloud/vision.md"
      - MCS API: "swarms_cloud/mcs_api.md"
      - Swarms Cloud CLI: "swarms_cloud/cli.md"
      # - Add Agents to Marketplace: "swarms_cloud/add_agent.md"
      # - Available Models: "swarms_cloud/available_models.md"
      # - Agent API: "swarms_cloud/agent_api.md"
      # - Migrate from OpenAI to Swarms in 3 lines of code: "swarms_cloud/migrate_openai.md"
      # - Getting Started with SOTA Vision Language Models VLM: "swarms_cloud/getting_started.md"
  - Swarms Memory:
      - Overview: "swarms_memory/index.md"
      - Memory Systems:
          - ChromaDB: "swarms_memory/chromadb.md"
          - Pinecone: "swarms_memory/pinecone.md"
          - Faiss: "swarms_memory/faiss.md"
  - Swarms Marketplace:
      - Overview: "swarms_platform/index.md"
      - Share & Discover Prompts, Agents, Tools, and more: "swarms_platform/share_discover.md"
      - Prompts API:
          - Add Prompts: "swarms_platform/prompts/add_prompt.md"
          - Edit Prompts: "swarms_platform/prompts/edit_prompt.md"
          - Query Prompts: "swarms_platform/prompts/fetch_prompts.md"
      - Agents API:
          - Add Agents: "swarms_platform/agents/agents_api.md"
          - Query Agents: "swarms_platform/agents/fetch_agents.md"
          - Edit Agents: "swarms_platform/agents/edit_agent.md"
      - Telemetry API:
          - PUT: "swarms_platform/telemetry/index.md"
      # - Tools API:
      #   - Overview: "swarms_platform/tools_api.md"
      #   - Add Tools: "swarms_platform/fetch_tools.md"
  # - Guides:
  #   - Unlocking Efficiency and Cost Savings in Healthcare; How Swarms of LLM Agents Can Revolutionize Medical Operations and Save Millions: "guides/healthcare_blog.md"
  #   - Understanding Agent Evaluation Mechanisms: "guides/agent_evals.md"
  #   - Agent Glossary: "swarms/glossary.md"
  #   - The Ultimate Technical Guide to the Swarms CLI; A Step-by-Step Developers Guide: "swarms/cli/cli_guide.md"
  #   - Prompting Guide:
  #     - The Essence of Enterprise-Grade Prompting: "swarms/prompts/essence.md"
  #     - An Analysis on Prompting Strategies: "swarms/prompts/overview.md"
  #     - Managing Prompts in Production: "swarms/prompts/main.md"
  - Community:
      - Bounty Program: "corporate/bounty_program.md"
  - Contributing:
      - Contributing: "swarms/contributing.md"
      - Tests: "swarms/framework/test.md"
      - Code Cleanliness: "swarms/framework/code_cleanliness.md"
      - Philosophy: "swarms/concept/philosophy.md"
  - Changelog:
      - Swarms 5.6.8: "swarms/changelog/5_6_8.md"
      - Swarms 5.8.1: "swarms/changelog/5_8_1.md"
      - Swarms 5.9.2: "swarms/changelog/changelog_new.md"
  - Corporate:
      - Culture: "corporate/culture.md"
      - Hiring: "corporate/hiring.md"
      - Swarms Goals & Milestone Tracking; A Vision for 2024 and Beyond: "corporate/2024_2025_goals.md"
  # - Clusterops:
  #   - Overview: "clusterops/reference.md"

---

{% extends "base.html" %}

<!-- https://squidfunk.github.io/mkdocs-material/customization/#overriding-blocks -->

{% block announce %}
<div style="text-align:center">
  <a href="https://github.com/kyegomez/swarms">Star and contribute</a> to Swarms on GitHub!
</div>
{% endblock %}

---

mkdocs
mkdocs-material
mkdocs-glightbox
mkdocs-git-authors-plugin
mkdocs-git-revision-date-plugin
mkdocs-git-committers-plugin
mkdocstrings
mike
mkdocs-jupyter
mkdocs-git-committers-plugin-2
mkdocs-git-revision-date-localized-plugin
mkdocs-redirects
mkdocs-material-extensions
mkdocs-simple-hooks
mkdocs-awesome-pages-plugin
mkdocs-versioning
mkdocs-mermaid2-plugin
mkdocs-include-markdown-plugin
mkdocs-enumerate-headings-plugin
mkdocs-autolinks-plugin
mkdocs-minify-html-plugin

# Requirements for core
jinja2~=3.1
markdown~=3.7
mkdocs-material-extensions~=1.3
pygments~=2.18
pymdown-extensions~=10.12

# Requirements for plugins
babel~=2.16
colorama~=0.4
paginate~=0.5
regex>=2022.4