diff --git a/.env.example b/.env.example
new file mode 100644
index 00000000..dfe4c75b
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,29 @@
+OPENAI_API_KEY="sk-"
+GOOGLE_API_KEY=""
+ANTHROPIC_API_KEY=""
+AI21_API_KEY="your_api_key_here"
+COHERE_API_KEY="your_api_key_here"
+ALEPHALPHA_API_KEY="your_api_key_here"
+HUGGINGFACEHUB_API_KEY="your_api_key_here"
+SWARMS_API_KEY=""
+
+EVAL_PORT=8000
+MODEL_NAME="gpt-4"
+
+USE_GPU=True
+PLAYGROUND_DIR="playground"
+
+LOG_LEVEL="INFO"
+BOT_NAME="Orca"
+HF_API_KEY="your_huggingface_api_key_here"
+
+USE_TELEMETRY=True
+AGENTOPS_API_KEY=""
+FIREWORKS_API_KEY=""
+# OPENAI_API_KEY is already defined above — duplicate removed (dotenv loaders silently keep the last value)
+# ANTHROPIC_API_KEY is already defined above — duplicate removed (dotenv loaders silently keep the last value)
+AZURE_OPENAI_ENDPOINT=your_azure_openai_endpoint
+AZURE_OPENAI_DEPLOYMENT=your_azure_openai_deployment
+OPENAI_API_VERSION=your_openai_api_version
+AZURE_OPENAI_API_KEY=your_azure_openai_api_key
+AZURE_OPENAI_AD_TOKEN=your_azure_openai_ad_token
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 00000000..17d44f5d
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,13 @@
+---
+# These are supported funding model platforms
+github: [kyegomez]
+# patreon: # Replace with a single Patreon username
+# open_collective: # Replace with a single Open Collective username
+# ko_fi: # Replace with a single Ko-fi username
+# tidelift: # Replace with a single Tidelift platform-name/package-name
+# community_bridge: # Replace with a single Community Bridge project-name
+# liberapay: # Replace with a single Liberapay username
+# issuehunt: # Replace with a single IssueHunt username
+# otechie: # Replace with a single Otechie username
+# lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name
+# custom: #Nothing
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..41ddcb33
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,27 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: "[BUG] "
+labels: bug
+assignees: kyegomez
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..806abd71
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: 'kyegomez'
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..7dc861aa
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,29 @@
+Thank you for contributing to Swarms!
+
+Replace this comment with:
+ - Description: a description of the change,
+ - Issue: the issue # it fixes (if applicable),
+ - Dependencies: any dependencies required for this change,
+ - Tag maintainer: for a quicker response, tag the relevant maintainer (see below),
+ - Twitter handle: we announce bigger features on Twitter. If your PR gets announced and you'd like a mention, we'll gladly shout you out!
+
+Please make sure your PR is passing linting and testing before submitting. Run `make format`, `make lint` and `make test` to check this locally.
+
+See contribution guidelines for more information on how to write/run tests, lint, etc:
+https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md
+
+If you're adding a new integration, please include:
+ 1. a test for the integration, preferably unit tests that do not rely on network access,
+ 2. an example notebook showing its use.
+
+
+Maintainer responsibilities:
+ - General / Misc / if you don't know who to tag: kye@apac.ai
+ - DataLoaders / VectorStores / Retrievers: kye@apac.ai
+ - swarms.models: kye@apac.ai
+ - swarms.memory: kye@apac.ai
+ - swarms.structures: kye@apac.ai
+
+If no one reviews your PR within a few days, feel free to email Kye at kye@apac.ai
+
+See contribution guidelines for more information on how to write/run tests, lint, etc: https://github.com/kyegomez/swarms
\ No newline at end of file
diff --git a/.github/action.yml b/.github/action.yml
new file mode 100644
index 00000000..4158bc81
--- /dev/null
+++ b/.github/action.yml
@@ -0,0 +1,33 @@
+---
+name: "Init Environment"
+description: "Initialize environment for tests"
+runs:
+ using: "composite"
+ steps:
+ - name: Checkout actions
+ uses: actions/checkout@v3
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Install and configure Poetry
+ uses: snok/install-poetry@v1
+ with:
+ virtualenvs-create: true
+ virtualenvs-in-project: true
+ installer-parallel: true
+ - name: Load cached venv
+ id: cached-poetry-dependencies
+ uses: actions/cache@v3
+ with:
+ path: .venv
+        key: venv-${{ runner.os }}-${{ matrix.python-version }}-${{ hashFiles('**/poetry.lock') }}
+ - name: Install dependencies
+ if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+ run: poetry install --no-interaction --no-root --with test --with dev --all-extras
+ shell: bash
+ - name: Activate venv
+ run: |
+ source .venv/bin/activate
+ echo PATH=$PATH >> $GITHUB_ENV
+ shell: bash
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..3b365943
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,12 @@
+---
+# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ - package-ecosystem: "pip"
+ directory: "/"
+ schedule:
+ interval: "weekly"
diff --git a/.github/labeler.yml b/.github/labeler.yml
new file mode 100644
index 00000000..99dd5c25
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,34 @@
+---
+documentation:
+ - changed-files:
+ - any-glob-to-any-file: ["docs/**", "*.md"]
+tests:
+ - changed-files:
+ - any-glob-to-any-file: "tests/**"
+agents:
+ - changed-files:
+ - any-glob-to-any-file: "swarms/agents/**"
+artifacts:
+ - changed-files:
+ - any-glob-to-any-file: "swarms/artifacts/**"
+memory:
+ - changed-files:
+ - any-glob-to-any-file: "swarms/memory/**"
+models:
+ - changed-files:
+ - any-glob-to-any-file: "swarms/models/**"
+prompts:
+ - changed-files:
+ - any-glob-to-any-file: "swarms/prompts/**"
+structs:
+ - changed-files:
+ - any-glob-to-any-file: "swarms/structs/**"
+telemetry:
+ - changed-files:
+ - any-glob-to-any-file: "swarms/telemetry/**"
+tools:
+ - changed-files:
+ - any-glob-to-any-file: "swarms/tools/**"
+utils:
+ - changed-files:
+ - any-glob-to-any-file: "swarms/utils/**"
diff --git a/.github/workflows/RELEASE.yml b/.github/workflows/RELEASE.yml
new file mode 100644
index 00000000..059ec93e
--- /dev/null
+++ b/.github/workflows/RELEASE.yml
@@ -0,0 +1,47 @@
+---
+name: release
+on:
+ pull_request:
+ types:
+ - closed
+ branches:
+ - master
+ paths:
+ - "pyproject.toml"
+env:
+ POETRY_VERSION: "1.4.2"
+jobs:
+ if_release:
+    if: |
+      github.event.pull_request.merged == true
+      && contains(github.event.pull_request.labels.*.name, 'release')
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install poetry
+ run: pipx install poetry==$POETRY_VERSION
+ - name: Set up Python 3.9
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.9"
+ cache: "poetry"
+ - name: Build project for distribution
+ run: poetry build
+ - name: Check Version
+ id: check-version
+ run: |
+ echo version=$(poetry version --short) >> $GITHUB_OUTPUT
+ - name: Create Release
+ uses: ncipollo/release-action@v1
+ with:
+ artifacts: "dist/*"
+ token: ${{ secrets.GITHUB_TOKEN }}
+ draft: false
+ generateReleaseNotes: true
+ tag: v${{ steps.check-version.outputs.version }}
+ commit: master
+ - name: Publish to PyPI
+ env:
+ POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
+ run: |-
+ poetry publish
diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml
new file mode 100644
index 00000000..21129735
--- /dev/null
+++ b/.github/workflows/autofix.yml
@@ -0,0 +1,25 @@
+name: autofix.ci
+
+on:
+ pull_request:
+ push:
+    branches: ["main", "master"]
+permissions:
+ contents: read
+
+jobs:
+ autofix:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - uses: actions/setup-go@v5
+ - run: go install github.com/google/yamlfmt/cmd/yamlfmt@latest
+ - run: yamlfmt .
+
+ - uses: actions/setup-python@v5
+ - run: pip install ruff
+ - run: ruff format .
+ - run: ruff check --fix .
+
+ - uses: autofix-ci/action@dd55f44df8f7cdb7a6bf74c78677eb8acd40cd0a
diff --git a/.github/workflows/codacy.yml b/.github/workflows/codacy.yml
new file mode 100644
index 00000000..06d9d87e
--- /dev/null
+++ b/.github/workflows/codacy.yml
@@ -0,0 +1,45 @@
+---
+name: Codacy
+on:
+ push:
+ branches: ["master"]
+ pull_request:
+ branches: ["master"]
+ schedule:
+      - cron: "0 0 * * *"
+
+permissions:
+ contents: read
+
+jobs:
+ codacy-security-scan:
+ permissions:
+ contents: read # for actions/checkout to fetch code
+ security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
+ actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
+ name: Codacy Security Scan
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ # Execute Codacy Analysis CLI and generate a SARIF output with the security issues identified during the analysis
+ - name: Run Codacy Analysis CLI
+ uses: codacy/codacy-analysis-cli-action@97bf5df3c09e75f5bcd72695998f96ebd701846e
+ with:
+ # Check https://github.com/codacy/codacy-analysis-cli#project-token to
+ # get your project token from your Codacy repository
+ # You can also omit the token and run the tools that support default configurations
+ project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
+ verbose: true
+ output: results.sarif
+ format: sarif
+ # Adjust severity of non-security issues
+ gh-code-scanning-compat: true
+ # Force 0 exit code to allow SARIF file generation
+ # This will handover control about PR rejection to the GitHub side
+ max-allowed-issues: 2147483647
+ # Upload the SARIF file generated in the previous step
+ - name: Upload SARIF results file
+ uses: github/codeql-action/upload-sarif@v3
+ with:
+ sarif_file: results.sarif
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
new file mode 100644
index 00000000..b93db343
--- /dev/null
+++ b/.github/workflows/codeql.yml
@@ -0,0 +1,41 @@
+---
+name: "CodeQL"
+on:
+ push:
+ branches: ["master"]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: ["master"]
+ schedule:
+ - cron: "33 12 * * 5"
+jobs:
+ analyze:
+ name: Analyze
+ # Runner size impacts CodeQL analysis time. To learn more, please see:
+ # - https://gh.io/recommended-hardware-resources-for-running-codeql
+ # - https://gh.io/supported-runners-and-hardware-resources
+ # - https://gh.io/using-larger-runners
+ # Consider using larger runners for possible analysis time improvements.
+ runs-on: ubuntu-latest
+ timeout-minutes: 360
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+ strategy:
+ fail-fast: false
+ matrix:
+ language: ["python"]
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v3
+ with:
+ languages: ${{ matrix.language }}
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v3
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v3
+ with:
+ category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/docs-preview.yml b/.github/workflows/docs-preview.yml
new file mode 100644
index 00000000..037fd35e
--- /dev/null
+++ b/.github/workflows/docs-preview.yml
@@ -0,0 +1,16 @@
+name: Documentation Links
+on:
+ pull_request_target:
+ types:
+ - opened
+
+permissions:
+ pull-requests: write
+
+jobs:
+ documentation-links:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: readthedocs/actions/preview@v1
+ with:
+ project-slug: "swarms"
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 00000000..17e8b500
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,24 @@
+name: Documentation
+on:
+ push:
+ branches:
+ - master
+ - main
+ - develop
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ with:
+ python-version: 3.11
+ - run: pip install mkdocs-material
+ - run: pip install mkdocs-glightbox
+ - run: pip install "mkdocstrings[python]"
+ - run: pip3 install mkdocs-git-authors-plugin
+ - run: pip install mkdocs-jupyter==0.16.0
+ - run: pip install --upgrade lxml_html_clean
+ - run: pip install mkdocs-git-committers-plugin
+ - run: pip3 install mkdocs-git-revision-date-localized-plugin
+ - run: mkdocs gh-deploy --force -f docs/mkdocs.yml
diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml
new file mode 100644
index 00000000..d8ab919c
--- /dev/null
+++ b/.github/workflows/label.yml
@@ -0,0 +1,20 @@
+---
+# This workflow will triage pull requests and apply a label based on the
+# paths that are modified in the pull request.
+#
+# To use this workflow, you will need to set up a .github/labeler.yml
+# file with configuration. For more information, see:
+# https://github.com/actions/labeler
+
+name: Labeler
+on: [pull_request_target]
+jobs:
+ label:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ pull-requests: write
+ steps:
+ - uses: actions/labeler@v5
+ with:
+ repo-token: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 00000000..f2295d07
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,33 @@
+---
+name: Lint
+on: [push, pull_request] # yamllint disable-line rule:truthy
+jobs:
+ yaml-lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ - run: pip install yamllint
+ - run: yamllint .
+ flake8-lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ - run: pip install flake8
+ - run: flake8 .
+ ruff-lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ - run: pip install ruff
+ - run: ruff format .
+ - run: ruff check --fix .
+ pylint-lint:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ - run: pip install pylint
+ - run: pylint swarms --recursive=y
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 00000000..a1ab3b31
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,49 @@
+---
+# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
+#
+# You can adjust the behavior by modifying this file.
+# For more information, see:
+# https://github.com/actions/stale
+name: Stale
+on:
+ schedule:
+      # Scheduled to run at 00:00 UTC every day
+ - cron: "0 0 * * *"
+jobs:
+ stale:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+ steps:
+ - uses: actions/stale@v9
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ days-before-issue-stale: 14
+ days-before-issue-close: 14
+ stale-issue-label: "status:stale"
+ close-issue-reason: not_planned
+ any-of-labels: "status:awaiting user response,status:more data needed"
+ stale-issue-message: >
+ Marking this issue as stale since it has been open for 14 days with no
+ activity. This issue will be closed if no further activity occurs.
+
+ close-issue-message: >
+ This issue was closed because it has been inactive for 28 days. Please
+ post a new issue if you need further assistance. Thanks!
+
+ days-before-pr-stale: 14
+ days-before-pr-close: 14
+ stale-pr-label: "status:stale"
+ stale-pr-message: >
+ Marking this pull request as stale since it has been open for 14 days
+ with no activity. This PR will be closed if no further activity occurs.
+
+ close-pr-message: >
+ This pull request was closed because it has been inactive for 28 days.
+ Please open a new pull request if you need further assistance. Thanks!
+
+ # Label that can be assigned to issues to exclude them from being marked as stale
+ exempt-issue-labels: "override-stale"
+ # Label that can be assigned to PRs to exclude them from being marked as stale
+ exempt-pr-labels: "override-stale"
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 00000000..d5e053d0
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,60 @@
+name: Tests
+on:
+ push:
+ schedule:
+ - cron: "0 0 * * *"
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install Python
+ uses: actions/setup-python@v5
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+ - name: Setup a local virtual environment
+ run: |
+ poetry config virtualenvs.create true --local
+ poetry config virtualenvs.in-project true --local
+ - uses: actions/cache@v4
+        # Cache the virtual environment, keyed on the poetry.lock hash
+        name: Define a cache for the virtual environment file
+ with:
+ path: ./.venv
+ key: venv-${{ hashFiles('poetry.lock') }}
+ - name: Install the project dependencies
+ run: poetry install
+ - name: Install OpenCV
+ run: sudo apt-get install python3-opencv
+ - name: Enter the virtual environment
+ run: source $VENV
+ - name: Run the tests
+ run: poetry run pytest --verbose
+ run-examples:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Install Python
+ uses: actions/setup-python@v5
+ - name: Install Poetry
+ uses: snok/install-poetry@v1
+ - name: Setup a local virtual environment
+ run: |
+ poetry config virtualenvs.create true --local
+ poetry config virtualenvs.in-project true --local
+ - uses: actions/cache@v4
+        # Cache the virtual environment, keyed on the poetry.lock hash
+        name: Define a cache for the virtual environment file
+ with:
+ path: ./.venv
+ key: venv-${{ hashFiles('poetry.lock') }}
+ - name: Install the project dependencies
+ run: poetry install
+ - name: Install OpenCV
+ run: sudo apt-get install python3-opencv
+ - name: Enter the virtual environment
+ run: source $VENV
+ - name: Make Script Executable and Run
+ run: |-
+ chmod +x ./scripts/run_examples.sh
+ ./scripts/run_examples.sh
diff --git a/.github/workflows/welcome.yml b/.github/workflows/welcome.yml
new file mode 100644
index 00000000..dd16f9c3
--- /dev/null
+++ b/.github/workflows/welcome.yml
@@ -0,0 +1,22 @@
+---
+name: Welcome
+on:
+ issues:
+ types: [opened]
+ pull_request_target:
+ types: [opened]
+jobs:
+ build:
+    name: "👋 Welcome"
+ permissions: write-all
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/first-interaction@v1.3.0
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+          issue-message:
+            "Hello there, thank you for opening an issue! 👋🏻 The team
+            has been notified and will get back to you as soon as possible."
+          pr-message:
+            "Hello there, thank you for opening a PR! 👋🏻 The team has
+            been notified and will get back to you as soon as possible."
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..a0aa1358
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,219 @@
+__pycache__/
+.venv/
+
+.env
+
+image/
+audio/
+video/
+artifacts_three
+dataframe/
+
+static/generated
+runs
+Financial-Analysis-Agent_state.json
+artifacts_five
+errors
+chroma
+agent_workspace
+Accounting Assistant_state.json
+Unit Testing Agent_state.json
+Devin_state.json
+hire_researchers
+json_logs
+Medical Image Diagnostic Agent_state.json
+flight agent_state.json
+D_state.json
+artifacts_six
+artifacts_seven
+swarms/__pycache__
+artifacts_once
+transcript_generator.json
+venv
+.DS_Store
+Cargo.lock
+.DS_STORE
+artifacts_logs
+Cargo.lock
+Medical Treatment Recommendation Agent_state.json
+swarms/agents/.DS_Store
+artifacts_two
+logs
+T_state.json
+_build
+conversation.txt
+t1_state.json
+stderr_log.txt
+t2_state.json
+.vscode
+.DS_STORE
+# Byte-compiled / optimized / DLL files
+Transcript Generator_state.json
+__pycache__/
+*.py[cod]
+*$py.class
+.grit
+swarm-worker-01_state.json
+error.txt
+Devin Worker 2_state.json
+# C extensions
+*.so
+.ruff_cache
+
+
+errors.txt
+
+Autonomous-Agent-XYZ1B_state.json
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+.DS_Store
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+.vscode/settings.json
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..0c936705
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,18 @@
+repos:
+ - repo: https://github.com/ambv/black
+ rev: 22.3.0
+ hooks:
+ - id: black
+ - repo: https://github.com/charliermarsh/ruff-pre-commit
+ rev: 'v0.0.255'
+ hooks:
+ - id: ruff
+          args: [--unsafe-fixes]
+ - repo: https://github.com/nbQA-dev/nbQA
+ rev: 1.6.3
+ hooks:
+ - id: nbqa-black
+ additional_dependencies: [ipython==8.12, black]
+ - id: nbqa-ruff
+ args: ["--ignore=I001"]
+ additional_dependencies: [ipython==8.12, ruff]
\ No newline at end of file
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..afbec392
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+kye@apac.ai.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..8242d437
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,82 @@
+# Contributing to Swarms π οΈ
+
+Thank you for your interest in contributing to Swarms! This guide will help you get started with your contribution.
+
+## Essential Steps for Contributing
+
+### 1. Fork and Clone the Repository
+
+1. Fork the Swarms repository to your GitHub account by clicking the "Fork" button in the top-right corner of the repository page.
+2. Clone your forked repository to your local machine:
+ ```
+   git clone https://github.com/YOUR-USERNAME/swarms.git
+ ```
+
+### 2. Make Changes
+
+1. Create a new branch for your changes:
+ ```
+ git checkout -b your-branch-name
+ ```
+2. Make your desired changes to the codebase.
+
+### 3. Commit and Push Changes
+
+1. Stage your changes:
+ ```
+ git add .
+ ```
+2. Commit your changes:
+ ```
+ git commit -m "Your descriptive commit message"
+ ```
+3. Push your changes to your fork:
+ ```
+ git push -u origin your-branch-name
+ ```
+
+### 4. Create a Pull Request
+
+1. Go to the original Swarms repository on GitHub.
+2. Click on "Pull Requests" and then "New Pull Request".
+3. Select your fork and branch as the compare branch.
+4. Click "Create Pull Request".
+5. Fill in the pull request description and submit.
+
+## Important Considerations
+
+- Ensure your code follows the project's coding standards.
+- Include tests for new features or bug fixes.
+- Update documentation as necessary.
+- Make sure your branch is up-to-date with the main repository before submitting a pull request.
+
+## Additional Information
+
+### Code Quality
+
+We use pre-commit hooks to maintain code quality. To set up pre-commit:
+
+1. Install pre-commit:
+ ```
+ pip install pre-commit
+ ```
+2. Set up the git hook scripts:
+ ```
+ pre-commit install
+ ```
+
+### Documentation
+
+- We use mkdocs for documentation. To serve the docs locally:
+ ```
+ mkdocs serve
+ ```
+
+### Testing
+
+- Run tests using pytest:
+ ```
+ pytest
+ ```
+
+For more detailed information on code quality, documentation, and testing, please refer to the full contributing guidelines in the repository.
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..d7b6bd18
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,340 @@
+
+Creative Commons Attribution 4.0 International Public License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution 4.0 International Public License ("Public License"). To the
+extent this Public License may be interpreted as a contract, You are
+granted the Licensed Rights in consideration of Your acceptance of
+these terms and conditions, and the Licensor grants You such rights in
+consideration of benefits the Licensor receives from making the
+Licensed Material available under these terms and conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ d. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ e. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ f. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ g. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ h. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ i. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ j. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ k. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ 4. If You Share Adapted Material You produce, the Adapter's
+ License You apply must not prevent recipients of the Adapted
+ Material from complying with this Public License.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material; and
+
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public
+licenses. Notwithstanding, Creative Commons may elect to apply one of
+its public licenses to material it publishes and in those instances
+will be considered the "Licensor." The text of the Creative Commons
+public licenses is dedicated to the public domain under the CC0 Public
+Domain Dedication. Except for the limited purpose of indicating that
+material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the
+public licenses.
+
+Creative Commons may be contacted at creativecommons.org.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..fe377dc6
--- /dev/null
+++ b/README.md
@@ -0,0 +1,1026 @@
+
+{% endblock %}
\ No newline at end of file
diff --git a/docs/purpose/limits_of_individual_agents.md b/docs/purpose/limits_of_individual_agents.md
new file mode 100644
index 00000000..6a05daa8
--- /dev/null
+++ b/docs/purpose/limits_of_individual_agents.md
@@ -0,0 +1,55 @@
+# The Limits of Individual Agents
+
+![Reliable Agents](../assets/img/reliabilitythrough.png)
+
+
+Individual agents have pushed the boundaries of what machines can learn and accomplish. However, despite their impressive capabilities, these agents face inherent limitations that can hinder their effectiveness in complex, real-world applications. This blog explores the critical constraints of individual agents, such as context window limits, hallucination, single-task threading, and lack of collaboration, and illustrates how multi-agent collaboration can address these limitations. In short,
+
+- Context Window Limits
+- Single Task Execution
+- Hallucination
+- No collaboration
+
+
+
+#### Context Window Limits
+
+One of the most significant constraints of individual agents, particularly in the domain of language models, is the context window limit. This limitation refers to the maximum amount of information an agent can consider at any given time. For instance, many language models can only process a fixed number of tokens (words or characters) in a single inference, restricting their ability to understand and generate responses based on longer texts. This limitation can lead to a lack of coherence in longer compositions and an inability to maintain context in extended conversations or documents.
+
+#### Hallucination
+
+Hallucination in AI refers to the phenomenon where an agent generates information that is not grounded in the input data or real-world facts. This can manifest as making up facts, entities, or events that do not exist or are incorrect. Hallucinations pose a significant challenge in ensuring the reliability and trustworthiness of AI-generated content, particularly in critical applications such as news generation, academic research, and legal advice.
+
+#### Single Task Threading
+
+Individual agents are often designed to excel at specific tasks, leveraging their architecture and training data to optimize performance in a narrowly defined domain. However, this specialization can also be a drawback, as it limits the agent's ability to multitask or adapt to tasks that fall outside its primary domain. Single-task threading means an agent may excel in language translation but struggle with image recognition or vice versa, necessitating the deployment of multiple specialized agents for comprehensive AI solutions.
+
+#### Lack of Collaboration
+
+Traditional AI agents operate in isolation, processing inputs and generating outputs independently. This isolation limits their ability to leverage diverse perspectives, share knowledge, or build upon the insights of other agents. In complex problem-solving scenarios, where multiple facets of a problem need to be addressed simultaneously, this lack of collaboration can lead to suboptimal solutions or an inability to tackle multifaceted challenges effectively.
+
+# The Elegant yet Simple Solution
+
+- ## Multi-Agent Collaboration
+
+Recognizing the limitations of individual agents, researchers and practitioners have explored the potential of multi-agent collaboration as a means to transcend these constraints. Multi-agent systems comprise several agents that can interact, communicate, and collaborate to achieve common goals or solve complex problems. This collaborative approach offers several advantages:
+
+#### Overcoming Context Window Limits
+
+By dividing a large task among multiple agents, each focusing on different segments of the problem, multi-agent systems can effectively overcome the context window limits of individual agents. For instance, in processing a long document, different agents could be responsible for understanding and analyzing different sections, pooling their insights to generate a coherent understanding of the entire text.
+
+#### Mitigating Hallucination
+
+Through collaboration, agents can cross-verify facts and information, reducing the likelihood of hallucinations. If one agent generates a piece of information, other agents can provide checks and balances, verifying the accuracy against known data or through consensus mechanisms.
+
+#### Enhancing Multitasking Capabilities
+
+Multi-agent systems can tackle tasks that require a diverse set of skills by leveraging the specialization of individual agents. For example, in a complex project that involves both natural language processing and image analysis, one agent specialized in text can collaborate with another specialized in visual data, enabling a comprehensive approach to the task.
+
+#### Facilitating Collaboration and Knowledge Sharing
+
+Multi-agent collaboration inherently encourages the sharing of knowledge and insights, allowing agents to learn from each other and improve their collective performance. This can be particularly powerful in scenarios where iterative learning and adaptation are crucial, such as dynamic environments or tasks that evolve over time.
+
+### Conclusion
+
+While individual AI agents have made remarkable strides in various domains, their inherent limitations necessitate innovative approaches to unlock the full potential of artificial intelligence. Multi-agent collaboration emerges as a compelling solution, offering a pathway to transcend individual constraints through collective intelligence. By harnessing the power of collaborative AI, we can address more complex, multifaceted problems, paving the way for more versatile, efficient, and effective AI systems in the future.
\ No newline at end of file
diff --git a/docs/purpose/why.md b/docs/purpose/why.md
new file mode 100644
index 00000000..5293de23
--- /dev/null
+++ b/docs/purpose/why.md
@@ -0,0 +1,134 @@
+# The Swarms Framework: Orchestrating Agents for Enterprise Automation
+
+In the rapidly evolving landscape of artificial intelligence (AI) and automation, a new paradigm is emerging: the orchestration of multiple agents working in collaboration to tackle complex tasks. This approach, embodied by the Swarms Framework, aims to address the fundamental limitations of individual agents and unlocks the true potential of AI-driven automation in enterprise operations.
+
+Individual agents are plagued by the same issues: short-term memory constraints, hallucinations, single-task limitations, lack of collaboration, and cost inefficiencies.
+
+[Learn more here from a list of compiled agent papers](https://github.com/kyegomez/awesome-multi-agent-papers)
+
+## The Purpose of Swarms: Overcoming Agent Limitations
+
+Individual agents, while remarkable in their own right, face several inherent challenges that hinder their ability to effectively automate enterprise operations at scale. These limitations include:
+
+1. Short-Term Memory Constraints
+2. Hallucination and Factual Inconsistencies
+3. Single-Task Limitations
+4. Lack of Collaborative Capabilities
+5. Cost Inefficiencies
+
+By orchestrating multiple agents to work in concert, the Swarms Framework directly tackles these limitations, paving the way for more efficient, reliable, and cost-effective enterprise automation.
+
+### Limitation 1: Short-Term Memory Constraints
+
+Many AI agents, particularly those based on large language models, suffer from short-term memory constraints. These agents can effectively process and respond to prompts, but their ability to retain and reason over information across multiple interactions or tasks is limited. This limitation can be problematic in enterprise environments, where complex workflows often involve retaining and referencing contextual information over extended periods.
+
+The Swarms Framework addresses this limitation by leveraging the collective memory of multiple agents working in tandem. While individual agents may have limited short-term memory, their combined memory pool becomes significantly larger, enabling the retention and retrieval of contextual information over extended periods. This collective memory is facilitated by agents specializing in information storage and retrieval, such as those based on systems like Llama Index or Pinecone.
+
+### Limitation 2: Hallucination and Factual Inconsistencies
+
+Another challenge faced by many AI agents is the tendency to generate responses that may contain factual inconsistencies or hallucinations -- information that is not grounded in reality or the provided context. This issue can undermine the reliability and trustworthiness of automated systems, particularly in domains where accuracy and consistency are paramount.
+
+The Swarms Framework mitigates this limitation by employing multiple agents with diverse knowledge bases and capabilities. By leveraging the collective intelligence of these agents, the framework can cross-reference and validate information, reducing the likelihood of hallucinations and factual inconsistencies. Additionally, specialized agents can be tasked with fact-checking and verification, further enhancing the overall reliability of the system.
+
+### Limitation 3: Single-Task Limitations
+
+Most individual AI agents are designed and optimized for specific tasks or domains, limiting their ability to handle complex, multi-faceted workflows that often characterize enterprise operations. While an agent may excel at a particular task, such as natural language processing or data analysis, it may struggle with other aspects of a larger workflow, such as task coordination or decision-making.
+
+The Swarms Framework overcomes this limitation by orchestrating a diverse ensemble of agents, each specializing in different tasks or capabilities. By intelligently combining and coordinating these agents, the framework can tackle complex, multi-threaded workflows that span various domains and task types. This modular approach allows for the seamless integration of new agents as they become available, enabling the continuous expansion and enhancement of the system's capabilities.
+
+### Limitation 4: Lack of Collaborative Capabilities
+
+Most AI agents are designed to operate independently, lacking the ability to effectively collaborate with other agents or coordinate their actions towards a common goal. This limitation can hinder the scalability and efficiency of automated systems, particularly in enterprise environments where tasks often require the coordination of multiple agents or systems.
+
+The Swarms Framework addresses this limitation by introducing a layer of coordination and collaboration among agents. Through specialized coordination agents and communication protocols, the framework enables agents to share information, divide tasks, and synchronize their actions. This collaborative approach not only increases efficiency but also enables the emergence of collective intelligence, where the combined capabilities of multiple agents surpass the sum of their individual abilities.
+
+### Limitation 5: Cost Inefficiencies
+
+Running large AI models or orchestrating multiple agents can be computationally expensive, particularly in enterprise environments where scalability and cost-effectiveness are critical considerations. Inefficient resource utilization or redundant computations can quickly escalate costs, making widespread adoption of AI-driven automation financially prohibitive.
+
+The Swarms Framework tackles this limitation by optimizing resource allocation and workload distribution among agents. By intelligently assigning tasks to the most appropriate agents and leveraging agent specialization, the framework minimizes redundant computations and improves overall resource utilization. Additionally, the framework can dynamically scale agent instances based on demand, ensuring that computational resources are allocated efficiently and costs are minimized.
+
+## The Swarms Framework: A Holistic Approach to Enterprise Automation
+
+The Swarms Framework is a comprehensive solution that addresses the limitations of individual agents by orchestrating their collective capabilities. By integrating agents from various frameworks, including LangChain, AutoGPT, Llama Index, and others, the framework leverages the strengths of each agent while mitigating their individual weaknesses.
+
+At its core, the Swarms Framework operates on the principle of multi-agent collaboration. By introducing specialized coordination agents and communication protocols, the framework enables agents to share information, divide tasks, and synchronize their actions towards a common goal. This collaborative approach not only increases efficiency but also enables the emergence of collective intelligence, where the combined capabilities of multiple agents surpass the sum of their individual abilities.
+
+The framework's architecture is modular and extensible, allowing for the seamless integration of new agents as they become available. This flexibility ensures that the system's capabilities can continuously expand and adapt to evolving enterprise needs and technological advancements.
+
+
+## Benefits of the Swarms Framework
+
+The adoption of the Swarms Framework in enterprise environments offers numerous benefits:
+
+1. Increased Efficiency and Scalability
+2. Improved Reliability and Accuracy
+3. Adaptability and Continuous Improvement
+4. Cost Optimization
+5. Enhanced Security and Compliance
+
+## Increased Efficiency and Scalability
+
+By orchestrating the collective capabilities of multiple agents, the Swarms Framework enables the efficient execution of complex, multi-threaded workflows. Tasks can be parallelized and distributed across specialized agents, reducing bottlenecks and increasing overall throughput. Additionally, the framework's modular design and ability to dynamically scale agent instances based on demand ensure that the system can adapt to changing workloads and scale seamlessly as enterprise needs evolve.
+
+## Improved Reliability and Accuracy
+
+The collaborative nature of the Swarms Framework reduces the risk of hallucinations and factual inconsistencies that can arise from individual agents. By leveraging the collective knowledge and diverse perspectives of multiple agents, the framework can cross-reference and validate information, enhancing the overall reliability and accuracy of its outputs.
+
+Additionally, the framework's ability to incorporate specialized fact-checking and verification agents further strengthens the trustworthiness of the system's outcomes, ensuring that critical decisions and actions are based on accurate and reliable information.
+
+## Adaptability and Continuous Improvement
+
+The modular architecture of the Swarms Framework allows for the seamless integration of new agents as they become available, enabling the continuous expansion and enhancement of the system's capabilities. As new AI models, algorithms, or data sources emerge, the framework can readily incorporate them, ensuring that enterprise operations remain at the forefront of technological advancements.
+
+Furthermore, the framework's monitoring and analytics capabilities provide valuable insights into system performance, enabling the identification of areas for improvement and the optimization of agent selection, task assignments, and resource allocation strategies over time.
+
+## Cost Optimization
+
+By intelligently orchestrating the collaboration of multiple agents, the Swarms Framework optimizes resource utilization and minimizes redundant computations. This efficient use of computational resources translates into cost savings, making the widespread adoption of AI-driven automation more financially viable for enterprises.
+
+The framework's ability to dynamically scale agent instances based on demand further contributes to cost optimization, ensuring that resources are allocated only when needed and minimizing idle or underutilized instances.
+
+## Enhanced Security and Compliance
+
+In enterprise environments, ensuring the security and compliance of automated systems is paramount. The Swarms Framework addresses these concerns by incorporating robust security measures and compliance controls.
+
+The framework's centralized Memory Manager component enables the implementation of access control mechanisms and data encryption, protecting sensitive information from unauthorized access or breaches. Additionally, the framework's modular design allows for the integration of specialized agents focused on compliance monitoring and auditing, ensuring that enterprise operations adhere to relevant regulations and industry standards.
+
+## Real-World Applications and Use Cases
+
+The Swarms Framework finds applications across a wide range of enterprise domains, enabling organizations to automate complex operations and streamline their workflows. Here are some examples of real-world use cases:
+
+1. Intelligent Process Automation (IPA)
+2. Customer Service and Support
+3. Fraud Detection and Risk Management
+4. Supply Chain Optimization
+5. Research and Development
+
+## Intelligent Process Automation (IPA)
+
+In the realm of business process automation, the Swarms Framework can orchestrate agents to automate and optimize complex workflows spanning multiple domains and task types. By combining agents specialized in areas such as natural language processing, data extraction, decision-making, and task coordination, the framework can streamline and automate processes that traditionally required manual intervention or coordination across multiple systems.
+
+## Customer Service and Support
+
+The framework's ability to integrate agents with diverse capabilities, such as natural language processing, knowledge retrieval, and decision-making, makes it well-suited for automating customer service and support operations. Agents can collaborate to understand customer inquiries, retrieve relevant information from knowledge bases, and provide accurate and personalized responses, improving customer satisfaction and reducing operational costs.
+
+## Fraud Detection and Risk Management
+
+In the financial and cybersecurity domains, the Swarms Framework can orchestrate agents specialized in data analysis, pattern recognition, and risk assessment to detect and mitigate fraudulent activities or security threats. By combining the collective intelligence of these agents, the framework can identify complex patterns and anomalies that may be difficult for individual agents to detect, enhancing the overall effectiveness of fraud detection and risk management strategies.
+
+## Supply Chain Optimization
+
+The complexity of modern supply chains often requires the coordination of multiple systems and stakeholders. The Swarms Framework can integrate agents specialized in areas such as demand forecasting, inventory management, logistics optimization, and supplier coordination to streamline and optimize supply chain operations. By orchestrating the collective capabilities of these agents, the framework can identify bottlenecks, optimize resource allocation, and facilitate seamless collaboration among supply chain partners.
+
+## Research and Development
+
+In research and development environments, the Swarms Framework can accelerate innovation by enabling the collaboration of agents specialized in areas such as literature review, data analysis, hypothesis generation, and experiment design. By orchestrating these agents, the framework can facilitate the exploration of new ideas, identify promising research directions, and streamline the iterative process of scientific inquiry.
+
+# Conclusion
+
+The Swarms Framework represents a paradigm shift in the field of enterprise automation, addressing the limitations of individual agents by orchestrating their collective capabilities. By integrating agents from various frameworks and enabling multi-agent collaboration, the Swarms Framework overcomes challenges such as short-term memory constraints, hallucinations, single-task limitations, lack of collaboration, and cost inefficiencies.
+
+Through its modular architecture, centralized coordination, and advanced monitoring and analytics capabilities, the Swarms Framework empowers enterprises to automate complex operations with increased efficiency, reliability, and adaptability. It unlocks the true potential of AI-driven automation, enabling organizations to stay ahead of the curve and thrive in an ever-evolving technological landscape.
+
+As the field of artificial intelligence continues to advance, the Swarms Framework stands as a robust and flexible solution, ready to embrace new developments and seamlessly integrate emerging agents and capabilities. By harnessing the power of collective intelligence, the framework paves the way for a future where enterprises can leverage the full potential of AI to drive innovation, optimize operations, and gain a competitive edge in their respective industries.
\ No newline at end of file
diff --git a/docs/purpose/why_swarms.md b/docs/purpose/why_swarms.md
new file mode 100644
index 00000000..9e75026e
--- /dev/null
+++ b/docs/purpose/why_swarms.md
@@ -0,0 +1,53 @@
+# Why Swarms?
+
+The need for multiple agents to work together in artificial intelligence (AI) and particularly in the context of Large Language Models (LLMs) stems from several inherent limitations and challenges in handling complex, dynamic, and multifaceted tasks with single-agent systems. Collaborating with multiple agents offers a pathway to enhance reliability, computational efficiency, cognitive diversity, and problem-solving capabilities. This section delves into the rationale behind employing multi-agent systems and strategizes on overcoming the associated expenses, such as API bills and hosting costs.
+
+### Why Multiple Agents Are Necessary
+
+#### 1. **Cognitive Diversity**
+
+Different agents can bring varied perspectives, knowledge bases, and problem-solving approaches to a task. This diversity is crucial in complex problem-solving scenarios where a single approach might not be sufficient. Cognitive diversity enhances creativity, leading to innovative solutions and the ability to tackle a broader range of problems.
+
+#### 2. **Specialization and Expertise**
+
+In many cases, tasks are too complex for a single agent to handle efficiently. By dividing the task among multiple specialized agents, each can focus on a segment where it excels, thereby increasing the overall efficiency and effectiveness of the solution. This approach leverages the expertise of individual agents to achieve superior performance in tasks that require multifaceted knowledge and skills.
+
+#### 3. **Scalability and Flexibility**
+
+Multi-agent systems can more easily scale to handle large-scale or evolving tasks. Adding more agents to the system can increase its capacity or capabilities, allowing it to adapt to larger workloads or new types of tasks. This scalability is essential in dynamic environments where the demand and nature of tasks can change rapidly.
+
+#### 4. **Robustness and Redundancy**
+
+Collaboration among multiple agents enhances the system's robustness by introducing redundancy. If one agent fails or encounters an error, others can compensate, ensuring the system remains operational. This redundancy is critical in mission-critical applications where failure is not an option.
+
+### Overcoming Expenses with API Bills and Hosting
+
+Deploying multiple agents, especially when relying on cloud-based services or APIs, can incur significant costs. Here are strategies to manage and reduce these expenses:
+
+#### 1. **Optimize Agent Efficiency**
+
+Before scaling up the number of agents, ensure each agent operates as efficiently as possible. This can involve refining algorithms, reducing unnecessary API calls, and optimizing data processing to minimize computational requirements and, consequently, the associated costs.
+
+#### 2. **Use Open Source and Self-Hosted Solutions**
+
+Where possible, leverage open-source models and technologies that can be self-hosted. While there is an initial investment in setting up the infrastructure, over time, self-hosting can significantly reduce costs related to API calls and reliance on third-party services.
+
+#### 3. **Implement Intelligent Caching**
+
+Caching results for frequently asked questions or common tasks can drastically reduce the need for repeated computations or API calls. Intelligent caching systems can determine what information to store and for how long, optimizing the balance between fresh data and computational savings.
+
+#### 4. **Dynamic Scaling and Load Balancing**
+
+Use cloud services that offer dynamic scaling and load balancing to adjust the resources allocated based on the current demand. This ensures you're not paying for idle resources during low-usage periods while still being able to handle high demand when necessary.
+
+#### 5. **Collaborative Cost-Sharing Models**
+
+In scenarios where multiple stakeholders benefit from the multi-agent system, consider implementing a cost-sharing model. This approach distributes the financial burden among the users or beneficiaries, making it more sustainable.
+
+#### 6. **Monitor and Analyze Costs**
+
+Regularly monitor and analyze your usage and associated costs to identify potential savings. Many cloud providers offer tools to track and forecast expenses, helping you to adjust your usage patterns and configurations to minimize costs without sacrificing performance.
+
+### Conclusion
+
+The collaboration of multiple agents in AI systems presents a robust solution to the complexity, specialization, scalability, and robustness challenges inherent in single-agent approaches. While the associated costs can be significant, strategic optimization, leveraging open-source technologies, intelligent caching, dynamic resource management, collaborative cost-sharing, and diligent monitoring can mitigate these expenses. By adopting these strategies, organizations can harness the power of multi-agent systems to tackle complex problems more effectively and efficiently, ensuring the sustainable deployment of these advanced technologies.
\ No newline at end of file
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..0db04b20
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,35 @@
+mkdocs
+mkdocs-material
+mkdocs-glightbox
+mkdocs-git-authors-plugin
+mkdocs-git-revision-date-plugin
+mkdocs-git-committers-plugin
+mkdocstrings
+mike
+mkdocs-jupyter
+mkdocs-git-committers-plugin-2
+mkdocs-git-revision-date-localized-plugin
+mkdocs-redirects
+mkdocs-material-extensions
+mkdocs-simple-hooks
+mkdocs-awesome-pages-plugin
+mkdocs-versioning
+mkdocs-mermaid2-plugin
+mkdocs-include-markdown-plugin
+mkdocs-enumerate-headings-plugin
+mkdocs-autolinks-plugin
+mkdocs-minify-html-plugin
+# mkdocs-autolinks-plugin  # duplicate of the entry above — intentionally commented out
+
+# Requirements for core
+jinja2~=3.0
+markdown~=3.2
+mkdocs-material-extensions~=1.3
+pygments~=2.18
+pymdown-extensions~=10.2
+
+# Requirements for plugins
+babel~=2.10
+colorama~=0.4
+paginate~=0.5
+regex>=2022.4
\ No newline at end of file
diff --git a/docs/swarms/agents/abstractagent.md b/docs/swarms/agents/abstractagent.md
new file mode 100644
index 00000000..8833c164
--- /dev/null
+++ b/docs/swarms/agents/abstractagent.md
@@ -0,0 +1,123 @@
+# swarms.agents
+
+## 1. Introduction
+
+`AbstractAgent` is an abstract class that serves as a foundation for implementing AI agents. An agent is an entity that can communicate with other agents and perform actions. The `AbstractAgent` class allows for customization in the implementation of the `receive` method, enabling different agents to define unique actions for receiving and processing messages.
+
+`AbstractAgent` provides capabilities for managing tools and accessing memory, and has methods for running, chatting, and stepping through communication with other agents.
+
+## 2. Class Definition
+
+```python
+class AbstractAgent:
+    """An abstract class for AI agents.
+
+ An agent can communicate with other agents and perform actions.
+ Different agents can differ in what actions they perform in the `receive` method.
+
+    Agents are complete and composed of:
+
+ Agents = llm + tools + memory
+ """
+
+ def __init__(self, name: str):
+ """
+ Args:
+ name (str): name of the agent.
+ """
+ self._name = name
+
+ @property
+ def name(self):
+ """Get the name of the agent."""
+ return self._name
+
+ def tools(self, tools):
+ """init tools"""
+
+ def memory(self, memory_store):
+ """init memory"""
+
+ def reset(self):
+ """(Abstract method) Reset the agent."""
+
+ def run(self, task: str):
+ """Run the agent once"""
+
+    def _arun(self, task: str):
+        """Run the agent asynchronously"""
+
+ def chat(self, messages: List[Dict]):
+ """Chat with the agent"""
+
+ def _achat(self, messages: List[Dict]):
+ """Asynchronous Chat"""
+
+ def step(self, message: str):
+ """Step through the agent"""
+
+ def _astep(self, message: str):
+ """Asynchronous step"""
+```
+
+## 3. Functionality and Usage
+
+The `AbstractAgent` class represents a generic AI agent and provides a set of methods to interact with it.
+
+To create an instance of an agent, the `name` of the agent should be specified.
+
+### Core Methods
+
+#### 1. `reset`
+
+The `reset` method allows the agent to be reset to its initial state.
+
+```python
+agent.reset()
+```
+
+#### 2. `run`
+
+The `run` method allows the agent to perform a specific task.
+
+```python
+agent.run("some_task")
+```
+
+#### 3. `chat`
+
+The `chat` method enables communication with the agent through a series of messages.
+
+```python
+messages = [{"id": 1, "text": "Hello, agent!"}, {"id": 2, "text": "How are you?"}]
+agent.chat(messages)
+```
+
+#### 4. `step`
+
+The `step` method allows the agent to process a single message.
+
+```python
+agent.step("Hello, agent!")
+```
+
+### Asynchronous Methods
+
+The class also provides asynchronous variants of the core methods.
+
+### Additional Functionality
+
+Additional functionalities for agent initialization and management of tools and memory are also provided.
+
+```python
+agent.tools(some_tools)
+agent.memory(some_memory_store)
+```
+
+## 4. Additional Information and Tips
+
+When implementing a new agent using the `AbstractAgent` class, ensure that the `receive` method is overridden to define the specific behavior of the agent upon receiving messages.
+
+## 5. References and Resources
+
+For further exploration and understanding of AI agents and agent communication, refer to the relevant literature and research on this topic.
diff --git a/docs/swarms/agents/message.md b/docs/swarms/agents/message.md
new file mode 100644
index 00000000..413ac016
--- /dev/null
+++ b/docs/swarms/agents/message.md
@@ -0,0 +1,112 @@
+# The Module/Class Name: Message
+
+In the swarms.agents framework, the class `Message` is used to represent a message with timestamp and optional metadata.
+
+## Overview and Introduction
+
+The `Message` class is a fundamental component that enables the representation of messages within an agent system. Messages contain essential information such as the sender, content, timestamp, and optional metadata.
+
+## Class Definition
+
+### Constructor: `__init__`
+
+The constructor of the `Message` class takes three parameters:
+
+1. `sender` (str): The sender of the message.
+2. `content` (str): The content of the message.
+3. `metadata` (dict or None): Optional metadata associated with the message.
+
+### Methods
+
+1. `__repr__(self)`: Returns a string representation of the `Message` object, including the timestamp, sender, and content.
+
+```python
+class Message:
+ """
+ Represents a message with timestamp and optional metadata.
+
+ Usage
+ --------------
+ mes = Message(
+ sender = "Kye",
+ content = "message"
+ )
+
+ print(mes)
+ """
+
+ def __init__(self, sender, content, metadata=None):
+ self.timestamp = datetime.datetime.now()
+ self.sender = sender
+ self.content = content
+ self.metadata = metadata or {}
+
+ def __repr__(self):
+ """
+ __repr__ represents the string representation of the Message object.
+
+ Returns:
+ (str) A string containing the timestamp, sender, and content of the message.
+ """
+ return f"{self.timestamp} - {self.sender}: {self.content}"
+```
+
+## Functionality and Usage
+
+The `Message` class represents a message in the agent system. Upon initialization, the `timestamp` is set to the current date and time, and the `metadata` is set to an empty dictionary if no metadata is provided.
+
+### Usage Example 1
+
+Creating a `Message` object and displaying its string representation.
+
+```python
+mes = Message(sender="Kye", content="Hello! How are you?")
+
+print(mes)
+```
+
+Output:
+```
+2023-09-20 13:45:00 - Kye: Hello! How are you?
+```
+
+### Usage Example 2
+
+Creating a `Message` object with metadata.
+
+```python
+metadata = {"priority": "high", "category": "urgent"}
+mes_with_metadata = Message(
+ sender="Alice", content="Important update", metadata=metadata
+)
+
+print(mes_with_metadata)
+```
+
+Output:
+```
+2023-09-20 13:46:00 - Alice: Important update
+```
+
+### Usage Example 3
+
+Creating a `Message` object without providing metadata.
+
+```python
+mes_no_metadata = Message(sender="Bob", content="Reminder: Meeting at 2PM")
+
+print(mes_no_metadata)
+```
+
+Output:
+```
+2023-09-20 13:47:00 - Bob: Reminder: Meeting at 2PM
+```
+
+## Additional Information and Tips
+
+When creating a new `Message` object, ensure that the required parameters `sender` and `content` are provided. The `timestamp` will automatically be assigned the current date and time. Optional `metadata` can be included to provide additional context or information associated with the message.
+
+## References and Resources
+
+For further information on the `Message` class and its usage, refer to the official swarms.agents documentation and relevant tutorials related to message handling and communication within the agent system.
diff --git a/docs/swarms/agents/third_party.md b/docs/swarms/agents/third_party.md
new file mode 100644
index 00000000..fc399cc2
--- /dev/null
+++ b/docs/swarms/agents/third_party.md
@@ -0,0 +1,617 @@
+# Swarms Framework: Integrating and Customizing Agent Libraries
+
+Agent-based systems have emerged as a powerful paradigm for solving complex problems and automating tasks.
+
+The swarms framework offers a flexible and extensible approach to working with various agent libraries, allowing developers to create custom agents and integrate them seamlessly into their projects.
+
+In this comprehensive guide, we'll explore the swarms framework, discuss agent handling, and demonstrate how to build custom agents using swarms. We'll also cover the integration of popular agent libraries such as Langchain, Griptape, CrewAI, and Autogen.
+
+## Table of Contents
+
+1. [Introduction to the Swarms Framework](#introduction-to-the-swarms-framework)
+2. [The Need for Wrappers](#the-need-for-wrappers)
+3. [Building Custom Agents with Swarms](#building-custom-agents-with-swarms)
+4. [Integrating Third-Party Agent Libraries](#integrating-third-party-agent-libraries)
+ - [Griptape Integration](#griptape-integration)
+ - [Langchain Integration](#langchain-integration)
+ - [CrewAI Integration](#crewai-integration)
+ - [Autogen Integration](#autogen-integration)
+5. [Advanced Agent Handling Techniques](#advanced-agent-handling-techniques)
+6. [Best Practices for Custom Agent Development](#best-practices-for-custom-agent-development)
+7. [Future Directions and Challenges](#future-directions-and-challenges)
+8. [Conclusion](#conclusion)
+
+## 1. Introduction to the Swarms Framework
+
+The swarms framework is a powerful and flexible system designed to facilitate the creation, management, and coordination of multiple AI agents. It provides a standardized interface for working with various agent types, allowing developers to leverage the strengths of different agent libraries while maintaining a consistent programming model.
+
+At its core, the swarms framework is built around the concept of a parent `Agent` class, which serves as a foundation for creating custom agents and integrating third-party agent libraries. This approach offers several benefits:
+
+1. **Consistency**: By wrapping different agent implementations with a common interface, developers can work with a unified API across various agent types.
+2. **Extensibility**: The framework makes it easy to add new agent types or customize existing ones without affecting the overall system architecture.
+3. **Interoperability**: Agents from different libraries can communicate and collaborate seamlessly within the swarms ecosystem.
+4. **Scalability**: The standardized approach allows for easier scaling of agent-based systems, from simple single-agent applications to complex multi-agent swarms.
+
+## 2. The Need for Wrappers
+
+As the field of AI and agent-based systems continues to grow, numerous libraries and frameworks have emerged, each with its own strengths and specialized features. While this diversity offers developers a wide range of tools to choose from, it also presents challenges in terms of integration and interoperability.
+
+This is where the concept of wrappers becomes crucial. By creating wrappers around different agent libraries, we can:
+
+1. **Unify interfaces**: Standardize the way we interact with agents, regardless of their underlying implementation.
+2. **Simplify integration**: Make it easier to incorporate new agent libraries into existing projects.
+3. **Enable composition**: Allow for the creation of complex agent systems that leverage the strengths of multiple libraries.
+4. **Facilitate maintenance**: Centralize the management of agent-related code and reduce the impact of library-specific changes.
+
+In the context of the swarms framework, wrappers take the form of custom classes that inherit from the parent `Agent` class. These wrapper classes encapsulate the functionality of specific agent libraries while exposing a consistent interface that aligns with the swarms framework.
+
+## 3. Building Custom Agents with Swarms
+
+To illustrate the process of building custom agents using the swarms framework, let's start with a basic example of creating a custom agent class:
+
+```python
+from swarms import Agent
+
+class MyCustomAgent(Agent):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ # Custom initialization logic
+
+ def custom_method(self, *args, **kwargs):
+ # Implement custom logic here
+ pass
+
+ def run(self, task, *args, **kwargs):
+ # Customize the run method
+ response = super().run(task, *args, **kwargs)
+ # Additional custom logic
+ return response
+```
+
+This example demonstrates the fundamental structure of a custom agent class within the swarms framework. Let's break down the key components:
+
+1. **Inheritance**: The class inherits from the `Agent` parent class, ensuring it adheres to the swarms framework's interface.
+
+2. **Initialization**: The `__init__` method calls the parent class's initializer and can include additional custom initialization logic.
+
+3. **Custom methods**: You can add any number of custom methods to extend the agent's functionality.
+
+4. **Run method**: The `run` method is a key component of the agent interface. By overriding this method, you can customize how the agent processes tasks while still leveraging the parent class's functionality.
+
+To create more sophisticated custom agents, you can expand on this basic structure by adding features such as:
+
+- **State management**: Implement methods to manage the agent's internal state.
+- **Communication protocols**: Define how the agent interacts with other agents in the swarm.
+- **Learning capabilities**: Incorporate machine learning models or adaptive behaviors.
+- **Specialized task handling**: Create methods for dealing with specific types of tasks or domains.
+
+By leveraging these custom agent classes, developers can create highly specialized and adaptive agents tailored to their specific use cases while still benefiting from the standardized interface provided by the swarms framework.
+
+## 4. Integrating Third-Party Agent Libraries
+
+One of the key strengths of the swarms framework is its ability to integrate with various third-party agent libraries. In this section, we'll explore how to create wrappers for popular agent libraries, including Griptape, Langchain, CrewAI, and Autogen.
+
+### Griptape Integration
+
+Griptape is a powerful library for building AI agents with a focus on composability and tool use. Let's create a wrapper for a Griptape agent:
+
+```python
+from typing import List, Optional
+
+from griptape.structures import Agent as GriptapeAgent
+from griptape.tools import FileManager, TaskMemoryClient, WebScraper
+
+from swarms import Agent
+
+
+class GriptapeAgentWrapper(Agent):
+ """
+ A wrapper class for the GriptapeAgent from the griptape library.
+ """
+
+ def __init__(self, name: str, tools: Optional[List] = None, *args, **kwargs):
+ """
+ Initialize the GriptapeAgentWrapper.
+
+ Parameters:
+ - name: The name of the agent.
+ - tools: A list of tools to be used by the agent. If not provided, default tools will be used.
+ - *args, **kwargs: Additional arguments to be passed to the parent class constructor.
+ """
+ super().__init__(*args, **kwargs)
+ self.name = name
+ self.tools = tools or [
+ WebScraper(off_prompt=True),
+ TaskMemoryClient(off_prompt=True),
+ FileManager()
+ ]
+ self.griptape_agent = GriptapeAgent(
+ input=f"I am {name}, an AI assistant. How can I help you?",
+ tools=self.tools
+ )
+
+ def run(self, task: str, *args, **kwargs) -> str:
+ """
+ Run a task using the GriptapeAgent.
+
+ Parameters:
+ - task: The task to be performed by the agent.
+
+ Returns:
+ - The response from the GriptapeAgent as a string.
+ """
+ response = self.griptape_agent.run(task, *args, **kwargs)
+ return str(response)
+
+ def add_tool(self, tool) -> None:
+ """
+ Add a tool to the agent.
+
+ Parameters:
+ - tool: The tool to be added.
+ """
+ self.tools.append(tool)
+ self.griptape_agent = GriptapeAgent(
+ input=f"I am {self.name}, an AI assistant. How can I help you?",
+ tools=self.tools
+ )
+
+# Usage example
+griptape_wrapper = GriptapeAgentWrapper("GriptapeAssistant")
+result = griptape_wrapper.run("Load https://example.com, summarize it, and store it in a file called example_summary.txt.")
+print(result)
+
+```
+
+This wrapper encapsulates the functionality of a Griptape agent while exposing it through the swarms framework's interface. It allows for easy customization of tools and provides a simple way to execute tasks using the Griptape agent.
+
+### Langchain Integration
+
+Langchain is a popular framework for developing applications powered by language models. Here's an example of how we can create a wrapper for a Langchain agent:
+
+```python
+from typing import List, Optional
+
+from langchain.agents import AgentExecutor, LLMSingleActionAgent, Tool
+from langchain.chains import LLMChain
+from langchain.llms import OpenAI
+from langchain.prompts import StringPromptTemplate
+from langchain.tools import DuckDuckGoSearchRun
+
+from swarms import Agent
+
+
+class LangchainAgentWrapper(Agent):
+ """
+ Initialize the LangchainAgentWrapper.
+
+ Args:
+ name (str): The name of the agent.
+ tools (List[Tool]): The list of tools available to the agent.
+ llm (Optional[OpenAI], optional): The OpenAI language model to use. Defaults to None.
+ """
+ def __init__(
+ self,
+ name: str,
+ tools: List[Tool],
+ llm: Optional[OpenAI] = None,
+ *args,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.name = name
+ self.tools = tools
+ self.llm = llm or OpenAI(temperature=0)
+
+ prompt = StringPromptTemplate.from_template(
+ "You are {name}, an AI assistant. Answer the following question: {question}"
+ )
+
+ llm_chain = LLMChain(llm=self.llm, prompt=prompt)
+ tool_names = [tool.name for tool in self.tools]
+
+ self.agent = LLMSingleActionAgent(
+ llm_chain=llm_chain,
+            output_parser=None,  # NOTE(review): LLMSingleActionAgent expects an AgentOutputParser instance — confirm before use
+ stop=["\nObservation:"],
+ allowed_tools=tool_names,
+ )
+
+ self.agent_executor = AgentExecutor.from_agent_and_tools(
+ agent=self.agent, tools=self.tools, verbose=True
+ )
+
+ def run(self, task: str, *args, **kwargs):
+ """
+ Run the agent with the given task.
+
+ Args:
+ task (str): The task to be performed by the agent.
+
+ Returns:
+ Any: The result of the agent's execution.
+ """
+ try:
+ return self.agent_executor.run(task)
+ except Exception as e:
+ print(f"An error occurred: {e}")
+
+
+# Usage example
+
+search_tool = DuckDuckGoSearchRun()
+tools = [
+ Tool(
+ name="Search",
+ func=search_tool.run,
+ description="Useful for searching the internet",
+ )
+]
+
+langchain_wrapper = LangchainAgentWrapper("LangchainAssistant", tools)
+result = langchain_wrapper.run("What is the capital of France?")
+print(result)
+```
+
+This wrapper integrates a Langchain agent into the swarms framework, allowing for easy use of Langchain's powerful features such as tool use and multi-step reasoning.
+
+### CrewAI Integration
+
+CrewAI is a library focused on creating and managing teams of AI agents. Let's create a wrapper for a CrewAI agent:
+
+```python
+from swarms import Agent
+from crewai import Agent as CrewAIAgent
+from crewai import Task, Crew, Process
+
+class CrewAIAgentWrapper(Agent):
+ def __init__(self, name, role, goal, backstory, tools=None, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.name = name
+ self.crewai_agent = CrewAIAgent(
+ role=role,
+ goal=goal,
+ backstory=backstory,
+ verbose=True,
+ allow_delegation=False,
+ tools=tools or []
+ )
+
+ def run(self, task, *args, **kwargs):
+ crew_task = Task(
+ description=task,
+ agent=self.crewai_agent
+ )
+ crew = Crew(
+ agents=[self.crewai_agent],
+ tasks=[crew_task],
+ process=Process.sequential
+ )
+ result = crew.kickoff()
+ return result
+
+# Usage example
+from crewai_tools import SerperDevTool
+
+search_tool = SerperDevTool()
+
+crewai_wrapper = CrewAIAgentWrapper(
+ "ResearchAnalyst",
+ role='Senior Research Analyst',
+ goal='Uncover cutting-edge developments in AI and data science',
+ backstory="""You work at a leading tech think tank.
+ Your expertise lies in identifying emerging trends.
+ You have a knack for dissecting complex data and presenting actionable insights.""",
+ tools=[search_tool]
+)
+
+result = crewai_wrapper.run("Analyze the latest trends in quantum computing and summarize the key findings.")
+print(result)
+```
+
+This wrapper allows us to use CrewAI agents within the swarms framework, leveraging CrewAI's focus on role-based agents and collaborative task execution.
+
+### Autogen Integration
+
+Autogen is a framework for building conversational AI agents. Here's how we can create a wrapper for an Autogen agent:
+
+```python
+from swarms import Agent
+from autogen import ConversableAgent
+
+class AutogenAgentWrapper(Agent):
+ def __init__(self, name, llm_config, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.name = name
+ self.autogen_agent = ConversableAgent(
+ name=name,
+ llm_config=llm_config,
+ code_execution_config=False,
+ function_map=None,
+ human_input_mode="NEVER"
+ )
+
+ def run(self, task, *args, **kwargs):
+ messages = [{"content": task, "role": "user"}]
+ response = self.autogen_agent.generate_reply(messages)
+ return response
+
+# Usage example
+import os
+
+llm_config = {
+ "config_list": [{"model": "gpt-4", "api_key": os.environ.get("OPENAI_API_KEY")}]
+}
+
+autogen_wrapper = AutogenAgentWrapper("AutogenAssistant", llm_config)
+result = autogen_wrapper.run("Tell me a joke about programming.")
+print(result)
+```
+
+This wrapper integrates Autogen's ConversableAgent into the swarms framework, allowing for easy use of Autogen's conversational AI capabilities.
+
+By creating these wrappers, we can seamlessly integrate agents from various libraries into the swarms framework, allowing for a unified approach to agent management and task execution.
+
+## 5. Advanced Agent Handling Techniques
+
+As you build more complex systems using the swarms framework and integrated agent libraries, you'll need to employ advanced techniques for agent handling. Here are some strategies to consider:
+
+### 1. Dynamic Agent Creation
+
+Implement a factory pattern to create agents dynamically based on task requirements:
+
+```python
+class AgentFactory:
+ @staticmethod
+ def create_agent(agent_type, *args, **kwargs):
+ if agent_type == "griptape":
+ return GriptapeAgentWrapper(*args, **kwargs)
+ elif agent_type == "langchain":
+ return LangchainAgentWrapper(*args, **kwargs)
+ elif agent_type == "crewai":
+ return CrewAIAgentWrapper(*args, **kwargs)
+ elif agent_type == "autogen":
+ return AutogenAgentWrapper(*args, **kwargs)
+ else:
+ raise ValueError(f"Unknown agent type: {agent_type}")
+
+# Usage
+agent = AgentFactory.create_agent("griptape", "DynamicGriptapeAgent")
+```
+
+
+### 2. Agent Pooling
+
+Implement an agent pool to manage and reuse agents efficiently:
+
+```python
+from queue import Queue
+
+class AgentPool:
+ def __init__(self, pool_size=5):
+ self.pool = Queue(maxsize=pool_size)
+ self.pool_size = pool_size
+
+ def get_agent(self, agent_type, *args, **kwargs):
+ if not self.pool.empty():
+ return self.pool.get()
+ else:
+ return AgentFactory.create_agent(agent_type, *args, **kwargs)
+
+ def release_agent(self, agent):
+ if self.pool.qsize() < self.pool_size:
+ self.pool.put(agent)
+
+# Usage
+pool = AgentPool()
+agent = pool.get_agent("langchain", "PooledLangchainAgent")
+result = agent.run("Perform a task")
+pool.release_agent(agent)
+```
+
+### 3. Agent Composition
+
+Create composite agents that combine the capabilities of multiple agent types:
+
+```python
+class CompositeAgent(Agent):
+ def __init__(self, name, agents):
+ super().__init__()
+ self.name = name
+ self.agents = agents
+
+ def run(self, task):
+ results = []
+ for agent in self.agents:
+ results.append(agent.run(task))
+ return self.aggregate_results(results)
+
+ def aggregate_results(self, results):
+ # Implement your own logic to combine results
+ return "\n".join(results)
+
+# Usage
+griptape_agent = GriptapeAgentWrapper("GriptapeComponent")
+langchain_agent = LangchainAgentWrapper("LangchainComponent", [])
+composite_agent = CompositeAgent("CompositeAssistant", [griptape_agent, langchain_agent])
+result = composite_agent.run("Analyze the pros and cons of quantum computing")
+```
+
+### 4. Agent Specialization
+
+Create specialized agents for specific domains or tasks:
+
+```python
+class DataAnalysisAgent(Agent):
+ def __init__(self, name, analysis_tools):
+ super().__init__()
+ self.name = name
+ self.analysis_tools = analysis_tools
+
+ def run(self, data):
+ results = {}
+ for tool in self.analysis_tools:
+ results[tool.name] = tool.analyze(data)
+ return results
+
+# Usage
+import pandas as pd
+from sklearn.preprocessing import StandardScaler
+from sklearn.decomposition import PCA
+
+class AnalysisTool:
+ def __init__(self, name, func):
+ self.name = name
+ self.func = func
+
+ def analyze(self, data):
+ return self.func(data)
+
+tools = [
+ AnalysisTool("Descriptive Stats", lambda data: data.describe()),
+ AnalysisTool("Correlation", lambda data: data.corr()),
+ AnalysisTool("PCA", lambda data: PCA().fit_transform(StandardScaler().fit_transform(data)))
+]
+
+data_agent = DataAnalysisAgent("DataAnalyst", tools)
+df = pd.read_csv("sample_data.csv")
+analysis_results = data_agent.run(df)
+```
+
+### 5. Agent Monitoring and Logging
+
+Implement a monitoring system to track agent performance and log their activities:
+
+```python
+import logging
+from functools import wraps
+
+def log_agent_activity(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ logging.info(f"Agent {self.name} started task: {args[0]}")
+ result = func(self, *args, **kwargs)
+ logging.info(f"Agent {self.name} completed task. Result length: {len(str(result))}")
+ return result
+ return wrapper
+
+class MonitoredAgent(Agent):
+ def __init__(self, name, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.name = name
+
+ @log_agent_activity
+ def run(self, task, *args, **kwargs):
+ return super().run(task, *args, **kwargs)
+
+# Usage
+logging.basicConfig(level=logging.INFO)
+monitored_agent = MonitoredAgent("MonitoredGriptapeAgent")
+result = monitored_agent.run("Summarize the latest AI research papers")
+```
+
+## 6. Best Practices for Custom Agent Development
+
+When developing custom agents using the swarms framework, consider the following best practices:
+
+1. **Modular Design**: Design your agents with modularity in mind. Break down complex functionality into smaller, reusable components.
+
+2. **Consistent Interfaces**: Maintain consistent interfaces across your custom agents to ensure interoperability within the swarms framework.
+
+3. **Error Handling**: Implement robust error handling and graceful degradation in your agents to ensure system stability.
+
+4. **Performance Optimization**: Optimize your agents for performance, especially when dealing with resource-intensive tasks or large-scale deployments.
+
+5. **Testing and Validation**: Develop comprehensive test suites for your custom agents to ensure their reliability and correctness.
+
+6. **Documentation**: Provide clear and detailed documentation for your custom agents, including their capabilities, limitations, and usage examples.
+
+7. **Versioning**: Implement proper versioning for your custom agents to manage updates and maintain backwards compatibility.
+
+8. **Security Considerations**: Implement security best practices, especially when dealing with sensitive data or integrating with external services.
+
+Here's an example that incorporates some of these best practices:
+
+```python
+import logging
+from typing import Dict, Any
+from swarms import Agent
+
+class SecureCustomAgent(Agent):
+ def __init__(self, name: str, api_key: str, version: str = "1.0.0", *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.name = name
+ self._api_key = api_key # Store sensitive data securely
+ self.version = version
+ self.logger = logging.getLogger(f"{self.__class__.__name__}.{self.name}")
+
+ def run(self, task: str, *args, **kwargs) -> Dict[str, Any]:
+ try:
+ self.logger.info(f"Agent {self.name} (v{self.version}) starting task: {task}")
+ result = self._process_task(task)
+ self.logger.info(f"Agent {self.name} completed task successfully")
+ return {"status": "success", "result": result}
+ except Exception as e:
+ self.logger.error(f"Error in agent {self.name}: {str(e)}")
+ return {"status": "error", "message": str(e)}
+
+ def _process_task(self, task: str) -> str:
+ # Implement the core logic of your agent here
+ # This is a placeholder implementation
+ return f"Processed task: {task}"
+
+ @property
+ def api_key(self) -> str:
+ # Provide a secure way to access the API key
+ return self._api_key
+
+ def __repr__(self) -> str:
+ return f"<{self.__class__.__name__} name='{self.name}' version='{self.version}'>"
+
+# Usage
+logging.basicConfig(level=logging.INFO)
+secure_agent = SecureCustomAgent("SecureAgent", api_key="your-api-key-here")
+result = secure_agent.run("Perform a secure operation")
+print(result)
+```
+
+This example demonstrates several best practices:
+- Modular design with separate methods for initialization and task processing
+- Consistent interface adhering to the swarms framework
+- Error handling and logging
+- Secure storage of sensitive data (API key)
+- Version tracking
+- Type hinting for improved code readability and maintainability
+- Informative string representation of the agent
+
+## 7. Future Directions and Challenges
+
+As the field of AI and agent-based systems continues to evolve, the swarms framework and its ecosystem of integrated agent libraries will face new opportunities and challenges. Some potential future directions and areas of focus include:
+
+1. **Enhanced Interoperability**: Developing more sophisticated protocols for agent communication and collaboration across different libraries and frameworks.
+
+2. **Scalability**: Improving the framework's ability to handle large-scale swarms of agents, potentially leveraging distributed computing techniques.
+
+3. **Adaptive Learning**: Incorporating more advanced machine learning techniques to allow agents to adapt and improve their performance over time.
+
+4. **Ethical AI**: Integrating ethical considerations and safeguards into the agent development process to ensure responsible AI deployment.
+
+5. **Human-AI Collaboration**: Exploring new paradigms for human-AI interaction and collaboration within the swarms framework.
+
+6. **Domain-Specific Optimizations**: Developing specialized agent types and tools for specific industries or problem domains.
+
+7. **Explainability and Transparency**: Improving the ability to understand and explain agent decision-making processes.
+
+8. **Security and Privacy**: Enhancing the framework's security features to protect against potential vulnerabilities and ensure data privacy.
+
+As these areas develop, developers working with the swarms framework will need to stay informed about new advancements and be prepared to adapt their agent implementations accordingly.
+
+## 8. Conclusion
+
+The swarms framework provides a powerful and flexible foundation for building custom agents and integrating various agent libraries. By leveraging the techniques and best practices discussed in this guide, developers can create sophisticated, efficient, and scalable agent-based systems.
+
+The ability to seamlessly integrate agents from libraries like Griptape, Langchain, CrewAI, and Autogen opens up a world of possibilities for creating diverse and specialized AI applications. Whether you're building a complex multi-agent system for data analysis, a conversational AI platform, or a collaborative problem-solving environment, the swarms framework offers the tools and flexibility to bring your vision to life.
+
+As you embark on your journey with the swarms framework, remember that the field of AI and agent-based systems is rapidly evolving. Stay curious, keep experimenting, and don't hesitate to push the boundaries of what's possible with custom agents and integrated libraries.
+
+By embracing the power of the swarms framework and the ecosystem of agent libraries it supports, you're well-positioned to create the next generation of intelligent, adaptive, and collaborative AI systems. Happy agent building!
\ No newline at end of file
diff --git a/docs/swarms/agents/tool_agent.md b/docs/swarms/agents/tool_agent.md
new file mode 100644
index 00000000..2bedb58d
--- /dev/null
+++ b/docs/swarms/agents/tool_agent.md
@@ -0,0 +1,304 @@
+# ToolAgent Documentation
+
+The `ToolAgent` class is a specialized agent that facilitates the execution of specific tasks using a model and tokenizer. It is part of the `swarms` module and inherits from the `Agent` class. This agent is designed to generate functions based on a given JSON schema and task, making it highly adaptable for various use cases, including natural language processing and data generation.
+
+The `ToolAgent` class plays a crucial role in leveraging pre-trained models and tokenizers to automate tasks that require the interpretation and generation of structured data. By providing a flexible interface and robust error handling, it ensures smooth integration and efficient task execution.
+
+### Parameters
+
+| Parameter | Type | Description |
+|--------------------|-----------------------------------|---------------------------------------------------------------------------------|
+| `name` | `str` | The name of the tool agent. Default is "Function Calling Agent". |
+| `description` | `str` | A description of the tool agent. Default is "Generates a function based on the input json schema and the task". |
+| `model` | `Any` | The model used by the tool agent. |
+| `tokenizer` | `Any` | The tokenizer used by the tool agent. |
+| `json_schema` | `Any` | The JSON schema used by the tool agent. |
+| `max_number_tokens`| `int` | The maximum number of tokens for generation. Default is 500. |
+| `parsing_function` | `Optional[Callable]` | An optional parsing function to process the output of the tool agent. |
+| `llm` | `Any` | An optional large language model to be used by the tool agent. |
+| `*args` | Variable length argument list | Additional positional arguments. |
+| `**kwargs` | Arbitrary keyword arguments | Additional keyword arguments. |
+
+### Attributes
+
+| Attribute | Type | Description |
+|--------------------|-------|----------------------------------------------|
+| `name` | `str` | The name of the tool agent. |
+| `description` | `str` | A description of the tool agent. |
+| `model` | `Any` | The model used by the tool agent. |
+| `tokenizer` | `Any` | The tokenizer used by the tool agent. |
+| `json_schema` | `Any` | The JSON schema used by the tool agent. |
+
+### Methods
+
+#### `run`
+
+```python
+def run(self, task: str, *args, **kwargs) -> Any:
+```
+
+**Parameters:**
+
+| Parameter | Type | Description |
+|------------|---------------------------|------------------------------------------------------------------|
+| `task` | `str` | The task to be performed by the tool agent. |
+| `*args` | Variable length argument list | Additional positional arguments. |
+| `**kwargs` | Arbitrary keyword arguments | Additional keyword arguments. |
+
+**Returns:**
+
+- The output of the tool agent.
+
+**Raises:**
+
+- `Exception`: If an error occurs during the execution of the tool agent.
+
+## Functionality and Usage
+
+The `ToolAgent` class provides a structured way to perform tasks using a model and tokenizer. It initializes with essential parameters and attributes, and the `run` method facilitates the execution of the specified task.
+
+### Initialization
+
+The initialization of a `ToolAgent` involves specifying its name, description, model, tokenizer, JSON schema, maximum number of tokens, optional parsing function, and optional large language model.
+
+```python
+agent = ToolAgent(
+ name="My Tool Agent",
+ description="A tool agent for specific tasks",
+ model=model,
+ tokenizer=tokenizer,
+ json_schema=json_schema,
+ max_number_tokens=1000,
+ parsing_function=my_parsing_function,
+ llm=my_llm
+)
+```
+
+### Running a Task
+
+To execute a task using the `ToolAgent`, the `run` method is called with the task description and any additional arguments or keyword arguments.
+
+```python
+result = agent.run("Generate a person's information based on the given schema.")
+print(result)
+```
+
+### Detailed Examples
+
+#### Example 1: Basic Usage
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from swarms import ToolAgent
+
+model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
+tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
+
+json_schema = {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "age": {"type": "number"},
+ "is_student": {"type": "boolean"},
+ "courses": {
+ "type": "array",
+ "items": {"type": "string"}
+ }
+ }
+}
+
+task = "Generate a person's information based on the following schema:"
+agent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)
+generated_data = agent.run(task)
+
+print(generated_data)
+```
+
+#### Example 2: Using a Parsing Function
+
+```python
+def parse_output(output):
+ # Custom parsing logic
+ return output
+
+agent = ToolAgent(
+ name="Parsed Tool Agent",
+ description="A tool agent with a parsing function",
+ model=model,
+ tokenizer=tokenizer,
+ json_schema=json_schema,
+ parsing_function=parse_output
+)
+
+task = "Generate a person's information with custom parsing:"
+parsed_data = agent.run(task)
+
+print(parsed_data)
+```
+
+#### Example 3: Specifying Maximum Number of Tokens
+
+```python
+agent = ToolAgent(
+ name="Token Limited Tool Agent",
+ description="A tool agent with a token limit",
+ model=model,
+ tokenizer=tokenizer,
+ json_schema=json_schema,
+ max_number_tokens=200
+)
+
+task = "Generate a concise person's information:"
+limited_data = agent.run(task)
+
+print(limited_data)
+```
+
+
+## Full Usage
+```python
+
+from pydantic import BaseModel, Field
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from swarms import ToolAgent
+from swarms.tools.json_utils import base_model_to_json
+
+# Model name
+model_name = "CohereForAI/c4ai-command-r-v01-4bit"
+
+# Load the pre-trained model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ device_map="auto",
+)
+
+# Load the pre-trained tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+
+# Initialize the schema for the API example request
+class APIExampleRequestSchema(BaseModel):
+ endpoint: str = Field(
+ ..., description="The API endpoint for the example request"
+ )
+ method: str = Field(
+ ..., description="The HTTP method for the example request"
+ )
+ headers: dict = Field(
+ ..., description="The headers for the example request"
+ )
+ body: dict = Field(..., description="The body of the example request")
+ response: dict = Field(
+ ...,
+ description="The expected response of the example request",
+ )
+
+
+# Convert the schema to a JSON string
+api_example_schema = base_model_to_json(APIExampleRequestSchema)
+
+
+# Define the task to generate an example API request
+task = "Generate an example API request using this code:\n"
+
+# Create an instance of the ToolAgent class
+agent = ToolAgent(
+ name="Command R Tool Agent",
+ description=(
+ "An agent that generates an API request using the Command R"
+ " model."
+ ),
+ model=model,
+ tokenizer=tokenizer,
+ json_schema=api_example_schema,
+)
+
+# Run the agent to generate the example API request
+generated_data = agent.run(task)
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
+
+
+
+```
+
+
+## Jamba + ToolAgent
+```python
+from pydantic import BaseModel, Field
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from swarms import ToolAgent
+from swarms.tools.json_utils import base_model_to_json
+
+# Model name
+model_name = "ai21labs/Jamba-v0.1"
+
+# Load the pre-trained model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ device_map="auto",
+)
+
+# Load the pre-trained tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+
+# Initialize the schema for the API example request
+class APIExampleRequestSchema(BaseModel):
+ endpoint: str = Field(
+ ..., description="The API endpoint for the example request"
+ )
+ method: str = Field(
+ ..., description="The HTTP method for the example request"
+ )
+ headers: dict = Field(
+ ..., description="The headers for the example request"
+ )
+ body: dict = Field(..., description="The body of the example request")
+ response: dict = Field(
+ ...,
+ description="The expected response of the example request",
+ )
+
+
+# Convert the schema to a JSON string
+api_example_schema = base_model_to_json(APIExampleRequestSchema)
+
+
+# Define the task to generate an example API request
+task = "Generate an example API request using this code:\n"
+
+# Create an instance of the ToolAgent class
+agent = ToolAgent(
+    name="Jamba Tool Agent",
+    description=(
+        "An agent that generates an API request using the Jamba"
+        " model."
+    ),
+ model=model,
+ tokenizer=tokenizer,
+ json_schema=api_example_schema,
+)
+
+# Run the agent to generate the example API request
+generated_data = agent(task)
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
+```
+
+## Additional Information and Tips
+
+- Ensure that either the `model` or `llm` parameter is provided during initialization. If neither is provided, the `ToolAgent` will raise an exception.
+- The `parsing_function` parameter is optional but can be very useful for post-processing the output of the tool agent.
+- Adjust the `max_number_tokens` parameter to control the length of the generated output, depending on the requirements of the task.
+
+## References and Resources
+
+- [Transformers Documentation](https://huggingface.co/transformers/)
+- [Loguru Logger](https://loguru.readthedocs.io/en/stable/)
+
+This documentation provides a comprehensive guide to the `ToolAgent` class, including its initialization, usage, and practical examples. By following the detailed instructions and examples, developers can effectively utilize the `ToolAgent` for various tasks involving model and tokenizer-based operations.
\ No newline at end of file
diff --git a/docs/swarms/artifacts/artifact.md b/docs/swarms/artifacts/artifact.md
new file mode 100644
index 00000000..f9551b2f
--- /dev/null
+++ b/docs/swarms/artifacts/artifact.md
@@ -0,0 +1,243 @@
+# `Artifact`
+
+The `Artifact` class represents a file artifact, encapsulating the file's path, type, contents, versions, and edit count. This class provides a comprehensive way to manage file versions, edit contents, and handle various file-related operations such as saving, loading, and exporting to JSON.
+
+The `Artifact` class is particularly useful in contexts where file version control and content management are essential. By keeping track of the number of edits and maintaining a version history, it allows for robust file handling and auditability.
+
+## Class Definition
+
+### Artifact
+
+
+| Attribute | Type | Default Value | Description |
+|-------------|---------------------|------------------|--------------------------------------------------|
+| `file_path` | `str` | N/A | The path to the file. |
+| `file_type` | `str` | N/A | The type of the file. |
+| `contents` | `str` | `""` | The contents of the file. |
+| `versions` | `List[FileVersion]` | `[]` | The list of file versions. |
+| `edit_count`| `int` | `0` | The number of times the file has been edited. |
+
+### Parameters and Validation
+
+- `file_path`: A string representing the file path.
+- `file_type`: A string representing the file type. This attribute is validated to ensure it matches supported file types based on the file extension if not provided.
+- `contents`: A string representing the contents of the file. Defaults to an empty string.
+- `versions`: A list of `FileVersion` instances representing the version history of the file. Defaults to an empty list.
+- `edit_count`: An integer representing the number of edits made to the file. Defaults to 0.
+
+### Methods
+
+The `Artifact` class includes various methods for creating, editing, saving, loading, and exporting file artifacts.
+
+#### `create`
+
+| Parameter | Type | Description |
+|--------------------|--------|----------------------------------------|
+| `initial_content` | `str` | The initial content of the file. |
+
+**Usage Example:**
+
+```python
+artifact = Artifact(file_path="example.txt", file_type="txt")
+artifact.create(initial_content="Initial file content")
+```
+
+#### `edit`
+
+
+| Parameter | Type | Description |
+|---------------|--------|----------------------------------------|
+| `new_content` | `str` | The new content of the file. |
+
+**Usage Example:**
+
+```python
+artifact.edit(new_content="Updated file content")
+```
+
+#### `save`
+
+**Usage Example:**
+
+```python
+artifact.save()
+```
+
+#### `load`
+
+**Usage Example:**
+
+```python
+artifact.load()
+```
+
+#### `get_version`
+
+
+| Parameter | Type | Description |
+|-------------------|-------|-----------------------------------------|
+| `version_number` | `int` | The version number to retrieve. |
+
+**Usage Example:**
+
+```python
+version = artifact.get_version(version_number=1)
+```
+
+#### `get_contents`
+
+**Usage Example:**
+
+```python
+current_contents = artifact.get_contents()
+```
+
+#### `get_version_history`
+
+
+**Usage Example:**
+
+```python
+version_history = artifact.get_version_history()
+```
+
+#### `export_to_json`
+
+
+| Parameter | Type | Description |
+|-------------|-------|----------------------------------------------|
+| `file_path` | `str` | The path to the JSON file to save the artifact.|
+
+**Usage Example:**
+
+```python
+artifact.export_to_json(file_path="artifact.json")
+```
+
+#### `import_from_json`
+
+
+| Parameter | Type | Description |
+|-------------|-------|--------------------------------------------------|
+| `file_path` | `str` | The path to the JSON file to import the artifact from.|
+
+**Usage Example:**
+
+```python
+imported_artifact = Artifact.import_from_json(file_path="artifact.json")
+```
+
+#### `get_metrics`
+
+**Usage Example:**
+
+```python
+metrics = artifact.get_metrics()
+```
+
+#### `to_dict`
+
+**Usage Example:**
+
+```python
+artifact_dict = artifact.to_dict()
+```
+
+#### `from_dict`
+
+| Parameter | Type | Description |
+|-----------|------------------|--------------------------------------------------|
+| `data` | `Dict[str, Any]` | The dictionary representation of the artifact. |
+
+**Usage Example:**
+
+```python
+artifact_data = {
+ "file_path": "example.txt",
+ "file_type": "txt",
+ "contents": "File content",
+ "versions": [],
+ "edit_count": 0
+}
+artifact = Artifact.from_dict(artifact_data)
+```
+
+## Additional Information and Tips
+
+- The `Artifact` class uses the `pydantic` library to handle data validation and serialization.
+- When editing the artifact, ensure that the `file_path` is set correctly to avoid file operation errors.
+- Use the `get_version` and `get_version_history` methods to maintain a clear audit trail of changes to the file.
+- The `export_to_json` and `import_from_json` methods are useful for backing up and restoring the state of an artifact.
+
+## References and Resources
+
+- [Pydantic Documentation](https://pydantic-docs.helpmanual.io/)
+- [Python os.path module](https://docs.python.org/3/library/os.path.html)
+- [JSON Documentation](https://docs.python.org/3/library/json.html)
+
+## Examples of Usage
+
+### Example 1: Creating and Editing an Artifact
+
+```python
+from datetime import datetime
+from pydantic import BaseModel, Field, validator
+from typing import List, Dict, Any, Union
+import os
+import json
+
+# Define FileVersion class
+class FileVersion(BaseModel):
+ version_number: int
+ content: str
+ timestamp: datetime
+
+# Artifact class definition goes here
+
+# Create an artifact
+artifact = Artifact(file_path="example.txt", file_type="txt")
+artifact.create(initial_content="Initial file content")
+
+# Edit the artifact
+artifact.edit(new_content="Updated file content")
+
+# Save the artifact to a file
+artifact.save()
+
+# Load the artifact from the file
+artifact.load()
+
+# Print the current contents of the artifact
+print(artifact.get_contents())
+
+# Print the version history
+print(artifact.get_version_history())
+```
+
+### Example 2: Exporting and Importing an Artifact
+
+```python
+# Export the artifact to a JSON file
+artifact.export_to_json(file_path="artifact.json")
+
+
+
+# Import the artifact from a JSON file
+imported_artifact = Artifact.import_from_json(file_path="artifact.json")
+
+# Print the metrics of the imported artifact
+print(imported_artifact.get_metrics())
+```
+
+### Example 3: Converting an Artifact to and from a Dictionary
+
+```python
+# Convert the artifact to a dictionary
+artifact_dict = artifact.to_dict()
+
+# Create a new artifact from the dictionary
+new_artifact = Artifact.from_dict(artifact_dict)
+
+# Print the metrics of the new artifact
+print(new_artifact.get_metrics())
+```
\ No newline at end of file
diff --git a/docs/swarms/concept/swarm_architectures.md b/docs/swarms/concept/swarm_architectures.md
new file mode 100644
index 00000000..293e58cb
--- /dev/null
+++ b/docs/swarms/concept/swarm_architectures.md
@@ -0,0 +1,205 @@
+# Swarm Architectures
+
+### Hierarchical Swarm
+
+**Overview:**
+A Hierarchical Swarm architecture organizes the agents in a tree-like structure. Higher-level agents delegate tasks to lower-level agents, which can further divide tasks among themselves. This structure allows for efficient task distribution and scalability.
+
+**Use-Cases:**
+
+- Complex decision-making processes where tasks can be broken down into subtasks.
+
+- Multi-stage workflows such as data processing pipelines or hierarchical reinforcement learning.
+
+```mermaid
+graph TD
+ A[Root Agent] --> B1[Sub-Agent 1]
+ A --> B2[Sub-Agent 2]
+ B1 --> C1[Sub-Agent 1.1]
+ B1 --> C2[Sub-Agent 1.2]
+ B2 --> C3[Sub-Agent 2.1]
+ B2 --> C4[Sub-Agent 2.2]
+```
+
+---
+
+### Parallel Swarm
+
+**Overview:**
+In a Parallel Swarm architecture, multiple agents operate independently and simultaneously on different tasks. Each agent works on its own task without dependencies on the others.
+
+**Use-Cases:**
+- Tasks that can be processed independently, such as parallel data analysis.
+- Large-scale simulations where multiple scenarios are run in parallel.
+
+```mermaid
+graph LR
+ A[Coordinator Agent] --> B1[Sub-Agent 1]
+ A --> B2[Sub-Agent 2]
+ A --> B3[Sub-Agent 3]
+ A --> B4[Sub-Agent 4]
+```
+
+---
+
+### Sequential Swarm
+
+**Overview:**
+A Sequential Swarm architecture processes tasks in a linear sequence. Each agent completes its task before passing the result to the next agent in the chain. This architecture ensures orderly processing and is useful when tasks have dependencies.
+
+**Use-Cases:**
+- Workflows where each step depends on the previous one, such as assembly lines or sequential data processing.
+
+- Scenarios requiring strict order of operations.
+
+```mermaid
+graph TD
+ A[First Agent] --> B[Second Agent]
+ B --> C[Third Agent]
+ C --> D[Fourth Agent]
+```
+
+---
+
+### Round Robin Swarm
+
+**Overview:**
+In a Round Robin Swarm architecture, tasks are distributed cyclically among a set of agents. Each agent takes turns handling tasks in a rotating order, ensuring even distribution of workload.
+
+**Use-Cases:**
+- Load balancing in distributed systems.
+
+- Scenarios requiring fair distribution of tasks to avoid overloading any single agent.
+
+```mermaid
+graph TD
+ A[Coordinator Agent] --> B1[Sub-Agent 1]
+ A --> B2[Sub-Agent 2]
+ A --> B3[Sub-Agent 3]
+ A --> B4[Sub-Agent 4]
+ B1 --> A
+ B2 --> A
+ B3 --> A
+ B4 --> A
+```
+
+---
+
+### Federated Swarm
+
+**Overview:**
+A Federated Swarm architecture involves multiple independent swarms collaborating to complete a task. Each swarm operates autonomously but can share information and results with other swarms.
+
+**Use-Cases:**
+- Distributed learning systems where data is processed across multiple nodes.
+
+- Scenarios requiring collaboration between different teams or departments.
+
+```mermaid
+graph TD
+ A[Central Coordinator]
+ subgraph Swarm1
+ B1[Agent 1.1] --> B2[Agent 1.2]
+ B2 --> B3[Agent 1.3]
+ end
+ subgraph Swarm2
+ C1[Agent 2.1] --> C2[Agent 2.2]
+ C2 --> C3[Agent 2.3]
+ end
+ subgraph Swarm3
+ D1[Agent 3.1] --> D2[Agent 3.2]
+ D2 --> D3[Agent 3.3]
+ end
+ B1 --> A
+ C1 --> A
+ D1 --> A
+```
+
+---
+
+### Star Swarm
+
+**Overview:**
+A Star Swarm architecture features a central agent that coordinates the activities of several peripheral agents. The central agent assigns tasks to the peripheral agents and aggregates their results.
+
+**Use-Cases:**
+- Centralized decision-making processes.
+
+- Scenarios requiring a central authority to coordinate multiple workers.
+
+```mermaid
+graph TD
+ A[Central Agent] --> B1[Peripheral Agent 1]
+ A --> B2[Peripheral Agent 2]
+ A --> B3[Peripheral Agent 3]
+ A --> B4[Peripheral Agent 4]
+```
+
+---
+
+### Mesh Swarm
+
+**Overview:**
+A Mesh Swarm architecture allows for a fully connected network of agents where each agent can communicate with any other agent. This setup provides high flexibility and redundancy.
+
+**Use-Cases:**
+- Complex systems requiring high fault tolerance and redundancy.
+
+- Scenarios involving dynamic and frequent communication between agents.
+
+```mermaid
+graph TD
+ A1[Agent 1] --> A2[Agent 2]
+ A1 --> A3[Agent 3]
+ A1 --> A4[Agent 4]
+ A2 --> A3
+ A2 --> A4
+ A3 --> A4
+```
+
+---
+
+### Cascade Swarm
+
+**Overview:**
+A Cascade Swarm architecture involves a chain of agents where each agent triggers the next one in a cascade effect. This is useful for scenarios where tasks need to be processed in stages, and each stage initiates the next.
+
+**Use-Cases:**
+- Multi-stage processing tasks such as data transformation pipelines.
+
+- Event-driven architectures where one event triggers subsequent actions.
+
+```mermaid
+graph TD
+ A[Trigger Agent] --> B[Agent 1]
+ B --> C[Agent 2]
+ C --> D[Agent 3]
+ D --> E[Agent 4]
+```
+
+---
+
+### Hybrid Swarm
+
+**Overview:**
+A Hybrid Swarm architecture combines elements of various architectures to suit specific needs. It might integrate hierarchical and parallel components, or mix sequential and round robin patterns.
+
+**Use-Cases:**
+- Complex workflows requiring a mix of different processing strategies.
+
+- Custom scenarios tailored to specific operational requirements.
+
+```mermaid
+graph TD
+ A[Root Agent] --> B1[Sub-Agent 1]
+ A --> B2[Sub-Agent 2]
+ B1 --> C1[Parallel Agent 1]
+ B1 --> C2[Parallel Agent 2]
+ B2 --> C3[Sequential Agent 1]
+ C3 --> C4[Sequential Agent 2]
+ C3 --> C5[Sequential Agent 3]
+```
+
+---
+
+These swarm architectures provide different models for organizing and orchestrating large language models (LLMs) to perform various tasks efficiently. Depending on the specific requirements of your project, you can choose the appropriate architecture or even combine elements from multiple architectures to create a hybrid solution.
\ No newline at end of file
diff --git a/docs/swarms/ecosystem.md b/docs/swarms/ecosystem.md
new file mode 100644
index 00000000..4667a125
--- /dev/null
+++ b/docs/swarms/ecosystem.md
@@ -0,0 +1,75 @@
+
+# Swarm Ecosystem
+
+Welcome to the Swarm Ecosystem, a comprehensive suite of tools and frameworks designed to empower developers to orchestrate swarms of autonomous agents for a variety of applications. Dive into our ecosystem below:
+
+[Full Github Link](https://github.com/kyegomez/swarm-ecosystem)
+
+## Getting Started
+
+| Project | Description | Link |
+| ------- | ----------- | ---- |
+| **Swarms Framework** | A Python-based framework that enables the creation, deployment, and scaling of reliable swarms of autonomous agents aimed at automating complex workflows. | [Swarms Framework](https://github.com/kyegomez/swarms) |
+| **Swarms Cloud** | A cloud-based service offering Swarms-as-a-Service with guaranteed 100% uptime, cutting-edge performance, and enterprise-grade reliability for seamless scaling and management of swarms. | [Swarms Cloud](https://github.com/kyegomez/swarms-cloud) |
+| **Swarms Core** | Provides backend utilities focusing on concurrency, multi-threading, and advanced execution strategies, developed in Rust for maximum efficiency and performance. | [Swarms Core](https://github.com/kyegomez/swarms-core) |
+| **Swarm Foundation Models** | A dedicated repository for the creation, optimization, and training of groundbreaking swarming models. Features innovative models like PSO with transformers, ant colony optimizations, and more, aiming to surpass traditional architectures like Transformers and SSMs. Open for community contributions and ideas. | [Swarm Foundation Models](https://github.com/kyegomez/swarms-pytorch) |
+| **Swarm Platform** | The Swarms dashboard Platform | [Swarm Platform](https://github.com/kyegomez/swarms-platform) |
+| **Swarms JS** | Swarms Framework in JS. Orchestrate any agents and enable multi-agent collaboration between various agents! | [Swarm JS](https://github.com/kyegomez/swarms-js) |
+| **Swarms Memory** | Easy to use, reliable, and bleeding-edge RAG systems! | [Swarms Memory](https://github.com/kyegomez/swarms-memory) |
+| **Swarms Evals** | Evaluating Swarms! | [Swarms Evals](https://github.com/kyegomez/swarms-evals) |
+| **Swarms Zero** | RPC Enterprise-Grade Automation Framework | [Swarms Zero](https://github.com/kyegomez/Zero) |
+
+----
+
+## 🫶 Contributions:
+
+The easiest way to contribute is to pick any issue with the `good first issue` tag πͺ. Read the Contributing guidelines [here](/CONTRIBUTING.md). Bug Report? [File here](https://github.com/swarms/gateway/issues) | Feature Request? [File here](https://github.com/swarms/gateway/issues)
+
+Swarms is an open-source project, and contributions are VERY welcome. If you want to contribute, you can create new features, fix bugs, or improve the infrastructure. Please refer to the [CONTRIBUTING.md](https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md) and our [contributing board](https://github.com/users/kyegomez/projects/1) to participate in Roadmap discussions!
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+----
+
+## Community
+
+Join our growing community around the world, for real-time support, ideas, and discussions on Swarms.
+
+- View our official [Blog](https://swarms.apac.ai)
+- Chat live with us on [Discord](https://discord.gg/kS3rwKs3ZC)
+- Follow us on [Twitter](https://twitter.com/kyegomez)
+- Connect with us on [LinkedIn](https://www.linkedin.com/company/the-swarm-corporation)
+- Visit us on [YouTube](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ)
+- [Join the Swarms community on Discord!](https://discord.gg/AJazBmhKnr)
+- Join our Swarms Community Gathering every Thursday at 1pm NYC Time to unlock the potential of autonomous agents in automating your daily tasks [Sign up here](https://lu.ma/5p2jnc2v)
+
+---
+
+## Discovery Call
+Book a discovery call to learn how Swarms can lower your operating costs by 40% with swarms of autonomous agents at lightspeed. [Click here to book a time that works for you!](https://calendly.com/swarm-corp/30min?month=2023-11)
+
+
+
+## Accelerate Backlog
+Help us accelerate our backlog by supporting us financially! Note, we're an open source corporation and so all the revenue we generate is through donations at the moment ;)
+
+
+
+---
diff --git a/docs/swarms/framework/agents_explained.md b/docs/swarms/framework/agents_explained.md
new file mode 100644
index 00000000..90fdc9e1
--- /dev/null
+++ b/docs/swarms/framework/agents_explained.md
@@ -0,0 +1,82 @@
+# An Analysis of Agents
+
+In the Swarms framework, agents are designed to perform tasks autonomously by leveraging large language models (LLMs), various tools, and long-term memory systems. This guide provides an extensive conceptual walkthrough of how an agent operates, detailing the sequence of actions it takes to complete a task and how it utilizes its internal components.
+
+#### Agent Components Overview
+- **LLM (Large Language Model)**: The core component responsible for understanding and generating natural language.
+- **Tools**: External functions and services that the agent can call to perform specific tasks, such as querying databases or interacting with APIs.
+- **Long-term Memory**: Systems like ChromaDB or Pinecone that store and retrieve information over extended periods, enabling the agent to remember past interactions and contexts.
+
+#### Agent Workflow
+The workflow of an agent can be divided into several stages: task initiation, initial LLM processing, tool usage, memory interaction, and final LLM processing.
+
+##### Stage 1: Task Initiation
+- **Input**: The task or query that the agent needs to address.
+- **Output**: A structured plan or approach for handling the task.
+
+##### Stage 2: Initial LLM Processing
+- **Input**: The task or query.
+- **Process**: The LLM interprets the task, understanding the context and requirements.
+- **Output**: An initial response or action plan.
+
+##### Stage 3: Tool Usage
+- **Input**: The action plan or specific sub-tasks identified by the LLM.
+- **Process**: The agent calls various tools to gather information, perform calculations, or interact with external systems.
+ - **Function Calling as Tools**: Tools are called as functions with specific inputs and outputs, enabling the agent to perform a wide range of tasks.
+- **Output**: Data or results from the tools.
+
+##### Stage 4: Memory Interaction
+- **Input**: Intermediate results and context from the tools.
+- **Process**: The agent interacts with long-term memory systems to store new information and retrieve relevant past data.
+  - **RAG Systems (ChromaDB, Pinecone)**: These systems are used to enhance the agent's responses by providing relevant historical data and context.
+- **Output**: Enhanced context and data for final processing.
+
+##### Stage 5: Final LLM Processing
+- **Input**: Comprehensive data and context from the tools and memory systems.
+- **Process**: The LLM generates the final response or completes the task using the enriched data.
+- **Output**: The final output or action taken by the agent.
+
+### Detailed Workflow with Mermaid Diagrams
+
+#### Agent Components and Workflow
+
+```mermaid
+graph TD
+ A[Task Initiation] -->|Receives Task| B[Initial LLM Processing]
+ B -->|Interprets Task| C[Tool Usage]
+ C -->|Calls Tools| D[Function 1]
+ C -->|Calls Tools| E[Function 2]
+ D -->|Returns Data| C
+ E -->|Returns Data| C
+ C -->|Provides Data| F[Memory Interaction]
+ F -->|Stores and Retrieves Data| G[RAG System]
+ G -->|ChromaDB/Pinecone| H[Enhanced Data]
+ F -->|Provides Enhanced Data| I[Final LLM Processing]
+ I -->|Generates Final Response| J[Output]
+```
+
+### Explanation of Each Stage
+
+#### Stage 1: Task Initiation
+- **Task**: The agent receives a task or query from an external source (e.g., a user query, a system trigger).
+- **Objective**: To understand what needs to be done and prepare an initial approach.
+
+#### Stage 2: Initial LLM Processing
+- **Interpretation**: The LLM processes the task to comprehend its context and requirements.
+- **Planning**: The LLM generates an initial plan or identifies the sub-tasks required to complete the task.
+
+#### Stage 3: Tool Usage
+- **Function Calls**: The agent uses predefined functions (tools) to perform specific actions, such as querying a database or making API calls.
+- **Tool Integration**: Each tool is called with specific parameters, and the results are collected for further processing.
+
+#### Stage 4: Memory Interaction
+- **Long-term Memory**: Systems like ChromaDB and Pinecone store and retrieve long-term data, providing the agent with historical context and past interactions.
+- **Retrieval-Augmented Generation (RAG)**: The agent uses RAG systems to enhance the current context with relevant past data, improving the quality and relevance of the final output.
+
+#### Stage 5: Final LLM Processing
+- **Enhanced Processing**: The LLM processes the enriched data and context provided by the tools and memory systems.
+- **Final Output**: The LLM generates a comprehensive response or completes the task using the enhanced information.
+
+### Conclusion
+
+The Swarms framework's agents are powerful units that combine LLMs, tools, and long-term memory systems to perform complex tasks efficiently. By leveraging function calling for tools and RAG systems like ChromaDB and Pinecone, agents can enhance their capabilities and deliver highly relevant and accurate results. This conceptual guide and walkthrough provide a detailed understanding of how agents operate within the Swarms framework, enabling the development of sophisticated and collaborative AI systems.
\ No newline at end of file
diff --git a/docs/swarms/framework/blog.md b/docs/swarms/framework/blog.md
new file mode 100644
index 00000000..e26a74b9
--- /dev/null
+++ b/docs/swarms/framework/blog.md
@@ -0,0 +1,468 @@
+# The Future of Manufacturing: Leveraging Autonomous LLM Agents for Cost Reduction and Revenue Growth
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [Understanding Autonomous LLM Agents](#understanding-autonomous-llm-agents)
+3. [RAG Embedding Databases: The Knowledge Foundation](#rag-embedding-databases)
+4. [Function Calling and External Tools: Enhancing Capabilities](#function-calling-and-external-tools)
+5. [Cost Reduction Strategies](#cost-reduction-strategies)
+ 5.1. [Optimizing Supply Chain Management](#optimizing-supply-chain-management)
+ 5.2. [Enhancing Quality Control](#enhancing-quality-control)
+ 5.3. [Streamlining Maintenance and Repairs](#streamlining-maintenance-and-repairs)
+ 5.4. [Improving Energy Efficiency](#improving-energy-efficiency)
+6. [Revenue Growth Opportunities](#revenue-growth-opportunities)
+ 6.1. [Product Innovation and Development](#product-innovation-and-development)
+ 6.2. [Personalized Customer Experiences](#personalized-customer-experiences)
+ 6.3. [Market Analysis and Trend Prediction](#market-analysis-and-trend-prediction)
+ 6.4. [Optimizing Pricing Strategies](#optimizing-pricing-strategies)
+7. [Implementation Strategies](#implementation-strategies)
+8. [Overcoming Challenges and Risks](#overcoming-challenges-and-risks)
+9. [Case Studies](#case-studies)
+10. [Future Outlook](#future-outlook)
+11. [Conclusion](#conclusion)
+
+## 1. Introduction
+
+In today's rapidly evolving manufacturing landscape, executives and CEOs face unprecedented challenges and opportunities. The key to maintaining a competitive edge lies in embracing cutting-edge technologies that can revolutionize operations, reduce costs, and drive revenue growth. One such transformative technology is the integration of autonomous Large Language Model (LLM) agents equipped with Retrieval-Augmented Generation (RAG) embedding databases, function calling capabilities, and access to external tools.
+
+This comprehensive blog post aims to explore how these advanced AI systems can be leveraged to address the most pressing issues in manufacturing enterprises. We will delve into the intricacies of these technologies, provide concrete examples of their applications, and offer insights into implementation strategies. By the end of this article, you will have a clear understanding of how autonomous LLM agents can become a cornerstone of your manufacturing business's digital transformation journey.
+
+## 2. Understanding Autonomous LLM Agents
+
+Autonomous LLM agents represent the cutting edge of artificial intelligence in the manufacturing sector. These sophisticated systems are built upon large language models, which are neural networks trained on vast amounts of text data. What sets them apart is their ability to operate autonomously, making decisions and taking actions with minimal human intervention.
+
+Key features of autonomous LLM agents include:
+
+1. **Natural Language Processing (NLP)**: They can understand and generate human-like text, enabling seamless communication with employees across all levels of the organization.
+
+2. **Contextual Understanding**: These agents can grasp complex scenarios and nuanced information, making them ideal for handling intricate manufacturing processes.
+
+3. **Adaptive Learning**: Through continuous interaction and feedback, they can improve their performance over time, becoming more efficient and accurate.
+
+4. **Multi-modal Input Processing**: Advanced agents can process not only text but also images, audio, and sensor data, providing a holistic view of manufacturing operations.
+
+5. **Task Automation**: They can automate a wide range of tasks, from data analysis to decision-making, freeing up human resources for more strategic activities.
+
+The integration of autonomous LLM agents in manufacturing environments opens up new possibilities for optimization, innovation, and growth. As we explore their applications throughout this blog, it's crucial to understand that these agents are not meant to replace human workers but to augment their capabilities and drive overall productivity.
+
+## 3. RAG Embedding Databases: The Knowledge Foundation
+
+At the heart of effective autonomous LLM agents lies the Retrieval-Augmented Generation (RAG) embedding database. This technology serves as the knowledge foundation, enabling agents to access and utilize vast amounts of relevant information quickly and accurately.
+
+RAG embedding databases work by:
+
+1. **Vectorizing Information**: Converting textual data into high-dimensional vectors that capture semantic meaning.
+
+2. **Efficient Storage**: Organizing these vectors in a way that allows for rapid retrieval of relevant information.
+
+3. **Contextual Retrieval**: Enabling the agent to pull relevant information based on the current context or query.
+
+4. **Dynamic Updates**: Allowing for continuous updates to the knowledge base, ensuring the agent always has access to the most current information.
+
+In the manufacturing context, RAG embedding databases can store a wealth of information, including:
+
+- Technical specifications of machinery and products
+- Historical production data and performance metrics
+- Quality control guidelines and standards
+- Supplier information and supply chain data
+- Market trends and customer feedback
+
+By leveraging RAG embedding databases, autonomous LLM agents can make informed decisions based on a comprehensive understanding of the manufacturing ecosystem. This leads to more accurate predictions, better problem-solving capabilities, and the ability to generate innovative solutions.
+
+For example, when faced with a production bottleneck, an agent can quickly retrieve relevant historical data, equipment specifications, and best practices to propose an optimal solution. This rapid access to contextual information significantly reduces decision-making time and improves the quality of outcomes.
+
+## 4. Function Calling and External Tools: Enhancing Capabilities
+
+The true power of autonomous LLM agents in manufacturing environments is realized through their ability to interact with external systems and tools. This is achieved through function calling and integration with specialized external tools.
+
+Function calling allows the agent to:
+
+1. **Execute Specific Tasks**: Trigger predefined functions to perform complex operations or calculations.
+
+2. **Interact with Databases**: Query and update various databases within the manufacturing ecosystem.
+
+3. **Control Equipment**: Send commands to machinery or robotic systems on the production floor.
+
+4. **Generate Reports**: Automatically compile and format data into meaningful reports for different stakeholders.
+
+External tools that can be integrated include:
+
+- **Predictive Maintenance Software**: To schedule and optimize equipment maintenance.
+- **Supply Chain Management Systems**: For real-time tracking and optimization of inventory and logistics.
+- **Quality Control Systems**: To monitor and analyze product quality metrics.
+- **Energy Management Tools**: For monitoring and optimizing energy consumption across the facility.
+- **Customer Relationship Management (CRM) Software**: To analyze customer data and improve service.
+
+By combining the cognitive abilities of LLM agents with the specialized functionalities of external tools, manufacturing enterprises can create a powerful ecosystem that drives efficiency and innovation.
+
+For instance, an autonomous agent could:
+
+1. Detect an anomaly in production quality through data analysis.
+2. Use function calling to query the maintenance database for equipment history.
+3. Leverage an external predictive maintenance tool to assess the risk of equipment failure.
+4. Automatically schedule maintenance and adjust production schedules to minimize downtime.
+5. Generate a comprehensive report for management, detailing the issue, actions taken, and impact on production.
+
+This level of integration and automation can lead to significant improvements in operational efficiency, cost reduction, and overall productivity.
+
+## 5. Cost Reduction Strategies
+
+One of the primary benefits of implementing autonomous LLM agents in manufacturing is the potential for substantial cost reductions across various aspects of operations. Let's explore some key areas where these agents can drive down expenses:
+
+### 5.1. Optimizing Supply Chain Management
+
+Autonomous LLM agents can revolutionize supply chain management by:
+
+- **Predictive Inventory Management**: Analyzing historical data, market trends, and production schedules to optimize inventory levels, reducing carrying costs and minimizing stockouts.
+
+- **Supplier Selection and Negotiation**: Evaluating supplier performance, market conditions, and contract terms to recommend the most cost-effective suppliers and negotiate better deals.
+
+- **Logistics Optimization**: Analyzing transportation routes, warehouse locations, and delivery schedules to minimize logistics costs and improve delivery times.
+
+Example: A large automotive manufacturer implemented an autonomous LLM agent to optimize its global supply chain. The agent analyzed data from multiple sources, including production schedules, supplier performance metrics, and global shipping trends. By optimizing inventory levels and renegotiating supplier contracts, the company reduced supply chain costs by 15% in the first year, resulting in savings of over $100 million.
+
+### 5.2. Enhancing Quality Control
+
+Quality control is a critical aspect of manufacturing that directly impacts costs. Autonomous LLM agents can significantly improve quality control processes by:
+
+- **Real-time Defect Detection**: Integrating with computer vision systems to identify and classify defects in real-time, reducing waste and rework.
+
+- **Root Cause Analysis**: Analyzing production data to identify the root causes of quality issues and recommending corrective actions.
+
+- **Predictive Quality Management**: Leveraging historical data and machine learning models to predict potential quality issues before they occur.
+
+Example: A semiconductor manufacturer deployed an autonomous LLM agent to enhance its quality control processes. The agent analyzed data from multiple sensors on the production line, historical quality records, and equipment maintenance logs. By identifying subtle patterns that led to defects, the agent helped reduce scrap rates by 30% and improved overall yield by 5%, resulting in annual savings of $50 million.
+
+### 5.3. Streamlining Maintenance and Repairs
+
+Effective maintenance is crucial for minimizing downtime and extending the lifespan of expensive manufacturing equipment. Autonomous LLM agents can optimize maintenance processes by:
+
+- **Predictive Maintenance**: Analyzing equipment sensor data, maintenance history, and production schedules to predict when maintenance is needed, reducing unplanned downtime.
+
+- **Maintenance Scheduling Optimization**: Balancing maintenance needs with production schedules to minimize disruptions and maximize equipment availability.
+
+- **Repair Knowledge Management**: Creating and maintaining a comprehensive knowledge base of repair procedures, making it easier for technicians to quickly address issues.
+
+Example: A paper mill implemented an autonomous LLM agent to manage its maintenance operations. The agent analyzed vibration data from critical equipment, historical maintenance records, and production schedules. By implementing a predictive maintenance strategy, the mill reduced unplanned downtime by 40% and extended the lifespan of key equipment by 25%, resulting in annual savings of $15 million in maintenance costs and lost production time.
+
+### 5.4. Improving Energy Efficiency
+
+Energy consumption is a significant cost factor in manufacturing. Autonomous LLM agents can help reduce energy costs by:
+
+- **Real-time Energy Monitoring**: Analyzing energy consumption data across the facility to identify inefficiencies and anomalies.
+
+- **Process Optimization for Energy Efficiency**: Recommending changes to production processes to reduce energy consumption without impacting output.
+
+- **Demand Response Management**: Integrating with smart grid systems to optimize energy usage based on variable electricity prices and demand.
+
+Example: A large chemical manufacturing plant deployed an autonomous LLM agent to optimize its energy consumption. The agent analyzed data from thousands of sensors across the facility, weather forecasts, and electricity price fluctuations. By optimizing process parameters and scheduling energy-intensive operations during off-peak hours, the plant reduced its energy costs by 18%, saving $10 million annually.
+
+## 6. Revenue Growth Opportunities
+
+While cost reduction is crucial, autonomous LLM agents also present significant opportunities for revenue growth in manufacturing enterprises. Let's explore how these advanced AI systems can drive top-line growth:
+
+### 6.1. Product Innovation and Development
+
+Autonomous LLM agents can accelerate and enhance the product innovation process by:
+
+- **Market Trend Analysis**: Analyzing vast amounts of market data, customer feedback, and industry reports to identify emerging trends and unmet needs.
+
+- **Design Optimization**: Leveraging generative design techniques and historical performance data to suggest optimal product designs that balance functionality, manufacturability, and cost.
+
+- **Rapid Prototyping Assistance**: Guiding engineers through the prototyping process, suggesting materials and manufacturing techniques based on design requirements and cost constraints.
+
+Example: A consumer electronics manufacturer utilized an autonomous LLM agent to enhance its product development process. The agent analyzed social media trends, customer support tickets, and competitor product features to identify key areas for innovation. By suggesting novel features and optimizing designs for manufacturability, the company reduced time-to-market for new products by 30% and increased the success rate of new product launches by 25%, resulting in a 15% increase in annual revenue.
+
+### 6.2. Personalized Customer Experiences
+
+In the age of mass customization, providing personalized experiences can significantly boost customer satisfaction and revenue. Autonomous LLM agents can facilitate this by:
+
+- **Customer Preference Analysis**: Analyzing historical purchase data, customer interactions, and market trends to predict individual customer preferences.
+
+- **Dynamic Product Configuration**: Enabling real-time product customization based on customer inputs and preferences, while ensuring manufacturability.
+
+- **Personalized Marketing and Sales Support**: Generating tailored marketing content and sales recommendations for each customer or market segment.
+
+Example: A high-end furniture manufacturer implemented an autonomous LLM agent to power its online customization platform. The agent analyzed customer behavior, design trends, and production capabilities to offer personalized product recommendations and customization options. This led to a 40% increase in online sales and a 20% increase in average order value, driving significant revenue growth.
+
+### 6.3. Market Analysis and Trend Prediction
+
+Staying ahead of market trends is crucial for maintaining a competitive edge. Autonomous LLM agents can provide valuable insights by:
+
+- **Competitive Intelligence**: Analyzing competitor activities, product launches, and market positioning to identify threats and opportunities.
+
+- **Demand Forecasting**: Combining historical sales data, economic indicators, and market trends to predict future demand more accurately.
+
+- **Emerging Market Identification**: Analyzing global economic data, demographic trends, and industry reports to identify promising new markets for expansion.
+
+Example: A global automotive parts manufacturer employed an autonomous LLM agent to enhance its market intelligence capabilities. The agent analyzed data from industry reports, social media, patent filings, and economic indicators to predict the growth of electric vehicle adoption in different regions. This insight allowed the company to strategically invest in EV component manufacturing, resulting in a 30% year-over-year growth in this high-margin segment.
+
+### 6.4. Optimizing Pricing Strategies
+
+Pricing is a critical lever for revenue growth. Autonomous LLM agents can optimize pricing strategies by:
+
+- **Dynamic Pricing Models**: Analyzing market conditions, competitor pricing, and demand fluctuations to suggest optimal pricing in real-time.
+
+- **Value-based Pricing Analysis**: Assessing customer perceived value through sentiment analysis and willingness-to-pay studies to maximize revenue.
+
+- **Bundle and Discount Optimization**: Recommending product bundles and discount structures that maximize overall revenue and profitability.
+
+Example: An industrial equipment manufacturer implemented an autonomous LLM agent to optimize its pricing strategy. The agent analyzed historical sales data, competitor pricing, economic indicators, and customer sentiment to recommend dynamic pricing models for different product lines and markets. This resulted in a 10% increase in profit margins and a 7% boost in overall revenue within the first year of implementation.
+
+## 7. Implementation Strategies
+
+Successfully implementing autonomous LLM agents in a manufacturing environment requires a strategic approach. Here are key steps and considerations for executives and CEOs:
+
+1. **Start with a Clear Vision and Objectives**:
+ - Define specific goals for cost reduction and revenue growth.
+ - Identify key performance indicators (KPIs) to measure success.
+
+2. **Conduct a Comprehensive Readiness Assessment**:
+ - Evaluate existing IT infrastructure and data management systems.
+ - Assess the quality and accessibility of historical data.
+ - Identify potential integration points with existing systems and processes.
+
+3. **Build a Cross-functional Implementation Team**:
+ - Include representatives from IT, operations, engineering, and business strategy.
+ - Consider partnering with external AI and manufacturing technology experts.
+
+4. **Develop a Phased Implementation Plan**:
+ - Start with pilot projects in specific areas (e.g., predictive maintenance or supply chain optimization).
+ - Scale successful pilots across the organization.
+
+5. **Invest in Data Infrastructure and Quality**:
+ - Ensure robust data collection and storage systems are in place.
+ - Implement data cleaning and standardization processes.
+
+
+
+6. **Choose the Right LLM and RAG Technologies**:
+ - Evaluate different LLM options based on performance, cost, and specific manufacturing requirements.
+ - Select RAG embedding databases that can efficiently handle the scale and complexity of manufacturing data.
+
+7. **Develop a Robust Integration Strategy**:
+ - Plan for seamless integration with existing ERP, MES, and other critical systems.
+ - Ensure proper API development and management for connecting with external tools and databases.
+
+8. **Prioritize Security and Compliance**:
+ - Implement strong data encryption and access control measures.
+ - Ensure compliance with industry regulations and data privacy laws.
+
+9. **Invest in Change Management and Training**:
+ - Develop comprehensive training programs for employees at all levels.
+ - Communicate the benefits and address concerns about AI implementation.
+
+10. **Establish Governance and Oversight**:
+ - Create a governance structure to oversee the use and development of AI systems.
+ - Implement ethical guidelines for AI decision-making.
+
+11. **Plan for Continuous Improvement**:
+ - Set up feedback loops to continuously refine and improve the AI systems.
+ - Stay updated on advancements in LLM and RAG technologies.
+
+Example: A leading automotive manufacturer implemented autonomous LLM agents across its global operations using a phased approach. They started with a pilot project in predictive maintenance at a single plant, which reduced downtime by 25%. Building on this success, they expanded to supply chain optimization and quality control. Within three years, the company had deployed AI agents across all major operations, resulting in a 12% reduction in overall production costs and a 9% increase in productivity.
+
+## 8. Overcoming Challenges and Risks
+
+While the benefits of autonomous LLM agents in manufacturing are substantial, there are several challenges and risks that executives must address:
+
+### Data Quality and Availability
+
+**Challenge**: Manufacturing environments often have siloed, inconsistent, or incomplete data, which can hinder the effectiveness of AI systems.
+
+**Solution**:
+- Invest in data infrastructure and standardization across the organization.
+- Implement data governance policies to ensure consistent data collection and management.
+- Use data augmentation techniques to address gaps in historical data.
+
+### Integration with Legacy Systems
+
+**Challenge**: Many manufacturing facilities rely on legacy systems that may not easily integrate with modern AI technologies.
+
+**Solution**:
+- Develop custom APIs and middleware to facilitate communication between legacy systems and AI agents.
+- Consider a gradual modernization strategy, replacing legacy systems over time.
+- Use edge computing devices to bridge the gap between old equipment and new AI systems.
+
+### Workforce Adaptation and Resistance
+
+**Challenge**: Employees may resist AI implementation due to fear of job displacement or lack of understanding.
+
+**Solution**:
+- Emphasize that AI is a tool to augment human capabilities, not replace workers.
+- Provide comprehensive training programs to upskill employees.
+- Involve workers in the AI implementation process to gain buy-in and valuable insights.
+
+### Ethical Considerations and Bias
+
+**Challenge**: AI systems may inadvertently perpetuate biases present in historical data or decision-making processes.
+
+**Solution**:
+- Implement rigorous testing for bias in AI models and decisions.
+- Establish an ethics committee to oversee AI implementations.
+- Regularly audit AI systems for fairness and unintended consequences.
+
+### Security and Intellectual Property Protection
+
+**Challenge**: AI systems may be vulnerable to cyber attacks or could potentially expose sensitive manufacturing processes.
+
+**Solution**:
+- Implement robust cybersecurity measures, including encryption and access controls.
+- Develop clear policies on data handling and AI model ownership.
+- Regularly conduct security audits and penetration testing.
+
+Example: A pharmaceutical manufacturer faced challenges integrating AI agents with its highly regulated production processes. They addressed this by creating a cross-functional team of IT specialists, process engineers, and compliance officers. This team developed a custom integration layer that allowed AI agents to interact with existing systems while maintaining regulatory compliance. They also implemented a rigorous change management process, which included extensive training and a phased rollout. As a result, they successfully deployed AI agents that optimized production scheduling and quality control, leading to a 15% increase in throughput and a 30% reduction in quality-related issues.
+
+## 9. Case Studies
+
+To illustrate the transformative potential of autonomous LLM agents in manufacturing, let's examine several real-world case studies:
+
+### Case Study 1: Global Electronics Manufacturer
+
+**Challenge**: A leading electronics manufacturer was struggling with supply chain disruptions and rising production costs.
+
+**Solution**: They implemented an autonomous LLM agent integrated with their supply chain management system and production planning tools.
+
+**Results**:
+- 22% reduction in inventory carrying costs
+- 18% improvement in on-time deliveries
+- 15% decrease in production lead times
+- $200 million annual cost savings
+
+**Key Factors for Success**:
+- Comprehensive integration with existing systems
+- Real-time data processing capabilities
+- Continuous learning and optimization algorithms
+
+### Case Study 2: Automotive Parts Supplier
+
+**Challenge**: An automotive parts supplier needed to improve quality control and reduce warranty claims.
+
+**Solution**: They deployed an AI-powered quality control system using computer vision and an autonomous LLM agent for defect analysis and prediction.
+
+**Results**:
+- 40% reduction in defect rates
+- 60% decrease in warranty claims
+- 25% improvement in overall equipment effectiveness (OEE)
+- $75 million annual savings in quality-related costs
+
+**Key Factors for Success**:
+- High-quality image data collection system
+- Integration of domain expertise into the AI model
+- Continuous feedback loop for model improvement
+
+### Case Study 3: Food and Beverage Manufacturer
+
+**Challenge**: A large food and beverage manufacturer wanted to optimize its energy consumption and reduce waste in its production processes.
+
+**Solution**: They implemented an autonomous LLM agent that integrated with their energy management systems and production equipment.
+
+**Results**:
+- 20% reduction in energy consumption
+- 30% decrease in production waste
+- 12% increase in overall production efficiency
+- $50 million annual cost savings
+- Significant progress towards sustainability goals
+
+**Key Factors for Success**:
+- Comprehensive sensor network for real-time data collection
+- Integration with smart grid systems for dynamic energy management
+- Collaboration with process engineers to refine AI recommendations
+
+### Case Study 4: Aerospace Component Manufacturer
+
+**Challenge**: An aerospace component manufacturer needed to accelerate product development and improve first-time-right rates for new designs.
+
+**Solution**: They implemented an autonomous LLM agent to assist in the design process, leveraging historical data, simulation results, and industry standards.
+
+**Results**:
+- 35% reduction in design cycle time
+- 50% improvement in first-time-right rates for new designs
+- 20% increase in successful patent applications
+- $100 million increase in annual revenue from new products
+
+**Key Factors for Success**:
+- Integration of CAD systems with the AI agent
+- Incorporation of aerospace industry standards and regulations into the AI knowledge base
+- Collaborative approach between AI and human engineers
+
+These case studies demonstrate the wide-ranging benefits of autonomous LLM agents across various manufacturing sectors. The key takeaway is that successful implementation requires a holistic approach, combining technology integration, process redesign, and a focus on continuous improvement.
+
+## 10. Future Outlook
+
+As we look to the future of manufacturing, the role of autonomous LLM agents is set to become even more critical. Here are some key trends and developments that executives should keep on their radar:
+
+### 1. Advanced Natural Language Interfaces
+
+Future LLM agents will feature more sophisticated natural language interfaces, allowing workers at all levels to interact with complex manufacturing systems using conversational language. This will democratize access to AI capabilities and enhance overall operational efficiency.
+
+### 2. Enhanced Multi-modal Learning
+
+Next-generation agents will be able to process and analyze data from a wider range of sources, including text, images, video, and sensor data. This will enable more comprehensive insights and decision-making capabilities across the manufacturing ecosystem.
+
+### 3. Collaborative AI Systems
+
+We'll see the emergence of AI ecosystems where multiple specialized agents collaborate to solve complex manufacturing challenges. For example, a design optimization agent might work in tandem with a supply chain agent and a quality control agent to develop new products that are optimized for both performance and manufacturability.
+
+### 4. Quantum-enhanced AI
+
+As quantum computing becomes more accessible, it will significantly enhance the capabilities of LLM agents, particularly in complex optimization problems common in manufacturing. This could lead to breakthroughs in areas such as materials science and process optimization.
+
+### 5. Augmented Reality Integration
+
+LLM agents will increasingly be integrated with augmented reality (AR) systems, providing real-time guidance and information to workers on the factory floor. This could revolutionize training, maintenance, and quality control processes.
+
+### 6. Autonomous Factories
+
+The ultimate vision is the development of fully autonomous factories where LLM agents orchestrate entire production processes with minimal human intervention. While this is still on the horizon, progressive implementation of autonomous systems will steadily move the industry in this direction.
+
+### 7. Ethical AI and Explainable Decision-Making
+
+As AI systems become more prevalent in critical manufacturing decisions, there will be an increased focus on developing ethical AI frameworks and enhancing the explainability of AI decision-making processes. This will be crucial for maintaining trust and meeting regulatory requirements.
+
+### 8. Circular Economy Optimization
+
+Future LLM agents will play a key role in optimizing manufacturing processes for sustainability and circular economy principles. This will include enhancing recycling processes, optimizing resource use, and designing products for easy disassembly and reuse.
+
+To stay ahead in this rapidly evolving landscape, manufacturing executives should:
+
+1. **Foster a Culture of Innovation**: Encourage experimentation with new AI technologies and applications.
+
+2. **Invest in Continuous Learning**: Ensure your workforce is constantly upskilling to work effectively with advanced AI systems.
+
+3. **Collaborate with AI Research Institutions**: Partner with universities and research labs to stay at the forefront of AI advancements in manufacturing.
+
+4. **Participate in Industry Consortiums**: Join manufacturing technology consortiums to share knowledge and shape industry standards for AI adoption.
+
+5. **Develop Flexible and Scalable AI Infrastructure**: Build systems that can easily incorporate new AI capabilities as they emerge.
+
+6. **Monitor Regulatory Developments**: Stay informed about evolving regulations related to AI in manufacturing to ensure compliance and competitive advantage.
+
+By embracing these future trends and preparing their organizations accordingly, manufacturing executives can position their companies to thrive in the AI-driven future of industry.
+
+## 11. Conclusion
+
+The integration of autonomous LLM agents with RAG embedding databases, function calling, and external tools represents a paradigm shift in manufacturing. This technology has the potential to dramatically reduce costs, drive revenue growth, and revolutionize how manufacturing enterprises operate.
+
+Key takeaways for executives and CEOs:
+
+1. **Transformative Potential**: Autonomous LLM agents can impact every aspect of manufacturing, from supply chain optimization to product innovation.
+
+2. **Data-Driven Decision Making**: These AI systems enable more informed, real-time decision-making based on comprehensive data analysis.
+
+3. **Competitive Advantage**: Early adopters of this technology are likely to gain significant competitive advantages in terms of efficiency, quality, and innovation.
+
+4. **Holistic Implementation**: Success requires a strategic approach that addresses technology, processes, and people.
+
+5. **Continuous Evolution**: The field of AI in manufacturing is rapidly advancing, necessitating ongoing investment and adaptation.
+
+6. **Ethical Considerations**: As AI becomes more prevalent, addressing ethical concerns and maintaining transparency will be crucial.
+
+7. **Future Readiness**: Preparing for future developments, such as quantum-enhanced AI and autonomous factories, will be key to long-term success.
+
+The journey to implement autonomous LLM agents in manufacturing is complex but potentially transformative. It requires vision, commitment, and a willingness to reimagine traditional manufacturing processes. However, the potential rewards — in terms of cost savings, revenue growth, and competitive advantage — are substantial.
+
+As a manufacturing executive or CEO, your role is to lead this transformation, fostering a culture of innovation and continuous improvement. By embracing the power of autonomous LLM agents, you can position your organization at the forefront of the next industrial revolution, driving sustainable growth and success in an increasingly competitive global marketplace.
+
+The future of manufacturing is intelligent, autonomous, and data-driven. The time to act is now. Embrace the potential of autonomous LLM agents and lead your organization into a new era of manufacturing excellence.
\ No newline at end of file
diff --git a/docs/swarms/framework/code_cleanliness.md b/docs/swarms/framework/code_cleanliness.md
new file mode 100644
index 00000000..e1c04690
--- /dev/null
+++ b/docs/swarms/framework/code_cleanliness.md
@@ -0,0 +1,407 @@
+# Code Cleanliness in Python: A Comprehensive Guide
+
+Code cleanliness is an essential aspect of software development that ensures code is easy to read, understand, and maintain. Clean code leads to fewer bugs, easier debugging, and more efficient collaboration among developers. This blog article delves into the principles of writing clean Python code, emphasizing the use of type annotations, docstrings, and the Loguru logging library. We'll explore the importance of each component and provide practical examples to illustrate best practices.
+
+## Table of Contents
+1. Introduction to Code Cleanliness
+2. Importance of Type Annotations
+3. Writing Effective Docstrings
+4. Structuring Your Code
+5. Error Handling and Logging with Loguru
+6. Refactoring for Clean Code
+7. Examples of Clean Code
+8. Conclusion
+
+## 1. Introduction to Code Cleanliness
+
+Code cleanliness refers to the practice of writing code that is easy to read, understand, and maintain. Clean code follows consistent conventions and is organized logically, making it easier for developers to collaborate and for new team members to get up to speed quickly.
+
+### Why Clean Code Matters
+
+1. **Readability**: Clean code is easy to read and understand, which reduces the time needed to grasp what the code does.
+2. **Maintainability**: Clean code is easier to maintain and modify, reducing the risk of introducing bugs when making changes.
+3. **Collaboration**: Clean code facilitates collaboration among team members, as everyone can easily understand and follow the codebase.
+4. **Debugging**: Clean code makes it easier to identify and fix bugs, leading to more reliable software.
+
+## 2. Importance of Type Annotations
+
+Type annotations in Python provide a way to specify the types of variables, function arguments, and return values. They enhance code readability and help catch type-related errors early in the development process.
+
+### Benefits of Type Annotations
+
+1. **Improved Readability**: Type annotations make it clear what types of values are expected, improving code readability.
+2. **Error Detection**: Type annotations help catch type-related errors during development, reducing runtime errors.
+3. **Better Tooling**: Many modern IDEs and editors use type annotations to provide better code completion and error checking.
+
+### Example of Type Annotations
+
+```python
+from typing import List
+
+def calculate_average(numbers: List[float]) -> float:
+ """
+ Calculates the average of a list of numbers.
+
+ Args:
+ numbers (List[float]): A list of numbers.
+
+ Returns:
+ float: The average of the numbers.
+ """
+ return sum(numbers) / len(numbers)
+```
+
+In this example, the `calculate_average` function takes a list of floats as input and returns a float. The type annotations make it clear what types are expected and returned, enhancing readability and maintainability.
+
+## 3. Writing Effective Docstrings
+
+Docstrings are an essential part of writing clean code in Python. They provide inline documentation for modules, classes, methods, and functions. Effective docstrings improve code readability and make it easier for other developers to understand and use your code.
+
+### Benefits of Docstrings
+
+1. **Documentation**: Docstrings serve as inline documentation, making it easier to understand the purpose and usage of code.
+2. **Consistency**: Well-written docstrings ensure consistent documentation across the codebase.
+3. **Ease of Use**: Docstrings make it easier for developers to use and understand code without having to read through the implementation details.
+
+### Example of Effective Docstrings
+
+```python
+def calculate_factorial(n: int) -> int:
+ """
+ Calculates the factorial of a given non-negative integer.
+
+ Args:
+ n (int): The non-negative integer to calculate the factorial of.
+
+ Returns:
+ int: The factorial of the given number.
+
+ Raises:
+ ValueError: If the input is a negative integer.
+ """
+ if n < 0:
+ raise ValueError("Input must be a non-negative integer.")
+ factorial = 1
+ for i in range(1, n + 1):
+ factorial *= i
+ return factorial
+```
+
+In this example, the docstring clearly explains the purpose of the `calculate_factorial` function, its arguments, return value, and the exception it may raise.
+
+## 4. Structuring Your Code
+
+Proper code structure is crucial for code cleanliness. A well-structured codebase is easier to navigate, understand, and maintain. Here are some best practices for structuring your Python code:
+
+### Organizing Code into Modules and Packages
+
+Organize your code into modules and packages to group related functionality together. This makes it easier to find and manage code.
+
+```python
+# project/
+# ├── main.py
+# ├── utils/
+# │   ├── __init__.py
+# │   ├── file_utils.py
+# │   └── math_utils.py
+# └── models/
+#     ├── __init__.py
+#     ├── user.py
+#     └── product.py
+```
+
+### Using Functions and Classes
+
+Break down your code into small, reusable functions and classes. This makes your code more modular and easier to test.
+
+```python
+class User:
+ def __init__(self, name: str, age: int):
+ """
+ Initializes a new user.
+
+ Args:
+ name (str): The name of the user.
+ age (int): The age of the user.
+ """
+ self.name = name
+ self.age = age
+
+ def greet(self) -> str:
+ """
+ Greets the user.
+
+ Returns:
+ str: A greeting message.
+ """
+ return f"Hello, {self.name}!"
+```
+
+### Keeping Functions Small
+
+Functions should do one thing and do it well. Keep functions small and focused on a single task.
+
+```python
+def save_user(user: User, filename: str) -> None:
+ """
+ Saves user data to a file.
+
+ Args:
+ user (User): The user object to save.
+ filename (str): The name of the file to save the user data to.
+ """
+ with open(filename, 'w') as file:
+ file.write(f"{user.name},{user.age}")
+```
+
+## 5. Error Handling and Logging with Loguru
+
+Effective error handling and logging are critical components of clean code. They help you manage and diagnose issues that arise during the execution of your code.
+
+### Error Handling Best Practices
+
+1. **Use Specific Exceptions**: Catch specific exceptions rather than using a generic `except` clause.
+2. **Provide Meaningful Messages**: When raising exceptions, provide meaningful error messages to help diagnose the issue.
+3. **Clean Up Resources**: Use `finally` blocks or context managers to ensure that resources are properly cleaned up.
+
+### Example of Error Handling
+
+```python
+def divide_numbers(numerator: float, denominator: float) -> float:
+ """
+ Divides the numerator by the denominator.
+
+ Args:
+ numerator (float): The number to be divided.
+ denominator (float): The number to divide by.
+
+ Returns:
+ float: The result of the division.
+
+ Raises:
+ ValueError: If the denominator is zero.
+ """
+ if denominator == 0:
+ raise ValueError("The denominator cannot be zero.")
+ return numerator / denominator
+```
+
+### Logging with Loguru
+
+Loguru is a powerful logging library for Python that makes logging simple and enjoyable. It provides a clean and easy-to-use API for logging messages with different severity levels.
+
+#### Installing Loguru
+
+```bash
+pip install loguru
+```
+
+#### Basic Usage of Loguru
+
+```python
+from loguru import logger
+
+logger.debug("This is a debug message")
+logger.info("This is an info message")
+logger.warning("This is a warning message")
+logger.error("This is an error message")
+logger.critical("This is a critical message")
+```
+
+### Example of Logging in a Function
+
+```python
+from loguru import logger
+
+def fetch_data(url: str) -> str:
+ """
+ Fetches data from a given URL and returns it as a string.
+
+ Args:
+ url (str): The URL to fetch data from.
+
+ Returns:
+ str: The data fetched from the URL.
+
+ Raises:
+ requests.exceptions.RequestException: If there is an error with the request.
+ """
+ try:
+ logger.info(f"Fetching data from {url}")
+ response = requests.get(url)
+ response.raise_for_status()
+ logger.info("Data fetched successfully")
+ return response.text
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Error fetching data: {e}")
+ raise
+```
+
+In this example, Loguru is used to log messages at different severity levels. The `fetch_data` function logs informational messages when fetching data and logs an error message if an exception is raised.
+
+## 6. Refactoring for Clean Code
+
+Refactoring is the process of restructuring existing code without changing its external behavior. It is an essential practice for maintaining clean code. Refactoring helps improve code readability, reduce complexity, and eliminate redundancy.
+
+### Identifying Code Smells
+
+Code smells are indicators of potential issues in the code that may require refactoring. Common code smells include:
+1. **Long Methods**: Methods that are too long and do too many things.
+2. **Duplicated Code**: Code that is duplicated in multiple places.
+3. **Large Classes**: Classes that have too many responsibilities.
+4. **Poor Naming**: Variables, functions, or classes with unclear or misleading names.
+
+### Refactoring Techniques
+
+1. **Extract Method**: Break down long methods into smaller, more focused methods.
+2. **Rename Variables**: Use meaningful names for variables, functions, and classes.
+3. **Remove Duplicated Code**: Consolidate duplicated code into a single location.
+4. **Simplify Conditional Expressions**: Simplify complex conditional expressions for better readability.
+
+### Example of Refactoring
+
+Before refactoring:
+```python
+def process_data(data: List[int]) -> int:
+ total = 0
+ for value in data:
+ if value > 0:
+ total += value
+ return total
+```
+
+After refactoring:
+```python
+def filter_positive_values(data: List[int]) -> List[int]:
+ """
+ Filters the positive values from the input data.
+
+ Args:
+ data (List[int]): The input data.
+
+ Returns:
+ List[int]: A list of positive values.
+ """
+ return [value for value in data if value > 0]
+
+def sum_values(values: List[int]) -> int:
+ """
+ Sums the values in the input list.
+
+ Args:
+ values (List[int]): A list of values to sum.
+
+ Returns:
+ int: The sum of the values.
+ """
+ return sum(values)
+
+def process_data(data: List[int]) -> int:
+ """
+ Processes the data by filtering positive values and summing them.
+
+ Args:
+ data (List[int]): The input data.
+
+ Returns:
+ int: The sum of the positive values.
+ """
+ positive_values = filter_positive_values(data)
+ return sum_values(positive_values)
+```
+
+In this example, the `process_data` function is refactored into smaller, more focused functions. This improves readability and maintainability.
+
+## 7. Examples of Clean Code
+
+### Example 1: Reading a File
+
+```python
+def read_file(file_path: str) -> str:
+ """
+ Reads the content of a file and returns it as a string.
+
+ Args:
+ file_path (str): The path to the file to read.
+
+ Returns:
+ str: The content of the file.
+
+ Raises:
+ FileNotFoundError: If the file does not exist.
+ IOError: If there is an error reading the file.
+ """
+ try:
+ with open(file_path, 'r') as file:
+ return file.read()
+ except FileNotFoundError as e:
+ logger.error(f"File not found: {file_path}")
+ raise
+ except IOError as e:
+ logger.error(f"Error reading file: {file_path}")
+ raise
+```
+
+### Example 2: Fetching Data from a URL
+
+```python
+import requests
+from loguru import logger
+
+def fetch_data(url: str) -> str:
+ """
+ Fetches data from a given URL and returns it as a string.
+
+ Args:
+ url (str): The URL to fetch data from.
+
+ Returns:
+ str: The data fetched from the URL.
+
+ Raises:
+ requests.exceptions.RequestException: If there is an error with the request.
+ """
+ try:
+ logger.info(f"Fetching data from {url}")
+ response = requests.get(url)
+ response.raise_for_status()
+ logger.info("Data fetched successfully")
+ return response.text
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Error fetching data: {e}")
+ raise
+```
+
+### Example 3: Calculating Factorial
+
+```python
+def calculate_factorial(n: int) -> int:
+ """
+ Calculates the factorial of a given non-negative integer.
+
+ Args:
+ n (int): The non-negative integer to calculate the factorial of.
+
+ Returns:
+ int: The factorial of the given number.
+
+ Raises:
+ ValueError: If the input is a negative integer.
+ """
+ if n < 0:
+ raise ValueError("Input must be a non-negative integer.")
+ factorial = 1
+ for i in range(1, n + 1):
+ factorial *= i
+ return factorial
+```
+
+## 8. Conclusion
+
+Writing clean code in Python is crucial for developing maintainable, readable, and error-free software. By using type annotations, writing effective docstrings, structuring your code properly, and leveraging logging with Loguru, you can significantly improve the quality of your codebase.
+
+Remember to refactor your code regularly to eliminate code smells and improve readability. Clean code not only makes your life as a developer easier but also enhances collaboration and reduces the likelihood of bugs.
+
+By following the principles and best practices outlined in this article, you'll be well on your way to writing clean, maintainable Python code.
\ No newline at end of file
diff --git a/docs/swarms/framework/concept.md b/docs/swarms/framework/concept.md
new file mode 100644
index 00000000..9e146671
--- /dev/null
+++ b/docs/swarms/framework/concept.md
@@ -0,0 +1,67 @@
+To create a comprehensive overview of the Swarms framework, we can break it down into key concepts such as models, agents, tools, Retrieval-Augmented Generation (RAG) systems, and swarm systems. Below are conceptual explanations of these components along with mermaid diagrams to illustrate their interactions.
+
+### Swarms Framework Overview
+
+#### 1. **Models**
+Models are the core component of the Swarms framework, representing the neural networks and machine learning models used to perform various tasks. These can be Large Language Models (LLMs), vision models, or any other AI models.
+
+#### 2. **Agents**
+Agents are autonomous units that use models to perform specific tasks. In the Swarms framework, agents can leverage tools and interact with RAG systems.
+
+- **LLMs with Tools**: These agents use large language models along with tools like databases, APIs, and external knowledge sources to enhance their capabilities.
+- **RAG Systems**: These systems combine retrieval mechanisms with generative models to produce more accurate and contextually relevant outputs.
+
+#### 3. **Swarm Systems**
+Swarm systems involve multiple agents working collaboratively to achieve complex tasks. These systems coordinate and communicate among agents to ensure efficient and effective task execution.
+
+### Mermaid Diagrams
+
+#### Models
+
+```mermaid
+graph TD
+ A[Model] -->|Uses| B[Data]
+ A -->|Trains| C[Algorithm]
+ A -->|Outputs| D[Predictions]
+```
+
+#### Agents: LLMs with Tools and RAG Systems
+
+```mermaid
+graph TD
+ A[Agent] -->|Uses| B[LLM]
+ A -->|Interacts with| C[Tool]
+ C -->|Provides Data to| B
+ A -->|Queries| D[RAG System]
+ D -->|Retrieves Information from| E[Database]
+ D -->|Generates Responses with| F[Generative Model]
+```
+
+#### Swarm Systems
+
+```mermaid
+graph TD
+ A[Swarm System]
+ A -->|Coordinates| B[Agent 1]
+ A -->|Coordinates| C[Agent 2]
+ A -->|Coordinates| D[Agent 3]
+ B -->|Communicates with| C
+ C -->|Communicates with| D
+ D -->|Communicates with| B
+ B -->|Performs Task| E[Task 1]
+ C -->|Performs Task| F[Task 2]
+ D -->|Performs Task| G[Task 3]
+ E -->|Reports to| A
+ F -->|Reports to| A
+ G -->|Reports to| A
+```
+
+### Conceptualization
+
+1. **Models**: The basic building blocks trained on specific datasets to perform tasks.
+2. **Agents**: Intelligent entities that utilize models and tools to perform actions. LLM agents can use additional tools to enhance their capabilities.
+3. **RAG Systems**: Enhance agents by combining retrieval mechanisms (to fetch relevant information) with generative models (to create contextually relevant responses).
+4. **Swarm Systems**: Complex systems where multiple agents collaborate, communicate, and coordinate to perform complex, multi-step tasks efficiently.
+
+### Summary
+The Swarms framework leverages models, agents, tools, RAG systems, and swarm systems to create a robust, collaborative environment for executing complex AI tasks. By coordinating multiple agents and enhancing their capabilities with tools and retrieval-augmented generation, Swarms can handle sophisticated and multi-faceted applications effectively.
\ No newline at end of file
diff --git a/docs/swarms/framework/index.md b/docs/swarms/framework/index.md
new file mode 100644
index 00000000..1331d935
--- /dev/null
+++ b/docs/swarms/framework/index.md
@@ -0,0 +1,117 @@
+## Swarms Framework Conceptual Breakdown
+
+The `swarms` framework is a sophisticated structure designed to orchestrate the collaborative work of multiple agents in a hierarchical manner. This breakdown provides a conceptual and visual representation of the framework, highlighting the interactions between models, tools, memory, agents, and swarms.
+
+### Hierarchical Structure
+
+The framework can be visualized as a multi-layered hierarchy:
+
+1. **Models, Tools, Memory**: These form the foundational components that agents utilize to perform tasks.
+2. **Agents**: Individual entities that encapsulate specific functionalities, utilizing models, tools, and memory.
+3. **Swarm**: A collection of multiple agents working together in a coordinated manner.
+4. **Structs**: High-level structures that organize and manage swarms, enabling complex workflows and interactions.
+
+### Visual Representation
+
+Below are visual graphs illustrating the hierarchical and tree structure of the `swarms` framework.
+
+#### 1. Foundational Components: Models, Tools, Memory
+
+![Diagram](assets/img/agent_def.png)
+
+#### 2. Agents and Their Interactions
+
+```mermaid
+graph TD;
+ Agents --> Swarm
+ subgraph Agents_Collection
+ Agent1
+ Agent2
+ Agent3
+ end
+ subgraph Individual_Agents
+ Agent1 --> Models
+ Agent1 --> Tools
+ Agent1 --> Memory
+ Agent2 --> Models
+ Agent2 --> Tools
+ Agent2 --> Memory
+ Agent3 --> Models
+ Agent3 --> Tools
+ Agent3 --> Memory
+ end
+```
+
+#### 3. Multiple Agents Form a Swarm
+
+```mermaid
+graph TD;
+ Swarm1 --> Struct
+ Swarm2 --> Struct
+ Swarm3 --> Struct
+ subgraph Swarms_Collection
+ Swarm1
+ Swarm2
+ Swarm3
+ end
+ subgraph Individual_Swarms
+ Swarm1 --> Agent1
+ Swarm1 --> Agent2
+ Swarm1 --> Agent3
+ Swarm2 --> Agent4
+ Swarm2 --> Agent5
+ Swarm2 --> Agent6
+ Swarm3 --> Agent7
+ Swarm3 --> Agent8
+ Swarm3 --> Agent9
+ end
+```
+
+#### 4. Structs Organizing Multiple Swarms
+
+```mermaid
+graph TD;
+ Struct --> Swarms_Collection
+ subgraph High_Level_Structs
+ Struct1
+ Struct2
+ Struct3
+ end
+ subgraph Struct1
+ Swarm1
+ Swarm2
+ end
+ subgraph Struct2
+ Swarm3
+ end
+ subgraph Struct3
+ Swarm4
+ Swarm5
+ end
+```
+
+### Directory Breakdown
+
+The directory structure of the `swarms` framework is organized to support its hierarchical architecture:
+
+```sh
+swarms/
+├── agents/
+├── artifacts/
+├── marketplace/
+├── memory/
+├── models/
+├── prompts/
+├── schemas/
+├── structs/
+├── telemetry/
+├── tools/
+├── utils/
+└── __init__.py
+```
+
+### Summary
+
+The `swarms` framework is designed to facilitate complex multi-agent interactions through a structured and layered approach. By leveraging foundational components like models, tools, and memory, individual agents are empowered to perform specialized tasks. These agents are then coordinated within swarms to achieve collective goals, and swarms are managed within high-level structs to orchestrate sophisticated workflows.
+
+This hierarchical design ensures scalability, flexibility, and robustness, making the `swarms` framework a powerful tool for various applications in AI, data analysis, optimization, and beyond.
\ No newline at end of file
diff --git a/docs/swarms/framework/test.md b/docs/swarms/framework/test.md
new file mode 100644
index 00000000..9316d4b2
--- /dev/null
+++ b/docs/swarms/framework/test.md
@@ -0,0 +1,244 @@
+# How to Run Tests Using Pytest: A Comprehensive Guide
+
+In modern software development, automated testing is crucial for ensuring the reliability and functionality of your code. One of the most popular testing frameworks for Python is `pytest`.
+
+This blog will provide an in-depth look at how to run tests using `pytest`, including testing a single file, multiple files, every file in the test repository, and providing guidelines for contributors to run tests reliably.
+
+## What is Pytest?
+
+`pytest` is a testing framework for Python that makes it easy to write simple and scalable test cases. It supports fixtures, parameterized testing, and has a rich plugin architecture. `pytest` is widely used because of its ease of use and powerful features that help streamline the testing process.
+
+## Installation
+
+To get started with `pytest`, you need to install it. You can install `pytest` using `pip`:
+
+```bash
+pip install pytest
+```
+
+## Writing Your First Test
+
+Before diving into running tests, let's write a simple test. Create a file named `test_sample.py` with the following content:
+
+```python
+def test_addition():
+ assert 1 + 1 == 2
+
+def test_subtraction():
+ assert 2 - 1 == 1
+```
+
+In this example, we have defined two basic tests: `test_addition` and `test_subtraction`.
+
+## Running Tests
+
+### Running a Single Test File
+
+To run a single test file, you can use the `pytest` command followed by the filename. For example, to run the tests in `test_sample.py`, use the following command:
+
+```bash
+pytest test_sample.py
+```
+
+The output will show the test results, including the number of tests passed, failed, or skipped.
+
+### Running Multiple Test Files
+
+You can also run multiple test files by specifying their filenames separated by a space. For example:
+
+```bash
+pytest test_sample.py test_another_sample.py
+```
+
+If you have multiple test files in a directory, you can run all of them by specifying the directory name:
+
+```bash
+pytest tests/
+```
+
+### Running All Tests in the Repository
+
+To run all tests in the repository, navigate to the root directory of your project and simply run:
+
+```bash
+pytest
+```
+
+`pytest` will automatically discover and run all the test files that match the pattern `test_*.py` or `*_test.py`.
+
+### Test Discovery
+
+`pytest` automatically discovers test files and test functions based on their naming conventions. By default, it looks for files that match the pattern `test_*.py` or `*_test.py` and functions or methods that start with `test_`.
+
+### Using Markers
+
+`pytest` allows you to use markers to group tests or add metadata to them. Markers can be used to run specific subsets of tests. For example, you can mark a test as `slow` and then run only the slow tests or skip them.
+
+```python
+import pytest
+
+@pytest.mark.slow
+def test_long_running():
+ import time
+ time.sleep(5)
+ assert True
+
+def test_fast():
+ assert True
+```
+
+To run only the tests marked as `slow`, use the `-m` option:
+
+```bash
+pytest -m slow
+```
+
+### Parameterized Tests
+
+`pytest` supports parameterized testing, which allows you to run a test with different sets of input data. This can be done using the `@pytest.mark.parametrize` decorator.
+
+```python
+import pytest
+
+@pytest.mark.parametrize("a,b,expected", [
+ (1, 2, 3),
+ (2, 3, 5),
+ (3, 5, 8),
+])
+def test_add(a, b, expected):
+ assert a + b == expected
+```
+
+In this example, `test_add` will run three times with different sets of input data.
+
+### Fixtures
+
+Fixtures are a powerful feature of `pytest` that allow you to set up some context for your tests. They can be used to provide a fixed baseline upon which tests can reliably and repeatedly execute.
+
+```python
+import pytest
+
+@pytest.fixture
+def sample_data():
+ return {"name": "John", "age": 30}
+
+def test_sample_data(sample_data):
+ assert sample_data["name"] == "John"
+ assert sample_data["age"] == 30
+```
+
+Fixtures can be used to share setup and teardown code between tests.
+
+## Advanced Usage
+
+### Running Tests in Parallel
+
+`pytest` can run tests in parallel using the `pytest-xdist` plugin. To install `pytest-xdist`, run:
+
+```bash
+pip install pytest-xdist
+```
+
+To run tests in parallel, use the `-n` option followed by the number of CPU cores you want to use:
+
+```bash
+pytest -n 4
+```
+
+### Generating Test Reports
+
+`pytest` can generate detailed test reports. You can use the `--html` option to generate an HTML report:
+
+```bash
+pip install pytest-html
+pytest --html=report.html
+```
+
+This command will generate a file named `report.html` with a detailed report of the test results.
+
+### Code Coverage
+
+You can use the `pytest-cov` plugin to measure code coverage. To install `pytest-cov`, run:
+
+```bash
+pip install pytest-cov
+```
+
+To generate a coverage report, use the `--cov` option followed by the module name:
+
+```bash
+pytest --cov=my_module
+```
+
+This command will show the coverage summary in the terminal. You can also generate an HTML report:
+
+```bash
+pytest --cov=my_module --cov-report=html
+```
+
+The coverage report will be generated in the `htmlcov` directory.
+
+## Best Practices for Writing Tests
+
+1. **Write Clear and Concise Tests**: Each test should focus on a single piece of functionality.
+2. **Use Descriptive Names**: Test function names should clearly describe what they are testing.
+3. **Keep Tests Independent**: Tests should not depend on each other and should run in isolation.
+4. **Use Fixtures**: Use fixtures to set up the context for your tests.
+5. **Mock External Dependencies**: Use mocking to isolate the code under test from external dependencies.
+
+## Running Tests Reliably
+
+For contributors and team members, it's important to run tests reliably to ensure consistent results. Here are some guidelines:
+
+1. **Set Up a Virtual Environment**: Use a virtual environment to manage dependencies and ensure a consistent testing environment.
+
+ ```bash
+ python -m venv venv
+ source venv/bin/activate # On Windows use `venv\Scripts\activate`
+ ```
+
+2. **Install Dependencies**: Install all required dependencies from the `requirements.txt` file.
+
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+3. **Run Tests Before Pushing**: Ensure all tests pass before pushing code to the repository.
+
+4. **Use Continuous Integration (CI)**: Set up CI pipelines to automatically run tests on each commit or pull request.
+
+### Example CI Configuration (GitHub Actions)
+
+Here is an example of a GitHub Actions workflow to run tests using `pytest`:
+
+```yaml
+name: Python package
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.8'
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ - name: Run tests
+ run: |
+ pytest
+```
+
+This configuration will run the tests on every push and pull request, ensuring that your codebase remains stable.
+
+## Conclusion
+
+`pytest` is a powerful and flexible testing framework that makes it easy to write and run tests for your Python code. By following the guidelines and best practices outlined in this blog, you can ensure that your tests are reliable and your codebase is robust. Whether you are testing a single file, multiple files, or the entire repository, `pytest` provides the tools you need to automate and streamline your testing process.
+
+Happy testing!
\ No newline at end of file
diff --git a/docs/swarms/framework/vision.md b/docs/swarms/framework/vision.md
new file mode 100644
index 00000000..41dfff72
--- /dev/null
+++ b/docs/swarms/framework/vision.md
@@ -0,0 +1,155 @@
+### Swarms Vision
+
+**Swarms** is dedicated to transforming enterprise automation by offering a robust and intuitive interface for multi-agent collaboration and seamless integration with multiple models. Our mission is to enable enterprises to enhance their operational efficiency and effectiveness through intelligent automation.
+
+#### Vision Statement
+
+**To become the preeminent framework for orchestrating multi-agent collaboration and integration, empowering enterprises to achieve exceptional automation efficiency and operational excellence.**
+
+#### Core Principles
+
+1. **Multi-Agent Collaboration**: Facilitate seamless collaboration between diverse agents to solve complex and dynamic problems.
+2. **Integration**: Provide robust and flexible integration with various models and frameworks to maximize functionality.
+3. **Enterprise Automation**: Deliver enterprise-grade solutions designed for reliability, scalability, and security.
+4. **Open Ecosystem**: Promote an open and extensible ecosystem that encourages innovation, community engagement, and collaborative development.
+
+### Vision Document with Mermaid Graphs
+
+#### Overview Diagram
+
+```mermaid
+graph TD
+ A[Swarms Framework] --> B[Multi-Agent Collaboration]
+ A --> C[Integration with Multiple Models]
+ A --> D[Enterprise Automation]
+ A --> E[Open Ecosystem]
+
+ B --> F[Seamless Communication]
+ B --> G[Collaboration Protocols]
+
+ C --> H[Model Integration]
+ C --> I[Framework Compatibility]
+
+ D --> J[Operational Efficiency]
+ D --> K[Reliability and Scalability]
+
+ E --> L[Encourage Innovation]
+ E --> M[Community Driven]
+```
+
+#### Multi-Agent Collaboration
+
+```mermaid
+graph TD
+ B[Multi-Agent Collaboration] --> F[Seamless Communication]
+ B --> G[Collaboration Protocols]
+
+ F --> N[Cross-Agent Messaging]
+ F --> O[Task Coordination]
+ F --> P[Real-Time Updates]
+
+ G --> Q[Standard APIs]
+ G --> R[Extensible Protocols]
+ G --> S[Security and Compliance]
+
+ N --> T[Agent Messaging Hub]
+ O --> U[Task Assignment and Monitoring]
+ P --> V[Instantaneous Data Sync]
+
+ Q --> W[Unified API Interface]
+ R --> X[Customizable Protocols]
+ S --> Y[Compliance with Standards]
+ S --> Z[Secure Communication Channels]
+```
+
+#### Integration with Multiple Models
+
+```mermaid
+graph TD
+ C[Integration with Multiple Models] --> H[Model Integration]
+ C --> I[Framework Compatibility]
+
+ H --> R[Plug-and-Play Models]
+ H --> S[Model Orchestration]
+ H --> T[Model Versioning]
+
+ I --> U[Support for OpenAI]
+ I --> V[Support for Anthropic]
+ I --> W[Support for Gemini]
+ I --> X[Support for LangChain]
+ I --> Y[Support for AutoGen]
+ I --> Z[Support for Custom Models]
+
+ R --> AA[Easy Model Integration]
+ S --> AB[Dynamic Model Orchestration]
+ T --> AC[Version Control]
+
+ U --> AD[Integration with OpenAI Models]
+ V --> AE[Integration with Anthropic Models]
+ W --> AF[Integration with Gemini Models]
+ X --> AG[Integration with LangChain Models]
+ Y --> AH[Integration with AutoGen Models]
+ Z --> AI[Support for Proprietary Models]
+```
+
+#### Enterprise Automation
+
+```mermaid
+graph TD
+ D[Enterprise Automation] --> J[Operational Efficiency]
+ D --> K[Reliability and Scalability]
+
+ J --> Y[Automate Workflows]
+ J --> Z[Reduce Manual Work]
+ J --> AA[Increase Productivity]
+
+ K --> AB[High Uptime]
+ K --> AC[Enterprise-Grade Security]
+ K --> AD[Scalable Solutions]
+
+ Y --> AE[Workflow Automation Tools]
+ Z --> AF[Eliminate Redundant Tasks]
+ AA --> AG[Boost Employee Efficiency]
+
+ AB --> AH[Robust Infrastructure]
+ AC --> AI[Security Compliance]
+ AD --> AJ[Scale with Demand]
+```
+
+#### Open Ecosystem
+
+```mermaid
+graph TD
+ E[Open Ecosystem] --> L[Encourage Innovation]
+ E --> M[Community Driven]
+
+ L --> AC[Open Source Contributions]
+ L --> AD[Hackathons and Workshops]
+ L --> AE[Research and Development]
+
+ M --> AF[Active Community Support]
+ M --> AG[Collaborative Development]
+ M --> AH[Shared Resources]
+
+ AC --> AI[Community Contributions]
+ AD --> AJ[Innovative Events]
+ AE --> AK[Continuous R&D]
+
+ AF --> AL[Supportive Community]
+ AG --> AM[Joint Development Projects]
+ AH --> AN[Shared Knowledge Base]
+```
+
+---
+
+### Conclusion
+
+Swarms excels in enabling seamless communication and coordination between multiple agents, fostering a collaborative environment where agents can work together to solve complex tasks. Our platform supports cross-agent messaging, task coordination, and real-time updates, ensuring that all agents are synchronized and can efficiently contribute to the collective goal.
+
+Swarms provides robust integration capabilities with a wide array of models, including OpenAI, Anthropic, Gemini, LangChain, AutoGen, and custom models. This ensures that enterprises can leverage the best models available to meet their specific needs, while also allowing for dynamic model orchestration and version control to keep operations up-to-date and effective.
+
+Our framework is designed to enhance operational efficiency through automation. By automating workflows, reducing manual work, and increasing productivity, Swarms helps enterprises achieve higher efficiency and operational excellence. Our solutions are built for high uptime, enterprise-grade security, and scalability, ensuring reliable and secure operations.
+
+Swarms promotes an open and extensible ecosystem, encouraging community-driven innovation and development. We support open-source contributions, organize hackathons and workshops, and continuously invest in research and development. Our active community fosters collaborative development, shared resources, and a supportive environment for innovation.
+
+**Swarms** is dedicated to providing a comprehensive and powerful framework for enterprises seeking to automate operations through multi-agent collaboration and integration with various models. Our commitment to an open ecosystem, enterprise-grade automation solutions, and seamless multi-agent collaboration ensures that Swarms remains the leading choice for enterprises aiming to achieve operational excellence through intelligent automation.
\ No newline at end of file
diff --git a/docs/swarms/glossary.md b/docs/swarms/glossary.md
new file mode 100644
index 00000000..cc59af4a
--- /dev/null
+++ b/docs/swarms/glossary.md
@@ -0,0 +1,48 @@
+# Glossary of Terms
+
+**Agent**:
+An LLM (Large Language Model) equipped with tools and memory, operating with a specific objective in a loop. An agent can perform tasks, interact with other agents, and utilize external tools and memory systems to achieve its goals.
+
+**Swarms**:
+A group of more than two agents working together and communicating to accomplish a shared objective. Swarms enable complex, collaborative tasks that leverage the strengths of multiple agents.
+
+**Tool**:
+A Python function that is converted into a function call, allowing agents to perform specific actions or access external resources. Tools enhance the capabilities of agents by providing specialized functionalities.
+
+**Memory System**:
+A system for managing information retrieval and storage, often implemented as a Retrieval-Augmented Generation (RAG) system or a memory vector database. Memory systems enable agents to recall previous interactions, store new information, and improve decision-making based on historical data.
+
+**LLM (Large Language Model)**:
+A type of AI model designed to understand and generate human-like text. LLMs, such as GPT-3 or GPT-4, are used as the core computational engine for agents.
+
+**System Prompt**:
+A predefined prompt that sets the context and instructions for an agent's task. The system prompt guides the agent's behavior and response generation.
+
+**Max Loops**:
+The maximum number of iterations an agent will perform to complete its task. This parameter helps control the extent of an agent's processing and ensures tasks are completed efficiently.
+
+**Dashboard**:
+A user interface that provides real-time monitoring and control over the agents and their activities. Dashboards can display agent status, logs, and performance metrics.
+
+**Streaming On**:
+A setting that enables agents to stream their output incrementally, providing real-time feedback as they process tasks. This feature is useful for monitoring progress and making adjustments on the fly.
+
+**Verbose**:
+A setting that controls the level of detail in an agent's output and logging. When verbose mode is enabled, the agent provides more detailed information about its operations and decisions.
+
+**Multi-modal**:
+The capability of an agent to process and integrate multiple types of data, such as text, images, and audio. Multi-modal agents can handle more complex tasks that require diverse inputs.
+
+**Autosave**:
+A feature that automatically saves the agent's state and progress at regular intervals. Autosave helps prevent data loss and allows for recovery in case of interruptions.
+
+**Flow**:
+The predefined sequence in which agents in a swarm interact and process tasks. The flow ensures that each agent's output is appropriately passed to the next agent, facilitating coordinated efforts.
+
+**Long Term Memory**:
+A component of the memory system that retains information over extended periods, enabling agents to recall and utilize past interactions and experiences.
+
+**Output Schema**:
+A structured format for the output generated by agents, often defined using data models like Pydantic's BaseModel. Output schemas ensure consistency and clarity in the information produced by agents.
+
+By understanding these terms, you can effectively build and orchestrate agents and swarms, leveraging their capabilities to perform complex, collaborative tasks.
\ No newline at end of file
diff --git a/docs/swarms/install/docker_setup.md b/docs/swarms/install/docker_setup.md
new file mode 100644
index 00000000..da08d9d9
--- /dev/null
+++ b/docs/swarms/install/docker_setup.md
@@ -0,0 +1,186 @@
+# Docker Setup Guide for Contributors to Swarms
+
+
+Welcome to the `swarms` project Docker setup guide. This document will help you establish a Docker-based environment for contributing to `swarms`. Docker provides a consistent and isolated environment, ensuring that all contributors can work in the same settings, reducing the "it works on my machine" syndrome.
+
+### Purpose
+
+The purpose of this guide is to:
+
+- Ensure contributors can quickly set up their development environment.
+- Provide a consistent testing and deployment workflow.
+- Introduce Docker basics and best practices.
+
+### Scope
+
+This guide covers:
+
+- Installing Docker
+- Cloning the `swarms` repository
+- Building a Docker image
+- Running the `swarms` application in a Docker container
+- Running tests using Docker
+- Pushing changes and working with Docker Hub
+
+
+## Docker Installation
+
+### Windows
+
+1. Download Docker Desktop for Windows from the official website.
+2. Install Docker Desktop, ensuring that the "Use Windows containers instead of Linux containers" option is unchecked.
+3. Start Docker Desktop and wait for the Docker engine to start.
+
+### macOS
+
+1. Download Docker Desktop for macOS from the official website.
+2. Follow the installation instructions, drag-and-drop Docker into the Applications folder.
+3. Start Docker Desktop from the Applications folder.
+
+### Linux (Ubuntu)
+
+1. Update your package index: `sudo apt-get update`.
+2. Install packages to allow apt to use a repository over HTTPS.
+3. Add Docker's official GPG key.
+4. Set up the stable repository.
+5. Install the latest version of Docker Engine and containerd.
+
+```bash
+sudo apt-get install docker-ce docker-ce-cli containerd.io
+```
+
+6. Verify that Docker Engine is installed correctly by running the hello-world image.
+
+```bash
+sudo docker run hello-world
+```
+
+### Post-installation Steps for Linux
+
+- Manage Docker as a non-root user.
+- Configure Docker to start on boot.
+
+## Cloning the Repository
+
+```bash
+git clone https://github.com/your-username/swarms.git
+cd swarms
+```
+
+## Docker Basics
+
+### Dockerfile Overview
+
+- Explain the structure and commands of a Dockerfile used in the `swarms` project.
+
+### Building the Image
+
+```bash
+docker build -t swarms-dev .
+```
+
+### Running a Container
+
+```bash
+docker run -it --rm swarms-dev
+```
+
+## Development Workflow with Docker
+
+### Running the Application
+
+- Commands to run the `swarms` application within Docker.
+
+### Making Changes
+
+- How to make changes to the code and reflect those changes within the Docker container.
+
+### Running Tests
+
+- Instructions on running tests using `pytest` within the Docker environment.
+
+## Docker Compose for Local Development
+
+- Introduce Docker Compose and its role in simplifying multi-container setups.
+- Create a `docker-compose.yml` file for the `swarms` project.
+
+
+## Dockerfile
+
+Creating a Dockerfile for deploying the `swarms` framework to the cloud involves setting up the necessary environment to run your Python application, ensuring all dependencies are installed, and configuring the container to execute the desired tasks. Here's an example Dockerfile that sets up such an environment:
+
+```Dockerfile
+# Use an official Python runtime as a parent image
+FROM python:3.11-slim
+
+# Set environment variables
+ENV PYTHONDONTWRITEBYTECODE 1
+ENV PYTHONUNBUFFERED 1
+
+# Set the working directory in the container
+WORKDIR /usr/src/swarm_cloud
+
+# Install system dependencies
+RUN apt-get update \
+ && apt-get -y install gcc \
+ && apt-get clean
+
+# Install Python dependencies
+# COPY requirements.txt and pyproject.toml if you're using poetry for dependency management
+COPY requirements.txt .
+RUN pip install --upgrade pip
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Install the 'swarms' package, assuming it's available on PyPI
+# WARNING: Do not bake real API keys into the image like this. Baked-in ENV
+# values persist in the image layers. Pass secrets at runtime instead, e.g.
+# `docker run -e OPENAI_API_KEY=...`, or use your platform's secrets manager.
+ENV SWARM_API_KEY=your_swarm_api_key_here
+ENV OPENAI_API_KEY=your_openai_key
+RUN pip install swarms
+
+# Copy the rest of the application
+COPY . .
+
+# Add entrypoint script if needed
+# COPY ./entrypoint.sh .
+# RUN chmod +x /usr/src/swarm_cloud/entrypoint.sh
+
+# Expose port if your application has a web interface
+# EXPOSE 5000
+
+# Define environment variable for the swarm to work
+# Add Docker CMD or ENTRYPOINT script to run the application
+# CMD python your_swarm_startup_script.py
+# Or use the entrypoint script if you have one
+# ENTRYPOINT ["/usr/src/swarm_cloud/entrypoint.sh"]
+
+# If you're using `CMD` to execute a Python script, make sure it's executable
+# RUN chmod +x your_swarm_startup_script.py
+```
+
+To build and run this Docker image:
+
+1. Replace `requirements.txt` with your actual requirements file or `pyproject.toml` and `poetry.lock` if you're using Poetry.
+2. Replace `your_swarm_startup_script.py` with the script that starts your application.
+3. If your application requires an API key or other sensitive data, make sure to set these securely, perhaps using environment variables or secrets management solutions provided by your cloud provider.
+4. If you have an entrypoint script, uncomment the `COPY` and `RUN` lines for `entrypoint.sh`.
+5. If your application has a web interface, uncomment the `EXPOSE` line and set it to the correct port.
+
+Now, build your Docker image:
+
+```sh
+docker build -t swarm-cloud .
+```
+
+And run it:
+
+```sh
+docker run -d --name my-swarm-app swarm-cloud
+```
+
+For deploying to the cloud, you'll need to push your Docker image to a container registry (like Docker Hub or a private registry), then pull it from your cloud environment to run it. Cloud providers often have services specifically for this purpose (like AWS ECS, GCP GKE, or Azure AKS). The deployment process will involve:
+
+- Pushing the image to a registry.
+- Configuring cloud services to run your image.
+- Setting up networking, storage, and other cloud resources.
+- Monitoring, logging, and potentially scaling your containers.
+
+Remember to secure sensitive data, use tagged releases for your images, and follow best practices for operating in the cloud.
diff --git a/docs/swarms/install/install.md b/docs/swarms/install/install.md
new file mode 100644
index 00000000..f69a09bd
--- /dev/null
+++ b/docs/swarms/install/install.md
@@ -0,0 +1,288 @@
+# Swarms Installation Guide
+
+
+
+You can install `swarms` with pip in a
+[**Python>=3.10**](https://www.python.org/) environment.
+
+## Prerequisites
+
+Before you begin, ensure you have the following installed:
+
+- Python 3.10 or higher: [Download Python](https://www.python.org/)
+- pip (specific version recommended): `pip >= 21.0`
+- git (for cloning the repository): [Download Git](https://git-scm.com/)
+
+## Installation Options
+
+=== "pip (Recommended)"
+
+ #### Headless Installation
+
+ The headless installation of `swarms` is designed for environments where graphical user interfaces (GUI) are not needed, making it more lightweight and suitable for server-side applications.
+
+ ```bash
+ pip install swarms
+ ```
+
+=== "Development Installation"
+
+ === "Using virtualenv"
+
+ 1. **Clone the repository and navigate to the root directory:**
+
+ ```bash
+ git clone https://github.com/kyegomez/swarms.git
+ cd swarms
+ ```
+
+ 2. **Setup Python environment and activate it:**
+
+ ```bash
+ python3 -m venv venv
+ source venv/bin/activate
+ pip install --upgrade pip
+ ```
+
+ 3. **Install Swarms:**
+
+ - Headless install:
+
+ ```bash
+ pip install -e .
+ ```
+
+ - Desktop install:
+
+ ```bash
+ pip install -e .[desktop]
+ ```
+
+ === "Using Anaconda"
+
+ 1. **Create and activate an Anaconda environment:**
+
+ ```bash
+ conda create -n swarms python=3.10
+ conda activate swarms
+ ```
+
+ 2. **Clone the repository and navigate to the root directory:**
+
+ ```bash
+ git clone https://github.com/kyegomez/swarms.git
+ cd swarms
+ ```
+
+ 3. **Install Swarms:**
+
+ - Headless install:
+
+ ```bash
+ pip install -e .
+ ```
+
+ - Desktop install:
+
+ ```bash
+ pip install -e .[desktop]
+ ```
+
+ === "Using Poetry"
+
+ 1. **Clone the repository and navigate to the root directory:**
+
+ ```bash
+ git clone https://github.com/kyegomez/swarms.git
+ cd swarms
+ ```
+
+ 2. **Setup Python environment and activate it:**
+
+ ```bash
+ poetry env use python3.10
+ poetry shell
+ ```
+
+ 3. **Install Swarms:**
+
+ - Headless install:
+
+ ```bash
+ poetry install
+ ```
+
+ - Desktop install:
+
+ ```bash
+ poetry install --extras "desktop"
+ ```
+
+=== "Using Docker"
+
+ Docker is an excellent option for creating isolated and reproducible environments, suitable for both development and production.
+
+ 1. **Pull the Docker image:**
+
+ ```bash
+ docker pull kyegomez/swarms
+ ```
+
+ 2. **Run the Docker container:**
+
+ ```bash
+ docker run -it --rm kyegomez/swarms
+ ```
+
+ 3. **Build and run a custom Docker image:**
+
+ ```dockerfile
+ # Dockerfile
+ FROM python:3.10-slim
+
+ # Set up environment
+ WORKDIR /app
+ COPY . /app
+
+ # Install dependencies
+ RUN pip install --upgrade pip && \
+ pip install -e .
+
+ CMD ["python", "your_script.py"]
+ ```
+
+ ```bash
+ # Build and run the Docker image
+ docker build -t swarms-custom .
+ docker run -it --rm swarms-custom
+ ```
+
+=== "Using Kubernetes"
+
+ Kubernetes provides an automated way to deploy, scale, and manage containerized applications.
+
+ 1. **Create a Deployment YAML file:**
+
+ ```yaml
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: swarms-deployment
+ spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app: swarms
+ template:
+ metadata:
+ labels:
+ app: swarms
+ spec:
+ containers:
+ - name: swarms
+ image: kyegomez/swarms
+ ports:
+ - containerPort: 8080
+ ```
+
+ 2. **Apply the Deployment:**
+
+ ```bash
+ kubectl apply -f deployment.yaml
+ ```
+
+ 3. **Expose the Deployment:**
+
+ ```bash
+ kubectl expose deployment swarms-deployment --type=LoadBalancer --name=swarms-service
+ ```
+
+=== "CI/CD Pipelines"
+
+ Integrating Swarms into your CI/CD pipeline ensures automated testing and deployment.
+
+ #### Using GitHub Actions
+
+ ```yaml
+ # .github/workflows/ci.yml
+ name: CI
+
+ on:
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+
+ jobs:
+ build:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: '3.10'
+ - name: Install dependencies
+ run: |
+ python -m venv venv
+ source venv/bin/activate
+ pip install --upgrade pip
+ pip install -e .
+ - name: Run tests
+ run: |
+ source venv/bin/activate
+ pytest
+ ```
+
+ #### Using Jenkins
+
+ ```groovy
+ pipeline {
+ agent any
+
+ stages {
+ stage('Clone repository') {
+ steps {
+ git 'https://github.com/kyegomez/swarms.git'
+ }
+ }
+ stage('Setup Python') {
+ steps {
+ sh 'python3 -m venv venv'
+ sh '. venv/bin/activate && pip install --upgrade pip'
+ }
+ }
+ stage('Install dependencies') {
+ steps {
+ sh '. venv/bin/activate && pip install -e .'
+ }
+ }
+ stage('Run tests') {
+ steps {
+ sh '. venv/bin/activate && pytest'
+ }
+ }
+ }
+ }
+ ```
+
+## Javascript
+
+=== "NPM install (Work in Progress)"
+
+ Get started with the NPM implementation of Swarms:
+
+ ```bash
+ npm install swarms-js
+ ```
diff --git a/docs/swarms/install/multi_agent_template.md b/docs/swarms/install/multi_agent_template.md
new file mode 100644
index 00000000..4063ef9e
--- /dev/null
+++ b/docs/swarms/install/multi_agent_template.md
@@ -0,0 +1,6 @@
+# Getting Started with Multi-Agent Collaboration Using the Multi-Agent Github Template
+
+
+The Multi-Agent Github Template, a radically simple, reliable, and high-performance framework, is designed to empower developers and prompt engineers to harness the full potential of multi-agent collaboration. [LINK](https://medium.com/@kyeg/getting-started-with-multi-agent-collaboration-using-the-multi-agent-github-template-0f0a6cba0dc0)
+
+[GITHUB](https://github.com/kyegomez/Multi-Agent-Template-App)
\ No newline at end of file
diff --git a/docs/swarms/memory/azure_openai.md b/docs/swarms/memory/azure_openai.md
new file mode 100644
index 00000000..01b169b7
--- /dev/null
+++ b/docs/swarms/memory/azure_openai.md
@@ -0,0 +1,131 @@
+# Deploying Azure OpenAI in Production: A Comprehensive Guide
+
+In today's fast-paced digital landscape, leveraging cutting-edge technologies has become essential for businesses to stay competitive and provide exceptional services to their customers. One such technology that has gained significant traction is Azure OpenAI, a powerful platform that allows developers to integrate advanced natural language processing (NLP) capabilities into their applications. Whether you're building a chatbot, a content generation system, or any other AI-powered solution, Azure OpenAI offers a robust and scalable solution for production-grade deployment.
+
+In this comprehensive guide, we'll walk through the process of setting up and deploying Azure OpenAI in a production environment. We'll dive deep into the code, provide clear explanations, and share best practices to ensure a smooth and successful implementation.
+
+## Prerequisites:
+Before we begin, it's essential to have the following prerequisites in place:
+
+1. **Python**: You'll need to have Python installed on your system. This guide assumes you're using Python 3.6 or later.
+2. **Azure Subscription**: You'll need an active Azure subscription to access Azure OpenAI services.
+3. **Azure OpenAI Resource**: Create an Azure OpenAI resource in your Azure subscription.
+4. **Python Packages**: Install the required Python packages, including `python-dotenv` and `swarms`.
+
+## Setting up the Environment:
+To kick things off, we'll set up our development environment and install the necessary dependencies.
+
+1. **Create a Virtual Environment**: It's a best practice to create a virtual environment to isolate your project dependencies from the rest of your system. You can create a virtual environment using `venv` or any other virtual environment management tool of your choice.
+
+```
+python -m venv myenv
+```
+
+2. **Activate the Virtual Environment**: Activate the virtual environment to ensure that any packages you install are isolated within the environment.
+
+```
+source myenv/bin/activate # On Windows, use `myenv\Scripts\activate`
+```
+
+3. **Install Required Packages**: Install the `python-dotenv` and `swarms` packages using pip.
+
+```
+pip install python-dotenv swarms
+```
+
+4. **Create a `.env` File**: In the root directory of your project, create a new file called `.env`. This file will store your Azure OpenAI credentials and configuration settings.
+
+```
+AZURE_OPENAI_ENDPOINT=
+AZURE_OPENAI_DEPLOYMENT=
+OPENAI_API_VERSION=
+AZURE_OPENAI_API_KEY=
+AZURE_OPENAI_AD_TOKEN=
+```
+
+Replace the placeholders with your actual Azure OpenAI credentials and configuration settings.
+
+## Connecting to Azure OpenAI:
+Now that we've set up our environment, let's dive into the code that connects to Azure OpenAI and interacts with the language model.
+
+```python
+import os
+from dotenv import load_dotenv
+from swarms import AzureOpenAI
+
+# Load the environment variables
+load_dotenv()
+
+# Create an instance of the AzureOpenAI class
+model = AzureOpenAI(
+ azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
+ deployment_name=os.getenv("AZURE_OPENAI_DEPLOYMENT"),
+ openai_api_version=os.getenv("OPENAI_API_VERSION"),
+ openai_api_key=os.getenv("AZURE_OPENAI_API_KEY"),
+ azure_ad_token=os.getenv("AZURE_OPENAI_AD_TOKEN")
+)
+```
+
+## Let's break down this code:
+
+1. **Import Statements**: We import the necessary modules, including `os` for interacting with the operating system, `load_dotenv` from `python-dotenv` to load environment variables, and `AzureOpenAI` from `swarms` to interact with the Azure OpenAI service.
+
+2. **Load Environment Variables**: We use `load_dotenv()` to load the environment variables stored in the `.env` file we created earlier.
+
+3. **Create AzureOpenAI Instance**: We create an instance of the `AzureOpenAI` class by passing in the required configuration parameters:
+ - `azure_endpoint`: The endpoint URL for your Azure OpenAI resource.
+ - `deployment_name`: The name of the deployment you want to use.
+ - `openai_api_version`: The version of the OpenAI API you want to use.
+ - `openai_api_key`: Your Azure OpenAI API key, which authenticates your requests.
+ - `azure_ad_token`: An optional Azure Active Directory (AAD) token for additional security.
+
+Querying the Language Model:
+With our connection to Azure OpenAI established, we can now query the language model and receive responses.
+
+```python
+# Define the prompt
+prompt = "Analyze this load document and assess it for any risks and create a table in markdown format."
+
+# Generate a response
+response = model(prompt)
+print(response)
+```
+
+## Here's what's happening:
+
+1. **Define the Prompt**: We define a prompt, which is the input text or question we want to feed into the language model.
+
+2. **Generate a Response**: We call the `model` instance with the `prompt` as an argument. This triggers the Azure OpenAI service to process the prompt and generate a response.
+
+3. **Print the Response**: Finally, we print the response received from the language model.
+
+Running the Code:
+To run the code, save it in a Python file (e.g., `main.py`) and execute it from the command line:
+
+```
+python main.py
+```
+
+## Best Practices for Production Deployment:
+While the provided code serves as a basic example, there are several best practices to consider when deploying Azure OpenAI in a production environment:
+
+1. **Secure Credentials Management**: Instead of storing sensitive credentials like API keys in your codebase, consider using secure storage solutions like Azure Key Vault or environment variables managed by your cloud provider.
+
+2. **Error Handling and Retries**: Implement robust error handling and retry mechanisms to handle potential failures or rate-limiting scenarios.
+
+3. **Logging and Monitoring**: Implement comprehensive logging and monitoring strategies to track application performance, identify issues, and gather insights for optimization.
+
+4. **Scalability and Load Testing**: Conduct load testing to ensure your application can handle anticipated traffic volumes and scale appropriately based on demand.
+
+5. **Caching and Optimization**: Explore caching strategies and performance optimizations to improve response times and reduce the load on the Azure OpenAI service.
+
+6. **Integration with Other Services**: Depending on your use case, you may need to integrate Azure OpenAI with other Azure services or third-party tools for tasks like data processing, storage, or analysis.
+
+7. **Compliance and Security**: Ensure your application adheres to relevant compliance standards and security best practices, especially when handling sensitive data.
+
+## Conclusion:
+Azure OpenAI is a powerful platform that enables developers to integrate advanced natural language processing capabilities into their applications. By following the steps outlined in this guide, you can set up a production-ready environment for deploying Azure OpenAI and start leveraging its capabilities in your projects.
+
+Remember, this guide serves as a starting point, and there are numerous additional features and capabilities within Azure OpenAI that you can explore to enhance your applications further. As with any production deployment, it's crucial to follow best practices, conduct thorough testing, and implement robust monitoring and security measures.
+
+With the right approach and careful planning, you can successfully deploy Azure OpenAI in a production environment and unlock the power of cutting-edge language models to drive innovation and provide exceptional experiences for your users.
\ No newline at end of file
diff --git a/docs/swarms/memory/diy_memory.md b/docs/swarms/memory/diy_memory.md
new file mode 100644
index 00000000..ffb98cae
--- /dev/null
+++ b/docs/swarms/memory/diy_memory.md
@@ -0,0 +1,610 @@
+# Building Custom Vector Memory Databases with the BaseVectorDatabase Class
+
+In the age of large language models (LLMs) and AI-powered applications, efficient memory management has become a crucial component. Vector databases, which store and retrieve data in high-dimensional vector spaces, have emerged as powerful tools for handling the vast amounts of data generated and consumed by AI systems. However, integrating vector databases into your applications can be a daunting task, requiring in-depth knowledge of their underlying architectures and APIs.
+
+Enter the `BaseVectorDatabase` class, a powerful abstraction layer designed to simplify the process of creating and integrating custom vector memory databases into your AI applications. By inheriting from this class, developers can build tailored vector database solutions that seamlessly integrate with their existing systems, enabling efficient storage, retrieval, and manipulation of high-dimensional data.
+
+In this comprehensive guide, we'll explore the `BaseVectorDatabase` class in detail, covering its core functionality and diving deep into the process of creating custom vector memory databases using popular solutions like PostgreSQL, Pinecone, Chroma, FAISS, and more. Whether you're a seasoned AI developer or just starting to explore the world of vector databases, this guide will provide you with the knowledge and tools necessary to build robust, scalable, and efficient memory solutions for your AI applications.
+
+## Understanding the BaseVectorDatabase Class
+
+Before we dive into the implementation details, let's take a closer look at the `BaseVectorDatabase` class and its core functionality.
+
+The `BaseVectorDatabase` class is an abstract base class that defines the interface for interacting with a vector database. It serves as a blueprint for creating concrete implementations of vector databases, ensuring a consistent and standardized approach to database operations across different systems.
+
+The class provides a set of abstract methods that define the essential functionality required for working with vector databases, such as connecting to the database, executing queries, and performing CRUD (Create, Read, Update, Delete) operations.
+
+Here's a breakdown of the abstract methods defined in the `BaseVectorDatabase` class:
+
+1\. `connect()`: This method establishes a connection to the vector database.
+
+2\. `close()`: This method closes the connection to the vector database.
+
+3\. `query(query: str)`: This method executes a given query on the vector database.
+
+4\. `fetch_all()`: This method retrieves all rows from the result set of a query.
+
+5\. `fetch_one()`: This method retrieves a single row from the result set of a query.
+
+6\. `add(doc: str)`: This method adds a new record to the vector database.
+
+7\. `get(query: str)`: This method retrieves a record from the vector database based on a given query.
+
+8\. `update(doc)`: This method updates a record in the vector database.
+
+9\. `delete(message)`: This method deletes a record from the vector database.
+
+By inheriting from the `BaseVectorDatabase` class and implementing these abstract methods, developers can create concrete vector database implementations tailored to their specific needs and requirements.
+
+## Creating a Custom Vector Memory Database
+
+Now that we have a solid understanding of the `BaseVectorDatabase` class, let's dive into the process of creating a custom vector memory database by inheriting from this class. Throughout this guide, we'll explore various vector database solutions, including PostgreSQL, Pinecone, Chroma, FAISS, and more, showcasing how to integrate them seamlessly into your AI applications.
+
+### Step 1: Inherit from the BaseVectorDatabase Class
+
+The first step in creating a custom vector memory database is to inherit from the `BaseVectorDatabase` class. This will provide your custom implementation with the foundational structure and interface defined by the abstract class.
+
+```python
+
+from abc import ABC, abstractmethod
+from swarms import BaseVectorDatabase
+
+class MyCustomVectorDatabase(BaseVectorDatabase):
+
+    def __init__(self, *args, **kwargs):
+
+        # Custom initialization logic
+
+        pass
+
+```
+
+In the example above, we define a new class `MyCustomVectorDatabase` that inherits from the `BaseVectorDatabase` class. Within the `__init__` method, you can add any custom initialization logic specific to your vector database implementation.
+
+### Step 2: Implement the Abstract Methods
+
+The next step is to implement the abstract methods defined in the `BaseVectorDatabase` class. These methods provide the core functionality for interacting with your vector database, such as connecting, querying, and performing CRUD operations.
+
+```python
+from swarms import BaseVectorDatabase
+
+
+class MyCustomVectorDatabase(BaseVectorDatabase):
+
+    def __init__(self, *args, **kwargs):
+
+        # Custom initialization logic
+
+        pass
+
+    def connect(self):
+
+        # Implementation for connecting to the vector database
+
+        pass
+
+    def close(self):
+
+        # Implementation for closing the vector database connection
+
+        pass
+
+    def query(self, query: str):
+
+        # Implementation for executing a query on the vector database
+
+        pass
+
+    def fetch_all(self):
+
+        # Implementation for fetching all rows from the result set
+
+        pass
+
+    def fetch_one(self):
+
+        # Implementation for fetching a single row from the result set
+
+        pass
+
+    def add(self, doc: str):
+
+        # Implementation for adding a new record to the vector database
+
+        pass
+
+    def get(self, query: str):
+
+        # Implementation for retrieving a record from the vector database
+
+        pass
+
+    def update(self, doc):
+
+        # Implementation for updating a record in the vector database
+
+        pass
+
+    def delete(self, message):
+
+        # Implementation for deleting a record from the vector database
+
+        pass
+
+```
+
+In this example, we define placeholders for each of the abstract methods within the `MyCustomVectorDatabase` class. These placeholders will be replaced with the actual implementation logic specific to your chosen vector database solution.
+
+### Step 3: Choose and Integrate Your Vector Database Solution
+
+With the foundational structure in place, it's time to choose a specific vector database solution and integrate it into your custom implementation. In this guide, we'll explore several popular vector database solutions, including PostgreSQL, Pinecone, Chroma, FAISS, and more, providing examples and guidance on how to integrate them seamlessly.
+
+### PostgreSQL Integration
+
+PostgreSQL is a powerful open-source relational database management system that supports vector data types and operations, making it a viable choice for building custom vector memory databases.
+
+```python
+
+import psycopg2
+from swarms import BaseVectorDatabase
+
+class PostgreSQLVectorDatabase(MyCustomVectorDatabase):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # PostgreSQL connection details
+
+        self.conn = psycopg2.connect(
+
+            host="localhost",
+
+            database="vector_db",
+
+            user="postgres",
+
+            password="your_password"
+
+        )
+
+        self.cur = self.conn.cursor()
+
+    def connect(self):
+
+        # PostgreSQL connection logic
+
+        pass
+
+    def close(self):
+
+        # Close PostgreSQL connection
+
+        self.cur.close()
+
+        self.conn.close()
+
+    def query(self, query: str):
+
+        # Execute PostgreSQL query
+
+        self.cur.execute(query)
+
+    def fetch_all(self):
+
+        # Fetch all rows from PostgreSQL result set
+
+        return self.cur.fetchall()
+
+    # Implement other abstract methods
+
+```
+
+In this example, we define a `PostgreSQLVectorDatabase` class that inherits from `MyCustomVectorDatabase`. Within the `__init__` method, we establish a connection to a PostgreSQL database using the `psycopg2` library. We then implement the `connect()`, `close()`, `query()`, and `fetch_all()` methods specific to PostgreSQL.
+
+### Pinecone Integration
+
+Pinecone is a managed vector database service that provides efficient storage, retrieval, and manipulation of high-dimensional vector data.
+
+```python
+
+import pinecone
+from swarms import BaseVectorDatabase
+
+
+class PineconeVectorDatabase(MyCustomVectorDatabase):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # Pinecone initialization
+
+        pinecone.init(api_key="your_api_key", environment="your_environment")
+
+        self.index = pinecone.Index("your_index_name")
+
+    def connect(self):
+
+        # Pinecone connection logic
+
+        pass
+
+    def close(self):
+
+        # Close Pinecone connection
+
+        pass
+
+    def query(self, query: str):
+
+        # Execute Pinecone query
+
+        results = self.index.query(query)
+
+        return results
+
+    def add(self, doc: str):
+
+        # Add document to Pinecone index
+
+        self.index.upsert([("id", doc)])
+
+    # Implement other abstract methods
+
+```
+
+In this example, we define a `PineconeVectorDatabase` class that inherits from `MyCustomVectorDatabase`. Within the `__init__` method, we initialize the Pinecone client and create an index. We then implement the `query()` and `add()` methods specific to the Pinecone API.
+
+### Chroma Integration
+
+Chroma is an open-source vector database library that provides efficient storage, retrieval, and manipulation of vector data using various backends, including DuckDB, Chromadb, and more.
+
+```python
+import logging
+import os
+import uuid
+from typing import Optional
+
+import chromadb
+from dotenv import load_dotenv
+
+from swarms.utils.data_to_text import data_to_text
+from swarms.utils.markdown_message import display_markdown_message
+from swarms.memory.base_vectordb import BaseVectorDatabase
+
+# Load environment variables
+load_dotenv()
+
+
+# Results storage using local ChromaDB
+class ChromaDB(BaseVectorDatabase):
+ """
+
+ ChromaDB database
+
+    Args:
+        metric (str): The similarity metric to use.
+        output_dir (str): The name of the collection to store the results in.
+        limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
+        n_results (int, optional): The number of results to retrieve. Defaults to 3.
+
+    Methods:
+        add: Add a document to the collection.
+        query: Query documents from the collection.
+
+    Examples:
+        >>> chromadb = ChromaDB(
+        ...     metric="cosine",
+        ...     output_dir="results",
+        ... )
+        >>> doc_id = chromadb.add("document text")
+        >>> docs = chromadb.query("search text")
+    """
+
+ def __init__(
+ self,
+ metric: str = "cosine",
+ output_dir: str = "swarms",
+ limit_tokens: Optional[int] = 1000,
+ n_results: int = 3,
+ docs_folder: str = None,
+ verbose: bool = False,
+ *args,
+ **kwargs,
+ ):
+ self.metric = metric
+ self.output_dir = output_dir
+ self.limit_tokens = limit_tokens
+ self.n_results = n_results
+ self.docs_folder = docs_folder
+ self.verbose = verbose
+
+        # Enable ChromaDB logging when verbose is set
+ if verbose:
+ logging.getLogger("chromadb").setLevel(logging.INFO)
+
+ # Create Chroma collection
+ chroma_persist_dir = "chroma"
+ chroma_client = chromadb.PersistentClient(
+ settings=chromadb.config.Settings(
+ persist_directory=chroma_persist_dir,
+ ),
+ *args,
+ **kwargs,
+ )
+
+ # Create ChromaDB client
+ self.client = chromadb.Client()
+
+ # Create Chroma collection
+ self.collection = chroma_client.get_or_create_collection(
+ name=output_dir,
+ metadata={"hnsw:space": metric},
+ *args,
+ **kwargs,
+ )
+ display_markdown_message(
+ "ChromaDB collection created:"
+ f" {self.collection.name} with metric: {self.metric} and"
+ f" output directory: {self.output_dir}"
+ )
+
+ # If docs
+ if docs_folder:
+ display_markdown_message(
+ f"Traversing directory: {docs_folder}"
+ )
+ self.traverse_directory()
+
+ def add(
+ self,
+ document: str,
+ *args,
+ **kwargs,
+ ):
+ """
+ Add a document to the ChromaDB collection.
+
+ Args:
+ document (str): The document to be added.
+ condition (bool, optional): The condition to check before adding the document. Defaults to True.
+
+ Returns:
+ str: The ID of the added document.
+ """
+ try:
+ doc_id = str(uuid.uuid4())
+ self.collection.add(
+ ids=[doc_id],
+ documents=[document],
+ *args,
+ **kwargs,
+ )
+ print("-----------------")
+ print("Document added successfully")
+ print("-----------------")
+ return doc_id
+ except Exception as e:
+ raise Exception(f"Failed to add document: {str(e)}")
+
+ def query(
+ self,
+ query_text: str,
+ *args,
+ **kwargs,
+ ):
+ """
+ Query documents from the ChromaDB collection.
+
+ Args:
+ query (str): The query string.
+ n_docs (int, optional): The number of documents to retrieve. Defaults to 1.
+
+ Returns:
+ dict: The retrieved documents.
+ """
+ try:
+ docs = self.collection.query(
+ query_texts=[query_text],
+ n_results=self.n_results,
+ *args,
+ **kwargs,
+ )["documents"]
+ return docs[0]
+ except Exception as e:
+ raise Exception(f"Failed to query documents: {str(e)}")
+
+ def traverse_directory(self):
+ """
+ Traverse through every file in the given directory and its subdirectories,
+ and return the paths of all files.
+ Parameters:
+ - directory_name (str): The name of the directory to traverse.
+ Returns:
+ - list: A list of paths to each file in the directory and its subdirectories.
+ """
+ added_to_db = False
+
+ for root, dirs, files in os.walk(self.docs_folder):
+ for file in files:
+ file = os.path.join(self.docs_folder, file)
+ _, ext = os.path.splitext(file)
+ data = data_to_text(file)
+ added_to_db = self.add(str(data))
+ print(f"{file} added to Database")
+
+ return added_to_db
+
+```
+
+In this example, we define a `ChromaDB` class that inherits from `BaseVectorDatabase`. Within the `__init__` method, we create a Chroma client and get or create a collection. We then implement the `query()` and `add()` methods specific to the Chroma API.
+
+### FAISS Integration
+
+FAISS (Facebook AI Similarity Search) is a library for efficient similarity search and clustering of dense vectors, developed by Meta AI.
+
+```python
+
+import faiss
+
+class FAISSVectorDatabase(MyCustomVectorDatabase):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # FAISS initialization
+
+        self.index = faiss.IndexFlatL2(64)  # Assuming 64-dimensional vectors
+
+        self.index_path = "faiss_index.index"
+
+    def connect(self):
+
+        # FAISS connection logic
+
+        self.index = faiss.read_index(self.index_path)
+
+    def close(self):
+
+        # Close FAISS connection
+
+        faiss.write_index(self.index, self.index_path)
+
+    def query(self, query: str):
+
+        # Execute FAISS query
+
+        query_vector = ...  # TODO: convert `query` into an embedding vector
+
+        distances, indices = self.index.search(query_vector, k=10)
+
+        return [(self.index.reconstruct(i), d) for i, d in zip(indices, distances)]
+
+    def add(self, doc: str):
+
+        # Add document to FAISS index
+
+        doc_vector = ...  # TODO: convert `doc` into an embedding vector
+
+        self.index.add(doc_vector)
+
+    # Implement other abstract methods
+
+```
+
+Now, how do you integrate a vector database with an agent? This is how:
+
+## Integrate Memory with `Agent`
+
+```python
+import os
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent struct
+from swarms import Agent, OpenAIChat
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+
+# Initialize the FAISS vector database
+faiss = FAISSVectorDatabase()
+
+# Initialize the language model
+llm = OpenAIChat(
+ temperature=0.5,
+ model_name="gpt-4",
+ openai_api_key=api_key,
+ max_tokens=1000,
+)
+
+## Initialize the workflow
+agent = Agent(
+ llm=llm,
+ max_loops=4,
+ autosave=True,
+ dashboard=True,
+ long_term_memory=faiss,
+)
+
+# Run the workflow on a task
+out = agent.run("Generate a 10,000 word blog on health and wellness.")
+print(out)
+```
+
+In this example, we define a `FAISSVectorDatabase` class that inherits from `MyCustomVectorDatabase`. Within the `__init__` method, we create a FAISS index and set the index path. We then implement the `connect()`, `close()`, `query()`, and `add()` methods specific to the FAISS library, assuming 64-dimensional vectors for simplicity.
+
+These examples provide a starting point for integrating various vector database solutions into your custom implementation. Each solution has its own strengths, weaknesses, and trade-offs, so it's essential to carefully evaluate your requirements and choose the solution that best fits your needs.
+
+### Step 4: Add Custom Functionality and Optimizations
+
+Once you've integrated your chosen vector database solution, you can further extend and optimize your custom implementation by adding custom functionality and performance optimizations.
+
+#### Custom Functionality:
+
+- **Indexing Strategies**: Implement custom indexing strategies to optimize search performance and memory usage.
+
+- **Data Preprocessing**: Add data preprocessing logic to handle different data formats, perform embedding, and prepare data for storage in the vector database.
+
+- **Query Optimization**: Introduce query optimization techniques, such as query caching, result filtering, or query rewriting, to improve query performance.
+
+- **Data Partitioning**: Implement data partitioning strategies to distribute data across multiple nodes or shards for better scalability and performance.
+
+- **Metadata Management**: Introduce metadata management capabilities to store and retrieve additional information associated with the vector data.
+
+Performance Optimizations:
+
+- **Caching**: Implement caching mechanisms to reduce redundant computations and improve response times.
+
+- **Asynchronous Operations**: Utilize asynchronous programming techniques to improve concurrency and responsiveness.
+
+- **Multithreading and Parallelization**: Leverage multithreading and parallelization to distribute computationally intensive tasks across multiple cores or processors.
+
+- **Load Balancing**: Implement load balancing strategies to distribute workloads evenly across multiple nodes or instances.
+
+- **Monitoring and Profiling**: Introduce monitoring and profiling tools to identify performance bottlenecks and optimize critical sections of your code.
+
+By adding custom functionality and performance optimizations, you can tailor your custom vector memory database to meet the specific requirements of your AI applications, ensuring efficient and scalable data management.
+
+### Best Practices and Considerations
+
+Building custom vector memory databases is a powerful but complex endeavor. To ensure the success and longevity of your implementation, it's essential to follow best practices and consider potential challenges and considerations.
+
+1\. **Scalability and Performance Testing**: Vector databases can quickly grow in size and complexity as your AI applications handle increasing amounts of data. Thoroughly test your implementation for scalability and performance under various load conditions, and optimize accordingly.
+
+2\. **Data Quality and Integrity**: Ensure that the data stored in your vector database is accurate, consistent, and free from duplicates or errors. Implement data validation and cleansing mechanisms to maintain data quality and integrity.
+
+3\. **Security and Access Control**: Vector databases may store sensitive or proprietary data. Implement robust security measures, such as encryption, access controls, and auditing mechanisms, to protect your data from unauthorized access or breaches.
+
+4\. **Distributed Architectures**: As your data and workloads grow, consider implementing distributed architectures to distribute the storage and computational load across multiple nodes or clusters. This can improve scalability, fault tolerance, and overall performance.
+
+5\. **Data Versioning and Backup**: Implement data versioning and backup strategies to ensure data integrity and enable recovery in case of errors or system failures.
+
+6\. **Documentation and Maintainability**: Well-documented code and comprehensive documentation are essential for ensuring the long-term maintainability and extensibility of your custom vector memory database implementation.
+
+7\. **Continuous Integration and Deployment**: Adopt continuous integration and deployment practices to streamline the development, testing, and deployment processes, ensuring that changes are thoroughly tested and deployed efficiently.
+
+8\. **Compliance and Regulatory Requirements**: Depending on your industry and use case, ensure that your custom vector memory database implementation complies with relevant regulations and standards, such as data privacy laws or industry-specific guidelines.
+
+9\. **Community Engagement and Collaboration**: Stay engaged with the vector database community, participate in discussions, and collaborate with other developers to share knowledge, best practices, and insights.
+
+By following these best practices and considering potential challenges, you can build robust, scalable, and efficient custom vector memory databases that meet the demanding requirements of modern AI applications.
+
+# Conclusion
+
+In this comprehensive guide, we've explored the `BaseVectorDatabase` class and its role in simplifying the process of creating custom vector memory databases. We've covered the core functionality of the class, walked through the step-by-step process of inheriting and extending its functionality, and provided examples of integrating popular vector database solutions like PostgreSQL, Pinecone, Chroma, and FAISS.
+
+Building custom vector memory databases empowers developers to create tailored and efficient data management solutions that seamlessly integrate with their AI applications. By leveraging the power of vector databases, you can unlock new possibilities in data storage, retrieval, and manipulation, enabling your AI systems to handle vast amounts of high-dimensional data with ease.
+
+Remember, the journey of building custom vector memory databases is an iterative and collaborative process that requires continuous learning, adaptation, and refinement. Embrace the challenges, stay up-to-date with the latest developments in vector databases and AI, and continuously strive to optimize and enhance your implementations.
+
+As you embark on this journey, keep in mind the importance of scalability, performance, data quality, security, and compliance. Foster an environment of collaboration, knowledge sharing, and community engagement to ensure that your custom vector memory databases are robust, reliable, and capable of meeting the ever-evolving demands of the AI landscape.
+
+So, dive in, leverage the power of the `BaseVectorDatabase` class, and create the custom vector memory databases that will drive the future of AI-powered applications.
\ No newline at end of file
diff --git a/docs/swarms/memory/pg.md b/docs/swarms/memory/pg.md
new file mode 100644
index 00000000..3695e11c
--- /dev/null
+++ b/docs/swarms/memory/pg.md
@@ -0,0 +1,350 @@
+# `PgVectorVectorStore` Documentation
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [Overview](#overview)
+3. [Class Definition](#class-definition)
+4. [Functionality and Usage](#functionality-and-usage)
+ - [Setting Up the Database](#setting-up-the-database)
+ - [Upserting Vectors](#upserting-vectors)
+ - [Loading Vector Entries](#loading-vector-entries)
+ - [Querying Vectors](#querying-vectors)
+5. [Additional Information](#additional-information)
+6. [References and Resources](#references-and-resources)
+
+---
+
+## 1. Introduction
+
+Welcome to the documentation for the Swarms `PgVectorVectorStore` class! Swarms is a library that provides various memory and storage options for high-dimensional vectors. In this documentation, we will focus on the `PgVectorVectorStore` class, which is a vector storage driver that uses PostgreSQL with the PGVector extension as the underlying storage engine.
+
+### 1.1 Purpose
+
+The `PgVectorVectorStore` class allows you to interact with a PostgreSQL database and store high-dimensional vectors efficiently. By using Swarms with PostgreSQL and PGVector, you can manage and work with vector data in your applications with ease.
+
+### 1.2 Key Features
+
+- Integration with PostgreSQL and PGVector for vector storage.
+- Simple and convenient API for upserting vectors, querying, and loading entries.
+- Support for creating and managing vector collections in PostgreSQL.
+
+---
+
+## 2. Overview
+
+Before diving into the details of the `PgVectorVectorStore` class, let's provide an overview of its purpose and functionality.
+
+The `PgVectorVectorStore` class is designed to:
+
+- Store high-dimensional vectors in a PostgreSQL database with the PGVector extension.
+- Offer a seamless and efficient way to upsert vectors into the database.
+- Provide methods for loading individual vector entries or all vector entries in a collection.
+- Support vector queries, allowing you to find vectors similar to a given query vector.
+
+In the following sections, we will explore the class definition, its parameters, and how to use it effectively.
+
+---
+
+## 3. Class Definition
+
+Let's start by examining the class definition of `PgVectorVectorStore`, including its attributes and parameters.
+
+```python
+class PgVectorVectorStore(BaseVectorStore):
+ """
+ A vector store driver to Postgres using the PGVector extension.
+
+ Attributes:
+ connection_string: An optional string describing the target Postgres database instance.
+ create_engine_params: Additional configuration params passed when creating the database connection.
+ engine: An optional sqlalchemy Postgres engine to use.
+        table_name: Optionally specify the name of the table to be used to store vectors.
+ ...
+ """
+```
+
+Attributes:
+
+- `connection_string` (Optional[str]): An optional string describing the target Postgres database instance.
+- `create_engine_params` (dict): Additional configuration parameters passed when creating the database connection.
+- `engine` (Optional[Engine]): An optional SQLAlchemy Postgres engine to use.
+- `table_name` (str): Optionally specify the name of the table to be used to store vectors.
+
+### 3.1 Attribute Validators
+
+The class includes validators for the `connection_string` and `engine` attributes to ensure their proper usage. These validators help maintain consistency in attribute values.
+
+### 3.2 Initialization
+
+During initialization, the class checks if an engine is provided. If an engine is not provided, it creates a new database connection using the `connection_string` and `create_engine_params`.
+
+---
+
+## 4. Functionality and Usage
+
+In this section, we will explore the functionality of the `PgVectorVectorStore` class and provide detailed instructions on how to use it effectively.
+
+### 4.1 Setting Up the Database
+
+Before using the `PgVectorVectorStore` to store and query vectors, you need to set up the database. This includes creating the necessary extensions and database schema. You can do this using the `setup` method.
+
+```python
+def setup(
+ self,
+ create_schema: bool = True,
+ install_uuid_extension: bool = True,
+ install_vector_extension: bool = True,
+) -> None:
+ """
+ Provides a mechanism to initialize the database schema and extensions.
+
+ Parameters:
+ - create_schema (bool): If True, creates the necessary database schema for vector storage. Default: True.
+ - install_uuid_extension (bool): If True, installs the UUID extension in the database. Default: True.
+ - install_vector_extension (bool): If True, installs the PGVector extension in the database. Default: True.
+ """
+```
+
+#### Example 1: Setting Up the Database
+
+```python
+# Initialize the PgVectorVectorStore instance
+vector_store = PgVectorVectorStore(
+ connection_string="your-db-connection-string", table_name="your-table-name"
+)
+
+# Set up the database with default settings
+vector_store.setup()
+```
+
+#### Example 2: Customized Database Setup
+
+```python
+# Initialize the PgVectorVectorStore instance
+vector_store = PgVectorVectorStore(
+ connection_string="your-db-connection-string", table_name="your-table-name"
+)
+
+# Set up the database with customized settings
+vector_store.setup(
+ create_schema=False, install_uuid_extension=True, install_vector_extension=True
+)
+```
+
+### 4.2 Upserting Vectors
+
+The `upsert_vector` method allows you to insert or update a vector in the collection. You can specify the vector, an optional vector ID, namespace, and metadata.
+
+```python
+def upsert_vector(
+ self,
+ vector: list[float],
+ vector_id: Optional[str] = None,
+ namespace: Optional[str] = None,
+ meta: Optional[dict] = None,
+ **kwargs,
+) -> str:
+ """
+ Inserts or updates a vector in the collection.
+
+ Parameters:
+ - vector (list[float]): The vector to upsert.
+ - vector_id (Optional[str]): An optional ID for the vector. If not provided, a unique ID will be generated.
+ - namespace (Optional[str]): An optional namespace for the vector.
+ - meta (Optional[dict]): An optional metadata dictionary associated with the vector.
+ - **kwargs: Additional keyword arguments.
+
+ Returns:
+ - str: The ID of the upserted vector.
+ """
+```
+
+#### Example: Upserting a Vector
+
+```python
+# Initialize the PgVectorVectorStore instance
+vector_store = PgVectorVectorStore(
+ connection_string="your-db-connection-string", table_name="your-table-name"
+)
+
+# Define a vector and upsert it
+vector = [0.1, 0.2, 0.3, 0.4]
+vector_id = "unique-vector-id"
+namespace = "your-namespace"
+meta = {"key1": "value1", "key2": "value2"}
+
+vector_store.upsert_vector(
+ vector=vector, vector_id=vector_id, namespace=namespace, meta=meta
+)
+```
+
+### 4.3 Loading Vector Entries
+
+You can load vector entries from the collection using the `load_entry` and `load_entries` methods.
+
+#### 4.3.1 Loading a Single Entry
+
+The `load_entry` method allows you to load a specific vector entry based on its identifier and optional namespace.
+
+```python
+def load_entry(
+ self, vector_id: str, namespace: Optional[str] = None
+) -> BaseVectorStore.Entry:
+ """
+ Retrieves a specific vector entry from the collection based on its identifier and optional namespace.
+
+ Parameters:
+ - vector_id (str): The ID of the vector to retrieve.
+ - namespace (Optional[str]): An optional namespace for filtering. Default: None.
+
+ Returns:
+ - BaseVectorStore.Entry: The loaded vector entry.
+ """
+```
+
+#### Example: Loading a Single Entry
+
+```python
+# Initialize the PgVectorVectorStore instance
+vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name")
+
+# Load a specific vector entry
+loaded_entry = vector_store.load_entry(vector_id="unique-vector-id", namespace="your-namespace")
+
+if loaded_entry is not None:
+ loaded_vector = loaded_entry.vector
+ loaded_meta = loaded_entry.meta
+ # Use the loaded vector and metadata as needed
+else:
+    pass  # Vector not found
+```
+
+#### 4.3.2 Loading Multiple Entries
+
+The `load_entries` method allows you to load all vector entries from the collection, optionally filtering by namespace.
+
+```python
+def load_entries(self, namespace: Optional[str] = None) -> list[BaseVectorStore.Entry]:
+ """
+ Retrieves all vector entries from the collection, optionally filtering to only those that match the provided namespace.
+
+ Parameters:
+ - namespace (Optional[str]): An optional namespace for filtering. Default: None.
+
+ Returns:
+ - list[BaseVectorStore.Entry]: A list of loaded vector entries.
+ """
+```
+
+#### Example: Loading Multiple Entries
+
+```python
+# Initialize the PgVectorVectorStore instance
+vector_store = PgVectorVectorStore(
+ connection_string="your-db-connection-string", table_name="your-table-name"
+)
+
+# Load all vector entries in the specified namespace
+entries = vector_store.load_entries(namespace="your-namespace")
+
+# Process the loaded entries
+for entry in entries:
+ vector_id = entry.id
+ vector = entry.vector
+ meta = entry.meta
+
+ # Handle the loaded entries as needed
+```
+
+### 4.4 Querying Vectors
+
+You can perform vector queries to find vectors similar to a given query vector using the `query` method. You can specify the query string, the maximum number of results to return, and other options.
+
+```python
+def query(
+ self,
+ query: str,
+ count: Optional[int] = BaseVectorStore.DEFAULT_QUERY_COUNT,
+ namespace: Optional[str] = None,
+ include_vectors: bool = False,
+ distance_metric: str = "cosine_distance",
+ **kwargs,
+) -> list[BaseVectorStore.QueryResult]:
+ """
+ Performs a search on the collection to find vectors similar to the provided input vector,
+ optionally filtering to only those that match the provided namespace.
+
+ Parameters:
+ - query (str): The query string to find similar vectors.
+ - count (Optional[int]): Maximum number of results to return. Default: BaseVectorStore.DEFAULT_QUERY_COUNT.
+ - namespace (Optional[str]): An optional namespace for filtering. Default: None.
+ - include_vectors (bool): If True, includes vectors in the query results. Default: False.
+ - distance_metric (str): The distance metric to use for similarity measurement.
+ Options: "cosine_distance", "l2_distance", "inner_product". Default: "cosine_distance".
+ - **kwargs: Additional keyword arguments.
+
+ Returns:
+ - list[BaseVectorStore.QueryResult]: A list of query results, each containing vector ID, vector (if included), score, and metadata.
+ """
+```
+
+#### Example: Querying Vectors
+
+```python
+# Initialize the PgVectorVectorStore instance
+vector_store = PgVectorVectorStore(
+ connection_string="your-db-connection-string", table_name="your-table-name"
+)
+
+# Perform a vector query
+query_string = "your-query-string"
+count = 10 # Maximum number of results to return
+namespace = "your-namespace"
+include_vectors = False # Set to True to include vectors in results
+distance_metric = "cosine_distance"
+
+results = vector_store.query(
+ query=query_string,
+ count=count,
+ namespace=namespace,
+ include_vectors=include_vectors,
+ distance_metric=distance_metric,
+)
+
+# Process the query results
+for result in results:
+ vector_id = result.id
+ vector = result.vector
+ score = result.score
+ meta = result.meta
+
+ # Handle the results as needed
+```
+
+---
+
+## 5. Additional Information
+
+Here are some additional tips and information for using the `PgVectorVectorStore` class effectively:
+
+- When upserting vectors, you can generate a unique vector ID using a hash of the vector's content to ensure uniqueness.
+- Consider using namespaces to organize and categorize vectors within your PostgreSQL database.
+- You can choose from different distance metrics (cosine distance, L2 distance, inner product) for vector querying based on your application's requirements.
+- Keep your database connection string secure and follow best practices for database access control.
+
+---
+
+## 6. References and Resources
+
+Here are some references and resources for further information on Swarms and PostgreSQL with PGVector:
+
+- [Swarms GitHub Repository](https://github.com/kyegomez/swarms): Swarms library on GitHub for updates and contributions.
+- [PostgreSQL Official Website](https://www.postgresql.org/): Official PostgreSQL website for documentation and resources.
+- [PGVector GitHub Repository](https://github.com/ankane/pgvector): PGVector extension on GitHub for detailed information.
+
+---
+
+This concludes the documentation for the Swarms `PgVectorVectorStore` class. You now have a comprehensive understanding of how to use Swarms with PostgreSQL and PGVector for vector storage. If you have any further questions or need assistance, please refer to the provided references and resources. Happy coding!
\ No newline at end of file
diff --git a/docs/swarms/memory/pinecone.md b/docs/swarms/memory/pinecone.md
new file mode 100644
index 00000000..f8ca0f2e
--- /dev/null
+++ b/docs/swarms/memory/pinecone.md
@@ -0,0 +1,293 @@
+# `PineconeVector` Documentation
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [PineconeVector Class](#pineconevector-class)
+3. [Installation](#installation)
+4. [Usage](#usage)
+ - [Creating a PineconeVector Instance](#creating-a-pineconevector-instance)
+ - [Creating an Index](#creating-an-index)
+ - [Upserting Vectors](#upserting-vectors)
+ - [Querying the Index](#querying-the-index)
+ - [Loading an Entry](#loading-an-entry)
+ - [Loading Entries](#loading-entries)
+5. [Additional Information](#additional-information)
+6. [References and Resources](#references-and-resources)
+
+---
+
+## 1. Introduction
+
+Welcome to the Swarms documentation! Swarms is a library that provides various memory and storage options for high-dimensional vectors. In this documentation, we will focus on the `PineconeVector` class, which is a vector storage driver that uses Pinecone as the underlying storage engine.
+
+### 1.1 Purpose
+
+The `PineconeVector` class allows you to interact with Pinecone, a vector database that enables the storage, search, and retrieval of high-dimensional vectors with speed and low latency. By using Swarms with Pinecone, you can easily manage and work with vector data in your applications without the need to manage infrastructure.
+
+### 1.2 Key Features
+
+- Seamless integration with Pinecone for vector storage.
+- Simple and convenient API for upserting vectors, querying, and loading entries.
+- Support for creating and managing indexes.
+
+---
+
+## 2. PineconeVector Class
+
+The `PineconeVector` class is the core component of Swarms that interacts with Pinecone for vector storage. Below, we will provide an in-depth overview of this class, including its purpose, parameters, and methods.
+
+### 2.1 Class Definition
+
+```python
+class PineconeVector(BaseVector):
+```
+
+### 2.2 Parameters
+
+The `PineconeVector` class accepts the following parameters during initialization:
+
+- `api_key` (str): The API key for your Pinecone account.
+- `index_name` (str): The name of the index to use.
+- `environment` (str): The environment to use. Either "us-west1-gcp" or "us-east1-gcp".
+- `project_name` (str, optional): The name of the project to use. Defaults to `None`.
+- `index` (pinecone.Index, optional): The Pinecone index to use. Defaults to `None`.
+
+### 2.3 Methods
+
+The `PineconeVector` class provides several methods for interacting with Pinecone:
+
+#### 2.3.1 `upsert_vector`
+
+```python
+def upsert_vector(
+ self,
+ vector: list[float],
+ vector_id: Optional[str] = None,
+ namespace: Optional[str] = None,
+ meta: Optional[dict] = None,
+ **kwargs
+) -> str:
+```
+
+Upserts a vector into the index.
+
+- `vector` (list[float]): The vector to upsert.
+- `vector_id` (Optional[str]): An optional ID for the vector. If not provided, a unique ID will be generated.
+- `namespace` (Optional[str]): An optional namespace for the vector.
+- `meta` (Optional[dict]): An optional metadata dictionary associated with the vector.
+- `**kwargs`: Additional keyword arguments.
+
+#### 2.3.2 `load_entry`
+
+```python
+def load_entry(
+ self, vector_id: str, namespace: Optional[str] = None
+) -> Optional[BaseVector.Entry]:
+```
+
+Loads a single vector from the index.
+
+- `vector_id` (str): The ID of the vector to load.
+- `namespace` (Optional[str]): An optional namespace for the vector.
+
+#### 2.3.3 `load_entries`
+
+```python
+def load_entries(self, namespace: Optional[str] = None) -> list[BaseVector.Entry]:
+```
+
+Loads all vectors from the index.
+
+- `namespace` (Optional[str]): An optional namespace for the vectors.
+
+#### 2.3.4 `query`
+
+```python
+def query(
+ self,
+ query: str,
+ count: Optional[int] = None,
+ namespace: Optional[str] = None,
+ include_vectors: bool = False,
+ include_metadata=True,
+ **kwargs
+) -> list[BaseVector.QueryResult]:
+```
+
+Queries the index for vectors similar to the given query string.
+
+- `query` (str): The query string.
+- `count` (Optional[int]): The maximum number of results to return. If not provided, a default value is used.
+- `namespace` (Optional[str]): An optional namespace for the query.
+- `include_vectors` (bool): Whether to include vectors in the query results.
+- `include_metadata` (bool): Whether to include metadata in the query results.
+- `**kwargs`: Additional keyword arguments.
+
+#### 2.3.5 `create_index`
+
+```python
+def create_index(self, name: str, **kwargs) -> None:
+```
+
+Creates a new index.
+
+- `name` (str): The name of the index to create.
+- `**kwargs`: Additional keyword arguments.
+
+---
+
+## 3. Installation
+
+To use the Swarms library and the `PineconeVector` class, you will need to install the library and its dependencies. Follow these steps to get started:
+
+1. Install Swarms:
+
+```bash
+pip install swarms
+```
+
+2. Install Pinecone:
+
+You will also need a Pinecone account and API key. Follow the instructions on the Pinecone website to create an account and obtain an API key.
+
+3. Import the necessary modules in your Python code:
+
+```python
+from swarms.memory.vector_stores.pinecone import PineconeVector
+```
+
+Now you're ready to use the `PineconeVector` class to work with Pinecone for vector storage.
+
+---
+
+## 4. Usage
+
+In this section, we will provide detailed examples of how to use the `PineconeVector` class for vector storage with Pinecone.
+
+### 4.1 Creating a PineconeVector Instance
+
+To get started, you need to create an instance of the `PineconeVector` class. You will need your Pinecone API key, the name of the index you want to use, and the environment. You can also specify an optional project name if you have one.
+
+```python
+pv = PineconeVector(
+ api_key="your-api-key",
+ index_name="your-index-name",
+ environment="us-west1-gcp",
+ project_name="your-project-name",
+)
+```
+
+### 4.2 Creating an Index
+
+Before you can upsert vectors, you need to create an index in Pinecone. You can use the `create_index` method for this purpose.
+
+```python
+pv.create_index("your-index-name")
+```
+
+### 4.3 Upserting Vectors
+
+You can upsert vectors into the Pinecone index using the `upsert_vector` method. This method allows you to specify the vector, an optional vector ID, namespace, and metadata.
+
+```python
+vector = [0.1, 0.2, 0.3, 0.4]
+vector_id = "unique-vector-id"
+namespace = "your-namespace"
+meta = {"key1": "value1", "key2": "value2"}
+
+pv.upsert_vector(vector=vector, vector_id=vector_id, namespace=namespace, meta=meta)
+```
+
+### 4.4 Querying the Index
+
+You can query the Pinecone index to find vectors similar to a given query string using the `query` method. You can specify the query string, the maximum number of results to return, and other options.
+
+```python
+query_string = "your-query-string"
+count = 10 # Maximum number of results to return
+namespace = "your-namespace"
+include_vectors = False # Set to True to include vectors in results
+include_metadata = True
+
+results = pv.query(
+ query=query_string,
+ count=count,
+ namespace=namespace,
+ include_vectors=include_vectors,
+ include_metadata=include_metadata,
+)
+
+# Process the query results
+for result in results:
+ vector_id = result.id
+ vector = result.vector
+ score = result.score
+ meta = result.meta
+
+ # Handle the results as needed
+```
+
+### 4.5 Loading an Entry
+
+You can load a single vector entry from the Pinecone index using the `load_entry` method. Provide the vector ID and an optional namespace.
+
+```python
+vector_id = "your-vector-id"
+namespace = "your-namespace"
+
+entry = pv.load_entry(vector_id=vector_id, namespace=namespace)
+
+if entry is not None:
+ loaded_vector = entry.vector
+ loaded_meta = entry.meta
+
+ # Use the loaded vector and metadata
+else:
+    # Vector not found
+    pass
+```
+
+### 4.6 Loading Entries
+
+To load all vectors from the Pinecone index, you can use the `load_entries` method. You can also specify an optional namespace.
+
+```python
+namespace = "your-namespace"
+
+entries = pv.load_entries(namespace=namespace)
+
+# Process the loaded entries
+for entry in entries:
+ vector_id = entry.id
+ vector = entry.vector
+ meta = entry.meta
+
+ # Handle the loaded entries as needed
+```
+
+---
+
+## 5. Additional Information
+
+In this section, we provide additional information and tips for using the `PineconeVector` class effectively.
+
+- When upserting vectors, you can generate a unique vector ID using a hash of the vector's content to ensure uniqueness.
+- Consider using namespaces to organize and categorize vectors within your Pinecone index.
+- Pinecone provides powerful querying capabilities, so be sure to explore and leverage its features to retrieve relevant vectors efficiently.
+- Keep your Pinecone API key secure and follow Pinecone's best practices for API key management.
+
+---
+
+## 6. References and Resources
+
+Here are some references and resources for further information on Pinecone and Swarms:
+
+- [Pinecone Website](https://www.pinecone.io/): Official Pinecone website for documentation and resources.
+- [Pinecone Documentation](https://docs.pinecone.io/): Detailed documentation for Pinecone.
+- [Swarms GitHub Repository](https://github.com/kyegomez/swarms): Swarms library on GitHub for updates and contributions.
+
+---
+
+This concludes the documentation for the Swarms library and the `PineconeVector` class. You now have a deep understanding of how to use Swarms with Pinecone for vector storage. If you have any further questions or need assistance, please refer to the provided references and resources. Happy coding!
\ No newline at end of file
diff --git a/docs/swarms/memory/qdrant.md b/docs/swarms/memory/qdrant.md
new file mode 100644
index 00000000..cfc65670
--- /dev/null
+++ b/docs/swarms/memory/qdrant.md
@@ -0,0 +1,86 @@
+# Qdrant Client Library
+
+## Overview
+
+The Qdrant Client Library is designed for interacting with the Qdrant vector database, allowing efficient storage and retrieval of high-dimensional vector data. It integrates with machine learning models for embedding and is particularly suited for search and recommendation systems.
+
+## Installation
+
+```bash
+pip install qdrant-client sentence-transformers httpx
+```
+
+## Class Definition: Qdrant
+
+```python
+class Qdrant:
+ def __init__(
+ self,
+ api_key: str,
+ host: str,
+ port: int = 6333,
+ collection_name: str = "qdrant",
+ model_name: str = "BAAI/bge-small-en-v1.5",
+ https: bool = True,
+ ):
+ ...
+```
+
+### Constructor Parameters
+
+| Parameter | Type | Description | Default Value |
+|-----------------|---------|--------------------------------------------------|-----------------------|
+| api_key | str | API key for authentication. | - |
+| host | str | Host address of the Qdrant server. | - |
+| port | int | Port number for the Qdrant server. | 6333 |
+| collection_name | str | Name of the collection to be used or created. | "qdrant" |
+| model_name | str | Name of the sentence transformer model. | "BAAI/bge-small-en-v1.5" |
+| https | bool | Flag to use HTTPS for connection. | True |
+
+### Methods
+
+#### `_load_embedding_model(model_name: str)`
+
+Loads the sentence embedding model.
+
+#### `_setup_collection()`
+
+Checks if the specified collection exists in Qdrant; if not, creates it.
+
+#### `add_vectors(docs: List[dict]) -> OperationResponse`
+
+Adds vectors to the Qdrant collection.
+
+#### `search_vectors(query: str, limit: int = 3) -> SearchResult`
+
+Searches the Qdrant collection for vectors similar to the query vector.
+
+## Usage Examples
+
+### Example 1: Setting Up the Qdrant Client
+
+```python
+from swarms.memory.qdrant import Qdrant
+
+qdrant_client = Qdrant(api_key="your_api_key", host="localhost", port=6333)
+```
+
+### Example 2: Adding Vectors to a Collection
+
+```python
+documents = [{"page_content": "Sample text 1"}, {"page_content": "Sample text 2"}]
+
+operation_info = qdrant_client.add_vectors(documents)
+print(operation_info)
+```
+
+### Example 3: Searching for Vectors
+
+```python
+search_result = qdrant_client.search_vectors("Sample search query")
+print(search_result)
+```
+
+## Further Information
+
+Refer to the [Qdrant Documentation](https://qdrant.tech/docs) for more details on the Qdrant vector database.
diff --git a/docs/swarms/memory/short_term_memory.md b/docs/swarms/memory/short_term_memory.md
new file mode 100644
index 00000000..9ee3a738
--- /dev/null
+++ b/docs/swarms/memory/short_term_memory.md
@@ -0,0 +1,250 @@
+# Short-Term Memory Module Documentation
+
+## Introduction
+The Short-Term Memory module is a component of the SWARMS framework designed for managing short-term and medium-term memory in a multi-agent system. This documentation provides a detailed explanation of the Short-Term Memory module, its purpose, functions, and usage.
+
+### Purpose
+The Short-Term Memory module serves the following purposes:
+1. To store and manage messages in short-term memory.
+2. To provide functions for retrieving, updating, and clearing memory.
+3. To facilitate searching for specific terms within the memory.
+4. To enable saving and loading memory data to/from a file.
+
+### Class Definition
+```python
+class ShortTermMemory(BaseStructure):
+ def __init__(
+ self,
+ return_str: bool = True,
+ autosave: bool = True,
+ *args,
+ **kwargs,
+ ):
+ ...
+```
+
+#### Parameters
+| Parameter | Type | Default Value | Description |
+|---------------------|----------|---------------|------------------------------------------------------------------------------------------------------------------|
+| `return_str` | bool | True | If True, returns memory as a string. |
+| `autosave` | bool | True | If True, enables automatic saving of memory data to a file. |
+| `*args`, `**kwargs` | | | Additional arguments and keyword arguments (not used in the constructor but allowed for flexibility). |
+
+### Functions
+
+#### 1. `add`
+```python
+def add(self, role: str = None, message: str = None, *args, **kwargs):
+```
+
+- Adds a message to the short-term memory.
+- Parameters:
+ - `role` (str, optional): Role associated with the message.
+ - `message` (str, optional): The message to be added.
+- Returns: The added memory.
+
+##### Example 1: Adding a Message to Short-Term Memory
+```python
+memory.add(role="Agent 1", message="Received task assignment.")
+```
+
+##### Example 2: Adding Multiple Messages to Short-Term Memory
+```python
+messages = [("Agent 1", "Received task assignment."), ("Agent 2", "Task completed.")]
+for role, message in messages:
+ memory.add(role=role, message=message)
+```
+
+#### 2. `get_short_term`
+```python
+def get_short_term(self):
+```
+
+- Retrieves the short-term memory.
+- Returns: The contents of the short-term memory.
+
+##### Example: Retrieving Short-Term Memory
+```python
+short_term_memory = memory.get_short_term()
+for entry in short_term_memory:
+ print(entry["role"], ":", entry["message"])
+```
+
+#### 3. `get_medium_term`
+```python
+def get_medium_term(self):
+```
+
+- Retrieves the medium-term memory.
+- Returns: The contents of the medium-term memory.
+
+##### Example: Retrieving Medium-Term Memory
+```python
+medium_term_memory = memory.get_medium_term()
+for entry in medium_term_memory:
+ print(entry["role"], ":", entry["message"])
+```
+
+#### 4. `clear_medium_term`
+```python
+def clear_medium_term(self):
+```
+
+- Clears the medium-term memory.
+
+##### Example: Clearing Medium-Term Memory
+```python
+memory.clear_medium_term()
+```
+
+#### 5. `get_short_term_memory_str`
+```python
+def get_short_term_memory_str(self, *args, **kwargs):
+```
+
+- Retrieves the short-term memory as a string.
+- Returns: A string representation of the short-term memory.
+
+##### Example: Getting Short-Term Memory as a String
+```python
+short_term_memory_str = memory.get_short_term_memory_str()
+print(short_term_memory_str)
+```
+
+#### 6. `update_short_term`
+```python
+def update_short_term(self, index, role: str, message: str, *args, **kwargs):
+```
+
+- Updates a message in the short-term memory.
+- Parameters:
+ - `index` (int): The index of the message to update.
+ - `role` (str): New role for the message.
+ - `message` (str): New message content.
+- Returns: None.
+
+##### Example: Updating a Message in Short-Term Memory
+```python
+memory.update_short_term(
+ index=0, role="Updated Role", message="Updated message content."
+)
+```
+
+#### 7. `clear`
+```python
+def clear(self):
+```
+
+- Clears the short-term memory.
+
+##### Example: Clearing Short-Term Memory
+```python
+memory.clear()
+```
+
+#### 8. `search_memory`
+```python
+def search_memory(self, term):
+```
+
+- Searches the memory for a specific term.
+- Parameters:
+ - `term` (str): The term to search for.
+- Returns: A dictionary containing search results for short-term and medium-term memory.
+
+##### Example: Searching Memory for a Term
+```python
+search_results = memory.search_memory("task")
+print("Short-Term Memory Results:", search_results["short_term"])
+print("Medium-Term Memory Results:", search_results["medium_term"])
+```
+
+#### 9. `return_shortmemory_as_str`
+```python
+def return_shortmemory_as_str(self):
+```
+
+- Returns the memory as a string.
+
+##### Example: Returning Short-Term Memory as a String
+```python
+short_term_memory_str = memory.return_shortmemory_as_str()
+print(short_term_memory_str)
+```
+
+#### 10. `move_to_medium_term`
+```python
+def move_to_medium_term(self, index):
+```
+
+- Moves a message from the short-term memory to the medium-term memory.
+- Parameters:
+ - `index` (int): The index of the message to move.
+
+##### Example: Moving a Message to Medium-Term Memory
+```python
+memory.move_to_medium_term(index=0)
+```
+
+#### 11. `return_medium_memory_as_str`
+```python
+def return_medium_memory_as_str(self):
+```
+
+- Returns the medium-term memory as a string.
+
+##### Example: Returning Medium-Term Memory as a String
+```python
+medium_term_memory_str = memory.return_medium_memory_as_str()
+print(medium_term_memory_str)
+```
+
+#### 12. `save_to_file`
+```python
+def save_to_file(self, filename: str):
+```
+
+- Saves the memory data to a file.
+- Parameters:
+ - `filename` (str): The name of the file to save the data to.
+
+##### Example: Saving Memory Data to a File
+```python
+memory.save_to_file("memory_data.json")
+```
+
+#### 13. `load_from_file`
+```python
+def load_from_file(self, filename: str, *args, **kwargs):
+```
+
+- Loads memory data from a file.
+- Parameters:
+ - `filename` (str): The name of the file to load data from.
+
+##### Example: Loading Memory Data from a File
+```python
+memory.load_from_file("memory_data.json")
+```
+
+### Additional Information and Tips
+
+- To use the Short-Term Memory module effectively, consider the following tips:
+  - Use the `add` function to store messages in short-term memory.
+  - Retrieve memory contents using `get_short_term` and `get_medium_term` functions.
+ - Clear memory as needed using `clear` and `clear_medium_term` functions.
+ - Search for specific terms within the memory using the `search_memory` function.
+ - Save and load memory data to/from files using `save_to_file` and `load_from_file` functions.
+
+- Ensure proper exception handling when using memory functions to handle potential errors gracefully.
+
+- When using the `search_memory` function, iterate through the results dictionary to access search results for short-term and medium-term memory.
+
+### References and Resources
+
+- For more information on multi-agent systems and memory management, refer to the SWARMS framework documentation: [SWARMS Documentation](https://swarms.apac.ai/).
+
+- For advanced memory management and customization, explore the SWARMS framework source code.
+
diff --git a/docs/swarms/memory/weaviate.md b/docs/swarms/memory/weaviate.md
new file mode 100644
index 00000000..dc264653
--- /dev/null
+++ b/docs/swarms/memory/weaviate.md
@@ -0,0 +1,204 @@
+# Weaviate API Client Documentation
+
+## Overview
+
+The Weaviate API Client is an interface to Weaviate, a vector database with a GraphQL API. This client allows you to interact with Weaviate programmatically, making it easier to create collections, add objects, query data, update objects, and delete objects within your Weaviate instance.
+
+This documentation provides a comprehensive guide on how to use the Weaviate API Client, including its initialization, methods, and usage examples.
+
+## Table of Contents
+
+- [Installation](#installation)
+- [Initialization](#initialization)
+- [Methods](#methods)
+ - [create_collection](#create-collection)
+ - [add](#add)
+ - [query](#query)
+ - [update](#update)
+ - [delete](#delete)
+- [Examples](#examples)
+
+## Installation
+
+Before using the Weaviate API Client, make sure to install the `swarms` library. You can install it using pip:
+
+```bash
+pip install swarms
+```
+
+## Initialization
+
+To use the Weaviate API Client, you need to initialize an instance of the `WeaviateDB` class. Here are the parameters you can pass to the constructor:
+
+| Parameter | Type | Description |
+|----------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------|
+| `http_host` | str | The HTTP host of the Weaviate server. |
+| `http_port` | str | The HTTP port of the Weaviate server. |
+| `http_secure` | bool | Whether to use HTTPS. |
+| `grpc_host` | Optional[str] | The gRPC host of the Weaviate server. (Optional) |
+| `grpc_port` | Optional[str] | The gRPC port of the Weaviate server. (Optional) |
+| `grpc_secure` | Optional[bool] | Whether to use gRPC over TLS. (Optional) |
+| `auth_client_secret` | Optional[Any] | The authentication client secret. (Optional) |
+| `additional_headers` | Optional[Dict[str, str]] | Additional headers to send with requests. (Optional) |
+| `additional_config` | Optional[weaviate.AdditionalConfig] | Additional configuration for the client. (Optional) |
+| `connection_params` | Dict[str, Any] | Dictionary containing connection parameters. This parameter is used internally and can be ignored in most cases. |
+
+Here's an example of how to initialize a WeaviateDB:
+
+```python
+from swarms.memory import WeaviateDB
+
+weaviate_client = WeaviateDB(
+ http_host="YOUR_HTTP_HOST",
+ http_port="YOUR_HTTP_PORT",
+ http_secure=True,
+ grpc_host="YOUR_gRPC_HOST",
+ grpc_port="YOUR_gRPC_PORT",
+ grpc_secure=True,
+ auth_client_secret="YOUR_APIKEY",
+ additional_headers={"X-OpenAI-Api-Key": "YOUR_OPENAI_APIKEY"},
+ additional_config=None, # You can pass additional configuration here
+)
+```
+
+## Methods
+
+### `create_collection`
+
+The `create_collection` method allows you to create a new collection in Weaviate. A collection is a container for storing objects with specific properties.
+
+#### Parameters
+
+- `name` (str): The name of the collection.
+- `properties` (List[Dict[str, Any]]): A list of dictionaries specifying the properties of objects to be stored in the collection.
+- `vectorizer_config` (Any, optional): Additional vectorizer configuration for the collection. (Optional)
+
+#### Usage
+
+```python
+weaviate_client.create_collection(
+ name="my_collection",
+ properties=[
+ {"name": "property1", "dataType": ["string"]},
+ {"name": "property2", "dataType": ["int"]},
+ ],
+ vectorizer_config=None, # Optional vectorizer configuration
+)
+```
+
+### `add`
+
+The `add` method allows you to add an object to a specified collection in Weaviate.
+
+#### Parameters
+
+- `collection_name` (str): The name of the collection where the object will be added.
+- `properties` (Dict[str, Any]): A dictionary specifying the properties of the object to be added.
+
+#### Usage
+
+```python
+weaviate_client.add(
+ collection_name="my_collection", properties={"property1": "value1", "property2": 42}
+)
+```
+
+### `query`
+
+The `query` method allows you to query objects from a specified collection in Weaviate.
+
+#### Parameters
+
+- `collection_name` (str): The name of the collection to query.
+- `query` (str): The query string specifying the search criteria.
+- `limit` (int, optional): The maximum number of results to return. (Default: 10)
+
+#### Usage
+
+```python
+results = weaviate_client.query(
+ collection_name="my_collection",
+ query="property1:value1",
+    limit=20,  # Optional, specify the limit if needed
+)
+```
+
+### `update`
+
+The `update` method allows you to update an object in a specified collection in Weaviate.
+
+#### Parameters
+
+- `collection_name` (str): The name of the collection where the object exists.
+- `object_id` (str): The ID of the object to be updated.
+- `properties` (Dict[str, Any]): A dictionary specifying the properties to update.
+
+#### Usage
+
+```python
+weaviate_client.update(
+ collection_name="my_collection",
+ object_id="object123",
+ properties={"property1": "new_value", "property2": 99},
+)
+```
+
+### `delete`
+
+The `delete` method allows you to delete an object from a specified collection in Weaviate.
+
+#### Parameters
+
+- `collection_name` (str): The name of the collection from which to delete the object.
+- `object_id` (str): The ID of the object to delete.
+
+#### Usage
+
+```python
+weaviate_client.delete(collection_name="my_collection", object_id="object123")
+```
+
+## Examples
+
+Here are three examples demonstrating how to use the Weaviate API Client for common tasks:
+
+### Example 1: Creating a Collection
+
+```python
+weaviate_client.create_collection(
+ name="people",
+ properties=[
+ {"name": "name", "dataType": ["string"]},
+ {"name": "age", "dataType": ["int"]},
+ ],
+)
+```
+
+### Example 2: Adding an Object
+
+```python
+weaviate_client.add(collection_name="people", properties={"name": "John", "age": 30})
+```
+
+### Example 3: Querying Objects
+
+```python
+results = weaviate_client.query(collection_name="people", query="name:John", limit=5)
+```
+
+These examples cover the basic operations of creating collections, adding objects, and querying objects using the Weaviate API Client.
+
+## Additional Information and Tips
+
+- If you encounter any errors during the operations, the client will raise exceptions with informative error messages.
+- You can explore more advanced features and configurations in the Weaviate documentation.
+- Make sure to handle authentication and security appropriately when using the client in production environments.
+
+## References and Resources
+
+- [Weaviate Documentation](https://weaviate.readthedocs.io/en/latest/): Official documentation for Weaviate.
+- [Weaviate GitHub Repository](https://github.com/semi-technologies/weaviate): The source code and issue tracker for Weaviate.
+
+This documentation provides a comprehensive guide on using the Weaviate API Client to interact with Weaviate, making it easier to manage and query your data.
\ No newline at end of file
diff --git a/docs/swarms/models/anthropic.md b/docs/swarms/models/anthropic.md
new file mode 100644
index 00000000..438adfbe
--- /dev/null
+++ b/docs/swarms/models/anthropic.md
@@ -0,0 +1,109 @@
+# **Documentation for the `Anthropic` Class**
+
+## **Overview and Introduction**
+
+The `Anthropic` class provides an interface to interact with the Anthropic large language models. This class encapsulates the necessary functionality to request completions from the Anthropic API based on a provided prompt and other configurable parameters.
+
+### **Key Concepts and Terminology**
+
+- **Anthropic**: A large language model, akin to GPT-3 and its successors.
+- **Prompt**: A piece of text that serves as the starting point for model completions.
+- **Stop Sequences**: Specific tokens or sequences to indicate when the model should stop generating.
+- **Tokens**: Discrete pieces of information in a text. For example, in English, a token can be as short as one character or as long as one word.
+
+## **Class Definition**
+
+### `Anthropic`
+```python
+class Anthropic:
+ """Anthropic large language models."""
+```
+
+### Parameters:
+
+- `model (str)`: The name of the model to use for completions. Default is "claude-2".
+
+- `max_tokens_to_sample (int)`: Maximum number of tokens to generate in the output. Default is 256.
+
+- `temperature (float, optional)`: Sampling temperature. A higher value will make the output more random, while a lower value will make it more deterministic.
+
+- `top_k (int, optional)`: Sample from the top-k most probable next tokens. Setting this parameter can reduce randomness in the output.
+
+- `top_p (float, optional)`: Sample from the smallest set of tokens such that their cumulative probability exceeds the specified value. Used in nucleus sampling to provide a balance between randomness and determinism.
+
+- `streaming (bool)`: Whether to stream the output or not. Default is False.
+
+- `default_request_timeout (int, optional)`: Default timeout in seconds for API requests. Default is 600.
+
+### **Methods and their Functionality**
+
+#### `_default_params(self) -> dict`
+
+- Provides the default parameters for calling the Anthropic API.
+
+- **Returns**: A dictionary containing the default parameters.
+
+#### `generate(self, prompt: str, stop: list[str] = None) -> str`
+
+- Calls out to Anthropic's completion endpoint to generate text based on the given prompt.
+
+- **Parameters**:
+ - `prompt (str)`: The input text to provide context for the generated text.
+
+ - `stop (list[str], optional)`: Sequences to indicate when the model should stop generating.
+
+- **Returns**: A string containing the model's generated completion based on the prompt.
+
+#### `__call__(self, prompt: str, stop: list[str] = None) -> str`
+
+- An alternative to the `generate` method that allows calling the class instance directly.
+
+- **Parameters**:
+ - `prompt (str)`: The input text to provide context for the generated text.
+
+ - `stop (list[str], optional)`: Sequences to indicate when the model should stop generating.
+
+- **Returns**: A string containing the model's generated completion based on the prompt.
+
+## **Usage Examples**
+
+```python
+# Import necessary modules and classes
+from swarms.models import Anthropic
+
+# Initialize an instance of the Anthropic class
+model = Anthropic(anthropic_api_key="")
+
+# Using the generate method
+completion_1 = model.generate("What is the capital of France?")
+print(completion_1)
+
+# Using the __call__ method
+completion_2 = model("How far is the moon from the earth?", stop=["miles", "km"])
+print(completion_2)
+```
+
+## **Mathematical Formula**
+
+The underlying operations of the `Anthropic` class involve probabilistic sampling based on token logits from the Anthropic model. Mathematically, the process of generating a token \( t \) from the given logits \( l \) can be described by the softmax function:
+
+\[ P(t) = \frac{e^{l_t}}{\sum_{i} e^{l_i}} \]
+
+Where:
+- \( P(t) \) is the probability of token \( t \).
+- \( l_t \) is the logit corresponding to token \( t \).
+- The summation runs over all possible tokens.
+
+The temperature, top-k, and top-p parameters are further used to modulate the probabilities.
+
+## **Additional Information and Tips**
+
+- Ensure you have a valid `ANTHROPIC_API_KEY` set as an environment variable or passed during class instantiation.
+
+- Always handle exceptions that may arise from API timeouts or invalid prompts.
+
+## **References and Resources**
+
+- [Anthropic's official documentation](https://www.anthropic.com/docs)
+
+- [The Curious Case of Neural Text Degeneration](https://arxiv.org/abs/1904.09751) for a deeper understanding of token sampling.
\ No newline at end of file
diff --git a/docs/swarms/models/base_llm.md b/docs/swarms/models/base_llm.md
new file mode 100644
index 00000000..0c678165
--- /dev/null
+++ b/docs/swarms/models/base_llm.md
@@ -0,0 +1,227 @@
+# Language Model Interface Documentation
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [Abstract Language Model](#abstract-language-model)
+ - [Initialization](#initialization)
+ - [Attributes](#attributes)
+ - [Methods](#methods)
+3. [Implementation](#implementation)
+4. [Usage Examples](#usage-examples)
+5. [Additional Features](#additional-features)
+6. [Performance Metrics](#performance-metrics)
+7. [Logging and Checkpoints](#logging-and-checkpoints)
+8. [Resource Utilization Tracking](#resource-utilization-tracking)
+9. [Conclusion](#conclusion)
+
+---
+
+## 1. Introduction
+
+The Language Model Interface (`BaseLLM`) is a flexible and extensible framework for working with various language models. This documentation provides a comprehensive guide to the interface, its attributes, methods, and usage examples. Whether you're using a pre-trained language model or building your own, this interface can help streamline the process of text generation, chatbots, summarization, and more.
+
+## 2. Abstract Language Model
+
+### Initialization
+
+The `BaseLLM` class provides a common interface for language models. It can be initialized with various parameters to customize model behavior. Here are the initialization parameters:
+
+| Parameter | Description | Default Value |
+|------------------------|-------------------------------------------------------------------------------------------------|---------------|
+| `model_name` | The name of the language model to use. | None |
+| `max_tokens` | The maximum number of tokens in the generated text. | None |
+| `temperature` | The temperature parameter for controlling randomness in text generation. | None |
+| `top_k` | The top-k parameter for filtering words in text generation. | None |
+| `top_p` | The top-p parameter for filtering words in text generation. | None |
+| `system_prompt` | A system-level prompt to set context for generation. | None |
+| `beam_width` | The beam width for beam search. | None |
+| `num_return_sequences` | The number of sequences to return in the output. | None |
+| `seed` | The random seed for reproducibility. | None |
+| `frequency_penalty` | The frequency penalty parameter for promoting word diversity. | None |
+| `presence_penalty` | The presence penalty parameter for discouraging repetitions. | None |
+| `stop_token` | A stop token to indicate the end of generated text. | None |
+| `length_penalty` | The length penalty parameter for controlling the output length. | None |
+| `role` | The role of the language model (e.g., assistant, user, etc.). | None |
+| `max_length` | The maximum length of generated sequences. | None |
+| `do_sample` | Whether to use sampling during text generation. | None |
+| `early_stopping` | Whether to use early stopping during text generation. | None |
+| `num_beams` | The number of beams to use in beam search. | None |
+| `repition_penalty` | The repetition penalty parameter for discouraging repeated tokens. | None |
+| `pad_token_id` | The token ID for padding. | None |
+| `eos_token_id` | The token ID for the end of a sequence. | None |
+| `bos_token_id` | The token ID for the beginning of a sequence. | None |
+| `device` | The device to run the model on (e.g., 'cpu' or 'cuda'). | None |
+
+### Attributes
+
+- `model_name`: The name of the language model being used.
+- `max_tokens`: The maximum number of tokens in generated text.
+- `temperature`: The temperature parameter controlling randomness.
+- `top_k`: The top-k parameter for word filtering.
+- `top_p`: The top-p parameter for word filtering.
+- `system_prompt`: A system-level prompt for context.
+- `beam_width`: The beam width for beam search.
+- `num_return_sequences`: The number of output sequences.
+- `seed`: The random seed for reproducibility.
+- `frequency_penalty`: The frequency penalty parameter.
+- `presence_penalty`: The presence penalty parameter.
+- `stop_token`: The stop token to indicate text end.
+- `length_penalty`: The length penalty parameter.
+- `role`: The role of the language model.
+- `max_length`: The maximum length of generated sequences.
+- `do_sample`: Whether to use sampling during generation.
+- `early_stopping`: Whether to use early stopping.
+- `num_beams`: The number of beams in beam search.
+- `repition_penalty`: The repetition penalty parameter.
+- `pad_token_id`: The token ID for padding.
+- `eos_token_id`: The token ID for the end of a sequence.
+- `bos_token_id`: The token ID for the beginning of a sequence.
+- `device`: The device used for model execution.
+- `history`: A list of conversation history.
+
+### Methods
+
+The `BaseLLM` class defines several methods for working with language models:
+
+- `run(task: Optional[str] = None, *args, **kwargs) -> str`: Generate text using the language model. This method is abstract and must be implemented by subclasses.
+
+- `arun(task: Optional[str] = None, *args, **kwargs)`: An asynchronous version of `run` for concurrent text generation.
+
+- `batch_run(tasks: List[str], *args, **kwargs)`: Generate text for a batch of tasks.
+
+- `abatch_run(tasks: List[str], *args, **kwargs)`: An asynchronous version of `batch_run` for concurrent batch generation.
+
+- `chat(task: str, history: str = "") -> str`: Conduct a chat with the model, providing a conversation history.
+
+- `__call__(task: str) -> str`: Call the model to generate text.
+
+- `_tokens_per_second() -> float`: Calculate tokens generated per second.
+
+- `_num_tokens(text: str) -> int`: Calculate the number of tokens in a text.
+
+- `_time_for_generation(task: str) -> float`: Measure the time taken for text generation.
+
+- `generate_summary(text: str) -> str`: Generate a summary of the provided text.
+
+- `set_temperature(value: float)`: Set the temperature parameter.
+
+- `set_max_tokens(value: int)`: Set the maximum number of tokens.
+
+- `clear_history()`: Clear the conversation history.
+
+- `enable_logging(log_file: str = "model.log")`: Initialize logging for the model.
+
+- `log_event(message: str)`: Log an event.
+
+- `save_checkpoint(checkpoint_dir: str = "checkpoints")`: Save the model state as a checkpoint.
+
+- `load_checkpoint(checkpoint_path: str)`: Load the model state from a checkpoint.
+
+- `toggle_creative_mode(enable: bool)`: Toggle creative mode for the model.
+
+- `track_resource_utilization()`: Track and report resource utilization.
+
+- `get_generation_time() -> float`: Get the time taken for text generation.
+
+- `set_max_length(max_length: int)`: Set the maximum length of generated sequences.
+
+- `set_model_name(model_name: str)`: Set the model name.
+
+- `set_frequency_penalty(frequency_penalty: float)`: Set the frequency penalty parameter.
+
+- `set_presence_penalty(presence_penalty: float)`: Set the presence penalty parameter.
+
+- `set_stop_token(stop_token: str)`: Set the stop token.
+
+- `set_length_penalty(length_penalty: float)`: Set the length penalty parameter.
+
+- `set_role(role: str)`: Set the role of the model.
+
+- `set_top_k(top_k: int)`: Set the top-k parameter.
+
+- `set_top_p(top_p: float)`: Set the top-p parameter.
+
+- `set_num_beams(num_beams: int)`: Set the number of beams.
+
+- `set_do_sample(do_sample: bool)`: Set whether to use sampling.
+
+- `set_early_stopping(early_stopping: bool)`: Set whether to use early stopping.
+
+- `set_seed(seed: int)`: Set the random seed.
+
+- `set_device(device: str)`: Set the device for model execution.
+
+## 3. Implementation
+
+The `BaseLLM` class serves as the base for implementing specific language models. Subclasses of `BaseLLM` should implement the `run` method to define how text is generated for a given task. This design allows flexibility in integrating different language models while maintaining a common interface.
+
+## 4. Usage Examples
+
+To demonstrate how to use the `BaseLLM` interface, let's create an example using a hypothetical language model. We'll initialize an instance of the model and generate text for a simple task.
+
+```python
+# Import the BaseLLM class
+from swarms.models import BaseLLM
+
+# Create an instance of the language model
+language_model = BaseLLM(
+ model_name="my_language_model",
+ max_tokens=50,
+ temperature=0.7,
+ top_k=50,
+ top_p=0.9,
+ device="cuda",
+)
+
+# Generate text for a task
+task = "Translate the following English text to French: 'Hello, world.'"
+generated_text = language_model.run(task)
+
+# Print the generated text
+print(generated_text)
+```
+
+In this example, we've created an instance of our hypothetical language model, configured its parameters, and used the `run` method to generate text for a translation task.
+
+## 5. Additional Features
+
+The `BaseLLM` interface provides additional features for customization and control:
+
+- `batch_run`: Generate text for a batch of tasks efficiently.
+- `arun` and `abatch_run`: Asynchronous versions of `run` and `batch_run` for concurrent text generation.
+- `chat`: Conduct a conversation with the model by providing a history of the conversation.
+- `__call__`: Allow the model to be called directly to generate text.
+
+These features enhance the flexibility and utility of the interface in various applications, including chatbots, language translation, and content generation.
+
+## 6. Performance Metrics
+
+The `BaseLLM` class offers methods for tracking performance metrics:
+
+- `_tokens_per_second`: Calculate tokens generated per second.
+- `_num_tokens`: Calculate the number of tokens in a text.
+- `_time_for_generation`: Measure the time taken for text generation.
+
+These metrics help assess the efficiency and speed of text generation, enabling optimizations as needed.
+
+## 7. Logging and Checkpoints
+
+Logging and checkpointing are crucial for tracking model behavior and ensuring reproducibility:
+
+- `enable_logging`: Initialize logging for the model.
+- `log_event`: Log events and activities.
+- `save_checkpoint`: Save the model state as a checkpoint.
+- `load_checkpoint`: Load the model state from a checkpoint.
+
+These capabilities aid in debugging, monitoring, and resuming model experiments.
+
+## 8. Resource Utilization Tracking
+
+The `track_resource_utilization` method is a placeholder for tracking and reporting resource utilization, such as CPU and memory usage. It can be customized to suit specific monitoring needs.
+
+## 9. Conclusion
+
+The Language Model Interface (`BaseLLM`) is a versatile framework for working with language models. Whether you're using pre-trained models or developing your own, this interface provides a consistent and extensible foundation. By following the provided guidelines and examples, you can integrate and customize language models for various natural language processing tasks.
\ No newline at end of file
diff --git a/docs/swarms/models/base_multimodal_model.md b/docs/swarms/models/base_multimodal_model.md
new file mode 100644
index 00000000..c1a8373d
--- /dev/null
+++ b/docs/swarms/models/base_multimodal_model.md
@@ -0,0 +1,299 @@
+# `BaseMultiModalModel` Documentation
+
+Swarms is a Python library that provides a framework for running multimodal AI models. It allows you to combine text and image inputs and generate coherent and context-aware responses. This library is designed to be extensible, allowing you to integrate various multimodal models.
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [Installation](#installation)
+3. [Getting Started](#getting-started)
+4. [BaseMultiModalModel Class](#basemultimodalmodel-class)
+ - [Initialization](#initialization)
+ - [Methods](#methods)
+5. [Usage Examples](#usage-examples)
+6. [Additional Tips](#additional-tips)
+7. [References and Resources](#references-and-resources)
+
+## 1. Introduction
+
+Swarms is designed to simplify the process of working with multimodal AI models. These models are capable of understanding and generating content based on both textual and image inputs. With this library, you can run such models and receive context-aware responses.
+
+## 2. Installation
+
+To install swarms, you can use pip:
+
+```bash
+pip install swarms
+```
+
+## 3. Getting Started
+
+To get started with Swarms, you'll need to import the library and create an instance of the `BaseMultiModalModel` class. This class serves as the foundation for running multimodal models.
+
+```python
+from swarms.models import BaseMultiModalModel
+
+model = BaseMultiModalModel(
+ model_name="your_model_name",
+ temperature=0.5,
+ max_tokens=500,
+ max_workers=10,
+ top_p=1,
+ top_k=50,
+ beautify=False,
+ device="cuda",
+ max_new_tokens=500,
+ retries=3,
+)
+```
+
+You can customize the initialization parameters based on your model's requirements.
+
+## 4. BaseMultiModalModel Class
+
+### Initialization
+
+The `BaseMultiModalModel` class is initialized with several parameters that control its behavior. Here's a breakdown of the initialization parameters:
+
+| Parameter | Description | Default Value |
+|------------------|-------------------------------------------------------------------------------------------------------|---------------|
+| `model_name` | The name of the multimodal model to use. | None |
+| `temperature` | The temperature parameter for controlling randomness in text generation. | 0.5 |
+| `max_tokens` | The maximum number of tokens in the generated text. | 500 |
+| `max_workers` | The maximum number of concurrent workers for running tasks. | 10 |
+| `top_p` | The top-p parameter for filtering words in text generation. | 1 |
+| `top_k` | The top-k parameter for filtering words in text generation. | 50 |
+| `beautify` | Whether to beautify the output text. | False |
+| `device` | The device to run the model on (e.g., 'cuda' or 'cpu'). | 'cuda' |
+| `max_new_tokens` | The maximum number of new tokens allowed in generated responses. | 500 |
+| `retries` | The number of retries in case of an error during text generation. | 3 |
+| `system_prompt` | A system-level prompt to set context for generation. | None |
+| `meta_prompt` | A meta prompt to provide guidance for including image labels in responses. | None |
+
+### Methods
+
+The `BaseMultiModalModel` class defines various methods for running multimodal models and managing interactions:
+
+- `run(task: str, img: str) -> str`: Run the multimodal model with a text task and an image URL to generate a response.
+
+- `arun(task: str, img: str) -> str`: Run the multimodal model asynchronously with a text task and an image URL to generate a response.
+
+- `get_img_from_web(img: str) -> Image`: Fetch an image from a URL and return it as a PIL Image.
+
+- `encode_img(img: str) -> str`: Encode an image to base64 format.
+
+- `get_img(img: str) -> Image`: Load an image from the local file system and return it as a PIL Image.
+
+- `clear_chat_history()`: Clear the chat history maintained by the model.
+
+- `run_many(tasks: List[str], imgs: List[str]) -> List[str]`: Run the model on multiple text tasks and image URLs concurrently and return a list of responses.
+
+- `run_batch(tasks_images: List[Tuple[str, str]]) -> List[str]`: Process a batch of text tasks and image URLs and return a list of responses.
+
+- `run_batch_async(tasks_images: List[Tuple[str, str]]) -> List[str]`: Process a batch of text tasks and image URLs asynchronously and return a list of responses.
+
+- `run_batch_async_with_retries(tasks_images: List[Tuple[str, str]]) -> List[str]`: Process a batch of text tasks and image URLs asynchronously with retries in case of errors and return a list of responses.
+
+- `unique_chat_history() -> List[str]`: Get the unique chat history stored by the model.
+
+- `run_with_retries(task: str, img: str) -> str`: Run the model with retries in case of an error.
+
+- `run_batch_with_retries(tasks_images: List[Tuple[str, str]]) -> List[str]`: Run a batch of tasks with retries in case of errors and return a list of responses.
+
+- `_tokens_per_second() -> float`: Calculate the tokens generated per second during text generation.
+
+- `_time_for_generation(task: str) -> float`: Measure the time taken for text generation for a specific task.
+
+- `generate_summary(text: str) -> str`: Generate a summary of the provided text.
+
+- `set_temperature(value: float)`: Set the temperature parameter for controlling randomness in text generation.
+
+- `set_max_tokens(value: int)`: Set the maximum number of tokens allowed in generated responses.
+
+- `get_generation_time() -> float`: Get the time taken for text generation for the last task.
+
+- `get_chat_history() -> List[str]`: Get the chat history, including all interactions.
+
+- `get_unique_chat_history() -> List[str]`: Get the unique chat history, removing duplicate interactions.
+
+- `get_chat_history_length() -> int`: Get the length of the chat history.
+
+- `get_unique_chat_history_length() -> int`: Get the length of the unique chat history.
+
+- `get_chat_history_tokens() -> int`: Get the total number of tokens in the chat history.
+
+- `print_beautiful(content: str, color: str = 'cyan')`: Print content beautifully using colored text.
+
+- `stream(content: str)`: Stream the content, printing it character by character.
+
+- `meta_prompt() -> str`: Get the meta prompt that provides guidance for including image labels in responses.
+
+## 5. Usage Examples
+
+Let's explore some usage examples of the MultiModalAI library:
+
+### Example 1: Running the Model
+
+```python
+# Import the library
+from swarms.models import BaseMultiModalModel
+
+# Create an instance of the model
+model = BaseMultiModalModel(
+ model_name="your_model_name",
+ temperature=0.5,
+ max_tokens=500,
+ device="cuda",
+)
+
+# Run the model with a text task and an image URL
+response = model.run(
+ "Generate a summary of this text", "https://www.example.com/image.jpg"
+)
+print(response)
+```
+
+### Example 2: Running Multiple Tasks Concurrently
+
+```python
+# Import the library
+from swarms.models import BaseMultiModalModel
+
+# Create an instance of the model
+model = BaseMultiModalModel(
+ model_name="your_model_name",
+ temperature=0.5,
+ max_tokens=500,
+ max_workers=4,
+ device="cuda",
+)
+
+# Define a list of tasks and image URLs
+tasks = ["Task 1", "Task 2", "Task 3"]
+images = ["https://image1.jpg", "https://image2.jpg", "https://image3.jpg"]
+
+# Run the model on multiple tasks concurrently
+responses = model.run_many(tasks, images)
+for response in responses:
+ print(response)
+```
+
+### Example 3: Running the Model Asynchronously
+
+```python
+# Import the library
+from swarms.models import BaseMultiModalModel
+
+# Create an instance of the model
+model = BaseMultiModalModel(
+ model_name="your_model_name",
+ temperature=0.5,
+ max_tokens=500,
+ device="cuda",
+)
+
+# Define a list of tasks and image URLs
+tasks_images = [
+ ("Task 1", "https://image1.jpg"),
+ ("Task 2", "https://image2.jpg"),
+ ("Task 3", "https://image3.jpg"),
+]
+
+# Run the model on multiple tasks asynchronously
+responses = model.run_batch_async(tasks_images)
+for response in responses:
+ print(response)
+```
+
+### Example 4: Inheriting `BaseMultiModalModel` for its prebuilt classes
+```python
+from swarms.models import BaseMultiModalModel
+
+
+class CustomMultiModalModel(BaseMultiModalModel):
+ def __init__(self, model_name, custom_parameter, *args, **kwargs):
+ # Call the parent class constructor
+ super().__init__(model_name=model_name, *args, **kwargs)
+ # Initialize custom parameters specific to your model
+ self.custom_parameter = custom_parameter
+
+ def __call__(self, text, img):
+ # Implement the multimodal model logic here
+ # You can use self.custom_parameter and other inherited attributes
+ pass
+
+ def generate_summary(self, text):
+ # Implement the summary generation logic using your model
+ # You can use self.custom_parameter and other inherited attributes
+ pass
+
+
+# Create an instance of your custom multimodal model
+custom_model = CustomMultiModalModel(
+ model_name="your_custom_model_name",
+ custom_parameter="your_custom_value",
+ temperature=0.5,
+ max_tokens=500,
+ device="cuda",
+)
+
+# Run your custom model
+response = custom_model.run(
+ "Generate a summary of this text", "https://www.example.com/image.jpg"
+)
+print(response)
+
+# Generate a summary using your custom model
+summary = custom_model.generate_summary("This is a sample text to summarize.")
+print(summary)
+```
+
+In the code above:
+
+1. We define a `CustomMultiModalModel` class that inherits from `BaseMultiModalModel`.
+
+2. In the constructor of our custom class, we call the parent class constructor using `super()` and initialize any custom parameters specific to our model. In this example, we introduced a `custom_parameter`.
+
+3. We override the `__call__` method, which is responsible for running the multimodal model logic. Here, you can implement the specific behavior of your model, considering both text and image inputs.
+
+4. We override the `generate_summary` method, which is used to generate a summary of text input. You can implement your custom summarization logic here.
+
+5. We create an instance of our custom model, passing the required parameters, including the custom parameter.
+
+6. We demonstrate how to run the custom model and generate a summary using it.
+
+By inheriting from `BaseMultiModalModel`, you can leverage the prebuilt features and methods provided by the library while customizing the behavior of your multimodal model. This allows you to create powerful and specialized models for various multimodal tasks.
+
+These examples demonstrate how to use MultiModalAI to run multimodal models with text and image inputs. You can adjust the parameters and methods to suit your specific use cases.
+
+## 6. Additional Tips
+
+Here are some additional tips and considerations for using MultiModalAI effectively:
+
+- **Custom Models**: You can create your own multimodal models and inherit from the `BaseMultiModalModel` class to integrate them with this library.
+
+- **Retries**: In cases where text generation might fail due to various reasons (e.g., server issues), using methods with retries can be helpful.
+
+- **Monitoring**: You can monitor the performance of your model using methods like `_tokens_per_second()` and `_time_for_generation()`.
+
+- **Chat History**: The library maintains a chat history, allowing you to keep track of interactions.
+
+- **Streaming**: The `stream()` method can be useful for displaying output character by character, which can be helpful for certain applications.
+
+## 7. References and Resources
+
+Here are some references and resources that you may find useful for working with multimodal models:
+
+- [Hugging Face Transformers Library](https://huggingface.co/transformers/): A library for working with various transformer-based models.
+
+- [PIL (Python Imaging Library)](https://pillow.readthedocs.io/en/stable/): Documentation for working with images in Python using the Pillow library.
+
+- [Concurrent Programming in Python](https://docs.python.org/3/library/concurrent.futures.html): Official Python documentation for concurrent programming.
+
+- [Requests Library Documentation](https://docs.python-requests.org/en/latest/): Documentation for the Requests library, which is used for making HTTP requests.
+
+- [Base64 Encoding in Python](https://docs.python.org/3/library/base64.html): Official Python documentation for base64 encoding and decoding.
+
+This concludes the documentation for the MultiModalAI library. You can now explore the library further and integrate it with your multimodal AI projects.
\ No newline at end of file
diff --git a/docs/swarms/models/custom_model.md b/docs/swarms/models/custom_model.md
new file mode 100644
index 00000000..624b5372
--- /dev/null
+++ b/docs/swarms/models/custom_model.md
@@ -0,0 +1,107 @@
+# How to Create A Custom Language Model
+
+When working with advanced language models, there might come a time when you need a custom solution tailored to your specific needs. Inheriting from a `BaseLLM` in a Python framework allows developers to create custom language model classes with ease. This developer guide will take you through the process step by step.
+
+### Prerequisites
+
+Before you begin, ensure that you have:
+
+- A working knowledge of Python programming.
+- Basic understanding of object-oriented programming (OOP) in Python.
+- Familiarity with language models and natural language processing (NLP).
+- The appropriate Python framework installed, with access to `BaseLLM`.
+
+### Step-by-Step Guide
+
+#### Step 1: Understand `BaseLLM`
+
+The `BaseLLM` is an abstract base class that defines a set of methods and properties which your custom language model (LLM) should implement. Abstract classes in Python are not designed to be instantiated directly but are meant to be subclassed.
+
+#### Step 2: Create a New Class
+
+Start by defining a new class that inherits from `BaseLLM`. This class will implement the required methods defined in the abstract base class.
+
+```python
+from swarms import BaseLLM
+
+class vLLMLM(BaseLLM):
+ pass
+```
+
+#### Step 3: Initialize Your Class
+
+Implement the `__init__` method to initialize your custom LLM. You'll want to initialize the base class as well and define any additional parameters for your model.
+
+```python
+class vLLMLM(BaseLLM):
+ def __init__(self, model_name='default_model', tensor_parallel_size=1, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.model_name = model_name
+ self.tensor_parallel_size = tensor_parallel_size
+ # Add any additional initialization here
+```
+
+#### Step 4: Implement Required Methods
+
+Implement the `run` method or any other abstract methods required by `BaseLLM`. This is where you define how your model processes input and returns output.
+
+```python
+class vLLMLM(BaseLLM):
+ # ... existing code ...
+
+ def run(self, task, *args, **kwargs):
+ # Logic for running your model goes here
+ return "Processed output"
+```
+
+#### Step 5: Test Your Model
+
+Instantiate your custom LLM and test it to ensure that it works as expected.
+
+```python
+model = vLLMLM(model_name='my_custom_model', tensor_parallel_size=2)
+output = model.run("What are the symptoms of COVID-19?")
+print(output) # Outputs: "Processed output"
+```
+
+#### Step 6: Integrate Additional Components
+
+Depending on the requirements, you might need to integrate additional components such as database connections, parallel computing resources, or custom processing pipelines.
+
+#### Step 7: Documentation
+
+Write comprehensive docstrings for your class and its methods. Good documentation is crucial for maintaining the code and for other developers who might use your model.
+
+```python
+class vLLMLM(BaseLLM):
+ """
+ A custom language model class that extends BaseLLM.
+
+ ... more detailed docstring ...
+ """
+ # ... existing code ...
+```
+
+#### Step 8: Best Practices
+
+Follow best practices such as error handling, input validation, and resource management to ensure your model is robust and reliable.
+
+#### Step 9: Packaging Your Model
+
+Package your custom LLM class into a module or package that can be easily distributed and imported into other projects.
+
+#### Step 10: Version Control and Collaboration
+
+Use a version control system like Git to track changes to your model. This makes collaboration easier and helps you keep a history of your work.
+
+### Conclusion
+
+By following this guide, you should now have a custom model that extends the `BaseLLM`. Remember that the key to a successful custom LLM is understanding the base functionalities, implementing necessary changes, and testing thoroughly. Keep iterating and improving based on feedback and performance metrics.
+
+### Further Reading
+
+- Official Python documentation on abstract base classes.
+- In-depth tutorials on object-oriented programming in Python.
+- Advanced NLP techniques and optimization strategies for language models.
+
+This guide provides the fundamental steps to create custom models using `BaseLLM`. For detailed implementation and advanced customization, it's essential to dive deeper into the specific functionalities and capabilities of the language model framework you are using.
\ No newline at end of file
diff --git a/docs/swarms/models/dalle3.md b/docs/swarms/models/dalle3.md
new file mode 100644
index 00000000..346489c7
--- /dev/null
+++ b/docs/swarms/models/dalle3.md
@@ -0,0 +1,261 @@
+# `Dalle3` Documentation
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [Installation](#installation)
+3. [Quick Start](#quick-start)
+4. [Dalle3 Class](#dalle3-class)
+ - [Attributes](#attributes)
+ - [Methods](#methods)
+5. [Usage Examples](#usage-examples)
+6. [Error Handling](#error-handling)
+7. [Advanced Usage](#advanced-usage)
+8. [References](#references)
+
+---
+
+## Introduction
+
+The Dalle3 library is a Python module that provides an easy-to-use interface for generating images from text descriptions using the DALL·E 3 model by OpenAI. DALL·E 3 is a powerful image generation model capable of converting textual prompts into images. This documentation will guide you through the installation, setup, and usage of the Dalle3 library.
+
+---
+
+## Installation
+
+To use the Dalle3 model, you must first install swarms:
+
+```bash
+pip install swarms
+```
+
+---
+
+## Quick Start
+
+Let's get started with a quick example of using the Dalle3 library to generate an image from a text prompt:
+
+```python
+from swarms.models.dalle3 import Dalle3
+
+# Create an instance of the Dalle3 class
+dalle3 = Dalle3()
+
+# Define a text prompt
+task = "A painting of a dog"
+
+# Generate an image from the text prompt
+image_url = dalle3(task)
+
+# Print the generated image URL
+print(image_url)
+```
+
+This example demonstrates the basic usage of the Dalle3 library to convert a text prompt into an image. The generated image URL will be printed to the console.
+
+---
+
+## Dalle3 Class
+
+The Dalle3 library provides a `Dalle3` class that allows you to interact with the DALL·E 3 model. This class has several attributes and methods for generating images from text prompts.
+
+### Attributes
+
+- `model` (str): The name of the DALLΒ·E 3 model. Default: "dall-e-3".
+- `img` (str): The image URL generated by the Dalle3 API.
+- `size` (str): The size of the generated image. Default: "1024x1024".
+- `max_retries` (int): The maximum number of API request retries. Default: 3.
+- `quality` (str): The quality of the generated image. Default: "standard".
+- `n` (int): The number of variations to create. Default: 4.
+
+### Methods
+
+#### `__call__(self, task: str) -> Dalle3`
+
+This method makes a call to the Dalle3 API and returns the image URL generated from the provided text prompt.
+
+Parameters:
+- `task` (str): The text prompt to be converted to an image.
+
+Returns:
+- `Dalle3`: An instance of the Dalle3 class with the image URL generated by the Dalle3 API.
+
+#### `create_variations(self, img: str)`
+
+This method creates variations of an image using the Dalle3 API.
+
+Parameters:
+- `img` (str): The image to be used for the API request.
+
+Returns:
+- `img` (str): The image URL of the generated variations.
+
+---
+
+## Usage Examples
+
+### Example 1: Basic Image Generation
+
+```python
+from swarms.models.dalle3 import Dalle3
+
+# Create an instance of the Dalle3 class
+dalle3 = Dalle3()
+
+# Define a text prompt
+task = "A painting of a dog"
+
+# Generate an image from the text prompt
+image_url = dalle3(task)
+
+# Print the generated image URL
+print(image_url)
+```
+
+### Example 2: Creating Image Variations
+
+```python
+from swarms.models.dalle3 import Dalle3
+
+# Create an instance of the Dalle3 class
+dalle3 = Dalle3()
+
+# Define the URL of an existing image
+img_url = "https://images.unsplash.com/photo-1694734479898-6ac4633158ac?q=80&w=1287&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"
+
+# Create variations of the image
+variations_url = dalle3.create_variations(img_url)
+
+# Print the URLs of the generated variations
+print(variations_url)
+```
+
+The following additional examples cover various edge cases and methods of the `Dalle3` class in the Dalle3 library:
+
+### Example 3: Customizing Image Size
+
+You can customize the size of the generated image by specifying the `size` parameter when creating an instance of the `Dalle3` class. Here's how to generate a smaller image:
+
+```python
+from swarms.models.dalle3 import Dalle3
+
+# Create an instance of the Dalle3 class with a custom image size
+dalle3 = Dalle3(size="512x512")
+
+# Define a text prompt
+task = "A small painting of a cat"
+
+# Generate a smaller image from the text prompt
+image_url = dalle3(task)
+
+# Print the generated image URL
+print(image_url)
+```
+
+### Example 4: Adjusting Retry Limit
+
+You can adjust the maximum number of API request retries using the `max_retries` parameter. Here's how to increase the retry limit:
+
+```python
+from swarms.models.dalle3 import Dalle3
+
+# Create an instance of the Dalle3 class with a higher retry limit
+dalle3 = Dalle3(max_retries=5)
+
+# Define a text prompt
+task = "An image of a landscape"
+
+# Generate an image with a higher retry limit
+image_url = dalle3(task)
+
+# Print the generated image URL
+print(image_url)
+```
+
+### Example 5: Generating Image Variations
+
+To create variations of an existing image, you can use the `create_variations` method. Here's an example:
+
+```python
+from swarms.models.dalle3 import Dalle3
+
+# Create an instance of the Dalle3 class
+dalle3 = Dalle3()
+
+# Define the URL of an existing image
+img_url = "https://images.unsplash.com/photo-1677290043066-12eccd944004?q=80&w=1287&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"
+
+# Create variations of the image
+variations_url = dalle3.create_variations(img_url)
+
+# Print the URLs of the generated variations
+print(variations_url)
+```
+
+### Example 6: Handling API Errors
+
+The Dalle3 library provides error handling for API-related issues. Here's how to handle and display API errors:
+
+```python
+from swarms.models.dalle3 import Dalle3
+
+# Create an instance of the Dalle3 class
+dalle3 = Dalle3()
+
+# Define a text prompt
+task = "Invalid prompt that may cause an API error"
+
+try:
+ # Attempt to generate an image with an invalid prompt
+ image_url = dalle3(task)
+ print(image_url)
+except Exception as e:
+ print(f"Error occurred: {str(e)}")
+```
+
+### Example 7: Customizing Image Quality
+
+You can customize the quality of the generated image by specifying the `quality` parameter. Here's how to generate a high-quality image:
+
+```python
+from swarms.models.dalle3 import Dalle3
+
+# Create an instance of the Dalle3 class with high quality
+dalle3 = Dalle3(quality="hd")
+
+# Define a text prompt
+task = "A high-quality image of a sunset"
+
+# Generate a high-quality image from the text prompt
+image_url = dalle3(task)
+
+# Print the generated image URL
+print(image_url)
+```
+
+
+---
+
+## Error Handling
+
+The Dalle3 library provides error handling for API-related issues. If an error occurs during API communication, the library will handle it and provide detailed error messages. Make sure to handle exceptions appropriately in your code.
+
+---
+
+## Advanced Usage
+
+For advanced usage and customization of the Dalle3 library, you can explore the attributes and methods of the `Dalle3` class. Adjusting parameters such as `size`, `max_retries`, and `quality` allows you to fine-tune the image generation process to your specific needs.
+
+---
+
+## References
+
+For more information about the DALL·E 3 model and the Dalle3 library, you can refer to the official OpenAI documentation and resources.
+
+- [OpenAI API Documentation](https://beta.openai.com/docs/)
+- [DALL·E 3 Model Information](https://openai.com/research/dall-e-3)
+- [OpenAI Python Library](https://github.com/openai/openai-python)
+
+---
+
+This concludes the documentation for the Dalle3 library. You can now use the library to generate images from text prompts and explore its advanced features for various applications.
\ No newline at end of file
diff --git a/docs/swarms/models/distilled_whisperx.md b/docs/swarms/models/distilled_whisperx.md
new file mode 100644
index 00000000..79c8c2ea
--- /dev/null
+++ b/docs/swarms/models/distilled_whisperx.md
@@ -0,0 +1,123 @@
+# DistilWhisperModel Documentation
+
+## Overview
+
+The `DistilWhisperModel` is a Python class designed to handle English speech recognition tasks. It leverages the capabilities of the Whisper model, which is fine-tuned for speech-to-text processes. It is designed for both synchronous and asynchronous transcription of audio inputs, offering flexibility for real-time applications or batch processing.
+
+## Installation
+
+Before you can use `DistilWhisperModel`, ensure you have the required libraries installed:
+
+```sh
+pip3 install --upgrade swarms
+```
+
+## Initialization
+
+The `DistilWhisperModel` class is initialized with the following parameters:
+
+| Parameter | Type | Description | Default |
+|-----------|------|-------------|---------|
+| `model_id` | `str` | The identifier for the pre-trained Whisper model | `"distil-whisper/distil-large-v2"` |
+
+Example of initialization:
+
+```python
+from swarms.models import DistilWhisperModel
+
+# Initialize with default model
+model_wrapper = DistilWhisperModel()
+
+# Initialize with a specific model ID
+model_wrapper = DistilWhisperModel(model_id="distil-whisper/distil-large-v2")
+```
+
+## Attributes
+
+After initialization, the `DistilWhisperModel` has several attributes:
+
+| Attribute | Type | Description |
+|-----------|------|-------------|
+| `device` | `str` | The device used for computation (`"cuda:0"` for GPU or `"cpu"`). |
+| `torch_dtype` | `torch.dtype` | The data type used for the Torch tensors. |
+| `model_id` | `str` | The model identifier string. |
+| `model` | `torch.nn.Module` | The actual Whisper model loaded from the identifier. |
+| `processor` | `transformers.AutoProcessor` | The processor for handling input data. |
+
+## Methods
+
+### `transcribe`
+
+Transcribes audio input synchronously.
+
+**Arguments**:
+
+| Argument | Type | Description |
+|----------|------|-------------|
+| `inputs` | `Union[str, dict]` | File path or audio data dictionary. |
+
+**Returns**: `str` - The transcribed text.
+
+**Usage Example**:
+
+```python
+# Synchronous transcription
+transcription = model_wrapper.transcribe("path/to/audio.mp3")
+print(transcription)
+```
+
+### `async_transcribe`
+
+Transcribes audio input asynchronously.
+
+**Arguments**:
+
+| Argument | Type | Description |
+|----------|------|-------------|
+| `inputs` | `Union[str, dict]` | File path or audio data dictionary. |
+
+**Returns**: `Coroutine` - A coroutine that when awaited, returns the transcribed text.
+
+**Usage Example**:
+
+```python
+import asyncio
+
+# Asynchronous transcription
+transcription = asyncio.run(model_wrapper.async_transcribe("path/to/audio.mp3"))
+print(transcription)
+```
+
+### `real_time_transcribe`
+
+Simulates real-time transcription of an audio file.
+
+**Arguments**:
+
+| Argument | Type | Description |
+|----------|------|-------------|
+| `audio_file_path` | `str` | Path to the audio file. |
+| `chunk_duration` | `int` | Duration of audio chunks in seconds. |
+
+**Usage Example**:
+
+```python
+# Real-time transcription simulation
+model_wrapper.real_time_transcribe("path/to/audio.mp3", chunk_duration=5)
+```
+
+## Error Handling
+
+The `DistilWhisperModel` class incorporates error handling for file not found errors and generic exceptions during the transcription process. If a non-recoverable exception is raised, it is printed to the console in red to indicate failure.
+
+## Conclusion
+
+The `DistilWhisperModel` offers a convenient interface to the powerful Whisper model for speech recognition. Its design supports both batch and real-time transcription, catering to different application needs. The class's error handling and retry logic make it robust for real-world applications.
+
+## Additional Notes
+
+- Ensure you have appropriate permissions to read audio files when using file paths.
+- Transcription quality depends on the audio quality and the Whisper model's performance on your dataset.
+- Adjust `chunk_duration` according to the processing power of your system for real-time transcription.
+
+For a full list of models supported by `transformers.AutoModelForSpeechSeq2Seq`, visit the [Hugging Face Model Hub](https://huggingface.co/models).
diff --git a/docs/swarms/models/fuyu.md b/docs/swarms/models/fuyu.md
new file mode 100644
index 00000000..e54a4a22
--- /dev/null
+++ b/docs/swarms/models/fuyu.md
@@ -0,0 +1,89 @@
+# Fuyu Documentation
+
+## Introduction
+
+Welcome to the documentation for Fuyu, a versatile model for generating text conditioned on both textual prompts and images. Fuyu is based on Adept's Fuyu model and offers a convenient way to create text that is influenced by the content of an image. In this documentation, you will find comprehensive information on the Fuyu class, its architecture, usage, and examples.
+
+## Overview
+
+Fuyu is a text generation model that leverages both text and images to generate coherent and contextually relevant text. It combines state-of-the-art language modeling techniques with image processing capabilities to produce text that is semantically connected to the content of an image. Whether you need to create captions for images or generate text that describes visual content, Fuyu can assist you.
+
+## Class Definition
+
+```python
+class Fuyu:
+ def __init__(
+ self,
+ pretrained_path: str = "adept/fuyu-8b",
+ device_map: str = "cuda:0",
+ max_new_tokens: int = 7,
+ ):
+```
+
+## Purpose
+
+The Fuyu class serves as a convenient interface for using Adept's Fuyu model. It allows you to generate text based on a textual prompt and an image. The primary purpose of Fuyu is to provide a user-friendly way to create text that is influenced by visual content, making it suitable for various applications, including image captioning, storytelling, and creative text generation.
+
+## Parameters
+
+- `pretrained_path` (str): The path to the pretrained Fuyu model. By default, it uses the "adept/fuyu-8b" model.
+- `device_map` (str): The device to use for model inference (e.g., "cuda:0" for GPU or "cpu" for CPU). Default: "cuda:0".
+- `max_new_tokens` (int): The maximum number of tokens to generate in the output text. Default: 7.
+
+## Usage
+
+To use Fuyu, follow these steps:
+
+1. Initialize the Fuyu instance:
+
+```python
+from swarms.models.fuyu import Fuyu
+
+fuyu = Fuyu()
+```
+
+
+2. Generate Text with Fuyu:
+
+```python
+text = "Hello, my name is"
+img_path = "path/to/image.png"
+output_text = fuyu(text, img_path)
+```
+
+### Example 2 - Text Generation
+
+```python
+from swarms.models.fuyu import Fuyu
+
+fuyu = Fuyu()
+
+text = "Hello, my name is"
+
+img_path = "path/to/image.png"
+
+output_text = fuyu(text, img_path)
+print(output_text)
+```
+
+## How Fuyu Works
+
+Fuyu combines text and image processing to generate meaningful text outputs. Here's how it works:
+
+1. **Initialization**: When you create a Fuyu instance, you specify the pretrained model path, the device for inference, and the maximum number of tokens to generate.
+
+2. **Processing Text and Images**: Fuyu can process both textual prompts and images. You provide a text prompt and the path to an image as input.
+
+3. **Tokenization**: Fuyu tokenizes the input text and encodes the image using its tokenizer.
+
+4. **Model Inference**: The model takes the tokenized inputs and generates text that is conditioned on both the text and the image.
+
+5. **Output Text**: Fuyu returns the generated text as the output.
+
+## Additional Information
+
+- Fuyu uses Adept's Fuyu model, which is pretrained on a large corpus of text and images, making it capable of generating coherent and contextually relevant text.
+- You can specify the device for inference to utilize GPU acceleration if available.
+- The `max_new_tokens` parameter allows you to control the length of the generated text.
+
+That concludes the documentation for Fuyu. We hope you find this model useful for your text generation tasks that involve images. If you have any questions or encounter any issues, please refer to the Fuyu documentation for further assistance. Enjoy working with Fuyu!
\ No newline at end of file
diff --git a/docs/swarms/models/gemini.md b/docs/swarms/models/gemini.md
new file mode 100644
index 00000000..d5b1b44a
--- /dev/null
+++ b/docs/swarms/models/gemini.md
@@ -0,0 +1,178 @@
+## `Gemini` Documentation
+
+### Introduction
+
+The Gemini module is a versatile tool for leveraging the power of multimodal AI models to generate content. It allows users to combine textual and image inputs to generate creative and informative outputs. In this documentation, we will explore the Gemini module in detail, covering its purpose, architecture, methods, and usage examples.
+
+#### Purpose
+
+The Gemini module is designed to bridge the gap between text and image data, enabling users to harness the capabilities of multimodal AI models effectively. By providing both a textual task and an image as input, Gemini generates content that aligns with the specified task and incorporates the visual information from the image.
+
+### Installation
+
+Before using Gemini, ensure that you have the required dependencies installed. You can install them using the following commands:
+
+```bash
+pip install swarms
+pip install google-generativeai
+pip install python-dotenv
+```
+
+### Class: Gemini
+
+#### Overview
+
+The `Gemini` class is the central component of the Gemini module. It inherits from the `BaseMultiModalModel` class and provides methods to interact with the Gemini AI model. Let's dive into its architecture and functionality.
+
+##### Class Constructor
+
+```python
+class Gemini(BaseMultiModalModel):
+ def __init__(
+ self,
+ model_name: str = "gemini-pro",
+ gemini_api_key: str = get_gemini_api_key_env,
+ *args,
+ **kwargs,
+ ):
+```
+
+| Parameter | Type | Description | Default Value |
+|---------------------|---------|------------------------------------------------------------------|--------------------|
+| `model_name` | str | The name of the Gemini model. | "gemini-pro" |
+| `gemini_api_key` | str | The Gemini API key. If not provided, it is fetched from the environment. | (None) |
+
+- `model_name`: Specifies the name of the Gemini model to use. By default, it is set to "gemini-pro," but you can specify a different model if needed.
+
+- `gemini_api_key`: This parameter allows you to provide your Gemini API key directly. If not provided, the constructor attempts to fetch it from the environment using the `get_gemini_api_key_env` helper function.
+
+##### Methods
+
+1. **run()**
+
+ ```python
+ def run(
+ self,
+ task: str = None,
+ img: str = None,
+ *args,
+ **kwargs,
+ ) -> str:
+ ```
+
+ | Parameter | Type | Description |
+ |---------------|----------|--------------------------------------------|
+ | `task` | str | The textual task for content generation. |
+ | `img` | str | The path to the image to be processed. |
+ | `*args` | Variable | Additional positional arguments. |
+ | `**kwargs` | Variable | Additional keyword arguments. |
+
+ - `task`: Specifies the textual task for content generation. It can be a sentence or a phrase that describes the desired content.
+
+ - `img`: Provides the path to the image that will be processed along with the textual task. Gemini combines the visual information from the image with the textual task to generate content.
+
+ - `*args` and `**kwargs`: Allow for additional, flexible arguments that can be passed to the underlying Gemini model. These arguments can vary based on the specific Gemini model being used.
+
+ **Returns**: A string containing the generated content.
+
+ **Examples**:
+
+ ```python
+ from swarms.models import Gemini
+
+ # Initialize the Gemini model
+ gemini = Gemini()
+
+ # Generate content for a textual task with an image
+ generated_content = gemini.run(
+ task="Describe this image",
+ img="image.jpg",
+ )
+
+ # Print the generated content
+ print(generated_content)
+ ```
+
+ In this example, we initialize the Gemini model, provide a textual task, and specify an image for processing. The `run()` method generates content based on the input and returns the result.
+
+2. **process_img()**
+
+ ```python
+ def process_img(
+ self,
+ img: str = None,
+ type: str = "image/png",
+ *args,
+ **kwargs,
+ ):
+ ```
+
+ | Parameter | Type | Description | Default Value |
+ |---------------|----------|------------------------------------------------------|----------------|
+ | `img` | str | The path to the image to be processed. | (None) |
+ | `type` | str | The MIME type of the image (e.g., "image/png"). | "image/png" |
+ | `*args` | Variable | Additional positional arguments. |
+ | `**kwargs` | Variable | Additional keyword arguments. |
+
+ - `img`: Specifies the path to the image that will be processed. It's essential to provide a valid image path for image-based content generation.
+
+ - `type`: Indicates the MIME type of the image. By default, it is set to "image/png," but you can change it based on the image format you're using.
+
+ - `*args` and `**kwargs`: Allow for additional, flexible arguments that can be passed to the underlying Gemini model. These arguments can vary based on the specific Gemini model being used.
+
+ **Raises**: ValueError if any of the following conditions are met:
+ - No image is provided.
+ - The image type is not specified.
+ - The Gemini API key is missing.
+
+ **Examples**:
+
+ ```python
+ from swarms.models.gemini import Gemini
+
+ # Initialize the Gemini model
+ gemini = Gemini()
+
+ # Process an image
+ processed_image = gemini.process_img(
+ img="image.jpg",
+ type="image/jpeg",
+ )
+
+ # Further use the processed image in content generation
+ generated_content = gemini.run(
+ task="Describe this image",
+ img=processed_image,
+ )
+
+ # Print the generated content
+ print(generated_content)
+ ```
+
+ In this example, we demonstrate how to process an image using the `process_img()` method and then use the processed image in content generation.
+
+#### Additional Information
+
+- Gemini is designed to work seamlessly with various multimodal AI models, making it a powerful tool for content generation tasks.
+
+- The module uses the `google.generativeai` package to access the underlying AI models. Ensure that you have this package installed to leverage the full capabilities of Gemini.
+
+- It's essential to provide a valid Gemini API key for authentication. You can either pass it directly during initialization or store it in the environment variable "GEMINI_API_KEY."
+
+- Gemini's flexibility allows you to experiment with different Gemini models and tailor the content generation process to your specific needs.
+
+- Keep in mind that Gemini is designed to handle both textual and image inputs, making it a valuable asset for various applications, including natural language processing and computer vision tasks.
+
+- If you encounter any issues or have specific requirements, refer to the Gemini documentation for more details and advanced usage.
+
+### References and Resources
+
+- [Swarms GitHub Repository](https://github.com/kyegomez/swarms): Explore the Swarms repository for additional information, updates, and examples.
+
+- [Google Generative AI Documentation](https://ai.google.dev/docs): Dive deeper into the capabilities of the Google Generative AI package used by Gemini.
+
+- [Gemini API Documentation](https://ai.google.dev/api): Access the official documentation for the Gemini API to explore advanced features and integrations.
+
+## Conclusion
+
+In this comprehensive documentation, we've explored the Gemini module, its purpose, architecture, methods, and usage examples. Gemini empowers developers to generate content by combining textual tasks and images, making it a valuable asset for multimodal AI applications. Whether you're working on natural language processing or computer vision projects, Gemini can help you achieve impressive results.
\ No newline at end of file
diff --git a/docs/swarms/models/gpt4v.md b/docs/swarms/models/gpt4v.md
new file mode 100644
index 00000000..5ad80cd9
--- /dev/null
+++ b/docs/swarms/models/gpt4v.md
@@ -0,0 +1,201 @@
+# `GPT4VisionAPI` Documentation
+
+**Table of Contents**
+- [Introduction](#introduction)
+- [Installation](#installation)
+- [Module Overview](#module-overview)
+- [Class: GPT4VisionAPI](#class-gpt4visionapi)
+ - [Initialization](#initialization)
+ - [Methods](#methods)
+ - [encode_image](#encode_image)
+ - [run](#run)
+ - [__call__](#__call__)
+- [Examples](#examples)
+ - [Example 1: Basic Usage](#example-1-basic-usage)
+ - [Example 2: Custom API Key](#example-2-custom-api-key)
+ - [Example 3: Adjusting Maximum Tokens](#example-3-adjusting-maximum-tokens)
+- [Additional Information](#additional-information)
+- [References](#references)
+
+## Introduction
+
+Welcome to the documentation for the `GPT4VisionAPI` module! This module is a powerful wrapper for the OpenAI GPT-4 Vision model. It allows you to interact with the model to generate descriptions or answers related to images. This documentation will provide you with comprehensive information on how to use this module effectively.
+
+## Installation
+
+Before you start using the `GPT4VisionAPI` module, make sure you have the required dependencies installed. You can install them using the following commands:
+
+```bash
+pip3 install --upgrade swarms
+```
+
+## Module Overview
+
+The `GPT4VisionAPI` module serves as a bridge between your application and the OpenAI GPT-4 Vision model. It allows you to send requests to the model and retrieve responses related to images. Here are some key features and functionality provided by this module:
+
+- Encoding images to base64 format.
+- Running the GPT-4 Vision model with specified tasks and images.
+- Customization options such as setting the OpenAI API key and maximum token limit.
+
+## Class: GPT4VisionAPI
+
+The `GPT4VisionAPI` class is the core component of this module. It encapsulates the functionality required to interact with the GPT-4 Vision model. Below, we'll dive into the class in detail.
+
+### Initialization
+
+When initializing the `GPT4VisionAPI` class, you have the option to provide the OpenAI API key and set the maximum token limit. Here are the parameters and their descriptions:
+
+| Parameter | Type | Default Value | Description |
+|---------------------|----------|-------------------------------|----------------------------------------------------------------------------------------------------------|
+| openai_api_key | str | `OPENAI_API_KEY` environment variable (if available) | The OpenAI API key. If not provided, it defaults to the `OPENAI_API_KEY` environment variable. |
+| max_tokens | int | 300 | The maximum number of tokens to generate in the model's response. |
+
+Here's how you can initialize the `GPT4VisionAPI` class:
+
+```python
+from swarms.models import GPT4VisionAPI
+
+# Initialize with default API key and max_tokens
+api = GPT4VisionAPI()
+
+# Initialize with custom API key and max_tokens
+custom_api_key = "your_custom_api_key"
+api = GPT4VisionAPI(openai_api_key=custom_api_key, max_tokens=500)
+```
+
+### Methods
+
+#### encode_image
+
+This method allows you to encode an image from a URL to base64 format. It's a utility function used internally by the module.
+
+```python
+def encode_image(img: str) -> str:
+ """
+ Encode image to base64.
+
+ Parameters:
+ - img (str): URL of the image to encode.
+
+ Returns:
+ str: Base64 encoded image.
+ """
+```
+
+#### run
+
+The `run` method is the primary way to interact with the GPT-4 Vision model. It sends a request to the model with a task and an image URL, and it returns the model's response.
+
+```python
+def run(task: str, img: str) -> str:
+ """
+ Run the GPT-4 Vision model.
+
+ Parameters:
+ - task (str): The task or question related to the image.
+ - img (str): URL of the image to analyze.
+
+ Returns:
+ str: The model's response.
+ """
+```
+
+#### __call__
+
+The `__call__` method is a convenient way to run the GPT-4 Vision model. It has the same functionality as the `run` method.
+
+```python
+def __call__(task: str, img: str) -> str:
+ """
+ Run the GPT-4 Vision model (callable).
+
+ Parameters:
+ - task (str): The task or question related to the image.
+    - img (str): URL of the image to analyze.
+
+ Returns:
+ str: The model's response.
+ """
+```
+
+## Examples
+
+Let's explore some usage examples of the `GPT4VisionAPI` module to better understand how to use it effectively.
+
+### Example 1: Basic Usage
+
+In this example, we'll use the module with the default API key and maximum tokens to analyze an image.
+
+```python
+from swarms.models import GPT4VisionAPI
+
+# Initialize with default API key and max_tokens
+api = GPT4VisionAPI()
+
+# Define the task and image URL
+task = "What is the color of the object?"
+img = "https://i.imgur.com/2M2ZGwC.jpeg"
+
+# Run the GPT-4 Vision model
+response = api.run(task, img)
+
+# Print the model's response
+print(response)
+```
+
+### Example 2: Custom API Key
+
+If you have a custom API key, you can initialize the module with it as shown in this example.
+
+```python
+from swarms.models import GPT4VisionAPI
+
+# Initialize with custom API key and max_tokens
+custom_api_key = "your_custom_api_key"
+api = GPT4VisionAPI(openai_api_key=custom_api_key, max_tokens=500)
+
+# Define the task and image URL
+task = "What is the object in the image?"
+img = "https://i.imgur.com/3T3ZHwD.jpeg"
+
+# Run the GPT-4 Vision model
+response = api.run(task, img)
+
+# Print the model's response
+print(response)
+```
+
+### Example 3: Adjusting Maximum Tokens
+
+You can also customize the maximum token limit when initializing the module. In this example, we set it to 1000 tokens.
+
+```python
+from swarms.models import GPT4VisionAPI
+
+# Initialize with default API key and custom max_tokens
+api = GPT4VisionAPI(max_tokens=1000)
+
+# Define the task and image URL
+task = "Describe the scene in the image."
+img = "https://i.imgur.com/4P4ZRxU.jpeg"
+
+# Run the GPT-4 Vision model
+response = api.run(task, img)
+
+# Print the model's response
+print(response)
+```
+
+## Additional Information
+
+- If you encounter any errors or issues with the module, make sure to check your API key and internet connectivity.
+- It's recommended to handle exceptions when using the module to gracefully handle errors.
+- You can further customize the module to fit your specific use case by modifying the code as needed.
+
+## References
+
+- [OpenAI API Documentation](https://beta.openai.com/docs/)
+
+This documentation provides a comprehensive guide on how to use the `GPT4VisionAPI` module effectively. It covers initialization, methods, usage examples, and additional information to ensure a smooth experience when working with the GPT-4 Vision model.
\ No newline at end of file
diff --git a/docs/swarms/models/hf.md b/docs/swarms/models/hf.md
new file mode 100644
index 00000000..45d88af8
--- /dev/null
+++ b/docs/swarms/models/hf.md
@@ -0,0 +1,91 @@
+# HuggingFaceLLM
+
+## Overview & Introduction
+
+The `HuggingFaceLLM` class in the swarms library provides a simple and easy-to-use interface to harness the power of Hugging Face's transformer-based language models, specifically for causal language modeling. This enables developers to generate coherent and contextually relevant sentences or paragraphs given a prompt, without delving deep into the intricate details of the underlying model or the tokenization process.
+
+Causal Language Modeling (CLM) is a task where given a series of tokens (or words), the model predicts the next token in the sequence. This functionality is central to many natural language processing tasks, including chatbots, story generation, and code autocompletion.
+
+---
+
+## Class Definition
+
+```python
+class HuggingFaceLLM:
+```
+
+### Parameters:
+
+- `model_id (str)`: Identifier for the pre-trained model on the Hugging Face model hub. Examples include "gpt2-medium", "openai-gpt", etc.
+
+- `device (str, optional)`: The device on which to load and run the model. Defaults to 'cuda' if GPU is available, else 'cpu'.
+
+- `max_length (int, optional)`: Maximum length of the generated sequence. Defaults to 20.
+
+- `quantization_config (dict, optional)`: Configuration dictionary for model quantization (if applicable). Default is `None`.
+
+---
+
+## Functionality & Usage
+
+### Initialization:
+
+```python
+llm = HuggingFaceLLM(model_id="gpt2-medium")
+```
+
+Upon initialization, the specified pre-trained model and tokenizer are loaded from Hugging Face's model hub. The model is then moved to the designated device. If there's an issue loading either the model or the tokenizer, an error will be logged.
+
+### Generation:
+
+The main functionality of this class is text generation. The class provides two methods for this: `__call__` and `generate`. Both methods take in a prompt text and an optional `max_length` parameter and return the generated text.
+
+Usage:
+```python
+from swarms import HuggingFaceLLM
+
+# Initialize
+llm = HuggingFaceLLM(model_id="gpt2-medium")
+
+# Generate text using __call__ method
+result = llm("Once upon a time,")
+print(result)
+
+# Alternatively, using the generate method
+result = llm.generate("The future of AI is")
+print(result)
+```
+
+---
+
+## Mathematical Explanation:
+
+Given a sequence of tokens \( x_1, x_2, ..., x_n \), a causal language model aims to maximize the likelihood of the next token \( x_{n+1} \) in the sequence. Formally, it tries to optimize:
+
+\[ P(x_{n+1} | x_1, x_2, ..., x_n) \]
+
+Where \( P \) is the probability distribution over all possible tokens in the vocabulary.
+
+The model takes the tokenized input sequence, feeds it through several transformer blocks, and finally through a linear layer to produce logits for each token in the vocabulary. The token with the highest logit value is typically chosen as the next token in the sequence.
+
+---
+
+## Additional Information & Tips:
+
+- Ensure you have an active internet connection when initializing the class for the first time, as the models and tokenizers are fetched from Hugging Face's servers.
+
+- Although the default `max_length` is set to 20, it's advisable to adjust this parameter based on the context of the problem.
+
+- Keep an eye on GPU memory when using large models or generating long sequences.
+
+---
+
+## References & Resources:
+
+- Hugging Face Model Hub: [https://huggingface.co/models](https://huggingface.co/models)
+
+- Introduction to Transformers: [https://huggingface.co/transformers/introduction.html](https://huggingface.co/transformers/introduction.html)
+
+- Causal Language Modeling: Vaswani, A., et al. (2017). Attention is All You Need. [arXiv:1706.03762](https://arxiv.org/abs/1706.03762)
+
+Note: This documentation provides a comprehensive overview of the `HuggingFaceLLM` class. Developers can follow a similar structure when documenting other classes or functionalities.
\ No newline at end of file
diff --git a/docs/swarms/models/huggingface.md b/docs/swarms/models/huggingface.md
new file mode 100644
index 00000000..50aaa2a1
--- /dev/null
+++ b/docs/swarms/models/huggingface.md
@@ -0,0 +1,155 @@
+## `HuggingfaceLLM` Documentation
+
+### Introduction
+
+The `HuggingfaceLLM` class is designed for running inference using models from the Hugging Face Transformers library. This documentation provides an in-depth understanding of the class, its purpose, attributes, methods, and usage examples.
+
+#### Purpose
+
+The `HuggingfaceLLM` class serves the following purposes:
+
+1. Load pre-trained Hugging Face models and tokenizers.
+2. Generate text-based responses from the loaded model using a given prompt.
+3. Provide flexibility in device selection, quantization, and other configuration options.
+
+### Class Definition
+
+The `HuggingfaceLLM` class is defined as follows:
+
+```python
+class HuggingfaceLLM:
+ def __init__(
+ self,
+ model_id: str,
+ device: str = None,
+ max_length: int = 20,
+ quantize: bool = False,
+ quantization_config: dict = None,
+ verbose=False,
+ distributed=False,
+ decoding=False,
+ ):
+ # Attributes and initialization logic explained below
+ pass
+
+ def load_model(self):
+ # Method to load the pre-trained model and tokenizer
+ pass
+
+ def run(self, prompt_text: str, max_length: int = None):
+ # Method to generate text-based responses
+ pass
+
+ def __call__(self, prompt_text: str, max_length: int = None):
+ # Alternate method for generating text-based responses
+ pass
+```
+
+### Attributes
+
+| Attribute | Description |
+|----------------------|---------------------------------------------------------------------------------------------------------------------------|
+| `model_id` | The ID of the pre-trained model to be used. |
+| `device` | The device on which the model runs (`'cuda'` for GPU or `'cpu'` for CPU). |
+| `max_length` | The maximum length of the generated text. |
+| `quantize` | A boolean indicating whether quantization should be used. |
+| `quantization_config`| A dictionary with configuration options for quantization. |
+| `verbose` | A boolean indicating whether verbose logs should be printed. |
+| `logger` | An optional logger for logging messages (defaults to a basic logger). |
+| `distributed` | A boolean indicating whether distributed processing should be used. |
+| `decoding` | A boolean indicating whether to perform decoding during text generation. |
+
+### Class Methods
+
+#### `__init__` Method
+
+The `__init__` method initializes an instance of the `HuggingfaceLLM` class with the specified parameters. It also loads the pre-trained model and tokenizer.
+
+- `model_id` (str): The ID of the pre-trained model to use.
+- `device` (str, optional): The device to run the model on ('cuda' or 'cpu').
+- `max_length` (int, optional): The maximum length of the generated text.
+- `quantize` (bool, optional): Whether to use quantization.
+- `quantization_config` (dict, optional): Configuration for quantization.
+- `verbose` (bool, optional): Whether to print verbose logs.
+- `logger` (logging.Logger, optional): The logger to use.
+- `distributed` (bool, optional): Whether to use distributed processing.
+- `decoding` (bool, optional): Whether to perform decoding during text generation.
+
+#### `load_model` Method
+
+The `load_model` method loads the pre-trained model and tokenizer specified by `model_id`.
+
+#### `run` and `__call__` Methods
+
+Both `run` and `__call__` methods generate text-based responses based on a given prompt. They accept the following parameters:
+
+- `prompt_text` (str): The text prompt to initiate text generation.
+- `max_length` (int, optional): The maximum length of the generated text.
+
+### Usage Examples
+
+Here are three ways to use the `HuggingfaceLLM` class:
+
+#### Example 1: Basic Usage
+
+```python
+from swarms.models import HuggingfaceLLM
+
+# Initialize the HuggingfaceLLM instance with a model ID
+model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha"
+inference = HuggingfaceLLM(model_id=model_id)
+
+# Generate text based on a prompt
+prompt_text = "Once upon a time"
+generated_text = inference(prompt_text)
+print(generated_text)
+```
+
+#### Example 2: Custom Configuration
+
+```python
+from swarms.models import HuggingfaceLLM
+
+# Initialize with custom configuration
+custom_config = {
+ "quantize": True,
+ "quantization_config": {"load_in_4bit": True},
+ "verbose": True,
+}
+inference = HuggingfaceLLM(
+ model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config
+)
+
+# Generate text based on a prompt
+prompt_text = "Tell me a joke"
+generated_text = inference(prompt_text)
+print(generated_text)
+```
+
+#### Example 3: Distributed Processing
+
+```python
+from swarms.models import HuggingfaceLLM
+
+# Initialize for distributed processing
+inference = HuggingfaceLLM(model_id="gpt2-medium", distributed=True)
+
+# Generate text based on a prompt
+prompt_text = "Translate the following sentence to French"
+generated_text = inference(prompt_text)
+print(generated_text)
+```
+
+### Additional Information
+
+- The `HuggingfaceLLM` class provides the flexibility to load and use pre-trained models from the Hugging Face Transformers library.
+- Quantization can be enabled to reduce model size and inference time.
+- Distributed processing can be used for parallelized inference.
+- Verbose logging can help in debugging and understanding the text generation process.
+
+### References
+
+- [Hugging Face Transformers Documentation](https://huggingface.co/transformers/)
+- [PyTorch Documentation](https://pytorch.org/docs/stable/index.html)
+
+This documentation provides a comprehensive understanding of the `HuggingfaceLLM` class, its attributes, methods, and usage examples. Developers can use this class to perform text generation tasks efficiently using pre-trained models from the Hugging Face Transformers library.
\ No newline at end of file
diff --git a/docs/swarms/models/idefics.md b/docs/swarms/models/idefics.md
new file mode 100644
index 00000000..57125038
--- /dev/null
+++ b/docs/swarms/models/idefics.md
@@ -0,0 +1,107 @@
+# `Idefics` Documentation
+
+## Introduction
+
+Welcome to the documentation for Idefics, a versatile multimodal inference tool using pre-trained models from the Hugging Face Hub. Idefics is designed to facilitate the generation of text from various prompts, including text and images. This documentation provides a comprehensive understanding of Idefics, its architecture, usage, and how it can be integrated into your projects.
+
+## Overview
+
+Idefics leverages the power of pre-trained models to generate textual responses based on a wide range of prompts. It is capable of handling both text and images, making it suitable for various multimodal tasks, including text generation from images.
+
+## Class Definition
+
+```python
+class Idefics:
+ def __init__(
+ self,
+ checkpoint="HuggingFaceM4/idefics-9b-instruct",
+ device=None,
+ torch_dtype=torch.bfloat16,
+ max_length=100,
+ ):
+```
+
+## Usage
+
+To use Idefics, follow these steps:
+
+1. Initialize the Idefics instance:
+
+```python
+from swarms.models import Idefics
+
+model = Idefics()
+```
+
+2. Generate text based on prompts:
+
+```python
+prompts = [
+ "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
+]
+response = model(prompts)
+print(response)
+```
+
+### Example 1 - Image Questioning
+
+```python
+from swarms.models import Idefics
+
+model = Idefics()
+prompts = [
+ "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
+]
+response = model(prompts)
+print(response)
+```
+
+### Example 2 - Bidirectional Conversation
+
+```python
+from swarms.models import Idefics
+
+model = Idefics()
+user_input = "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
+response = model.chat(user_input)
+print(response)
+
+user_input = "User: Who is that? https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
+response = model.chat(user_input)
+print(response)
+```
+
+### Example 3 - Configuration Changes
+
+```python
+model.set_checkpoint("new_checkpoint")
+model.set_device("cpu")
+model.set_max_length(200)
+model.clear_chat_history()
+```
+
+## How Idefics Works
+
+Idefics operates by leveraging pre-trained models from the Hugging Face Hub. Here's how it works:
+
+1. **Initialization**: When you create an Idefics instance, it initializes the model using a specified checkpoint, sets the device for inference, and configures other parameters like data type and maximum text length.
+
+2. **Prompt-Based Inference**: You can use the `infer` method to generate text based on prompts. It processes prompts in batched or non-batched mode, depending on your preference. It uses a pre-trained processor to handle text and images.
+
+3. **Bidirectional Conversation**: The `chat` method enables bidirectional conversations. You provide user input, and the model responds accordingly. The chat history is maintained for context.
+
+4. **Configuration Changes**: You can change the model checkpoint, device, maximum text length, or clear the chat history as needed during runtime.
+
+## Parameters
+
+- `checkpoint`: The name of the pre-trained model checkpoint (default is "HuggingFaceM4/idefics-9b-instruct").
+- `device`: The device to use for inference. By default, it uses CUDA if available; otherwise, it uses CPU.
+- `torch_dtype`: The data type to use for inference. By default, it uses torch.bfloat16.
+- `max_length`: The maximum length of the generated text (default is 100).
+
+## Additional Information
+
+- Idefics provides a convenient way to engage in bidirectional conversations with pre-trained models.
+- You can easily change the model checkpoint, device, and other settings to adapt to your specific use case.
+
+That concludes the documentation for Idefics. We hope you find this tool valuable for your multimodal text generation tasks. If you have any questions or encounter any issues, please refer to the Hugging Face Transformers documentation for further assistance. Enjoy working with Idefics!
\ No newline at end of file
diff --git a/docs/swarms/models/index.md b/docs/swarms/models/index.md
new file mode 100644
index 00000000..9e001eea
--- /dev/null
+++ b/docs/swarms/models/index.md
@@ -0,0 +1,178 @@
+## LLMs in Swarms Documentation
+
+Welcome to the documentation for the llm section of the swarms package, designed to facilitate seamless integration with various AI language models and APIs. This package empowers developers, end-users, and system administrators to interact with AI models from different providers, such as OpenAI, Hugging Face, Google PaLM, and Anthropic.
+
+### Table of Contents
+1. [OpenAI](#openai)
+2. [HuggingFace](#huggingface)
+3. [Google PaLM](#google-palm)
+4. [Anthropic](#anthropic)
+
+### 1. OpenAI (swarms.agents.models.OpenAI)
+
+The OpenAI class provides an interface to interact with OpenAI's language models. It allows both synchronous and asynchronous interactions.
+
+**Constructor:**
+```python
+OpenAI(api_key: str, system: str = None, console: bool = True, model: str = None, params: dict = None, save_messages: bool = True)
+```
+
+**Attributes:**
+- `api_key` (str): Your OpenAI API key.
+
+- `system` (str, optional): A system message to be used in conversations.
+
+- `console` (bool, default=True): Display console logs.
+
+- `model` (str, optional): Name of the language model to use.
+
+- `params` (dict, optional): Additional parameters for model interactions.
+
+- `save_messages` (bool, default=True): Save conversation messages.
+
+**Methods:**
+
+- `generate(message: str, **kwargs) -> str`: Generate a response using the OpenAI model.
+
+- `generate_async(message: str, **kwargs) -> str`: Generate a response asynchronously.
+
+- `ask_multiple(ids: List[str], question_template: str) -> List[str]`: Query multiple IDs simultaneously.
+
+- `stream_multiple(ids: List[str], question_template: str) -> List[str]`: Stream multiple responses.
+
+**Usage Example:**
+```python
+import asyncio
+
+from swarms import OpenAI
+
+chat = OpenAI(api_key="YOUR_OPENAI_API_KEY")
+
+response = chat.generate("Hello, how can I assist you?")
+print(response)
+
+ids = ["id1", "id2", "id3"]
+async_responses = asyncio.run(chat.ask_multiple(ids, "How is {id}?"))
+print(async_responses)
+```
+
+### 2. HuggingFace (swarms.agents.models.HuggingFaceLLM)
+
+The HuggingFaceLLM class allows interaction with language models from Hugging Face.
+
+**Constructor:**
+```python
+HuggingFaceLLM(model_id: str, device: str = None, max_length: int = 20, quantize: bool = False, quantization_config: dict = None)
+```
+
+**Attributes:**
+
+- `model_id` (str): ID or name of the Hugging Face model.
+
+- `device` (str, optional): Device to run the model on (e.g., 'cuda', 'cpu').
+
+- `max_length` (int, default=20): Maximum length of generated text.
+
+- `quantize` (bool, default=False): Apply model quantization.
+
+- `quantization_config` (dict, optional): Configuration for quantization.
+
+**Methods:**
+
+- `generate(prompt_text: str, max_length: int = None) -> str`: Generate text based on a prompt.
+
+**Usage Example:**
+```python
+from swarms import HuggingFaceLLM
+
+model_id = "gpt2"
+hugging_face_model = HuggingFaceLLM(model_id=model_id)
+
+prompt = "Once upon a time"
+generated_text = hugging_face_model.generate(prompt)
+print(generated_text)
+```
+
+### 3. Google PaLM (swarms.agents.models.GooglePalm)
+
+The GooglePalm class provides an interface for Google's PaLM Chat API.
+
+**Constructor:**
+```python
+GooglePalm(model_name: str = "models/chat-bison-001", google_api_key: str = None, temperature: float = None, top_p: float = None, top_k: int = None, n: int = 1)
+```
+
+**Attributes:**
+
+- `model_name` (str): Name of the Google PaLM model.
+
+- `google_api_key` (str, optional): Google API key.
+
+- `temperature` (float, optional): Temperature for text generation.
+
+- `top_p` (float, optional): Top-p sampling value.
+
+- `top_k` (int, optional): Top-k sampling value.
+
+- `n` (int, default=1): Number of candidate completions.
+
+**Methods:**
+
+- `generate(messages: List[Dict[str, Any]], stop: List[str] = None, **kwargs) -> Dict[str, Any]`: Generate text based on a list of messages.
+
+- `__call__(messages: List[Dict[str, Any]], stop: List[str] = None, **kwargs) -> Dict[str, Any]`: Generate text using the call syntax.
+
+**Usage Example:**
+```python
+from swarms import GooglePalm
+
+google_palm = GooglePalm()
+messages = [
+ {"role": "system", "content": "You are a helpful assistant"},
+ {"role": "user", "content": "Tell me a joke"},
+]
+
+response = google_palm.generate(messages)
+print(response["choices"][0]["text"])
+```
+
+### 4. Anthropic (swarms.agents.models.Anthropic)
+
+The Anthropic class enables interaction with Anthropic's large language models.
+
+**Constructor:**
+```python
+Anthropic(model: str = "claude-2", max_tokens_to_sample: int = 256, temperature: float = None, top_k: int = None, top_p: float = None, streaming: bool = False, default_request_timeout: int = None)
+```
+
+**Attributes:**
+
+- `model` (str): Name of the Anthropic model.
+
+- `max_tokens_to_sample` (int, default=256): Maximum tokens to sample.
+
+- `temperature` (float, optional): Temperature for text generation.
+
+- `top_k` (int, optional): Top-k sampling value.
+
+- `top_p` (float, optional): Top-p sampling value.
+
+- `streaming` (bool, default=False): Enable streaming mode.
+
+- `default_request_timeout` (int, optional): Default request timeout.
+
+**Methods:**
+
+- `generate(prompt: str, stop: List[str] = None) -> str`: Generate text based on a prompt.
+
+**Usage Example:**
+```python
+from swarms import Anthropic
+
+anthropic = Anthropic()
+prompt = "Once upon a time"
+generated_text = anthropic.generate(prompt)
+print(generated_text)
+```
+
+This concludes the documentation for the "models" folder, providing you with tools to seamlessly integrate with various language models and APIs. Happy coding!
\ No newline at end of file
diff --git a/docs/swarms/models/kosmos.md b/docs/swarms/models/kosmos.md
new file mode 100644
index 00000000..a19ea791
--- /dev/null
+++ b/docs/swarms/models/kosmos.md
@@ -0,0 +1,217 @@
+# `Kosmos` Documentation
+
+## Introduction
+
+Welcome to the documentation for Kosmos, a powerful multimodal AI model that can perform various tasks, including multimodal grounding, referring expression comprehension, referring expression generation, grounded visual question answering (VQA), and grounded image captioning. Kosmos is based on the ydshieh/kosmos-2-patch14-224 model and is designed to process both text and images to provide meaningful outputs. In this documentation, you will find a detailed explanation of the Kosmos class, its functions, parameters, and usage examples.
+
+## Overview
+
+Kosmos is a state-of-the-art multimodal AI model that combines the power of natural language understanding with image analysis. It can perform several tasks that involve processing both textual prompts and images to provide informative responses. Whether you need to find objects in an image, understand referring expressions, generate descriptions, answer questions, or create captions, Kosmos has you covered.
+
+## Class Definition
+
+```python
+class Kosmos:
+ def __init__(self, model_name="ydshieh/kosmos-2-patch14-224"):
+```
+
+## Usage
+
+To use Kosmos, follow these steps:
+
+1. Initialize the Kosmos instance:
+
+```python
+from swarms.models.kosmos_two import Kosmos
+
+kosmos = Kosmos()
+```
+
+2. Perform Multimodal Grounding:
+
+```python
+kosmos.multimodal_grounding(
+ "Find the red apple in the image.", "https://example.com/apple.jpg"
+)
+```
+
+### Example 1 - Multimodal Grounding
+
+```python
+from swarms.models.kosmos_two import Kosmos
+
+kosmos = Kosmos()
+
+kosmos.multimodal_grounding(
+ "Find the red apple in the image.", "https://example.com/apple.jpg"
+)
+```
+
+3. Perform Referring Expression Comprehension:
+
+```python
+kosmos.referring_expression_comprehension(
+ "Show me the green bottle.", "https://example.com/bottle.jpg"
+)
+```
+
+### Example 2 - Referring Expression Comprehension
+
+```python
+from swarms.models.kosmos_two import Kosmos
+
+kosmos = Kosmos()
+
+kosmos.referring_expression_comprehension(
+ "Show me the green bottle.", "https://example.com/bottle.jpg"
+)
+```
+
+4. Generate Referring Expressions:
+
+```python
+kosmos.referring_expression_generation(
+ "It is on the table.", "https://example.com/table.jpg"
+)
+```
+
+### Example 3 - Referring Expression Generation
+
+```python
+from swarms.models.kosmos_two import Kosmos
+
+kosmos = Kosmos()
+
+kosmos.referring_expression_generation(
+ "It is on the table.", "https://example.com/table.jpg"
+)
+```
+
+5. Perform Grounded Visual Question Answering (VQA):
+
+```python
+kosmos.grounded_vqa("What is the color of the car?", "https://example.com/car.jpg")
+```
+
+### Example 4 - Grounded Visual Question Answering
+
+```python
+from swarms.models.kosmos_two import Kosmos
+
+kosmos = Kosmos()
+
+kosmos.grounded_vqa("What is the color of the car?", "https://example.com/car.jpg")
+```
+
+6. Generate Grounded Image Captions:
+
+```python
+kosmos.grounded_image_captioning("https://example.com/beach.jpg")
+```
+
+### Example 5 - Grounded Image Captioning
+
+```python
+from swarms.models.kosmos_two import Kosmos
+
+kosmos = Kosmos()
+
+kosmos.grounded_image_captioning("https://example.com/beach.jpg")
+```
+
+7. Generate Detailed Grounded Image Captions:
+
+```python
+kosmos.grounded_image_captioning_detailed("https://example.com/beach.jpg")
+```
+
+### Example 6 - Detailed Grounded Image Captioning
+
+```python
+from swarms.models.kosmos_two import Kosmos
+
+kosmos = Kosmos()
+
+kosmos.grounded_image_captioning_detailed("https://example.com/beach.jpg")
+```
+
+8. Draw Entity Boxes on Image:
+
+```python
+image = kosmos.get_image("https://example.com/image.jpg")
+entities = [
+ ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]),
+ ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]),
+]
+kosmos.draw_entity_boxes_on_image(image, entities, show=True)
+```
+
+### Example 7 - Drawing Entity Boxes on Image
+
+```python
+from swarms.models.kosmos_two import Kosmos
+
+kosmos = Kosmos()
+
+image = kosmos.get_image("https://example.com/image.jpg")
+entities = [
+ ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]),
+ ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]),
+]
+kosmos.draw_entity_boxes_on_image(image, entities, show=True)
+```
+
+9. Generate Boxes for Entities:
+
+```python
+entities = [
+ ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]),
+ ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]),
+]
+image = kosmos.generate_boxes(
+ "Find the apple and the banana in the image.", "https://example.com/image.jpg"
+)
+```
+
+### Example 8 - Generating Boxes for Entities
+
+```python
+from swarms.models.kosmos_two import Kosmos
+
+kosmos = Kosmos()
+entities = [
+ ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]),
+ ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]),
+]
+image = kosmos.generate_boxes(
+ "Find the apple and the banana in the image.", "https://example.com/image.jpg"
+)
+```
+
+## How Kosmos Works
+
+Kosmos is a multimodal AI model that combines text and image processing. It uses the ydshieh/kosmos-2-patch14-224 model for understanding and generating responses. Here's how it works:
+
+1. **Initialization**: When you create a Kosmos instance, it loads the ydshieh/kosmos-2-patch14-224 model for multimodal tasks.
+
+2. **Processing Text and Images**: Kosmos can process both text prompts and images. It takes a textual prompt and an image URL as input.
+
+3. **Task Execution**: Based on the task you specify, Kosmos generates informative responses by combining natural language understanding with image analysis.
+
+4. **Drawing Entity Boxes**: You can use the `draw_entity_boxes_on_image` method to draw bounding boxes around entities in an image.
+
+5. **Generating Boxes for Entities**: The `generate_boxes` method allows you to generate bounding boxes for entities mentioned in a prompt.
+
+## Parameters
+
+- `model_name`: The name or path of the Kosmos model to be used. By default, it uses the ydshieh/kosmos-2-patch14-224 model.
+
+## Additional Information
+
+- Kosmos can handle various multimodal tasks, making it a versatile tool for understanding and generating content.
+- You can provide image URLs for image-based tasks, and Kosmos will automatically retrieve and process the images.
+- The `draw_entity_boxes_on_image` method is useful for visualizing the results of multimodal grounding tasks.
+- The `generate_boxes` method is handy for generating bounding boxes around entities mentioned in a textual prompt.
+
+That concludes the documentation for Kosmos. We hope you find this multimodal AI model valuable for your projects. If you have any questions or encounter any issues, please refer to the Hugging Face Transformers documentation for further assistance. Enjoy working with Kosmos!
diff --git a/docs/swarms/models/langchain.md b/docs/swarms/models/langchain.md
new file mode 100644
index 00000000..e69de29b
diff --git a/docs/swarms/models/layoutlm_document_qa.md b/docs/swarms/models/layoutlm_document_qa.md
new file mode 100644
index 00000000..4c6169d0
--- /dev/null
+++ b/docs/swarms/models/layoutlm_document_qa.md
@@ -0,0 +1,88 @@
+# `LayoutLMDocumentQA` Documentation
+
+## Introduction
+
+Welcome to the documentation for LayoutLMDocumentQA, a multimodal model designed for visual question answering (QA) on real-world documents, such as invoices, PDFs, and more. This comprehensive documentation will provide you with a deep understanding of the LayoutLMDocumentQA class, its architecture, usage, and examples.
+
+## Overview
+
+LayoutLMDocumentQA is a versatile model that combines layout-based understanding of documents with natural language processing to answer questions about the content of documents. It is particularly useful for automating tasks like invoice processing, extracting information from PDFs, and handling various document-based QA scenarios.
+
+## Class Definition
+
+```python
+class LayoutLMDocumentQA(AbstractModel):
+ def __init__(
+ self,
+ model_name: str = "impira/layoutlm-document-qa",
+ task: str = "document-question-answering",
+ ):
+```
+
+## Purpose
+
+The LayoutLMDocumentQA class serves the following primary purposes:
+
+1. **Document QA**: LayoutLMDocumentQA is specifically designed for document-based question answering. It can process both the textual content and the layout of a document to answer questions.
+
+2. **Multimodal Understanding**: It combines natural language understanding with document layout analysis, making it suitable for documents with complex structures.
+
+## Parameters
+
+- `model_name` (str): The name or path of the pretrained LayoutLMDocumentQA model. Default: "impira/layoutlm-document-qa".
+- `task` (str): The specific task for which the model will be used. Default: "document-question-answering".
+
+## Usage
+
+To use LayoutLMDocumentQA, follow these steps:
+
+1. Initialize the LayoutLMDocumentQA instance:
+
+```python
+from swarms.models import LayoutLMDocumentQA
+
+layout_lm_doc_qa = LayoutLMDocumentQA()
+```
+
+### Example 1 - Initialization
+
+```python
+layout_lm_doc_qa = LayoutLMDocumentQA()
+```
+
+2. Ask a question about a document and provide the document's image path:
+
+```python
+question = "What is the total amount?"
+image_path = "path/to/document_image.png"
+answer = layout_lm_doc_qa(question, image_path)
+```
+
+### Example 2 - Document QA
+
+```python
+layout_lm_doc_qa = LayoutLMDocumentQA()
+question = "What is the total amount?"
+image_path = "path/to/document_image.png"
+answer = layout_lm_doc_qa(question, image_path)
+```
+
+## How LayoutLMDocumentQA Works
+
+LayoutLMDocumentQA employs a multimodal approach to document QA. Here's how it works:
+
+1. **Initialization**: When you create a LayoutLMDocumentQA instance, you can specify the model to use and the task, which is "document-question-answering" by default.
+
+2. **Question and Document**: You provide a question about the document and the image path of the document to the LayoutLMDocumentQA instance.
+
+3. **Multimodal Processing**: LayoutLMDocumentQA processes both the question and the document image. It combines layout-based analysis with natural language understanding.
+
+4. **Answer Generation**: The model generates an answer to the question based on its analysis of the document layout and content.
+
+## Additional Information
+
+- LayoutLMDocumentQA uses the "impira/layoutlm-document-qa" pretrained model, which is specifically designed for document-based question answering.
+- You can adapt this model to various document QA scenarios by changing the task and providing relevant questions and documents.
+- This model is particularly useful for automating document-based tasks and extracting valuable information from structured documents.
+
+That concludes the documentation for LayoutLMDocumentQA. We hope you find this tool valuable for your document-based question answering needs. If you have any questions or encounter any issues, please refer to the LayoutLMDocumentQA documentation for further assistance. Enjoy using LayoutLMDocumentQA!
\ No newline at end of file
diff --git a/docs/swarms/models/llama3.md b/docs/swarms/models/llama3.md
new file mode 100644
index 00000000..4ae0f1ef
--- /dev/null
+++ b/docs/swarms/models/llama3.md
@@ -0,0 +1,96 @@
+## Llama3
+
+
+```python
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+from swarms.models.base_llm import BaseLLM
+
+
+class Llama3(BaseLLM):
+ """
+ Llama3 class represents a Llama model for natural language generation.
+
+ Args:
+ model_id (str): The ID of the Llama model to use.
+ system_prompt (str): The system prompt to use for generating responses.
+ temperature (float): The temperature value for controlling the randomness of the generated responses.
+ top_p (float): The top-p value for controlling the diversity of the generated responses.
+ max_tokens (int): The maximum number of tokens to generate in the response.
+ **kwargs: Additional keyword arguments.
+
+ Attributes:
+ model_id (str): The ID of the Llama model being used.
+ system_prompt (str): The system prompt for generating responses.
+ temperature (float): The temperature value for generating responses.
+ top_p (float): The top-p value for generating responses.
+ max_tokens (int): The maximum number of tokens to generate in the response.
+ tokenizer (AutoTokenizer): The tokenizer for the Llama model.
+ model (AutoModelForCausalLM): The Llama model for generating responses.
+
+ Methods:
+ run(task, *args, **kwargs): Generates a response for the given task.
+
+ """
+
+ def __init__(
+ self,
+ model_id="meta-llama/Meta-Llama-3-8B-Instruct",
+ system_prompt: str = None,
+ temperature: float = 0.6,
+ top_p: float = 0.9,
+ max_tokens: int = 4000,
+ **kwargs,
+ ):
+ self.model_id = model_id
+ self.system_prompt = system_prompt
+ self.temperature = temperature
+ self.top_p = top_p
+ self.max_tokens = max_tokens
+ self.tokenizer = AutoTokenizer.from_pretrained(model_id)
+ self.model = AutoModelForCausalLM.from_pretrained(
+ model_id,
+ torch_dtype=torch.bfloat16,
+ device_map="auto",
+ )
+
+ def run(self, task: str, *args, **kwargs):
+ """
+ Generates a response for the given task.
+
+ Args:
+ task (str): The user's task or input.
+
+ Returns:
+ str: The generated response.
+
+ """
+ messages = [
+ {"role": "system", "content": self.system_prompt},
+ {"role": "user", "content": task},
+ ]
+
+ input_ids = self.tokenizer.apply_chat_template(
+ messages, add_generation_prompt=True, return_tensors="pt"
+ ).to(self.model.device)
+
+ terminators = [
+ self.tokenizer.eos_token_id,
+ self.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
+ ]
+
+ outputs = self.model.generate(
+ input_ids,
+ max_new_tokens=self.max_tokens,
+ eos_token_id=terminators,
+ do_sample=True,
+ temperature=self.temperature,
+ top_p=self.top_p,
+ *args,
+ **kwargs,
+ )
+ response = outputs[0][input_ids.shape[-1] :]
+ return self.tokenizer.decode(
+ response, skip_special_tokens=True
+ )
+```
\ No newline at end of file
diff --git a/docs/swarms/models/models_available_overview.md b/docs/swarms/models/models_available_overview.md
new file mode 100644
index 00000000..db2c9bdd
--- /dev/null
+++ b/docs/swarms/models/models_available_overview.md
@@ -0,0 +1,306 @@
+## The Swarms Framework: A Comprehensive Guide to Model APIs and Usage
+
+### Introduction
+
+The Swarms framework is a versatile and robust tool designed to streamline the integration and orchestration of multiple AI models, making it easier for developers to build sophisticated multi-agent systems. This blog aims to provide a detailed guide on using the Swarms framework, covering the various models it supports, common methods, settings, and practical examples.
+
+### Overview of the Swarms Framework
+
+Swarms is a "framework of frameworks" that allows seamless integration of various AI models, including those from OpenAI, Anthropic, Hugging Face, Azure, and more. This flexibility enables users to leverage the strengths of different models within a single application. The framework provides a unified interface for model interaction, simplifying the process of integrating and managing multiple AI models.
+
+### Getting Started with Swarms
+
+To get started with Swarms, you need to install the framework and set up the necessary environment variables. Here's a step-by-step guide:
+
+#### Installation
+
+You can install the Swarms framework using pip:
+
+```bash
+pip install swarms
+```
+
+#### Setting Up Environment Variables
+
+Swarms relies on environment variables to manage API keys and other configurations. You can use the `dotenv` package to load these variables from a `.env` file.
+
+```bash
+pip install python-dotenv
+```
+
+Create a `.env` file in your project directory and add your API keys and other settings:
+
+```env
+OPENAI_API_KEY=your_openai_api_key
+ANTHROPIC_API_KEY=your_anthropic_api_key
+AZURE_OPENAI_ENDPOINT=your_azure_openai_endpoint
+AZURE_OPENAI_DEPLOYMENT=your_azure_openai_deployment
+OPENAI_API_VERSION=your_openai_api_version
+AZURE_OPENAI_API_KEY=your_azure_openai_api_key
+AZURE_OPENAI_AD_TOKEN=your_azure_openai_ad_token
+```
+
+### Using the Swarms Framework
+
+Swarms supports a variety of models from different providers. Here are some examples of how to use these models within the Swarms framework.
+
+#### Using the Anthropic Model
+
+The Anthropic model is one of the many models supported by Swarms. Here's how you can use it:
+
+```python
+import os
+from swarms.models import Anthropic
+
+# Load the environment variables
+anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
+
+# Create an instance of the Anthropic model
+model = Anthropic(anthropic_api_key=anthropic_api_key)
+
+# Define the task
+task = "What is quantum field theory? What are 3 books on the field?"
+
+# Generate a response
+response = model(task)
+
+# Print the response
+print(response)
+```
+
+#### Using the HuggingfaceLLM Model
+
+HuggingfaceLLM allows you to use models from Hugging Face's vast repository. Here's an example:
+
+```python
+from swarms.models import HuggingfaceLLM
+
+# Define the model ID
+model_id = "NousResearch/Yarn-Mistral-7b-128k"
+
+# Create an instance of the HuggingfaceLLM model
+inference = HuggingfaceLLM(model_id=model_id)
+
+# Define the task
+task = "Once upon a time"
+
+# Generate a response
+generated_text = inference(task)
+print(generated_text)
+```
+
+
+
+#### Using the OpenAIChat Model
+
+The OpenAIChat model is designed for conversational tasks. Here's how to use it:
+
+```python
+import os
+from swarms.models import OpenAIChat
+
+# Load the environment variables
+openai_api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat model
+openai = OpenAIChat(openai_api_key=openai_api_key, verbose=False)
+
+# Define the task
+chat = openai("What are quantum fields?")
+print(chat)
+```
+
+#### Using the TogetherLLM Model
+
+TogetherLLM supports models from the Together ecosystem. Here's an example:
+
+```python
+from swarms import TogetherLLM
+
+# Initialize the model with your parameters
+model = TogetherLLM(
+ model_name="mistralai/Mixtral-8x7B-Instruct-v0.1",
+ max_tokens=1000,
+ together_api_key="your_together_api_key",
+)
+
+# Run the model
+response = model.run("Generate a blog post about the best way to make money online.")
+print(response)
+```
+
+#### Using the Azure OpenAI Model
+
+The Azure OpenAI model is another powerful tool that can be integrated with Swarms. Here's how to use it:
+
+```python
+import os
+from dotenv import load_dotenv
+from swarms import AzureOpenAI
+
+# Load the environment variables
+load_dotenv()
+
+# Create an instance of the AzureOpenAI class
+model = AzureOpenAI(
+ azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
+ deployment_name=os.getenv("AZURE_OPENAI_DEPLOYMENT"),
+ openai_api_version=os.getenv("OPENAI_API_VERSION"),
+ openai_api_key=os.getenv("AZURE_OPENAI_API_KEY"),
+ azure_ad_token=os.getenv("AZURE_OPENAI_AD_TOKEN"),
+)
+
+# Define the prompt
+prompt = (
+ "Analyze this load document and assess it for any risks and"
+ " create a table in markdown format."
+)
+
+# Generate a response
+response = model(prompt)
+print(response)
+```
+
+
+#### Using the GPT4VisionAPI Model
+
+The GPT4VisionAPI model can analyze images and provide detailed insights. Here's how to use it:
+
+```python
+import os
+from dotenv import load_dotenv
+from swarms import GPT4VisionAPI
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment variables
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the GPT4VisionAPI class
+gpt4vision = GPT4VisionAPI(
+ openai_api_key=api_key,
+ model_name="gpt-4o",
+ max_tokens=1000,
+ openai_proxy="https://api.openai.com/v1/chat/completions",
+)
+
+# Define the URL of the image to analyze
+img = "ear.png"
+
+# Define the task to perform on the image
+task = "What is this image"
+
+# Run the GPT4VisionAPI on the image with the specified task
+answer = gpt4vision.run(task, img, return_json=True)
+
+# Print the answer
+print(answer)
+```
+
+#### Using the QwenVLMultiModal Model
+
+The QwenVLMultiModal model is designed for multi-modal tasks, such as processing both text and images. Here's an example of how to use it:
+
+```python
+from swarms import QwenVLMultiModal
+
+# Instantiate the QwenVLMultiModal model
+model = QwenVLMultiModal(
+ model_name="Qwen/Qwen-VL-Chat",
+ device="cuda",
+ quantize=True,
+)
+
+# Run the model
+response = model("Hello, how are you?", "https://example.com/image.jpg")
+
+# Print the response
+print(response)
+```
+
+
+### Common Methods in Swarms
+
+Swarms provides several common methods that are useful across different models. One of the most frequently used methods is `__call__`.
+
+#### The `__call__` Method
+
+The `__call__` method is used to run the model on a given task. Here is a generic example:
+
+```python
+# Assuming `model` is an instance of any supported model
+task = "Explain the theory of relativity."
+response = model(task)
+print(response)
+```
+
+This method abstracts the complexity of interacting with different model APIs, providing a consistent interface for executing tasks.
+
+### Common Settings in Swarms
+
+Swarms allows you to configure various settings to customize the behavior of the models. Here are some common settings:
+
+#### API Keys
+
+API keys are essential for authenticating and accessing the models. These keys are typically set through environment variables:
+
+```python
+import os
+
+# Set API keys as environment variables
+os.environ['OPENAI_API_KEY'] = 'your_openai_api_key'
+os.environ['ANTHROPIC_API_KEY'] = 'your_anthropic_api_key'
+```
+
+#### Model-Specific Settings
+
+Different models may have specific settings that need to be configured. For example, the `AzureOpenAI` model requires several settings related to the Azure environment:
+
+```python
+model = AzureOpenAI(
+ azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
+ deployment_name=os.getenv("AZURE_OPENAI_DEPLOYMENT"),
+ openai_api_version=os.getenv("OPENAI_API_VERSION"),
+ openai_api_key=os.getenv("AZURE_OPENAI_API_KEY"),
+ azure_ad_token=os.getenv("AZURE_OPENAI_AD_TOKEN"),
+)
+```
+
+### Advanced Usage and Best Practices
+
+To make the most out of the Swarms framework, consider the following best practices:
+
+#### Extensive Logging
+
+Use logging to monitor the behavior and performance of your models. The `loguru` library is recommended for its simplicity and flexibility:
+
+```python
+from loguru import logger
+
+logger.add("file.log", rotation="10 MB")
+
+# Log model interactions
+logger.info("Running task on Anthropic model")
+response = model(task)
+logger.info(f"Response: {response}")
+```
+
+#### Error Handling
+
+Implement robust error handling to manage API failures and other issues gracefully:
+
+```python
+try:
+ response = model(task)
+except Exception as e:
+ logger.error(f"Error running task: {e}")
+ response = "An error occurred while processing your request."
+print(response)
+```
+
+### Conclusion
+
+The Swarms framework provides a powerful and flexible way to integrate and manage multiple AI models within a single application. By following the guidelines and examples provided in this blog, you can leverage Swarms to build sophisticated, multi-agent systems with ease. Whether you're using models from OpenAI, Anthropic, Azure, or Hugging Face, Swarms offers a unified interface that simplifies the process of model orchestration and execution.
\ No newline at end of file
diff --git a/docs/swarms/models/nougat.md b/docs/swarms/models/nougat.md
new file mode 100644
index 00000000..217990a1
--- /dev/null
+++ b/docs/swarms/models/nougat.md
@@ -0,0 +1,118 @@
+# `Nougat` Documentation
+
+## Introduction
+
+Welcome to the documentation for Nougat, a versatile model designed by Meta for transcribing scientific PDFs into user-friendly Markdown format, extracting information from PDFs, and extracting metadata from PDF documents. This documentation will provide you with a deep understanding of the Nougat class, its architecture, usage, and examples.
+
+## Overview
+
+Nougat is a powerful tool that combines language modeling and image processing capabilities to convert scientific PDF documents into Markdown format. It is particularly useful for researchers, students, and professionals who need to extract valuable information from PDFs quickly. With Nougat, you can simplify complex PDFs, making their content more accessible and easy to work with.
+
+## Class Definition
+
+```python
+class Nougat:
+ def __init__(
+ self,
+ model_name_or_path="facebook/nougat-base",
+ min_length: int = 1,
+ max_new_tokens: int = 30,
+ ):
+```
+
+## Purpose
+
+The Nougat class serves the following primary purposes:
+
+1. **PDF Transcription**: Nougat is designed to transcribe scientific PDFs into Markdown format. It helps convert complex PDF documents into a more readable and structured format, making it easier to extract information.
+
+2. **Information Extraction**: It allows users to extract valuable information and content from PDFs efficiently. This can be particularly useful for researchers and professionals who need to extract data, figures, or text from scientific papers.
+
+3. **Metadata Extraction**: Nougat can also extract metadata from PDF documents, providing essential details about the document, such as title, author, and publication date.
+
+## Parameters
+
+- `model_name_or_path` (str): The name or path of the pretrained Nougat model. Default: "facebook/nougat-base".
+- `min_length` (int): The minimum length of the generated transcription. Default: 1.
+- `max_new_tokens` (int): The maximum number of new tokens to generate in the Markdown transcription. Default: 30.
+
+## Usage
+
+To use Nougat, follow these steps:
+
+1. Initialize the Nougat instance:
+
+```python
+from swarms.models import Nougat
+
+nougat = Nougat()
+```
+
+### Example 1 - Initialization
+
+```python
+nougat = Nougat()
+```
+
+2. Transcribe a PDF image using Nougat:
+
+```python
+markdown_transcription = nougat("path/to/pdf_file.png")
+```
+
+### Example 2 - PDF Transcription
+
+```python
+nougat = Nougat()
+markdown_transcription = nougat("path/to/pdf_file.png")
+```
+
+3. Extract information from a PDF:
+
+```python
+information = nougat.extract_information("path/to/pdf_file.png")
+```
+
+### Example 3 - Information Extraction
+
+```python
+nougat = Nougat()
+information = nougat.extract_information("path/to/pdf_file.png")
+```
+
+4. Extract metadata from a PDF:
+
+```python
+metadata = nougat.extract_metadata("path/to/pdf_file.png")
+```
+
+### Example 4 - Metadata Extraction
+
+```python
+nougat = Nougat()
+metadata = nougat.extract_metadata("path/to/pdf_file.png")
+```
+
+## How Nougat Works
+
+Nougat employs a vision encoder-decoder model, along with a dedicated processor, to transcribe PDFs into Markdown format and perform information and metadata extraction. Here's how it works:
+
+1. **Initialization**: When you create a Nougat instance, you can specify the model to use, the minimum transcription length, and the maximum number of new tokens to generate.
+
+2. **Processing PDFs**: Nougat can process PDFs as input. You can provide the path to a PDF document.
+
+3. **Image Processing**: The processor converts PDF pages into images, which are then encoded by the model.
+
+4. **Transcription**: Nougat generates Markdown transcriptions of PDF content, ensuring a minimum length and respecting the token limit.
+
+5. **Information Extraction**: Information extraction involves parsing the Markdown transcription to identify key details or content of interest.
+
+6. **Metadata Extraction**: Metadata extraction involves identifying and extracting document metadata, such as title, author, and publication date.
+
+## Additional Information
+
+- Nougat leverages the "facebook/nougat-base" pretrained model, which is specifically designed for document transcription and extraction tasks.
+- You can adjust the minimum transcription length and the maximum number of new tokens to control the output's length and quality.
+- Nougat can be run on both CPU and GPU devices.
+
+That concludes the documentation for Nougat. We hope you find this tool valuable for your PDF transcription, information extraction, and metadata extraction needs. If you have any questions or encounter any issues, please refer to the Nougat documentation for further assistance. Enjoy using Nougat!
\ No newline at end of file
diff --git a/docs/swarms/models/openai.md b/docs/swarms/models/openai.md
new file mode 100644
index 00000000..ae547631
--- /dev/null
+++ b/docs/swarms/models/openai.md
@@ -0,0 +1,200 @@
+# `BaseOpenAI` and `OpenAI` Documentation
+
+## Table of Contents
+
+1. [Overview](#overview)
+2. [Class Architecture](#class-architecture)
+3. [Purpose](#purpose)
+4. [Class Attributes](#class-attributes)
+5. [Methods](#methods)
+ - [Construction](#construction)
+ - [Configuration](#configuration)
+ - [Tokenization](#tokenization)
+ - [Generation](#generation)
+ - [Asynchronous Generation](#asynchronous-generation)
+6. [Usage Examples](#usage-examples)
+ - [Creating an OpenAI Object](#creating-an-openai-object)
+ - [Generating Text](#generating-text)
+ - [Advanced Configuration](#advanced-configuration)
+
+---
+
+## 1. Overview
+
+The `BaseOpenAI` and `OpenAI` classes are part of the LangChain library, designed to interact with OpenAI's large language models (LLMs). These classes provide a seamless interface for utilizing OpenAI's API to generate natural language text.
+
+## 2. Class Architecture
+
+Both `BaseOpenAI` and `OpenAI` classes inherit from `BaseLLM`, demonstrating an inheritance-based architecture. This architecture allows for easy extensibility and customization while adhering to the principles of object-oriented programming.
+
+## 3. Purpose
+
+The purpose of these classes is to simplify the interaction with OpenAI's LLMs. They encapsulate API calls, handle tokenization, and provide a high-level interface for generating text. By instantiating an object of the `OpenAI` class, developers can quickly leverage the power of OpenAI's models to generate text for various applications, such as chatbots, content generation, and more.
+
+## 4. Class Attributes
+
+Here are the key attributes and their descriptions for the `BaseOpenAI` and `OpenAI` classes:
+
+| Attribute | Description |
+|---------------------------|-------------|
+| `lc_secrets` | A dictionary of secrets required for LangChain, including the OpenAI API key. |
+| `lc_attributes` | A dictionary of attributes relevant to LangChain. |
+| `is_lc_serializable()` | A method indicating if the class is serializable for LangChain. |
+| `model_name` | The name of the language model to use. |
+| `temperature` | The sampling temperature for text generation. |
+| `max_tokens` | The maximum number of tokens to generate in a completion. |
+| `top_p` | The total probability mass of tokens to consider at each step. |
+| `frequency_penalty` | Penalizes repeated tokens according to frequency. |
+| `presence_penalty` | Penalizes repeated tokens. |
+| `n` | How many completions to generate for each prompt. |
+| `best_of` | Generates `best_of` completions server-side and returns the "best." |
+| `model_kwargs` | Holds any model parameters valid for `create` calls not explicitly specified. |
+| `openai_api_key` | The OpenAI API key used for authentication. |
+| `openai_api_base` | The base URL for the OpenAI API. |
+| `openai_organization` | The OpenAI organization name, if applicable. |
+| `openai_proxy` | An explicit proxy URL for OpenAI requests. |
+| `batch_size` | The batch size to use when passing multiple documents for generation. |
+| `request_timeout` | The timeout for requests to the OpenAI completion API. |
+| `logit_bias` | Adjustment to the probability of specific tokens being generated. |
+| `max_retries` | The maximum number of retries to make when generating. |
+| `streaming` | Whether to stream the results or not. |
+| `allowed_special` | A set of special tokens that are allowed. |
+| `disallowed_special` | A collection of special tokens that are not allowed. |
+| `tiktoken_model_name` | The model name to pass to `tiktoken` for token counting. |
+
+## 5. Methods
+
+### 5.1 Construction
+
+#### 5.1.1 `__new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]`
+- Description: Initializes the OpenAI object.
+- Arguments:
+ - `cls` (class): The class instance.
+ - `data` (dict): Additional data for initialization.
+- Returns:
+ - Union[OpenAIChat, BaseOpenAI]: An instance of the OpenAI class.
+
+### 5.2 Configuration
+
+#### 5.2.1 `build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]`
+- Description: Builds extra kwargs from additional params passed in.
+- Arguments:
+ - `cls` (class): The class instance.
+ - `values` (dict): Values and parameters to build extra kwargs.
+- Returns:
+ - Dict[str, Any]: A dictionary of built extra kwargs.
+
+#### 5.2.2 `validate_environment(cls, values: Dict) -> Dict`
+- Description: Validates that the API key and python package exist in the environment.
+- Arguments:
+ - `values` (dict): The class values and parameters.
+- Returns:
+ - Dict: A dictionary of validated values.
+
+### 5.3 Tokenization
+
+#### 5.3.1 `get_sub_prompts(self, params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None) -> List[List[str]]`
+- Description: Gets sub-prompts for LLM call.
+- Arguments:
+ - `params` (dict): Parameters for LLM call.
+ - `prompts` (list): List of prompts.
+ - `stop` (list, optional): List of stop words.
+- Returns:
+ - List[List[str]]: List of sub-prompts.
+
+#### 5.3.2 `get_token_ids(self, text: str) -> List[int]`
+- Description: Gets token IDs using the `tiktoken` package.
+- Arguments:
+ - `text` (str): The text for which to calculate token IDs.
+- Returns:
+ - List[int]: A list of token IDs.
+
+#### 5.3.3 `modelname_to_contextsize(modelname: str) -> int`
+- Description: Calculates the maximum number of tokens possible to generate for a model.
+- Arguments:
+ - `modelname` (str): The model name to determine the context size for.
+- Returns:
+ - int: The maximum context size.
+
+#### 5.3.4 `max_tokens_for_prompt(self, prompt: str) -> int`
+- Description: Calculates the maximum number of tokens possible to generate for a prompt.
+- Arguments:
+  - `prompt` (str): The prompt for which to determine the maximum token limit.
+- Returns:
+ - int: The maximum token limit.
+
+### 5.4 Generation
+
+#### 5.4.1 `generate(self, text: Union[str, List[str]], **kwargs) -> Union[str, List[str]]`
+- Description: Generates text using the OpenAI API.
+- Arguments:
+ - `text` (str or list): The input text or list of inputs.
+ - `**kwargs` (dict): Additional parameters for the generation process.
+- Returns:
+ - Union[str, List[str]]: The generated text or list of generated texts.
+
+### 5.5 Asynchronous Generation
+
+#### 5.5.1 `generate_async(self, text: Union[str, List[str]], **kwargs) -> Union[str, List[str]]`
+- Description: Generates text asynchronously using the OpenAI API.
+- Arguments:
+ - `text` (str or list): The input text or list of inputs.
+ - `**kwargs` (dict): Additional parameters for the asynchronous generation process.
+- Returns:
+ - Union[str, List[str]]: The generated text or list of generated texts.
+
+## 6. Usage Examples
+
+### 6.1 Creating an OpenAI Object
+
+```python
+# Import the OpenAI class
+from swarms.models import OpenAI
+
+# Set your OpenAI API key
+api_key = "YOUR_API_KEY"
+
+# Create an OpenAI object
+openai = OpenAI(api_key)
+```
+
+### 6.2 Generating Text
+
+```python
+# Generate text from a single prompt
+prompt = "Translate the following English text to French: 'Hello, how are you?'"
+generated_text = openai.generate(prompt, max_tokens=50)
+
+# Generate text from multiple prompts
+prompts = [
+ "Translate this: 'Good morning' to Spanish.",
+ "Summarize the following article:",
+ article_text,
+]
+generated_texts = openai.generate(prompts, max_tokens=100)
+
+# Generate text asynchronously
+async_prompt = "Translate 'Thank you' into German."
+async_result = openai.generate_async(async_prompt, max_tokens=30)
+
+# Access the result of an asynchronous generation
+async_result_text = async_result.get()
+```
+
+### 6.3 Advanced Configuration
+
+```python
+# Configure generation with advanced options
+custom_options = {
+ "temperature": 0.7,
+ "max_tokens": 100,
+ "top_p": 0.9,
+ "frequency_penalty": 0.2,
+ "presence_penalty": 0.4,
+}
+generated_text = openai.generate(prompt, **custom_options)
+```
+
+This documentation provides a comprehensive understanding of the `BaseOpenAI` and `OpenAI` classes, their attributes, methods, and usage examples. Developers can utilize these classes to interact with OpenAI's language models efficiently, enabling various natural language generation tasks.
\ No newline at end of file
diff --git a/docs/swarms/models/openai_chat.md b/docs/swarms/models/openai_chat.md
new file mode 100644
index 00000000..d7d9b2eb
--- /dev/null
+++ b/docs/swarms/models/openai_chat.md
@@ -0,0 +1,185 @@
+# `OpenAIChat` Documentation
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [Class Overview](#class-overview)
+3. [Class Architecture](#class-architecture)
+4. [Class Attributes](#class-attributes)
+5. [Methods](#methods)
+ - [Construction](#construction)
+ - [Configuration](#configuration)
+ - [Message Handling](#message-handling)
+ - [Generation](#generation)
+ - [Tokenization](#tokenization)
+6. [Usage Examples](#usage-examples)
+7. [Additional Information](#additional-information)
+
+---
+
+## 1. Introduction
+
+The `OpenAIChat` class is part of the LangChain library and serves as an interface to interact with OpenAI's Chat large language models. This documentation provides an in-depth understanding of the class, its attributes, methods, and usage examples.
+
+## 2. Class Overview
+
+The `OpenAIChat` class is designed for conducting chat-like conversations with OpenAI's language models, such as GPT-3.5 Turbo. It allows you to create interactive conversations by sending messages and receiving model-generated responses. This class simplifies the process of integrating OpenAI's models into chatbot applications and other natural language processing tasks.
+
+## 3. Class Architecture
+
+The `OpenAIChat` class is built on top of the `BaseLLM` class, which provides a foundation for working with large language models. This inheritance-based architecture allows for customization and extension while adhering to object-oriented programming principles.
+
+## 4. Class Attributes
+
+Here are the key attributes and their descriptions for the `OpenAIChat` class:
+
+| Attribute | Description |
+|-----------------------------|-------------------------------------------------------------------------------|
+| `client` | An internal client for making API calls to OpenAI. |
+| `model_name` | The name of the language model to use (default: "gpt-3.5-turbo"). |
+| `model_kwargs` | Additional model parameters valid for `create` calls not explicitly specified.|
+| `openai_api_key` | The OpenAI API key used for authentication. |
+| `openai_api_base` | The base URL for the OpenAI API. |
+| `openai_proxy` | An explicit proxy URL for OpenAI requests. |
+| `max_retries` | The maximum number of retries to make when generating (default: 6). |
+| `prefix_messages` | A list of messages to set the initial conversation state (default: []). |
+| `streaming` | Whether to stream the results or not (default: False). |
+| `allowed_special` | A set of special tokens that are allowed (default: an empty set). |
+| `disallowed_special` | A collection of special tokens that are not allowed (default: "all"). |
+
+## 5. Methods
+
+### 5.1 Construction
+
+#### 5.1.1 `__init__(self, model_name: str = "gpt-3.5-turbo", openai_api_key: Optional[str] = None, openai_api_base: Optional[str] = None, openai_proxy: Optional[str] = None, max_retries: int = 6, prefix_messages: List = [])`
+- Description: Initializes an OpenAIChat object.
+- Arguments:
+ - `model_name` (str): The name of the language model to use (default: "gpt-3.5-turbo").
+ - `openai_api_key` (str, optional): The OpenAI API key used for authentication.
+ - `openai_api_base` (str, optional): The base URL for the OpenAI API.
+ - `openai_proxy` (str, optional): An explicit proxy URL for OpenAI requests.
+ - `max_retries` (int): The maximum number of retries to make when generating (default: 6).
+ - `prefix_messages` (List): A list of messages to set the initial conversation state (default: []).
+
+### 5.2 Configuration
+
+#### 5.2.1 `build_extra(self, values: Dict[str, Any]) -> Dict[str, Any]`
+- Description: Builds extra kwargs from additional parameters passed in.
+- Arguments:
+ - `values` (dict): Values and parameters to build extra kwargs.
+- Returns:
+ - Dict[str, Any]: A dictionary of built extra kwargs.
+
+#### 5.2.2 `validate_environment(self, values: Dict) -> Dict`
+- Description: Validates that the API key and Python package exist in the environment.
+- Arguments:
+ - `values` (dict): The class values and parameters.
+- Returns:
+ - Dict: A dictionary of validated values.
+
+### 5.3 Message Handling
+
+#### 5.3.1 `_get_chat_params(self, prompts: List[str], stop: Optional[List[str]] = None) -> Tuple`
+- Description: Gets chat-related parameters for generating responses.
+- Arguments:
+ - `prompts` (list): List of user messages.
+ - `stop` (list, optional): List of stop words.
+- Returns:
+ - Tuple: Messages and parameters.
+
+### 5.4 Generation
+
+#### 5.4.1 `_stream(self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any) -> Iterator[GenerationChunk]`
+- Description: Streams generated text from the OpenAI API, yielding chunks as they are produced (synchronous iterator).
+- Arguments:
+ - `prompt` (str): The user's message.
+ - `stop` (list, optional): List of stop words.
+ - `run_manager` (optional): Callback manager for asynchronous generation.
+ - `**kwargs` (dict): Additional parameters for asynchronous generation.
+- Returns:
+ - Iterator[GenerationChunk]: An iterator of generated text chunks.
+
+#### 5.4.2 `_agenerate(self, prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, **kwargs: Any) -> LLMResult`
+- Description: Generates text asynchronously using the OpenAI API (async version).
+- Arguments:
+ - `prompts` (list): List of user messages.
+ - `stop` (list, optional): List of stop words.
+ - `run_manager` (optional): Callback manager for asynchronous generation.
+ - `**kwargs` (dict): Additional parameters for asynchronous generation.
+- Returns:
+ - LLMResult: A result object containing the generated text.
+
+### 5.5 Tokenization
+
+#### 5.5.1 `get_token_ids(self, text: str) -> List[int]`
+- Description: Gets token IDs using the tiktoken package.
+- Arguments:
+ - `text` (str): The text for which to calculate token IDs.
+- Returns:
+  - List[int]: A list of token IDs.
+
+## 6. Usage Examples
+
+### Example 1: Initializing `OpenAIChat`
+
+```python
+from swarms.models import OpenAIChat
+
+# Initialize OpenAIChat with model name and API key
+openai_chat = OpenAIChat(model_name="gpt-3.5-turbo", openai_api_key="YOUR_API_KEY")
+```
+
+### Example 2: Sending Messages and Generating Responses
+
+```python
+# Define a conversation
+conversation = [
+ "User: Tell me a joke.",
+ "Assistant: Why did the chicken cross the road?",
+ "User: I don't know. Why?",
+ "Assistant: To get to the other side!",
+]
+
+# Set the conversation as the prefix messages
+openai_chat.prefix_messages = conversation
+
+# Generate a response
+user_message = "User: Tell me another joke."
+response = openai_chat.generate([user_message])
+
+# Print the generated response
+print(
+ response[0][0].text
+) # Output: "Assistant: Why don't scientists trust atoms? Because they make up everything!"
+```
+
+### Example 3: Asynchronous Generation
+
+```python
+import asyncio
+
+
+# Define an asynchronous function for generating responses
+async def generate_responses():
+ user_message = "User: Tell me a fun fact."
+ async for chunk in openai_chat.stream([user_message]):
+ print(chunk.text)
+
+
+# Run the asynchronous generation function
+asyncio.run(generate_responses())
+```
+
+## 7. Additional Information
+
+- To use the `OpenAIChat` class, you should have the `openai` Python package installed, and the environment variable `OPENAI_API_KEY` set with your API key.
+- Any parameters that are valid to be passed to the `openai.create` call can be passed to the `OpenAIChat` constructor.
+- You can customize the behavior of the class by setting various attributes, such as `model_name`, `openai_api_key`, `prefix_messages`, and more.
+- For streaming generation, you can use the `_stream` method; for asynchronous generation, use the `_agenerate` method to receive model-generated text (in chunks or as a complete result).
+- To calculate token IDs, you can use the `get_token_ids` method, which utilizes the `tiktoken` package. Make sure to install the `tiktoken` package with `pip install tiktoken` if needed.
+
+---
+
+This documentation provides a comprehensive overview of the `OpenAIChat` class, its attributes, methods, and usage examples. You can use this class to create chatbot applications, conduct conversations with language models, and explore the capabilities of OpenAI's GPT-3.5 Turbo model.
\ No newline at end of file
diff --git a/docs/swarms/models/openai_function_caller.md b/docs/swarms/models/openai_function_caller.md
new file mode 100644
index 00000000..bb952ff1
--- /dev/null
+++ b/docs/swarms/models/openai_function_caller.md
@@ -0,0 +1,238 @@
+# OpenAIFunctionCaller Documentation
+
+The `OpenAIFunctionCaller` class is designed to interface with OpenAI's chat completion API, allowing users to generate responses based on given prompts using specified models. This class encapsulates the setup and execution of API calls, including handling API keys, model parameters, and response formatting. The class extends the `BaseLLM` and utilizes OpenAI's client library to facilitate interactions.
+
+## Class Definition
+
+### OpenAIFunctionCaller
+
+A class that represents a caller for OpenAI chat completions.
+
+### Attributes
+
+| Attribute | Type | Description |
+|----------------------|-------------------|-------------------------------------------------------------------------|
+| `system_prompt` | `str` | The system prompt to be used in the chat completion. |
+| `model_name` | `str` | The name of the OpenAI model to be used. |
+| `max_tokens` | `int` | The maximum number of tokens in the generated completion. |
+| `temperature` | `float` | The temperature parameter for randomness in the completion. |
+| `base_model` | `BaseModel` | The base model to be used for the completion. |
+| `parallel_tool_calls`| `bool` | Whether to make parallel tool calls. |
+| `top_p` | `float` | The top-p parameter for nucleus sampling in the completion. |
+| `client` | `openai.OpenAI` | The OpenAI client for making API calls. |
+
+### Methods
+
+#### `check_api_key`
+
+Checks if the API key is provided and retrieves it from the environment if not.
+
+| Parameter | Type | Description |
+|---------------|--------|--------------------------------------|
+| None | | |
+
+**Returns:**
+
+| Type | Description |
+|--------|--------------------------------------|
+| `str` | The API key. |
+
+#### `run`
+
+Runs the chat completion with the given task and returns the generated completion.
+
+| Parameter | Type | Description |
+|-----------|----------|-----------------------------------------------------------------|
+| `task` | `str` | The user's task for the chat completion. |
+| `*args` | | Additional positional arguments to be passed to the OpenAI API. |
+| `**kwargs`| | Additional keyword arguments to be passed to the OpenAI API. |
+
+**Returns:**
+
+| Type | Description |
+|--------|-----------------------------------------------|
+| `str` | The generated completion. |
+
+#### `convert_to_dict_from_base_model`
+
+Converts a `BaseModel` to a dictionary.
+
+| Parameter | Type | Description |
+|-------------|------------|--------------------------------------|
+| `base_model`| `BaseModel`| The BaseModel to be converted. |
+
+**Returns:**
+
+| Type | Description |
+|--------|--------------------------------------|
+| `dict` | A dictionary representing the BaseModel.|
+
+#### `convert_list_of_base_models`
+
+Converts a list of `BaseModels` to a list of dictionaries.
+
+| Parameter | Type | Description |
+|--------------|-----------------|--------------------------------------|
+| `base_models`| `List[BaseModel]`| A list of BaseModels to be converted.|
+
+**Returns:**
+
+| Type | Description |
+|--------|-----------------------------------------------|
+| `List[Dict]` | A list of dictionaries representing the converted BaseModels. |
+
+## Usage Examples
+
+Here are three examples demonstrating different ways to use the `OpenAIFunctionCaller` class:
+
+### Example 1: Production-Grade Claude Artifacts
+
+```python
+import openai
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from swarms.artifacts.main_artifact import Artifact
+
+
+# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
+
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+    system_prompt="You're a helpful assistant. The time is August 6, 2024",
+ max_tokens=500,
+ temperature=0.5,
+ base_model=Artifact,
+ parallel_tool_calls=False,
+)
+
+
+# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+# Here, we initialize an instance of the OpenAIFunctionCaller class with the following parameters:
+# - system_prompt: A prompt that sets the context for the conversation with the API.
+# - max_tokens: The maximum number of tokens to generate in the API response.
+# - temperature: A parameter that controls the randomness of the generated text.
+# - base_model: The base model to use for the API calls, in this case, the Artifact class.
+out = model.run("Create a python file with a python game code in it")
+print(out)
+```
+
+### Example 2: Prompt Generator
+
+```python
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+from typing import Sequence
+
+
+class PromptUseCase(BaseModel):
+ use_case_name: str = Field(
+ ...,
+ description="The name of the use case",
+ )
+ use_case_description: str = Field(
+ ...,
+ description="The description of the use case",
+ )
+
+
+class PromptSpec(BaseModel):
+ prompt_name: str = Field(
+ ...,
+ description="The name of the prompt",
+ )
+ prompt_description: str = Field(
+ ...,
+ description="The description of the prompt",
+ )
+ prompt: str = Field(
+ ...,
+ description="The prompt for the agent",
+ )
+ tags: str = Field(
+ ...,
+        description="The tags for the prompt such as sentiment, code, etc., separated by commas.",
+ )
+ use_cases: Sequence[PromptUseCase] = Field(
+ ...,
+ description="The use cases for the prompt",
+ )
+
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+    system_prompt="You're an agent creator; your purpose is to create system prompts for new LLM Agents for the user. Follow the best practices for creating a prompt, such as making it direct and clear. Providing instructions and many-shot examples will help the agent understand the task better.",
+ max_tokens=1000,
+ temperature=0.5,
+ base_model=PromptSpec,
+ parallel_tool_calls=False,
+)
+
+
+# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+out = model.run(
+    "Create a prompt for generating quality Rust code with instructions and examples."
+)
+print(out)
+
+```
+
+### Example 3: Sentiment Analysis
+
+```python
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+
+
+# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
+# It is used here to define the data structure for sentiment analysis results.
+class SentimentAnalysisCard(BaseModel):
+ text: str = Field(
+ ...,
+ description="The text to be analyzed for sentiment rating",
+ )
+ rating: str = Field(
+ ...,
+ description="The sentiment rating of the text from 0.0 to 1.0",
+ )
+
+
+# The SentimentAnalysisCard class is a Pydantic BaseModel that represents the data structure
+# for sentiment analysis results. It has two attributes: text and rating.
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+    system_prompt="You're a sentiment Analysis Agent; your purpose is to rate the sentiment of text",
+ max_tokens=100,
+ temperature=0.5,
+ base_model=SentimentAnalysisCard,
+ parallel_tool_calls=False,
+)
+
+
+# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+# Here, we initialize an instance of the OpenAIFunctionCaller class with the following parameters:
+# - system_prompt: A prompt that sets the context for the conversation with the API.
+# - max_tokens: The maximum number of tokens to generate in the API response.
+# - temperature: A parameter that controls the randomness of the generated text.
+# - base_model: The base model to use for the API calls, in this case, the SentimentAnalysisCard class.
+out = model.run("The hotel was average, but the food was excellent.")
+print(out)
+
+```
+
+## Additional Information and Tips
+
+- Ensure that your OpenAI API key is securely stored and not hard-coded into your source code. Use environment variables to manage sensitive information.
+- Adjust the `temperature` and `top_p` parameters to control the randomness and diversity of the generated responses. Lower values for `temperature` will result in more deterministic outputs, while higher values will introduce more variability.
+- When using `parallel_tool_calls`, ensure that the tools you are calling in parallel are thread-safe and can handle concurrent execution.
+
+## References and Resources
+
+- [OpenAI API Documentation](https://beta.openai.com/docs/)
+- [Pydantic Documentation](https://pydantic-docs.helpmanual.io/)
+- [Loguru Logger Documentation](https://loguru.readthedocs.io/)
+
+By following this comprehensive guide, you can effectively utilize the `OpenAIFunctionCaller` class to generate chat completions using OpenAI's models, customize the response parameters, and handle API interactions seamlessly within your application.
\ No newline at end of file
diff --git a/docs/swarms/models/openai_tts.md b/docs/swarms/models/openai_tts.md
new file mode 100644
index 00000000..b2996312
--- /dev/null
+++ b/docs/swarms/models/openai_tts.md
@@ -0,0 +1,135 @@
+# `OpenAITTS` Documentation
+
+## Table of Contents
+1. [Overview](#overview)
+2. [Installation](#installation)
+3. [Usage](#usage)
+ - [Initialization](#initialization)
+ - [Running TTS](#running-tts)
+ - [Running TTS and Saving](#running-tts-and-saving)
+4. [Examples](#examples)
+ - [Basic Usage](#basic-usage)
+ - [Saving the Output](#saving-the-output)
+5. [Advanced Options](#advanced-options)
+6. [Troubleshooting](#troubleshooting)
+7. [References](#references)
+
+## 1. Overview
+
+The `OpenAITTS` module is a Python library that provides an interface for converting text to speech (TTS) using the OpenAI TTS API. It allows you to generate high-quality speech from text input, making it suitable for various applications such as voice assistants, speech synthesis, and more.
+
+### Features:
+- Convert text to speech using OpenAI's TTS model.
+- Supports specifying the model name, voice, and other parameters.
+- Option to save the generated speech to a WAV file.
+
+## 2. Installation
+
+To use the `OpenAITTS` model, you need to install the necessary dependencies. You can do this using `pip`:
+
+```bash
+pip install swarms requests
+```
+
+## 3. Usage
+
+### Initialization
+
+To use the `OpenAITTS` module, you need to initialize an instance of the `OpenAITTS` class. Here's how you can do it:
+
+```python
+from swarms.models.openai_tts import OpenAITTS
+
+# Initialize the OpenAITTS instance
+tts = OpenAITTS(
+ model_name="tts-1-1106",
+ proxy_url="https://api.openai.com/v1/audio/speech",
+ openai_api_key=openai_api_key_env,
+ voice="onyx",
+)
+```
+
+#### Parameters:
+- `model_name` (str): The name of the TTS model to use (default is "tts-1-1106").
+- `proxy_url` (str): The URL for the OpenAI TTS API (default is "https://api.openai.com/v1/audio/speech").
+- `openai_api_key` (str): Your OpenAI API key. It can be obtained from the OpenAI website.
+- `voice` (str): The voice to use for generating speech (default is "onyx").
+- `chunk_size` (int): The size of data chunks when fetching audio (default is 1024 * 1024 bytes).
+- `autosave` (bool): Whether to automatically save the generated speech to a file (default is False).
+- `saved_filepath` (str): The path to the file where the speech will be saved (default is "runs/tts_speech.wav").
+
+### Running TTS
+
+Once the `OpenAITTS` instance is initialized, you can use it to convert text to speech using the `run` method:
+
+```python
+# Generate speech from text
+speech_data = tts.run("Hello, world!")
+```
+
+#### Parameters:
+- `task` (str): The text you want to convert to speech.
+
+#### Returns:
+- `speech_data` (bytes): The generated speech data.
+
+### Running TTS and Saving
+
+You can also use the `run_and_save` method to generate speech from text and save it to a file:
+
+```python
+# Generate speech from text and save it to a file
+speech_data = tts.run_and_save("Hello, world!")
+```
+
+#### Parameters:
+- `task` (str): The text you want to convert to speech.
+
+#### Returns:
+- `speech_data` (bytes): The generated speech data.
+
+## 4. Examples
+
+### Basic Usage
+
+Here's a basic example of how to use the `OpenAITTS` module to generate speech from text:
+
+```python
+from swarms.models.openai_tts import OpenAITTS
+
+# Initialize the OpenAITTS instance
+tts = OpenAITTS(
+ model_name="tts-1-1106",
+ proxy_url="https://api.openai.com/v1/audio/speech",
+ openai_api_key=openai_api_key_env,
+ voice="onyx",
+)
+
+# Generate speech from text
+speech_data = tts.run("Hello, world!")
+```
+
+### Saving the Output
+
+You can save the generated speech to a WAV file using the `run_and_save` method:
+
+```python
+# Generate speech from text and save it to a file
+speech_data = tts.run_and_save("Hello, world!")
+```
+
+## 5. Advanced Options
+
+The `OpenAITTS` module supports various advanced options for customizing the TTS generation process. You can specify the model name, voice, and other parameters during initialization. Additionally, you can configure the chunk size for audio data fetching and choose whether to automatically save the generated speech to a file.
+
+## 6. Troubleshooting
+
+If you encounter any issues while using the `OpenAITTS` module, please make sure you have installed all the required dependencies and that your OpenAI API key is correctly configured. If you still face problems, refer to the OpenAI documentation or contact their support for assistance.
+
+## 7. References
+
+- [OpenAI API Documentation](https://beta.openai.com/docs/)
+- [Python Requests Library](https://docs.python-requests.org/en/latest/)
+- [Python Wave Library](https://docs.python.org/3/library/wave.html)
+
+This documentation provides a comprehensive guide on how to use the `OpenAITTS` module to convert text to speech using OpenAI's TTS model. It covers initialization, basic usage, advanced options, troubleshooting, and references for further exploration.
\ No newline at end of file
diff --git a/docs/swarms/models/vilt.md b/docs/swarms/models/vilt.md
new file mode 100644
index 00000000..2cb56b22
--- /dev/null
+++ b/docs/swarms/models/vilt.md
@@ -0,0 +1,95 @@
+# `Vilt` Documentation
+
+## Introduction
+
+Welcome to the documentation for Vilt, a Vision-and-Language Transformer (ViLT) model fine-tuned on the VQAv2 dataset. Vilt is a powerful model capable of answering questions about images. This documentation will provide a comprehensive understanding of Vilt, its architecture, usage, and how it can be integrated into your projects.
+
+## Overview
+
+Vilt is based on the Vision-and-Language Transformer (ViLT) architecture, designed for tasks that involve understanding both text and images. It has been fine-tuned on the VQAv2 dataset, making it adept at answering questions about images. This model is particularly useful for tasks where textual and visual information needs to be combined to provide meaningful answers.
+
+## Class Definition
+
+```python
+class Vilt:
+ def __init__(self):
+ """
+ Initialize the Vilt model.
+ """
+```
+
+## Usage
+
+To use the Vilt model, follow these steps:
+
+1. Initialize the Vilt model:
+
+```python
+from swarms.models import Vilt
+
+model = Vilt()
+```
+
+2. Call the model with a text question and an image URL:
+
+```python
+output = model(
+ "What is this image?", "http://images.cocodataset.org/val2017/000000039769.jpg"
+)
+```
+
+### Example 1 - Image Questioning
+
+```python
+model = Vilt()
+output = model(
+ "What are the objects in this image?",
+ "http://images.cocodataset.org/val2017/000000039769.jpg",
+)
+print(output)
+```
+
+### Example 2 - Image Analysis
+
+```python
+model = Vilt()
+output = model(
+ "Describe the scene in this image.",
+ "http://images.cocodataset.org/val2017/000000039769.jpg",
+)
+print(output)
+```
+
+### Example 3 - Visual Knowledge Retrieval
+
+```python
+model = Vilt()
+output = model(
+ "Tell me more about the landmark in this image.",
+ "http://images.cocodataset.org/val2017/000000039769.jpg",
+)
+print(output)
+```
+
+## How Vilt Works
+
+Vilt operates by combining text and image information to generate meaningful answers to questions about the provided image. Here's how it works:
+
+1. **Initialization**: When you create a Vilt instance, it initializes the processor and the model. The processor is responsible for handling the image and text input, while the model is the fine-tuned ViLT model.
+
+2. **Processing Input**: When you call the Vilt model with a text question and an image URL, it downloads the image and processes it along with the text question. This processing step involves tokenization and encoding of the input.
+
+3. **Forward Pass**: The encoded input is then passed through the ViLT model. It calculates the logits, and the answer with the highest probability is selected.
+
+4. **Output**: The predicted answer is returned as the output of the model.
+
+## Parameters
+
+Vilt does not require any specific parameters during initialization. It is pre-configured to work with the "dandelin/vilt-b32-finetuned-vqa" model.
+
+## Additional Information
+
+- Vilt is fine-tuned on the VQAv2 dataset, making it proficient at answering questions about a wide range of images.
+- You can use Vilt for various applications, including image question-answering, image analysis, and visual knowledge retrieval.
+
+That concludes the documentation for Vilt. We hope you find this model useful for your vision-and-language tasks. If you have any questions or encounter any issues, please refer to the Hugging Face Transformers documentation for further assistance. Enjoy working with Vilt!
\ No newline at end of file
diff --git a/docs/swarms/papers.md b/docs/swarms/papers.md
new file mode 100644
index 00000000..3df45299
--- /dev/null
+++ b/docs/swarms/papers.md
@@ -0,0 +1,3 @@
+# awesome-multi-agent-papers
+
+An awesome list of multi-agent papers that show you various swarm architectures and much more. [Get started](https://github.com/kyegomez/awesome-multi-agent-papers)
\ No newline at end of file
diff --git a/docs/swarms/structs/abstractswarm.md b/docs/swarms/structs/abstractswarm.md
new file mode 100644
index 00000000..82ebf44e
--- /dev/null
+++ b/docs/swarms/structs/abstractswarm.md
@@ -0,0 +1,516 @@
+# `BaseSwarm` Documentation
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [Class Definition](#class-definition)
+3. [Methods](#methods)
+ - [communicate()](#communicate)
+ - [run()](#run)
+ - [arun()](#arun)
+ - [add_worker(worker)](#add_worker)
+ - [remove_worker(worker)](#remove_worker)
+ - [broadcast(message, sender)](#broadcast)
+ - [reset()](#reset)
+ - [plan(task)](#plan)
+ - [direct_message(message, sender, recipient)](#direct_message)
+ - [autoscaler(num_workers, worker)](#autoscaler)
+ - [get_worker_by_id(id)](#get_worker_by_id)
+ - [get_worker_by_name(name)](#get_worker_by_name)
+ - [assign_task(worker, task)](#assign_task)
+ - [get_all_tasks(worker, task)](#get_all_tasks)
+ - [get_finished_tasks()](#get_finished_tasks)
+ - [get_pending_tasks()](#get_pending_tasks)
+ - [pause_worker(worker, worker_id)](#pause_worker)
+ - [resume_worker(worker, worker_id)](#resume_worker)
+ - [stop_worker(worker, worker_id)](#stop_worker)
+ - [restart_worker(worker)](#restart_worker)
+ - [scale_up(num_worker)](#scale_up)
+ - [scale_down(num_worker)](#scale_down)
+ - [scale_to(num_worker)](#scale_to)
+ - [get_all_workers()](#get_all_workers)
+ - [get_swarm_size()](#get_swarm_size)
+ - [get_swarm_status()](#get_swarm_status)
+ - [save_swarm_state()](#save_swarm_state)
+
+---
+
+## 1. Introduction
+
+The Swarms library is designed to provide a framework for swarm simulation architectures. Swarms are collections of autonomous agents or workers that collaborate to perform tasks and achieve common goals. This documentation will guide you through the functionality and usage of the Swarms library, explaining the purpose and implementation details of the provided classes and methods.
+
+## 2. Class Definition
+
+### `BaseSwarm` Class
+
+The `BaseSwarm` class is an abstract base class that serves as the foundation for swarm simulation architectures. It defines the core functionality and methods required to manage and interact with a swarm of workers.
+
+```python
+from abc import ABC, abstractmethod
+from typing import List
+
+from swarms.swarms.base import AbstractWorker
+
+
+class BaseSwarm(ABC):
+ """
+ Abstract class for swarm simulation architectures
+
+ Methods:
+ ---------
+ ...
+ """
+
+ # The class definition and constructor are provided here.
+
+ @abstractmethod
+ def __init__(self, workers: List["AbstractWorker"]):
+ """Initialize the swarm with workers"""
+
+ # Other abstract methods are listed here.
+```
+
+## 3. Methods
+
+### `communicate()`
+
+The `communicate()` method allows the swarm to exchange information through the orchestrator, protocols, and the universal communication layer.
+
+**Usage Example 1:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm.communicate()
+```
+
+**Usage Example 2:**
+
+```python
+# Another example of using the communicate method
+swarm = YourSwarmClass(workers)
+swarm.communicate()
+```
+
+### `run()`
+
+The `run()` method executes the swarm, initiating its activities.
+
+**Usage Example 1:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm.run()
+```
+
+**Usage Example 2:**
+
+```python
+# Another example of running the swarm
+swarm = YourSwarmClass(workers)
+swarm.run()
+```
+
+### `arun()`
+
+The `arun()` method runs the swarm asynchronously, allowing for parallel execution of tasks.
+
+**Usage Example 1:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm.arun()
+```
+
+**Usage Example 2:**
+
+```python
+# Another example of running the swarm asynchronously
+swarm = YourSwarmClass(workers)
+swarm.arun()
+```
+
+### `add_worker(worker: "AbstractWorker")`
+
+The `add_worker()` method adds a worker to the swarm.
+
+**Parameters:**
+- `worker` (AbstractWorker): The worker to be added to the swarm.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass([])
+worker = YourWorkerClass()
+swarm.add_worker(worker)
+```
+
+### `remove_worker(worker: "AbstractWorker")`
+
+The `remove_worker()` method removes a worker from the swarm.
+
+**Parameters:**
+- `worker` (AbstractWorker): The worker to be removed from the swarm.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+worker = swarm.get_worker_by_id("worker_id")
+swarm.remove_worker(worker)
+```
+
+### `broadcast(message: str, sender: Optional["AbstractWorker"] = None)`
+
+The `broadcast()` method sends a message to all workers in the swarm.
+
+**Parameters:**
+- `message` (str): The message to be broadcasted.
+- `sender` (Optional[AbstractWorker]): The sender of the message (optional).
+
+**Usage Example 1:**
+
+```python
+swarm = YourSwarmClass(workers)
+message = "Hello, everyone!"
+swarm.broadcast(message)
+```
+
+**Usage Example 2:**
+
+```python
+# Another example of broadcasting a message
+swarm = YourSwarmClass(workers)
+message = "Important announcement!"
+sender = swarm.get_worker_by_name("Supervisor")
+swarm.broadcast(message, sender)
+```
+
+### `reset()`
+
+The `reset()` method resets the swarm to its initial state.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm.reset()
+```
+
+### `plan(task: str)`
+
+The `plan()` method instructs workers to individually plan using a workflow or pipeline for a specified task.
+
+**Parameters:**
+- `task` (str): The task for which workers should plan.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+task = "Perform data analysis"
+swarm.plan(task)
+```
+
+### `direct_message(message: str, sender: "AbstractWorker", recipient: "AbstractWorker")`
+
+The `direct_message()` method sends a direct message from one worker to another.
+
+**Parameters:**
+- `message` (str): The message to be sent.
+- `sender` (AbstractWorker): The sender of the message.
+- `recipient` (AbstractWorker): The recipient of the message.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+sender = swarm.get_worker_by_name("Worker1")
+recipient = swarm.get_worker_by_name("Worker2")
+message = "Hello, Worker2!"
+
+
+swarm.direct_message(message, sender, recipient)
+```
+
+### `autoscaler(num_workers: int, worker: List["AbstractWorker"])`
+
+The `autoscaler()` method acts as an autoscaler, dynamically adjusting the number of workers based on system load or other criteria.
+
+**Parameters:**
+- `num_workers` (int): The desired number of workers.
+- `worker` (List[AbstractWorker]): A list of workers to be managed by the autoscaler.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass([])
+workers = [YourWorkerClass() for _ in range(10)]
+swarm.autoscaler(5, workers)
+```
+
+### `get_worker_by_id(id: str) -> "AbstractWorker"`
+
+The `get_worker_by_id()` method locates a worker in the swarm by their ID.
+
+**Parameters:**
+- `id` (str): The ID of the worker to locate.
+
+**Returns:**
+- `AbstractWorker`: The worker with the specified ID.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+worker_id = "worker_123"
+worker = swarm.get_worker_by_id(worker_id)
+```
+
+### `get_worker_by_name(name: str) -> "AbstractWorker"`
+
+The `get_worker_by_name()` method locates a worker in the swarm by their name.
+
+**Parameters:**
+- `name` (str): The name of the worker to locate.
+
+**Returns:**
+- `AbstractWorker`: The worker with the specified name.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+worker_name = "Alice"
+worker = swarm.get_worker_by_name(worker_name)
+```
+
+### `assign_task(worker: "AbstractWorker", task: Any) -> Dict`
+
+The `assign_task()` method assigns a task to a specific worker.
+
+**Parameters:**
+- `worker` (AbstractWorker): The worker to whom the task should be assigned.
+- `task` (Any): The task to be assigned.
+
+**Returns:**
+- `Dict`: A dictionary indicating the status of the task assignment.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+worker = swarm.get_worker_by_name("Worker1")
+task = "Perform data analysis"
+result = swarm.assign_task(worker, task)
+```
+
+### `get_all_tasks(worker: "AbstractWorker", task: Any)`
+
+The `get_all_tasks()` method retrieves all tasks assigned to a specific worker.
+
+**Parameters:**
+- `worker` (AbstractWorker): The worker for whom tasks should be retrieved.
+- `task` (Any): The task to be retrieved.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+worker = swarm.get_worker_by_name("Worker1")
+tasks = swarm.get_all_tasks(worker, "data analysis")
+```
+
+### `get_finished_tasks() -> List[Dict]`
+
+The `get_finished_tasks()` method retrieves all tasks that have been completed by the workers in the swarm.
+
+**Returns:**
+- `List[Dict]`: A list of dictionaries representing finished tasks.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+finished_tasks = swarm.get_finished_tasks()
+```
+
+### `get_pending_tasks() -> List[Dict]`
+
+The `get_pending_tasks()` method retrieves all tasks that are pending or yet to be completed by the workers in the swarm.
+
+**Returns:**
+- `List[Dict]`: A list of dictionaries representing pending tasks.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+pending_tasks = swarm.get_pending_tasks()
+```
+
+### `pause_worker(worker: "AbstractWorker", worker_id: str)`
+
+The `pause_worker()` method pauses a specific worker, temporarily suspending their activities.
+
+**Parameters:**
+- `worker` (AbstractWorker): The worker to be paused.
+- `worker_id` (str): The ID of the worker to be paused.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+worker = swarm.get_worker_by_name("Worker1")
+worker_id = "worker_123"
+swarm.pause_worker(worker, worker_id)
+```
+
+### `resume_worker(worker: "AbstractWorker", worker_id: str)`
+
+The `resume_worker()` method resumes a paused worker, allowing them to continue their activities.
+
+**Parameters:**
+- `worker` (AbstractWorker): The worker to be resumed.
+- `worker_id` (str): The ID of the worker to be resumed.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+worker = swarm.get_worker_by_name("Worker1")
+worker_id = "worker_123"
+swarm.resume_worker(worker, worker_id)
+```
+
+### `stop_worker(worker: "AbstractWorker", worker_id: str)`
+
+The `stop_worker()` method stops a specific worker, terminating their activities.
+
+**Parameters:**
+- `worker` (AbstractWorker): The worker to be stopped.
+- `worker_id` (str): The ID of the worker to be stopped.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+worker = swarm.get_worker_by_name("Worker1")
+worker_id = "worker_123"
+swarm.stop_worker(worker, worker_id)
+```
+
+### `restart_worker(worker: "AbstractWorker")`
+
+The `restart_worker()` method restarts a worker, resetting them to their initial state.
+
+**Parameters:**
+- `worker` (AbstractWorker): The worker to be restarted.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+worker = swarm.get_worker_by_name("Worker1")
+swarm.restart_worker(worker)
+```
+
+### `scale_up(num_worker: int)`
+
+The `scale_up()` method increases the number of workers in the swarm.
+
+**Parameters:**
+- `num_worker` (int): The number of workers to add to the swarm.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm.scale_up(5)
+```
+
+### `scale_down(num_worker: int)`
+
+The `scale_down()` method decreases the number of workers in the swarm.
+
+**Parameters:**
+- `num_worker` (int): The number of workers to remove from the swarm.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm.scale_down(3)
+```
+
+### `scale_to(num_worker: int)`
+
+The `scale_to()` method scales the swarm to a specific number of workers.
+
+**Parameters:**
+- `num_worker` (int): The desired number of workers.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm.scale_to(10)
+```
+
+### `get_all_workers() -> List["AbstractWorker"]`
+
+
+
+The `get_all_workers()` method retrieves a list of all workers in the swarm.
+
+**Returns:**
+- `List[AbstractWorker]`: A list of all workers in the swarm.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+all_workers = swarm.get_all_workers()
+```
+
+### `get_swarm_size() -> int`
+
+The `get_swarm_size()` method returns the size of the swarm, which is the total number of workers.
+
+**Returns:**
+- `int`: The size of the swarm.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm_size = swarm.get_swarm_size()
+```
+
+### `get_swarm_status() -> Dict`
+
+The `get_swarm_status()` method provides information about the current status of the swarm.
+
+**Returns:**
+- `Dict`: A dictionary containing various status indicators for the swarm.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm_status = swarm.get_swarm_status()
+```
+
+### `save_swarm_state()`
+
+The `save_swarm_state()` method allows you to save the current state of the swarm, including worker configurations and task assignments.
+
+**Usage Example:**
+
+```python
+swarm = YourSwarmClass(workers)
+swarm.save_swarm_state()
+```
+
+---
+
+This comprehensive documentation covers the Swarms library, including the `BaseSwarm` class and its methods. You can use this documentation as a guide to understanding and effectively utilizing the Swarms framework for swarm simulation architectures. Feel free to explore further and adapt the library to your specific use cases.
\ No newline at end of file
diff --git a/docs/swarms/structs/agent.md b/docs/swarms/structs/agent.md
new file mode 100644
index 00000000..39076a5e
--- /dev/null
+++ b/docs/swarms/structs/agent.md
@@ -0,0 +1,271 @@
+# `Agent` Documentation
+
+Swarm Agent is a powerful autonomous agent framework designed to connect Language Models (LLMs) with various tools and long-term memory. This framework provides the ability to ingest and process various types of documents such as PDFs, text files, Markdown files, JSON files, and more. The Swarm Agent offers a wide range of features to enhance the capabilities of LLMs and facilitate efficient task execution.
+
+1. **Conversational Loop**: It establishes a conversational loop with a language model. This means it allows you to interact with the model in a back-and-forth manner, taking turns in the conversation.
+
+2. **Feedback Collection**: The class allows users to provide feedback on the responses generated by the model. This feedback can be valuable for training and improving the model's responses over time.
+
+3. **Stoppable Conversation**: You can define custom stopping conditions for the conversation, allowing you to stop the interaction based on specific criteria. For example, you can stop the conversation if a certain keyword is detected in the responses.
+
+4. **Retry Mechanism**: The class includes a retry mechanism that can be helpful if there are issues generating responses from the model. It attempts to generate a response multiple times before raising an error.
+
+### `Agent` Attributes
+
+| Attribute | Description |
+|------------|-------------|
+| `id` | A unique identifier for the agent instance. |
+| `llm` | The language model instance used by the agent. |
+| `template` | The template used for formatting responses. |
+| `max_loops` | The maximum number of loops the agent can run. |
+| `stopping_condition` | A callable function that determines when the agent should stop looping. |
+| `loop_interval` | The interval (in seconds) between loops. |
+| `retry_attempts` | The number of retry attempts for failed LLM calls. |
+| `retry_interval` | The interval (in seconds) between retry attempts. |
+| `return_history` | A boolean indicating whether the agent should return the conversation history. |
+| `stopping_token` | A token that, when present in the response, stops the agent from looping. |
+| `dynamic_loops` | A boolean indicating whether the agent should dynamically determine the number of loops. |
+| `interactive` | A boolean indicating whether the agent should run in interactive mode. |
+| `dashboard` | A boolean indicating whether the agent should display a dashboard. |
+| `agent_name` | The name of the agent instance. |
+| `agent_description` | A description of the agent instance. |
+| `system_prompt` | The system prompt used to initialize the conversation. |
+| `tools` | A list of callable functions representing tools the agent can use. |
+| `dynamic_temperature_enabled` | A boolean indicating whether the agent should dynamically adjust the temperature of the LLM. |
+| `sop` | The standard operating procedure for the agent. |
+| `sop_list` | A list of strings representing the standard operating procedure. |
+| `saved_state_path` | The file path for saving and loading the agent's state. |
+| `autosave` | A boolean indicating whether the agent should automatically save its state. |
+| `context_length` | The maximum length of the context window (in tokens) for the LLM. |
+| `user_name` | The name used to represent the user in the conversation. |
+| `self_healing_enabled` | A boolean indicating whether the agent should attempt to self-heal in case of errors. |
+| `code_interpreter` | A boolean indicating whether the agent should interpret and execute code snippets. |
+| `multi_modal` | A boolean indicating whether the agent should support multimodal inputs (e.g., text and images). |
+| `pdf_path` | The file path of a PDF document to be ingested. |
+| `list_of_pdf` | A list of file paths for PDF documents to be ingested. |
+| `tokenizer` | An instance of a tokenizer used for token counting and management. |
+| `long_term_memory` | An instance of a `BaseVectorDatabase` implementation for long-term memory management. |
+| `preset_stopping_token` | A boolean indicating whether the agent should use a preset stopping token. |
+| `traceback` | An object used for traceback handling. |
+| `traceback_handlers` | A list of traceback handlers. |
+| `streaming_on` | A boolean indicating whether the agent should stream its responses. |
+| `docs` | A list of document paths or contents to be ingested. |
+| `docs_folder` | The path to a folder containing documents to be ingested. |
+| `verbose` | A boolean indicating whether the agent should print verbose output. |
+| `parser` | A callable function used for parsing input data. |
+| `best_of_n` | An integer indicating the number of best responses to generate (for sampling). |
+| `callback` | A callable function to be called after each agent loop. |
+| `metadata` | A dictionary containing metadata for the agent. |
+| `callbacks` | A list of callable functions to be called during the agent's execution. |
+| `logger_handler` | A handler for logging messages. |
+| `search_algorithm` | A callable function representing the search algorithm for long-term memory retrieval. |
+| `logs_to_filename` | The file path for logging agent activities. |
+| `evaluator` | A callable function used for evaluating the agent's responses. |
+| `output_json` | A boolean indicating whether the agent's output should be in JSON format. |
+| `stopping_func` | A callable function used as a stopping condition for the agent. |
+| `custom_loop_condition` | A callable function used as a custom loop condition for the agent. |
+| `sentiment_threshold` | A float value representing the sentiment threshold for evaluating responses. |
+| `custom_exit_command` | A string representing a custom command for exiting the agent's loop. |
+| `sentiment_analyzer` | A callable function used for sentiment analysis on the agent's outputs. |
+| `limit_tokens_from_string` | A callable function used for limiting the number of tokens in a string. |
+| `custom_tools_prompt` | A callable function used for generating a custom prompt for tool usage. |
+| `tool_schema` | A data structure representing the schema for the agent's tools. |
+| `output_type` | A type representing the expected output type of the agent's responses. |
+| `function_calling_type` | A string representing the type of function calling (e.g., "json"). |
+| `output_cleaner` | A callable function used for cleaning the agent's output. |
+| `function_calling_format_type` | A string representing the format type for function calling (e.g., "OpenAI"). |
+| `list_base_models` | A list of base models used for generating tool schemas. |
+| `metadata_output_type` | A string representing the output type for metadata. |
+| `state_save_file_type` | A string representing the file type for saving the agent's state (e.g., "json", "yaml"). |
+| `chain_of_thoughts` | A boolean indicating whether the agent should use the chain of thoughts technique. |
+| `algorithm_of_thoughts` | A boolean indicating whether the agent should use the algorithm of thoughts technique. |
+| `tree_of_thoughts` | A boolean indicating whether the agent should use the tree of thoughts technique. |
+| `tool_choice` | A string representing the method for tool selection (e.g., "auto"). |
+| `execute_tool` | A boolean indicating whether the agent should execute tools. |
+| `rules` | A string representing the rules for the agent's behavior. |
+| `planning` | A boolean indicating whether the agent should perform planning. |
+| `planning_prompt` | A string representing the prompt for planning. |
+| `device` | A string representing the device on which the agent should run. |
+| `custom_planning_prompt` | A string representing a custom prompt for planning. |
+| `memory_chunk_size` | An integer representing the maximum size of memory chunks for long-term memory retrieval. |
+| `agent_ops_on` | A boolean indicating whether agent operations should be enabled. |
+
+### `Agent` Methods
+
+| Method | Description | Inputs | Usage Example |
+|--------|-------------|--------|----------------|
+| `run(task, img=None, *args, **kwargs)` | Runs the autonomous agent loop to complete the given task. | `task` (str): The task to be performed. `img` (str, optional): Path to an image file, if the task involves image processing. `*args`, `**kwargs`: Additional arguments to pass to the language model. | `response = agent.run("Generate a report on financial performance.")` |
+| `__call__(task, img=None, *args, **kwargs)` | An alternative way to call the `run` method. | Same as `run`. | `response = agent("Generate a report on financial performance.")` |
+| `parse_and_execute_tools(response, *args, **kwargs)` | Parses the agent's response and executes any tools mentioned in it. | `response` (str): The agent's response to be parsed. `*args`, `**kwargs`: Additional arguments to pass to the tool execution. | `agent.parse_and_execute_tools(response)` |
+| `long_term_memory_prompt(query, *args, **kwargs)` | Generates a prompt for querying the agent's long-term memory. | `query` (str): The query to search for in long-term memory. `*args`, `**kwargs`: Additional arguments to pass to the long-term memory retrieval. | `memory_retrieval = agent.long_term_memory_prompt("financial performance")` |
+| `add_memory(message)` | Adds a message to the agent's memory. | `message` (str): The message to add to the agent's memory. | `agent.add_memory("Task completed successfully.")` |
+
+
+
+
+## Features
+
+- **Language Model Integration**: The Swarm Agent allows seamless integration with different language models, enabling users to leverage the power of state-of-the-art models.
+- **Tool Integration**: The framework supports the integration of various tools, enabling the agent to perform a wide range of tasks, from code execution to data analysis and beyond.
+- **Long-term Memory Management**: The Swarm Agent incorporates long-term memory management capabilities, allowing it to store and retrieve relevant information for effective decision-making and task execution.
+- **Document Ingestion**: The agent can ingest and process various types of documents, including PDFs, text files, Markdown files, JSON files, and more, enabling it to extract relevant information for task completion.
+- **Interactive Mode**: Users can interact with the agent in an interactive mode, enabling real-time communication and task execution.
+- **Dashboard**: The framework provides a visual dashboard for monitoring the agent's performance and activities.
+- **Dynamic Temperature Control**: The Swarm Agent supports dynamic temperature control, allowing for adjustments to the model's output diversity during task execution.
+- **Autosave and State Management**: The agent can save its state automatically, enabling seamless resumption of tasks after interruptions or system restarts.
+- **Self-Healing and Error Handling**: The framework incorporates self-healing and error-handling mechanisms to ensure robust and reliable operation.
+- **Code Interpretation**: The agent can interpret and execute code snippets, expanding its capabilities for tasks involving programming or scripting.
+- **Multimodal Support**: The framework supports multimodal inputs, enabling the agent to process and reason about various data types, such as text, images, and audio.
+- **Tokenization and Token Management**: The Swarm Agent provides tokenization capabilities, enabling efficient management of token usage and context window truncation.
+- **Sentiment Analysis**: The agent can perform sentiment analysis on its generated outputs, allowing for evaluation and adjustment of responses based on sentiment thresholds.
+- **Output Filtering and Cleaning**: The framework supports output filtering and cleaning, ensuring that generated responses adhere to specific criteria or guidelines.
+- **Asynchronous and Concurrent Execution**: The Swarm Agent supports asynchronous and concurrent task execution, enabling efficient parallelization and scaling of operations.
+- **Planning and Reasoning**: The agent can engage in planning and reasoning processes, leveraging techniques such as algorithm of thoughts and chain of thoughts to enhance decision-making and task execution.
+- **Agent Operations and Monitoring**: The framework provides integration with agent operations and monitoring tools, enabling real-time monitoring and management of the agent's activities.
+
+## Getting Started
+
+First run the following:
+
+```bash
+pip3 install swarms
+```
+
+Then, you can get started with the following:
+
+```python
+from swarms.models import OpenAIChat
+from swarms.structs import Agent
+
+# Initialize the language model
+llm = OpenAIChat()
+
+# Initialize the agent
+agent = Agent(llm=llm, max_loops=3)
+
+# Run a task
+response = agent.run("Generate a report on the financial performance of a company.")
+print(response)
+```
+
+This example initializes an instance of the `Agent` class with an OpenAI language model and a maximum of 3 loops. The `run()` method is then called with a task to generate a report on financial performance, and the agent's response is printed.
+
+## Advanced Usage
+
+The Swarm Agent provides numerous advanced features and customization options. Here are a few examples of how to leverage these features:
+
+### Tool Integration
+
+To integrate tools with the Swarm Agent, you can pass a list of callable functions to the `tools` parameter when initializing the `Agent` instance. The agent will automatically convert these functions into an OpenAI function calling schema and make them available for use during task execution.
+
+```python
+from swarms.structs import Agent
+from my_tools import tool_function_1, tool_function_2
+
+# Initialize the agent with tools
+agent = Agent(llm=llm, max_loops=3, tools=[tool_function_1, tool_function_2])
+```
+
+### Long-term Memory Management
+
+The Swarm Agent supports integration with various vector databases for long-term memory management. You can pass an instance of a `BaseVectorDatabase` implementation to the `long_term_memory` parameter when initializing the `Agent`.
+
+```python
+from swarms.structs import Agent
+from swarms.memory.chroma import ChromaVectorDatabase
+
+# Initialize a vector database
+vector_db = ChromaVectorDatabase(persist_directory="path/to/db")
+
+# Initialize the agent with long-term memory
+agent = Agent(llm=llm, max_loops=3, long_term_memory=vector_db)
+```
+
+### Document Ingestion
+
+The Swarm Agent can ingest various types of documents, such as PDFs, text files, Markdown files, and JSON files. You can pass a list of document paths or contents to the `docs` parameter when initializing the `Agent`.
+
+```python
+from swarms.structs import Agent
+
+# Initialize the agent with documents
+agent = Agent(llm=llm, max_loops=3, docs=["path/to/doc1.pdf", "path/to/doc2.txt"])
+```
+
+### Interactive Mode
+
+The Swarm Agent supports an interactive mode, where users can engage in real-time communication with the agent. To enable interactive mode, set the `interactive` parameter to `True` when initializing the `Agent`.
+
+```python
+from swarms.structs import Agent
+
+# Initialize the agent in interactive mode
+agent = Agent(llm=llm, max_loops=3, interactive=True)
+
+# Run the agent in interactive mode
+agent.interactive_run()
+```
+
+### Sentiment Analysis
+
+The Swarm Agent can perform sentiment analysis on its generated outputs using a sentiment analyzer function. You can pass a callable function to the `sentiment_analyzer` parameter when initializing the `Agent`.
+
+```python
+from swarms.structs import Agent
+from my_sentiment_analyzer import sentiment_analyzer_function
+
+# Initialize the agent with a sentiment analyzer
+agent = Agent(llm=llm, max_loops=3, sentiment_analyzer=sentiment_analyzer_function)
+```
+
+## Documentation Examples
+
+The Swarm Agent provides numerous examples and usage scenarios throughout the documentation. Here are a few examples to illustrate various features and functionalities:
+
+### Undo Functionality
+
+```python
+# Feature 2: Undo functionality
+response = agent.run("Another task")
+print(f"Response: {response}")
+previous_state, message = agent.undo_last()
+print(message)
+```
+
+### Response Filtering
+
+```python
+# Feature 3: Response filtering
+agent.add_response_filter("report")
+response = agent.filtered_run("Generate a report on finance")
+print(response)
+```
+
+### Saving and Loading State
+
+```python
+# Save the agent state
+agent.save_state('saved_flow.json')
+
+# Load the agent state
+agent = Agent(llm=llm_instance, max_loops=5)
+agent.load_state('saved_flow.json')
+agent.run("Continue with the task")
+```
+
+### Async and Concurrent Execution
+
+```python
+# Run a task concurrently
+response = await agent.run_concurrent("Concurrent task")
+print(response)
+
+# Run multiple tasks concurrently
+tasks = [
+ {"task": "Task 1"},
+ {"task": "Task 2", "img": "path/to/image.jpg"},
+ {"task": "Task 3", "custom_param": 42}
+]
+responses = agent.bulk_run(tasks)
+print(responses)
+```
diff --git a/docs/swarms/structs/agent_rearrange.md b/docs/swarms/structs/agent_rearrange.md
new file mode 100644
index 00000000..2cfe5703
--- /dev/null
+++ b/docs/swarms/structs/agent_rearrange.md
@@ -0,0 +1,269 @@
+# `AgentRearrange` Class
+
+The `AgentRearrange` class represents a swarm of agents for rearranging tasks. It allows you to create a swarm of agents, add or remove agents from the swarm, and run the swarm to process tasks based on a specified flow pattern.
+
+## Attributes
+----------
+
+| Attribute | Type | Description |
+| --- | --- | --- |
+| `agents` | `dict` | A dictionary of agents, where the key is the agent's name and the value is the agent object. |
+| `flow` | `str` | The flow pattern of the tasks. |
+| `max_loops` | `int` | The maximum number of loops for the agents to run. |
+| `verbose` | `bool` | Whether to enable verbose logging or not. |
+
+## Methods
+-------
+
+### `__init__(self, agents: List[Agent] = None, flow: str = None, max_loops: int = 1, verbose: bool = True)`
+
+Initializes the `AgentRearrange` object.
+
+| Parameter | Type | Description |
+| --- | --- | --- |
+| `agents` | `List[Agent]` (optional) | A list of `Agent` objects. Defaults to `None`. |
+| `flow` | `str` (optional) | The flow pattern of the tasks. Defaults to `None`. |
+| `max_loops` | `int` (optional) | The maximum number of loops for the agents to run. Defaults to `1`. |
+| `verbose` | `bool` (optional) | Whether to enable verbose logging or not. Defaults to `True`. |
+
+### `add_agent(self, agent: Agent)`
+
+Adds an agent to the swarm.
+
+| Parameter | Type | Description |
+| --- | --- | --- |
+| `agent` | `Agent` | The agent to be added. |
+
+### `remove_agent(self, agent_name: str)`
+
+Removes an agent from the swarm.
+
+| Parameter | Type | Description |
+| --- | --- | --- |
+| `agent_name` | `str` | The name of the agent to be removed. |
+
+### `add_agents(self, agents: List[Agent])`
+
+Adds multiple agents to the swarm.
+
+| Parameter | Type | Description |
+| --- | --- | --- |
+| `agents` | `List[Agent]` | A list of `Agent` objects. |
+
+### `validate_flow(self)`
+
+Validates the flow pattern.
+
+**Raises:**
+
+- `ValueError`: If the flow pattern is incorrectly formatted or contains duplicate agent names.
+
+**Returns:**
+
+- `bool`: `True` if the flow pattern is valid.
+
+### `run(self, task: str, *args, **kwargs)`
+
+Runs the swarm to rearrange the tasks.
+
+| Parameter | Type | Description |
+| --- | --- | --- |
+| `task` | `str` | The initial task to be processed. |
+| `*args` | - | Additional positional arguments. |
+| `**kwargs` | - | Additional keyword arguments. |
+
+**Returns:**
+
+- `str`: The final processed task.
+
+## Documentation for `rearrange` Function
+======================================
+
+The `rearrange` function is a helper function that rearranges the given list of agents based on the specified flow.
+
+## Parameters
+----------
+
+| Parameter | Type | Description |
+| --- | --- | --- |
+| `agents` | `List[Agent]` | The list of agents to be rearranged. |
+| `flow` | `str` | The flow used for rearranging the agents. |
+| `task` | `str` (optional) | The task to be performed during rearrangement. Defaults to `None`. |
+| `*args` | - | Additional positional arguments. |
+| `**kwargs` | - | Additional keyword arguments. |
+
+## Returns
+-------
+
+The result of running the agent system with the specified task.
+
+### Example
+-------
+
+```python
+agents = [agent1, agent2, agent3]
+flow = "agent1 -> agent2, agent3"
+task = "Perform a task"
+rearrange(agents, flow, task)
+```
+
+### Example Usage
+-------------
+
+Here's an example of how to use the `AgentRearrange` class and the `rearrange` function:
+
+```python
+from swarms import Agent, AgentRearrange
+from swarms.models import Anthropic
+from typing import List
+
+# Initialize the director agent
+director = Agent(
+ agent_name="Accounting Director",
+ system_prompt="Directs the accounting tasks for the workers",
+ llm=Anthropic(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accounting_director.json",
+)
+
+# Initialize worker 1
+worker1 = Agent(
+ agent_name="Accountant 1",
+ system_prompt="Processes financial transactions and prepares financial statements",
+ llm=Anthropic(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant1.json",
+)
+
+# Initialize worker 2
+worker2 = Agent(
+ agent_name="Accountant 2",
+ system_prompt="Performs audits and ensures compliance with financial regulations",
+ llm=Anthropic(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant2.json",
+)
+
+# Create a list of agents
+agents = [director, worker1, worker2]
+
+# Define the flow pattern
+flow = "Accounting Director -> Accountant 1 -> Accountant 2"
+
+# Using AgentRearrange class
+agent_system = AgentRearrange(agents=agents, flow=flow)
+output = agent_system.run("Process monthly financial statements")
+print(output)
+
+```
+
+In this example, we first initialize three agents: `director`, `worker1`, and `worker2`. Then, we create a list of these agents and define the flow pattern `"Accounting Director -> Accountant 1 -> Accountant 2"`.
+
+We can use the `AgentRearrange` class by creating an instance of it with the list of agents and the flow pattern. We then call the `run` method with the initial task, and it will execute the agents in the specified order, passing the output of one agent as the input to the next agent.
+
+Alternatively, we can use the `rearrange` function by passing the list of agents, the flow pattern, and the initial task as arguments.
+
+Both the `AgentRearrange` class and the `rearrange` function will return the final output after processing the task through the agents according to the specified flow pattern.
+
+## Error Handling
+--------------
+
+The `AgentRearrange` class includes error handling mechanisms to validate the flow pattern. If the flow pattern is incorrectly formatted or contains duplicate agent names, a `ValueError` will be raised with an appropriate error message.
+
+### Example:
+
+```python
+# Invalid flow pattern
+invalid_flow = "Director->Worker1,Worker2->Worker3"
+agent_system = AgentRearrange(agents=agents, flow=invalid_flow)
+output = agent_system.run("Some task")
+```
+
+This will raise a `ValueError` with the message `"Agent 'Worker3' is not registered."`.
+
+
+## Parallel and Sequential Processing
+----------------------------------
+
+The `AgentRearrange` class supports both parallel and sequential processing of tasks based on the specified flow pattern. If the flow pattern includes multiple agents separated by commas (e.g., `"agent1, agent2"`), the agents will be executed in parallel, and their outputs will be concatenated with a semicolon (`;`). If the flow pattern includes a single agent, it will be executed sequentially.
+
+
+### Parallel processing
+`parallel_flow = "Worker1, Worker2 -> Director"`
+
+### Sequential processing
+`sequential_flow = "Worker1 -> Worker2 -> Director"`
+
+In the `parallel_flow` example, `Worker1` and `Worker2` will be executed in parallel, and their outputs will be concatenated and passed to `Director`. In the `sequential_flow` example, `Worker1` will be executed first, and its output will be passed to `Worker2`, and then the output of `Worker2` will be passed to `Director`.
+
+## Logging
+-------
+
+The `AgentRearrange` class includes logging capabilities using the `loguru` library. If `verbose` is set to `True` during initialization, a log file named `agent_rearrange.log` will be created, and log messages will be written to it. You can use this log file to track the execution of the agents and any potential issues or errors that may occur.
+
+
+```bash
+2023-05-08 10:30:15.456 | INFO | agent_rearrange:__init__:34 - Adding agent Director to the swarm.
+2023-05-08 10:30:15.457 | INFO | agent_rearrange:__init__:34 - Adding agent Worker1 to the swarm.
+2023-05-08 10:30:15.457 | INFO | agent_rearrange:__init__:34 - Adding agent Worker2 to the swarm.
+2023-05-08 10:30:15.458 | INFO | agent_rearrange:run:118 - Running agents in parallel: ['Worker1', 'Worker2']
+2023-05-08 10:30:15.459 | INFO | agent_rearrange:run:121 - Running agents sequentially: ['Director']
+```
+
+## Additional Parameters
+---------------------
+
+The `AgentRearrange` class also accepts additional parameters that can be passed to the `run` method using `*args` and `**kwargs`. These parameters will be forwarded to the individual agents during execution.
+
+`agent_system = AgentRearrange(agents=agents, flow=flow)`
+`output = agent_system.run("Some task", max_tokens=200, temperature=0.7)`
+
+In this example, the `max_tokens` and `temperature` parameters will be passed to each agent during execution.
+
+## Customization
+-------------
+
+The `AgentRearrange` class and the `rearrange` function can be customized and extended to suit specific use cases. For example, you can create custom agents by inheriting from the `Agent` class and implementing custom logic for task processing. You can then add these custom agents to the swarm and define the flow pattern accordingly.
+
+Additionally, you can modify the `run` method of the `AgentRearrange` class to implement custom logic for task processing and agent interaction.
+
+
+## Limitations
+-----------
+
+It's important to note that the `AgentRearrange` class and the `rearrange` function rely on the individual agents to process tasks correctly. The quality of the output will depend on the capabilities and configurations of the agents used in the swarm. Additionally, the `AgentRearrange` class does not provide any mechanisms for task prioritization or load balancing among the agents.
+
+## Future Improvements
+-------------------
+
+Here are some potential future improvements for the `AgentRearrange` class and the `rearrange` function:
+
+- **Task Prioritization**: Implement a mechanism to prioritize tasks based on factors such as urgency, importance, or resource availability.
+- **Load Balancing**: Incorporate load balancing algorithms to distribute tasks among agents more efficiently, taking into account factors such as agent availability, performance, and resource utilization.
+- **Dynamic Flow Reconfiguration**: Allow for dynamic reconfiguration of the flow pattern during runtime, enabling the addition, removal, or reordering of agents based on specific conditions or events.
+- **Error Handling and Fault Tolerance**: Enhance error handling and fault tolerance mechanisms to gracefully handle agent failures, task timeouts, or other exceptional situations.
+- **Monitoring and Metrics**: Implement monitoring and metrics collection to track the performance and efficiency of the swarm, as well as individual agent performance.
+- **Scalability**: Enhance the scalability of the system to handle larger numbers of agents and tasks efficiently.
+
+## Conclusion
+----------
+
+The `AgentRearrange` class and the `rearrange` function provide a flexible and extensible framework for orchestrating swarms of agents to process tasks based on a specified flow pattern. By combining the capabilities of individual agents, you can create complex workflows and leverage the strengths of different agents to tackle various tasks efficiently.
+
+While the current implementation offers basic functionality for agent rearrangement, there is room for future improvements and customizations to enhance the system's capabilities and cater to more specific use cases.
+
+Whether you're working on natural language processing tasks, data analysis, or any other domain where agent-based systems can be beneficial, the `AgentRearrange` class and the `rearrange` function provide a solid foundation for building and experimenting with swarm-based solutions.
\ No newline at end of file
diff --git a/docs/swarms/structs/agent_registry.md b/docs/swarms/structs/agent_registry.md
new file mode 100644
index 00000000..82afc1f1
--- /dev/null
+++ b/docs/swarms/structs/agent_registry.md
@@ -0,0 +1,239 @@
+# AgentRegistry Documentation
+
+The `AgentRegistry` class is designed to manage a collection of agents, providing methods for adding, deleting, updating, and querying agents. This class ensures thread-safe operations on the registry, making it suitable for concurrent environments. Additionally, the `AgentModel` class is a Pydantic model used for validating and storing agent information.
+
+## Attributes
+
+### AgentModel
+
+| Attribute | Type | Description |
+|-----------|--------|--------------------------------------|
+| `agent_id`| `str` | The unique identifier for the agent. |
+| `agent` | `Agent`| The agent object. |
+
+### AgentRegistry
+
+| Attribute | Type | Description |
+|-----------|---------------------|-------------------------------------------|
+| `agents` | `Dict[str, AgentModel]` | A dictionary mapping agent IDs to `AgentModel` instances. |
+| `lock` | `Lock` | A threading lock for thread-safe operations. |
+
+## Methods
+
+### `__init__(self)`
+
+Initializes the `AgentRegistry` object.
+
+- **Usage Example:**
+ ```python
+ registry = AgentRegistry()
+ ```
+
+### `add(self, agent_id: str, agent: Agent) -> None`
+
+Adds a new agent to the registry.
+
+- **Parameters:**
+ - `agent_id` (`str`): The unique identifier for the agent.
+ - `agent` (`Agent`): The agent to add.
+
+- **Raises:**
+ - `ValueError`: If the agent ID already exists in the registry.
+ - `ValidationError`: If the input data is invalid.
+
+- **Usage Example:**
+ ```python
+ agent = Agent(agent_name="Agent1")
+ registry.add("agent_1", agent)
+ ```
+
+### `delete(self, agent_id: str) -> None`
+
+Deletes an agent from the registry.
+
+- **Parameters:**
+ - `agent_id` (`str`): The unique identifier for the agent to delete.
+
+- **Raises:**
+ - `KeyError`: If the agent ID does not exist in the registry.
+
+- **Usage Example:**
+ ```python
+ registry.delete("agent_1")
+ ```
+
+### `update_agent(self, agent_id: str, new_agent: Agent) -> None`
+
+Updates an existing agent in the registry.
+
+- **Parameters:**
+ - `agent_id` (`str`): The unique identifier for the agent to update.
+ - `new_agent` (`Agent`): The new agent to replace the existing one.
+
+- **Raises:**
+ - `KeyError`: If the agent ID does not exist in the registry.
+ - `ValidationError`: If the input data is invalid.
+
+- **Usage Example:**
+ ```python
+ new_agent = Agent(agent_name="UpdatedAgent")
+ registry.update_agent("agent_1", new_agent)
+ ```
+
+### `get(self, agent_id: str) -> Agent`
+
+Retrieves an agent from the registry.
+
+- **Parameters:**
+ - `agent_id` (`str`): The unique identifier for the agent to retrieve.
+
+- **Returns:**
+ - `Agent`: The agent associated with the given agent ID.
+
+- **Raises:**
+ - `KeyError`: If the agent ID does not exist in the registry.
+
+- **Usage Example:**
+ ```python
+ agent = registry.get("agent_1")
+ ```
+
+### `list_agents(self) -> List[str]`
+
+Lists all agent identifiers in the registry.
+
+- **Returns:**
+ - `List[str]`: A list of all agent identifiers.
+
+- **Usage Example:**
+ ```python
+ agent_ids = registry.list_agents()
+ ```
+
+### `query(self, condition: Optional[Callable[[Agent], bool]] = None) -> List[Agent]`
+
+Queries agents based on a condition.
+
+- **Parameters:**
+ - `condition` (`Optional[Callable[[Agent], bool]]`): A function that takes an agent and returns a boolean indicating whether the agent meets the condition. Defaults to `None`.
+
+- **Returns:**
+ - `List[Agent]`: A list of agents that meet the condition.
+
+- **Usage Example:**
+ ```python
+ def is_active(agent):
+ return agent.is_active
+
+ active_agents = registry.query(is_active)
+ ```
+
+### `find_agent_by_name(self, agent_name: str) -> Agent`
+
+Finds an agent by its name.
+
+- **Parameters:**
+ - `agent_name` (`str`): The name of the agent to find.
+
+- **Returns:**
+ - `Agent`: The agent with the specified name.
+
+- **Usage Example:**
+ ```python
+ agent = registry.find_agent_by_name("Agent1")
+ ```
+
+
+### Full Example
+
+```python
+from swarms.structs.agent_registry import AgentRegistry
+from swarms import Agent, OpenAIChat, Anthropic
+
+# Initialize the agents
+growth_agent1 = Agent(
+ agent_name="Marketing Specialist",
+ system_prompt="You're the marketing specialist, your purpose is to help companies grow by improving their marketing strategies!",
+ agent_description="Improve a company's marketing strategies!",
+ llm=OpenAIChat(),
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ saved_state_path="marketing_specialist.json",
+ stopping_token="Stop!",
+ interactive=True,
+ context_length=1000,
+)
+
+growth_agent2 = Agent(
+ agent_name="Sales Specialist",
+ system_prompt="You're the sales specialist, your purpose is to help companies grow by improving their sales strategies!",
+ agent_description="Improve a company's sales strategies!",
+ llm=Anthropic(),
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ saved_state_path="sales_specialist.json",
+ stopping_token="Stop!",
+ interactive=True,
+ context_length=1000,
+)
+
+growth_agent3 = Agent(
+ agent_name="Product Development Specialist",
+ system_prompt="You're the product development specialist, your purpose is to help companies grow by improving their product development strategies!",
+ agent_description="Improve a company's product development strategies!",
+ llm=Anthropic(),
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ saved_state_path="product_development_specialist.json",
+ stopping_token="Stop!",
+ interactive=True,
+ context_length=1000,
+)
+
+growth_agent4 = Agent(
+ agent_name="Customer Service Specialist",
+ system_prompt="You're the customer service specialist, your purpose is to help companies grow by improving their customer service strategies!",
+ agent_description="Improve a company's customer service strategies!",
+ llm=OpenAIChat(),
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ saved_state_path="customer_service_specialist.json",
+ stopping_token="Stop!",
+ interactive=True,
+ context_length=1000,
+)
+
+
+# Initialize the registry
+registry = AgentRegistry()
+
+# Register the agents
+registry.add("Marketing Specialist", growth_agent1)
+registry.add("Sales Specialist", growth_agent2)
+registry.add("Product Development Specialist", growth_agent3)
+registry.add("Customer Service Specialist", growth_agent4)
+
+```
+
+## Logging and Error Handling
+
+Each method in the `AgentRegistry` class includes logging to track the execution flow and captures errors to provide detailed information in case of failures. This is crucial for debugging and ensuring smooth operation of the registry. The `report_error` function is used for reporting exceptions that occur during method execution.
+
+## Additional Tips
+
+- Ensure that agents provided to the `AgentRegistry` are properly initialized and configured to handle the tasks they will receive.
+- Utilize the logging information to monitor and debug the registry operations.
+- Use the `lock` attribute to ensure thread-safe operations when accessing or modifying the registry.
+
diff --git a/docs/swarms/structs/artifact.md b/docs/swarms/structs/artifact.md
new file mode 100644
index 00000000..9e00f083
--- /dev/null
+++ b/docs/swarms/structs/artifact.md
@@ -0,0 +1,103 @@
+# swarms.structs Documentation
+
+## Introduction
+
+The swarms.structs library provides a collection of classes for representing artifacts and their attributes. This documentation will provide an overview of the `Artifact` class, its attributes, functionality, and usage examples.
+
+### Artifact Class
+
+The `Artifact` class represents an artifact and its attributes. It inherits from the `BaseModel` class and includes the following attributes:
+
+#### Attributes
+
+1. `artifact_id (str)`: Id of the artifact.
+2. `file_name (str)`: Filename of the artifact.
+3. `relative_path (str, optional)`: Relative path of the artifact in the agent's workspace.
+
+These attributes are crucial for identifying and managing different artifacts within a given context.
+
+## Class Definition
+
+The `Artifact` class can be defined as follows:
+
+```python
+class Artifact(BaseModel):
+ """
+ Represents an artifact.
+
+ Attributes:
+ artifact_id (str): Id of the artifact.
+ file_name (str): Filename of the artifact.
+ relative_path (str, optional): Relative path of the artifact in the agent's workspace.
+ """
+
+ artifact_id: str = Field(
+ ...,
+ description="Id of the artifact",
+ example="b225e278-8b4c-4f99-a696-8facf19f0e56",
+ )
+ file_name: str = Field(
+ ..., description="Filename of the artifact", example="main.py"
+ )
+ relative_path: Optional[str] = Field(
+ None,
+ description=("Relative path of the artifact in the agent's workspace"),
+ example="python/code/",
+ )
+```
+
+The `Artifact` class defines the mandatory and optional attributes and provides corresponding descriptions along with example values.
+
+## Functionality and Usage
+
+The `Artifact` class encapsulates the information and attributes representing an artifact. It provides a structured and organized way to manage artifacts within a given context.
+
+### Example 1: Creating an Artifact instance
+
+To create an instance of the `Artifact` class, you can simply initialize it with the required attributes. Here's an example:
+
+```python
+from swarms.structs import Artifact
+
+artifact_instance = Artifact(
+ artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
+ file_name="main.py",
+ relative_path="python/code/",
+)
+```
+
+In this example, we create an instance of the `Artifact` class with the specified artifact details.
+
+### Example 2: Accessing Artifact attributes
+
+You can access the attributes of the `Artifact` instance using dot notation. Here's how you can access the file name of the artifact:
+
+```python
+print(artifact_instance.file_name)
+# Output: "main.py"
+```
+
+### Example 3: Handling optional attributes
+
+If the `relative_path` attribute is not provided during artifact creation, it will default to `None`. Here's an example:
+
+```python
+artifact_instance_no_path = Artifact(
+    artifact_id="c280e347-9b7d-4c68-a337-7abcf50d23ec", file_name="script.js"
+)
+
+print(artifact_instance_no_path.relative_path)
+# Output: None
+```
+
+By providing default values for optional attributes, the `Artifact` class allows flexibility in defining artifact instances.
+
+### Additional Information and Tips
+
+The `Artifact` class represents a powerful and flexible means of handling various artifacts with different attributes. By utilizing this class, users can organize, manage, and streamline their artifacts with ease.
+
+## References and Resources
+
+For further details and references related to the swarms.structs library and the `Artifact` class, refer to the [official documentation](https://swarms.structs.docs/artifact.html).
+
+This comprehensive documentation provides an in-depth understanding of the `Artifact` class, its attributes, functionality, and usage examples. By following the detailed examples and explanations, developers can effectively leverage the capabilities of the `Artifact` class within their projects.
diff --git a/docs/swarms/structs/artifactupload.md b/docs/swarms/structs/artifactupload.md
new file mode 100644
index 00000000..90b30f58
--- /dev/null
+++ b/docs/swarms/structs/artifactupload.md
@@ -0,0 +1,49 @@
+# swarms.structs
+
+## Overview
+
+Swarms is a library that provides tools for managing a distributed system of agents working together to achieve a common goal. The structs module within Swarms provides a set of data structures and classes that are used to represent artifacts, tasks, and other entities within the system. The `ArtifactUpload` class is one such data structure that represents the process of uploading an artifact to an agent's workspace.
+
+## ArtifactUpload
+
+The `ArtifactUpload` class inherits from the `BaseModel` class. It has two attributes: `file` and `relative_path`. The `file` attribute represents the bytes of the file to be uploaded, while the `relative_path` attribute represents the relative path of the artifact in the agent's workspace.
+
+### Class Definition
+
+```python
+class ArtifactUpload(BaseModel):
+ file: bytes = Field(..., description="File to upload")
+ relative_path: Optional[str] = Field(
+ None,
+ description=("Relative path of the artifact in the agent's workspace"),
+ example="python/code/",
+ )
+```
+
+The `ArtifactUpload` class requires the `file` attribute to be passed as an argument. It is of type `bytes` and represents the file to be uploaded. The `relative_path` attribute is optional and is of type `str`. It represents the relative path of the artifact in the agent's workspace. If not provided, it defaults to `None`.
+
+### Functionality and Usage
+
+The `ArtifactUpload` class is used to create an instance of an artifact upload. It can be instantiated with or without a `relative_path`. Here is an example of how the class can be used:
+
+```python
+from swarms.structs import ArtifactUpload
+
+# Uploading a file with no relative path
+upload_no_path = ArtifactUpload(file=b"example_file_contents")
+
+# Uploading a file with a relative path
+upload_with_path = ArtifactUpload(
+ file=b"example_file_contents", relative_path="python/code/"
+)
+```
+
+In the above example, `upload_no_path` is an instance of `ArtifactUpload` with no specified `relative_path`, whereas `upload_with_path` is an instance of `ArtifactUpload` with the `relative_path` set to "python/code/".
+
+### Additional Information
+
+When passing the `file` and `relative_path` parameters to the `ArtifactUpload` class, ensure that the `file` parameter contains the raw contents of the file to be uploaded as a `bytes` object. If a `relative_path` is provided, ensure that it is a valid path within the agent's workspace.
+
+# Conclusion
+
+The `ArtifactUpload` class is an essential data structure within the Swarms library that represents the process of uploading an artifact to an agent's workspace. By using this class, users can easily manage and represent artifact uploads within the Swarms distributed system.
diff --git a/docs/swarms/structs/async_workflow.md b/docs/swarms/structs/async_workflow.md
new file mode 100644
index 00000000..4bb1471c
--- /dev/null
+++ b/docs/swarms/structs/async_workflow.md
@@ -0,0 +1,206 @@
+# AsyncWorkflow Documentation
+
+The `AsyncWorkflow` class represents an asynchronous workflow designed to execute tasks concurrently. This class is ideal for scenarios where tasks need to be run asynchronously, leveraging Python's asyncio capabilities to manage multiple tasks efficiently.
+
+### Key Concepts
+
+- **Asynchronous Execution**: Tasks are run concurrently using asyncio, allowing for non-blocking operations.
+- **Task Pool**: A collection of tasks to be executed within the workflow.
+- **Event Loop**: The asyncio event loop that manages the execution of asynchronous tasks.
+- **Stopping Condition**: A condition that, when met, stops the execution of the workflow.
+
+## Attributes
+
+### Arguments
+
+| Argument | Type | Default | Description |
+|----------|------|---------|-------------|
+| `name` | `str` | `"Async Workflow"` | The name of the workflow. |
+| `description` | `str` | `"A workflow to run asynchronous tasks"` | The description of the workflow. |
+| `max_loops` | `int` | `1` | The maximum number of loops to run the workflow. |
+| `autosave` | `bool` | `True` | Flag indicating whether to autosave the results. |
+| `dashboard` | `bool` | `False` | Flag indicating whether to display a dashboard. |
+| `task_pool` | `List[Any]` | `[]` | The list of tasks in the workflow. |
+| `results` | `List[Any]` | `[]` | The list of results from running the tasks. |
+| `loop` | `Optional[asyncio.AbstractEventLoop]` | `None` | The event loop to use. |
+| `stopping_condition` | `Optional[Callable]` | `None` | The stopping condition for the workflow. |
+| `agents` | `List[Agent]` | `None` | A list of agents participating in the workflow. |
+
+### Attributes
+
+| Attribute | Type | Description |
+|-----------|------|-------------|
+| `name` | `str` | The name of the workflow. |
+| `description` | `str` | The description of the workflow. |
+| `max_loops` | `int` | The maximum number of loops to run the workflow. |
+| `autosave` | `bool` | Flag indicating whether to autosave the results. |
+| `dashboard` | `bool` | Flag indicating whether to display a dashboard. |
+| `task_pool` | `List[Any]` | The list of tasks in the workflow. |
+| `results` | `List[Any]` | The list of results from running the tasks. |
+| `loop` | `Optional[asyncio.AbstractEventLoop]` | The event loop to use. |
+| `stopping_condition` | `Optional[Callable]` | The stopping condition for the workflow. |
+| `agents` | `List[Agent]` | A list of agents participating in the workflow. |
+
+## Methods
+
+### add
+
+Adds a task or a list of tasks to the task pool.
+
+**Arguments:**
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `task` | `Any` | `None` | A single task to add. |
+| `tasks` | `List[Any]` | `None` | A list of tasks to add. |
+
+**Raises:**
+
+- `ValueError`: If neither task nor tasks are provided.
+
+**Examples:**
+
+```python
+workflow = AsyncWorkflow()
+task1 = Task(description="Task 1")
+task2 = Task(description="Task 2")
+
+# Adding a single task
+await workflow.add(task=task1)
+
+# Adding multiple tasks
+await workflow.add(tasks=[task1, task2])
+```
+
+### delete
+
+Deletes a task from the workflow.
+
+**Arguments:**
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `task` | `Any` | `None` | A single task to delete. |
+| `tasks` | `List[Task]` | `None` | A list of tasks to delete. |
+
+**Examples:**
+
+```python
+workflow = AsyncWorkflow()
+task1 = Task(description="Task 1")
+task2 = Task(description="Task 2")
+
+# Adding tasks to the workflow
+await workflow.add(tasks=[task1, task2])
+
+# Deleting a single task
+await workflow.delete(task=task1)
+
+# Deleting multiple tasks
+await workflow.delete(tasks=[task1, task2])
+```
+
+### run
+
+Runs the workflow and returns the results.
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `List[Any]` | The results of the executed tasks. |
+
+**Examples:**
+
+```python
+workflow = AsyncWorkflow()
+task1 = Task(description="Task 1", execute=async_function)
+task2 = Task(description="Task 2", execute=async_function)
+
+# Adding tasks to the workflow
+await workflow.add(tasks=[task1, task2])
+
+# Running the workflow
+results = await workflow.run()
+```
+
+### Additional Examples
+
+#### Example 1: Simple AsyncWorkflow
+
+```python
+import asyncio
+from swarms.structs.agent import Agent
+from swarms.structs.task import Task
+
+async def simple_task():
+ await asyncio.sleep(1)
+ return "Task Completed"
+
+workflow = AsyncWorkflow()
+task = Task(description="Simple Task", execute=simple_task)
+
+# Adding a task to the workflow
+await workflow.add(task=task)
+
+# Running the workflow
+results = await workflow.run()
+print(results) # Output: ["Task Completed"]
+```
+
+#### Example 2: Workflow with Multiple Tasks
+
+```python
+import asyncio
+from swarms.structs.agent import Agent
+from swarms.structs.task import Task
+
+async def task1():
+ await asyncio.sleep(1)
+ return "Task 1 Completed"
+
+async def task2():
+ await asyncio.sleep(2)
+ return "Task 2 Completed"
+
+workflow = AsyncWorkflow()
+task_1 = Task(description="Task 1", execute=task1)
+task_2 = Task(description="Task 2", execute=task2)
+
+# Adding tasks to the workflow
+await workflow.add(tasks=[task_1, task_2])
+
+# Running the workflow
+results = await workflow.run()
+print(results) # Output: ["Task 1 Completed", "Task 2 Completed"]
+```
+
+#### Example 3: Workflow with Stopping Condition
+
+```python
+import asyncio
+from swarms.structs.agent import Agent
+from swarms.structs.task import Task
+
+async def task1():
+ await asyncio.sleep(1)
+ return "Task 1 Completed"
+
+async def task2():
+ await asyncio.sleep(2)
+ return "Task 2 Completed"
+
+def stop_condition(results):
+ return "Task 2 Completed" in results
+
+workflow = AsyncWorkflow(stopping_condition=stop_condition)
+task_1 = Task(description="Task 1", execute=task1)
+task_2 = Task(description="Task 2", execute=task2)
+
+# Adding tasks to the workflow
+await workflow.add(tasks=[task_1, task_2])
+
+# Running the workflow
+results = await workflow.run()
+print(results) # Output: ["Task 1 Completed", "Task 2 Completed"]
+```
\ No newline at end of file
diff --git a/docs/swarms/structs/auto_swarm.md b/docs/swarms/structs/auto_swarm.md
new file mode 100644
index 00000000..b08c84cd
--- /dev/null
+++ b/docs/swarms/structs/auto_swarm.md
@@ -0,0 +1,191 @@
+# AutoSwarm
+
+The `AutoSwarm` class represents a swarm of agents that can be created and managed automatically. This class leverages the `AutoSwarmRouter` to route tasks to appropriate swarms and supports custom preprocessing, routing, and postprocessing of tasks. It is designed to handle complex workflows efficiently.
+
+### Key Concepts
+
+- **Swarm**: A group of agents working together to complete tasks.
+- **Routing**: Directing tasks to the appropriate swarm based on specific criteria.
+- **Preprocessing and Postprocessing**: Customizable functions to handle tasks before and after routing.
+- **Event Loop**: Managing the execution of tasks in a loop.
+
+## Attributes
+
+### Arguments
+
+| Argument | Type | Default | Description |
+|---------------------|-------------------------------|-----------|-------------|
+| `name` | `Optional[str]` | `None` | The name of the swarm. |
+| `description` | `Optional[str]` | `None` | The description of the swarm. |
+| `verbose` | `bool` | `False` | Whether to enable verbose mode. |
+| `custom_params` | `Optional[Dict[str, Any]]` | `None` | Custom parameters for the swarm. |
+| `custom_preprocess` | `Optional[Callable]` | `None` | Custom preprocessing function for tasks. |
+| `custom_postprocess`| `Optional[Callable]` | `None` | Custom postprocessing function for task results. |
+| `custom_router` | `Optional[Callable]` | `None` | Custom routing function for tasks. |
+| `max_loops` | `int` | `1` | The maximum number of loops to run the workflow. |
+
+### Attributes
+
+| Attribute | Type | Description |
+|----------------------|-------------------------------|-------------|
+| `name` | `Optional[str]` | The name of the swarm. |
+| `description` | `Optional[str]` | The description of the swarm. |
+| `verbose` | `bool` | Whether to enable verbose mode. |
+| `custom_params` | `Optional[Dict[str, Any]]` | Custom parameters for the swarm. |
+| `custom_preprocess` | `Optional[Callable]` | Custom preprocessing function for tasks. |
+| `custom_postprocess` | `Optional[Callable]` | Custom postprocessing function for task results. |
+| `custom_router` | `Optional[Callable]` | Custom routing function for tasks. |
+| `max_loops` | `int` | The maximum number of loops to run the workflow. |
+| `router` | `AutoSwarmRouter` | The router for managing task routing. |
+
+## Methods
+
+### init_logging
+
+Initializes logging for the `AutoSwarm`.
+
+**Examples:**
+
+```python
+swarm = AutoSwarm(name="example_swarm", verbose=True)
+swarm.init_logging()
+```
+
+### run
+
+Runs the swarm simulation.
+
+**Arguments:**
+
+| Parameter | Type | Default | Description |
+|-----------|---------|---------|-------------|
+| `task` | `str` | `None` | The task to be executed. |
+| `*args` | | | Additional arguments. |
+| `**kwargs`| | | Additional keyword arguments. |
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `Any` | The result of the executed task. |
+
+**Raises:**
+
+- `Exception`: If any error occurs during task execution.
+
+**Examples:**
+
+```python
+swarm = AutoSwarm(name="example_swarm", max_loops=3)
+result = swarm.run(task="example_task")
+print(result)
+```
+
+### list_all_swarms
+
+Lists all available swarms and their descriptions.
+
+**Examples:**
+
+```python
+swarm = AutoSwarm(name="example_swarm", max_loops=3)
+swarm.list_all_swarms()
+# Output:
+# INFO: Swarm Name: swarm1 || Swarm Description: Description of swarm1
+# INFO: Swarm Name: swarm2 || Swarm Description: Description of swarm2
+```
+
+### Additional Examples
+
+#### Example 1: Custom Preprocessing and Postprocessing
+
+```python
+def custom_preprocess(task, *args, **kwargs):
+ # Custom preprocessing logic
+ task = task.upper()
+ return task, args, kwargs
+
+def custom_postprocess(result):
+ # Custom postprocessing logic
+ return result.lower()
+
+swarm = AutoSwarm(
+ name="example_swarm",
+ custom_preprocess=custom_preprocess,
+ custom_postprocess=custom_postprocess,
+ max_loops=3
+)
+
+# Running a task with custom preprocessing and postprocessing
+result = swarm.run(task="example_task")
+print(result) # Output will be the processed result
+```
+
+#### Example 2: Custom Router Function
+
+```python
+def custom_router(swarm, task, *args, **kwargs):
+ # Custom routing logic
+ if "specific" in task:
+ return swarm.router.swarm_dict["specific_swarm"].run(task, *args, **kwargs)
+ return swarm.router.swarm_dict["default_swarm"].run(task, *args, **kwargs)
+
+swarm = AutoSwarm(
+ name="example_swarm",
+ custom_router=custom_router,
+ max_loops=3
+)
+
+# Running a task with custom routing
+result = swarm.run(task="specific_task")
+print(result) # Output will be the result of the routed task
+```
+
+#### Example 3: Verbose Mode
+
+```python
+swarm = AutoSwarm(
+ name="example_swarm",
+ verbose=True,
+ max_loops=3
+)
+
+# Running a task with verbose mode enabled
+result = swarm.run(task="example_task")
+# Output will include detailed logs of the task execution process
+```
+
+
+#### Full Example 4:
+First create a class that inherits from `BaseSwarm`, then wrap it in an `AutoSwarmRouter`, and finally pass the router to `AutoSwarm`:
+
+```python
+from swarms import BaseSwarm, AutoSwarmRouter, AutoSwarm
+
+
+class FinancialReportSummarization(BaseSwarm):
+ def __init__(self, name: str = None, *args, **kwargs):
+ super().__init__()
+
+ def run(self, task, *args, **kwargs):
+ return task
+
+
+# Add swarm to router
+router = AutoSwarmRouter(swarms=[FinancialReportSummarization])
+
+# Create AutoSwarm Instance
+autoswarm = AutoSwarm(
+ name="kyegomez/FinancialReportSummarization",
+ description="A swarm for financial document summarizing and generation",
+ verbose=True,
+ router=router,
+)
+
+# Run the AutoSwarm
+autoswarm.run("Analyze these documents and give me a summary:")
+```
+
+## Summary
+
+The `AutoSwarm` class provides a robust framework for managing and executing tasks using a swarm of agents. With customizable preprocessing, routing, and postprocessing functions, it is highly adaptable to various workflows and can handle complex task execution scenarios efficiently. The integration with `AutoSwarmRouter` enhances its flexibility, making it a powerful tool for dynamic task management.
\ No newline at end of file
diff --git a/docs/swarms/structs/auto_swarm_router.md b/docs/swarms/structs/auto_swarm_router.md
new file mode 100644
index 00000000..a5c89bda
--- /dev/null
+++ b/docs/swarms/structs/auto_swarm_router.md
@@ -0,0 +1,165 @@
+# AutoSwarmRouter
+
+The `AutoSwarmRouter` class is designed to route tasks to the appropriate swarm based on the provided name. This class allows for customization of preprocessing, routing, and postprocessing of tasks, making it highly adaptable to various workflows and requirements.
+
+### Key Concepts
+
+- **Routing**: Directing tasks to the appropriate swarm based on specific criteria.
+- **Preprocessing and Postprocessing**: Customizable functions to handle tasks before and after routing.
+- **Swarms**: Collections of `BaseSwarm` objects that perform the tasks.
+
+## Attributes
+
+### Arguments
+
+| Argument | Type | Default | Description |
+|--------------------|----------------------------------|-----------|-------------|
+| `name` | `Optional[str]` | `None` | The name of the router. |
+| `description` | `Optional[str]` | `None` | The description of the router. |
+| `verbose` | `bool` | `False` | Whether to enable verbose mode. |
+| `custom_params` | `Optional[Dict[str, Any]]` | `None` | Custom parameters for the router. |
+| `swarms` | `Sequence[BaseSwarm]` | `None` | A list of `BaseSwarm` objects. |
+| `custom_preprocess`| `Optional[Callable]` | `None` | Custom preprocessing function for tasks. |
+| `custom_postprocess`| `Optional[Callable]` | `None` | Custom postprocessing function for task results. |
+| `custom_router` | `Optional[Callable]` | `None` | Custom routing function for tasks. |
+
+### Attributes
+
+| Attribute | Type | Description |
+|----------------------|----------------------------------|-------------|
+| `name` | `Optional[str]` | The name of the router. |
+| `description` | `Optional[str]` | The description of the router. |
+| `verbose` | `bool` | Whether to enable verbose mode. |
+| `custom_params` | `Optional[Dict[str, Any]]` | Custom parameters for the router. |
+| `swarms` | `Sequence[BaseSwarm]` | A list of `BaseSwarm` objects. |
+| `custom_preprocess` | `Optional[Callable]` | Custom preprocessing function for tasks. |
+| `custom_postprocess` | `Optional[Callable]` | Custom postprocessing function for task results. |
+| `custom_router` | `Optional[Callable]` | Custom routing function for tasks. |
+| `swarm_dict` | `Dict[str, BaseSwarm]` | A dictionary of swarms keyed by their name. |
+
+## Methods
+
+### run
+
+Executes the swarm simulation and routes the task to the appropriate swarm.
+
+**Arguments:**
+
+| Parameter | Type | Default | Description |
+|-----------|---------|---------|-------------|
+| `task` | `str` | `None` | The task to be executed. |
+| `*args` | | | Additional arguments. |
+| `**kwargs`| | | Additional keyword arguments. |
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `Any` | The result of the routed task. |
+
+**Raises:**
+
+- `ValueError`: If the specified swarm is not found.
+- `Exception`: If any error occurs during task routing or execution.
+
+**Examples:**
+
+```python
+router = AutoSwarmRouter(name="example_router", swarms=[swarm1, swarm2])
+
+# Running a task
+result = router.run(task="example_task")
+```
+
+### len_of_swarms
+
+Prints the number of swarms available in the router.
+
+**Examples:**
+
+```python
+router = AutoSwarmRouter(name="example_router", swarms=[swarm1, swarm2])
+
+# Printing the number of swarms
+router.len_of_swarms() # Output: 2
+```
+
+### list_available_swarms
+
+Logs the available swarms and their descriptions.
+
+**Examples:**
+
+```python
+router = AutoSwarmRouter(name="example_router", swarms=[swarm1, swarm2])
+
+# Listing available swarms
+router.list_available_swarms()
+# Output:
+# INFO: Swarm Name: swarm1 || Swarm Description: Description of swarm1
+# INFO: Swarm Name: swarm2 || Swarm Description: Description of swarm2
+```
+
+### Additional Examples
+
+#### Example 1: Custom Preprocessing and Postprocessing
+
+```python
+def custom_preprocess(task, *args, **kwargs):
+ # Custom preprocessing logic
+ task = task.upper()
+ return task, args, kwargs
+
+def custom_postprocess(result):
+ # Custom postprocessing logic
+ return result.lower()
+
+router = AutoSwarmRouter(
+ name="example_router",
+ swarms=[swarm1, swarm2],
+ custom_preprocess=custom_preprocess,
+ custom_postprocess=custom_postprocess
+)
+
+# Running a task with custom preprocessing and postprocessing
+result = router.run(task="example_task")
+print(result) # Output will be the processed result
+```
+
+#### Example 2: Custom Router Function
+
+```python
+def custom_router(router, task, *args, **kwargs):
+ # Custom routing logic
+ if "specific" in task:
+ return router.swarm_dict["specific_swarm"].run(task, *args, **kwargs)
+ return router.swarm_dict["default_swarm"].run(task, *args, **kwargs)
+
+router = AutoSwarmRouter(
+ name="example_router",
+ swarms=[default_swarm, specific_swarm],
+ custom_router=custom_router
+)
+
+# Running a task with custom routing
+result = router.run(task="specific_task")
+print(result) # Output will be the result of the routed task
+```
+
+#### Example 3: Verbose Mode
+
+```python
+router = AutoSwarmRouter(
+ name="example_router",
+ swarms=[swarm1, swarm2],
+ verbose=True
+)
+
+# Running a task with verbose mode enabled
+result = router.run(task="example_task")
+# Output will include detailed logs of the task routing and execution process
+```
+
+## Summary
+
+The `AutoSwarmRouter` class provides a flexible and customizable approach to routing tasks to appropriate swarms, supporting custom preprocessing, routing, and postprocessing functions. This makes it a powerful tool for managing complex workflows that require dynamic task handling and execution.
\ No newline at end of file
diff --git a/docs/swarms/structs/base_workflow.md b/docs/swarms/structs/base_workflow.md
new file mode 100644
index 00000000..36d81062
--- /dev/null
+++ b/docs/swarms/structs/base_workflow.md
@@ -0,0 +1,287 @@
+# BaseWorkflow
+
+The `BaseWorkflow` class serves as a foundational structure for defining and managing workflows. It allows users to add, remove, update, and manage tasks and agents within a workflow, offering flexibility and extensibility for various applications.
+
+### Key Concepts
+
+- **Agents**: Entities participating in the workflow.
+- **Tasks**: Units of work to be executed within the workflow.
+- **Models**: Computational models used within the workflow.
+- **Workflow State**: The state of the workflow, which can be saved and restored.
+
+## Attributes
+
+### Arguments
+
+| Argument | Type | Default | Description |
+|----------|------|---------|-------------|
+| `agents` | `List[Agent]` | `None` | A list of agents participating in the workflow. |
+| `task_pool` | `List[Task]` | `None` | A list of tasks in the workflow. |
+| `models` | `List[Any]` | `None` | A list of models used in the workflow. |
+| `*args` | | | Variable length argument list. |
+| `**kwargs` | | | Arbitrary keyword arguments. |
+
+### Attributes
+
+| Attribute | Type | Description |
+|-----------|------|-------------|
+| `agents` | `List[Agent]` | A list of agents participating in the workflow. |
+| `task_pool` | `List[Task]` | A list of tasks in the workflow. |
+| `models` | `List[Any]` | A list of models used in the workflow. |
+
+## Methods
+
+### add_task
+
+Adds a task or a list of tasks to the task pool.
+
+**Arguments:**
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `task` | `Task` | `None` | A single task to add. |
+| `tasks` | `List[Task]` | `None` | A list of tasks to add. |
+
+**Raises:**
+
+- `ValueError`: If neither task nor tasks are provided.
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+task1 = Task(description="Task 1")
+task2 = Task(description="Task 2")
+
+# Adding a single task
+workflow.add_task(task=task1)
+
+# Adding multiple tasks
+workflow.add_task(tasks=[task1, task2])
+```
+
+### add_agent
+
+Adds an agent to the workflow.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `agent` | `Agent` | The agent to add to the workflow. |
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+agent = Agent(name="Agent 1")
+
+# Adding an agent to the workflow
+workflow.add_agent(agent=agent)
+```
+
+### run
+
+Abstract method to run the workflow.
+
+### __sequential_loop
+
+Abstract method for the sequential loop.
+
+### __log
+
+Logs a message if verbose mode is enabled.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `message` | `str` | The message to log. |
+
+### __str__
+
+Returns a string representation of the workflow.
+
+### __repr__
+
+Returns a string representation of the workflow for debugging.
+
+### reset
+
+Resets the workflow by clearing the results of each task.
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+workflow.reset()
+```
+
+### get_task_results
+
+Returns the results of each task in the workflow.
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `Dict[str, Any]` | The results of each task in the workflow. |
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+results = workflow.get_task_results()
+```
+
+### remove_task
+
+Removes a task from the workflow.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `task` | `str` | The description of the task to remove. |
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+workflow.remove_task(task="Task 1")
+```
+
+### update_task
+
+Updates the arguments of a task in the workflow.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `task` | `str` | The description of the task to update. |
+| `**updates` | | The updates to apply to the task. |
+
+**Raises:**
+
+- `ValueError`: If the task is not found in the workflow.
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+task = Task(description="Task 1", kwargs={"param": 1})
+
+# Adding a task to the workflow
+workflow.add_task(task=task)
+
+# Updating the task
+workflow.update_task("Task 1", param=2)
+```
+
+### delete_task
+
+Deletes a task from the workflow.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `task` | `str` | The description of the task to delete. |
+
+**Raises:**
+
+- `ValueError`: If the task is not found in the workflow.
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+task = Task(description="Task 1")
+
+# Adding a task to the workflow
+workflow.add_task(task=task)
+
+# Deleting the task
+workflow.delete_task("Task 1")
+```
+
+### save_workflow_state
+
+Saves the workflow state to a json file.
+
+**Arguments:**
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `filepath` | `Optional[str]` | `"sequential_workflow_state.json"` | The path to save the workflow state to. |
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+workflow.save_workflow_state(filepath="workflow_state.json")
+```
+
+### add_objective_to_workflow
+
+Adds an objective to the workflow.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `task` | `str` | The description of the task. |
+| `**kwargs` | | Additional keyword arguments for the task. |
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+workflow.add_objective_to_workflow(task="New Objective", agent=agent, args=[], kwargs={})
+```
+
+### load_workflow_state
+
+Loads the workflow state from a json file and restores the workflow state.
+
+**Arguments:**
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `filepath` | `str` | `None` | The path to load the workflow state from. |
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+workflow.load_workflow_state(filepath="workflow_state.json")
+```
+
+### workflow_dashboard
+
+Displays a dashboard for the workflow.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `**kwargs` | | Additional keyword arguments to pass to the dashboard. |
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+workflow.workflow_dashboard()
+```
+
+### workflow_bootup
+
+Initializes the workflow.
+
+**Examples:**
+
+```python
+workflow = BaseWorkflow()
+workflow.workflow_bootup()
+```
\ No newline at end of file
diff --git a/docs/swarms/structs/basestructure.md b/docs/swarms/structs/basestructure.md
new file mode 100644
index 00000000..8a5dab04
--- /dev/null
+++ b/docs/swarms/structs/basestructure.md
@@ -0,0 +1,137 @@
+# Module/Function Name: BaseStructure
+
+## Introduction:
+
+The `BaseStructure` module contains the basic structure and attributes required for running machine learning models and associated metadata, error logging, artifact saving/loading, and relevant event logging.
+
+The module provides the flexibility to save and load the model metadata, log errors, save artifacts, and maintain a log for multiple events associated with multiple threads and batched operations. The key attributes of the module include **name**, **description**, **save_metadata_path**, and **save_error_path**.
+
+## Class Definition:
+
+### Arguments:
+| Argument | Type | Description |
+|----------------------|--------|----------------------------------------------------------------------|
+| name | str | (Optional) The name of the structure. |
+| description | str | (Optional) A description of the structure. |
+| save_metadata | bool | A boolean flag to enable or disable metadata saving. |
+| save_artifact_path | str | (Optional) The path to save artifacts. |
+| save_metadata_path | str | (Optional) The path to save metadata. |
+| save_error_path | str | (Optional) The path to save errors. |
+
+## Methods:
+
+### 1. run
+Runs the structure.
+
+### 2. save_to_file
+Saves data to a file.
+* **data**: Value to be saved.
+* **file_path**: Path where the data is to be saved.
+
+### 3. load_from_file
+Loads data from a file.
+* **file_path**: Path from where the data is to be loaded.
+
+### 4. save_metadata
+Saves metadata to a file.
+* **metadata**: Data to be saved as metadata.
+
+### 5. load_metadata
+Loads metadata from a file.
+
+### 6. log_error
+Logs error to a file.
+
+### 7. save_artifact
+Saves artifact to a file.
+* **artifact**: The artifact to be saved.
+* **artifact_name**: Name of the artifact.
+
+### 8. load_artifact
+Loads artifact from a file.
+* **artifact_name**: Name of the artifact.
+
+### 9. log_event
+Logs an event to a file.
+* **event**: The event to be logged.
+* **event_type**: Type of the event (optional, defaults to "INFO").
+
+### 10. run_async
+Runs the structure asynchronously.
+
+### 11. save_metadata_async
+Saves metadata to a file asynchronously.
+
+### 12. load_metadata_async
+Loads metadata from a file asynchronously.
+
+### 13. log_error_async
+Logs error to a file asynchronously.
+
+### 14. save_artifact_async
+Saves artifact to a file asynchronously.
+
+### 15. load_artifact_async
+Loads artifact from a file asynchronously.
+
+### 16. log_event_async
+Logs an event to a file asynchronously.
+
+### 17. asave_to_file
+Saves data to a file asynchronously.
+
+### 18. aload_from_file
+Loads data from a file asynchronously.
+
+### 19. run_concurrent
+Runs the structure concurrently.
+
+### 20. compress_data
+Compresses data.
+
+### 21. decompres_data
+Decompresses data.
+
+### 22. run_batched
+Runs batched data.
+
+## Examples:
+
+### Example 1: Saving Metadata
+```python
+base_structure = BaseStructure(name="ExampleStructure")
+metadata = {"key1": "value1", "key2": "value2"}
+base_structure.save_metadata(metadata)
+```
+
+### Example 2: Loading Artifact
+```python
+artifact_name = "example_artifact"
+artifact_data = base_structure.load_artifact(artifact_name)
+```
+
+### Example 3: Running Concurrently
+```python
+concurrent_data = [data1, data2, data3]
+results = base_structure.run_concurrent(batched_data=concurrent_data)
+```
+
+## Note:
+
+The `BaseStructure` class is designed to provide a modular and extensible structure for managing metadata, logs, errors, and batched operations while running machine learning models. The class's methods offer asynchronous and concurrent execution capabilities, thus optimizing the performance of the associated applications and models. The module's attributes and methods cater to a wide range of use cases, making it an essential foundational component for machine learning and data-based applications.
+
+# Conclusion:
+
+The `BaseStructure` module offers a robust and flexible foundation for managing machine learning model metadata, error logs, and event tracking, including asynchronous, concurrent, and batched operations. By leveraging the inherent capabilities of this class, developers can enhance the reliability, scalability, and performance of machine learning-based applications.
+
+## References:
+
+- [Python Concurrent Programming with `asyncio`](https://docs.python.org/3/library/asyncio.html)
+- [Understanding Thread Pool Executor in Python](https://docs.python.org/3/library/concurrent.futures.html#executor-objects)
+- [Documentation on `gzip` Module for Data Compression](https://docs.python.org/3/library/gzip.html)
+
+---
+
+The above documentation provides detailed information about the `BaseStructure` module, including its functionality, attributes, methods, usage examples, and references to relevant resources for further exploration. This comprehensive documentation aims to deepen the users' understanding of the module's purpose and how it can be effectively utilized in practice.
+
+For further elaboration on any specific aspect or functionality of the `BaseStructure` module, refer to the source code and the references listed above.
diff --git a/docs/swarms/structs/concurrentworkflow.md b/docs/swarms/structs/concurrentworkflow.md
new file mode 100644
index 00000000..9b60392c
--- /dev/null
+++ b/docs/swarms/structs/concurrentworkflow.md
@@ -0,0 +1,77 @@
+```
+ # Module/Function Name: ConcurrentWorkflow
+
+ class swarms.structs.ConcurrentWorkflow(max_workers, autosave, saved_state_filepath):
+ """
+ ConcurrentWorkflow class for running a set of tasks concurrently using N autonomous agents.
+
+ Args:
+ - max_workers (int): The maximum number of workers to use for concurrent execution.
+ - autosave (bool): Whether to autosave the workflow state.
+ - saved_state_filepath (Optional[str]): The file path to save the workflow state.
+
+ """
+
+ def add(self, task, tasks=None):
+ """Adds a task to the workflow.
+
+ Args:
+ - task (Task): Task to add to the workflow.
+ - tasks (List[Task]): List of tasks to add to the workflow (optional).
+
+ """
+ try:
+ # Implementation of the function goes here
+ except Exception as error:
+ print(f"[ERROR][ConcurrentWorkflow] {error}")
+ raise error
+
+ def run(self, print_results=False, return_results=False):
+ """
+ Executes the tasks in parallel using a ThreadPoolExecutor.
+
+ Args:
+ - print_results (bool): Whether to print the results of each task. Default is False.
+ - return_results (bool): Whether to return the results of each task. Default is False.
+
+ Returns:
+ - (List[Any]): A list of the results of each task, if return_results is True. Otherwise, returns None.
+
+ """
+ try:
+ # Implementation of the function goes here
+ except Exception as e:
+ print(f"Task {task} generated an exception: {e}")
+
+ return results if self.return_results else None
+
+ def _execute_task(self, task):
+ """Executes a task.
+
+ Args:
+ - task (Task): Task to execute.
+
+ Returns:
+ - result: The result of executing the task.
+
+ """
+ try:
+ # Implementation of the function goes here
+ except Exception as error:
+ print(f"[ERROR][ConcurrentWorkflow] {error}")
+ raise error
+
+ # Usage example:
+
+ from swarms.models import OpenAIChat
+ from swarms.structs import ConcurrentWorkflow
+
+ llm = OpenAIChat(openai_api_key="")
+ workflow = ConcurrentWorkflow(max_workers=5)
+ workflow.add("What's the weather in miami", llm)
+ workflow.add("Create a report on these metrics", llm)
+ workflow.run()
+ workflow.tasks
+
+ """
+ ```
diff --git a/docs/swarms/structs/conversation.md b/docs/swarms/structs/conversation.md
new file mode 100644
index 00000000..be9ceffa
--- /dev/null
+++ b/docs/swarms/structs/conversation.md
@@ -0,0 +1,265 @@
+# Module/Class Name: Conversation
+
+## Introduction
+
+The `Conversation` class is a powerful tool for managing and structuring conversation data in a Python program. It enables you to create, manipulate, and analyze conversations easily. This documentation will provide you with a comprehensive understanding of the `Conversation` class, its attributes, methods, and how to effectively use it.
+
+## Table of Contents
+
+1. **Class Definition**
+ - Overview
+ - Attributes
+
+2. **Methods**
+ - `__init__(self, time_enabled: bool = False, *args, **kwargs)`
+ - `add(self, role: str, content: str, *args, **kwargs)`
+ - `delete(self, index: str)`
+ - `update(self, index: str, role, content)`
+ - `query(self, index: str)`
+ - `search(self, keyword: str)`
+ - `display_conversation(self, detailed: bool = False)`
+ - `export_conversation(self, filename: str)`
+ - `import_conversation(self, filename: str)`
+ - `count_messages_by_role(self)`
+ - `return_history_as_string(self)`
+ - `save_as_json(self, filename: str)`
+ - `load_from_json(self, filename: str)`
+ - `search_keyword_in_conversation(self, keyword: str)`
+ - `pretty_print_conversation(self, messages)`
+
+---
+
+### 1. Class Definition
+
+#### Overview
+
+The `Conversation` class is designed to manage conversations by keeping track of messages and their attributes. It offers methods for adding, deleting, updating, querying, and displaying messages within the conversation. Additionally, it supports exporting and importing conversations, searching for specific keywords, and more.
+
+#### Attributes
+
+- `time_enabled (bool)`: A flag indicating whether to enable timestamp recording for messages.
+- `conversation_history (list)`: A list that stores messages in the conversation.
+
+### 2. Methods
+
+#### `__init__(self, time_enabled: bool = False, *args, **kwargs)`
+
+- **Description**: Initializes a new Conversation object.
+- **Parameters**:
+ - `time_enabled (bool)`: If `True`, timestamps will be recorded for each message. Default is `False`.
+
+#### `add(self, role: str, content: str, *args, **kwargs)`
+
+- **Description**: Adds a message to the conversation history.
+- **Parameters**:
+ - `role (str)`: The role of the speaker (e.g., "user," "assistant").
+ - `content (str)`: The content of the message.
+
+#### `delete(self, index: str)`
+
+- **Description**: Deletes a message from the conversation history.
+- **Parameters**:
+ - `index (str)`: The index of the message to delete.
+
+#### `update(self, index: str, role, content)`
+
+- **Description**: Updates a message in the conversation history.
+- **Parameters**:
+ - `index (str)`: The index of the message to update.
+ - `role (_type_)`: The new role of the speaker.
+ - `content (_type_)`: The new content of the message.
+
+#### `query(self, index: str)`
+
+- **Description**: Retrieves a message from the conversation history.
+- **Parameters**:
+ - `index (str)`: The index of the message to query.
+- **Returns**: The message as a string.
+
+#### `search(self, keyword: str)`
+
+- **Description**: Searches for messages containing a specific keyword in the conversation history.
+- **Parameters**:
+ - `keyword (str)`: The keyword to search for.
+- **Returns**: A list of messages that contain the keyword.
+
+#### `display_conversation(self, detailed: bool = False)`
+
+- **Description**: Displays the conversation history.
+- **Parameters**:
+ - `detailed (bool, optional)`: If `True`, provides detailed information about each message. Default is `False`.
+
+#### `export_conversation(self, filename: str)`
+
+- **Description**: Exports the conversation history to a text file.
+- **Parameters**:
+ - `filename (str)`: The name of the file to export to.
+
+#### `import_conversation(self, filename: str)`
+
+- **Description**: Imports a conversation history from a text file.
+- **Parameters**:
+ - `filename (str)`: The name of the file to import from.
+
+#### `count_messages_by_role(self)`
+
+- **Description**: Counts the number of messages by role in the conversation.
+- **Returns**: A dictionary containing the count of messages for each role.
+
+#### `return_history_as_string(self)`
+
+- **Description**: Returns the entire conversation history as a single string.
+- **Returns**: The conversation history as a string.
+
+#### `save_as_json(self, filename: str)`
+
+- **Description**: Saves the conversation history as a JSON file.
+- **Parameters**:
+ - `filename (str)`: The name of the JSON file to save.
+
+#### `load_from_json(self, filename: str)`
+
+- **Description**: Loads a conversation history from a JSON file.
+- **Parameters**:
+ - `filename (str)`: The name of the JSON file to load.
+
+#### `search_keyword_in_conversation(self, keyword: str)`
+
+- **Description**: Searches for a keyword in the conversation history and returns matching messages.
+- **Parameters**:
+ - `keyword (str)`: The keyword to search for.
+- **Returns**: A list of messages containing the keyword.
+
+#### `pretty_print_conversation(self, messages)`
+
+- **Description**: Pretty prints a list of messages with colored role indicators.
+- **Parameters**:
+ - `messages (list)`: A list of messages to print.
+
+## Examples
+
+Here are some usage examples of the `Conversation` class:
+
+### Creating a Conversation
+
+```python
+from swarms.structs import Conversation
+
+conv = Conversation()
+```
+
+### Adding Messages
+
+```python
+conv.add("user", "Hello, world!")
+conv.add("assistant", "Hello, user!")
+```
+
+### Displaying the Conversation
+
+```python
+conv.display_conversation()
+```
+
+### Searching for Messages
+
+```python
+result = conv.search("Hello")
+```
+
+### Exporting and Importing Conversations
+
+```python
+conv.export_conversation("conversation.txt")
+conv.import_conversation("conversation.txt")
+```
+
+### Counting Messages by Role
+
+```python
+counts = conv.count_messages_by_role()
+```
+
+### Loading and Saving as JSON
+
+```python
+conv.save_as_json("conversation.json")
+conv.load_from_json("conversation.json")
+```
+
+Below are additional examples and further information about the `Conversation` class.
+
+### Querying a Specific Message
+
+You can retrieve a specific message from the conversation by its index:
+
+```python
+message = conv.query(0) # Retrieves the first message
+```
+
+### Updating a Message
+
+You can update a message's content or role within the conversation:
+
+```python
+conv.update(0, "user", "Hi there!") # Updates the first message
+```
+
+### Deleting a Message
+
+If you want to remove a message from the conversation, you can use the `delete` method:
+
+```python
+conv.delete(0) # Deletes the first message
+```
+
+### Counting Messages by Role
+
+You can count the number of messages by role in the conversation:
+
+```python
+counts = conv.count_messages_by_role()
+# Example result: {'user': 2, 'assistant': 2}
+```
+
+### Exporting and Importing as Text
+
+You can export the conversation to a text file and later import it:
+
+```python
+conv.export_conversation("conversation.txt") # Export
+conv.import_conversation("conversation.txt") # Import
+```
+
+### Exporting and Importing as JSON
+
+Conversations can also be saved and loaded as JSON files:
+
+```python
+conv.save_as_json("conversation.json") # Save as JSON
+conv.load_from_json("conversation.json") # Load from JSON
+```
+
+### Searching for a Keyword
+
+You can search for messages containing a specific keyword within the conversation:
+
+```python
+results = conv.search_keyword_in_conversation("Hello")
+```
+
+### Pretty Printing
+
+The `pretty_print_conversation` method provides a visually appealing way to display messages with colored role indicators:
+
+```python
+conv.pretty_print_conversation(conv.conversation_history)
+```
+
+These examples demonstrate the versatility of the `Conversation` class in managing and interacting with conversation data. Whether you're building a chatbot, conducting analysis, or simply organizing dialogues, this class offers a robust set of tools to help you accomplish your goals.
+
+## Conclusion
+
+The `Conversation` class is a valuable utility for handling conversation data in Python. With its ability to add, update, delete, search, export, and import messages, you have the flexibility to work with conversations in various ways. Feel free to explore its features and adapt them to your specific projects and applications.
+
+If you have any further questions or need additional assistance, please don't hesitate to ask!
\ No newline at end of file
diff --git a/docs/swarms/structs/diy_your_own_agent.md b/docs/swarms/structs/diy_your_own_agent.md
new file mode 100644
index 00000000..d75c1667
--- /dev/null
+++ b/docs/swarms/structs/diy_your_own_agent.md
@@ -0,0 +1,349 @@
+# Create your own agent with `Agent` class
+
+In the rapidly evolving world of artificial intelligence (AI), the demand for specialized and highly customized agents is on the rise. Whether it's for task automation, decision support systems, or intelligent virtual assistants, the ability to create tailored agents can unlock new possibilities and efficiencies across various domains. Enter the Agent class, a powerful and flexible tool that empowers AI agents to build their own custom agents, tailored to their specific needs.
+
+This comprehensive guide will explore the process of inheriting from the Agent class, enabling agents to create their own custom agent classes. By leveraging the rich features and extensibility of the Agent class, agents can imbue their offspring agents with unique capabilities, specialized toolsets, and tailored decision-making processes.
+
+## Understanding the Agent Class
+
+Before we dive into the intricacies of creating custom agent classes, let's revisit the foundational elements of the Agent class itself. The Agent class is a versatile and feature-rich class designed to streamline the process of building and managing AI agents. It acts as a backbone, connecting language models (LLMs) with various tools, long-term memory, and a wide range of customization options.
+
+### Key Features of the Agent Class
+
+The Agent class offers a plethora of features that can be inherited and extended by custom agent classes. Here are some of the key features that make the Agent class a powerful foundation:
+
+1\. **Language Model Integration**: The Agent class supports seamless integration with popular language models such as LangChain, HuggingFace Transformers, and Autogen, allowing custom agent classes to leverage the power of state-of-the-art language models.
+
+2\. **Tool Integration**: One of the standout features of the Agent class is its ability to integrate with various tools. Custom agent classes can inherit this capability and incorporate specialized tools tailored to their specific use cases.
+
+3\. **Long-Term Memory**: The Agent class provides built-in support for long-term memory, enabling custom agent classes to retain and access information from previous interactions, essential for maintaining context and learning from past experiences.
+
+4\. **Customizable Prompts and Standard Operating Procedures (SOPs)**: The Agent class allows you to define custom prompts and Standard Operating Procedures (SOPs) that guide an agent's behavior and decision-making process. Custom agent classes can inherit and extend these prompts and SOPs to align with their unique objectives and requirements.
+
+5\. **Interactive and Dashboard Modes**: The Agent class supports interactive and dashboard modes, enabling real-time monitoring and interaction with agents. Custom agent classes can inherit these modes, facilitating efficient development, debugging, and user interaction.
+
+6\. **Autosave and State Management**: With the Agent class, agents can easily save and load their state, including configuration, memory, and history. Custom agent classes can inherit this capability, ensuring seamless task continuation and enabling efficient collaboration among team members.
+
+7\. **Response Filtering**: The Agent class provides built-in response filtering capabilities, allowing agents to filter out or replace specific words or phrases in their responses. Custom agent classes can inherit and extend this feature to ensure compliance with content moderation policies or specific guidelines.
+
+8\. **Code Execution and Multimodal Support**: The Agent class supports code execution and multimodal input/output, enabling agents to process and generate code, as well as handle various data formats such as images, audio, and video. Custom agent classes can inherit and specialize these capabilities for their unique use cases.
+
+9\. **Extensibility and Customization**: The Agent class is designed to be highly extensible and customizable, allowing agents to tailor its behavior, add custom functionality, and integrate with external libraries and APIs. Custom agent classes can leverage this extensibility to introduce specialized features and capabilities.
+
+### Creating a Custom Agent Class
+
+Now that we have a solid understanding of the Agent class and its features, let's dive into the process of creating a custom agent class by inheriting from the Agent class. Throughout this process, we'll explore how agents can leverage and extend the existing functionality, while introducing specialized features and capabilities tailored to their unique requirements.
+
+#### Step 1: Inherit from the Agent Class
+
+The first step in creating a custom agent class is to inherit from the Agent class. This will provide your custom agent class with the foundational features and capabilities of the Agent class, which can then be extended and customized as needed.
+
+```python
+
+from swarms import Agent
+
+class MyCustomAgent(Agent):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # Add custom initialization logic here
+
+```
+
+In the example above, we define a new class `MyCustomAgent` that inherits from the `Agent` class. Within the `__init__` method, we call the parent class's `__init__` method using `super().__init__(*args, **kwargs)`, which ensures that the parent class's initialization logic is executed. You can then add any custom initialization logic specific to your custom agent class.
+
+#### Step 2: Customize the Agent's Behavior
+
+One of the key advantages of inheriting from the Agent class is the ability to customize the agent's behavior according to your specific requirements. This can be achieved by overriding or extending the existing methods, or by introducing new methods altogether.
+
+```python
+from swarms import Agent
+
+
+class MyCustomAgent(Agent):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # Custom initialization logic
+
+    def custom_method(self, *args, **kwargs):
+
+        # Implement custom logic here
+
+        pass
+
+    def run(self, task, *args, **kwargs):
+
+        # Customize the run method
+
+        response = super().run(task, *args, **kwargs)
+
+        # Additional custom logic
+
+        return response
+
+```
+
+In the example above, we introduce a new `custom_method` that can encapsulate any specialized logic or functionality specific to your custom agent class. Additionally, we override the `run` method, which is responsible for executing the agent's main task loop. Within the overridden `run` method, you can call the parent class's `run` method using `super().run(task, *args, **kwargs)` and then introduce any additional custom logic before or after the parent method's execution.
+
+#### Step 3: Integrate Custom Tools
+
+One of the powerful features of the Agent class is the ability to integrate with various tools. Custom agent classes can inherit this capability and incorporate specialized tools tailored to their unique use cases.
+
+```python
+
+from swarms.tools import BaseTool
+from swarms import Agent
+
+
+class CustomTool(BaseTool):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # Custom tool initialization logic
+
+    def run(self, *args, **kwargs):
+
+        # Custom tool logic
+
+        return result
+
+class MyCustomAgent(Agent):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # Custom initialization logic
+
+        self.tools = [CustomTool()]
+
+    def run(self, task, *args, **kwargs):
+
+        # Customize the run method
+
+        response = super().run(task, *args, **kwargs)
+
+        # Utilize custom tools
+
+        for tool in self.tools:
+
+            result = tool.run(*args, **kwargs)
+
+            # Process tool result
+
+        return response
+
+```
+
+In the example above, we define a new `CustomTool` class that inherits from the `BaseTool` class provided by the Agent class framework. Within the `CustomTool` class, you can implement the specialized logic and functionality required by your custom tool.
+
+Next, within the `MyCustomAgent` class, we initialize an instance of the `CustomTool` and store it in the `self.tools` list. This list can then be utilized within the overridden `run` method, where you can execute each tool and process its results as needed.
+
+#### Step 4: Extend Memory Management
+
+The Agent class provides built-in support for long-term memory, allowing agents to retain and access information from previous interactions. Custom agent classes can inherit and extend this capability by introducing specialized memory management techniques.
+
+```python
+
+from swarms.memory import BaseVectorDatabase
+from swarms import Agent
+
+
+class CustomMemory(BaseVectorDatabase):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # Custom memory initialization logic
+
+    def query(self, *args, **kwargs):
+
+        # Custom memory query logic
+
+        return result
+
+class MyCustomAgent(Agent):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # Custom initialization logic
+
+        self.long_term_memory = CustomMemory()
+
+    def run(self, task, *args, **kwargs):
+
+        # Customize the run method
+
+        response = super().run(task, *args, **kwargs)
+
+        # Utilize custom memory
+
+        memory_result = self.long_term_memory.query(*args, **kwargs)
+
+        # Process memory result
+
+        return response
+
+```
+
+In the example above, we define a new `CustomMemory` class that inherits from the `BaseVectorDatabase` class provided by the Agent class framework. Within the `CustomMemory` class, you can implement specialized memory management logic, such as custom indexing, retrieval, and storage mechanisms.
+
+Next, within the `MyCustomAgent` class, we initialize an instance of the `CustomMemory` class and assign it to the `self.long_term_memory` attribute. This custom memory instance can then be utilized within the overridden `run` method, where you can query the memory and process the results as needed.
+
+#### Step 5: Introduce Custom Prompts and Standard Operating Procedures (SOPs)
+
+The Agent class allows you to define custom prompts and Standard Operating Procedures (SOPs) that guide an agent's behavior and decision-making process. Custom agent classes can inherit and extend these prompts and SOPs to align with their unique objectives and requirements.
+
+```python
+from swarms import Agent
+
+
+class MyCustomAgent(Agent):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # Custom initialization logic
+
+        self.custom_sop = "Custom SOP for MyCustomAgent..."
+
+        self.custom_prompt = "Custom prompt for MyCustomAgent..."
+
+    def run(self, task, *args, **kwargs):
+
+        # Customize the run method
+
+        response = super().run(task, *args, **kwargs)
+
+        # Utilize custom prompts and SOPs
+
+        custom_prompt = self.construct_dynamic_prompt(self.custom_prompt)
+
+        custom_sop = self.construct_dynamic_sop(self.custom_sop)
+
+        # Process custom prompts and SOPs
+
+        return response
+
+    def construct_dynamic_prompt(self, prompt):
+
+        # Custom prompt construction logic
+
+        return prompt
+
+    def construct_dynamic_sop(self, sop):
+
+        # Custom SOP construction logic
+
+        return sop
+
+```
+
+In the example above, we define two new attributes within the `MyCustomAgent` class: `custom_sop` and `custom_prompt`. These attributes can be used to store custom prompts and SOPs specific to your custom agent class.
+
+Within the overridden `run` method, you can utilize these custom prompts and SOPs by calling the `construct_dynamic_prompt` and `construct_dynamic_sop` methods, which can be defined within the `MyCustomAgent` class to implement specialized prompt and SOP construction logic.
+
+#### Step 6: Introduce Custom Response Handling
+
+The Agent class provides built-in response filtering capabilities, allowing agents to filter out or replace specific words or phrases in their responses. Custom agent classes can inherit and extend this feature to ensure compliance with content moderation policies or specific guidelines.
+
+```python
+from swarms import Agent
+
+
+class MyCustomAgent(Agent):
+
+    def __init__(self, *args, **kwargs):
+
+        super().__init__(*args, **kwargs)
+
+        # Custom initialization logic
+
+        self.response_filters = ["filter_word_1", "filter_word_2"]
+
+    def run(self, task, *args, **kwargs):
+
+        # Customize the run method
+
+        response = super().run(task, *args, **kwargs)
+
+        # Apply custom response filtering
+
+        filtered_response = self.apply_response_filters(response)
+
+        return filtered_response
+
+    def apply_response_filters(self, response):
+
+        # Custom response filtering logic
+
+        for word in self.response_filters:
+
+            response = response.replace(word, "[FILTERED]")
+
+        return response
+
+```
+
+In the example above, we define a new attribute `response_filters` within the `MyCustomAgent` class, which is a list of words or phrases that should be filtered out or replaced in the agent's responses.
+
+Within the overridden `run` method, we call the `apply_response_filters` method, which can be defined within the `MyCustomAgent` class to implement specialized response filtering logic. In the example, we iterate over the `response_filters` list and replace each filtered word or phrase with a placeholder string (`"[FILTERED]"`).
+
+### Advanced Customization and Integration
+
+The Agent class and its inherited custom agent classes can be further extended and customized to suit specific requirements and integrate with external libraries, APIs, and services. Here are some advanced customization and integration examples:
+
+1\. **Multimodal Input/Output Integration**: Custom agent classes can leverage the multimodal input/output capabilities of the Agent class and introduce specialized handling for various data formats such as images, audio, and video.
+
+2\. **Code Execution and Integration**: The Agent class supports code execution, enabling agents to run and evaluate code snippets. Custom agent classes can inherit and extend this capability, introducing specialized code execution environments, sandboxing mechanisms, or integration with external code repositories or platforms.
+
+3\. **External API and Service Integration**: Custom agent classes can integrate with external APIs and services, enabling agents to leverage specialized data sources, computational resources, or domain-specific services.
+
+4\. **Performance Optimization**: Depending on the use case and requirements, custom agent classes can introduce performance optimizations, such as adjusting loop intervals, retry attempts, or enabling parallel execution for certain tasks.
+
+5\. **Logging and Monitoring**: Custom agent classes can introduce specialized logging and monitoring mechanisms, enabling agents to track their performance, identify potential issues, and generate detailed reports or dashboards.
+
+6\. **Security and Privacy Enhancements**: Custom agent classes can implement security and privacy enhancements, such as data encryption, access control mechanisms, or compliance with industry-specific regulations and standards.
+
+7\. **Distributed Execution and Scaling**: Custom agent classes can be designed to support distributed execution and scaling, enabling agents to leverage cloud computing resources or distributed computing frameworks for handling large-scale tasks or high-concurrency workloads.
+
+By leveraging these advanced customization and integration capabilities, agents can create highly specialized and sophisticated custom agent classes tailored to their unique requirements and use cases.
+
+### Best Practices and Considerations
+
+While building custom agent classes by inheriting from the Agent class offers immense flexibility and power, it's essential to follow best practices and consider potential challenges and considerations:
+
+1\. **Maintainability and Documentation**: As custom agent classes become more complex, it's crucial to prioritize maintainability and thorough documentation. Clear and concise code, comprehensive comments, and up-to-date documentation can significantly improve the long-term sustainability and collaboration efforts surrounding custom agent classes.
+
+2\. **Testing and Validation**: Custom agent classes should undergo rigorous testing and validation to ensure their correctness, reliability, and adherence to expected behaviors. Establish a robust testing framework and continuously validate the agent's performance, particularly after introducing new features or integrations.
+
+3\. **Security and Privacy Considerations**: When building custom agent classes, it's essential to consider security and privacy implications, especially if the agents will handle sensitive data or interact with critical systems. Implement appropriate security measures, such as access controls, data encryption, and secure communication protocols, to protect against potential vulnerabilities and ensure compliance with relevant regulations and standards.
+
+4\. **Scalability and Performance Monitoring**: As custom agent classes are deployed and adopted, it's important to monitor their scalability and performance characteristics. Identify potential bottlenecks, resource constraints, or performance degradation, and implement appropriate optimization strategies or scaling mechanisms to ensure efficient and reliable operation.
+
+5\. **Collaboration and Knowledge Sharing**: Building custom agent classes often involves collaboration among teams and stakeholders. Foster an environment of knowledge sharing, code reviews, and open communication to ensure that everyone involved understands the agent's capabilities, limitations, and intended use cases.
+
+6\. **Ethical Considerations**: As AI agents become more advanced and autonomous, it's crucial to consider the ethical implications of their actions and decisions. Implement appropriate safeguards, oversight mechanisms, and ethical guidelines to ensure that custom agent classes operate in a responsible and transparent manner, aligning with ethical principles and societal values.
+
+7\. **Continuous Learning and Adaptation**: The field of AI is rapidly evolving, with new techniques, tools, and best practices emerging regularly. Stay up-to-date with the latest developments and be prepared to adapt and refine your custom agent classes as new advancements become available.
+
+By following these best practices and considering potential challenges, agents can create robust, reliable, and ethical custom agent classes that meet their specific requirements while adhering to industry standards and best practices.
+
+# Conclusion
+
+In this comprehensive guide, we have explored the process of creating custom agent classes by inheriting from the powerful Agent class. We have covered the key features of the Agent class, walked through the step-by-step process of inheriting and extending its functionality, and discussed advanced customization and integration techniques.
+
+Building custom agent classes empowers AI agents to create tailored and specialized agents capable of tackling unique challenges and addressing specific domain requirements. By leveraging the rich features and extensibility of the Agent class, agents can imbue their offspring agents with unique capabilities, specialized toolsets, and tailored decision-making processes.
+
+Remember, the journey of building custom agent classes is an iterative and collaborative process that requires continuous learning, adaptation, and refinement. Embrace the
\ No newline at end of file
diff --git a/docs/swarms/structs/graph_workflow.md b/docs/swarms/structs/graph_workflow.md
new file mode 100644
index 00000000..4316bd04
--- /dev/null
+++ b/docs/swarms/structs/graph_workflow.md
@@ -0,0 +1,190 @@
+# GraphWorkflow Documentation
+
+The `GraphWorkflow` class is a pivotal part of the workflow management system, representing a directed graph where nodes signify tasks or agents and edges represent the flow or dependencies between these nodes. This class leverages the NetworkX library to manage and manipulate the directed graph, allowing users to create complex workflows with defined entry and end points.
+
+### Attributes
+
+| Attribute | Type | Description | Default |
+|----------------|-------------------|-----------------------------------------------------------------------------------------------|-------------------------------------|
+| `nodes` | `Dict[str, Node]` | A dictionary of nodes in the graph, where the key is the node ID and the value is the Node object. | `Field(default_factory=dict)` |
+| `edges` | `List[Edge]` | A list of edges in the graph, where each edge is represented by an Edge object. | `Field(default_factory=list)` |
+| `entry_points` | `List[str]` | A list of node IDs that serve as entry points to the graph. | `Field(default_factory=list)` |
+| `end_points` | `List[str]` | A list of node IDs that serve as end points of the graph. | `Field(default_factory=list)` |
+| `graph` | `nx.DiGraph` | A directed graph object from the NetworkX library representing the workflow graph. | `Field(default_factory=nx.DiGraph)` |
+| `max_loops` | `int` | Maximum number of times the workflow can loop during execution. | `1` |
+
+### Methods
+
+#### `add_node(node: Node)`
+
+Adds a node to the workflow graph.
+
+| Parameter | Type | Description |
+|-----------|------|-----------------------------------|
+| `node` | `Node` | The node object to be added. |
+
+Raises:
+- `ValueError`: If a node with the same ID already exists in the graph.
+
+#### `add_edge(edge: Edge)`
+
+Adds an edge to the workflow graph.
+
+| Parameter | Type | Description |
+|-----------|------|----------------------------------|
+| `edge` | `Edge` | The edge object to be added. |
+
+Raises:
+- `ValueError`: If either the source or target node of the edge does not exist in the graph.
+
+#### `set_entry_points(entry_points: List[str])`
+
+Sets the entry points of the workflow graph.
+
+| Parameter | Type | Description |
+|----------------|-----------|---------------------------------------------|
+| `entry_points` | `List[str]` | A list of node IDs to be set as entry points. |
+
+Raises:
+- `ValueError`: If any of the specified node IDs do not exist in the graph.
+
+#### `set_end_points(end_points: List[str])`
+
+Sets the end points of the workflow graph.
+
+| Parameter | Type | Description |
+|--------------|-----------|-------------------------------------------|
+| `end_points` | `List[str]` | A list of node IDs to be set as end points. |
+
+Raises:
+- `ValueError`: If any of the specified node IDs do not exist in the graph.
+
+#### `visualize() -> str`
+
+Generates a string representation of the workflow graph in the Mermaid syntax.
+
+Returns:
+- `str`: The Mermaid string representation of the workflow graph.
+
+#### `run(task: str = None, *args, **kwargs) -> Dict[str, Any]`
+
+Function to run the workflow graph.
+
+| Parameter | Type | Description |
+|-----------|-------|----------------------------------|
+| `task` | `str` | The task to be executed by the workflow. |
+| `*args` | | Variable length argument list. |
+| `**kwargs`| | Arbitrary keyword arguments. |
+
+Returns:
+- `Dict[str, Any]`: A dictionary containing the results of the execution.
+
+Raises:
+- `ValueError`: If no entry points or end points are defined in the graph.
+
+## Functionality and Usage
+
+### Adding Nodes
+
+The `add_node` method is used to add nodes to the graph. Each node must have a unique ID. If a node with the same ID already exists, a `ValueError` is raised.
+
+```python
+wf_graph = GraphWorkflow()
+node1 = Node(id="node1", type=NodeType.TASK, callable=sample_task)
+wf_graph.add_node(node1)
+```
+
+### Adding Edges
+
+The `add_edge` method connects nodes with edges. Both the source and target nodes of the edge must already exist in the graph, otherwise a `ValueError` is raised.
+
+```python
+edge1 = Edge(source="node1", target="node2")
+wf_graph.add_edge(edge1)
+```
+
+### Setting Entry and End Points
+
+The `set_entry_points` and `set_end_points` methods define which nodes are the starting and ending points of the workflow, respectively. If any specified node IDs do not exist, a `ValueError` is raised.
+
+```python
+wf_graph.set_entry_points(["node1"])
+wf_graph.set_end_points(["node2"])
+```
+
+### Visualizing the Graph
+
+The `visualize` method generates a Mermaid string representation of the workflow graph. This can be useful for visualizing the workflow structure.
+
+```python
+print(wf_graph.visualize())
+```
+
+### Running the Workflow
+
+The `run` method executes the workflow. It performs a topological sort of the graph to ensure nodes are executed in the correct order. The results of each node's execution are returned in a dictionary.
+
+```python
+results = wf_graph.run()
+print("Execution results:", results)
+```
+
+## Example Usage
+
+Below is a comprehensive example demonstrating the creation and execution of a workflow graph:
+
+```python
+
+import os
+
+from dotenv import load_dotenv
+
+from swarms import Agent, Edge, GraphWorkflow, Node, NodeType, OpenAIChat
+
+load_dotenv()
+
+api_key = os.environ.get("OPENAI_API_KEY")
+
+llm = OpenAIChat(
+ temperature=0.5, openai_api_key=api_key, max_tokens=4000
+)
+agent1 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
+agent2 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
+
+def sample_task():
+ print("Running sample task")
+ return "Task completed"
+
+wf_graph = GraphWorkflow()
+wf_graph.add_node(Node(id="agent1", type=NodeType.AGENT, agent=agent1))
+wf_graph.add_node(Node(id="agent2", type=NodeType.AGENT, agent=agent2))
+wf_graph.add_node(
+ Node(id="task1", type=NodeType.TASK, callable=sample_task)
+)
+wf_graph.add_edge(Edge(source="agent1", target="task1"))
+wf_graph.add_edge(Edge(source="agent2", target="task1"))
+
+wf_graph.set_entry_points(["agent1", "agent2"])
+wf_graph.set_end_points(["task1"])
+
+print(wf_graph.visualize())
+
+# Run the workflow
+results = wf_graph.run()
+print("Execution results:", results)
+
+```
+
+In this example, we set up a workflow graph with two agents and one task. We define the entry and end points, visualize the graph, and then execute the workflow, capturing and printing the results.
+
+## Additional Information and Tips
+
+- **Error Handling**: The `GraphWorkflow` class includes error handling to ensure that invalid operations (such as adding duplicate nodes or edges with non-existent nodes) raise appropriate exceptions.
+- **Max Loops**: The `max_loops` attribute allows the workflow to loop through the graph multiple times if needed. This can be useful for iterative tasks.
+- **Topological Sort**: The workflow execution relies on a topological sort to ensure that nodes are processed in the correct order. This is particularly important in complex workflows with dependencies.
+
+## References and Resources
+
+- [NetworkX Documentation](https://networkx.github.io/documentation/stable/)
+- [Pydantic Documentation](https://pydantic-docs.helpmanual.io/)
+- [Mermaid Documentation](https://mermaid-js.github.io/mermaid/#/)
\ No newline at end of file
diff --git a/docs/swarms/structs/group_chat.md b/docs/swarms/structs/group_chat.md
new file mode 100644
index 00000000..b4d805a1
--- /dev/null
+++ b/docs/swarms/structs/group_chat.md
@@ -0,0 +1,238 @@
+# GroupChat
+
+The `GroupChat` class is designed to manage a group chat session involving multiple agents. This class handles initializing the conversation, selecting the next speaker, resetting the chat, and executing the chat rounds, providing a structured approach to managing a dynamic and interactive conversation.
+
+### Key Concepts
+
+- **Agents**: Entities participating in the group chat.
+- **Conversation Management**: Handling the flow of conversation, selecting speakers, and maintaining chat history.
+- **Round-based Execution**: Managing the chat in predefined rounds.
+
+## Attributes
+
+### Arguments
+
+| Argument | Type | Default | Description |
+|---------------------|----------------------|-------------|-------------|
+| `agents` | `List[Agent]` | `None` | List of agents participating in the group chat. |
+| `max_rounds` | `int` | `10` | Maximum number of chat rounds. |
+| `admin_name` | `str` | `"Admin"` | Name of the admin user. |
+| `group_objective` | `str` | `None` | Objective of the group chat. |
+| `selector_agent` | `Agent` | `None` | Agent responsible for selecting the next speaker. |
+| `rules` | `str` | `None` | Rules for the group chat. |
+| `*args` | | | Variable length argument list. |
+| `**kwargs` | | | Arbitrary keyword arguments. |
+
+### Attributes
+
+| Attribute | Type | Description |
+|---------------------|----------------------|-------------|
+| `agents` | `List[Agent]` | List of agents participating in the group chat. |
+| `max_rounds` | `int` | Maximum number of chat rounds. |
+| `admin_name` | `str` | Name of the admin user. |
+| `group_objective` | `str` | Objective of the group chat. |
+| `selector_agent` | `Agent` | Agent responsible for selecting the next speaker. |
+| `messages` | `Conversation` | Conversation object for storing the chat messages. |
+
+## Methods
+
+### `__init__`
+
+Initializes the group chat with the given parameters.
+
+**Examples:**
+
+```python
+agents = [Agent(name="Agent 1"), Agent(name="Agent 2")]
+group_chat = GroupChat(agents=agents, max_rounds=5, admin_name="GroupAdmin")
+```
+
+### agent_names
+
+Returns the names of the agents in the group chat.
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `List[str]` | List of agent names. |
+
+**Examples:**
+
+```python
+names = group_chat.agent_names
+print(names) # Output: ['Agent 1', 'Agent 2']
+```
+
+### reset
+
+Resets the group chat by clearing the message history.
+
+**Examples:**
+
+```python
+group_chat.reset()
+```
+
+### agent_by_name
+
+Finds an agent whose name is contained within the given name string.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|--------|-------------|
+| `name` | `str` | Name string to search for. |
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `Agent` | Agent object with a name contained in the given name string. |
+
+**Raises:**
+
+- `ValueError`: If no agent is found with a name contained in the given name string.
+
+**Examples:**
+
+```python
+agent = group_chat.agent_by_name("Agent 1")
+print(agent.agent_name) # Output: 'Agent 1'
+```
+
+### next_agent
+
+Returns the next agent in the list.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|--------|-------------|
+| `agent` | `Agent`| Current agent. |
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `Agent` | Next agent in the list. |
+
+**Examples:**
+
+```python
+current_agent = group_chat.agents[0]
+next_agent = group_chat.next_agent(current_agent)
+print(next_agent.agent_name) # Output: Name of the next agent
+```
+
+### select_speaker_msg
+
+Returns the message for selecting the next speaker.
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `str` | Prompt message for selecting the next speaker. |
+
+**Examples:**
+
+```python
+message = group_chat.select_speaker_msg()
+print(message)
+```
+
+### select_speaker
+
+Selects the next speaker.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|----------------------|--------|-------------|
+| `last_speaker_agent` | `Agent`| Last speaker in the conversation. |
+| `selector_agent` | `Agent`| Agent responsible for selecting the next speaker. |
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `Agent` | Next speaker. |
+
+**Examples:**
+
+```python
+next_speaker = group_chat.select_speaker(last_speaker_agent, selector_agent)
+print(next_speaker.agent_name)
+```
+
+### `_participant_roles`
+
+Returns the roles of the participants.
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `str` | Participant roles. |
+
+**Examples:**
+
+```python
+roles = group_chat._participant_roles()
+print(roles)
+```
+
+### `__call__`
+
+Executes the group chat as a function.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|--------|-------------|
+| `task` | `str` | Task to be performed. |
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `str` | Reply from the last speaker. |
+
+**Examples:**
+
+```python
+response = group_chat(task="Discuss the project plan")
+print(response)
+```
+
+### Additional Examples
+
+#### Example 1: Initializing and Running a Group Chat
+
+```python
+agents = [Agent(name="Agent 1"), Agent(name="Agent 2"), Agent(name="Agent 3")]
+selector_agent = Agent(name="Selector")
+group_chat = GroupChat(agents=agents, selector_agent=selector_agent, max_rounds=3, group_objective="Discuss the quarterly goals.")
+
+response = group_chat(task="Let's start the discussion on quarterly goals.")
+print(response)
+```
+
+#### Example 2: Resetting the Group Chat
+
+```python
+group_chat.reset()
+```
+
+#### Example 3: Selecting the Next Speaker
+
+```python
+last_speaker = group_chat.agents[0]
+next_speaker = group_chat.select_speaker(last_speaker_agent=last_speaker, selector_agent=selector_agent)
+print(next_speaker.agent_name)
+```
+
+## Summary
+
+The `GroupChat` class offers a structured approach to managing a group chat involving multiple agents. With functionalities for initializing conversations, selecting speakers, and handling chat rounds, it provides a robust framework for dynamic and interactive discussions. This makes it an essential tool for applications requiring coordinated communication among multiple agents.
\ No newline at end of file
diff --git a/docs/swarms/structs/index.md b/docs/swarms/structs/index.md
new file mode 100644
index 00000000..39d279ba
--- /dev/null
+++ b/docs/swarms/structs/index.md
@@ -0,0 +1,363 @@
+# Enterprise-Grade and Production Ready Agents
+
+Swarms is an enterprise grade and production ready multi-agent collaboration framework that enables you to orchestrate many agents to work collaboratively at scale to automate real-world activities.
+
+| **Feature** | **Description** | **Performance Impact** | **Documentation Link** |
+|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------|-------------------------------|
+| Models | Pre-trained models that can be utilized for various tasks within the swarm framework. | ⭐⭐⭐ | [Documentation](https://docs.swarms.world/en/latest/swarms/models/) |
+| Models APIs | APIs to interact with and utilize the models effectively, providing interfaces for inference, training, and fine-tuning. | ⭐⭐⭐ | [Documentation](https://docs.swarms.world/en/latest/swarms/models/) |
+| Agents with Tools | Agents equipped with specialized tools to perform specific tasks more efficiently, such as data processing, analysis, or interaction with external systems. | ⭐⭐⭐⭐ | [Documentation](https://medium.com/@kyeg/the-swarms-tool-system-functions-pydantic-basemodels-as-tools-and-radical-customization-c2a2e227b8ca) |
+| Agents with Memory | Mechanisms for agents to store and recall past interactions, improving learning and adaptability over time. | ⭐⭐⭐⭐ | [Documentation](https://github.com/kyegomez/swarms/blob/master/playground/structs/agent/agent_with_longterm_memory.py) |
+| Multi-Agent Orchestration | Coordination of multiple agents to work together seamlessly on complex tasks, leveraging their individual strengths to achieve higher overall performance. | ⭐⭐⭐⭐⭐ | [Documentation]() |
+
+The performance impact is rated on a scale from one to five stars, with multi-agent orchestration being the highest due to its ability to combine the strengths of multiple agents and optimize task execution.
+
+----
+
+## Install 💻
+`$ pip3 install -U swarms`
+
+---
+
+# Usage Examples 🤖
+
+### Google Colab Example
+Run example in Colab:
+
+
+
+---
+
+## `Agents`
+A fully plug-and-play autonomous agent powered by an LLM extended by a long-term memory database, and equipped with function calling for tool usage! By passing in an LLM, you can create a fully autonomous agent with extreme customization and reliability, ready for real-world task automation!
+
+Features:
+
+✅ Any LLM / Any framework
+
+✅ Extremely customizable with max loops, autosaving, import docs (PDFs, TXT, CSVs, etc.), tool usage, etc.
+
+✅ Long term memory database with RAG (ChromaDB, Pinecone, Qdrant)
+
+```python
+import os
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent struct
+from swarms import Agent, OpenAIChat
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(
+ temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000
+)
+
+
+## Initialize the workflow
+agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
+
+# Run the workflow on a task
+agent.run("Generate a 10,000 word blog on health and wellness.")
+```
+
+
+### `Agent` + Long Term Memory
+`Agent` equipped with quasi-infinite long term memory. Great for long document understanding, analysis, and retrieval.
+
+```python
+from swarms import Agent, OpenAIChat
+from swarms_memory import ChromaDB # Copy and paste the code and put it in your own local directory.
+
+# Making an instance of the ChromaDB class
+memory = ChromaDB(
+ metric="cosine",
+ n_results=3,
+ output_dir="results",
+ docs_folder="docs",
+)
+
+# Initializing the agent with the OpenAIChat instance and other parameters
+agent = Agent(
+ agent_name="Covid-19-Chat",
+ agent_description=(
+ "This agent provides information about COVID-19 symptoms."
+ ),
+ llm=OpenAIChat(),
+ max_loops="auto",
+ autosave=True,
+ verbose=True,
+ long_term_memory=memory,
+ stopping_condition="finish",
+)
+
+# Defining the task
+task = "What are the symptoms of COVID-19?"
+
+# Running the agent with the specified task
+out = agent.run(task)
+print(out)
+
+```
+
+
+### `Agent` ++ Long Term Memory ++ Tools!
+An LLM equipped with long term memory and tools, a full stack agent capable of automating all and any digital tasks given a good prompt.
+
+```python
+from swarms import Agent, ChromaDB, OpenAIChat
+
+# Making an instance of the ChromaDB class
+memory = ChromaDB(
+ metric="cosine",
+ n_results=3,
+ output_dir="results",
+ docs_folder="docs",
+)
+
+# Initialize a tool
+def search_api(query: str):
+ # Add your logic here
+ return query
+
+# Initializing the agent with the OpenAIChat instance and other parameters
+agent = Agent(
+ agent_name="Covid-19-Chat",
+ agent_description=(
+ "This agent provides information about COVID-19 symptoms."
+ ),
+ llm=OpenAIChat(),
+ max_loops="auto",
+ autosave=True,
+ verbose=True,
+ long_term_memory=memory,
+ stopping_condition="finish",
+ tools=[search_api],
+)
+
+# Defining the task
+task = "What are the symptoms of COVID-19?"
+
+# Running the agent with the specified task
+out = agent.run(task)
+print(out)
+
+```
+
+
+### Devin
+Implementation of Devin in less than 90 lines of code with several tools:
+terminal, browser, and edit files.
+
+```python
+from swarms import Agent, Anthropic
+import subprocess
+
+# Model
+llm = Anthropic(
+ temperature=0.1,
+)
+
+# Tools
+def terminal(
+ code: str,
+):
+ """
+ Run code in the terminal.
+
+ Args:
+ code (str): The code to run in the terminal.
+
+ Returns:
+ str: The output of the code.
+ """
+ out = subprocess.run(
+ code, shell=True, capture_output=True, text=True
+ ).stdout
+ return str(out)
+
+def browser(query: str):
+ """
+ Search the query in the browser with the `browser` tool.
+
+ Args:
+ query (str): The query to search in the browser.
+
+ Returns:
+ str: The search results.
+ """
+ import webbrowser
+
+ url = f"https://www.google.com/search?q={query}"
+ webbrowser.open(url)
+ return f"Searching for {query} in the browser."
+
+def create_file(file_path: str, content: str):
+ """
+ Create a file using the file editor tool.
+
+ Args:
+ file_path (str): The path to the file.
+ content (str): The content to write to the file.
+
+ Returns:
+ str: The result of the file creation operation.
+ """
+ with open(file_path, "w") as file:
+ file.write(content)
+ return f"File {file_path} created successfully."
+
+def file_editor(file_path: str, mode: str, content: str):
+ """
+ Edit a file using the file editor tool.
+
+ Args:
+ file_path (str): The path to the file.
+ mode (str): The mode to open the file in.
+ content (str): The content to write to the file.
+
+ Returns:
+ str: The result of the file editing operation.
+ """
+ with open(file_path, mode) as file:
+ file.write(content)
+ return f"File {file_path} edited successfully."
+
+
+# Agent
+agent = Agent(
+ agent_name="Devin",
+ system_prompt=(
+ "Autonomous agent that can interact with humans and other"
+ " agents. Be Helpful and Kind. Use the tools provided to"
+ " assist the user. Return all code in markdown format."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ code_interpreter=True,
+ # streaming=True,
+)
+
+# Run the agent
+out = agent("Create a new file for a plan to take over the world.")
+print(out)
+```
+
+
+### `Agent` with Pydantic BaseModel as Output Type
+The following is an example of an agent that intakes a pydantic basemodel and outputs it at the same time:
+
+```python
+from pydantic import BaseModel, Field
+from swarms import Anthropic, Agent
+
+
+# Initialize the schema for the person's information
+class Schema(BaseModel):
+ name: str = Field(..., title="Name of the person")
+ age: int = Field(..., title="Age of the person")
+ is_student: bool = Field(..., title="Whether the person is a student")
+ courses: list[str] = Field(
+ ..., title="List of courses the person is taking"
+ )
+
+
+# Convert the schema to a JSON string
+tool_schema = Schema(
+ name="Tool Name",
+ age=1,
+ is_student=True,
+ courses=["Course1", "Course2"],
+)
+
+# Define the task to generate a person's information
+task = "Generate a person's information based on the following schema:"
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Person Information Generator",
+ system_prompt=(
+ "Generate a person's information based on the following schema:"
+ ),
+ # Set the tool schema to the JSON string -- this is the key difference
+ tool_schema=tool_schema,
+ llm=Anthropic(),
+ max_loops=3,
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ interactive=True,
+ # Set the output type to the tool schema which is a BaseModel
+ output_type=tool_schema, # or dict, or str
+ metadata_output_type="json",
+ # List of schemas that the agent can handle
+ list_base_models=[tool_schema],
+ function_calling_format_type="OpenAI",
+ function_calling_type="json", # or soon yaml
+)
+
+# Run the agent to generate the person's information
+generated_data = agent.run(task)
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
+
+
+```
+
+### Multi Modal Autonomous Agent
+Run the agent with multiple modalities useful for various real-world tasks in manufacturing, logistics, and health.
+
+```python
+# Description: This is an example of how to use the Agent class to run a multi-modal workflow
+import os
+
+from dotenv import load_dotenv
+
+from swarms.models.gpt4_vision_api import GPT4VisionAPI
+from swarms.structs import Agent
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = GPT4VisionAPI(
+ openai_api_key=api_key,
+ max_tokens=500,
+)
+
+# Initialize the task
+task = (
+ "Analyze this image of an assembly line and identify any issues such as"
+ " misaligned parts, defects, or deviations from the standard assembly"
+ " process. IF there is anything unsafe in the image, explain why it is"
+ " unsafe and how it could be improved."
+)
+img = "assembly_line.jpg"
+
+## Initialize the workflow
+agent = Agent(
+ llm=llm, max_loops="auto", autosave=True, dashboard=True, multi_modal=True
+)
+
+# Run the workflow on a task
+agent.run(task=task, img=img)
+```
+----
+
diff --git a/docs/swarms/structs/majorityvoting.md b/docs/swarms/structs/majorityvoting.md
new file mode 100644
index 00000000..84ac02c8
--- /dev/null
+++ b/docs/swarms/structs/majorityvoting.md
@@ -0,0 +1,217 @@
+# MajorityVoting Module Documentation
+
+The `MajorityVoting` module provides a mechanism for performing majority voting among a group of agents. Majority voting is a decision rule that selects the option which has the majority of votes. This is particularly useful in systems where multiple agents provide responses to a query, and the most common response needs to be identified as the final output.
+
+### Key Concepts
+
+- **Majority Voting**: A method to determine the most common response from a set of answers.
+- **Agents**: Entities (e.g., models, algorithms) that provide responses to tasks or queries.
+- **Output Parser**: A function that processes the responses from the agents before performing the majority voting.
+
+## Function Definitions
+
+### Function: `majority_voting`
+
+Performs majority voting on a list of answers and returns the most common answer.
+
+#### Parameters
+
+| Parameter | Type | Description |
+|-----------|----------|------------------------------|
+| `answers` | `List[str]` | A list of answers from different agents. |
+
+#### Returns
+
+| Return Value | Type | Description |
+|--------------|-------|----------------------------------------|
+| `answer` | `str` | The most common answer in the list. If the list is empty, returns "I don't know". |
+
+## Class Definitions
+
+### Class: `MajorityVoting`
+
+Class representing a majority voting system for agents.
+
+#### Parameters
+
+| Parameter | Type | Description |
+|------------------|--------------|-----------------------------------------------------------------------------|
+| `agents` | `List[Agent]`| A list of agents to be used in the majority voting system. |
+| `output_parser` | `Callable` | A function used to parse the output of the agents. If not provided, the default `majority_voting` function is used. |
+| `autosave` | `bool` | A boolean indicating whether to autosave the conversation to a file. Default is `False`. |
+| `verbose` | `bool` | A boolean indicating whether to enable verbose logging. Default is `False`. |
+
+### Method: `__init__`
+
+Initializes the `MajorityVoting` system.
+
+#### Parameters
+
+| Parameter | Type | Description |
+|------------------|----------------|-----------------------------------------------------------------------------|
+| `agents` | `List[Agent]` | A list of agents to be used in the majority voting system. |
+| `output_parser` | `Callable` | A function used to parse the output of the agents. Default is the `majority_voting` function. |
+| `autosave` | `bool` | A boolean indicating whether to autosave the conversation to a file. Default is `False`. |
+| `verbose` | `bool` | A boolean indicating whether to enable verbose logging. Default is `False`. |
+| `args` | `tuple` | Additional positional arguments. |
+| `kwargs` | `dict` | Additional keyword arguments. |
+
+### Method: `run`
+
+Runs the majority voting system and returns the majority vote.
+
+#### Parameters
+
+| Parameter | Type | Description |
+|-----------|------------|------------------------------------------|
+| `task` | `str` | The task to be performed by the agents. |
+| `args` | `tuple` | Variable length argument list. |
+| `kwargs` | `dict` | Arbitrary keyword arguments. |
+
+#### Returns
+
+| Return Value | Type | Description |
+|--------------|-----------|--------------------------------------|
+| `results` | `List[Any]` | The majority vote. |
+
+## Usage Examples
+
+### Example 1: Basic Majority Voting
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.majority_voting import MajorityVoting
+
+# Initialize agents
+agents = [
+ Agent(
+ agent_name="Devin",
+ system_prompt=(
+ "Autonomous agent that can interact with humans and other"
+ " agents. Be Helpful and Kind. Use the tools provided to"
+ " assist the user. Return all code in markdown format."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ code_interpreter=True,
+ ),
+ Agent(
+ agent_name="Codex",
+ system_prompt=(
+ "An AI coding assistant capable of writing and understanding"
+ " code snippets in various programming languages."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ code_interpreter=True,
+ ),
+ Agent(
+ agent_name="Tabnine",
+ system_prompt=(
+ "A code completion AI that provides suggestions for code"
+ " completion and code improvements."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ code_interpreter=True,
+ ),
+]
+
+# Create MajorityVoting instance
+majority_voting = MajorityVoting(agents)
+
+# Run the majority voting system
+result = majority_voting.run("What is the capital of France?")
+print(result) # Output: 'Paris'
+```
+
+### Example 2: Running a Task with Detailed Outputs
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.majority_voting import MajorityVoting
+
+# Initialize agents
+agents = [
+ Agent(
+ agent_name="Devin",
+ system_prompt=(
+ "Autonomous agent that can interact with humans and other"
+ " agents. Be Helpful and Kind. Use the tools provided to"
+ " assist the user. Return all code in markdown format."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ code_interpreter=True,
+ ),
+ Agent(
+ agent_name="Codex",
+ system_prompt=(
+ "An AI coding assistant capable of writing and understanding"
+ " code snippets in various programming languages."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ code_interpreter=True,
+ ),
+ Agent(
+ agent_name="Tabnine",
+ system_prompt=(
+ "A code completion AI that provides suggestions for code"
+ " completion and code improvements."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ code_interpreter=True,
+ ),
+]
+
+# Create MajorityVoting instance
+majority_voting = MajorityVoting(agents)
+
+# Run the majority voting system with a different task
+result = majority_voting.run("Create a new file for a plan to take over the world.")
+print(result)
+```
\ No newline at end of file
diff --git a/docs/swarms/structs/moa.md b/docs/swarms/structs/moa.md
new file mode 100644
index 00000000..6c0f5959
--- /dev/null
+++ b/docs/swarms/structs/moa.md
@@ -0,0 +1,379 @@
+# MixtureOfAgents Class Documentation
+
+## Overview
+
+The `MixtureOfAgents` class represents a mixture of agents operating within a swarm. The workflow of the swarm follows a parallel -> sequential -> parallel -> final output agent process. This implementation is inspired by concepts discussed in the paper: [https://arxiv.org/pdf/2406.04692](https://arxiv.org/pdf/2406.04692).
+
+The class is designed to manage a collection of agents, orchestrate their execution in layers, and handle the final aggregation of their outputs through a designated final agent. This architecture facilitates complex, multi-step processing where intermediate results are refined through successive layers of agent interactions.
+
+## Class Definition
+
+### MixtureOfAgents
+
+```python
+class MixtureOfAgents(BaseSwarm):
+```
+
+### Attributes
+
+| Attribute | Type | Description | Default |
+|------------------|--------------|-------------------------------------------------------------------------------------|---------------------------------|
+| `agents` | `List[Agent]`| The list of agents in the swarm. | `None` |
+| `flow` | `str` | The flow of the swarm. | `parallel -> sequential -> parallel -> final output agent` |
+| `max_loops` | `int` | The maximum number of loops to run. | `1` |
+| `verbose` | `bool` | Flag indicating whether to print verbose output. | `True` |
+| `layers` | `int` | The number of layers in the swarm. | `3` |
+| `rules` | `str` | The rules for the swarm. | `None` |
+| `final_agent` | `Agent` | The agent to handle the final output processing. | `None` |
+| `auto_save` | `bool` | Flag indicating whether to auto-save the metadata to a file. | `False` |
+| `saved_file_name`| `str` | The name of the file where the metadata will be saved. | `"moe_swarm.json"` |
+
+## Methods
+
+### `__init__`
+
+#### Parameters
+
+| Parameter | Type | Description | Default |
+|------------------|--------------|-----------------------------------------------------------------------------------------------|----------------------------------------|
+| `name` | `str` | The name of the swarm. | `"MixtureOfAgents"` |
+| `description` | `str` | A brief description of the swarm. | `"A swarm of agents that run in parallel and sequentially."` |
+| `agents` | `List[Agent]`| The list of agents in the swarm. | `None` |
+| `max_loops` | `int` | The maximum number of loops to run. | `1` |
+| `verbose` | `bool` | Flag indicating whether to print verbose output. | `True` |
+| `layers` | `int` | The number of layers in the swarm. | `3` |
+| `rules` | `str` | The rules for the swarm. | `None` |
+| `final_agent` | `Agent` | The agent to handle the final output processing. | `None` |
+| `auto_save` | `bool` | Flag indicating whether to auto-save the metadata to a file. | `False` |
+| `saved_file_name`| `str` | The name of the file where the metadata will be saved. | `"moe_swarm.json"` |
+
+### `agent_check`
+
+```python
+def agent_check(self):
+```
+
+#### Description
+
+Checks if the provided `agents` attribute is a list of `Agent` instances. Raises a `TypeError` if the validation fails.
+
+#### Example Usage
+
+```python
+moe_swarm = MixtureOfAgents(agents=[agent1, agent2])
+moe_swarm.agent_check() # Validates the agents
+```
+
+### `final_agent_check`
+
+```python
+def final_agent_check(self):
+```
+
+#### Description
+
+Checks if the provided `final_agent` attribute is an instance of `Agent`. Raises a `TypeError` if the validation fails.
+
+#### Example Usage
+
+```python
+moe_swarm = MixtureOfAgents(final_agent=final_agent)
+moe_swarm.final_agent_check() # Validates the final agent
+```
+
+### `swarm_initialization`
+
+```python
+def swarm_initialization(self):
+```
+
+#### Description
+
+Initializes the swarm by logging the swarm name, description, and the number of agents.
+
+#### Example Usage
+
+```python
+moe_swarm = MixtureOfAgents(agents=[agent1, agent2])
+moe_swarm.swarm_initialization() # Initializes the swarm
+```
+
+### `run`
+
+```python
+def run(self, task: str = None, *args, **kwargs):
+```
+
+#### Parameters
+
+| Parameter | Type | Description | Default |
+|-----------|--------|---------------------------------|---------|
+| `task` | `str` | The task to be performed by the swarm. | `None` |
+| `*args` | `tuple`| Additional arguments. | `None` |
+| `**kwargs`| `dict` | Additional keyword arguments. | `None` |
+
+#### Returns
+
+| Type | Description |
+|-------|---------------------------------------------|
+| `str` | The conversation history as a string. |
+
+#### Description
+
+Runs the swarm with the given task, orchestrates the execution of agents through the specified layers, and returns the conversation history.
+
+#### Example Usage
+
+```python
+moe_swarm = MixtureOfAgents(agents=[agent1, agent2], final_agent=final_agent)
+history = moe_swarm.run(task="Solve this problem.")
+print(history)
+```
+
+## Detailed Explanation
+
+### Initialization
+
+The `__init__` method initializes the swarm with the provided parameters, sets up the conversation rules, and invokes the initialization of the swarm. It also ensures the validity of the `agents` and `final_agent` attributes by calling the `agent_check` and `final_agent_check` methods respectively.
+
+### Agent Validation
+
+The `agent_check` method validates whether the `agents` attribute is a list of `Agent` instances, while the `final_agent_check` method validates whether the `final_agent` is an instance of `Agent`. These checks are crucial to ensure that the swarm operates correctly with the appropriate agent types.
+
+### Swarm Initialization
+
+The `swarm_initialization` method logs essential information about the swarm, including its name, description, and the number of agents. This provides a clear starting point for the swarm's operations and facilitates debugging and monitoring.
+
+### Running the Swarm
+
+The `run` method is the core of the `MixtureOfAgents` class. It orchestrates the execution of agents through multiple layers, collects their outputs, and processes the final output using the `final_agent`. The conversation history is maintained and updated throughout this process, allowing for a seamless flow of information and responses.
+
+During each layer, the method iterates over the agents, invokes their `run` method with the current conversation history, and logs the outputs. These outputs are then added to the conversation, and the history is updated for the next layer.
+
+After all layers are completed, the final output agent processes the entire conversation history, and the metadata is created and optionally saved to a file. This metadata includes details about the layers, agent runs, and final output, providing a comprehensive record of the swarm's execution.
+
+## Additional Information and Tips
+
+### Common Issues and Solutions
+
+- **Type Errors**: Ensure that all agents in the `agents` list and the `final_agent` are instances of the `Agent` class. The `agent_check` and `final_agent_check` methods help validate this.
+- **Verbose Logging**: Use the `verbose` flag to control the verbosity of the output. This can help with debugging or reduce clutter in the logs.
+- **Auto-Save Feature**: Utilize the `auto_save` flag to automatically save the metadata to a file. This can be useful for keeping records of the swarm's operations without manual intervention.
+
+### References and Resources
+
+For further reading and background information on the concepts used in the `MixtureOfAgents` class, refer to the paper: [https://arxiv.org/pdf/2406.04692](https://arxiv.org/pdf/2406.04692).
+
+### Usage Examples
+
+#### Example 1: Basic Initialization and Run
+
+```python
+from swarms import MixtureOfAgents, Agent, OpenAIChat
+
+# Define agents
+director = Agent(
+ agent_name="Director",
+ system_prompt="Directs the tasks for the accountants",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="director.json",
+)
+
+# Initialize accountant 1
+accountant1 = Agent(
+ agent_name="Accountant1",
+ system_prompt="Prepares financial statements",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant1.json",
+)
+
+# Initialize accountant 2
+accountant2 = Agent(
+ agent_name="Accountant2",
+ system_prompt="Audits financial records",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant2.json",
+)
+
+
+# Initialize the MixtureOfAgents
+moe_swarm = MixtureOfAgents(agents=[director, accountant1, accountant2], final_agent=director)
+
+# Run the swarm
+history = moe_swarm.run(task="Perform task X.")
+print(history)
+```
+
+#### Example 2: Verbose Output and Auto-Save
+
+```python
+from swarms import MixtureOfAgents, Agent, OpenAIChat
+
+# Define Agents
+# Define agents
+director = Agent(
+ agent_name="Director",
+ system_prompt="Directs the tasks for the accountants",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="director.json",
+)
+
+# Initialize accountant 1
+accountant1 = Agent(
+ agent_name="Accountant1",
+ system_prompt="Prepares financial statements",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant1.json",
+)
+
+# Initialize accountant 2
+accountant2 = Agent(
+ agent_name="Accountant2",
+ system_prompt="Audits financial records",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant2.json",
+)
+
+# Initialize the MixtureOfAgents with verbose output and auto-save enabled
+moe_swarm = MixtureOfAgents(
+ agents=[director, accountant1, accountant2],
+ final_agent=director,
+ verbose=True,
+ auto_save=True
+)
+
+# Run the swarm
+history = moe_swarm.run(task="Analyze data set Y.")
+print(history)
+```
+
+#### Example 3: Custom Rules and Multiple Layers
+
+```python
+from swarms import MixtureOfAgents, Agent, OpenAIChat
+
+# Define agents
+# Initialize the director agent
+director = Agent(
+ agent_name="Director",
+ system_prompt="Directs the tasks for the accountants",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="director.json",
+)
+
+# Initialize accountant 1
+accountant1 = Agent(
+ agent_name="Accountant1",
+ system_prompt="Prepares financial statements",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant1.json",
+)
+
+# Initialize accountant 2
+accountant2 = Agent(
+ agent_name="Accountant2",
+ system_prompt="Audits financial records",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant2.json",
+)
+
+# Initialize the MixtureOfAgents with custom rules and multiple layers
+moe_swarm = MixtureOfAgents(
+ agents=[director, accountant1, accountant2],
+ final_agent=director,
+ layers=5,
+ rules="Custom rules for the swarm"
+)
+
+# Run the swarm
+history = moe_swarm.run(task="Optimize process Z.")
+print(history)
+```
+
+This comprehensive documentation provides a detailed understanding of the `MixtureOfAgents` class, its attributes, methods, and usage. The examples illustrate how to initialize and run the swarm, demonstrating its flexibility and capability to handle various tasks and configurations.
+
+
+# Conclusion
+
+The `MixtureOfAgents` class is a powerful and flexible framework for managing and orchestrating a swarm of agents. By following a structured approach of parallel and sequential processing, it enables the implementation of complex multi-step workflows where intermediate results are refined through multiple layers of agent interactions. This architecture is particularly suitable for tasks that require iterative processing, collaboration among diverse agents, and sophisticated aggregation of outputs.
+
+### Key Takeaways
+
+1. **Flexible Initialization**: The class allows for customizable initialization with various parameters, enabling users to tailor the swarm's configuration to their specific needs.
+2. **Robust Agent Management**: With built-in validation methods, the class ensures that all agents and the final agent are correctly instantiated, preventing runtime errors and facilitating smooth execution.
+3. **Layered Processing**: The layered approach to processing allows for intermediate results to be iteratively refined, enhancing the overall output quality.
+4. **Verbose Logging and Auto-Save**: These features aid in debugging, monitoring, and record-keeping, providing transparency and ease of management.
+5. **Comprehensive Documentation**: The detailed class and method documentation, along with numerous usage examples, provide a clear and thorough understanding of how to leverage the `MixtureOfAgents` class effectively.
+
+### Practical Applications
+
+The `MixtureOfAgents` class can be applied in various domains, including but not limited to:
+
+- **Natural Language Processing (NLP)**: Managing a swarm of NLP models to process, analyze, and synthesize text.
+- **Data Analysis**: Coordinating multiple data analysis agents to process and interpret complex data sets.
+- **Optimization Problems**: Running a swarm of optimization algorithms to solve complex problems in fields such as logistics, finance, and engineering.
+- **AI Research**: Implementing experimental setups that require the collaboration of multiple AI models or agents to explore new methodologies and approaches.
+
+### Future Extensions
+
+The `MixtureOfAgents` framework provides a solid foundation for further extensions and customizations, including:
+
+- **Dynamic Layer Configuration**: Allowing layers to be added or removed dynamically based on the task requirements or intermediate results.
+- **Advanced Agent Communication**: Enhancing the communication protocols between agents to allow for more sophisticated information exchange.
+- **Integration with Other Frameworks**: Seamlessly integrating with other machine learning or data processing frameworks to leverage their capabilities within the swarm architecture.
+
+In conclusion, the `MixtureOfAgents` class represents a versatile and efficient solution for orchestrating multi-agent systems, facilitating complex task execution through its structured and layered approach. By harnessing the power of parallel and sequential processing, it opens up new possibilities for tackling intricate problems across various domains.
\ No newline at end of file
diff --git a/docs/swarms/structs/multi_agent_collaboration_examples.md b/docs/swarms/structs/multi_agent_collaboration_examples.md
new file mode 100644
index 00000000..03640709
--- /dev/null
+++ b/docs/swarms/structs/multi_agent_collaboration_examples.md
@@ -0,0 +1,226 @@
+# Multi-Agent Examples
+
+
+### `SequentialWorkflow`
+Sequential Workflow enables you to sequentially execute tasks with `Agent` and then pass the output into the next agent and onwards until you have specified your max loops.
+
+```python
+from swarms import Agent, SequentialWorkflow, Anthropic
+
+
+# Initialize the language model agent (e.g., GPT-3)
+llm = Anthropic()
+
+# Initialize agents for individual tasks
+agent1 = Agent(
+ agent_name="Blog generator",
+ system_prompt="Generate a blog post like stephen king",
+ llm=llm,
+ max_loops=1,
+ dashboard=False,
+ tools=[],
+)
+agent2 = Agent(
+ agent_name="summarizer",
+ system_prompt="Summarize the blog post",
+ llm=llm,
+ max_loops=1,
+ dashboard=False,
+ tools=[],
+)
+
+# Create the Sequential workflow
+workflow = SequentialWorkflow(
+ agents=[agent1, agent2], max_loops=1, verbose=False
+)
+
+# Run the workflow
+workflow.run(
+ "Generate a blog post on how swarms of agents can help businesses grow."
+)
+
+```
+
+------
+
+## `AgentRearrange`
+Inspired by Einops and einsum, this orchestration technique enables you to map out the relationships between various agents. For example, you can specify linear and sequential relationships like `a -> a1 -> a2 -> a3` or concurrent relationships where the first agent will send a message to 3 agents all at once: `a -> a1, a2, a3`. You can customize your workflow to mix sequential and concurrent relationships. [Docs Available:](https://swarms.apac.ai/en/latest/swarms/structs/agent_rearrange/)
+
+```python
+from swarms import Agent, AgentRearrange, Anthropic
+
+
+# Initialize the director agent
+
+director = Agent(
+ agent_name="Director",
+ system_prompt="Directs the tasks for the workers",
+ llm=Anthropic(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="director.json",
+)
+
+
+# Initialize worker 1
+
+worker1 = Agent(
+ agent_name="Worker1",
+ system_prompt="Generates a transcript for a youtube video on what swarms are",
+ llm=Anthropic(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="worker1.json",
+)
+
+
+# Initialize worker 2
+worker2 = Agent(
+ agent_name="Worker2",
+ system_prompt="Summarizes the transcript generated by Worker1",
+ llm=Anthropic(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="worker2.json",
+)
+
+
+# Create a list of agents
+agents = [director, worker1, worker2]
+
+# Define the flow pattern
+flow = "Director -> Worker1 -> Worker2"
+
+# Using AgentRearrange class
+agent_system = AgentRearrange(agents=agents, flow=flow)
+output = agent_system.run(
+ "Create a format to express and communicate swarms of llms in a structured manner for youtube"
+)
+print(output)
+
+```
+
+## `HierarchicalSwarm`
+Coming soon...
+
+
+## `GraphSwarm`
+
+```python
+import os
+
+from dotenv import load_dotenv
+
+from swarms import Agent, Edge, GraphWorkflow, Node, NodeType, OpenAIChat
+
+load_dotenv()
+
+api_key = os.environ.get("OPENAI_API_KEY")
+
+llm = OpenAIChat(
+ temperature=0.5, openai_api_key=api_key, max_tokens=4000
+)
+agent1 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
+agent2 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
+
+def sample_task():
+ print("Running sample task")
+ return "Task completed"
+
+wf_graph = GraphWorkflow()
+wf_graph.add_node(Node(id="agent1", type=NodeType.AGENT, agent=agent1))
+wf_graph.add_node(Node(id="agent2", type=NodeType.AGENT, agent=agent2))
+wf_graph.add_node(
+ Node(id="task1", type=NodeType.TASK, callable=sample_task)
+)
+wf_graph.add_edge(Edge(source="agent1", target="task1"))
+wf_graph.add_edge(Edge(source="agent2", target="task1"))
+
+wf_graph.set_entry_points(["agent1", "agent2"])
+wf_graph.set_end_points(["task1"])
+
+print(wf_graph.visualize())
+
+# Run the workflow
+results = wf_graph.run()
+print("Execution results:", results)
+
+```
+
+## `MixtureOfAgents`
+This is an implementation from the paper: "Mixture-of-Agents Enhances Large Language Model Capabilities" by together.ai. It achieves SOTA on AlpacaEval 2.0, MT-Bench and FLASK, surpassing GPT-4 Omni. Great for tasks that need to be parallelized and then sequentially fed into another loop.
+
+```python
+from swarms import Agent, OpenAIChat, MixtureOfAgents
+
+# Initialize the director agent
+director = Agent(
+ agent_name="Director",
+ system_prompt="Directs the tasks for the accountants",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="director.json",
+)
+
+# Initialize accountant 1
+accountant1 = Agent(
+ agent_name="Accountant1",
+ system_prompt="Prepares financial statements",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant1.json",
+)
+
+# Initialize accountant 2
+accountant2 = Agent(
+ agent_name="Accountant2",
+ system_prompt="Audits financial records",
+ llm=OpenAIChat(),
+ max_loops=1,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ state_save_file_type="json",
+ saved_state_path="accountant2.json",
+)
+
+# Create a list of agents
+agents = [director, accountant1, accountant2]
+
+
+# Swarm
+swarm = MixtureOfAgents(
+ name="Mixture of Accountants",
+ agents=agents,
+ layers=3,
+ final_agent=director,
+)
+
+
+# Run the swarm
+out = swarm.run("Prepare financial statements and audit financial records")
+print(out)
+```
diff --git a/docs/swarms/structs/multi_agent_orchestration.md b/docs/swarms/structs/multi_agent_orchestration.md
new file mode 100644
index 00000000..80dedff3
--- /dev/null
+++ b/docs/swarms/structs/multi_agent_orchestration.md
@@ -0,0 +1,15 @@
+# Multi-Agent Orchestration:
+Swarms was designed to facilitate communication between many different and specialized agents from a vast array of other frameworks, such as LangChain, AutoGen, Crew, and more.
+
+In traditional swarm theory, there are many types of swarms, usually for very specialized use-cases and problem sets. For example, hierarchical and sequential swarms are great for accounting and sales, because there is usually a boss coordinator agent that distributes a workload to other specialized agents.
+
+
+| **Name** | **Description** | **Code Link** | **Use Cases** |
+|-------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------|---------------------------------------------------------------------------------------------------|
+| Hierarchical Swarms | A system where agents are organized in a hierarchy, with higher-level agents coordinating lower-level agents to achieve complex tasks. | [Code Link](#) | Manufacturing process optimization, multi-level sales management, healthcare resource coordination |
+| Agent Rearrange | A setup where agents rearrange themselves dynamically based on the task requirements and environmental conditions. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/agent_rearrange/) | Adaptive manufacturing lines, dynamic sales territory realignment, flexible healthcare staffing |
+| Concurrent Workflows | Agents perform different tasks simultaneously, coordinating to complete a larger goal. | [Code Link](#) | Concurrent production lines, parallel sales operations, simultaneous patient care processes |
+| Sequential Coordination | Agents perform tasks in a specific sequence, where the completion of one task triggers the start of the next. | [Code Link](https://docs.swarms.world/en/latest/swarms/structs/sequential_workflow/) | Step-by-step assembly lines, sequential sales processes, stepwise patient treatment workflows |
+| Parallel Processing | Agents work on different parts of a task simultaneously to speed up the overall process. | [Code Link](#) | Parallel data processing in manufacturing, simultaneous sales analytics, concurrent medical tests |
+
+
diff --git a/docs/swarms/structs/multi_process_workflow.md b/docs/swarms/structs/multi_process_workflow.md
new file mode 100644
index 00000000..d89134d6
--- /dev/null
+++ b/docs/swarms/structs/multi_process_workflow.md
@@ -0,0 +1,124 @@
+# MultiProcessWorkflow Documentation
+
+
+The `MultiProcessWorkflow` class provides a framework for executing tasks concurrently using multiple processes. This class leverages Python's `multiprocessing` module to parallelize task execution, thereby enhancing performance and efficiency. It includes features such as automatic task retry on failure and optional autosaving of results. This documentation details the class, its parameters, attributes, methods, and usage examples.
+
+## Class Definition
+
+### `MultiProcessWorkflow`
+
+
+## Parameters
+
+| Parameter | Type | Default | Description |
+|---------------|---------------------|---------|---------------------------------------------------------------|
+| `max_workers` | `int` | `5` | The maximum number of workers to use for parallel processing. |
+| `autosave` | `bool` | `True` | Flag indicating whether to automatically save the workflow. |
+| `agents` | `Sequence[Agent]` | `None` | A list of Agent objects representing the workflow agents. |
+| `*args` | `tuple` | | Additional positional arguments. |
+| `**kwargs` | `dict` | | Additional keyword arguments. |
+
+## Attributes
+
+| Attribute | Type | Description |
+|-----------------|---------------------|--------------------------------------------------------------|
+| `max_workers` | `int` | The maximum number of workers to use for parallel processing.|
+| `autosave` | `bool` | Flag indicating whether to automatically save the workflow. |
+| `agents` | `Sequence[Agent]` | A list of Agent objects representing the workflow agents. |
+
+## Methods
+
+### `execute_task`
+
+#### Description
+
+The `execute_task` method executes a given task and handles any exceptions that may occur during execution. If agents are defined, it will execute the task using each agent in sequence.
+
+#### Usage Example
+
+```python
+# Define a task
+task = Task()
+
+# Execute the task
+workflow = MultiProcessWorkflow()
+result = workflow.execute_task(task)
+print(result)
+```
+
+### `run`
+
+#### Description
+
+The `run` method executes the workflow by running the given task using multiple processes. It manages the task execution using a process pool and collects the results.
+
+#### Usage Example
+
+```python
+from swarms.structs.multi_process_workflow import MultiProcessWorkflow
+from swarms.structs.task import Task
+from datetime import datetime
+from time import sleep
+
+# Define a simple task
+def simple_task():
+ sleep(1)
+ return datetime.now()
+
+# Create a task object
+task = Task(
+ name="Simple Task",
+ execute=simple_task,
+ priority=1,
+)
+
+# Create a workflow with the task
+workflow = MultiProcessWorkflow(max_workers=3, autosave=True, agents=[agent1, agent2])
+
+# Run the workflow
+results = workflow.run(task)
+
+# Print the results
+print(results)
+```
+
+## Detailed Functionality and Usage
+
+### Initialization
+
+When an instance of `MultiProcessWorkflow` is created, it initializes the following:
+
+- **max_workers**: Sets the maximum number of processes that can run concurrently.
+- **autosave**: Determines if the workflow results should be saved automatically.
+- **agents**: Accepts a list of agents that will perform the tasks.
+
+### Running Tasks
+
+The `run` method performs the following steps:
+
+1. **Initialize Results and Manager**: Creates a list to store results and a `Manager` to manage shared state between processes.
+2. **Initialize Process Pool**: Creates a pool of worker processes.
+3. **Submit Tasks**: Iterates over the agents, submitting tasks to the pool for execution and collecting the results.
+4. **Wait for Completion**: Waits for all tasks to complete and collects the results.
+5. **Return Results**: Returns the list of results from all executed tasks.
+
+### Autosave Task Result
+
+Although the autosave functionality is mentioned in the parameters, it is not explicitly defined in the given code. The implementation for autosaving should be added based on the specific requirements of the application.
+
+## Additional Information and Tips
+
+- **Process Safety**: The use of `Manager` ensures that the list of results is managed safely across multiple processes.
+- **Logging**: The class uses the `logger` module to log information about task execution, retries, and failures.
+- **Error Handling**: The retry mechanism in the `execute_task` method helps in handling transient errors by attempting to re-execute failed tasks.
+
+## References and Resources
+
+For more information on multiprocessing in Python, refer to the following resources:
+
+- [Python Multiprocessing Documentation](https://docs.python.org/3/library/multiprocessing.html)
+- [Python Logging Documentation](https://docs.python.org/3/library/logging.html)
+
+---
+
+By following this detailed documentation, users can effectively understand and utilize the `MultiProcessWorkflow` class to execute tasks concurrently with multiple processes. The examples provided help in demonstrating the practical usage of the class.
\ No newline at end of file
diff --git a/docs/swarms/structs/multi_processing_workflow.md b/docs/swarms/structs/multi_processing_workflow.md
new file mode 100644
index 00000000..320667d4
--- /dev/null
+++ b/docs/swarms/structs/multi_processing_workflow.md
@@ -0,0 +1,204 @@
+# MultiProcessWorkflow Documentation
+
+The `MultiProcessWorkflow` class extends the `BaseWorkflow` to support parallel processing using multiple workers. This class is designed to efficiently execute tasks concurrently, leveraging the power of multi-processing to enhance performance and scalability.
+
+### Key Concepts
+
+- **Parallel Processing**: Utilizing multiple workers to execute tasks concurrently.
+- **Workflow Management**: Handling the execution of tasks in a structured workflow.
+- **Agents**: Entities responsible for executing tasks.
+
+## Attributes
+
+### Arguments
+
+| Argument | Type | Default | Description |
+|--------------|---------------------|---------|-------------|
+| `max_workers`| `int` | `5` | The maximum number of workers to use for parallel processing. |
+| `autosave` | `bool` | `True` | Flag indicating whether to automatically save the workflow. |
+| `agents` | `Sequence[Agent]` | `None` | A list of agents participating in the workflow. |
+| `*args` | | | Additional positional arguments. |
+| `**kwargs` | | | Additional keyword arguments. |
+
+### Attributes
+
+| Attribute | Type | Description |
+|--------------|---------------------|-------------|
+| `max_workers`| `int` | The maximum number of workers to use for parallel processing. |
+| `autosave` | `bool` | Flag indicating whether to automatically save the workflow. |
+| `agents` | `Sequence[Agent]` | A list of agents participating in the workflow. |
+
+## Methods
+
+### __init__
+
+Initializes the `MultiProcessWorkflow` with the given parameters.
+
+**Examples:**
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.task import Task
+from swarms.structs.multi_process_workflow import MultiProcessWorkflow
+
+agents = [Agent(name="Agent 1"), Agent(name="Agent 2")]
+tasks = [Task(name="Task 1", execute=lambda: "result1"), Task(name="Task 2", execute=lambda: "result2")]
+
+workflow = MultiProcessWorkflow(max_workers=3, agents=agents, tasks=tasks)
+```
+
+### execute_task
+
+Executes a task and handles exceptions.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `task` | `str` | The task to execute. |
+| `*args` | | Additional positional arguments for the task execution. |
+| `**kwargs`| | Additional keyword arguments for the task execution. |
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `Any` | The result of the task execution. |
+
+**Examples:**
+
+```python
+result = workflow.execute_task(task="Sample Task")
+print(result)
+```
+
+### run
+
+Runs the workflow.
+
+**Arguments:**
+
+| Parameter | Type | Description |
+|-----------|------|-------------|
+| `task` | `str` | The task to run. |
+| `*args` | | Additional positional arguments for the task execution. |
+| `**kwargs`| | Additional keyword arguments for the task execution. |
+
+**Returns:**
+
+| Return Type | Description |
+|-------------|-------------|
+| `List[Any]` | The results of all executed tasks. |
+
+**Examples:**
+
+```python
+results = workflow.run(task="Sample Task")
+print(results)
+```
+
+### Additional Examples
+
+#### Example 1: Simple Task Execution
+
+```python
+from swarms import Agent, Task, MultiProcessWorkflow, OpenAIChat
+from datetime import datetime
+from time import sleep
+
+import os
+from dotenv import load_dotenv
+
+# Load the environment variables
+load_dotenv()
+
+
+# Define a function to be used as the action
+def my_action():
+ print("Action executed")
+
+
+# Define a function to be used as the condition
+def my_condition():
+ print("Condition checked")
+ return True
+
+
+# Create an agent
+agent = Agent(
+ llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
+ max_loops=1,
+ dashboard=False,
+)
+
+# Create a task
+task = Task(
+ description=(
+ "Generate a report on the top 3 biggest expenses for small"
+ " businesses and how businesses can save 20%"
+ ),
+ agent=agent,
+)
+
+# Create a workflow with the task
+workflow = MultiProcessWorkflow(tasks=[task])
+
+# Run the workflow
+results = workflow.run(task)
+print(results)
+```
+
+#### Example 2: Workflow with Multiple Agents
+
+```python
+from swarms import Agent, Task, MultiProcessWorkflow
+
+# Define tasks
+def task1():
+ return "Task 1 result"
+
+def task2():
+ return "Task 2 result"
+
+# Create agents
+agent1 = Agent(name="Agent 1", llm=OpenAIChat())
+agent2 = Agent(name="Agent 2", llm=OpenAIChat())
+
+# Create tasks
+task_1 = Task(name="Task 1", execute=task1)
+task_2 = Task(name="Task 2", execute=task2)
+
+# Create a workflow
+workflow = MultiProcessWorkflow(agents=[agent1, agent2], tasks=[task_1, task_2])
+
+# Run the workflow
+results = workflow.run(task="Example Task")
+print(results)
+```
+
+#### Example 3: Customizing Max Workers
+
+```python
+from swarms import Agent, Task, MultiProcessWorkflow, OpenAIChat
+
+# Define a task
+def example_task():
+ return "Task result"
+
+# Create an agent
+agent = Agent(name="Agent 1", llm=OpenAIChat())
+
+# Create a task
+task = Task(name="Example Task", execute=example_task)
+
+# Create a workflow with custom max workers
+workflow = MultiProcessWorkflow(max_workers=10, agents=[agent], tasks=[task])
+
+# Run the workflow
+results = workflow.run(task="Example Task")
+print(results)
+```
+
+## Summary
+
+The `MultiProcessWorkflow` class provides a powerful framework for managing and executing tasks using multiple workers. With support for parallel processing, customizable workflows, and detailed logging, it is an ideal tool for complex task execution scenarios. This class enhances performance and scalability, making it suitable for a wide range of applications that require efficient task management.
\ No newline at end of file
diff --git a/docs/swarms/structs/multi_threaded_workflow.md b/docs/swarms/structs/multi_threaded_workflow.md
new file mode 100644
index 00000000..3b4f91cb
--- /dev/null
+++ b/docs/swarms/structs/multi_threaded_workflow.md
@@ -0,0 +1,113 @@
+# MultiThreadedWorkflow Documentation
+
+The `MultiThreadedWorkflow` class represents a multi-threaded workflow designed to execute tasks concurrently using a thread pool. This class is highly useful in scenarios where tasks need to be executed in parallel to improve performance and efficiency. The workflow ensures that tasks are managed in a priority-based queue, and it includes mechanisms for retrying failed tasks and optionally saving task results automatically.
+
+## Class Definition
+
+### `MultiThreadedWorkflow`
+
+## Parameters
+
+| Parameter | Type | Default | Description |
+|---------------|-----------------------|---------|---------------------------------------------------------------|
+| `max_workers` | `int` | `5` | The maximum number of worker threads in the thread pool. |
+| `autosave` | `bool` | `True` | Flag indicating whether to automatically save task results. |
+| `tasks` | `List[PriorityTask]` | `None` | List of priority tasks to be executed. |
+| `retry_attempts` | `int` | `3` | The maximum number of retry attempts for failed tasks. |
+| `*args` | `tuple` | | Variable length argument list. |
+| `**kwargs` | `dict` | | Arbitrary keyword arguments. |
+
+## Attributes
+
+| Attribute | Type | Description |
+|------------------|--------------------|----------------------------------------------------------------|
+| `max_workers` | `int` | The maximum number of worker threads in the thread pool. |
+| `autosave` | `bool` | Flag indicating whether to automatically save task results. |
+| `retry_attempts` | `int` | The maximum number of retry attempts for failed tasks. |
+| `tasks_queue` | `PriorityQueue` | The queue that holds the priority tasks. |
+| `lock` | `Lock` | The lock used for thread synchronization. |
+
+## Methods
+
+### `run`
+
+
+#### Description
+
+The `run` method executes the tasks stored in the priority queue using a thread pool. It handles task completion, retries failed tasks up to a specified number of attempts, and optionally saves the results of tasks if the autosave flag is set.
+
+#### Usage Example
+
+```python
+from swarms import MultiThreadedWorkflow, PriorityTask, Task
+
+# Define some tasks
+tasks = [PriorityTask(task=Task()), PriorityTask(task=Task())]
+
+# Create a MultiThreadedWorkflow instance
+workflow = MultiThreadedWorkflow(max_workers=3, autosave=True, tasks=tasks, retry_attempts=2)
+
+# Run the workflow
+results = workflow.run()
+print(results)
+```
+
+### `_autosave_task_result`
+
+#### Description
+
+The `_autosave_task_result` method is responsible for saving the results of a task. It uses a thread lock to ensure that the autosave operation is thread-safe.
+
+#### Usage Example
+
+This method is intended for internal use and is typically called by the `run` method. However, here is an example of how it might be used directly:
+
+```python
+# Create a task and result
+task = Task()
+result = task.execute()
+
+# Autosave the result
+workflow = MultiThreadedWorkflow()
+workflow._autosave_task_result(task, result)
+```
+
+## Detailed Functionality and Usage
+
+### Initialization
+
+When an instance of `MultiThreadedWorkflow` is created, it initializes the following:
+
+- **max_workers**: Sets the maximum number of threads that can run concurrently.
+- **autosave**: Determines if the task results should be saved automatically.
+- **tasks**: Accepts a list of tasks that need to be executed. If no tasks are provided, an empty list is used.
+- **retry_attempts**: Sets the maximum number of retry attempts for failed tasks.
+- **tasks_queue**: A priority queue to manage tasks based on their priority.
+- **lock**: A threading lock to ensure thread-safe operations.
+
+### Running Tasks
+
+The `run` method performs the following steps:
+
+1. **Initialize Results and Executor**: Creates a list to store results and a `ThreadPoolExecutor` to manage the threads.
+2. **Submit Tasks**: Iterates over the tasks in the queue, submitting them to the executor for execution and storing the future objects.
+3. **Monitor Completion**: Uses the `wait` function to monitor the completion of tasks. Once a task is completed, it retrieves the result or catches exceptions.
+4. **Retry Mechanism**: If a task fails, it checks the number of attempts made and retries the task if the limit is not reached.
+5. **Autosave**: If the `autosave` flag is set, the `_autosave_task_result` method is called to save the task results.
+
+### Autosave Task Result
+
+The `_autosave_task_result` method handles the saving of task results. It uses a threading lock to ensure that the save operation is not interrupted by other threads.
+
+## Additional Information and Tips
+
+- **Thread Safety**: The use of threading locks ensures that the operations are thread-safe, preventing race conditions.
+- **Logging**: The class uses the logging module to log information about task completion, retries, and failures.
+- **Error Handling**: The retry mechanism helps in handling transient errors by attempting to re-execute failed tasks.
+
+## References and Resources
+
+For more information on threading and concurrent execution in Python, refer to the following resources:
+
+- [Python Threading Documentation](https://docs.python.org/3/library/threading.html)
+- [Python Concurrent Futures Documentation](https://docs.python.org/3/library/concurrent.futures.html)
diff --git a/docs/swarms/structs/round_robin_swarm.md b/docs/swarms/structs/round_robin_swarm.md
new file mode 100644
index 00000000..d788eb85
--- /dev/null
+++ b/docs/swarms/structs/round_robin_swarm.md
@@ -0,0 +1,115 @@
+# RoundRobin: Round-Robin Task Execution in a Swarm
+
+## Introduction
+
+The `RoundRobinSwarm` class is designed to manage and execute tasks among multiple agents in a round-robin fashion. This approach ensures that each agent in a swarm receives an equal opportunity to execute tasks, which promotes fairness and efficiency in distributed systems. It is particularly useful in environments where collaborative, sequential task execution is needed among various agents.
+
+## Conceptual Overview
+
+### What is Round-Robin?
+
+Round-robin is a scheduling technique commonly used in computing for managing processes in shared systems. It involves assigning a fixed time slot to each process and cycling through all processes in a circular order without prioritization. In the context of swarms of agents, this method ensures equitable distribution of tasks and resource usage among all agents.
+
+### Application in Swarms
+
+In swarms, `RoundRobinSwarm` utilizes the round-robin scheduling to manage tasks among agents like software components, autonomous robots, or virtual entities. This strategy is beneficial where tasks are interdependent or require sequential processing.
+
+## Class Attributes
+
+- `agents (List[Agent])`: List of agents participating in the swarm.
+- `verbose (bool)`: Enables or disables detailed logging of swarm operations.
+- `max_loops (int)`: Limits the number of times the swarm cycles through all agents.
+- `index (int)`: Maintains the current position in the agent list to ensure round-robin execution.
+
+## Methods
+
+### `__init__`
+
+Initializes the swarm with the provided list of agents, verbosity setting, and operational parameters.
+
+**Parameters:**
+- `agents`: Optional list of agents in the swarm.
+- `verbose`: Boolean flag for detailed logging.
+- `max_loops`: Maximum number of execution cycles.
+- `callback`: Optional function called after each loop.
+
+### `run`
+
+Executes a specified task across all agents in a round-robin manner, cycling through each agent repeatedly for the number of specified loops.
+
+**Conceptual Behavior:**
+- Distribute the task sequentially among all agents starting from the current index.
+- Each agent processes the task and potentially modifies it or produces new output.
+- After an agent completes its part of the task, the index moves to the next agent.
+- This cycle continues until the specified maximum number of loops is completed.
+- Optionally, a callback function can be invoked after each loop to handle intermediate results or perform additional actions.
+
+## Examples
+### Example 1: Round-Robin Task Distribution Among Sales Agents
+
+In this example, `RoundRobinSwarm` is used to distribute a sales task evenly among a group of sales agents. This is common in scenarios where balanced task distribution is crucial for ensuring fairness and making full use of each agent's specialization.
+
+```python
+from swarms import Agent, OpenAIChat, RoundRobinSwarm
+
+
+# Initialize the LLM
+llm = OpenAIChat()
+
+# Define sales agents
+sales_agent1 = Agent(
+ agent_name="Sales Agent 1 - Automation Specialist",
+ system_prompt="You're Sales Agent 1, your purpose is to generate sales for a company by focusing on the benefits of automating accounting processes!",
+ agent_description="Generate sales by focusing on the benefits of automation!",
+ llm=llm,
+ max_loops=1,
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ context_length=1000,
+)
+
+sales_agent2 = Agent(
+ agent_name="Sales Agent 2 - Cost Saving Specialist",
+ system_prompt="You're Sales Agent 2, your purpose is to generate sales for a company by emphasizing the cost savings of using swarms of agents!",
+ agent_description="Generate sales by emphasizing cost savings!",
+ llm=llm,
+ max_loops=1,
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ context_length=1000,
+)
+
+sales_agent3 = Agent(
+ agent_name="Sales Agent 3 - Efficiency Specialist",
+ system_prompt="You're Sales Agent 3, your purpose is to generate sales for a company by highlighting the efficiency and accuracy of our swarms of agents in accounting processes!",
+ agent_description="Generate sales by highlighting efficiency and accuracy!",
+ llm=llm,
+ max_loops=1,
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ context_length=1000,
+)
+
+# Initialize the swarm with sales agents
+sales_swarm = RoundRobinSwarm(agents=[sales_agent1, sales_agent2, sales_agent3], verbose=True)
+
+# Define a sales task
+task = "Generate a sales email for an accountant firm executive to sell swarms of agents to automate their accounting processes."
+
+# Distribute sales tasks to different agents
+for _ in range(5): # Repeat the task 5 times
+ results = sales_swarm.run(task)
+ print("Sales generated:", results)
+```
+
+
+
+## Conclusion
+
+The RoundRobinSwarm class provides a robust and flexible framework for managing tasks among multiple agents in a fair and efficient manner. This class is especially useful in environments where tasks need to be distributed evenly among a group of agents, ensuring that all tasks are handled in a timely and effective manner. Through the round-robin algorithm, each agent in the swarm is guaranteed an equal opportunity to contribute to the overall task, promoting efficiency and collaboration.
diff --git a/docs/swarms/structs/sequential_workflow.md b/docs/swarms/structs/sequential_workflow.md
new file mode 100644
index 00000000..05b047b6
--- /dev/null
+++ b/docs/swarms/structs/sequential_workflow.md
@@ -0,0 +1,89 @@
+# SequentialWorkflow Documentation
+
+The `SequentialWorkflow` class is designed to manage and execute a sequence of tasks through a dynamic arrangement of agents. This class allows for the orchestration of multiple agents in a predefined order, facilitating complex workflows where tasks are processed sequentially by different agents.
+
+## Attributes
+
+| Attribute | Type | Description |
+|------------------|---------------|--------------------------------------------------|
+| `agents` | `List[Agent]` | The list of agents in the workflow. |
+| `flow` | `str` | A string representing the order of agents. |
+| `agent_rearrange`| `AgentRearrange` | Manages the dynamic execution of agents. |
+
+## Methods
+
+### `__init__(self, agents: List[Agent] = None, max_loops: int = 1, *args, **kwargs)`
+
+The constructor initializes the `SequentialWorkflow` object.
+
+- **Parameters:**
+ - `agents` (`List[Agent]`, optional): The list of agents in the workflow. Defaults to `None`.
+ - `max_loops` (`int`, optional): The maximum number of loops to execute the workflow. Defaults to `1`.
+ - `*args`: Variable length argument list.
+ - `**kwargs`: Arbitrary keyword arguments.
+
+### `run(self, task: str) -> str`
+
+Runs the specified task through the agents in the dynamically constructed flow.
+
+- **Parameters:**
+ - `task` (`str`): The task for the agents to execute.
+
+- **Returns:**
+ - `str`: The final result after processing through all agents.
+
+- **Usage Example:**
+ ```python
+ from swarms import Agent, SequentialWorkflow, Anthropic
+
+
+ # Initialize the language model agent (e.g., GPT-3)
+ llm = Anthropic()
+
+ # Place your key in .env
+
+ # Initialize agents for individual tasks
+ agent1 = Agent(
+ agent_name="Blog generator",
+ system_prompt="Generate a blog post like stephen king",
+ llm=llm,
+ max_loops=1,
+ dashboard=False,
+ tools=[],
+ )
+ agent2 = Agent(
+ agent_name="summarizer",
+      system_prompt="Summarize the blog post",
+ llm=llm,
+ max_loops=1,
+ dashboard=False,
+ tools=[],
+ )
+
+ # Create the Sequential workflow
+ workflow = SequentialWorkflow(
+ agents=[agent1, agent2], max_loops=1, verbose=False
+ )
+
+ # Run the workflow
+ workflow.run(
+ "Generate a blog post on how swarms of agents can help businesses grow."
+ )
+
+ ```
+
+  This example initializes a `SequentialWorkflow` with two agents and executes a task through both of them in sequence.
+
+- **Notes:**
+ - Logs the task execution process and handles any exceptions that occur during the task execution.
+
+### Logging and Error Handling
+
+The `run` method includes logging to track the execution flow and captures errors to provide detailed information in case of failures. This is crucial for debugging and ensuring smooth operation of the workflow.
+
+## Additional Tips
+
+- Ensure that the agents provided to the `SequentialWorkflow` are properly initialized and configured to handle the tasks they will receive.
+
+- The `max_loops` parameter can be used to control how many times the workflow should be executed, which is useful for iterative processes.
+- Utilize the logging information to monitor and debug the task execution process.
diff --git a/docs/swarms/structs/stepinput.md b/docs/swarms/structs/stepinput.md
new file mode 100644
index 00000000..2230ccdf
--- /dev/null
+++ b/docs/swarms/structs/stepinput.md
@@ -0,0 +1,64 @@
+# Module/Class Name: StepInput
+
+The `StepInput` class is used to define the input parameters for the task step. It is a part of the `BaseModel` and accepts any value. This documentation will provide an overview of the class, its functionality, and usage examples.
+
+## Overview and Introduction
+The `StepInput` class is an integral part of the `swarms.structs` library, allowing users to define and pass input parameters for a specific task step. This class provides flexibility by accepting any value, allowing the user to customize the input parameters according to their requirements.
+
+## Class Definition
+The `StepInput` class is defined as follows:
+
+```python
+class StepInput(BaseModel):
+ __root__: Any = Field(
+ ...,
+ description=("Input parameters for the task step. Any value is" " allowed."),
+ example='{\n"file_to_refactor": "models.py"\n}',
+ )
+```
+
+The `StepInput` class extends the `BaseModel` and contains a single field `__root__` of type `Any` with a description of accepting input parameters for the task step.
+
+## Functionality and Usage
+The `StepInput` class is designed to accept any input value, providing flexibility and customization for task-specific parameters. Upon creating an instance of `StepInput`, the user can define and pass input parameters as per their requirements.
+
+### Usage Example 1:
+```python
+from swarms.structs import StepInput
+
+input_params = {"file_to_refactor": "models.py", "refactor_method": "code"}
+step_input = StepInput(__root__=input_params)
+```
+
+In this example, we import the `StepInput` class from the `swarms.structs` library and create an instance `step_input` by passing a dictionary of input parameters. The `StepInput` class allows any value to be passed, providing flexibility for customization.
+
+### Usage Example 2:
+```python
+from swarms.structs import StepInput
+
+input_params = {"input_path": "data.csv", "output_path": "result.csv"}
+step_input = StepInput(__root__=input_params)
+```
+
+In this example, we again create an instance of `StepInput` by passing a dictionary of input parameters. The `StepInput` class does not restrict the type of input, allowing users to define parameters based on their specific task requirements.
+
+### Usage Example 3:
+```python
+from swarms.structs import StepInput
+
+file_path = "config.json"
+with open(file_path) as f:
+ input_data = json.load(f)
+
+step_input = StepInput(__root__=input_data)
+```
+
+In this example, we read input parameters from a JSON file and create an instance of `StepInput` by passing the loaded JSON data. The `StepInput` class seamlessly accepts input data from various sources, providing versatility to the user.
+
+## Additional Information and Tips
+When using the `StepInput` class, ensure that the input parameters are well-defined and align with the requirements of the task step. When passing complex data structures, such as nested dictionaries or JSON objects, ensure that the structure is valid and well-formed.
+
+## References and Resources
+- For further information on the `BaseModel` and `Field` classes, refer to the Pydantic documentation: [Pydantic Documentation](https://pydantic-docs.helpmanual.io/)
+
+The `StepInput` class within the `swarms.structs` library is a versatile and essential component for defining task-specific input parameters. Its flexibility in accepting any value and seamless integration with diverse data sources make it a valuable asset for customizing input parameters for task steps.
diff --git a/docs/swarms/structs/swarm_network.md b/docs/swarms/structs/swarm_network.md
new file mode 100644
index 00000000..1b74a85f
--- /dev/null
+++ b/docs/swarms/structs/swarm_network.md
@@ -0,0 +1,705 @@
+# SwarmNetwork [WIP]
+
+The `SwarmNetwork` class is a powerful tool for managing a pool of agents, orchestrating task distribution, and scaling resources based on workload. It is designed to handle tasks efficiently by dynamically adjusting the number of agents according to the current demand. This class also provides an optional API for interacting with the agent pool, making it accessible for integration with other systems.
+
+### Key Features
+- **Agent Pool Management**: Dynamically manage a pool of agents.
+- **Task Queue Management**: Handle tasks through a queue system.
+- **Agent Health Monitoring**: Monitor the health of agents.
+- **Agent Pool Scaling**: Scale the agent pool up or down based on workload.
+- **API**: Interact with the agent pool and task queue through a simple API.
+- **Agent Deployment Options**: Run agents on threads, processes, containers, machines, or clusters.
+
+### Parameters
+
+| Parameter | Type | Default Value | Description |
+|-----------------|--------------------|---------------|-----------------------------------------------------------------------------|
+| name | str | None | The name of the swarm network. |
+| description | str | None | A description of the swarm network. |
+| agents | List[Agent] | None | A list of agents in the pool. |
+| idle_threshold | float | 0.2 | The idle threshold for the agents. |
+| busy_threshold | float | 0.7 | The busy threshold for the agents. |
+| api_enabled | Optional[bool] | False | A flag to enable/disable the API. |
+| logging_enabled | Optional[bool] | False | A flag to enable/disable logging. |
+| api_on | Optional[bool] | False | A flag to enable/disable the FastAPI instance. |
+| host | str | "0.0.0.0" | The host address for the FastAPI instance. |
+| port | int | 8000 | The port number for the FastAPI instance. |
+| swarm_callable | Optional[callable] | None | A callable to be executed by the swarm network. |
+| *args | tuple | | Additional positional arguments. |
+| **kwargs | dict | | Additional keyword arguments. |
+
+### Attributes
+
+| Attribute | Type | Description |
+|------------------|--------------------|----------------------------------------------------------------|
+| task_queue | queue.Queue | A queue for storing tasks. |
+| idle_threshold | float | The idle threshold for the agents. |
+| busy_threshold | float | The busy threshold for the agents. |
+| agents | List[Agent] | A list of agents in the pool. |
+| api_enabled | bool | A flag to enable/disable the API. |
+| logging_enabled | bool | A flag to enable/disable logging. |
+| host | str | The host address for the FastAPI instance. |
+| port | int | The port number for the FastAPI instance. |
+| swarm_callable | Optional[callable] | A callable to be executed by the swarm network. |
+| agent_dict | dict | A dictionary of agents for easy access. |
+| lock | threading.Lock | A lock for synchronizing access to shared resources. |
+
+## Methods
+
+#### Description
+Initializes a new instance of the `SwarmNetwork` class.
+
+#### Parameters
+- `name` (str): The name of the swarm network.
+- `description` (str): A description of the swarm network.
+- `agents` (List[Agent]): A list of agents in the pool.
+- `idle_threshold` (float): The idle threshold for the agents.
+- `busy_threshold` (float): The busy threshold for the agents.
+- `api_enabled` (Optional[bool]): A flag to enable/disable the API.
+- `logging_enabled` (Optional[bool]): A flag to enable/disable logging.
+- `api_on` (Optional[bool]): A flag to enable/disable the FastAPI instance.
+- `host` (str): The host address for the FastAPI instance.
+- `port` (int): The port number for the FastAPI instance.
+- `swarm_callable` (Optional[callable]): A callable to be executed by the swarm network.
+- `*args`: Additional positional arguments.
+- `**kwargs`: Additional keyword arguments.
+
+### `add_task`
+
+```python
+def add_task(self, task)
+```
+
+#### Description
+Adds a task to the task queue.
+
+#### Parameters
+- `task` (_type_): The task to be added to the queue.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+agent = Agent()
+swarm = SwarmNetwork(agents=[agent])
+swarm.add_task("task")
+```
+
+### `async_add_task`
+
+```python
+async def async_add_task(self, task)
+```
+
+#### Description
+Adds a task to the task queue asynchronously.
+
+#### Parameters
+- `task` (_type_): The task to be added to the queue.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+agent = Agent()
+swarm = SwarmNetwork(agents=[agent])
+await swarm.async_add_task("task")
+```
+
+### `run_single_agent`
+
+```python
+def run_single_agent(self, agent_id, task: Optional[str], *args, **kwargs)
+```
+
+#### Description
+Runs a task on a specific agent by ID.
+
+#### Parameters
+- `agent_id` (_type_): The ID of the agent.
+- `task` (str, optional): The task to be executed by the agent.
+- `*args`: Additional positional arguments.
+- `**kwargs`: Additional keyword arguments.
+
+#### Returns
+- `_type_`: The output of the agent running the task.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+swarm = SwarmNetwork(agents=[agent])
+result = swarm.run_single_agent(agent.id, "task")
+```
+
+### `run_many_agents`
+
+```python
+def run_many_agents(self, task: Optional[str] = None, *args, **kwargs) -> List
+```
+
+#### Description
+Runs a task on all agents in the pool.
+
+#### Parameters
+- `task` (str, optional): The task to be executed by the agents.
+- `*args`: Additional positional arguments.
+- `**kwargs`: Additional keyword arguments.
+
+#### Returns
+- `List`: The output of all agents running the task.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+# Initialize the agent
+agent2 = Agent(
+ agent_name="ROTH-IRA-AGENT",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+swarm = SwarmNetwork(agents=[agent, agent2])
+results = swarm.run_many_agents("task")
+```
+
+### `list_agents`
+
+```python
+def list_agents(self)
+```
+
+#### Description
+Lists all agents in the pool.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+# Initialize the agent
+agent2 = Agent(
+ agent_name="ROTH-IRA-AGENT",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+swarm = SwarmNetwork(agents=[agent2])
+swarm.list_agents()
+```
+
+### `get_agent`
+
+```python
+def get_agent(self, agent_id)
+```
+
+#### Description
+Gets an agent by ID.
+
+#### Parameters
+- `agent_id` (_type_): The ID of the agent to retrieve.
+
+#### Returns
+- `_type_`: The agent with the specified ID.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+# Initialize the agent
+agent2 = Agent(
+ agent_name="ROTH-IRA-AGENT",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+swarm = SwarmNetwork(agents=[agent2])
+retrieved_agent = swarm.get_agent(agent2.id)
+```
+
+### `add_agent`
+
+```python
+def add_agent(self, agent: Agent)
+```
+
+#### Description
+Adds an agent to the agent pool.
+
+#### Parameters
+- `agent` (_type_): The agent to be added to the pool.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+# Initialize the agent
+agent2 = Agent(
+ agent_name="ROTH-IRA-AGENT",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+swarm = SwarmNetwork(agents=[])
+swarm.add_agent(agent2)
+```
+
+### `remove_agent`
+
+```python
+def remove_agent(self, agent_id)
+```
+
+#### Description
+Removes an agent from the agent pool.
+
+#### Parameters
+- `agent_id` (_type_): The ID of the agent to be removed.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+# Initialize the agent
+agent2 = Agent(
+ agent_name="ROTH-IRA-AGENT",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+swarm = SwarmNetwork(agents=[agent2])
+swarm.remove_agent(agent2.id)
+```
+
+### `async_remove_agent`
+
+```python
+async def async_remove_agent(self, agent_id)
+```
+
+#### Description
+Removes an agent from the agent pool asynchronously.
+
+#### Parameters
+- `agent_id` (_type_): The ID of the agent to be removed.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+# Initialize the agent
+agent2 = Agent(
+ agent_name="ROTH-IRA-AGENT",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+swarm = SwarmNetwork(agents=[agent2])
+await swarm.async_remove_agent(agent2.id)
+```
+
+### `scale_up`
+
+```python
+def scale_up(self, num_agents: int = 1)
+```
+
+#### Description
+Scales up the agent pool by adding new agents.
+
+#### Parameters
+- `num_agents` (int, optional): The number of agents to add. Defaults to 1.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+# Initialize the agent
+agent2 = Agent(
+ agent_name="ROTH-IRA-AGENT",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+swarm = SwarmNetwork(agents=[agent2])
+swarm.scale_up(2)
+```
+
+### `scale_down`
+
+```python
+def scale_down(self, num_agents: int = 1)
+```
+
+#### Description
+Scales down the agent pool by removing agents.
+
+#### Parameters
+- `num_agents` (int, optional): The number of agents to remove. Defaults to 1.
+
+#### Example
+
+```python
+from swarms.structs.agent import Agent
+from swarms.structs.swarm_net import SwarmNetwork
+
+# Initialize the agent
+agent2 = Agent(
+ agent_name="ROTH-IRA-AGENT",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+swarm = SwarmNetwork(agents=[agent2])
+swarm.scale_down(1)
+```
+
+### `run`
+
+#### Description
+Runs the swarm network, starting the FastAPI application.
+
+#### Example
+
+```python
+
+import os
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent struct
+from swarms import Agent, OpenAIChat, SwarmNetwork
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(
+ temperature=0.5,
+ openai_api_key=api_key,
+)
+
+## Initialize the workflow
+agent = Agent(llm=llm, max_loops=1, agent_name="Social Media Manager")
+agent2 = Agent(llm=llm, max_loops=1, agent_name=" Product Manager")
+agent3 = Agent(llm=llm, max_loops=1, agent_name="SEO Manager")
+
+
+# Load the swarmnet with the agents
+swarmnet = SwarmNetwork(
+ agents=[agent, agent2, agent3],
+)
+
+# List the agents in the swarm network
+out = swarmnet.list_agents()
+print(out)
+
+# Run the workflow on a task
+out = swarmnet.run_single_agent(
+ agent2.id, "Generate a 10,000 word blog on health and wellness."
+)
+print(out)
+
+
+# Run all the agents in the swarm network on a task
+out = swarmnet.run_many_agents("Generate a 10,000 word blog on health and wellness.")
+print(out)
+```
+
+## Additional Information and Tips
+
+- **Error Handling**: Make use of try-except blocks to handle potential errors when adding tasks, running tasks, and managing agents.
+- **Logging**: Enable logging to track the activity and status of the swarm network.
+- **API**: The provided API allows for easy interaction with the swarm network and can be extended as needed.
+- **Asynchronous Operations**: Utilize the asynchronous methods for non-blocking operations, especially in a production environment.
+- **Scaling**: Adjust the scaling thresholds (`idle_threshold` and `busy_threshold`) based on the specific needs and workload patterns.
+
+## References and Resources
+
+- [Python Queue Documentation](https://docs.python.org/3/library/queue.html)
+- [Threading in Python](https://docs.python.org/3/library/threading.html)
+- [FastAPI Documentation](https://fastapi.tiangolo.com/)
+- [Tenacity Documentation](https://tenacity.readthedocs.io/en/latest/)
+
+By following this documentation, users can effectively manage and utilize the `SwarmNetwork` class to handle dynamic workloads and maintain an efficient pool of agents.
diff --git a/docs/swarms/structs/task.md b/docs/swarms/structs/task.md
new file mode 100644
index 00000000..715719ef
--- /dev/null
+++ b/docs/swarms/structs/task.md
@@ -0,0 +1,339 @@
+# Task Class Documentation
+
+The `Task` class is a pivotal component designed for managing tasks in a sequential workflow. This class allows for the execution of tasks using various agents, which can be callable objects or specific instances of the `Agent` class. It supports the scheduling of tasks, handling their dependencies, and setting conditions and actions that govern their execution.
+
+Key features of the `Task` class include:
+- Executing tasks with specified agents and handling their results.
+- Scheduling tasks to run at specified times.
+- Setting triggers, actions, and conditions for tasks.
+- Managing task dependencies and priorities.
+- Providing a history of task executions for tracking purposes.
+
+## Class Definition
+
+The `Task` class is defined as follows:
+
+
+### Attributes
+
+| Attribute | Type | Description |
+|----------------|-----------------------------|---------------------------------------------------------------------------------------|
+| `agent` | `Union[Callable, Agent]` | The agent or callable object to run the task. |
+| `description` | `str` | Description of the task. |
+| `result` | `Any` | Result of the task. |
+| `history` | `List[Any]` | History of the task. |
+| `schedule_time`| `datetime` | Time to schedule the task. |
+| `scheduler` | `sched.scheduler` | Scheduler to schedule the task. |
+| `trigger` | `Callable` | Trigger to run the task. |
+| `action` | `Callable` | Action to run the task. |
+| `condition` | `Callable` | Condition to run the task. |
+| `priority` | `int` | Priority of the task. |
+| `dependencies` | `List[Task]` | List of tasks that need to be completed before this task can be executed. |
+| `args` | `List[Any]` | Arguments to pass to the agent or callable object. |
+| `kwargs` | `Dict[str, Any]` | Keyword arguments to pass to the agent or callable object. |
+
+## Methods
+
+### `execute(self, *args, **kwargs)`
+
+Executes the task by calling the agent or model with the specified arguments and keyword arguments. If a condition is set, the task will only execute if the condition returns `True`.
+
+#### Parameters
+- `args`: Arguments to pass to the agent or callable object.
+- `kwargs`: Keyword arguments to pass to the agent or callable object.
+
+#### Examples
+
+```python
+>>> from swarms.structs import Task, Agent
+>>> from swarms.models import OpenAIChat
+>>> agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False)
+>>> task = Task(description="What's the weather in Miami?", agent=agent)
+>>> task.execute()
+>>> task.result
+```
+
+### `handle_scheduled_task(self)`
+
+Handles the execution of a scheduled task. If the schedule time is not set or has already passed, the task is executed immediately. Otherwise, the task is scheduled to be executed at the specified schedule time.
+
+#### Examples
+
+```python
+>>> task.schedule_time = datetime.now() + timedelta(seconds=10)
+>>> task.handle_scheduled_task()
+```
+
+### `set_trigger(self, trigger: Callable)`
+
+Sets the trigger for the task.
+
+#### Parameters
+- `trigger` (`Callable`): The trigger to set.
+
+#### Examples
+
+```python
+>>> def my_trigger():
+>>> print("Trigger executed")
+>>> task.set_trigger(my_trigger)
+```
+
+### `set_action(self, action: Callable)`
+
+Sets the action for the task.
+
+#### Parameters
+- `action` (`Callable`): The action to set.
+
+#### Examples
+
+```python
+>>> def my_action():
+>>> print("Action executed")
+>>> task.set_action(my_action)
+```
+
+### `set_condition(self, condition: Callable)`
+
+Sets the condition for the task.
+
+#### Parameters
+- `condition` (`Callable`): The condition to set.
+
+#### Examples
+
+```python
+>>> def my_condition():
+>>> print("Condition checked")
+>>> return True
+>>> task.set_condition(my_condition)
+```
+
+### `is_completed(self)`
+
+Checks whether the task has been completed.
+
+#### Returns
+- `bool`: `True` if the task has been completed, `False` otherwise.
+
+#### Examples
+
+```python
+>>> task.is_completed()
+```
+
+### `add_dependency(self, task)`
+
+Adds a task to the list of dependencies.
+
+#### Parameters
+- `task` (`Task`): The task to add as a dependency.
+
+#### Examples
+
+```python
+>>> dependent_task = Task(description="Dependent Task")
+>>> task.add_dependency(dependent_task)
+```
+
+### `set_priority(self, priority: int)`
+
+Sets the priority of the task.
+
+#### Parameters
+- `priority` (`int`): The priority to set.
+
+#### Examples
+
+```python
+>>> task.set_priority(5)
+```
+
+### `check_dependency_completion(self)`
+
+Checks whether all the dependencies have been completed.
+
+#### Returns
+- `bool`: `True` if all the dependencies have been completed, `False` otherwise.
+
+#### Examples
+
+```python
+>>> task.check_dependency_completion()
+```
+
+### `context(self, task: "Task" = None, context: List["Task"] = None, *args, **kwargs)`
+
+Sets the context for the task. For a sequential workflow, it sequentially adds the context of the previous task in the list.
+
+#### Parameters
+- `task` (`Task`, optional): The task whose context is to be set.
+- `context` (`List[Task]`, optional): The list of tasks to set the context.
+
+#### Examples
+
+```python
+>>> task1 = Task(description="Task 1")
+>>> task2 = Task(description="Task 2")
+>>> task2.context(context=[task1])
+```
+
+## Usage Examples
+
+### Basic Usage
+
+```python
+import os
+from dotenv import load_dotenv
+from swarms import Agent, OpenAIChat, Task
+
+# Load the environment variables
+load_dotenv()
+
+# Define a function to be used as the action
+def my_action():
+ print("Action executed")
+
+# Define a function to be used as the condition
+def my_condition():
+ print("Condition checked")
+ return True
+
+# Create an agent
+agent = Agent(
+ llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
+ max_loops=1,
+ dashboard=False,
+)
+
+# Create a task
+task = Task(
+ description="Generate a report on the top 3 biggest expenses for small businesses and how businesses can save 20%",
+ agent=agent,
+)
+
+# Set the action and condition
+task.set_action(my_action)
+task.set_condition(my_condition)
+
+# Execute the task
+print("Executing task...")
+task.run()
+
+# Check if the task is completed
+if task.is_completed():
+ print("Task completed")
+else:
+ print("Task not completed")
+
+# Output the result of the task
+print(f"Task result: {task.result}")
+```
+
+### Scheduled Task Execution
+
+```python
+from datetime import datetime, timedelta
+import os
+from dotenv import load_dotenv
+from swarms import Agent, OpenAIChat, Task
+
+# Load the environment variables
+load_dotenv()
+
+# Create an agent
+agent = Agent(
+ llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
+ max_loops=1,
+ dashboard=False,
+)
+
+# Create a task
+task = Task(
+ description="Scheduled task example",
+ agent=agent,
+ schedule_time=datetime.now() + timedelta(seconds=10)
+)
+
+# Handle scheduled task
+task.handle_scheduled_task()
+```
+
+### Task with Dependencies
+
+```python
+import os
+from dotenv import load_dotenv
+from swarms import Agent, OpenAIChat, Task
+
+# Load the environment variables
+load_dotenv()
+
+# Create agents
+agent1 = Agent(
+ llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
+ max_loops=1,
+ dashboard=False,
+)
+agent2 = Agent(
+ llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
+ max_loops=1,
+ dashboard=False,
+)
+
+# Create tasks
+task1 = Task(description="First task", agent=agent1)
+task2 = Task(description="Second task", agent=agent2)
+
+# Add dependency
+task2.add_dependency(task1)
+
+# Execute tasks
+print("Executing first task...")
+task1.run()
+
+print("Executing second task...")
+task2.run()
+
+# Check if tasks are completed
+print(f"Task 1 completed: {task1.is_completed()}")
+print(f"Task 2 completed: {task2.is_completed()}")
+```
+
+### Task Context
+
+```python
+import os
+from dotenv import load_dotenv
+from swarms import Agent, OpenAIChat, Task
+
+# Load the environment variables
+load_dotenv()
+
+# Create an agent
+agent = Agent(
+ llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
+    max_loops=1,
+ dashboard=False,
+)
+
+# Create tasks
+task1 = Task(description="First task", agent=agent)
+task2 = Task(description="Second task", agent=agent)
+
+# Set context for the second task
+task2.context(context=[task1])
+
+# Execute tasks
+print("Executing first task...")
+task1.run()
+
+print("Executing second task...")
+task2.run()
+
+# Output the context of the second task
+print(f"Task 2 context: {task2.history}")
+```
diff --git a/docs/swarms/structs/taskinput.md b/docs/swarms/structs/taskinput.md
new file mode 100644
index 00000000..8e9ed33f
--- /dev/null
+++ b/docs/swarms/structs/taskinput.md
@@ -0,0 +1,84 @@
+## Module/Class Name: TaskInput
+
+The `TaskInput` class is designed to handle the input parameters for a task. It is an abstract class that serves as the base model for input data manipulation.
+
+### Overview and Introduction
+The `TaskInput` class is an essential component of the `swarms.structs` library, allowing users to define and pass input parameters to tasks. It is crucial for ensuring the correct and structured input to various tasks and processes within the library.
+
+### Class Definition
+
+#### TaskInput Class:
+- Parameters:
+ - `__root__` (Any): The input parameters for the task. Any value is allowed.
+
+### Disclaimer:
+It is important to note that the `TaskInput` class extends the `BaseModel` from the `pydantic` library. This means that it inherits all the properties and methods of the `BaseModel`.
+
+### Functionality and Usage
+The `TaskInput` class encapsulates the input parameters in a structured format. It allows for easy validation and manipulation of input data.
+
+#### Usage Example 1: Using TaskInput for Debugging
+```python
+from pydantic import BaseModel, Field
+
+from swarms.structs import TaskInput
+
+
+class DebugInput(TaskInput):
+ debug: bool
+
+
+# Creating an instance of DebugInput
+debug_params = DebugInput(__root__={"debug": True})
+
+# Accessing the input parameters
+print(debug_params.debug) # Output: True
+```
+
+#### Usage Example 2: Using TaskInput for Task Modes
+```python
+from pydantic import BaseModel, Field
+
+from swarms.structs import TaskInput
+
+
+class ModeInput(TaskInput):
+ mode: str
+
+
+# Creating an instance of ModeInput
+mode_params = ModeInput(__root__={"mode": "benchmarks"})
+
+# Accessing the input parameters
+print(mode_params.mode) # Output: benchmarks
+```
+
+#### Usage Example 3: Using TaskInput with Arbitrary Parameters
+```python
+from pydantic import BaseModel, Field
+
+from swarms.structs import TaskInput
+
+
+class ArbitraryInput(TaskInput):
+ message: str
+ quantity: int
+
+
+# Creating an instance of ArbitraryInput
+arbitrary_params = ArbitraryInput(__root__={"message": "Hello, world!", "quantity": 5})
+
+# Accessing the input parameters
+print(arbitrary_params.message) # Output: Hello, world!
+print(arbitrary_params.quantity) # Output: 5
+```
+
+### Additional Information and Tips
+- The `TaskInput` class can be extended to create custom input models with specific parameters tailored to individual tasks.
+- The `Field` class from `pydantic` can be used to specify metadata and constraints for the input parameters.
+
+### References and Resources
+- Official `pydantic` Documentation: [https://pydantic-docs.helpmanual.io/](https://pydantic-docs.helpmanual.io/)
+- Additional resources on data modelling with `pydantic`: [https://www.tiangolo.com/blog/2021/02/16/real-python-tutorial-modern-fastapi-pydantic/](https://www.tiangolo.com/blog/2021/02/16/real-python-tutorial-modern-fastapi-pydantic/)
+
+This documentation presents the `TaskInput` class, its usage, and practical examples for creating and handling input parameters within the `swarms.structs` library.
diff --git a/docs/swarms/structs/yaml_model.md b/docs/swarms/structs/yaml_model.md
new file mode 100644
index 00000000..010e5e85
--- /dev/null
+++ b/docs/swarms/structs/yaml_model.md
@@ -0,0 +1,249 @@
+# YamlModel: A Pydantic Model for YAML Data
+
+The `YamlModel` class, derived from `BaseModel` in Pydantic, offers a convenient way to work with YAML data in your Python applications. It provides methods for serialization (converting to YAML), deserialization (creating an instance from YAML), and schema generation. This documentation will delve into the functionalities of `YamlModel` and guide you through its usage with illustrative examples.
+
+### Purpose and Functionality
+
+The primary purpose of `YamlModel` is to streamline the interaction between your Python code and YAML data. It accomplishes this by:
+
+* **Serialization:** Transforming a `YamlModel` instance into a YAML string representation using the `to_yaml()` method.
+* **Deserialization:** Constructing a `YamlModel` instance from a provided YAML string using the `from_yaml()` class method.
+* **JSON to YAML Conversion:** Facilitating the conversion of JSON data to YAML format through the `json_to_yaml()` static method.
+* **Saving to YAML File:** Enabling the storage of `YamlModel` instances as YAML files using the `save_to_yaml()` method.
+* (Future Implementation) **Schema Generation:** The `create_yaml_schema()` class method (not yet implemented but included for future reference) will generate a YAML schema that reflects the structure of the `YamlModel` class and its fields.
+
+### Class Definition and Arguments
+
+The `YamlModel` class inherits from Pydantic's `BaseModel` class. You can define your custom YAML models by creating subclasses of `YamlModel` and specifying your data fields within the class definition. Here's the breakdown of the `YamlModel` class and its methods:
+
+```python
+class YamlModel(BaseModel):
+ """
+ A Pydantic model class for working with YAML data.
+ """
+
+ def to_yaml(self):
+ """
+ Serialize the Pydantic model instance to a YAML string.
+ """
+ return yaml.safe_dump(self.dict(), sort_keys=False)
+
+ @classmethod
+ def from_yaml(cls, yaml_str: str):
+ """
+ Create an instance of the class from a YAML string.
+
+ Args:
+ yaml_str (str): The YAML string to parse.
+
+ Returns:
+ cls: An instance of the class with attributes populated from the YAML data.
+ Returns None if there was an error loading the YAML data.
+ """
+ # ...
+
+ @staticmethod
+ def json_to_yaml(json_str: str):
+ """
+ Convert a JSON string to a YAML string.
+ """
+ # ...
+
+ def save_to_yaml(self, filename: str):
+ """
+ Save the Pydantic model instance as a YAML file.
+ """
+ # ...
+
+ # TODO: Implement a method to create a YAML schema from the model fields
+ # @classmethod
+ # def create_yaml_schema(cls):
+ # # ...
+```
+
+**Arguments:**
+
+* `self` (implicit): Refers to the current instance of the `YamlModel` class.
+* `yaml_str` (str): The YAML string used for deserialization in the `from_yaml()` method.
+* `json_str` (str): The JSON string used for conversion to YAML in the `json_to_yaml()` method.
+* `filename` (str): The filename (including path) for saving the YAML model instance in the `save_to_yaml()` method.
+
+### Detailed Method Descriptions
+
+**1. to_yaml()**
+
+This method transforms an instance of the `YamlModel` class into a YAML string representation. It utilizes the `yaml.safe_dump()` function from the `PyYAML` library to ensure secure YAML data generation. The `sort_keys=False` argument guarantees that the order of keys in the resulting YAML string remains consistent with the order of fields defined in your `YamlModel` subclass.
+
+**Example:**
+
+```python
+class User(YamlModel):
+ name: str
+ age: int
+ is_active: bool
+
+user = User(name="Bob", age=30, is_active=True)
+yaml_string = user.to_yaml()
+print(yaml_string)
+```
+
+This code will output a YAML string representation of the `user` object, resembling:
+
+```yaml
+name: Bob
+age: 30
+is_active: true
+```
+
+**2. from_yaml(cls, yaml_str)** (Class Method)
+
+The `from_yaml()` class method is responsible for constructing a `YamlModel` instance from a provided YAML string.
+
+* **Arguments:**
+ * `cls` (class): The class representing the desired YAML model (the subclass of `YamlModel` that matches the structure of the YAML data).
+ * `yaml_str` (str): The YAML string containing the data to be parsed and used for creating the model instance.
+
+* **Returns:**
+ * `cls` (instance): An instance of the specified class (`cls`) populated with the data extracted from the YAML string. If an error occurs during parsing, it returns `None`.
+
+* **Error Handling:**
+
+The `from_yaml()` method employs `yaml.safe_load()` for secure YAML parsing. It incorporates a `try-except` block to handle potential `ValueError` exceptions that might arise during the parsing process. If an error is encountered, it logs the error message and returns `None`.
+
+**Example:**
+
+```python
+class User(YamlModel):
+ name: str
+ age: int
+ is_active: bool
+
+yaml_string = """
+name: Alice
+age: 25
+is_active: false
+"""
+
+user = User.from_yaml(yaml_string)
+print(user.name) # Output: Alice
+```
+
+**3. json_to_yaml(json_str)** (Static Method)
+
+This static method in the `YamlModel` class serves the purpose of converting a JSON string into a YAML string representation.
+
+* **Arguments:**
+ * `json_str` (str): The JSON string that needs to be converted to YAML format.
+
+* **Returns:**
+ * `str`: The converted YAML string representation of the provided JSON data.
+
+* **Functionality:**
+
+The `json_to_yaml()` method leverages the `json.loads()` function to parse the JSON string into a Python dictionary. Subsequently, it utilizes `yaml.dump()` to generate the corresponding YAML string representation from the parsed dictionary.
+
+**Example:**
+
+```python
+json_string = '{"name": "Charlie", "age": 42, "is_active": true}'
+yaml_string = YamlModel.json_to_yaml(json_string)
+print(yaml_string)
+```
+
+This code snippet will convert the JSON data to a YAML string, likely resembling:
+
+```yaml
+name: Charlie
+age: 42
+is_active: true
+```
+
+**4. save_to_yaml(self, filename)**
+
+The `save_to_yaml()` method facilitates the storage of a `YamlModel` instance as a YAML file.
+
+* **Arguments:**
+ * `self` (implicit): Refers to the current instance of the `YamlModel` class that you intend to save.
+ * `filename` (str): The desired filename (including path) for the YAML file.
+
+* **Functionality:**
+
+The `save_to_yaml()` method employs the previously explained `to_yaml()` method to generate a YAML string representation of the `self` instance. It then opens the specified file in write mode (`"w"`) and writes the YAML string content to the file.
+
+**Example:**
+
+```python
+class Employee(YamlModel):
+ name: str
+ department: str
+ salary: float
+
+employee = Employee(name="David", department="Engineering", salary=95000.00)
+employee.save_to_yaml("employee.yaml")
+```
+
+This code will create a YAML file named "employee.yaml" containing the serialized representation of the `employee` object.
+
+
+### More Usage Examples
+
+```python
+class User(YamlModel):
+ name: str
+ age: int
+ is_active: bool
+
+# Create an instance of the User model
+user = User(name="Alice", age=30, is_active=True)
+
+# Serialize the User instance to YAML and print it
+yaml_string = user.to_yaml()
+print(yaml_string)
+```
+
+This code snippet demonstrates the creation of a `User` instance and its subsequent serialization to a YAML string using the `to_yaml()` method. The printed output will likely resemble:
+
+```yaml
+name: Alice
+age: 30
+is_active: true
+```
+
+### Converting JSON to YAML
+
+```python
+# Convert JSON string to YAML and print
+json_string = '{"name": "Bob", "age": 25, "is_active": false}'
+yaml_string = YamlModel.json_to_yaml(json_string)
+print(yaml_string)
+```
+
+This example showcases the conversion of a JSON string containing user data into a YAML string representation using the `json_to_yaml()` static method. The resulting YAML string might look like:
+
+```yaml
+name: Bob
+age: 25
+is_active: false
+```
+
+### Saving User Instance to YAML File
+
+```python
+# Save the User instance to a YAML file
+user.save_to_yaml("user.yaml")
+```
+
+This code demonstrates the utilization of the `save_to_yaml()` method to store the `user` instance as a YAML file named "user.yaml". The contents of the file will mirror the serialized YAML string representation of the user object.
+
+## Additional Considerations
+
+* Ensure you have the `PyYAML` library installed (`pip install pyyaml`) to leverage the YAML parsing and serialization functionalities within `YamlModel`.
+* Remember that the `create_yaml_schema()` method is not yet implemented but serves as a placeholder for future enhancements.
+* For complex data structures within your YAML models, consider leveraging Pydantic's data validation and nested model capabilities for robust data management.
+
+## Conclusion
+
+The `YamlModel` class in Pydantic offers a streamlined approach to working with YAML data in your Python projects. By employing the provided methods (`to_yaml()`, `from_yaml()`, `json_to_yaml()`, and `save_to_yaml()`), you can efficiently convert between Python objects and YAML representations, facilitating data persistence and exchange. This comprehensive documentation empowers you to effectively utilize `YamlModel` for your YAML data processing requirements.
\ No newline at end of file
diff --git a/docs/swarms/tools/build_tool.md b/docs/swarms/tools/build_tool.md
new file mode 100644
index 00000000..fb680de6
--- /dev/null
+++ b/docs/swarms/tools/build_tool.md
@@ -0,0 +1,584 @@
+### Swarms Tool Documentation
+
+A tool is a Python function designed to perform specific tasks, with clear type annotations and comprehensive docstrings. Below are examples of tools to help you get started.
+
+# Rules
+
+To create a tool in the Swarms environment, follow these rules:
+
+1. **Function Definition**:
+ - The tool must be defined as a Python function.
+ - The function should perform a specific task and be named appropriately.
+
+2. **Type Annotations**:
+ - All arguments and the return value must have type annotations.
+ - Both input and output types must be strings (`str`).
+
+3. **Docstrings**:
+ - Each function must include a comprehensive docstring that adheres to PEP 257 standards. The docstring should explain:
+ - The purpose of the function.
+ - Arguments: names, types, and descriptions.
+ - Return value: type and description.
+ - Potential exceptions that the function may raise.
+
+4. **Input and Output Types**:
+ - The function's input must be a string.
+ - The function's output must be a string.
+
+
+### Example Tools
+
+
+### Examples and Anti-Examples
+
+#### Example 1: Fetch Financial News
+
+**Correct Implementation**
+
+```python
+import requests
+import os
+
+def fetch_financial_news(query: str = "Nvidia news", num_articles: int = 5) -> str:
+ """
+ Fetches financial news from the Google News API and returns a formatted string of the top news.
+
+ Args:
+ query (str): The query term to search for news. Default is "Nvidia news".
+ num_articles (int): The number of top articles to fetch. Default is 5.
+
+ Returns:
+ str: A formatted string of the top financial news articles.
+
+ Raises:
+ ValueError: If the API response is invalid or there are no articles found.
+ requests.exceptions.RequestException: If there is an error with the request.
+ """
+ url = "https://newsapi.org/v2/everything"
+ params = {
+ "q": query,
+ "apiKey": os.getenv("NEWSAPI_KEY"),
+ "pageSize": num_articles,
+ "sortBy": "relevancy",
+ }
+
+ try:
+ response = requests.get(url, params=params)
+ response.raise_for_status()
+ data = response.json()
+
+ if "articles" not in data or len(data["articles"]) == 0:
+ raise ValueError("No articles found or invalid API response.")
+
+ articles = data["articles"]
+ formatted_articles = []
+
+ for i, article in enumerate(articles, start=1):
+ title = article.get("title", "No Title")
+ description = article.get("description", "No Description")
+ url = article.get("url", "No URL")
+ formatted_articles.append(
+ f"{i}. {title}\nDescription: {description}\nRead more: {url}\n"
+ )
+
+ return "\n".join(formatted_articles)
+
+ except requests.exceptions.RequestException as e:
+ print(f"Request Error: {e}")
+ raise
+ except ValueError as e:
+ print(f"Value Error: {e}")
+ raise
+```
+
+**Incorrect Implementation**
+
+```python
+import requests
+import os
+
+def fetch_financial_news(query="Nvidia news", num_articles=5):
+ # Fetches financial news from the Google News API and returns a formatted string of the top news.
+ url = "https://newsapi.org/v2/everything"
+ params = {
+ "q": query,
+ "apiKey": os.getenv("NEWSAPI_KEY"),
+ "pageSize": num_articles,
+ "sortBy": "relevancy",
+ }
+
+ response = requests.get(url, params=params)
+ response.raise_for_status()
+ data = response.json()
+
+ if "articles" not in data or len(data["articles"]) == 0:
+ raise ValueError("No articles found or invalid API response.")
+
+ articles = data["articles"]
+ formatted_articles = []
+
+ for i, article in enumerate(articles, start=1):
+ title = article.get("title", "No Title")
+ description = article.get("description", "No Description")
+ url = article.get("url", "No URL")
+ formatted_articles.append(
+ f"{i}. {title}\nDescription: {description}\nRead more: {url}\n"
+ )
+
+ return "\n".join(formatted_articles)
+```
+
+**Issues with Incorrect Implementation:**
+- No type annotations for arguments and return value.
+- Missing comprehensive docstring.
+
+#### Example 2: Convert Celsius to Fahrenheit
+
+**Correct Implementation**
+
+```python
+def celsius_to_fahrenheit(celsius_str: str) -> str:
+ """
+ Converts a temperature from Celsius to Fahrenheit.
+
+ Args:
+ celsius_str (str): The temperature in Celsius as a string.
+
+ Returns:
+ str: The temperature converted to Fahrenheit as a formatted string.
+
+ Raises:
+ ValueError: If the input cannot be converted to a float.
+ """
+ try:
+ celsius = float(celsius_str)
+ fahrenheit = celsius * 9/5 + 32
+ return f"{celsius}°C is {fahrenheit}°F"
+ except ValueError as e:
+ print(f"Value Error: {e}")
+ raise
+```
+
+**Incorrect Implementation**
+
+```python
+def celsius_to_fahrenheit(celsius):
+ # Converts a temperature from Celsius to Fahrenheit.
+ celsius = float(celsius)
+ fahrenheit = celsius * 9/5 + 32
+ return f"{celsius}°C is {fahrenheit}°F"
+```
+
+**Issues with Incorrect Implementation:**
+- No type annotations for arguments and return value.
+- Missing comprehensive docstring.
+- Input type is not enforced as string.
+
+#### Example 3: Calculate Compound Interest
+
+**Correct Implementation**
+
+```python
+def calculate_compound_interest(principal_str: str, rate_str: str, time_str: str, n_str: str) -> str:
+ """
+ Calculates compound interest.
+
+ Args:
+ principal_str (str): The initial amount of money as a string.
+ rate_str (str): The annual interest rate (decimal) as a string.
+ time_str (str): The time the money is invested for in years as a string.
+ n_str (str): The number of times that interest is compounded per year as a string.
+
+ Returns:
+ str: The amount of money accumulated after n years, including interest.
+
+ Raises:
+ ValueError: If any of the inputs cannot be converted to the appropriate type or are negative.
+ """
+ try:
+ principal = float(principal_str)
+ rate = float(rate_str)
+ time = float(time_str)
+ n = int(n_str)
+
+ if principal < 0 or rate < 0 or time < 0 or n < 0:
+ raise ValueError("Inputs must be non-negative.")
+
+ amount = principal * (1 + rate / n) ** (n * time)
+ return f"The amount after {time} years is {amount:.2f}"
+ except ValueError as e:
+ print(f"Value Error: {e}")
+ raise
+```
+
+**Incorrect Implementation**
+
+```python
+def calculate_compound_interest(principal, rate, time, n):
+ # Calculates compound interest.
+ principal = float(principal)
+ rate = float(rate)
+ time = float(time)
+ n = int(n)
+
+ if principal < 0 or rate < 0 or time < 0 or n < 0:
+ raise ValueError("Inputs must be non-negative.")
+
+ amount = principal * (1 + rate / n) ** (n * time)
+ return f"The amount after {time} years is {amount:.2f}"
+```
+
+**Issues with Incorrect Implementation:**
+- No type annotations for arguments and return value.
+- Missing comprehensive docstring.
+- Input types are not enforced as strings.
+
+By following these rules and using the examples provided, you can create robust and well-documented tools in the Swarms environment. Ensure that all functions include proper type annotations, comprehensive docstrings, and that both input and output types are strings.
+
+#### Example Tool 4: Reverse a String
+
+**Functionality**: Reverses a given string.
+
+```python
+def reverse_string(s: str) -> str:
+ """
+ Reverses a given string.
+
+ Args:
+ s (str): The string to reverse.
+
+ Returns:
+ str: The reversed string.
+
+ Raises:
+ TypeError: If the input is not a string.
+ """
+ try:
+ if not isinstance(s, str):
+ raise TypeError("Input must be a string.")
+ return s[::-1]
+ except TypeError as e:
+ print(f"Type Error: {e}")
+ raise
+```
+
+#### Example Tool 5: Check Palindrome
+
+**Functionality**: Checks if a given string is a palindrome.
+
+```python
+def is_palindrome(s: str) -> str:
+ """
+ Checks if a given string is a palindrome.
+
+ Args:
+ s (str): The string to check.
+
+ Returns:
+ str: A message indicating whether the string is a palindrome or not.
+
+ Raises:
+ TypeError: If the input is not a string.
+ """
+ try:
+ if not isinstance(s, str):
+ raise TypeError("Input must be a string.")
+ normalized_str = ''.join(filter(str.isalnum, s)).lower()
+ is_palindrome = normalized_str == normalized_str[::-1]
+ return f"The string '{s}' is {'a palindrome' if is_palindrome else 'not a palindrome'}."
+ except TypeError as e:
+ print(f"Type Error: {e}")
+ raise
+```
+
+#### Example Tool 6: Fetch Current Weather
+
+**Functionality**: Fetches the current weather for a given city from the OpenWeatherMap API.
+
+```python
+import requests
+import os
+
+def fetch_current_weather(city: str) -> str:
+ """
+ Fetches the current weather for a given city from the OpenWeatherMap API.
+
+ Args:
+ city (str): The name of the city to fetch the weather for.
+
+ Returns:
+ str: A formatted string of the current weather in the specified city.
+
+ Raises:
+ ValueError: If the API response is invalid or the city is not found.
+ requests.exceptions.RequestException: If there is an error with the request.
+ """
+ url = "http://api.openweathermap.org/data/2.5/weather"
+ params = {
+ "q": city,
+ "appid": os.getenv("OPENWEATHERMAP_KEY"),
+ "units": "metric",
+ }
+
+ try:
+ response = requests.get(url, params=params)
+ response.raise_for_status()
+ data = response.json()
+
+ if "weather" not in data or "main" not in data:
+ raise ValueError("Invalid API response or city not found.")
+
+ weather_description = data["weather"][0]["description"]
+ temperature = data["main"]["temp"]
+ return f"The current weather in {city} is {weather_description} with a temperature of {temperature}°C."
+
+ except requests.exceptions.RequestException as e:
+ print(f"Request Error: {e}")
+ raise
+ except ValueError as e:
+ print(f"Value Error: {e}")
+ raise
+```
+
+By following the examples provided, you can create your own tools to perform various tasks in the Swarms environment. Ensure each function includes type annotations, comprehensive docstrings, and appropriate error handling to make your tools robust and easy to use.
+
+
+
+
+
+## Integrate tools into Agent
+To integrate tools into an agent, simply pass a callable function with type annotations and a docstring into the agent class.
+
+```python
+
+
+from swarms import Agent, OpenAIChat # ChromaDB
+import subprocess
+
+# Model
+llm = OpenAIChat(
+ temperature=0.1,
+)
+
+
+# Tools
+def terminal(
+ code: str,
+):
+ """
+ Run code in the terminal.
+
+ Args:
+ code (str): The code to run in the terminal.
+
+ Returns:
+ str: The output of the code.
+ """
+ out = subprocess.run(
+ code, shell=True, capture_output=True, text=True
+ ).stdout
+ return str(out)
+
+
+def browser(query: str):
+ """
+ Search the query in the browser with the `browser` tool.
+
+ Args:
+ query (str): The query to search in the browser.
+
+ Returns:
+ str: The search results.
+ """
+ import webbrowser
+
+ url = f"https://www.google.com/search?q={query}"
+ webbrowser.open(url)
+ return f"Searching for {query} in the browser."
+
+
+def create_file(file_path: str, content: str):
+ """
+ Create a file using the file editor tool.
+
+ Args:
+ file_path (str): The path to the file.
+ content (str): The content to write to the file.
+
+ Returns:
+ str: The result of the file creation operation.
+ """
+ with open(file_path, "w") as file:
+ file.write(content)
+ return f"File {file_path} created successfully."
+
+
+def file_editor(file_path: str, mode: str, content: str):
+ """
+ Edit a file using the file editor tool.
+
+ Args:
+ file_path (str): The path to the file.
+ mode (str): The mode to open the file in.
+ content (str): The content to write to the file.
+
+ Returns:
+ str: The result of the file editing operation.
+ """
+ with open(file_path, mode) as file:
+ file.write(content)
+ return f"File {file_path} edited successfully."
+
+
+# Agent
+agent = Agent(
+ agent_name="Devin",
+ system_prompt=(
+ "Autonomous agent that can interact with humans and other"
+ " agents. Be Helpful and Kind. Use the tools provided to"
+ " assist the user. Return all code in markdown format."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ # long_term_memory=chromadb,
+ metadata_output_type="json",
+ # List of schemas that the agent can handle
+ # list_base_models=[tool_schema],
+ function_calling_format_type="OpenAI",
+ function_calling_type="json", # or soon yaml
+)
+
+# Run the agent
+agent.run("Create a new file for a plan to take over the world.")
+
+```
+
+
+## Example 2
+
+
+```python
+
+import os
+
+import requests
+
+from swarms import Agent, OpenAIChat
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+
+def fetch_financial_news(
+ query: str = "Nvidia news", num_articles: int = 5
+) -> str:
+ """
+ Fetches financial news from the Google News API and returns a formatted string of the top news.
+
+ Args:
+ query (str): The query term to search for news. Default is "Nvidia news".
+
+ Returns:
+ str: A formatted string of the top financial news articles.
+
+ Raises:
+ ValueError: If the API response is invalid or there are no articles found.
+ requests.exceptions.RequestException: If there is an error with the request.
+ """
+ url = "https://newsapi.org/v2/everything"
+ params = {
+ "q": query,
+ "apiKey": os.getenv("NEWSAPI_KEY"),
+ "pageSize": num_articles,
+ "sortBy": "relevancy",
+ }
+
+ try:
+ response = requests.get(url, params=params)
+ response.raise_for_status()
+ data = response.json()
+
+ if "articles" not in data or len(data["articles"]) == 0:
+ raise ValueError("No articles found or invalid API response.")
+
+ articles = data["articles"]
+ formatted_articles = []
+
+ for i, article in enumerate(articles, start=1):
+ title = article.get("title", "No Title")
+ description = article.get("description", "No Description")
+ url = article.get("url", "No URL")
+ formatted_articles.append(
+ f"{i}. {title}\nDescription: {description}\nRead more: {url}\n"
+ )
+
+ return "\n".join(formatted_articles)
+
+ except requests.exceptions.RequestException as e:
+ print(f"Request Error: {e}")
+ raise
+ except ValueError as e:
+ print(f"Value Error: {e}")
+ raise
+
+
+# # Example usage:
+# os.environ["NEWSAPI_KEY"] = "your_newsapi_key_here"
+# print(fetch_financial_news())
+
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ # system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops=2,
+ autosave=True,
+ # dynamic_temperature_enabled=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ # interactive=True, # Set to False to disable interactive mode
+ dynamic_temperature_enabled=True,
+ saved_state_path="finance_agent.json",
+ tools=[fetch_financial_news],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # tool_schema=
+ # tools
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+# Run the agent
+response = agent("What are the latest financial news on Nvidia?")
+print(response)
+
+
+```
diff --git a/docs/swarms/tools/decorator.md b/docs/swarms/tools/decorator.md
new file mode 100644
index 00000000..5d4acb13
--- /dev/null
+++ b/docs/swarms/tools/decorator.md
@@ -0,0 +1,92 @@
+
+# Tool Decorator Documentation
+
+## Module Overview
+
+The `tool` decorator is designed to enhance functions by automatically generating an OpenAI function schema based on the function's signature and provided metadata. This schema can be outputted in different formats based on the decorator's arguments. The primary use of this decorator is to facilitate the integration of Python functions with external systems that require structured metadata, making it ideal for creating machine-readable descriptions of functions.
+
+## Key Features
+
+- **Automatic Schema Generation:** Generates a schema based on the function's signature.
+- **Flexible Output Formats:** Supports returning the schema as a dictionary, string, or YAML (if integrated).
+- **Logging Support:** Includes logging of function calls and errors, aiding in debugging and monitoring.
+
+## Installation and Setup
+
+Before using the `tool` decorator, ensure that the required libraries are installed and configured. Here's a basic setup:
+
+```bash
+$ pip install -U swarms
+```
+
+## Decorator Definition
+
+### Signature
+
+```python
+def tool(name: str = None, description: str = None, return_dict: bool = True, verbose: bool = True, return_string: bool = False, return_yaml: bool = False):
+```
+
+### Parameters
+
+| Parameter | Type | Default | Description |
+|------------------|---------|---------|--------------------------------------------------------|
+| `name` | str | None | Name of the OpenAI function. Optional. |
+| `description` | str | None | Description of the OpenAI function. Optional. |
+| `return_dict` | bool | True | Whether to return the schema as a dictionary. |
+| `verbose` | bool | True | Enables verbose output. |
+| `return_string` | bool | False | Whether to return the schema as a string. |
+| `return_yaml` | bool | False | Whether to return the schema in YAML format. |
+
+## Functionality and Usage
+
+### Basic Usage
+
+Here is an example of using the `tool` decorator to enhance a simple function:
+
+```python
+@tool(name="ExampleFunction", description="Demonstrates the use of the tool decorator")
+def example_function(param1: int, param2: str):
+ print(f"Received param1: {param1}, param2: {param2}")
+
+example_function(123, "abc")
+```
+
+### Advanced Usage
+
+#### Returning Schema as String
+
+To get the schema as a string instead of a dictionary:
+
+```python
+@tool(name="StringSchemaFunction", description="Returns schema as string", return_dict=False, return_string=True)
+def another_function():
+ pass
+
+print(another_function()) # Outputs the schema as a string
+```
+
+#### Handling Exceptions
+
+Demonstrating error handling with the decorator:
+
+```python
+@tool(name="ErrorHandlingFunction", description="Handles errors gracefully")
+def error_prone_function():
+ raise ValueError("An example error")
+
+try:
+ error_prone_function()
+except Exception as e:
+ print(f"Caught an error: {e}")
+```
+
+## Additional Information and Tips
+
+- **Logging:** The decorator logs all function calls and exceptions. Make sure to configure the `loguru` logger accordingly to capture these logs.
+- **Assertion Errors:** The decorator performs type checks on the arguments, and if the types do not match, it will raise an assertion error.
+
+## References
+
+- For more on decorators: [Python Decorators Documentation](https://docs.python.org/3/glossary.html#term-decorator)
+- Loguru library for logging: [Loguru Documentation](https://loguru.readthedocs.io/en/stable/)
diff --git a/docs/swarms/tools/main.md b/docs/swarms/tools/main.md
new file mode 100644
index 00000000..9c749412
--- /dev/null
+++ b/docs/swarms/tools/main.md
@@ -0,0 +1,387 @@
+# The Swarms Tool System: Functions, Pydantic BaseModels as Tools, and Radical Customization
+
+
+This guide provides an in-depth look at the Swarms Tool System, focusing on its functions, the use of Pydantic BaseModels as tools, and the extensive customization options available. Aimed at developers, this documentation highlights how the Swarms framework works and offers detailed examples of creating and customizing tools and agents, specifically for accounting tasks.
+
+The Swarms Tool System is a flexible and extensible component of the Swarms framework that allows for the creation, registration, and utilization of various tools. These tools can perform a wide range of tasks and are integrated into agents to provide specific functionalities. The system supports multiple ways to define tools, including using Pydantic BaseModels, functions, and dictionaries.
+
+### Architecture
+
+The architecture of the Swarms Tool System is designed to be highly modular. It consists of the following main components:
+
+1. **Agents:** The primary entities that execute tasks.
+2. **Tools:** Functions or classes that perform specific operations.
+3. **Schemas:** Definitions of input and output data formats using Pydantic BaseModels.
+
+### Key Concepts
+
+#### Tools
+
+Tools are the core functional units within the Swarms framework. They can be defined in various ways:
+
+- **Pydantic BaseModels**: Tools can be defined using Pydantic BaseModels to ensure data validation and serialization.
+- **Functions**: Tools can be simple or complex functions.
+- **Dictionaries**: Tools can be represented as dictionaries for flexibility.
+
+#### Agents
+
+Agents utilize tools to perform tasks. They are configured with a set of tools and schemas, and they execute the tools based on the input they receive.
+
+## Detailed Documentation
+
+### Tool Definition
+
+#### Using Pydantic BaseModels
+
+Pydantic BaseModels provide a structured way to define tool inputs and outputs. They ensure data validation and serialization, making them ideal for complex data handling.
+
+**Example:**
+
+Define Pydantic BaseModels for accounting tasks:
+
+```python
+from pydantic import BaseModel
+
+class CalculateTax(BaseModel):
+ income: float
+
+class GenerateInvoice(BaseModel):
+ client_name: str
+ amount: float
+ date: str
+
+class SummarizeExpenses(BaseModel):
+ expenses: list[dict]
+```
+
+Define tool functions using these models:
+
+```python
+def calculate_tax(data: CalculateTax) -> dict:
+ tax_rate = 0.3 # Example tax rate
+ tax = data.income * tax_rate
+ return {"income": data.income, "tax": tax}
+
+def generate_invoice(data: GenerateInvoice) -> dict:
+ invoice = {
+ "client_name": data.client_name,
+ "amount": data.amount,
+ "date": data.date,
+ "invoice_id": "INV12345"
+ }
+ return invoice
+
+def summarize_expenses(data: SummarizeExpenses) -> dict:
+ total_expenses = sum(expense['amount'] for expense in data.expenses)
+ return {"total_expenses": total_expenses}
+```
+
+#### Using Functions Directly
+
+Tools can also be defined directly as functions without using Pydantic models. This approach is suitable for simpler tasks where complex validation is not required.
+
+**Example:**
+
+```python
+def basic_tax_calculation(income: float) -> dict:
+ tax_rate = 0.25
+ tax = income * tax_rate
+ return {"income": income, "tax": tax}
+```
+
+#### Using Dictionaries
+
+Tools can be represented as dictionaries, providing maximum flexibility. This method is useful when the tool's functionality is more dynamic or when integrating with external systems.
+
+**Example:**
+
+```python
+basic_tool_schema = {
+ "name": "basic_tax_tool",
+ "description": "A basic tax calculation tool",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "income": {"type": "number", "description": "Income amount"}
+ },
+ "required": ["income"]
+ }
+}
+
+def basic_tax_tool(income: float) -> dict:
+ tax_rate = 0.2
+ tax = income * tax_rate
+ return {"income": income, "tax": tax}
+```
+
+### Tool Registration
+
+Tools need to be registered with the agent for it to utilize them. This can be done by specifying the tools in the `tools` parameter during agent initialization.
+
+**Example:**
+
```python
+from pydantic import BaseModel
+
+from swarms import Agent
+from llama_hosted import llama3Hosted
+
+# Define Pydantic BaseModels for accounting tasks
+class CalculateTax(BaseModel):
+ income: float
+
+class GenerateInvoice(BaseModel):
+ client_name: str
+ amount: float
+ date: str
+
+class SummarizeExpenses(BaseModel):
+ expenses: list[dict]
+
+# Define tool functions using these models
+def calculate_tax(data: CalculateTax) -> dict:
+ tax_rate = 0.3
+ tax = data.income * tax_rate
+ return {"income": data.income, "tax": tax}
+
+def generate_invoice(data: GenerateInvoice) -> dict:
+ invoice = {
+ "client_name": data.client_name,
+ "amount": data.amount,
+ "date": data.date,
+ "invoice_id": "INV12345"
+ }
+ return invoice
+
+def summarize_expenses(data: SummarizeExpenses) -> dict:
+ total_expenses = sum(expense['amount'] for expense in data.expenses)
+ return {"total_expenses": total_expenses}
+
+# Function to generate a tool schema for demonstration purposes
+def create_tool_schema():
+ return {
+ "name": "execute",
+ "description": "Executes code on the user's machine",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "language": {
+ "type": "string",
+ "description": "Programming language",
+ "enum": ["python", "java"]
+ },
+ "code": {"type": "string", "description": "Code to execute"}
+ },
+ "required": ["language", "code"]
+ }
+ }
+
+# Initialize the agent with the tools
+agent = Agent(
+ agent_name="Accounting Agent",
+ system_prompt="This agent assists with various accounting tasks.",
+ sop_list=["Provide accurate and timely accounting services."],
+ llm=llama3Hosted(),
+ max_loops="auto",
+ interactive=True,
+ verbose=True,
+ tool_schema=BaseModel,
+ list_base_models=[
+ CalculateTax,
+ GenerateInvoice,
+ SummarizeExpenses
+ ],
+ output_type=str,
+ metadata_output_type="json",
+ function_calling_format_type="OpenAI",
+ function_calling_type="json",
+ tools=[
+ calculate_tax,
+ generate_invoice,
+ summarize_expenses
+ ],
+ list_base_models_json=create_tool_schema(),
+)
+```
+
+### Running the Agent
+
+The agent can execute tasks using the `run` method. This method takes a prompt and determines the appropriate tool to use based on the input.
+
+**Example:**
+
+```python
+# Example task: Calculate tax for an income
+result = agent.run("Calculate the tax for an income of $50,000.")
+print(f"Result: {result}")
+
+# Example task: Generate an invoice
+invoice_data = agent.run("Generate an invoice for John Doe for $1500 on 2024-06-01.")
+print(f"Invoice Data: {invoice_data}")
+
+# Example task: Summarize expenses
+expenses = [
+ {"amount": 200.0, "description": "Office supplies"},
+ {"amount": 1500.0, "description": "Software licenses"},
+ {"amount": 300.0, "description": "Travel expenses"}
+]
+summary = agent.run("Summarize these expenses: " + str(expenses))
+print(f"Expenses Summary: {summary}")
+```
+
+
+### Customizing Tools
+
+Custom tools can be created to extend the functionality of the Swarms framework. This can include integrating external APIs, performing complex calculations, or handling specialized data formats.
+
+**Example: Custom Accounting Tool**
+
+```python
+from pydantic import BaseModel
+
+class CustomAccountingTool(BaseModel):
+ data: dict
+
+def custom_accounting_tool(data: CustomAccountingTool) -> dict:
+ # Custom logic for the accounting tool
+ result = {
+ "status": "success",
+ "data_processed": len(data.data)
+ }
+ return result
+
+# Register the custom tool with the agent
+agent = Agent(
+ agent_name="Accounting Agent",
+ system_prompt="This agent assists with various accounting tasks.",
+ sop_list=["Provide accurate and timely accounting services."],
+ llm=llama3Hosted(),
+ max_loops="auto",
+ interactive=True,
+ verbose=True,
+ tool_schema=BaseModel,
+ list_base_models=[
+ CalculateTax,
+ GenerateInvoice,
+ SummarizeExpenses,
+ CustomAccountingTool
+ ],
+ output_type=str,
+ metadata_output_type="json",
+ function_calling_format_type="OpenAI",
+ function_calling_type="json",
+ tools=[
+ calculate_tax,
+ generate_invoice,
+ summarize_expenses,
+ custom_accounting_tool
+ ],
+ list_base_models_json=create_tool_schema(),
+)
+```
+
+### Advanced Customization
+
+Advanced customization involves modifying the core components of the Swarms framework. This includes extending existing classes, adding new methods, or integrating third-party libraries.
+
+**Example: Extending the Agent Class**
+
+```python
+from swarms import Agent
+
+class AdvancedAccountingAgent(Agent):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def custom_behavior(self):
+ print("Executing custom behavior")
+
+    def another_custom_method(self):
+        print("Another custom method")
+
+# Initialize the advanced agent
+advanced_agent = AdvancedAccountingAgent(
+ agent_name="Advanced Accounting Agent",
+ system_prompt="This agent performs advanced accounting tasks.",
+ sop_list=["Provide advanced accounting services."],
+ llm=llama3Hosted(),
+ max_loops="auto",
+ interactive=True,
+ verbose=True,
+ tool_schema=BaseModel,
+ list_base_models=[
+ CalculateTax,
+ GenerateInvoice,
+ SummarizeExpenses,
+ CustomAccountingTool
+ ],
+ output_type=str,
+ metadata_output_type="json",
+ function_calling_format_type="OpenAI",
+ function_calling_type="json",
+ tools=[
+ calculate_tax,
+ generate_invoice,
+ summarize_expenses,
+ custom_accounting_tool
+ ],
+ list_base_models_json=create_tool_schema(),
+)
+
+# Call custom methods
+advanced_agent.custom_behavior()
+advanced_agent.another_custom_method()
+```
+
+### Integrating External Libraries
+
+You can integrate external libraries to extend the functionality of your tools. This is useful for adding new capabilities or leveraging existing libraries for complex tasks.
+
+**Example: Integrating Pandas for Data Processing**
+
+```python
+import pandas as pd
+from pydantic import BaseModel
+
+class DataFrameTool(BaseModel):
+ data: list[dict]
+
+def process_data_frame(data: DataFrameTool) -> dict:
+ df = pd.DataFrame(data.data)
+ summary = df.describe().to_dict()
+ return {"summary": summary}
+
+# Register the tool with the agent
+agent = Agent(
+ agent_name="Data Processing Agent",
+ system_prompt="This agent processes data frames.",
+ sop_list=["Provide data processing services."],
+ llm=llama3Hosted(),
+ max_loops="auto",
+ interactive=True,
+ verbose=True,
+ tool_schema=BaseModel,
+ list_base_models=[DataFrameTool],
+ output_type=str,
+ metadata_output_type="json",
+ function_calling_format_type="OpenAI",
+ function_calling_type="json",
+ tools=[process_data_frame],
+ list_base_models_json=create_tool_schema(),
+)
+
+# Example task: Process a data frame
+data = [
+ {"col1": 1, "col2": 2},
+ {"col1": 3, "col2": 4},
+ {"col1": 5, "col2": 6}
+]
+result = agent.run("Process this data frame: " + str(data))
+print(f"Data Frame Summary: {result}")
+```
+
+## Conclusion
+
+The Swarms Tool System provides a robust and flexible framework for defining and utilizing tools within agents. By leveraging Pydantic BaseModels, functions, and dictionaries, developers can create highly customized tools to perform a wide range of tasks. The extensive customization options allow for the integration of external libraries and the extension of core components, making the Swarms framework suitable for diverse applications.
+
+This guide has covered the fundamental concepts and provided detailed examples to help you get started with the Swarms Tool System. With this foundation, you can explore and implement advanced features to build powerful agent-based applications.
\ No newline at end of file
diff --git a/docs/swarms/tools/tool_storage.md b/docs/swarms/tools/tool_storage.md
new file mode 100644
index 00000000..3c103be6
--- /dev/null
+++ b/docs/swarms/tools/tool_storage.md
@@ -0,0 +1,204 @@
+# ToolStorage
+
+
+The `ToolStorage` module provides a structured and efficient way to manage and utilize various tool functions. It is designed to store tool functions, manage settings, and ensure smooth registration and retrieval of tools. This module is particularly useful in applications that require dynamic management of a collection of functions, such as plugin systems, modular software, or any application where functions need to be registered and called dynamically.
+
+## Class: ToolStorage
+
+The `ToolStorage` class is the core component of the module. It provides functionalities to add, retrieve, and list tool functions as well as manage settings.
+
+### Attributes
+
+| Attribute | Type | Description |
+|------------|--------------------|-----------------------------------------------------------------------|
+| `verbose` | `bool` | A flag to enable verbose logging. |
+| `tools` | `List[Callable]` | A list of tool functions. |
+| `_tools` | `Dict[str, Callable]` | A dictionary that stores the tools, where the key is the tool name and the value is the tool function. |
+| `_settings`| `Dict[str, Any]` | A dictionary that stores the settings, where the key is the setting name and the value is the setting value. |
+
+### Methods
+
+#### `__init__`
+
+Initializes the `ToolStorage` instance.
+
+
+| Parameter | Type | Default | Description |
+|------------|-------------------|---------|------------------------------------------------------------|
+| `verbose` | `bool` | `None` | A flag to enable verbose logging. |
+| `tools` | `List[Callable]` | `None` | A list of tool functions to initialize the storage with. |
+| `*args` | `tuple` | `None` | Additional positional arguments. |
+| `**kwargs` | `dict` | `None` | Additional keyword arguments. |
+
+#### `add_tool`
+
+Adds a tool to the storage.
+
+| Parameter | Type | Description |
+|-----------|----------|------------------------------|
+| `func` | `Callable` | The tool function to be added. |
+
+**Raises:**
+- `ValueError`: If a tool with the same name already exists.
+
+#### `get_tool`
+
+Retrieves a tool by its name.
+
+| Parameter | Type | Description |
+|-----------|--------|-------------------------------|
+| `name` | `str` | The name of the tool to retrieve. |
+
+**Returns:**
+- `Callable`: The tool function.
+
+**Raises:**
+- `ValueError`: If no tool with the given name is found.
+
+#### `set_setting`
+
+Sets a setting in the storage.
+
+
+| Parameter | Type | Description |
+|-----------|--------|--------------------------|
+| `key` | `str` | The key for the setting. |
+| `value` | `Any` | The value for the setting. |
+
+#### `get_setting`
+
+Gets a setting from the storage.
+
+| Parameter | Type | Description |
+|-----------|--------|--------------------------|
+| `key` | `str` | The key for the setting. |
+
+**Returns:**
+- `Any`: The value of the setting.
+
+**Raises:**
+- `KeyError`: If the setting is not found.
+
+#### `list_tools`
+
+Lists all registered tools.
+
+**Returns:**
+- `List[str]`: A list of tool names.
+
+## Decorator: tool_registry
+
+The `tool_registry` decorator registers a function as a tool in the storage.
+
+| Parameter | Type | Description |
+|-----------|----------------|----------------------------------|
+| `storage` | `ToolStorage` | The storage instance to register the tool in. |
+
+**Returns:**
+- `Callable`: The decorator function.
+
+## Usage Examples
+
+
+### Full Example
+```python
+from swarms import ToolStorage, tool_registry
+
+storage = ToolStorage()
+
+
+# Example usage
+@tool_registry(storage)
+def example_tool(x: int, y: int) -> int:
+ """
+ Example tool function that adds two numbers.
+
+ Args:
+ x (int): The first number.
+ y (int): The second number.
+
+ Returns:
+ int: The sum of the two numbers.
+ """
+ return x + y
+
+
+# Query all the tools and get the example tool
+print(storage.list_tools())  # Should print ['example_tool']
+# print(storage.get_tool('example_tool'))  # Prints the tool function object
+
+# Find the tool by name and call it
+print(storage.get_tool("example_tool")(2, 3))  # Should print 5
+
+
+# Test the storage and querying
+if __name__ == "__main__":
+ print(storage.list_tools()) # Should print ['example_tool']
+    print(storage.get_tool("example_tool")(2, 3))  # Should print 5
+ storage.set_setting("example_setting", 42)
+ print(storage.get_setting("example_setting")) # Should print 42
+
+```
+
+
+### Basic Usage
+
+#### Example 1: Initializing ToolStorage and Adding a Tool
+
+```python
+from swarms.tools.tool_registry import ToolStorage, tool_registry
+
+# Initialize ToolStorage
+storage = ToolStorage()
+
+# Define a tool function
+@tool_registry(storage)
+def add_numbers(x: int, y: int) -> int:
+ return x + y
+
+# List tools
+print(storage.list_tools()) # Output: ['add_numbers']
+
+# Retrieve and use the tool
+add_tool = storage.get_tool('add_numbers')
+print(add_tool(5, 3)) # Output: 8
+```
+
+### Advanced Usage
+
+#### Example 2: Managing Settings
+
+```python
+# Set a setting
+storage.set_setting('max_retries', 5)
+
+# Get a setting
+max_retries = storage.get_setting('max_retries')
+print(max_retries) # Output: 5
+```
+
+### Error Handling
+
+#### Example 3: Handling Errors in Tool Retrieval
+
+```python
+try:
+ non_existent_tool = storage.get_tool('non_existent')
+except ValueError as e:
+ print(e) # Output: No tool found with name: non_existent
+```
+
+#### Example 4: Handling Duplicate Tool Addition
+
+```python
+try:
+ @tool_registry(storage)
+ def add_numbers(x: int, y: int) -> int:
+ return x + y
+except ValueError as e:
+ print(e) # Output: Tool with name add_numbers already exists.
+```
+
+## Conclusion
+
+The `ToolStorage` module provides a robust solution for managing tool functions and settings. Its design allows for easy registration, retrieval, and management of tools, making it a valuable asset in various applications requiring dynamic function handling. The inclusion of detailed logging ensures that the operations are transparent and any issues can be quickly identified and resolved.
\ No newline at end of file
diff --git a/docs/swarms_cloud/agent_api.md b/docs/swarms_cloud/agent_api.md
new file mode 100644
index 00000000..016ddedf
--- /dev/null
+++ b/docs/swarms_cloud/agent_api.md
@@ -0,0 +1,236 @@
+# Swarms API Documentation
+
+The Swarms API provides endpoints to interact with various language models, manage agent configurations, and handle token counting. This documentation covers the available endpoints, input and output models, and detailed examples for each endpoint.
+
+URL: `https://api.swarms.world`
+
+## Key Features
+- Dynamic Model Switching: Easily switch between different language models based on user input.
+- Token Counting: Efficiently count tokens using the tiktoken library.
+- Agent Configuration: Configure and run agents with detailed settings for various tasks.
+- CORS Handling: Support for Cross-Origin Resource Sharing (CORS) to allow web-based clients to interact with the API.
+
+
+## Endpoints
+
+### `/v1/models`
+
+**Method:** `GET`
+
+**Response Model:** `List[str]`
+
+**Description:**
+This endpoint returns a list of available model names. It is useful for clients to query and understand which models are available for use.
+
+**Response Example:**
+
+```json
+[
+ "OpenAIChat",
+ "GPT4VisionAPI",
+ "Anthropic"
+]
+```
+
+**Example Usage:**
+
+```python
+import requests
+
+response = requests.get("https://api.swarms.world/v1/models")
+print(response.json())
+```
+
+### `/v1/agent/completions`
+
+**Method:** `POST`
+
+**Request Model:** `AgentInput`
+
+**Response Model:** `AgentOutput`
+
+**URL:** `https://api.swarms.world/v1/agent/completions`
+
+**Description:**
+This endpoint handles the completion request for an agent configured with the given input parameters. It processes the request and returns the completion results.
+
+**Request Example:**
+
+```json
+{
+ "agent_name": "Swarm Agent",
+ "system_prompt": "Summarize the following text",
+ "agent_description": "An agent that summarizes text",
+ "model_name": "OpenAIChat",
+ "max_loops": 1,
+ "autosave": false,
+ "dynamic_temperature_enabled": false,
+ "dashboard": false,
+ "verbose": false,
+ "streaming_on": true,
+ "saved_state_path": null,
+ "sop": null,
+ "sop_list": null,
+ "user_name": "User",
+ "retry_attempts": 3,
+ "context_length": 8192,
+ "task": "This is a sample text that needs to be summarized."
+}
+```
+
+**Response Example:**
+
+```json
+{
+ "agent": {
+ "agent_name": "Swarm Agent",
+ "system_prompt": "Summarize the following text",
+ "agent_description": "An agent that summarizes text",
+ "model_name": "OpenAIChat",
+ "max_loops": 1,
+ "autosave": false,
+ "dynamic_temperature_enabled": false,
+ "dashboard": false,
+ "verbose": false,
+ "streaming_on": true,
+ "saved_state_path": null,
+ "sop": null,
+ "sop_list": null,
+ "user_name": "User",
+ "retry_attempts": 3,
+ "context_length": 8192,
+ "task": "This is a sample text that needs to be summarized."
+ },
+ "completions": {
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "Swarm Agent",
+ "content": "The sample text summarizes how to perform text summarization using an agent.",
+ "name": null
+ }
+ }
+ ],
+ "stream_choices": null,
+ "usage_info": {
+ "prompt_tokens": 10,
+ "completion_tokens": 15,
+ "total_tokens": 25
+ }
+ }
+}
+```
+
+**Example Usage:**
+
+```python
+import requests
+from pydantic import BaseModel
+from typing import List
+
+class AgentInput(BaseModel):
+ agent_name: str = "Swarm Agent"
+ system_prompt: str = None
+ agent_description: str = None
+ model_name: str = "OpenAIChat"
+ max_loops: int = 1
+ autosave: bool = False
+ dynamic_temperature_enabled: bool = False
+ dashboard: bool = False
+ verbose: bool = False
+ streaming_on: bool = True
+ saved_state_path: str = None
+ sop: str = None
+ sop_list: List[str] = None
+ user_name: str = "User"
+ retry_attempts: int = 3
+ context_length: int = 8192
+ task: str = None
+
+agent_input = AgentInput(task="Generate a summary of the provided text.")
+response = requests.post("https://api.swarms.world/v1/agent/completions", json=agent_input.dict())
+print(response.json())
+```
+
+## Models
+
+### AgentInput
+
+The `AgentInput` class defines the structure of the input data required to configure and run an agent.
+
+| Parameter | Type | Default | Description |
+|--------------------------------|-----------------|-----------------|-----------------------------------------------------------------|
+| `agent_name` | `str` | "Swarm Agent" | The name of the agent. |
+| `system_prompt` | `str` or `None` | `None` | The system prompt to guide the agent's behavior. |
+| `agent_description` | `str` or `None` | `None` | A description of the agent's purpose. |
+| `model_name` | `str` | "OpenAIChat" | The name of the language model to use. |
+| `max_loops` | `int` | 1 | The maximum number of loops the agent should perform. |
+| `autosave` | `bool` | `False` | Whether to enable autosave functionality. |
+| `dynamic_temperature_enabled` | `bool` | `False` | Whether dynamic temperature adjustment is enabled. |
+| `dashboard` | `bool` | `False` | Whether to enable the dashboard feature. |
+| `verbose` | `bool` | `False` | Whether to enable verbose logging. |
+| `streaming_on` | `bool` | `True` | Whether to enable streaming of responses. |
+| `saved_state_path` | `str` or `None` | `None` | Path to save the agent's state. |
+| `sop` | `str` or `None` | `None` | Standard operating procedures for the agent. |
+| `sop_list` | `List[str]` or `None` | `None` | A list of standard operating procedures. |
+| `user_name` | `str` | "User" | The name of the user interacting with the agent. |
+| `retry_attempts` | `int` | 3 | Number of retry attempts for failed operations. |
+| `context_length` | `int` | 8192 | Maximum context length for the model's input. |
+| `task` | `str` or `None` | `None` | The task description for the agent to perform. |
+
+### AgentOutput
+
+The `AgentOutput` class defines the structure of the output data returned by the agent after processing a request.
+
+| Parameter | Type | Description |
+|---------------|--------------------------|--------------------------------------------------|
+| `agent` | `AgentInput` | The input configuration used to create the agent.|
+| `completions` | `ChatCompletionResponse` | The response generated by the agent. |
+
+## Functions
+
+### count_tokens
+
+The `count_tokens` function counts the number of tokens in a given text using the `tiktoken` library.
+
+**Parameters:**
+
+- `text` (`str`): The text to be tokenized and counted.
+
+**Returns:**
+
+- `int`: The number of tokens in the text.
+
+**Example Usage:**
+
+```python
+text = "This is a sample text to count tokens."
+token_count = count_tokens(text)
+print(f"Token count: {token_count}")
+```
+
+### model_router
+
+The `model_router` function switches to the specified language model based on the provided model name.
+
+**Parameters:**
+
+- `model_name` (`str`): The name of the model to switch to.
+
+**Returns:**
+
+- An instance of the specified language model.
+
+**Example Usage:**
+
+```python
+model_name = "OpenAIChat"
+model_instance = model_router(model_name)
+```
+
+## Additional Information and Tips
+
+- **Error Handling**: Ensure robust error handling by catching exceptions and returning meaningful HTTP status codes and messages.
+- **Model Selection**: When adding new models, update the `model_router` function and the `/v1/models` endpoint to include the new model names.
+- **Token Management**: Keep track of token usage to optimize API costs and manage rate limits effectively.
\ No newline at end of file
diff --git a/docs/swarms_cloud/architecture.md b/docs/swarms_cloud/architecture.md
new file mode 100644
index 00000000..0a0e7db4
--- /dev/null
+++ b/docs/swarms_cloud/architecture.md
@@ -0,0 +1,138 @@
+# Under The Hood: The Swarm Cloud Serving Infrastructure
+-----------------------------------------------------------------
+
+This blog post delves into the intricate workings of our serving model infrastructure, providing a comprehensive understanding for both users and infrastructure engineers. We'll embark on a journey that starts with an API request and culminates in a response generated by your chosen model, all orchestrated within a multi-cloud environment.
+
+### The Journey of an API Request
+
+1. **The Gateway:** Your API request first arrives at an EC2 instance running SkyPilot, a lightweight controller.
+
+2. **Intelligent Routing:** SkyPilot, wielding its decision-making prowess, analyzes the request and identifies the most suitable GPU in our multi-cloud setup. Factors like resource availability, latency, and cost might influence this choice.
+
+3. **Multi-Cloud Agility:** Based on the chosen cloud provider (AWS or Azure), SkyPilot seamlessly directs the request to the appropriate containerized model residing in a sky clusters cluster. Here's where the magic of cloud-agnostic deployments comes into play.
+
+### Unveiling the Architecture
+
+Let's dissect the technical architecture behind this process:
+
+- **SkyPilot (EC2 Instance):** This lightweight controller, deployed on an EC2 instance, acts as the central hub for orchestrating requests and routing them to suitable model instances.
+
+- **Swarm Cloud Repositories:** Each model resides within its own dedicated folder on the Swarms Cloud GitHub repository. Here, you'll find a folder structure like this:
+
+```
+servers/
+ /
+ sky-serve.yaml # Deployment configuration file
+ /
+ sky-serve.yaml
+ ...
+
+```
+
+- **SkyServe Deployment Tool:** This is the workhorse responsible for deploying models within sky clusters clusters. Each model's folder contains a `sky-serve.yaml` file that dictates the deployment configuration.
+
+### Infrastructure Engineer's Toolkit: Commands for Model Deployment
+
+Here's a breakdown of the `sky serve` command and its subcommands:
+
+- `sky serve -h`: Displays the help message for the `sky serve` CLI tool.
+
+**Commands:**
+
+- `sky serve up yaml.yaml -n --cloud aws/azure`: This command deploys a SkyServe service based on the provided `yaml.yaml` configuration file. The `-n` flag indicates a new deployment, and the `--cloud` flag specifies the target cloud platform (AWS or Azure).
+
+**Additional Commands:**
+
+- `sky serve update`: Updates a running SkyServe service.
+
+- `sky serve status`: Shows the status of deployed SkyServe services.
+
+- `sky serve down`: Tears down (stops and removes) a SkyServe service.
+
+- `sky serve logs`: Tails the logs of a running SkyServe service, providing valuable insights into its operation.
+
+By leveraging these commands, infrastructure engineers can efficiently manage the deployment and lifecycle of models within the multi-cloud environment.
+
+**Building the Cluster and Accessing the Model:**
+
+When you deploy a model using `sky serve up`, SkyServe triggers the building of a sky clusters cluster, if one doesn't already exist. Once the deployment is complete, SkyServe provides you with an endpoint URL for interacting with the model. This URL allows you to send requests to the deployed model and receive its predictions.
+
+### Understanding the `sky-serve.yaml` Configuration
+
+The `sky-serve.yaml` file plays a crucial role in defining the deployment parameters for your model. This file typically includes properties such as:
+
+- **Image:** Specifies the Docker image containing your model code and dependencies.
+
+- **Replicas:** Defines the number of model replicas to be deployed in the Swarm cluster. This allows for load balancing and fault tolerance.
+
+- **Resources:** Sets memory and CPU resource constraints for the deployed model containers.
+
+- **Networking:** Configures network settings for communication within the sky clusters and with the outside world.
+
+**Benefits of Our Infrastructure:**
+
+- **Multi-Cloud Flexibility:** Deploy models seamlessly across AWS and Azure, taking advantage of whichever platform best suits your needs.
+
+- **Scalability:** Easily scale model deployments up or down based on traffic demands.
+
+- **Cost Optimization:** The intelligent routing by SkyPilot helps optimize costs by utilizing the most cost-effective cloud resources.
+
+- **Simplified Management:** Manage models across clouds with a single set of commands using `sky serve`.
+
+### Deep Dive: Technical Architecture
+
+**Cloud Considerations:**
+
+Our multi-cloud architecture offers several advantages, but it also introduces complexities that need to be addressed. Here's a closer look at some key considerations:
+
+- **Cloud Provider APIs and SDKs:** SkyPilot interacts with the APIs and SDKs of the chosen cloud provider (AWS or Azure) to manage resources like virtual machines, storage, and networking. Infrastructure engineers need to be familiar with the specific APIs and SDKs for each cloud platform to ensure smooth operation and troubleshooting.
+
+- **Security:** Maintaining consistent security across different cloud environments is crucial. This involves aspects like IAM (Identity and Access Management) configuration, network segmentation, and encryption of sensitive data at rest and in transit. Infrastructure engineers need to implement robust security measures tailored to each cloud provider's offerings.
+
+- **Network Connectivity:** Establishing secure and reliable network connectivity between SkyPilot (running on EC2), sky clusters clusters (deployed on cloud VMs), and your client applications is essential. This might involve setting up VPN tunnels or utilizing cloud-native networking solutions offered by each provider.
+
+- **Monitoring and Logging:** Monitoring the health and performance of SkyPilot, sky clusters clusters, and deployed models across clouds is critical for proactive issue identification and resolution. Infrastructure engineers can leverage cloud provider-specific monitoring tools alongside centralized logging solutions for comprehensive oversight.
+
+**sky clusters Clusters**
+
+sky clusters is a container orchestration platform that facilitates the deployment and management of containerized applications, including your machine learning models. When you deploy a model with `sky serve up`, SkyPilot launches a node and performs the following steps:
+
+- **Provision Resources:** SkyPilot requests resources from the chosen cloud provider (e.g., VMs with GPUs) to create a sky clusters cluster if one doesn't already exist.
+
+- **Deploy Containerized Models:** SkyPilot leverages the `sky-serve.yaml` configuration to build Docker images containing your model code and dependencies. These images are then pushed to a container registry (e.g., Docker Hub) and deployed as containers within the sky clusters cluster.
+
+- **Load Balancing and Service Discovery:** sky clusters provides built-in load balancing capabilities to distribute incoming requests across multiple model replicas, ensuring high availability and performance. Additionally, service discovery mechanisms allow models to find each other and communicate within the cluster.
+
+**SkyPilot - The Orchestrator**
+
+SkyPilot, the lightweight controller running on an EC2 instance, plays a central role in this infrastructure. Here's a deeper look at its functionalities:
+
+- **API Gateway Integration:** SkyPilot can be integrated with your API gateway or service mesh to receive incoming requests for model predictions.
+
+- **Request Routing:** SkyPilot analyzes the incoming request, considering factors like model compatibility, resource availability, and latency. Based on this analysis, SkyPilot selects the most suitable model instance within the appropriate sky clusters cluster.
+
+- **Cloud Provider Interaction:** SkyPilot interacts with the chosen cloud provider's APIs to manage resources required for the sky clusters cluster and model deployment.
+
+- **Model Health Monitoring:** SkyPilot can be configured to monitor the health and performance of deployed models. This might involve collecting metrics like model response times, resource utilization, and error rates.
+
+- **Scalability Management:** Based on pre-defined policies or real-time traffic patterns, SkyPilot can trigger the scaling of model deployments (adding or removing replicas) within the sky clusters cluster.
+
+**Advanced Considerations**
+
+This blog post has provided a foundational understanding of our serving model infrastructure. For infrastructure engineers seeking a deeper dive, here are some additional considerations:
+
+- **Container Security:** Explore container image scanning for vulnerabilities, enforcing least privilege principles within container runtime environments, and utilizing secrets management solutions for secure access to sensitive data.
+
+- **Model Versioning and Rollbacks:** Implement a model versioning strategy to track changes and facilitate rollbacks to previous versions if necessary.
+
+- **A/B Testing:** Integrate A/B testing frameworks to evaluate the performance of different model versions and configurations before full-scale deployment.
+
+- **Auto-Scaling with Cloud Monitoring:** Utilize cloud provider-specific monitoring services like Amazon CloudWatch or Azure Monitor to trigger auto-scaling of sky clusters based on predefined metrics.
+
+By understanding these technical aspects and considerations, infrastructure engineers can effectively manage and optimize our multi-cloud serving model infrastructure.
+
+### Conclusion
+
+This comprehensive exploration has shed light on the intricate workings of our serving model infrastructure. We've covered the journey of an API request, delved into the technical architecture with a focus on cloud considerations, sky clusters, and SkyPilot's role as the orchestrator. We've also explored advanced considerations for infrastructure engineers seeking to further optimize and secure this multi-cloud environment.
+
+This understanding empowers both users and infrastructure engineers to leverage this technology effectively for deploying and managing your machine learning models at scale.
diff --git a/docs/swarms_cloud/available_models.md b/docs/swarms_cloud/available_models.md
new file mode 100644
index 00000000..66f23e7c
--- /dev/null
+++ b/docs/swarms_cloud/available_models.md
@@ -0,0 +1,9 @@
+# Available Models
+
+| Model Name | Description | Input Price | Output Price | Use Cases |
+|-----------------------|---------------------------------------------------------------------------------------------------------|--------------|--------------|------------------------------------------------------------------------|
+| **internlm-xcomposer2-4khd** | One of the highest performing VLMs (Vision Language Models). | $4/1M Tokens | $8/1M Tokens | High-resolution image processing and understanding. |
+
+
+## What models should we add?
+[Book a call with us to learn more about your needs:](https://calendly.com/swarm-corp/30min)
diff --git a/docs/swarms_cloud/getting_started.md b/docs/swarms_cloud/getting_started.md
new file mode 100644
index 00000000..5fb114ac
--- /dev/null
+++ b/docs/swarms_cloud/getting_started.md
@@ -0,0 +1,94 @@
+# Getting Started with State-of-the-Art Vision Language Models (VLMs) Using the Swarms API
+
+The intersection of vision and language tasks within the field of artificial intelligence has led to the emergence of highly sophisticated models known as Vision Language Models (VLMs). These models leverage the capabilities of both computer vision and natural language processing to provide a more nuanced understanding of multimodal inputs. In this blog post, we will guide you through the process of integrating state-of-the-art VLMs available through the Swarms API, focusing particularly on models like "internlm-xcomposer2-4khd", which represents a blend of high-performance language and visual understanding.
+
+#### What Are Vision Language Models?
+
+Vision Language Models are at the frontier of integrating visual data processing with text analysis. These models are trained on large datasets that include both images and their textual descriptions, learning to correlate visual elements with linguistic context. The result is a model that can not only recognize objects in an image but also generate descriptive, context-aware text, answer questions about the image, and even engage in a dialogue about its content.
+
+#### Why Use Swarms API for VLMs?
+
+Swarms API provides access to several cutting-edge VLMs including the "internlm-xcomposer2-4khd" model. This API is designed for developers looking to seamlessly integrate advanced multimodal capabilities into their applications without the need for extensive machine learning expertise or infrastructure. Swarms API is robust, scalable, and offers state-of-the-art models that are continuously updated to leverage the latest advancements in AI research.
+
+#### Prerequisites
+
+Before diving into the technical setup, ensure you have the following:
+- An active account with Swarms API to obtain an API key.
+- Python installed on your machine (Python 3.6 or later is recommended).
+- An environment where you can install packages and run Python scripts (like Visual Studio Code, Jupyter Notebook, or simply your terminal).
+
+#### Setting Up Your Environment
+
+First, you'll need to install the `OpenAI` Python library if it's not already installed:
+
+```bash
+pip install openai
+```
+
+#### Integrating the Swarms API
+
+Here's a basic guide on how to set up the Swarms API in your Python environment:
+
+1. **API Key Configuration**:
+ Start by setting up your API key and base URL. Replace `"your_swarms_key"` with the actual API key you obtained from Swarms.
+
+ ```python
+ from openai import OpenAI
+
+ openai_api_key = "your_swarms_key"
+ openai_api_base = "https://api.swarms.world/v1"
+ ```
+
+2. **Initialize Client**:
+ Initialize your OpenAI client with the provided API key and base URL.
+
+ ```python
+ client = OpenAI(
+ api_key=openai_api_key,
+ base_url=openai_api_base,
+ )
+ ```
+
+3. **Creating a Chat Completion**:
+   To use the VLM, you'll send a request to the API with a multimodal input consisting of both an image and a text query. The following example shows how to structure this request:
+
+ ```python
+ chat_response = client.chat.completions.create(
+ model="internlm-xcomposer2-4khd",
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
+ },
+ },
+ {"type": "text", "text": "What's in this image?"},
+ ]
+ }
+ ],
+ )
+ print("Chat response:", chat_response)
+ ```
+
+ This code sends a multimodal query to the model, which includes an image URL followed by a text question regarding the image.
+
+#### Understanding the Response
+
+The response from the API will include details generated by the model about the image based on the textual query. This could range from simple descriptions to complex narratives, depending on the model's capabilities and the nature of the question.
+
+#### Best Practices
+
+- **Data Privacy**: Always ensure that the images and data you use comply with privacy laws and regulations.
+- **Error Handling**: Implement robust error handling to manage potential issues during API calls.
+- **Model Updates**: Keep track of updates to the Swarms API and model improvements to leverage new features and improved accuracies.
+
+#### Conclusion
+
+Integrating VLMs via the Swarms API opens up a plethora of opportunities for developers to create rich, interactive, and intelligent applications that understand and interpret the world not just through text but through visuals as well. Whether you're building an educational tool, a content management system, or an interactive chatbot, these models can significantly enhance the way users interact with your application.
+
+As you embark on your journey to integrate these powerful models into your projects, remember that the key to successful implementation lies in understanding the capabilities and limitations of the technology, continually testing with diverse data, and iterating based on user feedback and technological advances.
+
+Happy coding, and here's to building more intelligent, multimodal applications!
\ No newline at end of file
diff --git a/docs/swarms_cloud/main.md b/docs/swarms_cloud/main.md
new file mode 100644
index 00000000..d54451a4
--- /dev/null
+++ b/docs/swarms_cloud/main.md
@@ -0,0 +1,352 @@
+# Swarm Cloud API Reference
+
+## Overview
+
+The AI Chat Completion API processes text and image inputs to generate conversational responses. It supports various configurations to customize response behavior and manage input content.
+
+## API Endpoints
+
+### Chat Completion URL
+`https://api.swarms.world`
+
+
+
+- **Endpoint:** `/v1/chat/completions`
+- **Full URL:** `https://api.swarms.world/v1/chat/completions`
+- **Method:** POST
+- **Description:** Generates a response based on the provided conversation history and parameters.
+
+#### Request Parameters
+
+| Parameter | Type | Description | Required |
+|---------------|--------------------|-----------------------------------------------------------|----------|
+| `model` | string | The AI model identifier. | Yes |
+| `messages` | array of objects | A list of chat messages, including the sender's role and content. | Yes |
+| `temperature` | float | Controls randomness. Lower values make responses more deterministic. | No |
+| `top_p` | float | Controls diversity. Lower values lead to less random completions. | No |
+| `max_tokens` | integer | The maximum number of tokens to generate. | No |
+| `stream` | boolean | If set to true, responses are streamed back as they're generated. | No |
+
+#### Response Structure
+
+- **Success Response Code:** `200 OK`
+
+```markdown
+{
+ "model": string,
+ "object": string,
+ "choices": array of objects,
+ "usage": object
+}
+```
+
+### List Models
+
+- **Endpoint:** `/v1/models`
+- **Method:** GET
+- **Description:** Retrieves a list of available models.
+
+#### Response Structure
+
+- **Success Response Code:** `200 OK`
+
+```markdown
+{
+ "data": array of objects
+}
+```
+
+## Objects
+
+### Request
+
+| Field | Type | Description | Required |
+|-----------|---------------------|-----------------------------------------------|----------|
+| `role` | string | The role of the message sender. | Yes |
+| `content` | string or array | The content of the message. | Yes |
+| `name` | string | An optional name identifier for the sender. | No |
+
+### Response
+
+| Field | Type | Description |
+|-----------|--------|------------------------------------|
+| `index` | integer| The index of the choice. |
+| `message` | object | A `ChatMessageResponse` object. |
+
+#### UsageInfo
+
+| Field | Type | Description |
+|-------------------|---------|-----------------------------------------------|
+| `prompt_tokens` | integer | The number of tokens used in the prompt. |
+| `total_tokens` | integer | The total number of tokens used. |
+| `completion_tokens`| integer| The number of tokens used for the completion. |
+
+## Example Requests
+
+### Text Chat Completion
+
+```json
+POST /v1/chat/completions
+{
+ "model": "cogvlm-chat-17b",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hello, world!"
+ }
+ ],
+ "temperature": 0.8
+}
+```
+
+### Image and Text Chat Completion
+
+```json
+POST /v1/chat/completions
+{
+ "model": "cogvlm-chat-17b",
+ "messages": [
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": "Describe this image"
+ },
+ {
+ "type": "image_url",
+ "image_url": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD..."
+ }
+ ]
+ }
+ ],
+ "temperature": 0.8,
+ "top_p": 0.9,
+ "max_tokens": 1024
+}
+```
+
+## Error Codes
+
+The API uses standard HTTP status codes to indicate the success or failure of an API call.
+
+| Status Code | Description |
+|-------------|-----------------------------------|
+| 200 | OK - The request has succeeded. |
+| 400 | Bad Request - Invalid request format. |
+| 500 | Internal Server Error - An error occurred on the server. |
+
+
+## Examples in Various Languages
+
+### Python
+```python
+import requests
+import base64
+from PIL import Image
+from io import BytesIO
+
+
+# Convert image to Base64
+def image_to_base64(image_path):
+ with Image.open(image_path) as image:
+ buffered = BytesIO()
+ image.save(buffered, format="JPEG")
+ img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+ return img_str
+
+
+# Replace 'image.jpg' with the path to your image
+base64_image = image_to_base64("your_image.jpg")
+text_data = {"type": "text", "text": "Describe what is in the image"}
+image_data = {
+ "type": "image_url",
+ "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
+}
+
+# Construct the request data
+request_data = {
+ "model": "cogvlm-chat-17b",
+ "messages": [{"role": "user", "content": [text_data, image_data]}],
+ "temperature": 0.8,
+ "top_p": 0.9,
+ "max_tokens": 1024,
+}
+
+# Specify the URL of your FastAPI application
+url = "https://api.swarms.world/v1/chat/completions"
+
+# Send the request
+response = requests.post(url, json=request_data)
+# Print the response from the server
+print(response.text)
+```
+
+### Example API Request in Node
+```js
+const fs = require('fs');
+const https = require('https');
+const sharp = require('sharp');
+
+// Convert image to Base64
+async function imageToBase64(imagePath) {
+ try {
+ const imageBuffer = await sharp(imagePath).jpeg().toBuffer();
+ return imageBuffer.toString('base64');
+ } catch (error) {
+ console.error('Error converting image to Base64:', error);
+ }
+}
+
+// Main function to execute the workflow
+async function main() {
+ const base64Image = await imageToBase64("your_image.jpg");
+ const textData = { type: "text", text: "Describe what is in the image" };
+ const imageData = {
+ type: "image_url",
+ image_url: { url: `data:image/jpeg;base64,${base64Image}` },
+ };
+
+ // Construct the request data
+ const requestData = JSON.stringify({
+ model: "cogvlm-chat-17b",
+ messages: [{ role: "user", content: [textData, imageData] }],
+ temperature: 0.8,
+ top_p: 0.9,
+ max_tokens: 1024,
+ });
+
+ const options = {
+ hostname: 'api.swarms.world',
+ path: '/v1/chat/completions',
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Content-Length': requestData.length,
+ },
+ };
+
+ const req = https.request(options, (res) => {
+ let responseBody = '';
+
+ res.on('data', (chunk) => {
+ responseBody += chunk;
+ });
+
+ res.on('end', () => {
+ console.log('Response:', responseBody);
+ });
+ });
+
+ req.on('error', (error) => {
+ console.error(error);
+ });
+
+ req.write(requestData);
+ req.end();
+}
+
+main();
+```
+
+### Example API Request in Go
+
+```go
+package main
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "image"
+ "image/jpeg"
+ _ "image/png" // Register PNG format
+ "io"
+ "net/http"
+ "os"
+)
+
+// imageToBase64 converts an image to a Base64-encoded string.
+func imageToBase64(imagePath string) (string, error) {
+ file, err := os.Open(imagePath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ img, _, err := image.Decode(file)
+ if err != nil {
+ return "", err
+ }
+
+ buf := new(bytes.Buffer)
+ err = jpeg.Encode(buf, img, nil)
+ if err != nil {
+ return "", err
+ }
+
+ return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
+}
+
+// main is the entry point of the program.
+func main() {
+ base64Image, err := imageToBase64("your_image.jpg")
+ if err != nil {
+ fmt.Println("Error converting image to Base64:", err)
+ return
+ }
+
+ requestData := map[string]interface{}{
+ "model": "cogvlm-chat-17b",
+ "messages": []map[string]interface{}{
+ {
+ "role": "user",
+ "content": []map[string]string{{"type": "text", "text": "Describe what is in the image"}, {"type": "image_url", "image_url": {"url": fmt.Sprintf("data:image/jpeg;base64,%s", base64Image)}}},
+ },
+ },
+ "temperature": 0.8,
+ "top_p": 0.9,
+ "max_tokens": 1024,
+ }
+
+ requestBody, err := json.Marshal(requestData)
+ if err != nil {
+ fmt.Println("Error marshaling request data:", err)
+ return
+ }
+
+ url := "https://api.swarms.world/v1/chat/completions"
+ request, err := http.NewRequest("POST", url, bytes.NewBuffer(requestBody))
+ if err != nil {
+ fmt.Println("Error creating request:", err)
+ return
+ }
+
+ request.Header.Set("Content-Type", "application/json")
+
+ client := &http.Client{}
+ response, err := client.Do(request)
+ if err != nil {
+ fmt.Println("Error sending request:", err)
+ return
+ }
+ defer response.Body.Close()
+
+ responseBody, err := io.ReadAll(response.Body)
+ if err != nil {
+ fmt.Println("Error reading response body:", err)
+ return
+ }
+
+ fmt.Println("Response:", string(responseBody))
+}
+```
+
+
+
+
+
+## Conclusion
+
+This API reference provides the necessary details to understand and interact with the AI Chat Completion API. By following the outlined request and response formats, users can integrate this API into their applications to generate dynamic and contextually relevant conversational responses.
\ No newline at end of file
diff --git a/docs/swarms_cloud/migrate_openai.md b/docs/swarms_cloud/migrate_openai.md
new file mode 100644
index 00000000..46d35ce3
--- /dev/null
+++ b/docs/swarms_cloud/migrate_openai.md
@@ -0,0 +1,103 @@
+## Migrate from OpenAI to Swarms in 3 lines of code
+
+If you've been using GPT-3.5 or GPT-4, switching to Swarms is easy!
+
+Swarms VLMs are available to use through our OpenAI compatible API. Additionally, if you have been building or prototyping using OpenAI's Python SDK you can keep your code as-is and use Swarms's VLMs.
+
+In this example, we will show you how to change just three lines of code to make your Python application use Swarms's Open Source models through OpenAI's Python SDK.
+
+
+## Getting Started
+Migrate OpenAI's Python SDK example script to use Swarms's LLM endpoints.
+
+These are the three modifications necessary to achieve our goal:
+
+Redefine the OPENAI_API_KEY environment variable to use your Swarms key.
+
+Redefine OPENAI_BASE_URL to point to `https://api.swarms.world/v1`
+
+Change the model name to an Open Source model, for example: cogvlm-chat-17b
+
+## Requirements
+We will be using Python and OpenAI's Python SDK.
+
+## Instructions
+Set up a Python virtual environment. Read Creating Virtual Environments here.
+
+```sh
+python3 -m venv .venv
+source .venv/bin/activate
+```
+
+Install the pip requirements in your local python virtual environment
+
+`python3 -m pip install openai`
+
+## Environment setup
+To run this example, there are simple steps to take:
+
+Get a Swarms API token by following these instructions.
+Expose the token in a new SWARMS_API_TOKEN environment variable:
+
+`export SWARMS_API_TOKEN=`
+
+Switch the OpenAI token and base URL environment variable
+
+`export OPENAI_API_KEY=$SWARMS_API_TOKEN`
+`export OPENAI_BASE_URL="https://api.swarms.world/v1"`
+
+If you prefer, you can also directly paste your token into the client initialization.
+
+
+## Example code
+Once you've completed the steps above, the code below will call Swarms LLMs:
+
+```python
+from dotenv import load_dotenv
+from openai import OpenAI
+
+load_dotenv()
+openai_api_key = ""
+
+openai_api_base = "https://api.swarms.world/v1"
+model = "internlm-xcomposer2-4khd"
+
+client = OpenAI(api_key=openai_api_key, base_url=openai_api_base)
+# Note that this model expects the image to come before the main text
+chat_response = client.chat.completions.create(
+ model=model,
+ messages=[
+ {
+ "role": "user",
+ "content": [
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": "https://home-cdn.reolink.us/wp-content/uploads/2022/04/010345091648784709.4253.jpg",
+ },
+ },
+ {
+ "type": "text",
+ "text": "What is the most dangerous object in the image?",
+ },
+ ],
+ }
+ ],
+ temperature=0.1,
+ max_tokens=5000,
+)
+print("Chat response:", chat_response)
+
+```
+
+Note that you need to supply one of Swarms's supported LLMs as an argument, as in the example above. For a complete list of our supported LLMs, check out our REST API page.
+
+
+## Example output
+The code above produces the following object:
+
+```python
+ChatCompletionMessage(content=" Hello! How can I assist you today? Do you have any questions or tasks you'd like help with? Please let me know and I'll do my best to assist you.", role='assistant', function_call=None, tool_calls=None)
+```
+
+
diff --git a/docs/swarms_cloud/production_deployment.md b/docs/swarms_cloud/production_deployment.md
new file mode 100644
index 00000000..749e0530
--- /dev/null
+++ b/docs/swarms_cloud/production_deployment.md
@@ -0,0 +1,319 @@
+# Enterprise Guide to High-Performance Multi-Agent LLM Deployments
+-------
+
+As large language models (LLMs) continue to advance and enable a wide range of powerful applications, enterprises are increasingly exploring multi-agent architectures to leverage the collective capabilities of multiple LLMs. However, coordinating and optimizing the performance of these complex multi-agent systems presents significant challenges.
+
+This comprehensive guide provides enterprise architects, engineering leaders, and technical decision-makers with a strategic framework for maximizing performance across multi-agent LLM deployments. Developed through extensive research and collaboration with industry partners, this guide distills best practices, proven techniques, and cutting-edge methodologies into seven core principles.
+
+By implementing the recommendations outlined in this guide, organizations can achieve superior latency, throughput, and resource utilization while ensuring scalability, cost-effectiveness, and optimal user experiences. Whether powering customer-facing conversational agents, driving internal knowledge management systems, or fueling mission-critical decision support tools, high-performance multi-agent LLM deployments will be pivotal to unlocking the full potential of this transformative technology.
+
+## Introduction
+
+The rise of large language models (LLMs) has ushered in a new era of human-machine interaction, enabling enterprises to develop sophisticated natural language processing (NLP) applications that can understand, generate, and reason with human-like text. However, as the complexity and scale of LLM deployments grow, traditional monolithic architectures are increasingly challenged to meet the stringent performance, scalability, and cost requirements of enterprise environments.
+
+Multi-agent architectures, which coordinate the collective capabilities of multiple specialized LLMs, have emerged as a powerful paradigm for addressing these challenges. By distributing workloads across a cohort of agents, each optimized for specific tasks or domains, multi-agent systems can deliver superior performance, resilience, and adaptability compared to single-model solutions.
+
+However, realizing the full potential of multi-agent LLM deployments requires a strategic approach to system design, optimization, and ongoing management. This guide presents a comprehensive framework for maximizing performance across seven core principles, each underpinned by a range of proven techniques and methodologies.
+
+Whether you are architecting a customer-facing conversational agent, building an internal knowledge management platform, or developing a mission-critical decision support system, this guide will equip you with the insights and best practices necessary to unlock the full potential of multi-agent LLM deployments within your enterprise.
+
+## Principle 1: Distribute Token Processing
+----------------------------------------
+
+At the heart of every LLM deployment lies the fundamental challenge of optimizing token processing -- the rate at which the model consumes and generates text inputs and outputs. In multi-agent architectures, distributing and parallelizing token processing across multiple agents is a critical performance optimization strategy.
+
+### Agent Specialization
+
+One of the key advantages of multi-agent architectures is the ability to dedicate specific agents to specialized tasks or domains. By carefully matching agents to the workloads they are optimized for, enterprises can maximize overall throughput and minimize latency.
+
+For example, in a conversational agent deployment, one agent may be optimized for intent recognition and query understanding, while another is fine-tuned for generating coherent, context-aware responses. In a document processing pipeline, separate agents could be dedicated to tasks such as named entity recognition, sentiment analysis, and summarization.
+
+To effectively leverage agent specialization, enterprises should:
+
+- Conduct a thorough analysis of their application's workflow and identify distinct tasks or domains that could benefit from dedicated agents.
+- Evaluate the strengths and weaknesses of available LLM models and agents, and map them to the identified tasks or domains based on their capabilities and performance characteristics.
+- Implement continuous monitoring and performance tuning processes to ensure agents remain optimized for their assigned workloads as models evolve and domain requirements shift.
+
+### Load Balancing
+
+Even with a well-designed allocation of tasks across specialized agents, fluctuations in workload and demand can create bottlenecks and performance degradation. Effective load balancing strategies are essential to ensure that token processing capacity is dynamically distributed across available agents based on real-time conditions.
+
+Load balancing in multi-agent LLM deployments can be accomplished through a combination of techniques, including:
+
+- **Round-Robin**: Distributing incoming requests across agents in a cyclical fashion, ensuring an even distribution of workload.
+- **Least Connections**: Routing requests to the agent with the fewest active connections or outstanding tasks, minimizing the risk of overloading any single agent.
+- **Response Time Monitoring**: Continuously monitoring the response times of each agent and dynamically adjusting request routing to favor faster-responding agents.
+- **Resource-Based Routing**: Factoring in agent-level resource consumption (e.g., CPU, memory) when making routing decisions, ensuring that overloaded agents are relieved of additional workload.
+
+Implementing effective load balancing requires careful consideration of the specific characteristics and requirements of your multi-agent deployment, as well as the integration of robust monitoring and analytics capabilities to inform dynamic routing decisions.
+
+### Horizontal Scaling
+
+While load balancing optimizes the utilization of existing agent resources, horizontal scaling strategies enable organizations to dynamically provision additional token processing capacity to meet demand spikes or handle larger overall workloads.
+
+In multi-agent LLM deployments, horizontal scaling can be achieved through:
+
+- **Agent Replication**: Spin up additional instances of existing agents to increase parallel processing capacity for specific tasks or domains.
+- **Hybrid Scaling**: Combine agent replication with the dynamic provisioning of additional compute resources (e.g., CPU, GPU) to support the increased agent count.
+- **Serverless Deployment**: Leverage serverless computing platforms (e.g., AWS Lambda, Google Cloud Functions) to automatically scale agent instances based on real-time demand, minimizing idle resource consumption.
+
+Effective horizontal scaling requires robust orchestration and management capabilities, as well as seamless integration with load balancing mechanisms to ensure that incoming workloads are efficiently distributed across the dynamically scaled agent pool.
+
+## Principle 2: Optimize Agent Communication
+-----------------------------------------
+
+In multi-agent LLM deployments, efficient inter-agent communication is crucial for coordinating tasks, exchanging context and intermediate results, and maintaining overall system coherence. However, communication overhead can quickly become a performance bottleneck if not carefully managed.
+
+### Minimizing Overhead
+
+Reducing the volume and complexity of information exchanged between agents is a key strategy for optimizing communication performance. Techniques for minimizing overhead include:
+
+- **Data Compression**: Applying lossless or lossy compression algorithms to reduce the size of data payloads exchanged between agents, lowering bandwidth requirements and transmission latencies.
+- **Information Summarization**: Distilling and summarizing context, results, or other data exchanged between agents to its essential elements, minimizing redundant or non-critical information.
+- **Differential Updates**: Rather than transmitting entire data payloads, agents can exchange only the differential updates or deltas required to synchronize their respective states.
+
+Implementing these techniques requires careful analysis of the specific data exchange patterns and communication requirements within your multi-agent deployment, as well as the integration of appropriate compression, summarization, and differential update algorithms.
+
+### Prioritizing Critical Information
+
+In scenarios where communication bandwidth or latency constraints cannot be fully alleviated through overhead reduction techniques, enterprises can prioritize the exchange of critical information over non-essential data.
+
+This can be achieved through:
+
+- **Prioritized Queuing**: Implementing queuing mechanisms that prioritize the transmission of high-priority, time-sensitive data over lower-priority, non-critical information.
+- **Selective Communication**: Dynamically determining which agents require specific pieces of information based on their roles and responsibilities, and selectively transmitting data only to those agents that truly need it.
+- **Progressive Information Exchange**: Exchanging information in a progressive or staged manner, with critical elements transmitted first, followed by supplementary or contextual data as bandwidth becomes available.
+
+Effective prioritization requires a deep understanding of the interdependencies and information flow within your multi-agent system, as well as the ability to dynamically assess and prioritize data based on its criticality and urgency.
+
+### Caching and Reusing Context
+
+In many multi-agent LLM deployments, agents frequently exchange or operate on shared context, such as user profiles, conversation histories, or domain-specific knowledge bases. Caching and reusing this context information can significantly reduce redundant communication and processing overhead.
+
+Strategies for optimizing context caching and reuse include:
+
+- **Agent-Level Caching**: Implementing caching mechanisms within individual agents to store and retrieve frequently accessed context data, minimizing the need for inter-agent communication.
+- **Centralized Context Management**: Deploying a dedicated context management service or data store that agents can query and update, ensuring consistent access to the latest context information across the system.
+- **Context Versioning and Invalidation**: Implementing versioning and invalidation mechanisms to ensure that cached context data remains fresh and consistent, avoiding stale or outdated information from propagating through the system.
+
+
+## Principle 3: Leverage Agent Specialization
+------------------------------------------
+
+One of the key advantages of multi-agent architectures is the ability to optimize individual agents for specific tasks, domains, or capabilities. By leveraging agent specialization, enterprises can ensure that each component of their LLM system is finely tuned for maximum performance and quality.
+
+### Task-Specific Optimization
+
+Within a multi-agent LLM deployment, different agents may be responsible for distinct tasks such as language understanding, knowledge retrieval, response generation, or post-processing. Optimizing each agent for its designated task can yield significant performance gains and quality improvements.
+
+Techniques for task-specific optimization include:
+
+- **Prompt Engineering**: Crafting carefully designed prompts that provide the necessary context, instructions, and examples to guide an agent towards optimal performance for its assigned task.
+- **Fine-Tuning**: Adapting a pre-trained LLM to a specific task or domain by fine-tuning it on a curated dataset, allowing the agent to specialize and improve its performance on that particular workload.
+- **Model Distillation**: Transferring the knowledge and capabilities of a larger, more capable LLM into a smaller, more efficient model specialized for a specific task, balancing performance and quality trade-offs.
+
+Implementing these optimization techniques requires a deep understanding of the capabilities and requirements of each task within your multi-agent system, as well as access to relevant training data and computational resources for fine-tuning and distillation processes.
+
+### Domain Adaptation
+
+Many enterprise applications operate within specific domains or verticals, such as finance, healthcare, or legal. Adapting agents to these specialized domains can significantly improve their performance, accuracy, and compliance within the target domain.
+
+Strategies for domain adaptation include:
+
+- **Domain-Specific Pre-Training**: Leveraging domain-specific corpora to pre-train LLM agents, imbuing them with a foundational understanding of the language, concepts, and nuances specific to the target domain.
+- **Transfer Learning**: Fine-tuning agents that have been pre-trained on general or adjacent domains, transferring their existing knowledge and capabilities to the target domain while optimizing for its specific characteristics.
+- **Domain Persona Injection**: Injecting domain-specific personas, traits, or constraints into agents during fine-tuning or deployment, shaping their behavior and outputs to align with domain-specific norms and requirements.
+
+Effective domain adaptation requires access to high-quality, domain-specific training data, as well as close collaboration with subject matter experts to ensure that agents are properly calibrated to meet the unique demands of the target domain.
+
+### Ensemble Techniques
+
+In complex multi-agent deployments, individual agents may excel at specific subtasks or aspects of the overall workflow. Ensemble techniques that combine the outputs or predictions of multiple specialized agents can often outperform any single agent, leveraging the collective strengths of the ensemble.
+
+Common ensemble techniques for multi-agent LLM systems include:
+
+- **Voting**: Combining the outputs or predictions of multiple agents through majority voting, weighted voting, or other consensus mechanisms.
+- **Stacking**: Training a meta-agent to combine and optimize the outputs of multiple base agents, effectively learning to leverage their collective strengths.
+- **Blending**: Combining the outputs of multiple agents through weighted averaging, linear interpolation, or other blending techniques, allowing for nuanced integration of diverse perspectives.
+
+Implementing effective ensemble techniques requires careful analysis of the strengths, weaknesses, and complementary capabilities of individual agents, as well as the development of robust combination strategies that can optimally leverage the ensemble's collective intelligence.
+
+## Principle 4: Implement Dynamic Scaling
+--------------------------------------
+
+The demand and workload patterns of enterprise LLM deployments can be highly dynamic, with significant fluctuations driven by factors such as user activity, data ingestion schedules, or periodic batch processing. Implementing dynamic scaling strategies allows organizations to optimally provision and allocate resources in response to these fluctuations, ensuring consistent performance while minimizing unnecessary costs.
+
+### Autoscaling
+
+Autoscaling is a core capability that enables the automatic adjustment of compute resources (e.g., CPU, GPU, memory) and agent instances based on real-time demand patterns and workload metrics. By dynamically scaling resources up or down, enterprises can maintain optimal performance and resource utilization, avoiding both over-provisioning and under-provisioning scenarios.
+
+Effective autoscaling in multi-agent LLM deployments requires:
+
+- **Monitoring and Metrics**: Implementing robust monitoring and metrics collection mechanisms to track key performance indicators (KPIs) such as request rates, response times, resource utilization, and agent-level metrics.
+- **Scaling Policies**: Defining scaling policies that specify the conditions and thresholds for triggering automatic scaling actions, such as provisioning additional agents or compute resources when certain KPIs are breached.
+- **Scaling Orchestration**: Integrating autoscaling capabilities with resource orchestration and management tools (e.g., Kubernetes, AWS Auto Scaling) to seamlessly provision, configure, and integrate new resources into the existing multi-agent deployment.
+
+By automating the scaling process, enterprises can respond rapidly to workload fluctuations, ensuring consistent performance and optimal resource utilization without the need for manual intervention.
+
+### Spot Instance Utilization
+
+Many cloud providers offer spot instances or preemptible resources at significantly discounted prices compared to on-demand or reserved instances. While these resources may be reclaimed with little notice, they can be leveraged judiciously within multi-agent LLM deployments to reduce operational costs.
+
+Strategies for leveraging spot instances include:
+
+- **Fault-Tolerant Agent Deployment**: Deploying certain agents or components of the multi-agent system on spot instances, while ensuring that these components can be rapidly and seamlessly replaced or migrated in the event of instance preemption.
+- **Batch Workload Offloading**: Offloading batch processing workloads or non-time-sensitive tasks to spot instances, leveraging their cost-effectiveness while minimizing the impact of potential disruptions.
+- **Hybrid Provisioning**: Implementing a hybrid approach that combines on-demand or reserved instances for mission-critical components with spot instances for more flexible or elastic workloads.
+
+Effective spot instance utilization requires careful architectural considerations to ensure fault tolerance and minimize the impact of potential disruptions, as well as robust monitoring and automation capabilities to seamlessly replace or migrate workloads in response to instance preemption events.
+
+### Serverless Deployments
+
+Serverless computing platforms, such as AWS Lambda, Google Cloud Functions, or Azure Functions, offer a compelling alternative to traditional server-based deployments. By automatically scaling compute resources based on real-time demand and charging only for the resources consumed, serverless architectures can provide significant cost savings and operational simplicity.
+
+Leveraging serverless deployments for multi-agent LLM systems can be achieved through:
+
+- **Function-as-a-Service (FaaS) Agents**: Deploying individual agents or components of the multi-agent system as serverless functions, allowing for rapid and automatic scaling in response to fluctuating workloads.
+- **Event-Driven Architectures**: Designing the multi-agent system to operate in an event-driven manner, with agents triggered and executed in response to specific events or data ingestion, aligning with the serverless execution model.
+- **Hybrid Deployments**: Combining serverless components with traditional server-based components, leveraging the strengths and cost advantages of each deployment model for different aspects of the multi-agent system.
+
+Adopting serverless architectures requires careful consideration of factors such as execution duration limits, cold start latencies, and integration with other components of the multi-agent deployment. However, when implemented effectively, serverless deployments can provide unparalleled scalability, cost-efficiency, and operational simplicity for dynamic, event-driven workloads.
+
+
+## Principle 5: Employ Selective Execution
+---------------------------------------
+
+Not every input or request within a multi-agent LLM deployment requires the full execution of all agents or the complete processing pipeline. Selectively invoking agents or tasks based on input characteristics or intermediate results can significantly optimize performance by avoiding unnecessary computation and resource consumption.
+
+### Input Filtering
+
+Implementing input filtering mechanisms allows enterprises to reject or bypass certain inputs before they are processed by the multi-agent system. This can be achieved through techniques such as:
+
+- **Blacklisting/Whitelisting**: Maintaining lists of inputs (e.g., specific phrases, URLs, or content types) that should be automatically rejected or allowed, based on predefined criteria.
+- **Rules-Based Filtering**: Defining a set of rules or heuristics to assess the suitability or relevance of an input for further processing, based on factors such as language, content, or metadata.
+- **Confidence Thresholding**: Leveraging pre-processing agents or models to assess the likelihood that an input is relevant or valuable, and filtering out inputs that fall below a predetermined confidence threshold.
+
+Effective input filtering requires careful consideration of the specific requirements, constraints, and objectives of your multi-agent deployment, as well as ongoing monitoring and adjustment of filtering rules and thresholds to maintain optimal performance and accuracy.
+
+### Early Stopping
+
+In many multi-agent LLM deployments, intermediate results or predictions generated by early-stage agents can be used to determine whether further processing is required or valuable. Early stopping mechanisms allow enterprises to terminate execution pipelines when specific conditions or thresholds are met, avoiding unnecessary downstream processing.
+
+Techniques for implementing early stopping include:
+
+- **Confidence-Based Stopping**: Monitoring the confidence scores or probabilities associated with intermediate results, and terminating execution if a predefined confidence threshold is exceeded.
+- **Exception-Based Stopping**: Defining specific intermediate results or conditions that indicate that further processing is unnecessary or undesirable, and terminating execution upon encountering these exceptions.
+- **Adaptive Stopping**: Employing machine learning models or reinforcement learning agents to dynamically determine when to terminate execution based on learned patterns and trade-offs between accuracy, latency, and resource consumption.
+
+Effective early stopping requires a deep understanding of the interdependencies and decision points within your multi-agent workflow, as well as careful tuning and monitoring to ensure that stopping conditions are calibrated to maintain an optimal balance between performance and accuracy.
+
+### Conditional Branching
+
+Rather than executing a linear, fixed pipeline of agents, conditional branching allows multi-agent systems to dynamically invoke different agents or execution paths based on input characteristics or intermediate results. This can significantly optimize resource utilization by ensuring that only the necessary agents and processes are executed for a given input or scenario.
+
+Implementing conditional branching involves:
+
+- **Decision Points**: Identifying key points within the multi-agent workflow where branching decisions can be made based on input or intermediate data.
+- **Branching Logic**: Defining the rules, conditions, or machine learning models that will evaluate the input or intermediate data and determine the appropriate execution path or agent invocation.
+- **Execution Routing**: Integrating mechanisms to dynamically route inputs or intermediate data to the appropriate agents or processes based on the branching decision.
+
+Conditional branching can be particularly effective in scenarios where inputs or workloads exhibit distinct characteristics or require significantly different processing pipelines, allowing enterprises to optimize resource allocation and minimize unnecessary computation.
+
+## Principle 6: Optimize User Experience
+-------------------------------------
+
+While many of the principles outlined in this guide focus on optimizing backend performance and resource utilization, delivering an exceptional user experience is also a critical consideration for enterprise multi-agent LLM deployments. By minimizing perceived wait times and providing real-time progress updates, organizations can ensure that users remain engaged and satisfied, even during periods of high workload or resource constraints.
+
+### Streaming Responses
+
+One of the most effective techniques for minimizing perceived wait times is to stream responses or outputs to users as they are generated, rather than waiting for the entire response to be completed before delivering it. This approach is particularly valuable in conversational agents, document summarization, or other scenarios where outputs can be naturally segmented and delivered incrementally.
+
+Implementing streaming responses requires:
+
+- **Partial Output Generation**: Modifying agents or models to generate and emit outputs in a streaming or incremental fashion, rather than producing the entire output in a single, monolithic operation.
+- **Streaming Data Pipelines**: Integrating streaming data pipelines and message queues to enable the efficient and reliable transmission of partial outputs from agents to user-facing interfaces or applications.
+- **Incremental Rendering**: Updating user interfaces and displays to incrementally render or populate with newly streamed output segments, providing a seamless and real-time experience for end-users.
+
+By delivering outputs as they are generated, streaming responses can significantly improve the perceived responsiveness and interactivity of multi-agent LLM deployments, even in scenarios where the overall processing time remains unchanged.
+
+### Progress Indicators
+
+In cases where streaming responses may not be feasible or appropriate, providing visual or textual indicators of ongoing processing and progress can help manage user expectations and improve the overall experience. Progress indicators can be implemented through techniques such as:
+
+- **Loader Animations**: Displaying simple animations or spinner graphics to indicate that processing is underway and provide a sense of activity and progress.
+- **Progress Bars**: Rendering progress bars or completion indicators based on estimated or actual progress through multi-agent workflows or processing pipelines.
+- **Status Updates**: Periodically updating user interfaces with textual status messages or descriptions of the current processing stage, providing users with a more detailed understanding of the system's activities.
+
+Effective progress indicators require careful integration with monitoring and telemetry capabilities to accurately track and communicate the progress of multi-agent workflows, as well as thoughtful user experience design to ensure that indicators are clear, unobtrusive, and aligned with user expectations.
+
+### Chunked Delivery
+
+In scenarios where outputs or responses cannot be effectively streamed or rendered incrementally, chunked delivery can provide a middle ground between delivering the entire output at once and streaming individual tokens or characters. By breaking larger outputs into smaller, more manageable chunks and delivering them individually, enterprises can improve perceived responsiveness and provide a more engaging user experience.
+
+Implementing chunked delivery involves:
+
+- **Output Segmentation**: Identifying logical breakpoints or segmentation boundaries within larger outputs, such as paragraphs, sections, or other structural elements.
+- **Chunking Mechanisms**: Integrating mechanisms to efficiently break outputs into individual chunks and transmit or render them sequentially, with minimal delay between chunks.
+- **Chunk Rendering**: Updating user interfaces or displays to seamlessly render or append new output chunks as they are received, providing a sense of continuous progress and minimizing the perception of extended waiting periods.
+
+Chunked delivery can be particularly effective in scenarios where outputs are inherently structured or segmented, such as document generation, report creation, or multi-step instructions or workflows.
+
+## Principle 7: Leverage Hybrid Approaches
+---------------------------------------
+
+While multi-agent LLM architectures offer numerous advantages, they should not be viewed as a one-size-fits-all solution. In many cases, combining LLM agents with traditional techniques, optimized components, or external services can yield superior performance, cost-effectiveness, and resource utilization compared to a pure LLM-based approach.
+
+### Task Offloading
+
+Certain tasks or subtasks within a larger multi-agent workflow may be more efficiently handled by dedicated, optimized components or external services, rather than relying solely on LLM agents. Task offloading involves identifying these opportunities and integrating the appropriate components or services into the overall architecture.
+
+Examples of task offloading in multi-agent LLM deployments include:
+
+- **Regular Expression Matching**: Offloading pattern matching or text extraction tasks to dedicated regular expression engines, which can often outperform LLM-based approaches in terms of speed and efficiency.
+- **Structured Data Processing**: Leveraging specialized data processing engines or databases for tasks involving structured data, such as querying, filtering, or transforming tabular or relational data.
+- **External APIs and Services**: Integrating with external APIs or cloud services for specific tasks, such as speech recognition, translation, or knowledge base lookup, leveraging the specialized capabilities and optimizations of these dedicated services.
+
+Effective task offloading requires a thorough understanding of the strengths and limitations of both LLM agents and traditional components, as well as careful consideration of integration points, data flows, and performance trade-offs within the overall multi-agent architecture.
+
+### Caching and Indexing
+
+While LLMs excel at generating dynamic, context-aware outputs, they can be less efficient when dealing with static or frequently accessed information or knowledge. Caching and indexing strategies can help mitigate this limitation by minimizing redundant LLM processing and enabling faster retrieval of commonly accessed data.
+
+Techniques for leveraging caching and indexing in multi-agent LLM deployments include:
+
+**Output Caching**: Caching the outputs or responses generated by LLM agents, allowing for rapid retrieval and reuse in cases where the same or similar input is encountered in the future.
+
+**Knowledge Base Indexing**: Indexing domain-specific knowledge bases, data repositories, or other static information sources using traditional search and information retrieval techniques. This allows LLM agents to efficiently query and incorporate relevant information into their outputs, without needing to process or generate this content from scratch.
+
+**Contextual Caching**: Caching not only outputs but also the contextual information and intermediate results generated during multi-agent workflows. This enables more efficient reuse and continuation of previous processing in scenarios where contexts are long-lived or recurring.
+
+Implementing effective caching and indexing strategies requires careful consideration of data freshness, consistency, and invalidation mechanisms, as well as seamless integration with LLM agents and multi-agent workflows to ensure that cached or indexed data is appropriately leveraged and updated.
+
+### Pre-computation and Lookup
+
+In certain scenarios, especially those involving constrained or well-defined inputs, pre-computing and lookup strategies can be leveraged to minimize or entirely avoid the need for real-time LLM processing. By generating and storing potential outputs or responses in advance, enterprises can significantly improve performance and reduce resource consumption.
+
+Approaches for pre-computation and lookup include:
+
+**Output Pre-generation**: For inputs or scenarios with a limited set of potential outputs, pre-generating and storing all possible responses, allowing for rapid retrieval and delivery without the need for real-time LLM execution.
+
+**Retrieval-Based Responses**: Developing retrieval models or techniques that can identify and surface pre-computed or curated responses based on input characteristics, leveraging techniques such as nearest neighbor search, embedding-based retrieval, or example-based generation.
+
+**Hybrid Approaches**: Combining pre-computed or retrieved responses with real-time LLM processing, allowing for the generation of dynamic, context-aware content while still leveraging pre-computed components to optimize performance and resource utilization.
+
+Effective implementation of pre-computation and lookup strategies requires careful analysis of input patterns, output distributions, and potential performance gains, as well as robust mechanisms for managing and updating pre-computed data as application requirements or domain knowledge evolves.
+
+## Conclusion
+----------
+
+As enterprises increasingly embrace the transformative potential of large language models, optimizing the performance, scalability, and cost-effectiveness of these deployments has become a critical imperative. Multi-agent architectures, which coordinate the collective capabilities of multiple specialized LLM agents, offer a powerful paradigm for addressing these challenges.
+
+By implementing the seven principles outlined in this guide -- distributing token processing, optimizing agent communication, leveraging agent specialization, implementing dynamic scaling, employing selective execution, optimizing user experience, and leveraging hybrid approaches -- organizations can unlock the full potential of multi-agent LLM deployments.
+
+However, realizing these benefits requires a strategic and holistic approach that accounts for the unique requirements, constraints, and objectives of each enterprise. From task-specific optimizations and domain adaptation to dynamic scaling and user experience considerations, maximizing the performance of multi-agent LLM systems demands a deep understanding of the underlying technologies, as well as the ability to navigate the inherent complexities of these sophisticated architectures.
+
+To learn more about how Swarm Corporation can assist your organization in architecting, deploying, and optimizing high-performance multi-agent LLM solutions, we invite you to book a consultation with one of our agent specialists. Visit [our scheduling page](https://calendly.com/swarm-corp/30min) to schedule a 30-minute call and explore how our expertise and cutting-edge technologies can drive transformative outcomes for your business.
+
+In the rapidly evolving landscape of artificial intelligence and natural language processing, staying ahead of the curve is essential. Partner with Swarm Corporation, and unlock the full potential of multi-agent LLM deployments, today.
+
+[Book a call with us now:](https://calendly.com/swarm-corp/30min)
\ No newline at end of file
diff --git a/docs/swarms_memory/chromadb.md b/docs/swarms_memory/chromadb.md
new file mode 100644
index 00000000..188e024c
--- /dev/null
+++ b/docs/swarms_memory/chromadb.md
@@ -0,0 +1,141 @@
+# ChromaDB Documentation
+
+ChromaDB is a specialized module designed to facilitate the storage and retrieval of documents using the ChromaDB system. It offers functionalities for adding documents to a local ChromaDB collection and querying this collection based on provided query texts. This module integrates with the ChromaDB client to create and manage collections, leveraging various configurations for optimizing the storage and retrieval processes.
+
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|----------------|-------------------|----------|-------------------------------------------------------------|
+| `metric` | `str` | `"cosine"`| The similarity metric to use for the collection. |
+| `output_dir` | `str` | `"swarms"`| The name of the collection to store the results in. |
+| `limit_tokens` | `Optional[int]` | `1000` | The maximum number of tokens to use for the query. |
+| `n_results` | `int` | `1` | The number of results to retrieve. |
+| `docs_folder` | `Optional[str]` | `None` | The folder containing documents to be added to the collection.|
+| `verbose` | `bool` | `False` | Flag to enable verbose logging for debugging. |
+| `*args` | `tuple` | `()` | Additional positional arguments. |
+| `**kwargs` | `dict` | `{}` | Additional keyword arguments. |
+
+#### Methods
+
+| Method | Description |
+|-----------------------|----------------------------------------------------------|
+| `__init__` | Initializes the ChromaDB instance with specified parameters. |
+| `add` | Adds a document to the ChromaDB collection. |
+| `query` | Queries documents from the ChromaDB collection based on the query text. |
+| `traverse_directory` | Traverses the specified directory to add documents to the collection. |
+
+
+## Usage
+
+```python
+from swarms_memory import ChromaDB
+
+chromadb = ChromaDB(
+ metric="cosine",
+ output_dir="results",
+ limit_tokens=1000,
+ n_results=2,
+ docs_folder="path/to/docs",
+ verbose=True,
+)
+```
+
+### Adding Documents
+
+The `add` method allows you to add a document to the ChromaDB collection. It generates a unique ID for each document and adds it to the collection.
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|---------------|--------|---------|---------------------------------------------|
+| `document` | `str` | - | The document to be added to the collection. |
+| `*args` | `tuple`| `()` | Additional positional arguments. |
+| `**kwargs` | `dict` | `{}` | Additional keyword arguments. |
+
+#### Returns
+
+| Type | Description |
+|-------|--------------------------------------|
+| `str` | The ID of the added document. |
+
+#### Example
+
+```python
+task = "example_task"
+result = "example_result"
+result_id = chromadb.add(document="This is a sample document.")
+print(f"Document ID: {result_id}")
+```
+
+### Querying Documents
+
+The `query` method allows you to retrieve documents from the ChromaDB collection based on the provided query text.
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|-------------|--------|---------|----------------------------------------|
+| `query_text`| `str` | - | The query string to search for. |
+| `*args` | `tuple`| `()` | Additional positional arguments. |
+| `**kwargs` | `dict` | `{}` | Additional keyword arguments. |
+
+#### Returns
+
+| Type | Description |
+|-------|--------------------------------------|
+| `str` | The retrieved documents as a string. |
+
+#### Example
+
+```python
+query_text = "search term"
+results = chromadb.query(query_text=query_text)
+print(f"Retrieved Documents: {results}")
+```
+
+### Traversing Directory
+
+The `traverse_directory` method traverses through every file in the specified directory and its subdirectories, adding the contents of each file to the ChromaDB collection.
+
+#### Example
+
+```python
+chromadb.traverse_directory()
+```
+
+## Additional Information and Tips
+
+### Verbose Logging
+
+Enable the `verbose` flag during initialization to get detailed logs of the operations, which is useful for debugging.
+
+```python
+chromadb = ChromaDB(verbose=True)
+```
+
+### Handling Large Documents
+
+When dealing with large documents, consider using the `limit_tokens` parameter to restrict the number of tokens processed in a single query.
+
+```python
+chromadb = ChromaDB(limit_tokens=500)
+```
+
+### Optimizing Query Performance
+
+Use the appropriate similarity metric (`metric` parameter) that suits your use case for optimal query performance.
+
+```python
+chromadb = ChromaDB(metric="euclidean")
+```
+
+## References and Resources
+
+- [ChromaDB Documentation](https://chromadb.io/docs)
+- [Python UUID Module](https://docs.python.org/3/library/uuid.html)
+- [Python os Module](https://docs.python.org/3/library/os.html)
+- [Python logging Module](https://docs.python.org/3/library/logging.html)
+- [dotenv Package](https://pypi.org/project/python-dotenv/)
+
+By following this documentation, users can effectively utilize the ChromaDB module for managing document storage and retrieval in their applications.
\ No newline at end of file
diff --git a/docs/swarms_memory/faiss.md b/docs/swarms_memory/faiss.md
new file mode 100644
index 00000000..d4c143f5
--- /dev/null
+++ b/docs/swarms_memory/faiss.md
@@ -0,0 +1,232 @@
+# FAISSDB: Documentation
+
+The `FAISSDB` class is a highly customizable wrapper for the FAISS (Facebook AI Similarity Search) library, designed for efficient similarity search and clustering of dense vectors. This class facilitates the creation of a Retrieval-Augmented Generation (RAG) system by providing methods to add documents to a FAISS index and query the index for similar documents. It supports custom embedding models, preprocessing functions, and other customizations to fit various use cases.
+
+
+### Parameters
+
+| Parameter | Type | Default | Description |
+|------------------------|--------------------------------------------------|-------------------------------|-----------------------------------------------------------------------------|
+| `dimension` | `int` | `768` | Dimension of the document embeddings. |
+| `index_type` | `str` | `'Flat'` | Type of FAISS index to use (`'Flat'` or `'IVF'`). |
+| `embedding_model` | `Optional[Any]` | `None` | Custom embedding model. |
+| `embedding_function` | `Optional[Callable[[str], List[float]]]` | `None` | Custom function to generate embeddings from text. |
+| `preprocess_function` | `Optional[Callable[[str], str]]` | `None` | Custom function to preprocess text before embedding. |
+| `postprocess_function` | `Optional[Callable[[List[Dict[str, Any]]], List[Dict[str, Any]]]]` | `None` | Custom function to postprocess the results. |
+| `metric` | `str` | `'cosine'` | Distance metric for FAISS index (`'cosine'` or `'l2'`). |
+| `logger_config` | `Optional[Dict[str, Any]]` | `None` | Configuration for the logger. |
+
+## Methods
+
+### `__init__`
+
+Initializes the FAISSDB instance, setting up the logger, creating the FAISS index, and configuring custom functions if provided.
+
+### `add`
+
+Adds a document to the FAISS index.
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|-----------|-------------------------|---------|-------------------------------------------------|
+| `doc` | `str` | None | The document to be added. |
+| `metadata`| `Optional[Dict[str, Any]]` | None | Additional metadata for the document. |
+
+#### Example Usage
+
+```python
+db = FAISSDB(dimension=768)
+db.add("This is a sample document.", {"category": "sample"})
+```
+
+### `query`
+
+Queries the FAISS index for similar documents.
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `query` | `str` | None | The query string. |
+| `top_k` | `int` | `5` | The number of top results to return. |
+
+#### Returns
+
+| Type | Description |
+|------|-------------|
+| `List[Dict[str, Any]]` | A list of dictionaries containing the top_k most similar documents. |
+
+#### Example Usage
+
+```python
+results = db.query("What is artificial intelligence?")
+for result in results:
+ print(f"Score: {result['score']}, Text: {result['metadata']['text']}")
+```
+
+## Internal Methods
+
+### `_setup_logger`
+
+Sets up the logger with the given configuration.
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|-----------|-------------------------|---------|------------------------------------------|
+| `config` | `Optional[Dict[str, Any]]` | None | Configuration for the logger. |
+
+### `_create_index`
+
+Creates and returns a FAISS index based on the specified type and metric.
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|-----------|-------|---------|----------------------------------------------|
+| `index_type` | `str` | 'Flat' | Type of FAISS index to use. |
+| `metric` | `str` | 'cosine' | Distance metric for FAISS index. |
+
+#### Returns
+
+| Type | Description |
+|------|------------------|
+| `faiss.Index` | FAISS index instance. |
+
+### `_default_embedding_function`
+
+Default embedding function using the SentenceTransformer model.
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|----------------------|
+| `text` | `str` | None | The input text to embed. |
+
+#### Returns
+
+| Type | Description |
+|------|-------------------|
+| `List[float]` | Embedding vector for the input text. |
+
+### `_default_preprocess_function`
+
+Default preprocessing function.
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|--------------------|
+| `text` | `str` | None | The input text to preprocess. |
+
+#### Returns
+
+| Type | Description |
+|------|------------------|
+| `str` | Preprocessed text. |
+
+### `_default_postprocess_function`
+
+Default postprocessing function.
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|-----------|------|---------|--------------------------------|
+| `results` | `List[Dict[str, Any]]` | None | The results to postprocess. |
+
+#### Returns
+
+| Type | Description |
+|------|--------------------------|
+| `List[Dict[str, Any]]` | Postprocessed results. |
+
+## Usage Examples
+
+### Example 1: Basic Usage
+
+```python
+# Initialize the FAISSDB instance
+db = FAISSDB(dimension=768, index_type="Flat")
+
+# Add documents to the FAISS index
+db.add("This is a document about AI.", {"category": "AI"})
+db.add("Python is great for data science.", {"category": "Programming"})
+
+# Query the FAISS index
+results = db.query("Tell me about AI")
+for result in results:
+ print(f"Score: {result['score']}, Text: {result['metadata']['text']}")
+```
+
+### Example 2: Custom Functions
+
+```python
+from transformers import AutoTokenizer, AutoModel
+import torch
+
+# Custom embedding function using a HuggingFace model
+def custom_embedding_function(text: str) -> List[float]:
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ model = AutoModel.from_pretrained("bert-base-uncased")
+ inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
+ with torch.no_grad():
+ outputs = model(**inputs)
+ embeddings = outputs.last_hidden_state.mean(dim=1).squeeze().tolist()
+ return embeddings
+
+# Custom preprocessing function
+def custom_preprocess(text: str) -> str:
+ return text.lower().strip()
+
+# Custom postprocessing function
+def custom_postprocess(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+ for result in results:
+ result["custom_score"] = result["score"] * 2 # Example modification
+ return results
+
+# Initialize the FAISSDB instance with custom functions
+db = FAISSDB(
+ dimension=768,
+ index_type="Flat",
+ embedding_function=custom_embedding_function,
+ preprocess_function=custom_preprocess,
+ postprocess_function=custom_postprocess,
+ metric="cosine",
+ logger_config={
+ "handlers": [
+ {"sink": "custom_faiss_rag_wrapper.log", "rotation": "1 GB"},
+ {"sink": lambda msg: print(f"Custom log: {msg}", end="")}
+ ],
+ },
+)
+
+# Add documents to the FAISS index
+db.add("This is a document about machine learning.", {"category": "ML"})
+db.add("Python is a versatile programming language.", {"category": "Programming"})
+
+# Query the FAISS index
+results = db.query("Explain machine learning")
+for result in results:
+ print(f"Score: {result['score']}, Custom Score: {result['custom_score']}, Text: {result['metadata']['text']}")
+```
+
+## Additional Information and Tips
+
+- Ensure that the dimension of the document embeddings matches the dimension specified during the initialization of the FAISSDB instance.
+- Use custom embedding functions to leverage domain-specific models for generating embeddings.
+- Custom preprocessing and postprocessing functions can help
+  tailor the text processing and
+  result formatting to specific needs.
+- FAISS supports various types of indices; choose the one that best fits the application requirements (e.g., `Flat` for brute-force search, `IVF` for faster search with some accuracy trade-off).
+- Properly configure the logger to monitor and debug the operations of the FAISSDB instance.
+
+## References and Resources
+
+- [FAISS GitHub Repository](https://github.com/facebookresearch/faiss)
+- [Sentence Transformers Documentation](https://www.sbert.net/)
+- [Loguru Documentation](https://loguru.readthedocs.io/en/stable/)
+- [HuggingFace Transformers](https://huggingface.co/transformers/)
+
+By following this documentation, users can effectively utilize the `FAISSDB` class for various similarity search and document retrieval tasks, customizing it to their specific needs through the provided hooks and functions.
\ No newline at end of file
diff --git a/docs/swarms_memory/index.md b/docs/swarms_memory/index.md
new file mode 100644
index 00000000..3d96b4ef
--- /dev/null
+++ b/docs/swarms_memory/index.md
@@ -0,0 +1,172 @@
+# Announcing the Release of Swarms-Memory Package: Your Gateway to Efficient RAG Systems
+
+
+We are thrilled to announce the release of the Swarms-Memory package, a powerful and easy-to-use toolkit designed to facilitate the implementation of Retrieval-Augmented Generation (RAG) systems. Whether you're a seasoned AI practitioner or just starting out, Swarms-Memory provides the tools you need to integrate high-performance, reliable RAG systems into your applications seamlessly.
+
+In this blog post, we'll walk you through getting started with the Swarms-Memory package, covering installation, usage examples, and a detailed overview of supported RAG systems like Pinecone and ChromaDB. Let's dive in!
+
+## What is Swarms-Memory?
+
+Swarms-Memory is a Python package that simplifies the integration of advanced RAG systems into your projects. It supports multiple databases optimized for AI tasks, providing you with the flexibility to choose the best system for your needs. With Swarms-Memory, you can effortlessly handle large-scale AI tasks, vector searches, and more.
+
+### Key Features
+
+- **Easy Integration**: Quickly set up and start using powerful RAG systems.
+- **Customizable**: Define custom embedding, preprocessing, and postprocessing functions.
+- **Flexible**: Supports multiple RAG systems like ChromaDB and Pinecone, with more coming soon.
+- **Scalable**: Designed to handle large-scale AI tasks efficiently.
+
+## Supported RAG Systems
+
+Here's an overview of the RAG systems currently supported by Swarms-Memory:
+
+| RAG System | Status | Description | Documentation | Website |
+|------------|--------------|------------------------------------------------------------------------------------------|---------------------------|-----------------|
+| ChromaDB | Available | A high-performance, distributed database optimized for handling large-scale AI tasks. | [ChromaDB Documentation](https://chromadb.com/docs) | [ChromaDB](https://chromadb.com) |
+| Pinecone | Available | A fully managed vector database for adding vector search to your applications. | [Pinecone Documentation](https://pinecone.io/docs) | [Pinecone](https://pinecone.io) |
+| Redis | Coming Soon | An open-source, in-memory data structure store, used as a database, cache, and broker. | [Redis Documentation](https://redis.io/documentation) | [Redis](https://redis.io) |
+| Faiss | Coming Soon | A library for efficient similarity search and clustering of dense vectors by Facebook AI. | [Faiss Documentation](https://faiss.ai) | [Faiss](https://faiss.ai) |
+| HNSW | Coming Soon | A graph-based algorithm for approximate nearest neighbor search, known for speed. | [HNSW Documentation](https://hnswlib.github.io/hnswlib) | [HNSW](https://hnswlib.github.io/hnswlib) |
+
+## Getting Started
+
+### Requirements
+
+Before you begin, ensure you have the following:
+
+- Python 3.10
+- `.env` file with your respective API keys (e.g., `PINECONE_API_KEY`)
+
+### Installation
+
+You can install the Swarms-Memory package using pip:
+
+```bash
+$ pip install swarms-memory
+```
+
+### Usage Examples
+
+#### Pinecone
+
+Here's a step-by-step guide on how to use Pinecone with Swarms-Memory:
+
+1. **Import Required Libraries**:
+
+```python
+from typing import List, Dict, Any
+from swarms_memory import PineconeMemory
+```
+
+2. **Define Custom Functions**:
+
+```python
+from transformers import AutoTokenizer, AutoModel
+import torch
+
+# Custom embedding function using a HuggingFace model
+def custom_embedding_function(text: str) -> List[float]:
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ model = AutoModel.from_pretrained("bert-base-uncased")
+ inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
+ with torch.no_grad():
+ outputs = model(**inputs)
+ embeddings = outputs.last_hidden_state.mean(dim=1).squeeze().tolist()
+ return embeddings
+
+# Custom preprocessing function
+def custom_preprocess(text: str) -> str:
+ return text.lower().strip()
+
+# Custom postprocessing function
+def custom_postprocess(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+ for result in results:
+ result["custom_score"] = result["score"] * 2 # Example modification
+ return results
+```
+
+3. **Initialize the Wrapper with Custom Functions**:
+
+```python
+wrapper = PineconeMemory(
+ api_key="your-api-key",
+ environment="your-environment",
+ index_name="your-index-name",
+ embedding_function=custom_embedding_function,
+ preprocess_function=custom_preprocess,
+ postprocess_function=custom_postprocess,
+ logger_config={
+ "handlers": [
+ {"sink": "custom_rag_wrapper.log", "rotation": "1 GB"},
+ {"sink": lambda msg: print(f"Custom log: {msg}", end="")},
+ ],
+ },
+)
+```
+
+4. **Add Documents and Query**:
+
+```python
+# Adding documents
+wrapper.add("This is a sample document about artificial intelligence.", {"category": "AI"})
+wrapper.add("Python is a popular programming language for data science.", {"category": "Programming"})
+
+# Querying
+results = wrapper.query("What is AI?", filter={"category": "AI"})
+for result in results:
+ print(f"Score: {result['score']}, Custom Score: {result['custom_score']}, Text: {result['metadata']['text']}")
+```
+
+#### ChromaDB
+
+Using ChromaDB with Swarms-Memory is straightforward. Here's how:
+
+1. **Import ChromaDB**:
+
+```python
+from swarms_memory import ChromaDB
+```
+
+2. **Initialize ChromaDB**:
+
+```python
+chromadb = ChromaDB(
+ metric="cosine",
+ output_dir="results",
+ limit_tokens=1000,
+ n_results=2,
+ docs_folder="path/to/docs",
+ verbose=True,
+)
+```
+
+3. **Add and Query Documents**:
+
+```python
+# Add a document
+doc_id = chromadb.add("This is a test document.")
+
+# Query the document
+result = chromadb.query("This is a test query.")
+
+# Traverse a directory
+chromadb.traverse_directory()
+
+# Display the result
+print(result)
+```
+
+## Join the Community
+
+We're excited to see how you leverage Swarms-Memory in your projects! Join our community on Discord to share your experiences, ask questions, and stay updated on the latest developments.
+
+- **🐦 Twitter**: [Follow us on Twitter](https://twitter.com/swarms_platform)
+- **📢 Discord**: [Join the Agora Discord](https://discord.gg/agora)
+- **Swarms Platform**: [Visit our website](https://swarms.ai)
+- **π Documentation**: [Read the Docs](https://docs.swarms.ai)
+
+## Conclusion
+
+The Swarms-Memory package brings a new level of ease and efficiency to building and managing RAG systems. With support for leading databases like ChromaDB and Pinecone, it's never been easier to integrate powerful, scalable AI solutions into your projects. We can't wait to see what you'll create with Swarms-Memory!
+
+For more detailed usage examples and documentation, visit our [GitHub repository](https://github.com/swarms-ai/swarms-memory) and start exploring today!
diff --git a/docs/swarms_memory/pinecone.md b/docs/swarms_memory/pinecone.md
new file mode 100644
index 00000000..edc66e7e
--- /dev/null
+++ b/docs/swarms_memory/pinecone.md
@@ -0,0 +1,179 @@
+# PineconeMemory Documentation
+
+The `PineconeMemory` class provides a robust interface for integrating Pinecone-based Retrieval-Augmented Generation (RAG) systems. It allows for adding documents to a Pinecone index and querying the index for similar documents. The class supports custom embedding models, preprocessing functions, and other customizations to suit different use cases.
+
+
+
+#### Parameters
+
+| Parameter | Type | Default | Description |
+|----------------------|-----------------------------------------------|-----------------------------------|------------------------------------------------------------------------------------------------------|
+| `api_key` | `str` | - | Pinecone API key. |
+| `environment` | `str` | - | Pinecone environment. |
+| `index_name` | `str` | - | Name of the Pinecone index to use. |
+| `dimension` | `int` | `768` | Dimension of the document embeddings. |
+| `embedding_model` | `Optional[Any]` | `None` | Custom embedding model. Defaults to `SentenceTransformer('all-MiniLM-L6-v2')`. |
+| `embedding_function` | `Optional[Callable[[str], List[float]]]` | `None` | Custom embedding function. Defaults to `_default_embedding_function`. |
+| `preprocess_function`| `Optional[Callable[[str], str]]` | `None` | Custom preprocessing function. Defaults to `_default_preprocess_function`. |
+| `postprocess_function`| `Optional[Callable[[List[Dict[str, Any]]], List[Dict[str, Any]]]]`| `None` | Custom postprocessing function. Defaults to `_default_postprocess_function`. |
+| `metric` | `str` | `'cosine'` | Distance metric for Pinecone index. |
+| `pod_type` | `str` | `'p1'` | Pinecone pod type. |
+| `namespace` | `str` | `''` | Pinecone namespace. |
+| `logger_config` | `Optional[Dict[str, Any]]` | `None` | Configuration for the logger. Defaults to logging to `rag_wrapper.log` and console output. |
+
+### Methods
+
+#### `_setup_logger`
+
+```python
+def _setup_logger(self, config: Optional[Dict[str, Any]] = None)
+```
+
+Sets up the logger with the given configuration.
+
+#### `_default_embedding_function`
+
+```python
+def _default_embedding_function(self, text: str) -> List[float]
+```
+
+Generates embeddings using the default SentenceTransformer model.
+
+#### `_default_preprocess_function`
+
+```python
+def _default_preprocess_function(self, text: str) -> str
+```
+
+Preprocesses the input text by stripping whitespace.
+
+#### `_default_postprocess_function`
+
+```python
+def _default_postprocess_function(self, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]
+```
+
+Postprocesses the query results.
+
+#### `add`
+
+Adds a document to the Pinecone index.
+
+| Parameter | Type | Default | Description |
+|-----------|-----------------------|---------|-----------------------------------------------|
+| `doc` | `str` | - | The document to be added. |
+| `metadata`| `Optional[Dict[str, Any]]` | `None` | Additional metadata for the document. |
+
+#### `query`
+
+Queries the Pinecone index for similar documents.
+
+| Parameter | Type | Default | Description |
+|-----------|-------------------------|---------|-----------------------------------------------|
+| `query` | `str` | - | The query string. |
+| `top_k` | `int` | `5` | The number of top results to return. |
+| `filter` | `Optional[Dict[str, Any]]` | `None` | Metadata filter for the query. |
+
+## Usage
+
+
+The `PineconeMemory` class is initialized with the necessary parameters to configure Pinecone and the embedding model. It supports a variety of custom configurations to suit different needs.
+
+#### Example
+
+```python
+from swarms_memory import PineconeMemory
+
+# Initialize PineconeMemory
+memory = PineconeMemory(
+ api_key="your-api-key",
+ environment="us-west1-gcp",
+ index_name="example-index",
+ dimension=768
+)
+```
+
+### Adding Documents
+
+Documents can be added to the Pinecone index using the `add` method. The method accepts a document string and optional metadata.
+
+#### Example
+
+```python
+doc = "This is a sample document to be added to the Pinecone index."
+metadata = {"author": "John Doe", "date": "2024-07-08"}
+
+memory.add(doc, metadata)
+```
+
+### Querying Documents
+
+The `query` method allows for querying the Pinecone index for similar documents based on a query string. It returns the top `k` most similar documents.
+
+#### Example
+
+```python
+query = "Sample query to find similar documents."
+results = memory.query(query, top_k=5)
+
+for result in results:
+ print(result)
+```
+
+## Additional Information and Tips
+
+### Custom Embedding and Preprocessing Functions
+
+Custom embedding and preprocessing functions can be provided during initialization to tailor the document processing to specific requirements.
+
+#### Example
+
+```python
+def custom_embedding_function(text: str) -> List[float]:
+ # Custom embedding logic
+ return [0.1, 0.2, 0.3]
+
+def custom_preprocess_function(text: str) -> str:
+ # Custom preprocessing logic
+ return text.lower()
+
+memory = PineconeMemory(
+ api_key="your-api-key",
+ environment="us-west1-gcp",
+ index_name="example-index",
+ embedding_function=custom_embedding_function,
+ preprocess_function=custom_preprocess_function
+)
+```
+
+### Logger Configuration
+
+The logger can be configured to suit different logging needs. The default configuration logs to a file and the console.
+
+#### Example
+
+```python
+logger_config = {
+ "handlers": [
+ {"sink": "custom_log.log", "rotation": "1 MB"},
+ {"sink": lambda msg: print(msg, end="")},
+ ]
+}
+
+memory = PineconeMemory(
+ api_key="your-api-key",
+ environment="us-west1-gcp",
+ index_name="example-index",
+ logger_config=logger_config
+)
+```
+
+## References and Resources
+
+- [Pinecone Documentation](https://docs.pinecone.io/)
+- [SentenceTransformers Documentation](https://www.sbert.net/)
+- [Loguru Documentation](https://loguru.readthedocs.io/en/stable/)
+
+For further exploration and examples, refer to the official documentation and resources provided by Pinecone, SentenceTransformers, and Loguru.
+
+This concludes the detailed documentation for the `PineconeMemory` class. The class offers a flexible and powerful interface for leveraging Pinecone's capabilities in retrieval-augmented generation systems. By supporting custom embeddings, preprocessing, and postprocessing functions, it can be tailored to a wide range of applications.
\ No newline at end of file
diff --git a/docs/swarms_platform/agents/agents_api.md b/docs/swarms_platform/agents/agents_api.md
new file mode 100644
index 00000000..6dab163a
--- /dev/null
+++ b/docs/swarms_platform/agents/agents_api.md
@@ -0,0 +1,217 @@
+# Agents API Documentation
+
+The `https://swarms.world/api/add-agent` endpoint allows users to add a new agent to the Swarms platform. This API accepts a POST request with a JSON body containing details of the agent, such as its name, description, use cases, language, tags and requirements. The request must be authenticated using an API key.
+
+## Endpoint: Add Agent
+
+- **URL:** `https://swarms.world/api/add-agent`
+- **Method:** POST
+- **Content-Type:** `application/json`
+- **Authorization:** Bearer token required in the header
+
+## Request Parameters
+
+The request body should be a JSON object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| -------------- | -------- | -------------------------------------------------------------------------- | -------- |
+| `name` | `string` | The name of the agent. | Yes |
+| `agent` | `string` | The agent text. | Yes |
+| `description` | `string` | A brief description of the agent. | Yes |
+| `language` | `string` | The agent's syntax language with a default of python | No |
+| `useCases` | `array` | An array of use cases, each containing a title and description. | Yes |
+| `requirements` | `array` | An array of requirements, each containing a package name and installation. | Yes |
+| `tags` | `string` | Comma-separated tags for the agent. | Yes |
+
+### `useCases` Structure
+
+Each use case in the `useCases` array should be an object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| ------------- | -------- | ------------------------------------ | -------- |
+| `title` | `string` | The title of the use case. | Yes |
+| `description` | `string` | A brief description of the use case. | Yes |
+
+### `requirements` Structure
+
+Each requirement in the `requirements` array should be an object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| -------------- | -------- | ------------------------------------ | -------- |
+| `package` | `string` | The name of the package. | Yes |
+| `installation` | `string` | Installation command for the package | Yes |
+
+## Example Usage
+
+### Python
+
+```python
+import requests
+import json
+import os
+
+
+url = "https://swarms.world/api/add-agent"
+
+headers = {
+ "Content-Type": "application/json",
+    "Authorization": f"Bearer {os.getenv('SWARMS_API_KEY')}"
+}
+
+data = {
+ "name": "Example Agent",
+ "agent": "This is an example agent from an API route.",
+ "description": "Description of the agent.",
+ "language": "python",
+ "useCases": [
+ {"title": "Use case 1", "description": "Description of use case 1"},
+ {"title": "Use case 2", "description": "Description of use case 2"}
+ ],
+ "requirements": [
+ {"package": "pip", "installation": "pip install"},
+ {"package": "pip3", "installation": "pip3 install"}
+ ],
+ "tags": "example, agent"
+}
+
+response = requests.post(url, headers=headers, data=json.dumps(data))
+print(response.json())
+```
+
+### Node.js
+
+```javascript
+const fetch = require("node-fetch");
+
+async function addAgentHandler() {
+ try {
+ const response = await fetch("https://swarms.world/api/add-agent", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: "Bearer {apiKey}",
+ },
+ body: JSON.stringify({
+ name: "Example Agent",
+ agent: "This is an example agent from an API route.",
+ description: "Description of the agent.",
+ language: "python",
+ useCases: [
+ { title: "Use case 1", description: "Description of use case 1" },
+ { title: "Use case 2", description: "Description of use case 2" },
+ ],
+ requirements: [
+ { package: "pip", installation: "pip install" },
+ { package: "pip3", installation: "pip3 install" },
+ ],
+ tags: "example, agent",
+ }),
+ });
+
+ const result = await response.json();
+ console.log(result);
+ } catch (error) {
+ console.error("An error has occurred", error);
+ }
+}
+
+addAgentHandler();
+```
+
+### Go
+
+```go
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+func main() {
+ url := "https://swarms.world/api/add-agent"
+ payload := map[string]interface{}{
+ "name": "Example Agent",
+ "agent": "This is an example agent from an API route.",
+ "description": "Description of the agent.",
+ "useCases": []map[string]string{
+ {"title": "Use case 1", "description": "Description of use case 1"},
+ {"title": "Use case 2", "description": "Description of use case 2"},
+ },
+ "requirements": []map[string]string{
+ {"package": "pip", "installation": "pip install"},
+            {"package": "pip3", "installation": "pip3 install"},
+ },
+ "tags": "example, agent",
+ }
+ jsonPayload, _ := json.Marshal(payload)
+
+ req, _ := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", "Bearer {apiKey}")
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ fmt.Println("An error has occurred", err)
+ return
+ }
+ defer resp.Body.Close()
+
+ var result map[string]interface{}
+ json.NewDecoder(resp.Body).Decode(&result)
+ fmt.Println(result)
+}
+```
+
+### cURL
+
+```bash
+curl -X POST https://swarms.world/api/add-agent \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer {apiKey}" \
+-d '{
+    "name": "Example Agent",
+    "agent": "This is an example agent from an API route.",
+    "description": "Description of the agent.",
+    "language": "python",
+    "useCases": [
+        { "title": "Use case 1", "description": "Description of use case 1" },
+        { "title": "Use case 2", "description": "Description of use case 2" }
+    ],
+    "requirements": [
+        { "package": "pip", "installation": "pip install" },
+        { "package": "pip3", "installation": "pip3 install" }
+    ],
+    "tags": "example, agent"
+}'
+```
+
+## Response
+
+The response will be a JSON object containing the result of the operation. Example response:
+
+```json
+{
+ "success": true,
+ "message": "Agent added successfully",
+ "data": {
+ "id": "agent_id",
+ "name": "Example Agent",
+ "agent": "This is an example agent from an API route.",
+ "description": "Description of the agent.",
+ "language": "python",
+ "useCases": [
+ { "title": "Use case 1", "description": "Description of use case 1" },
+ { "title": "Use case 2", "description": "Description of use case 2" }
+ ],
+ "requirements": [
+ { "package": "pip", "installation": "pip install" },
+ { "package": "pip3", "installation": "pip3 install" }
+ ],
+ "tags": "example, agent"
+ }
+}
+```
\ No newline at end of file
diff --git a/docs/swarms_platform/agents/edit_agent.md b/docs/swarms_platform/agents/edit_agent.md
new file mode 100644
index 00000000..dc934bee
--- /dev/null
+++ b/docs/swarms_platform/agents/edit_agent.md
@@ -0,0 +1,251 @@
+
+# Endpoint: Edit Agent
+
+The `https://swarms.world/api/edit-agent` endpoint allows users to edit an existing agent on the Swarms platform. This API accepts a POST request with a JSON body containing the agent details to be updated, such as its id, name, description, use cases, language, tags and requirements. The request must be authenticated using an API key.
+
+## Endpoint
+
+- **URL:** `https://swarms.world/api/edit-agent`
+- **Method:** POST
+- **Content-Type:** `application/json`
+- **Authorization:** Bearer token required in the header
+
+## Request Parameters
+
+The request body should be a JSON object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| -------------- | -------- | -------------------------------------------------------------------------- | -------- |
+| `id` | `string` | The ID of the agent to be edited. | Yes |
+| `name` | `string` | The name of the agent. | Yes |
+| `agent` | `string` | The agent text. | Yes |
+| `description` | `string` | A brief description of the agent. | Yes |
+| `language` | `string` | The agent's syntax language | No |
+| `useCases` | `array` | An array of use cases, each containing a title and description. | Yes |
+| `requirements` | `array` | An array of requirements, each containing a package name and installation. | Yes |
+| `tags` | `string` | Comma-separated tags for the agent. | No |
+
+### `useCases` Structure
+
+Each use case in the `useCases` array should be an object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| ------------- | -------- | ------------------------------------ | -------- |
+| `title` | `string` | The title of the use case. | Yes |
+| `description` | `string` | A brief description of the use case. | Yes |
+
+### `requirements` Structure
+
+Each requirement in the `requirements` array should be an object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| -------------- | -------- | ------------------------------------ | -------- |
+| `package` | `string` | The name of the package. | Yes |
+| `installation` | `string` | Installation command for the package | Yes |
+
+## Example Usage
+
+### Python
+
+```python
+import requests
+import json
+
+url = "https://swarms.world/api/edit-agent"
+headers = {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {apiKey}"
+}
+data = {
+ "id": "agent_id",
+ "name": "Updated agent",
+ "agent": "This is an updated agent from an API route.",
+ "description": "Updated description of the agent.",
+ "language": "javascript",
+ "useCases": [
+ {"title": "Updated use case 1", "description": "Updated description of use case 1"},
+ {"title": "Updated use case 2", "description": "Updated description of use case 2"}
+ ],
+ "requirements": [
+ { "package": "express", "installation": "npm install express" },
+ { "package": "lodash", "installation": "npm install lodash" },
+ ],
+ "tags": "updated, agent"
+}
+
+response = requests.post(url, headers=headers, data=json.dumps(data))
+print(response.json())
+```
+
+### Node.js
+
+```javascript
+const fetch = require("node-fetch");
+
+async function editAgentHandler() {
+ try {
+ const response = await fetch("https://swarms.world/api/edit-agent", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: "Bearer {apiKey}",
+ },
+ body: JSON.stringify({
+ id: "agent_id",
+ name: "Updated agent",
+ agent: "This is an updated agent from an API route.",
+ description: "Updated description of the agent.",
+ language: "javascript",
+ useCases: [
+ {
+ title: "Updated use case 1",
+ description: "Updated description of use case 1",
+ },
+ {
+ title: "Updated use case 2",
+ description: "Updated description of use case 2",
+ },
+ ],
+ requirements: [
+ { package: "express", installation: "npm install express" },
+ { package: "lodash", installation: "npm install lodash" },
+ ],
+ tags: "updated, agent",
+ }),
+ });
+
+ const result = await response.json();
+ console.log(result);
+ } catch (error) {
+ console.error("An error has occurred", error);
+ }
+}
+
+editAgentHandler();
+```
+
+### Go
+
+```go
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+func main() {
+ url := "https://swarms.world/api/edit-agent"
+ payload := map[string]interface{}{
+ "id": "agent_id",
+ "name": "Updated Agent",
+ "agent": "This is an updated agent from an API route.",
+ "description": "Updated description of the agent.",
+ "language": "javascript",
+ "useCases": []map[string]string{
+ {"title": "Updated use case 1", "description": "Updated description of use case 1"},
+ {"title": "Updated use case 2", "description": "Updated description of use case 2"},
+ },
+ "requirements": []map[string]string{
+ {"package": "express", "installation": "npm install express"},
+ {"package": "lodash", "installation": "npm install lodash"},
+ },
+ "tags": "updated, agent",
+ }
+ jsonPayload, _ := json.Marshal(payload)
+
+ req, _ := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", "Bearer {apiKey}")
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ fmt.Println("An error has occurred", err)
+ return
+ }
+ defer resp.Body.Close()
+
+ var result map[string]interface{}
+ json.NewDecoder(resp.Body).Decode(&result)
+ fmt.Println(result)
+}
+```
+
+### cURL
+
+```bash
+curl -X POST https://swarms.world/api/edit-agent \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer {apiKey}" \
+-d '{
+ "id": "agent_id",
+ "name": "Updated agent",
+ "agent": "This is an updated agent from an API route.",
+ "description": "Updated description of the agent.",
+ "language": "javascript",
+ "useCases": [
+ {"title": "Updated use case 1", "description": "Updated description of use case 1"},
+ {"title": "Updated use case 2", "description": "Updated description of use case 2"}
+ ],
+  "requirements": [
+    { "package": "express", "installation": "npm install express" },
+    { "package": "lodash", "installation": "npm install lodash" }
+  ],
+ "tags": "updated, agent"
+}'
+```
+
+## Response
+
+The response will be a JSON object containing the result of the operation. Example response:
+
+```json
+{
+ "success": true,
+ "message": "Agent updated successfully",
+ "data": {
+ "id": "agent_id",
+ "name": "Updated agent",
+ "agent": "This is an updated agent from an API route.",
+ "description": "Updated description of the agent.",
+ "language": "javascript",
+ "useCases": [
+ {
+ "title": "Updated use case 1",
+ "description": "Updated description of use case 1"
+ },
+ {
+ "title": "Updated use case 2",
+ "description": "Updated description of use case 2"
+ }
+ ],
+ "requirements": [
+ { "package": "express", "installation": "npm install express" },
+ { "package": "lodash", "installation": "npm install lodash" }
+ ],
+ "tags": "updated, agent"
+ }
+}
+```
+
+In case of an error, the response will contain an error message detailing the issue.
+
+## Common Issues and Tips
+
+- **Authentication Error:** Ensure that the `Authorization` header is correctly set with a valid API key.
+- **Invalid JSON:** Make sure the request body is a valid JSON object.
+- **Missing Required Fields:** Ensure that all required fields (`name`, `agent`, `description`, `useCases`, `requirements`) are included in the request body.
+- **Network Issues:** Verify network connectivity and endpoint URL.
+
+## References and Resources
+
+- [API Authentication Guide](https://swarms.world/docs/authentication)
+- [JSON Structure Standards](https://json.org/)
+- [Fetch API Documentation (Node.js)](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)
+- [Requests Library (Python)](https://requests.readthedocs.io/)
+- [Net/HTTP Package (Go)](https://pkg.go.dev/net/http)
+
+This comprehensive documentation provides all the necessary information to effectively use the `https://swarms.world/api/add-agent` and `https://swarms.world/api/edit-agent` endpoints, including details on request parameters, example code snippets in multiple programming languages, and troubleshooting tips.
diff --git a/docs/swarms_platform/agents/fetch_agents.md b/docs/swarms_platform/agents/fetch_agents.md
new file mode 100644
index 00000000..9355a3a7
--- /dev/null
+++ b/docs/swarms_platform/agents/fetch_agents.md
@@ -0,0 +1,414 @@
+# Documentation for `getAllAgents` API Endpoint
+
+The `getAllAgents` API endpoint is a part of the `swarms.world` application, designed to fetch all agent records from the database. This endpoint is crucial for retrieving various agents stored in the `swarms_cloud_agents` table, including their metadata such as name, description, use cases, language, requirements and tags. It provides an authenticated way to access this data, ensuring that only authorized users can retrieve the information.
+
+## Purpose
+
+The primary purpose of this API endpoint is to provide a method for clients to fetch a list of agents stored in the `swarms_cloud_agents` table, with the ability to filter by name, tags, language, requirement package and use cases. It ensures data integrity and security by using an authentication guard and handles various HTTP methods and errors gracefully.
+
+## API Endpoint Definition
+
+### Fetch All Agents
+
+#### Endpoint URL
+
+```
+https://swarms.world/get-agents
+```
+
+#### HTTP Method
+
+```
+GET
+```
+
+### Request Headers
+
+| Header | Type | Required | Description |
+| ------------- | ------ | -------- | --------------------------- |
+| Authorization | String | Yes | Bearer token for API access |
+
+### Query Parameters
+
+- **name** (optional): A substring to match against the agent name. The query is case-insensitive.
+- **tag** (optional): A comma-separated list of tags to filter agents by. The query matches any of the provided tags, and is case-insensitive.
+- **language** (optional): A substring to match against the language the agent is written in. The query is case-insensitive.
+- **use_case** (optional): A substring to match against the use case titles within the `use_cases` array. The query is case-insensitive.
+- **req_package** (optional): A substring to match against the requirement packages within the `requirements` array. The query is case-insensitive.
+
+#### Response
+
+##### Success Response (200)
+
+Returns an array of agents.
+
+```json
+[
+ {
+ "id": "string",
+ "name": "string",
+ "description": "string",
+ "language": "string",
+ "agent": "string",
+ "use_cases": [
+ {
+ "title": "string",
+ "description": "string"
+ }
+ ],
+ "requirements": [
+ {
+ "package": "string",
+ "installation": "string"
+ }
+ ],
+ "tags": "string"
+ },
+ ...
+]
+```
+
+##### Error Responses
+
+- **405 Method Not Allowed**
+
+ ```json
+ {
+ "error": "Method Not Allowed"
+ }
+ ```
+
+- **500 Internal Server Error**
+
+ ```json
+ {
+ "error": "Could not fetch agents"
+ }
+ ```
+
+### Fetch Agent by ID
+
+#### Endpoint URL
+
+```
+https://swarms.world/get-agents/[id]
+```
+
+#### HTTP Method
+
+```
+GET
+```
+
+### Request Headers
+
+| Header | Type | Required | Description |
+| ------------- | ------ | -------- | --------------------------- |
+| Authorization | String | Yes | Bearer token for API access |
+
+#### Response
+
+##### Success Response (200)
+
+Returns a single agent by ID.
+
+```json
+{
+ "id": "string",
+ "name": "string",
+ "description": "string",
+ "language": "string",
+ "agent": "string",
+ "use_cases": [
+ {
+ "title": "string",
+ "description": "string"
+ }
+ ],
+ "requirements": [
+ {
+ "package": "string",
+ "installation": "string"
+ }
+ ],
+ "tags": "string"
+}
+```
+
+##### Error Responses
+
+- **404 Not Found**
+
+ ```json
+ {
+ "error": "Agent not found"
+ }
+ ```
+
+- **500 Internal Server Error**
+
+ ```json
+ {
+ "error": "Could not fetch agent"
+ }
+ ```
+
+### Request Handling
+
+1. **Method Validation**: The endpoint only supports the `GET` method. If a different HTTP method is used, it responds with a `405 Method Not Allowed` status.
+
+2. **Database Query**:
+
+ - **Fetching All Agents**: The endpoint uses the `supabaseAdmin` client to query the `swarms_cloud_agents` table. Filters are applied based on the query parameters (`name`, `tag`, `language`, `req_package` and `use_case`).
+ - **Fetching an Agent by ID**: The endpoint retrieves a single agent from the `swarms_cloud_agents` table by its unique ID.
+
+3. **Response**: On success, it returns the agent data in JSON format. In case of an error during the database query, a `500 Internal Server Error` status is returned. For fetching by ID, if the agent is not found, it returns a `404 Not Found` status.
+
+### Code Example
+
+#### JavaScript (Node.js)
+
+```javascript
+import fetch from "node-fetch";
+
+// Fetch all agents with optional filters
+const getAgents = async (filters) => {
+ const queryString = new URLSearchParams(filters).toString();
+ const response = await fetch(
+ `https://swarms.world/get-agents?${queryString}`,
+ {
+ method: "GET",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: "Bearer {apiKey}",
+ },
+ }
+ );
+
+ if (!response.ok) {
+ throw new Error(`Error: ${response.statusText}`);
+ }
+
+ const data = await response.json();
+ console.log(data);
+};
+
+// Fetch agent by ID
+const getAgentById = async (id) => {
+ const response = await fetch(`https://swarms.world/get-agents/${id}`, {
+ method: "GET",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: "Bearer {apiKey}",
+ },
+ });
+
+ if (!response.ok) {
+ throw new Error(`Error: ${response.statusText}`);
+ }
+
+ const data = await response.json();
+ console.log(data);
+};
+
+// Example usage
+getAgents({
+ name: "example",
+ tag: "tag1,tag2",
+ use_case: "example",
+  language: "language",
+ req_package: "package_name",
+}).catch(console.error);
+getAgentById("123").catch(console.error);
+```
+
+#### Python
+
+```python
+import requests
+
+API_KEY = "{apiKey}"
+
+# Fetch all agents with optional filters
+def get_agents(filters):
+ query_string = "&".join([f"{key}={value}" for key, value in filters.items()])
+ url = f"https://swarms.world/get-agents?{query_string}"
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {API_KEY}",
+ }
+ response = requests.get(url, headers=headers)
+
+ if not response.ok:
+ raise Exception(f"Error: {response.reason}")
+
+ data = response.json()
+ print(data)
+ return data
+
+# Fetch agent by ID
+def get_agent_by_id(agent_id):
+ url = f"https://swarms.world/get-agents/{agent_id}"
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {API_KEY}",
+ }
+ response = requests.get(url, headers=headers)
+
+ if not response.ok:
+ raise Exception(f"Error: {response.reason}")
+
+ data = response.json()
+ print(data)
+ return data
+
+# Example usage
+try:
+ get_agents({
+ "name": "example",
+ "tag": "tag1,tag2",
+ "use_case": "example",
+ "language": "language",
+ "req_package": "package_name",
+ })
+except Exception as e:
+ print(e)
+
+try:
+ get_agent_by_id("123")
+except Exception as e:
+ print(e)
+```
+
+#### cURL
+
+```sh
+# Fetch all agents with optional filters
+curl -X GET "https://swarms.world/get-agents?name=example&tag=tag1,tag2&use_case=example&language=language&req_package=package_name" \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer {apiKey}"
+
+# Fetch agent by ID
+curl -X GET "https://swarms.world/get-agents/123" \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer {apiKey}"
+```
+
+#### Go
+
+```go
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+)
+
+func getAgents(filters map[string]string) error {
+ query := url.Values{}
+ for key, value := range filters {
+ query.Set(key, value)
+ }
+
+ url := fmt.Sprintf("https://swarms.world/get-agents?%s", query.Encode())
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", "Bearer {apiKey}")
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("error: %s", resp.Status)
+ }
+
+ var data interface{}
+ if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
+ return err
+ }
+
+ fmt.Println(data)
+ return nil
+}
+
+func getAgentById(id string) error {
+ url := fmt.Sprintf("https://swarms.world/get-agents/%s", id)
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", "Bearer {apiKey}")
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("error: %s", resp.Status)
+ }
+
+ var data interface{}
+ if err := json.NewDecoder(resp.Body).Decode(&data); err != nil {
+ return err
+ }
+
+ fmt.Println(data)
+ return nil
+}
+func main() {
+ filters := map[string]string{
+ "name": "example",
+ "tag": "tag1,tag2",
+ "use_case": "example",
+ "language": "language",
+ "req_package": "package_name",
+ }
+
+ getAgents(filters)
+ getAgentById("123")
+}
+```
+
+#### Attributes Table
+
+| Attribute | Type | Description |
+| ------------ | ------ | ------------------------------- |
+| id | String | Unique identifier for the agent |
+| name | String | Name of the agent |
+| description | String | Description of the agent |
+| agent | String | The actual agent |
+| language     | String | The code language of the agent  |
+| use_cases | Array | Use cases for the agent |
+| requirements | Array | Requirements for the agent |
+| tags | String | Tags associated with the agent |
+
+## Additional Information and Tips
+
+- Handle different error statuses appropriately to provide clear feedback to users.
+- Consider implementing rate limiting and logging for better security and monitoring.
+
+## References and Resources
+
+- [Next.js API Routes](https://nextjs.org/docs/api-routes/introduction)
+- [Supabase Documentation](https://supabase.com/docs)
+- [Node Fetch](https://www.npmjs.com/package/node-fetch)
+- [Requests Library (Python)](https://docs.python-requests.org/en/latest/)
+- [Go net/http Package](https://pkg.go.dev/net/http)
+
+This documentation provides a comprehensive guide to the `getAllAgents` API endpoint, including usage examples in multiple programming languages and detailed attribute descriptions.
diff --git a/docs/swarms_platform/index.md b/docs/swarms_platform/index.md
new file mode 100644
index 00000000..4347c639
--- /dev/null
+++ b/docs/swarms_platform/index.md
@@ -0,0 +1,122 @@
+# Swarms Platform Documentation
+
+Welcome to the Swarms Platform, a dynamic ecosystem where users can share, discover, and host agents and agent swarms. This documentation will guide you through the various features of the platform, providing you with the information you need to get started and make the most out of your experience.
+
+## Table of Contents
+
+1. [Introduction](#introduction)
+2. [Getting Started](#getting-started)
+3. [Account Management](#account-management)
+4. [Usage Monitoring](#usage-monitoring)
+5. [API Key Generation](#api-key-generation)
+6. [Explorer](#explorer)
+7. [Dashboard](#dashboard)
+8. [Creating an Organization](#creating-an-organization)
+9. [Additional Resources](#additional-resources)
+
+## Introduction
+
+The Swarms Platform is designed to facilitate the sharing, discovery, and hosting of intelligent agents and swarms of agents. Whether you are a developer looking to deploy your own agents, or an organization seeking to leverage collective intelligence, the Swarms Platform provides the tools and community support you need.
+
+## Getting Started
+
+To begin using the Swarms Platform, follow these steps:
+
+1. **Create an Account**: Sign up on the platform to access its features.
+2. **Explore the Dashboard**: Familiarize yourself with the user interface and available functionalities.
+3. **Generate API Keys**: Securely interact with the platform's API.
+4. **Create and Join Organizations**: Collaborate with others to deploy and manage agents and swarms.
+5. **Share and Discover**: Use the Explorer to find and share agents and swarms.
+
+## Account Management
+
+### Account Page
+
+Access and manage your account settings through the account page.
+
+- **URL**: [Account Page](https://swarms.world/platform/account)
+
+Here, you can update your profile information, manage security settings, and configure notifications.
+
+## Usage Monitoring
+
+### Check Your Usage
+
+Monitor your usage statistics to keep track of your activities and resource consumption on the platform.
+
+- **URL**: [Usage Monitoring](https://swarms.world/platform/usage)
+
+This page provides detailed insights into your usage patterns, helping you optimize your resource allocation and stay within your limits.
+
+## API Key Generation
+
+### Generate Your API Keys
+
+Generate API keys to securely interact with the Swarms Platform API.
+
+- **URL**: [API Key Generation](https://swarms.world/platform/api-keys)
+
+Follow the steps on this page to create, manage, and revoke API keys as needed. Ensure that your keys are kept secure and only share them with trusted applications.
+
+## Explorer
+
+### Explorer: Share, Discover, and Deploy
+
+The Explorer is a central hub for sharing, discovering, and deploying prompts, agents, and swarms.
+
+- **URL**: [Explorer](https://swarms.world/)
+
+Use the Explorer to:
+
+- **Share**: Upload and share your own prompts, agents, and swarms with the community.
+- **Discover**: Browse and discover new and innovative agents and swarms created by others.
+- **Deploy**: Quickly deploy agents and swarms for your own use or organizational needs.
+
+## Dashboard
+
+### Dashboard
+
+The Dashboard is your control center for managing all aspects of your Swarms Platform experience.
+
+- **URL**: [Dashboard](https://swarms.world/platform/dashboard)
+
+From the Dashboard, you can:
+
+- Monitor real-time metrics and analytics.
+- Manage your agents and swarms.
+- Access your account settings and usage information.
+- Navigate to other sections of the platform.
+
+## Creating an Organization
+
+### Create an Organization
+
+Collaborate with others by creating and joining organizations on the Swarms Platform.
+
+- **URL**: [Create an Organization](https://swarms.world/platform/organization)
+
+Creating an organization allows you to:
+
+- Pool resources with team members.
+- Manage shared agents and swarms.
+- Set permissions and roles for organization members.
+
+## Additional Resources
+
+To further enhance your understanding and usage of the Swarms Platform, explore the following resources:
+
+- **API Documentation**: Comprehensive documentation on the platform's API.
+- **Community Forums**: Engage with other users, share insights, and get support.
+- **Tutorials and Guides**: Step-by-step tutorials to help you get started with specific features and use cases.
+- **Support**: Contact the support team for any issues or inquiries.
+
+### Links
+
+- [API Documentation](https://docs.swarms.world)
+- [Community Forums](https://discord.com/servers/agora-999382051935506503)
+- [Tutorials and Guides](https://docs.swarms.world)
+- [Support](https://discord.com/servers/agora-999382051935506503)
+
+## Conclusion
+
+The Swarms Platform is a versatile and powerful ecosystem for managing intelligent agents and swarms. By following this documentation, you can effectively navigate the platform, leverage its features, and collaborate with others to create innovative solutions. Happy swarming!
\ No newline at end of file
diff --git a/docs/swarms_platform/prompts/add_prompt.md b/docs/swarms_platform/prompts/add_prompt.md
new file mode 100644
index 00000000..7812eec6
--- /dev/null
+++ b/docs/swarms_platform/prompts/add_prompt.md
@@ -0,0 +1,178 @@
+# Prompts API Documentation
+
+The `https://swarms.world/api/add-prompt` endpoint allows users to add a new prompt to the Swarms platform. This API accepts a POST request with a JSON body containing details of the prompt, such as its name, description, use cases, and tags. The request must be authenticated using an API key.
+
+## Endpoint: Add Prompt
+
+- **URL:** `https://swarms.world/api/add-prompt`
+- **Method:** POST
+- **Content-Type:** `application/json`
+- **Authorization:** Bearer token required in the header
+
+## Request Parameters
+
+The request body should be a JSON object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| ------------- | -------- | --------------------------------------------------------------- | -------- |
+| `name` | `string` | The name of the prompt. | Yes |
+| `prompt` | `string` | The prompt text. | Yes |
+| `description` | `string` | A brief description of the prompt. | Yes |
+| `useCases` | `array` | An array of use cases, each containing a title and description. | Yes |
+| `tags` | `string` | Comma-separated tags for the prompt. | No |
+
+### `useCases` Structure
+
+Each use case in the `useCases` array should be an object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| ------------- | -------- | ------------------------------------ | -------- |
+| `title` | `string` | The title of the use case. | Yes |
+| `description` | `string` | A brief description of the use case. | Yes |
+
+## Example Usage
+
+### Python
+
+```python
+import requests
+import json
+
+url = "https://swarms.world/api/add-prompt"
+headers = {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {apiKey}"
+}
+data = {
+ "name": "Example Prompt",
+ "prompt": "This is an example prompt from an API route.",
+ "description": "Description of the prompt.",
+ "useCases": [
+ {"title": "Use case 1", "description": "Description of use case 1"},
+ {"title": "Use case 2", "description": "Description of use case 2"}
+ ],
+ "tags": "example, prompt"
+}
+
+response = requests.post(url, headers=headers, data=json.dumps(data))
+print(response.json())
+```
+
+### Node.js
+
+```javascript
+const fetch = require("node-fetch");
+
+async function addPromptsHandler() {
+ try {
+ const response = await fetch("https://swarms.world/api/add-prompt", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: "Bearer {apiKey}",
+ },
+ body: JSON.stringify({
+ name: "Example Prompt",
+ prompt: "This is an example prompt from an API route.",
+ description: "Description of the prompt.",
+ useCases: [
+ { title: "Use case 1", description: "Description of use case 1" },
+ { title: "Use case 2", description: "Description of use case 2" },
+ ],
+ tags: "example, prompt",
+ }),
+ });
+
+ const result = await response.json();
+ console.log(result);
+ } catch (error) {
+ console.error("An error has occurred", error);
+ }
+}
+
+addPromptsHandler();
+```
+
+### Go
+
+```go
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+func main() {
+ url := "https://swarms.world/api/add-prompt"
+ payload := map[string]interface{}{
+ "name": "Example Prompt",
+ "prompt": "This is an example prompt from an API route.",
+ "description": "Description of the prompt.",
+ "useCases": []map[string]string{
+ {"title": "Use case 1", "description": "Description of use case 1"},
+ {"title": "Use case 2", "description": "Description of use case 2"},
+ },
+ "tags": "example, prompt",
+ }
+ jsonPayload, _ := json.Marshal(payload)
+
+ req, _ := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", "Bearer {apiKey}")
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ fmt.Println("An error has occurred", err)
+ return
+ }
+ defer resp.Body.Close()
+
+ var result map[string]interface{}
+ json.NewDecoder(resp.Body).Decode(&result)
+ fmt.Println(result)
+}
+```
+
+### cURL
+
+```bash
+curl -X POST https://swarms.world/api/add-prompt \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer {apiKey}" \
+-d '{
+ "name": "Example Prompt",
+ "prompt": "This is an example prompt from an API route.",
+ "description": "Description of the prompt.",
+ "useCases": [
+ { "title": "Use case 1", "description": "Description of use case 1" },
+ { "title": "Use case 2", "description": "Description of use case 2" }
+ ],
+ "tags": "example, prompt"
+}'
+```
+
+## Response
+
+The response will be a JSON object containing the result of the operation. Example response:
+
+```json
+{
+ "success": true,
+ "message": "Prompt added successfully",
+ "data": {
+ "id": "prompt_id",
+ "name": "Example Prompt",
+ "prompt": "This is an example prompt from an API route.",
+ "description": "Description of the prompt.",
+ "useCases": [
+ { "title": "Use case 1", "description": "Description of use case 1" },
+ { "title": "Use case 2", "description": "Description of use case 2" }
+ ],
+ "tags": "example, prompt"
+ }
+}
+```
\ No newline at end of file
diff --git a/docs/swarms_platform/prompts/edit_prompt.md b/docs/swarms_platform/prompts/edit_prompt.md
new file mode 100644
index 00000000..ebb01cde
--- /dev/null
+++ b/docs/swarms_platform/prompts/edit_prompt.md
@@ -0,0 +1,214 @@
+# Endpoint: Edit Prompt
+
+The `https://swarms.world/api/edit-prompt` endpoint allows users to edit an existing prompt on the Swarms platform. This API accepts a POST request with a JSON body containing the prompt details to be updated, such as its name, description, use cases, and tags. The request must be authenticated using an API key.
+
+## Endpoint
+
+- **URL:** `https://swarms.world/api/edit-prompt`
+- **Method:** POST
+- **Content-Type:** `application/json`
+- **Authorization:** Bearer token required in the header
+
+## Request Parameters
+
+The request body should be a JSON object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| ------------- | -------- | --------------------------------------------------------------- | -------- |
+| `id` | `string` | The ID of the prompt to be edited. | Yes |
+| `name` | `string` | The name of the prompt. | Yes |
+| `prompt` | `string` | The prompt text. | Yes |
+| `description` | `string` | A brief description of the prompt. | No |
+| `useCases` | `array` | An array of use cases, each containing a title and description. | Yes |
+| `tags` | `string` | Comma-separated tags for the prompt. | No |
+
+### `useCases` Structure
+
+Each use case in the `useCases` array should be an object with the following attributes:
+
+| Attribute | Type | Description | Required |
+| ------------- | -------- | ------------------------------------ | -------- |
+| `title` | `string` | The title of the use case. | Yes |
+| `description` | `string` | A brief description of the use case. | Yes |
+
+## Example Usage
+
+### Python
+
+```python
+import requests
+import json
+
+url = "https://swarms.world/api/edit-prompt"
+headers = {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer {apiKey}"
+}
+data = {
+ "id": "prompt_id",
+ "name": "Updated Prompt",
+ "prompt": "This is an updated prompt from an API route.",
+ "description": "Updated description of the prompt.",
+ "useCases": [
+ {"title": "Updated use case 1", "description": "Updated description of use case 1"},
+ {"title": "Updated use case 2", "description": "Updated description of use case 2"}
+ ],
+ "tags": "updated, prompt"
+}
+
+response = requests.post(url, headers=headers, data=json.dumps(data))
+print(response.json())
+```
+
+### Node.js
+
+```javascript
+const fetch = require("node-fetch");
+
+async function editPromptsHandler() {
+ try {
+ const response = await fetch("https://swarms.world/api/edit-prompt", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: "Bearer {apiKey}",
+ },
+ body: JSON.stringify({
+ id: "prompt_id",
+ name: "Updated Prompt",
+ prompt: "This is an updated prompt from an API route.",
+ description: "Updated description of the prompt.",
+ useCases: [
+ {
+ title: "Updated use case 1",
+ description: "Updated description of use case 1",
+ },
+ {
+ title: "Updated use case 2",
+ description: "Updated description of use case 2",
+ },
+ ],
+ tags: "updated, prompt",
+ }),
+ });
+
+ const result = await response.json();
+ console.log(result);
+ } catch (error) {
+ console.error("An error has occurred", error);
+ }
+}
+
+editPromptsHandler();
+```
+
+### Go
+
+```go
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+func main() {
+ url := "https://swarms.world/api/edit-prompt"
+ payload := map[string]interface{}{
+ "id": "prompt_id",
+ "name": "Updated Prompt",
+ "prompt": "This is an updated prompt from an API route.",
+ "description": "Updated description of the prompt.",
+ "useCases": []map[string]string{
+ {"title": "Updated use case 1", "description": "Updated description of use case 1"},
+ {"title": "Updated use case 2", "description": "Updated description of use case 2"},
+ },
+ "tags": "updated, prompt",
+ }
+ jsonPayload, _ := json.Marshal(payload)
+
+ req, _ := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Authorization", "Bearer {apiKey}")
+
+ client := &http.Client{}
+ resp, err := client.Do(req)
+ if err != nil {
+ fmt.Println("An error has occurred", err)
+ return
+ }
+ defer resp.Body.Close()
+
+ var result map[string]interface{}
+ json.NewDecoder(resp.Body).Decode(&result)
+ fmt.Println(result)
+}
+```
+
+### cURL
+
+```bash
+curl -X POST https://swarms.world/api/edit-prompt \
+-H "Content-Type: application/json" \
+-H "Authorization: Bearer {apiKey}" \
+-d '{
+ "id": "prompt_id",
+ "name": "Updated Prompt",
+ "prompt": "This is an updated prompt from an API route.",
+ "description": "Updated description of the prompt.",
+ "useCases": [
+ { "title": "Updated use case 1", "description": "Updated description of use case 1" },
+ { "title": "Updated use case 2", "description": "Updated description of use case 2" }
+ ],
+ "tags": "updated, prompt"
+}'
+```
+
+## Response
+
+The response will be a JSON object containing the result of the operation. Example response:
+
+```json
+{
+ "success": true,
+ "message": "Prompt updated successfully",
+ "data": {
+ "id": "prompt_id",
+ "name": "Updated Prompt",
+ "prompt": "This is an updated prompt from an API route.",
+ "description": "Updated description of the prompt.",
+ "useCases": [
+ {
+ "title": "Updated use case 1",
+ "description": "Updated description of use case 1"
+ },
+ {
+ "title": "Updated use case 2",
+ "description": "Updated description of use case 2"
+ }
+ ],
+ "tags": "updated, prompt"
+ }
+}
+```
+
+In case of an error, the response will contain an error message detailing the issue.
+
+## Common Issues and Tips
+
+- **Authentication Error:** Ensure that the `Authorization` header is correctly set with a valid API key.
+- **Invalid JSON:** Make sure the request body is a valid JSON object.
+- **Missing Required Fields:** Ensure that all required fields (`id`, `name`, `prompt`, `useCases`) are included in the request body.
+- **Network Issues:** Verify network connectivity and endpoint URL.
+
+## References and Resources
+
+- [API Authentication Guide](https://swarms.world/docs/authentication)
+- [JSON Structure Standards](https://json.org/)
+- [Fetch API Documentation (Node.js)](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)
+- [Requests Library (Python)](https://requests.readthedocs.io/)
+- [Net/HTTP Package (Go)](https://pkg.go.dev/net/http)
+
+This comprehensive documentation provides all the necessary information to effectively use the `https://swarms.world/api/add-prompt` and `https://swarms.world/api/edit-prompt` endpoints, including details on request parameters, example code snippets in multiple programming languages, and troubleshooting tips.
diff --git a/docs/swarms_platform/prompts/fetch_prompts.md b/docs/swarms_platform/prompts/fetch_prompts.md
new file mode 100644
index 00000000..7a691c75
--- /dev/null
+++ b/docs/swarms_platform/prompts/fetch_prompts.md
@@ -0,0 +1,325 @@
+# Documentation for `getAllPrompts` API Endpoint
+
+The `getAllPrompts` API endpoint is a part of the `swarms.world` application, designed to fetch all prompt records from the database. This endpoint is crucial for retrieving various prompts stored in the `swarms_cloud_prompts` table, including their metadata such as name, description, use cases, and tags.
+
+## Purpose
+
+The primary purpose of this API endpoint is to provide a method for clients to fetch a list of prompts stored in the `swarms_cloud_prompts` table, with the ability to filter by name, tags, and use cases.
+
+## API Endpoint Definition
+
+### Fetch All Prompts
+
+#### Endpoint URL
+
+```
+https://swarms.world/get-prompts
+```
+
+#### HTTP Method
+
+```
+GET
+```
+
+### Query Parameters
+
+- **name** (optional): A substring to match against the prompt name. The query is case-insensitive.
+- **tag** (optional): A comma-separated list of tags to filter prompts by. The query matches any of the provided tags, and is case-insensitive.
+- **use_case** (optional): A substring to match against the use case titles within the `use_cases` array. The query is case-insensitive.
+- **use_case_description** (optional): A substring to match against the use case descriptions within the `use_cases` array. The query is case-insensitive.
+
+#### Response
+
+##### Success Response (200)
+
+Returns an array of prompts.
+
+```json
+[
+ {
+ "id": "string",
+ "name": "string",
+ "description": "string",
+ "prompt": "string",
+ "use_cases": [
+ {
+ "title": "string",
+ "description": "string"
+ }
+ ],
+ "tags": "string"
+ },
+ ...
+]
+```
+
+##### Error Responses
+
+- **405 Method Not Allowed**
+
+ ```json
+ {
+ "error": "Method Not Allowed"
+ }
+ ```
+
+- **500 Internal Server Error**
+
+ ```json
+ {
+ "error": "Could not fetch prompts"
+ }
+ ```
+
+### Fetch Prompt by ID
+
+#### Endpoint URL
+
+```
+https://swarms.world/get-prompts/[id]
+```
+
+#### HTTP Method
+
+```
+GET
+```
+
+#### Response
+
+##### Success Response (200)
+
+Returns a single prompt by ID.
+
+```json
+{
+ "id": "string",
+ "name": "string",
+ "description": "string",
+ "prompt": "string",
+ "use_cases": [
+ {
+ "title": "string",
+ "description": "string"
+ }
+ ],
+ "tags": "string"
+}
+```
+
+##### Error Responses
+
+- **404 Not Found**
+
+ ```json
+ {
+ "error": "Prompt not found"
+ }
+ ```
+
+- **500 Internal Server Error**
+
+ ```json
+ {
+ "error": "Could not fetch prompt"
+ }
+ ```
+
+### Request Handling
+
+1. **Method Validation**: The endpoint only supports the `GET` method. If a different HTTP method is used, it responds with a `405 Method Not Allowed` status.
+
+2. **Database Query**:
+
+   - **Fetching All Prompts**: The endpoint uses the `supabaseAdmin` client to query the `swarms_cloud_prompts` table. Filters are applied based on the query parameters (`name`, `tag`, `use_case`, and `use_case_description`).
+ - **Fetching a Prompt by ID**: The endpoint retrieves a single prompt from the `swarms_cloud_prompts` table by its unique ID.
+
+3. **Response**: On success, it returns the prompt data in JSON format. In case of an error during the database query, a `500 Internal Server Error` status is returned. For fetching by ID, if the prompt is not found, it returns a `404 Not Found` status.
+
+### Code Example
+
+#### JavaScript (Node.js)
+
+```javascript
+import fetch from "node-fetch";
+
+// Fetch all prompts with optional filters
+const getPrompts = async (filters) => {
+ const queryString = new URLSearchParams(filters).toString();
+ const response = await fetch(
+ `https://swarms.world/get-prompts?${queryString}`,
+ {
+ method: "GET",
+ }
+ );
+
+ if (!response.ok) {
+ throw new Error(`Error: ${response.statusText}`);
+ }
+
+ const data = await response.json();
+ console.log(data);
+};
+
+// Fetch prompt by ID
+const getPromptById = async (id) => {
+ const response = await fetch(`https://swarms.world/get-prompts/${id}`, {
+ method: "GET",
+ });
+
+ if (!response.ok) {
+ throw new Error(`Error: ${response.statusText}`);
+ }
+
+ const data = await response.json();
+ console.log(data);
+};
+
+// Example usage
+getPrompts({
+ name: "example",
+ tag: "tag1,tag2",
+ use_case: "example",
+ use_case_description: "description",
+}).catch(console.error);
+getPromptById("123").catch(console.error);
+```
+
+#### Python
+
+```python
+import requests
+
+# Fetch all prompts with optional filters
+def get_prompts(filters):
+ response = requests.get('https://swarms.world/get-prompts', params=filters)
+
+ if response.status_code != 200:
+ raise Exception(f'Error: {response.status_code}, {response.text}')
+
+ data = response.json()
+ print(data)
+
+# Fetch prompt by ID
+def get_prompt_by_id(id):
+ response = requests.get(f'https://swarms.world/get-prompts/{id}')
+
+ if response.status_code != 200:
+ raise Exception(f'Error: {response.status_code}, {response.text}')
+
+ data = response.json()
+ print(data)
+
+# Example usage
+get_prompts({'name': 'example', 'tag': 'tag1,tag2', 'use_case': 'example', 'use_case_description': 'description'})
+get_prompt_by_id('123')
+```
+
+#### cURL
+
+```sh
+# Fetch all prompts with optional filters
+curl -X GET "https://swarms.world/get-prompts?name=example&tag=tag1,tag2&use_case=example&use_case_description=description"
+
+# Fetch prompt by ID
+curl -X GET https://swarms.world/get-prompts/123
+```
+
+#### Go
+
+```go
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+)
+
+func getPrompts(filters map[string]string) {
+ baseURL := "https://swarms.world/get-prompts"
+ query := url.Values{}
+ for key, value := range filters {
+ query.Set(key, value)
+ }
+ fullURL := fmt.Sprintf("%s?%s", baseURL, query.Encode())
+
+ resp, err := http.Get(fullURL)
+ if err != nil {
+ panic(err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ body, _ := ioutil.ReadAll(resp.Body)
+ panic(fmt.Sprintf("Error: %d, %s", resp.StatusCode, string(body)))
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(string(body))
+}
+
+func getPromptById(id string) {
+ url := fmt.Sprintf("https://swarms.world/get-prompts/%s", id)
+ resp, err := http.Get(url)
+ if err != nil {
+ panic(err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ body, _ := ioutil.ReadAll(resp.Body)
+ panic(fmt.Sprintf("Error: %d, %s", resp.StatusCode, string(body)))
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(string(body))
+}
+
+func main() {
+ filters := map[string]string{
+ "name": "example",
+ "tag": "tag1,tag2",
+ "use_case": "example",
+ "use_case_description": "description",
+ }
+ getPrompts(filters)
+ getPromptById("123")
+}
+```
+
+#### Attributes Table
+
+| Attribute | Type | Description |
+| ----------- | ------ | -------------------------------- |
+| id | String | Unique identifier for the prompt |
+| name | String | Name of the prompt |
+| description | String | Description of the prompt |
+| prompt | String | The actual prompt text |
+| use_cases | Array | Use cases for the prompt |
+| tags | String | Tags associated with the prompt |
+
+## Additional Information and Tips
+
+- Handle different error statuses appropriately to provide clear feedback to users.
+- Consider implementing rate limiting and logging for better security and monitoring.
+
+## References and Resources
+
+- [Next.js API Routes](https://nextjs.org/docs/api-routes/introduction)
+- [Supabase Documentation](https://supabase.com/docs)
+- [Node Fetch](https://www.npmjs.com/package/node-fetch)
+- [Requests Library (Python)](https://docs.python-requests.org/en/latest/)
+- [Go net/http Package](https://pkg.go.dev/net/http)
+
+This documentation provides a comprehensive guide to the `getAllPrompts` API endpoint, including usage examples in multiple programming languages and detailed attribute descriptions.
diff --git a/docs/swarms_platform/share_discover.md b/docs/swarms_platform/share_discover.md
new file mode 100644
index 00000000..e69de29b
diff --git a/example.py b/example.py
new file mode 100644
index 00000000..d7d2de1d
--- /dev/null
+++ b/example.py
@@ -0,0 +1,48 @@
+import os
+from swarms import Agent, OpenAIChat
+from swarms.prompts.finance_agent_sys_prompt import (
+ FINANCIAL_AGENT_SYS_PROMPT,
+)
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ # dynamic_temperature_enabled=True,
+ dashboard=False,
+ verbose=True,
+ # interactive=True, # Set to False to disable interactive mode
+ dynamic_temperature_enabled=True,
+ saved_state_path="finance_agent.json",
+ # tools=[#Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # tool_schema=
+)
+
+
+agent.run(
+ "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
+)
diff --git a/images/swarmslogobanner.png b/images/swarmslogobanner.png
new file mode 100644
index 00000000..f88646db
Binary files /dev/null and b/images/swarmslogobanner.png differ
diff --git a/playground/agents/3rd_party_agents/auto_gen.py b/playground/agents/3rd_party_agents/auto_gen.py
new file mode 100644
index 00000000..5cdeaf6b
--- /dev/null
+++ b/playground/agents/3rd_party_agents/auto_gen.py
@@ -0,0 +1,71 @@
+import os
+from typing import Any, Dict, Optional
+
+from autogen import ConversableAgent
+from loguru import logger
+
+from swarms import Agent
+
+
+class AutogenAgentWrapper(Agent):
+ """
+ Wrapper class for the ConversableAgent that provides additional functionality.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ llm_config: Dict[str, Any],
+ *args: Any,
+ **kwargs: Any,
+ ):
+ """
+ Initialize the AutogenAgentWrapper.
+
+ Args:
+ name (str): The name of the agent.
+ llm_config (Dict[str, Any]): The configuration for the ConversableAgent.
+ *args: Additional positional arguments.
+ **kwargs: Additional keyword arguments.
+ """
+ super().__init__(*args, **kwargs)
+ self.name = name
+ self.autogen_agent = ConversableAgent(
+ name=name,
+ llm_config=llm_config,
+ code_execution_config=False,
+ function_map=None,
+ human_input_mode="NEVER",
+ )
+
+ def run(self, task: str, *args: Any, **kwargs: Any) -> Optional[str]:
+ """
+ Run the AutogenAgentWrapper.
+
+ Args:
+ task (str): The task to be performed by the agent.
+ *args: Additional positional arguments.
+ **kwargs: Additional keyword arguments.
+
+ Returns:
+ Optional[str]: The response generated by the agent, or None if an error occurred.
+ """
+ try:
+ messages = [{"content": task, "role": "user"}]
+ response = self.autogen_agent.generate_reply(messages)
+ logger.info("Task: %s, Response: %s", task, response)
+ return response
+ except Exception as e:
+ logger.error("An error occurred: %s", str(e))
+ return None
+
+
+llm_config = {
+ "config_list": [
+ {"model": "gpt-4", "api_key": os.environ.get("OPENAI_API_KEY")}
+ ]
+}
+
+autogen_wrapper = AutogenAgentWrapper("AutogenAssistant", llm_config)
+result = autogen_wrapper.run("Tell me a joke about programming.")
+print(result)
diff --git a/playground/agents/3rd_party_agents/crew_ai.py b/playground/agents/3rd_party_agents/crew_ai.py
new file mode 100644
index 00000000..418ae669
--- /dev/null
+++ b/playground/agents/3rd_party_agents/crew_ai.py
@@ -0,0 +1,92 @@
+from typing import List, Optional
+
+from crewai import Agent as CrewAIAgent
+from crewai import Crew, Process, Task
+from crewai_tools import SerperDevTool
+from loguru import logger
+
+from swarms import Agent
+
+
+class CrewAIAgentWrapper(Agent):
+ """
+ Initialize the CrewAIAgentWrapper.
+
+ Args:
+ name (str): The name of the agent.
+ role (str): The role of the agent.
+ goal (str): The goal of the agent.
+ backstory (str): The backstory of the agent.
+ tools (Optional[List]): The tools used by the agent (default: None).
+ *args: Variable length argument list.
+ **kwargs: Arbitrary keyword arguments.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ role: str,
+ goal: str,
+ backstory: str,
+ tools: Optional[List] = None,
+ *args,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.name = name
+ self.crewai_agent = CrewAIAgent(
+ role=role,
+ goal=goal,
+ backstory=backstory,
+ verbose=True,
+ allow_delegation=False,
+ tools=tools or [],
+ *args,
+ **kwargs,
+ )
+
+ def run(self, task: str, *args, **kwargs):
+ """
+ Run the agent's task.
+
+ Args:
+ task (str): The task to be performed by the agent.
+ *args: Variable length argument list.
+ **kwargs: Arbitrary keyword arguments.
+
+ Returns:
+ Any: The result of the task execution.
+ """
+ try:
+ crew_task = Task(
+ description=task, agent=self.crewai_agent, *args, **kwargs
+ )
+ crew = Crew(
+ agents=[self.crewai_agent],
+ tasks=[crew_task],
+ process=Process.sequential,
+ )
+ result = crew.kickoff()
+ return result
+ except Exception as e:
+ logger.error(f"An error occurred: {e}")
+ return None
+
+
+# Usage example
+search_tool = SerperDevTool()
+
+crewai_wrapper = CrewAIAgentWrapper(
+ name="ResearchAnalyst",
+ role="Senior Research Analyst",
+ goal="Uncover cutting-edge developments in AI and data science",
+ backstory="""You work at a leading tech think tank.
+ Your expertise lies in identifying emerging trends.
+ You have a knack for dissecting complex data and presenting actionable insights.""",
+ tools=[search_tool],
+)
+
+result = crewai_wrapper.run(
+ "Analyze the latest trends in quantum computing and summarize the key findings."
+)
+print(result)
diff --git a/playground/agents/3rd_party_agents/griptape.py b/playground/agents/3rd_party_agents/griptape.py
new file mode 100644
index 00000000..4ba7f17a
--- /dev/null
+++ b/playground/agents/3rd_party_agents/griptape.py
@@ -0,0 +1,69 @@
+from typing import List, Optional
+
+from griptape.structures import Agent as GriptapeAgent
+from griptape.tools import FileManager, TaskMemoryClient, WebScraper
+
+from swarms import Agent
+
+
+class GriptapeAgentWrapper(Agent):
+ """
+ A wrapper class for the GriptapeAgent from the griptape library.
+ """
+
+ def __init__(
+ self, name: str, tools: Optional[List] = None, *args, **kwargs
+ ):
+ """
+ Initialize the GriptapeAgentWrapper.
+
+ Parameters:
+ - name: The name of the agent.
+ - tools: A list of tools to be used by the agent. If not provided, default tools will be used.
+ - *args, **kwargs: Additional arguments to be passed to the parent class constructor.
+ """
+ super().__init__(*args, **kwargs)
+ self.name = name
+ self.tools = tools or [
+ WebScraper(off_prompt=True),
+ TaskMemoryClient(off_prompt=True),
+ FileManager(),
+ ]
+ self.griptape_agent = GriptapeAgent(
+ input=f"I am {name}, an AI assistant. How can I help you?",
+ tools=self.tools,
+ )
+
+ def run(self, task: str, *args, **kwargs) -> str:
+ """
+ Run a task using the GriptapeAgent.
+
+ Parameters:
+ - task: The task to be performed by the agent.
+
+ Returns:
+ - The response from the GriptapeAgent as a string.
+ """
+ response = self.griptape_agent.run(task, *args, **kwargs)
+ return str(response)
+
+ def add_tool(self, tool) -> None:
+ """
+ Add a tool to the agent.
+
+ Parameters:
+ - tool: The tool to be added.
+ """
+ self.tools.append(tool)
+ self.griptape_agent = GriptapeAgent(
+ input=f"I am {self.name}, an AI assistant. How can I help you?",
+ tools=self.tools,
+ )
+
+
+# Usage example
+griptape_wrapper = GriptapeAgentWrapper("GriptapeAssistant")
+result = griptape_wrapper.run(
+ "Load https://example.com, summarize it, and store it in a file called example_summary.txt."
+)
+print(result)
diff --git a/playground/agents/3rd_party_agents/langchain.py b/playground/agents/3rd_party_agents/langchain.py
new file mode 100644
index 00000000..f28c3001
--- /dev/null
+++ b/playground/agents/3rd_party_agents/langchain.py
@@ -0,0 +1,82 @@
+from typing import List, Optional
+
+from langchain.agents import AgentExecutor, LLMSingleActionAgent, Tool
+from langchain.chains import LLMChain
+from langchain.llms import OpenAI
+from langchain.prompts import StringPromptTemplate
+from langchain.tools import DuckDuckGoSearchRun
+
+from swarms import Agent
+
+
+class LangchainAgentWrapper(Agent):
+ """
+ Initialize the LangchainAgentWrapper.
+
+ Args:
+ name (str): The name of the agent.
+ tools (List[Tool]): The list of tools available to the agent.
+ llm (Optional[OpenAI], optional): The OpenAI language model to use. Defaults to None.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ tools: List[Tool],
+ llm: Optional[OpenAI] = None,
+ *args,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.name = name
+ self.tools = tools
+ self.llm = llm or OpenAI(temperature=0)
+
+ prompt = StringPromptTemplate.from_template(
+ "You are {name}, an AI assistant. Answer the following question: {question}"
+ )
+
+ llm_chain = LLMChain(llm=self.llm, prompt=prompt)
+ tool_names = [tool.name for tool in self.tools]
+
+ self.agent = LLMSingleActionAgent(
+ llm_chain=llm_chain,
+ output_parser=None,
+ stop=["\nObservation:"],
+ allowed_tools=tool_names,
+ )
+
+ self.agent_executor = AgentExecutor.from_agent_and_tools(
+ agent=self.agent, tools=self.tools, verbose=True
+ )
+
+ def run(self, task: str, *args, **kwargs):
+ """
+ Run the agent with the given task.
+
+ Args:
+ task (str): The task to be performed by the agent.
+
+ Returns:
+ Any: The result of the agent's execution.
+ """
+ try:
+ return self.agent_executor.run(task)
+ except Exception as e:
+ print(f"An error occurred: {e}")
+
+
+# Usage example
+
+search_tool = DuckDuckGoSearchRun()
+tools = [
+ Tool(
+ name="Search",
+ func=search_tool.run,
+ description="Useful for searching the internet",
+ )
+]
+
+langchain_wrapper = LangchainAgentWrapper("LangchainAssistant", tools)
+result = langchain_wrapper.run("What is the capital of France?")
+print(result)
diff --git a/playground/agents/3rd_party_agents/multion_agent.py b/playground/agents/3rd_party_agents/multion_agent.py
new file mode 100644
index 00000000..2bbe7e92
--- /dev/null
+++ b/playground/agents/3rd_party_agents/multion_agent.py
@@ -0,0 +1,49 @@
+import timeit
+
+from swarms import Agent, ConcurrentWorkflow, Task
+from swarms.agents.multion_agent import MultiOnAgent
+
+# model
+model = MultiOnAgent(multion_api_key="api-key")
+
+
+# out = model.run("search for a recipe")
+agent = Agent(
+ agent_name="MultiOnAgent",
+ description="A multi-on agent that performs browsing tasks.",
+ llm=model,
+ max_loops=1,
+ system_prompt=None,
+)
+
+# logger.info("[Agent][ID][MultiOnAgent][Initialized][Successfully")
+
+# Task
+task = Task(
+ agent=agent,
+ description="Download https://www.coachcamel.com/",
+)
+
+# Swarm
+# logger.info(
+# f"Running concurrent workflow with task: {task.description}"
+# )
+
+# Measure execution time
+start_time = timeit.default_timer()
+
+workflow = ConcurrentWorkflow(
+ max_workers=20,
+ autosave=True,
+ print_results=True,
+ return_results=True,
+)
+
+# Add task to workflow
+workflow.add(task)
+workflow.run()
+
+# Calculate execution time
+execution_time = timeit.default_timer() - start_time
+# logger.info(f"Execution time: {execution_time} seconds")
+print(f"Execution time: {execution_time} seconds")
diff --git a/playground/agents/agents_and_memory/agent_with_longterm_memory.py b/playground/agents/agents_and_memory/agent_with_longterm_memory.py
new file mode 100644
index 00000000..36e32081
--- /dev/null
+++ b/playground/agents/agents_and_memory/agent_with_longterm_memory.py
@@ -0,0 +1,43 @@
+import os
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent struct
+from swarms import Agent, OpenAIChat
+from swarms_memory import ChromaDB
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+
+# Initialize the chromadb client
+chromadb = ChromaDB(
+ metric="cosine",
+ output_dir="scp",
+ docs_folder="artifacts",
+)
+
+# Initialize the language model
+llm = OpenAIChat(
+ temperature=0.5,
+ openai_api_key=api_key,
+ max_tokens=1000,
+)
+
+## Initialize the workflow
+agent = Agent(
+ llm=llm,
+ name="Health and Wellness Blog",
+ system_prompt="Generate a 10,000 word blog on health and wellness.",
+ max_loops=4,
+ autosave=True,
+ dashboard=True,
+ long_term_memory=[chromadb],
+ memory_chunk_size=300,
+)
+
+# Run the workflow on a task
+agent.run("Generate a 10,000 word blog on health and wellness.")
diff --git a/playground/agents/agents_and_memory/finance_agent_with_memory b/playground/agents/agents_and_memory/finance_agent_with_memory
new file mode 100644
index 00000000..4064b303
--- /dev/null
+++ b/playground/agents/agents_and_memory/finance_agent_with_memory
@@ -0,0 +1,57 @@
+import os
+
+from swarms_memory import ChromaDB
+
+from swarms import Agent, Anthropic
+from swarms.prompts.finance_agent_sys_prompt import (
+ FINANCIAL_AGENT_SYS_PROMPT,
+)
+from swarms.utils.data_to_text import data_to_text
+
+# Initialize the chromadb client
+chromadb = ChromaDB(
+ metric="cosine",
+ output_dir="fiance_agent_rag",
+ # docs_folder="artifacts", # Folder of your documents
+)
+
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ agent_description="Agent creates ",
+ llm=Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY")),
+ max_loops="auto",
+ autosave=True,
+ # dynamic_temperature_enabled=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ # interactive=True, # Set to False to disable interactive mode
+ dynamic_temperature_enabled=True,
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+contract = data_to_text("your_contract_pdf.pdf")
+
+agent.run(
+ f"Analyze the following contract and give me a full summary: {contract}"
+)
diff --git a/playground/agents/easy_example.py b/playground/agents/easy_example.py
new file mode 100644
index 00000000..bebdb11a
--- /dev/null
+++ b/playground/agents/easy_example.py
@@ -0,0 +1,14 @@
+from swarms import Agent, OpenAIChat
+
+## Initialize the workflow
+agent = Agent(
+ llm=OpenAIChat(),
+ max_loops=1,
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+)
+
+# Run the workflow on a task
+agent("Find a chick fil a equivalent in hayes valley")
diff --git a/playground/agents/llama_3_agent/llama_3_1_agent.py b/playground/agents/llama_3_agent/llama_3_1_agent.py
new file mode 100644
index 00000000..653b9ada
--- /dev/null
+++ b/playground/agents/llama_3_agent/llama_3_1_agent.py
@@ -0,0 +1,49 @@
+from swarms import Agent, HuggingfaceLLM
+from swarms.prompts.finance_agent_sys_prompt import (
+ FINANCIAL_AGENT_SYS_PROMPT,
+)
+
+model = HuggingfaceLLM(
+ model_id="meta-llama/Meta-Llama-3.1-8B",
+ max_tokens=4000,
+ temperature=0.1,
+)
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ # dynamic_temperature_enabled=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ # interactive=True, # Set to False to disable interactive mode
+ dynamic_temperature_enabled=True,
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # tool_schema=
+ # tools
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+agent.run(
+ "What are the components of a startups stock incentive equity plan"
+)
diff --git a/playground/agents/monitoring/agent_ops.py b/playground/agents/monitoring/agent_ops.py
new file mode 100644
index 00000000..582f9879
--- /dev/null
+++ b/playground/agents/monitoring/agent_ops.py
@@ -0,0 +1,86 @@
+"""
+* WORKING
+
+What this script does:
+Multi-Agent run to test AgentOps (https://www.agentops.ai/)
+
+Requirements:
+1. Create an account on https://www.agentops.ai/ and run pip install agentops
+2. Add the following API key(s) in your .env file:
+ - OPENAI_API_KEY
+ - AGENTOPS_API_KEY
+3. Go to your agentops dashboard to observe your activity
+
+"""
+
+################ Adding project root to PYTHONPATH ################################
+# If you are running playground examples in the project files directly, use this:
+
+import sys
+import os
+
+sys.path.insert(0, os.getcwd())
+
+################ Adding project root to PYTHONPATH ################################
+
+from swarms import Agent, OpenAIChat, AgentRearrange
+
+Treasurer = Agent(
+ agent_name="Treasurer",
+ system_prompt="Give your opinion on the cash management.",
+ agent_description=(
+ "responsible for managing an organization's financial assets and liquidity. They oversee cash management, "
+ "investment strategies, and financial risk. Key duties include monitoring cash flow, managing bank relationships, "
+ "ensuring sufficient funds for operations, and optimizing returns on short-term investments. Treasurers also often "
+ "handle debt management and may be involved in capital raising activities."
+ ),
+ llm=OpenAIChat(),
+ max_loops=1,
+ agent_ops_on=True,
+)
+
+
+CFO = Agent(
+ agent_name="CFO",
+ system_prompt="Give your opinion on the financial performance of the company.",
+ agent_description=(
+ "the top financial executive in an organization, overseeing all financial operations and strategy. Their role is broader than a treasurer's and includes:\n"
+ "Financial planning and analysis\n"
+ "Accounting and financial reporting\n"
+ "Budgeting and forecasting\n"
+ "Strategic financial decision-making\n"
+ "Compliance and risk management\n"
+ "Investor relations (in public companies)\n"
+ "Overseeing the finance and accounting departments"
+ ),
+ llm=OpenAIChat(),
+ max_loops=1,
+ agent_ops_on=True,
+)
+
+swarm = AgentRearrange(
+ agents=[Treasurer, CFO],
+ flow="Treasurer -> CFO",
+)
+
+results = swarm.run(
+ "Date,Revenue,Expenses,Profit,Cash_Flow,Inventory,Customer_Acquisition_Cost,Customer_Retention_Rate,Marketing_Spend,R&D_Spend,Debt,Assets\n"
+ "2023-01-01,1000000,800000,200000,150000,500000,100,0.85,50000,100000,2000000,5000000\n"
+ "2023-02-01,1050000,820000,230000,180000,520000,95,0.87,55000,110000,1950000,5100000\n"
+ "2023-03-01,1100000,850000,250000,200000,530000,90,0.88,60000,120000,1900000,5200000\n"
+ "2023-04-01,1200000,900000,300000,250000,550000,85,0.90,70000,130000,1850000,5400000\n"
+ "2023-05-01,1300000,950000,350000,300000,580000,80,0.92,80000,140000,1800000,5600000\n"
+ "2023-06-01,1400000,1000000,400000,350000,600000,75,0.93,90000,150000,1750000,5800000\n"
+ "2023-07-01,1450000,1050000,400000,320000,620000,78,0.91,95000,160000,1700000,5900000\n"
+ "2023-08-01,1500000,1100000,400000,300000,650000,80,0.90,100000,170000,1650000,6000000\n"
+ "2023-09-01,1550000,1150000,400000,280000,680000,82,0.89,105000,180000,1600000,6100000\n"
+ "2023-10-01,1600000,1200000,400000,260000,700000,85,0.88,110000,190000,1550000,6200000\n"
+ "2023-11-01,1650000,1250000,400000,240000,720000,88,0.87,115000,200000,1500000,6300000\n"
+ "2023-12-01,1700000,1300000,400000,220000,750000,90,0.86,120000,210000,1450000,6400000\n"
+ "2024-01-01,1500000,1200000,300000,180000,780000,95,0.84,100000,180000,1500000,6300000\n"
+ "2024-02-01,1550000,1220000,330000,200000,760000,92,0.85,105000,185000,1480000,6350000\n"
+ "2024-03-01,1600000,1240000,360000,220000,740000,89,0.86,110000,190000,1460000,6400000\n"
+ "2024-04-01,1650000,1260000,390000,240000,720000,86,0.87,115000,195000,1440000,6450000\n"
+ "2024-05-01,1700000,1280000,420000,260000,700000,83,0.88,120000,200000,1420000,6500000\n"
+ "2024-06-01,1750000,1300000,450000,280000,680000,80,0.89,125000,205000,1400000,6550000"
+)
diff --git a/playground/agents/monitoring/agent_ops_tools.py b/playground/agents/monitoring/agent_ops_tools.py
new file mode 100644
index 00000000..d3d7a310
--- /dev/null
+++ b/playground/agents/monitoring/agent_ops_tools.py
@@ -0,0 +1,60 @@
+"""
+* WORKING
+
+What this script does:
+Simple agent run to test AgentOps to record tool actions (https://www.agentops.ai/)
+
+Requirements:
+1. Create an account on https://www.agentops.ai/ and run pip install agentops
+2. Add the following API key(s) in your .env file:
+ - OPENAI_API_KEY
+ - AGENTOPS_API_KEY
+3. Go to your agentops dashboard to observe your activity
+
+"""
+
+################ Adding project root to PYTHONPATH ################################
+# If you are running playground examples in the project files directly, use this:
+
+import sys
+import os
+
+sys.path.insert(0, os.getcwd())
+
+################ Adding project root to PYTHONPATH ################################
+
+
+from swarms import Agent, OpenAIChat
+from agentops import record_function
+
+
+# Add agentops decorator on your tools
+@record_function("length_checker")
+def length_checker(string: str) -> int:
+ """
+ For a given string it returns the length of the string.
+
+ Args:
+ string (str): string to check the length of
+
+ Returns:
+ int: length of the string
+ """
+ return len(string)
+
+
+agent1 = Agent(
+ agent_name="lengther",
+ system_prompt="return the length of the string",
+ agent_description=(
+ "For a given string it calls the function length_checker to return the length of the string."
+ ),
+ llm=OpenAIChat(),
+ max_loops=1,
+ agent_ops_on=True,
+ tools=[length_checker],
+ execute_tool=True,
+)
+
+
+agent1.run("hello")
diff --git a/playground/agents/swarm_protocol.py b/playground/agents/swarm_protocol.py
new file mode 100644
index 00000000..a29b2c5f
--- /dev/null
+++ b/playground/agents/swarm_protocol.py
@@ -0,0 +1,54 @@
+from dataclasses import dataclass
+from typing import List
+
+from swarms import JSON, BaseLLM, BaseVectorDatabase, Agent
+
+
+@dataclass
+class YourAgent(Agent):
+ """
+ Represents an agent in the swarm protocol.
+
+ Attributes:
+ llm (BaseLLM): The low-level module for the agent.
+ long_term_memory (BaseVectorDatabase): The long-term memory for the agent.
+ tool_schema (List[JSON]): The schema for the tools used by the agent.
+ """
+
+ llm: BaseLLM
+ long_term_memory: BaseVectorDatabase
+ tool_schema: JSON
+ tool_schemas: List[JSON]
+
+ def step(self, task: str, *args, **kwargs):
+ """
+ Performs a single step in the agent's task.
+
+ Args:
+ task (str): The task to be performed.
+ *args: Additional positional arguments.
+ **kwargs: Additional keyword arguments.
+ """
+ ...
+
+ def run(self, task: str, *args, **kwargs):
+ """
+ Runs the agent's task.
+
+ Args:
+ task (str): The task to be performed.
+ *args: Additional positional arguments.
+ **kwargs: Additional keyword arguments.
+ """
+ ...
+
+ def plan(self, task: str, *args, **kwargs):
+ """
+ Plans the agent's task.
+
+ Args:
+ task (str): The task to be performed.
+ *args: Additional positional arguments.
+ **kwargs: Additional keyword arguments.
+ """
+ ...
diff --git a/playground/agents/tools/agent_with_basemodel_output_type.py b/playground/agents/tools/agent_with_basemodel_output_type.py
new file mode 100644
index 00000000..409ceef0
--- /dev/null
+++ b/playground/agents/tools/agent_with_basemodel_output_type.py
@@ -0,0 +1,58 @@
+from pydantic import BaseModel, Field
+from swarms import OpenAIChat
+from swarms import Agent
+import os
+
+
+# Initialize the schema for the person's information
+class Schema(BaseModel):
+ name: str = Field(..., title="Name of the person")
+ agent: int = Field(..., title="Age of the person")
+ is_student: bool = Field(..., title="Whether the person is a student")
+ courses: list[str] = Field(
+ ..., title="List of courses the person is taking"
+ )
+
+
+# Convert the schema to a JSON string
+tool_schema = Schema(
+ name="Tool Name",
+ agent=1,
+ is_student=True,
+ courses=["Course1", "Course2"],
+)
+
+# Define the task to generate a person's information
+task = "Generate a person's information based on the following schema:"
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Person Information Generator",
+ system_prompt=(
+ "Generate a person's information based on the following schema:"
+ ),
+ # Set the tool schema to the JSON string -- this is the key difference
+ # tool_schema=tool_schema,
+ llm=OpenAIChat(
+ openai_api_key=os.getenv("OPENAI_API_KEY"),
+ ),
+ max_loops=3,
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ interactive=True,
+ # Set the output type to the tool schema which is a BaseModel
+ # output_type=tool_schema, # or dict, or str
+ metadata_output_type="json",
+ # List of schemas that the agent can handle
+ list_base_models=[tool_schema],
+ function_calling_format_type="OpenAI",
+ function_calling_type="json", # or soon yaml
+)
+
+# Run the agent to generate the person's information
+generated_data = agent.run(task)
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
diff --git a/playground/agents/tools/agent_with_many_tools.py b/playground/agents/tools/agent_with_many_tools.py
new file mode 100644
index 00000000..e69de29b
diff --git a/playground/agents/tools/devin.py b/playground/agents/tools/devin.py
new file mode 100644
index 00000000..cd264337
--- /dev/null
+++ b/playground/agents/tools/devin.py
@@ -0,0 +1,105 @@
+from swarms import Agent, Anthropic, tool
+import subprocess
+
+# Model
+llm = Anthropic(
+ temperature=0.1,
+)
+
+
+# Tools
+@tool
+def terminal(
+ code: str,
+):
+ """
+ Run code in the terminal.
+
+ Args:
+ code (str): The code to run in the terminal.
+
+ Returns:
+ str: The output of the code.
+ """
+ out = subprocess.run(
+ code, shell=True, capture_output=True, text=True
+ ).stdout
+ return str(out)
+
+
+@tool
+def browser(query: str):
+ """
+ Search the query in the browser with the `browser` tool.
+
+ Args:
+ query (str): The query to search in the browser.
+
+ Returns:
+ str: The search results.
+ """
+ import webbrowser
+
+ url = f"https://www.google.com/search?q={query}"
+ webbrowser.open(url)
+ return f"Searching for {query} in the browser."
+
+
+@tool
+def create_file(file_path: str, content: str):
+ """
+ Create a file using the file editor tool.
+
+ Args:
+ file_path (str): The path to the file.
+ content (str): The content to write to the file.
+
+ Returns:
+ str: The result of the file creation operation.
+ """
+ with open(file_path, "w") as file:
+ file.write(content)
+ return f"File {file_path} created successfully."
+
+
+@tool
+def file_editor(file_path: str, mode: str, content: str):
+ """
+ Edit a file using the file editor tool.
+
+ Args:
+ file_path (str): The path to the file.
+ mode (str): The mode to open the file in.
+ content (str): The content to write to the file.
+
+ Returns:
+ str: The result of the file editing operation.
+ """
+ with open(file_path, mode) as file:
+ file.write(content)
+ return f"File {file_path} edited successfully."
+
+
+# Agent
+agent = Agent(
+ agent_name="Devin",
+ system_prompt=(
+ "Autonomous agent that can interact with humans and other"
+ " agents. Be Helpful and Kind. Use the tools provided to"
+ " assist the user. Return all code in markdown format."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ code_interpreter=True,
+)
+
+# Run the agent
+out = agent("Create a new file for a plan to take over the world.")
+print(out)
diff --git a/playground/agents/tools/devin_agent.py b/playground/agents/tools/devin_agent.py
new file mode 100644
index 00000000..b10d1c14
--- /dev/null
+++ b/playground/agents/tools/devin_agent.py
@@ -0,0 +1,105 @@
+from swarms import Agent, OpenAIChat # ChromaDB
+import subprocess
+
+# Model
+llm = OpenAIChat(
+ temperature=0.1,
+)
+
+
+# Tools
+def terminal(
+ code: str,
+):
+ """
+ Run code in the terminal.
+
+ Args:
+ code (str): The code to run in the terminal.
+
+ Returns:
+ str: The output of the code.
+ """
+ out = subprocess.run(
+ code, shell=True, capture_output=True, text=True
+ ).stdout
+ return str(out)
+
+
+def browser(query: str):
+ """
+ Search the query in the browser with the `browser` tool.
+
+ Args:
+ query (str): The query to search in the browser.
+
+ Returns:
+ str: The search results.
+ """
+ import webbrowser
+
+ url = f"https://www.google.com/search?q={query}"
+ webbrowser.open(url)
+ return f"Searching for {query} in the browser."
+
+
+def create_file(file_path: str, content: str):
+ """
+ Create a file using the file editor tool.
+
+ Args:
+ file_path (str): The path to the file.
+ content (str): The content to write to the file.
+
+ Returns:
+ str: The result of the file creation operation.
+ """
+ with open(file_path, "w") as file:
+ file.write(content)
+ return f"File {file_path} created successfully."
+
+
+def file_editor(file_path: str, mode: str, content: str):
+ """
+ Edit a file using the file editor tool.
+
+ Args:
+ file_path (str): The path to the file.
+ mode (str): The mode to open the file in.
+ content (str): The content to write to the file.
+
+ Returns:
+ str: The result of the file editing operation.
+ """
+ with open(file_path, mode) as file:
+ file.write(content)
+ return f"File {file_path} edited successfully."
+
+
+# Agent
+agent = Agent(
+ agent_name="Devin",
+ system_prompt=(
+ "Autonomous agent that can interact with humans and other"
+ " agents. Be Helpful and Kind. Use the tools provided to"
+ " assist the user. Return all code in markdown format."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[terminal, browser, file_editor, create_file],
+ # long_term_memory=chromadb,
+ metadata_output_type="json",
+ # List of schemas that the agent can handle
+ # list_base_models=[tool_schema],
+ function_calling_format_type="OpenAI",
+ function_calling_type="json", # or soon yaml
+)
+
+# Run the agent
+agent.run("Create a new file for a plan to take over the world.")
diff --git a/playground/agents/tools/full_stack_agent.py b/playground/agents/tools/full_stack_agent.py
new file mode 100644
index 00000000..0db12ad3
--- /dev/null
+++ b/playground/agents/tools/full_stack_agent.py
@@ -0,0 +1,33 @@
+from swarms import Agent, Anthropic, tool
+
+
+# Tool
+@tool # Wrap the function with the tool decorator
+def search_api(query: str, max_results: int = 10):
+ """
+ Search the web for the query and return the top `max_results` results.
+ """
+ return f"Search API: {query} -> {max_results} results"
+
+
+## Initialize the workflow
+agent = Agent(
+ agent_name="Youtube Transcript Generator",
+ agent_description=(
+ "Generate a transcript for a youtube video on what swarms" " are!"
+ ),
+ llm=Anthropic(),
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ tools=[search_api],
+)
+
+# Run the workflow on a task
+agent(
+ "Generate a transcript for a youtube video on what swarms are!"
+ " Output a token when done."
+)
diff --git a/playground/agents/tools/func_calling_schema.py b/playground/agents/tools/func_calling_schema.py
new file mode 100644
index 00000000..da0ccc13
--- /dev/null
+++ b/playground/agents/tools/func_calling_schema.py
@@ -0,0 +1,13 @@
+import json
+from swarms.tools.py_func_to_openai_func_str import (
+ get_openai_function_schema_from_func,
+)
+from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api
+
+out = get_openai_function_schema_from_func(
+ fetch_web_articles_bing_api,
+ name="fetch_web_articles_bing_api",
+ description="Fetches four articles from Bing Web Search API based on the given query.",
+)
+out = json.dumps(out, indent=2)
+print(out)
diff --git a/playground/agents/tools/function_calling/agent_spec_func_calling.py b/playground/agents/tools/function_calling/agent_spec_func_calling.py
new file mode 100644
index 00000000..1fd18078
--- /dev/null
+++ b/playground/agents/tools/function_calling/agent_spec_func_calling.py
@@ -0,0 +1,73 @@
+import json
+import os
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+from typing import List
+
+
+class AgentSpec(BaseModel):
+ agent_name: str = Field(
+ ...,
+ description="The name of the agent",
+ )
+ system_prompt: str = Field(
+ ...,
+ description="The system prompt for the agent",
+ )
+ agent_description: str = Field(
+ ...,
+ description="The description of the agent",
+ )
+ max_tokens: int = Field(
+ ...,
+ description="The maximum number of tokens to generate in the API response",
+ )
+ temperature: float = Field(
+ ...,
+ description="A parameter that controls the randomness of the generated text",
+ )
+ context_window: int = Field(
+ ...,
+ description="The context window for the agent",
+ )
+ model_name: str = Field(
+ ...,
+ description="The model name for the agent from huggingface",
+ )
+
+
+class SwarmSpec(BaseModel):
+ multiple_agents: List[AgentSpec] = Field(
+ ...,
+ description="The list of agents in the swarm",
+ )
+
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're an agent creator, you're purpose is to create an agent with the user provided specifications. Think of relevant names, descriptions, and context windows for the agent. You need to provide the name of the agent, the system prompt for the agent, the description of the agent, the maximum number of tokens to generate in the API response, the temperature for the agent, the context window for the agent, and the model name for the agent from huggingface.",
+ max_tokens=3000,
+ temperature=0.8,
+ base_model=SwarmSpec,
+ parallel_tool_calls=False,
+)
+
+
+# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+out = model.run(
+ "Create a swarm of agents to generate social media posts. Each agent should have it's own social media"
+)
+
+
+# Define the folder and file name
+folder_name = "agent_workspace"
+file_name = "agent_output.json"
+
+# Check if the folder exists, if not, create it
+if not os.path.exists(folder_name):
+ os.makedirs(folder_name)
+
+# Write the output to a JSON file
+with open(os.path.join(folder_name, file_name), "w") as f:
+ json.dump(out, f)
diff --git a/playground/agents/tools/function_calling/claude_artifacts_example.py b/playground/agents/tools/function_calling/claude_artifacts_example.py
new file mode 100644
index 00000000..12a809ce
--- /dev/null
+++ b/playground/agents/tools/function_calling/claude_artifacts_example.py
@@ -0,0 +1,42 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+
+
+# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
+class ClaudeArtifact(BaseModel):
+ name: str = Field(
+ ...,
+ description="The name of the artifact",
+ )
+ plan: str = Field(
+ ...,
+ description="Plan for the artifact, Do I generate a new python file or do I modify an existing one?",
+ )
+ file_name_path: str = Field(
+ ...,
+ description="The path to the file to modify or create for example: 'game.py'",
+ )
+ content_of_file: str = Field(
+ ...,
+ description="The content of the file to modify or create ",
+ )
+ edit_count: int = Field(
+ ...,
+ description="The number of times to edit the file",
+ )
+
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're an artifact creator, you're purpose is to create an artifact with the user provided specifications. Think of relevant names, descriptions, and context windows for the artifact. You need to provide the name of the artifact, the system prompt for the artifact, the description of the artifact, the maximum number of tokens to generate in the API response, the temperature for the artifact, the context window for the artifact, and the model name for the artifact from huggingface.",
+ max_tokens=3500,
+ temperature=0.9,
+ base_model=ClaudeArtifact,
+ parallel_tool_calls=False,
+)
+
+out = model.run(
+ "Create a game in python that has never been created before. Create a new form of gaming experience that has never been contemplated before."
+)
+print(out)
diff --git a/playground/agents/tools/function_calling/openai_function_caller_agent_rearrange.py b/playground/agents/tools/function_calling/openai_function_caller_agent_rearrange.py
new file mode 100644
index 00000000..165d831e
--- /dev/null
+++ b/playground/agents/tools/function_calling/openai_function_caller_agent_rearrange.py
@@ -0,0 +1,52 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel
+
+
+# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
+# It is used here to define the data structures for generating model code and training code.
+class ModelCode(BaseModel):
+ file_name: str
+ model_code_in_pytorch: str
+
+
+class TrainingCodeModel(BaseModel):
+ file_name: str
+ training_code: str
+ dataset_name: str
+
+
+# The ModelCode and TrainingCodeModel classes are Pydantic BaseModels that define the
+# structured outputs for generating model code and its training code, respectively.
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're a model engineer, you're purpose is to generate code in pytorch for a give model name and code",
+ max_tokens=4000,
+ temperature=0.5,
+ base_model=ModelCode,
+)
+
+trainer = OpenAIFunctionCaller(
+ system_prompt="You're a model engineer, you're purpose is to generate the code for a given model architecture in pytorch to train using available datasets on huggingface",
+ max_tokens=4000,
+ temperature=0.5,
+ base_model=TrainingCodeModel,
+)
+
+# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+# Here, we initialize an instance of the OpenAIFunctionCaller class with the following parameters:
+# - system_prompt: A prompt that sets the context for the conversation with the API.
+# - max_tokens: The maximum number of tokens to generate in the API response.
+# - temperature: A parameter that controls the randomness of the generated text.
+# - base_model: The base model to use for the API calls, in this case, the ModelCode class.
+out = model.run(
+ "Generate a pytorch code for a sentiment analysis model using pytorch"
+)
+print(str(out))
+
+# Trainer
+out = trainer.run(
+ f"Generate the training code for the sentiment analysis model using pytorch: {trainer}"
+)
+print(out)
diff --git a/playground/agents/tools/function_calling/openai_function_caller_example.py b/playground/agents/tools/function_calling/openai_function_caller_example.py
new file mode 100644
index 00000000..c0a8f0a7
--- /dev/null
+++ b/playground/agents/tools/function_calling/openai_function_caller_example.py
@@ -0,0 +1,39 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel
+
+
+# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
+# It is used here to define the data structure for making API calls to retrieve weather information.
+class WeatherAPI(BaseModel):
+ city: str
+ date: str
+
+
+# The WeatherAPI class is a Pydantic BaseModel that represents the data structure
+# for making API calls to retrieve weather information. It has two attributes: city and date.
+
+
+# Example usage:
+# Initialize the function caller
+function_caller = OpenAIFunctionCaller(
+ system_prompt="You are a helpful assistant.",
+ max_tokens=500,
+ temperature=0.5,
+ base_model=WeatherAPI,
+)
+
+# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+# Here, we initialize an instance of the OpenAIFunctionCaller class with the following parameters:
+# - system_prompt: A prompt that sets the context for the conversation with the API.
+# - max_tokens: The maximum number of tokens to generate in the API response.
+# - temperature: A parameter that controls the randomness of the generated text.
+# - base_model: The base model to use for the API calls, in this case, the WeatherAPI class.
+
+# Run the function caller
+response = function_caller.run(
+ "Get the weather forecast for New York City on July 4th, 2022."
+)
+
+# The run() method of the OpenAIFunctionCaller class is used to make a function call to the API.
+# It takes a string parameter that represents the user's request or query.
+print(response)
diff --git a/playground/agents/tools/function_calling/prompt_generator_agent.py b/playground/agents/tools/function_calling/prompt_generator_agent.py
new file mode 100644
index 00000000..cc5c2e0e
--- /dev/null
+++ b/playground/agents/tools/function_calling/prompt_generator_agent.py
@@ -0,0 +1,55 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+from typing import Sequence
+
+
+class PromptUseCase(BaseModel):
+ use_case_name: str = Field(
+ ...,
+ description="The name of the use case",
+ )
+ use_case_description: str = Field(
+ ...,
+ description="The description of the use case",
+ )
+
+
+class PromptSpec(BaseModel):
+ prompt_name: str = Field(
+ ...,
+ description="The name of the prompt",
+ )
+ prompt_description: str = Field(
+ ...,
+ description="The description of the prompt",
+ )
+ prompt: str = Field(
+ ...,
+ description="The prompt for the agent",
+ )
+ tags: str = Field(
+ ...,
+ description="The tags for the prompt such as sentiment, code, etc seperated by commas.",
+ )
+ use_cases: Sequence[PromptUseCase] = Field(
+ ...,
+ description="The use cases for the prompt",
+ )
+
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're an prompt creator, you're purpose is to create system prompts for new LLM Agents for the user. Follow the best practices for creating a prompt such as making it direct and clear. Providing instructions and many-shot examples will help the agent understand the task better.",
+ max_tokens=1000,
+ temperature=0.5,
+ base_model=PromptSpec,
+ parallel_tool_calls=False,
+)
+
+
+# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+out = model.run(
+ "Create a prompt for an agent that is really good for email greeting, make sure the agent doesn't sound like an robot or an AI. Provide many-shot examples and instructions for the agent to follow."
+)
+print(out)
diff --git a/playground/agents/tools/function_calling/rag_with_codebase.py b/playground/agents/tools/function_calling/rag_with_codebase.py
new file mode 100644
index 00000000..bb6596d7
--- /dev/null
+++ b/playground/agents/tools/function_calling/rag_with_codebase.py
@@ -0,0 +1,69 @@
+import os
+
+from swarms_memory import ChromaDB
+
+from swarms import Agent, OpenAIChat, AgentRearrange
+
+# Initialize the chromadb client
+chromadb = ChromaDB(
+ metric="cosine",
+ output_dir="swarms_framework_onboardig_agent",
+ docs_folder="docs", # Folder of your documents
+ n_results=1,
+ limit_tokens=1000,
+)
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key,
+ model_name="gpt-4o-mini",
+ temperature=0.1,
+)
+
+
+# Initialize the concept understanding agent
+concept_agent = Agent(
+ agent_name="Concept-Understanding-Agent",
+ system_prompt="You're purpose is to understand the swarms framework conceptually and architecturally, you'll work with the code generation agent to generate code snippets",
+ agent_description="Agent for understanding concepts",
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ verbose=True,
+ saved_state_path="concept_agent.json",
+ interactive=True,
+ context_length=160000,
+ memory_chunk_size=2000,
+)
+
+# Initialize the code generation agent
+code_agent = Agent(
+ agent_name="Code-Generation-Agent",
+ system_prompt="You're purpose is to generate code snippets for the swarms framework, you'll work with the concept understanding agent to understand concepts.",
+ agent_description="Agent for generating code",
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ verbose=True,
+ saved_state_path="code_agent.json",
+ interactive=True,
+ context_length=160000,
+ memory_chunk_size=2000,
+)
+
+
+# Swarm
+swarm = AgentRearrange(
+ agents=[concept_agent, code_agent],
+ flow=f"{concept_agent.agent_name} -> {code_agent.agent_name}",
+ max_loops=1,
+ memory_system=chromadb,
+)
+
+# Run
+swarm.run(
+ "Let's understand the agentrearrange class in the swarms framework"
+)
diff --git a/playground/agents/tools/function_calling/react_agent.py b/playground/agents/tools/function_calling/react_agent.py
new file mode 100644
index 00000000..62212983
--- /dev/null
+++ b/playground/agents/tools/function_calling/react_agent.py
@@ -0,0 +1,75 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+from typing import List
+
+
+class Observation(BaseModel):
+ observation: str = Field(
+ ...,
+ description="What are you seeing in the image?",
+ )
+ summary_of_observation: str = Field(
+ ...,
+ description="The summary of the observation/ img",
+ )
+
+
+class Sequence(BaseModel):
+ goal: str = Field(
+ ...,
+ description="The goal of the mission",
+ )
+ observation: List[Observation] = Field(
+ ...,
+ description="The observations of the agent",
+ )
+ action: str = Field(
+ ...,
+ description="Take an action that leads to the completion of the task.",
+ )
+
+
+class GoalDecomposer(BaseModel):
+ goal: str = Field(
+ ...,
+ description="The goal of the task",
+ )
+ sub_goals: List[str] = Field(
+ ...,
+ description="The sub goals of the mission",
+ )
+
+
+# Given the task t, observation o, the sub-goals
+# sequence g1, g2, g3, ..., gn can be formulated as:
+
+
+class KGP(BaseModel):
+ task: str = Field(
+ ...,
+ description="The task to be accomplished",
+ )
+ observation: str = Field(
+ ...,
+ description="The observation of the task",
+ )
+ sequence: List[GoalDecomposer] = Field(
+ ...,
+ description="The sequence of goals to accomplish the task",
+ )
+
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're an autonomous agent, you're purpose to accomplish a task through understanding your goal, observing the environment, and taking actions that lead to the completion of the task.",
+ max_tokens=500,
+ temperature=0.5,
+ base_model=KGP,
+ parallel_tool_calls=False,
+)
+
+
+# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+out = model.run("We need to craft a diamond pickaxe to mine the obsidian.")
+print(out)
diff --git a/playground/agents/tools/function_calling/sentiment_analysis_function_calling.py b/playground/agents/tools/function_calling/sentiment_analysis_function_calling.py
new file mode 100644
index 00000000..fcc8a311
--- /dev/null
+++ b/playground/agents/tools/function_calling/sentiment_analysis_function_calling.py
@@ -0,0 +1,39 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+
+
+# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
+# It is used here to define the data structure for sentiment analysis results.
+class SentimentAnalysisCard(BaseModel):
+ text: str = Field(
+ ...,
+ description="The text to be analyzed for sentiment rating",
+ )
+ rating: str = Field(
+ ...,
+ description="The sentiment rating of the text from 0.0 to 1.0",
+ )
+
+
+# The SentimentAnalysisCard class is a Pydantic BaseModel that represents the data
+# structure for sentiment analysis. It has two attributes: text and rating.
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're a sentiment Analysis Agent, you're purpose is to rate the sentiment of text",
+ max_tokens=100,
+ temperature=0.5,
+ base_model=SentimentAnalysisCard,
+ parallel_tool_calls=False,
+)
+
+
+# The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+# Here, we initialize an instance of the OpenAIFunctionCaller class with the following parameters:
+# - system_prompt: A prompt that sets the context for the conversation with the API.
+# - max_tokens: The maximum number of tokens to generate in the API response.
+# - temperature: A parameter that controls the randomness of the generated text.
+# - base_model: The base model to use for the API calls, in this case, the SentimentAnalysisCard class.
+out = model.run("This agent created the code incorrectly it sucked.")
+print(out)
diff --git a/playground/agents/tools/function_to_openai_exec.py b/playground/agents/tools/function_to_openai_exec.py
new file mode 100644
index 00000000..039946bd
--- /dev/null
+++ b/playground/agents/tools/function_to_openai_exec.py
@@ -0,0 +1,39 @@
+from typing import Annotated
+from swarms import create_openai_tool
+from openai import OpenAI
+
+# Create an instance of the OpenAI client
+client = OpenAI()
+
+# Define the user messages for the chat conversation
+messages = [
+ {
+ "role": "user",
+ "content": "What's the weather like in San Francisco, Tokyo, and Paris?",
+ }
+]
+
+
+# Define the BMI calculator tool using the create_openai_tool decorator
+@create_openai_tool(
+ name="BMI Calculator",
+ description="Calculate the Body Mass Index (BMI)",
+)
+def calculate_bmi(
+ weight: Annotated[float, "Weight in kilograms"],
+ height: Annotated[float, "Height in meters"],
+) -> Annotated[float, "Body Mass Index"]:
+ """Calculate the Body Mass Index (BMI) given a person's weight and height."""
+ return weight / (height**2)
+
+
+# Create a chat completion request using the OpenAI client
+response = client.chat.completions.create(
+ model="gpt-3.5-turbo-0125",
+ messages=messages,
+    tools=[calculate_bmi],
+ tool_choice="auto", # auto is default, but we'll be explicit
+)
+
+# Print the generated response from the chat completion
+print(response.choices[0].message.content)
diff --git a/playground/agents/tools/new_tool_wrapper.py b/playground/agents/tools/new_tool_wrapper.py
new file mode 100644
index 00000000..22ce2d6b
--- /dev/null
+++ b/playground/agents/tools/new_tool_wrapper.py
@@ -0,0 +1,19 @@
+from swarms import tool
+
+
+# Create the wrapper to wrap the function
+@tool(
+ name="Geo Coordinates Locator",
+ description=("Locates geo coordinates with a city and or zip code"),
+ return_string=False,
+ return_dict=False,
+)
+def send_api_request_to_get_geo_coordinates(
+ city: str = None, zip: int = None
+):
+ return "Test"
+
+
+# Run the function to get the schema
+out = send_api_request_to_get_geo_coordinates()
+print(out)
diff --git a/playground/agents/tools/tool.py b/playground/agents/tools/tool.py
new file mode 100644
index 00000000..0f0b4a80
--- /dev/null
+++ b/playground/agents/tools/tool.py
@@ -0,0 +1,57 @@
+from swarms import Agent, Anthropic, tool
+
+# Model
+llm = Anthropic(
+ temperature=0.1,
+)
+
+"""
+How to create tools:
+
+1. Define a function that takes the required arguments with documentation and type hints.
+2. Add the `@tool` decorator to the function.
+3. Add the function to the `tools` list in the `Agent` class.
+"""
+
+
+# Tools
+# Browser tools
+@tool
+def browser(query: str):
+ """
+ Opens a web browser and searches for the given query on Google.
+
+ Args:
+ query (str): The search query.
+
+ Returns:
+ str: A message indicating that the search is being performed.
+ """
+ import webbrowser
+
+ url = f"https://www.google.com/search?q={query}"
+ webbrowser.open(url)
+ return f"Searching for {query} in the browser."
+
+
+# Agent
+agent = Agent(
+ agent_name="Devin",
+ system_prompt=(
+ "Autonomous agent that can interact with humans and other"
+ " agents. Be Helpful and Kind. Use the tools provided to"
+ " assist the user. Return all code in markdown format."
+ ),
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[browser],
+)
+
+# Run the agent
+out = agent.run("what's the weather in Miami?")
+print(out)
diff --git a/playground/agents/tools/tool_agent.py b/playground/agents/tools/tool_agent.py
new file mode 100644
index 00000000..02783ff3
--- /dev/null
+++ b/playground/agents/tools/tool_agent.py
@@ -0,0 +1,39 @@
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from swarms import ToolAgent
+
+# Load the pre-trained model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(
+ "databricks/dolly-v2-12b",
+ load_in_4bit=True,
+ device_map="auto",
+)
+tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
+
+# Define a JSON schema for person's information
+json_schema = {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "age": {"type": "number"},
+ "is_student": {"type": "boolean"},
+ "courses": {"type": "array", "items": {"type": "string"}},
+ },
+}
+
+# Define the task to generate a person's information
+task = "Generate a person's information based on the following schema:"
+
+# Create an instance of the ToolAgent class
+agent = ToolAgent(
+ name="dolly-function-agent",
+    description="An agent to create child data",
+ model=model,
+ tokenizer=tokenizer,
+ json_schema=json_schema,
+)
+
+# Run the agent to generate the person's information
+generated_data = agent.run(task)
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
diff --git a/playground/agents/tools/tool_agent/command_r_tool_agent.py b/playground/agents/tools/tool_agent/command_r_tool_agent.py
new file mode 100644
index 00000000..e6fe075a
--- /dev/null
+++ b/playground/agents/tools/tool_agent/command_r_tool_agent.py
@@ -0,0 +1,61 @@
+from pydantic import BaseModel, Field
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from swarms import ToolAgent
+from swarms.tools.json_utils import base_model_to_json
+
+# Model name
+model_name = "CohereForAI/c4ai-command-r-v01-4bit"
+
+# Load the pre-trained model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ device_map="auto",
+)
+
+# Load the pre-trained model and tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+
+# Initialize the schema for the person's information
+class APIExampleRequestSchema(BaseModel):
+ endpoint: str = Field(
+ ..., description="The API endpoint for the example request"
+ )
+ method: str = Field(
+ ..., description="The HTTP method for the example request"
+ )
+ headers: dict = Field(
+ ..., description="The headers for the example request"
+ )
+ body: dict = Field(..., description="The body of the example request")
+ response: dict = Field(
+ ...,
+ description="The expected response of the example request",
+ )
+
+
+# Convert the schema to a JSON string
+api_example_schema = base_model_to_json(APIExampleRequestSchema)
+# Convert the schema to a JSON string
+
+# Define the task to generate a person's information
+task = "Generate an example API request using this code:\n"
+
+# Create an instance of the ToolAgent class
+agent = ToolAgent(
+ name="Command R Tool Agent",
+ description=(
+ "An agent that generates an API request using the Command R"
+ " model."
+ ),
+ model=model,
+ tokenizer=tokenizer,
+ json_schema=api_example_schema,
+)
+
+# Run the agent to generate the person's information
+generated_data = agent.run(task)
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
diff --git a/playground/agents/tools/tool_agent/example_toolagent.py b/playground/agents/tools/tool_agent/example_toolagent.py
new file mode 100644
index 00000000..c6adf00f
--- /dev/null
+++ b/playground/agents/tools/tool_agent/example_toolagent.py
@@ -0,0 +1,32 @@
+# Import necessary libraries
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from swarms import ToolAgent
+
+# Load the pre-trained model and tokenizer
+model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
+tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
+
+# Define a JSON schema for person's information
+json_schema = {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string"},
+ "age": {"type": "number"},
+ "is_student": {"type": "boolean"},
+ "courses": {"type": "array", "items": {"type": "string"}},
+ },
+}
+
+# Define the task to generate a person's information
+task = "Generate a person's information based on the following schema:"
+
+# Create an instance of the ToolAgent class
+agent = ToolAgent(
+ model=model, tokenizer=tokenizer, json_schema=json_schema
+)
+
+# Run the agent to generate the person's information
+generated_data = agent.run(task)
+
+# Print the generated data
+print(generated_data)
diff --git a/playground/agents/tools/tool_agent/jamba_tool_agent.py b/playground/agents/tools/tool_agent/jamba_tool_agent.py
new file mode 100644
index 00000000..032272a3
--- /dev/null
+++ b/playground/agents/tools/tool_agent/jamba_tool_agent.py
@@ -0,0 +1,61 @@
+from pydantic import BaseModel, Field
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from swarms import ToolAgent
+from swarms.tools.json_utils import base_model_to_json
+
+# Model name
+model_name = "ai21labs/Jamba-v0.1"
+
+# Load the pre-trained model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ device_map="auto",
+)
+
+# Load the pre-trained model and tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+
+# Initialize the schema for the person's information
+class APIExampleRequestSchema(BaseModel):
+ endpoint: str = Field(
+ ..., description="The API endpoint for the example request"
+ )
+ method: str = Field(
+ ..., description="The HTTP method for the example request"
+ )
+ headers: dict = Field(
+ ..., description="The headers for the example request"
+ )
+ body: dict = Field(..., description="The body of the example request")
+ response: dict = Field(
+ ...,
+ description="The expected response of the example request",
+ )
+
+
+# Convert the schema to a JSON string
+api_example_schema = base_model_to_json(APIExampleRequestSchema)
+# Convert the schema to a JSON string
+
+# Define the task to generate a person's information
+task = "Generate an example API request using this code:\n"
+
+# Create an instance of the ToolAgent class
+agent = ToolAgent(
+    name="Jamba Tool Agent",
+    description=(
+        "An agent that generates an API request using the Jamba"
+        " model."
+ ),
+ model=model,
+ tokenizer=tokenizer,
+ json_schema=api_example_schema,
+)
+
+# Run the agent to generate the person's information
+generated_data = agent(task)
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
diff --git a/playground/agents/tools/tool_agent/tool_agent_pydantic.py b/playground/agents/tools/tool_agent/tool_agent_pydantic.py
new file mode 100644
index 00000000..cd564480
--- /dev/null
+++ b/playground/agents/tools/tool_agent/tool_agent_pydantic.py
@@ -0,0 +1,45 @@
+from pydantic import BaseModel, Field
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from swarms import ToolAgent
+from swarms.tools.json_utils import base_model_to_json
+
+# Load the pre-trained model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(
+ "databricks/dolly-v2-12b",
+ load_in_4bit=True,
+ device_map="auto",
+)
+tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
+
+
+# Initialize the schema for the person's information
+class Schema(BaseModel):
+ name: str = Field(..., title="Name of the person")
+    age: int = Field(..., title="Age of the person")
+ is_student: bool = Field(..., title="Whether the person is a student")
+ courses: list[str] = Field(
+ ..., title="List of courses the person is taking"
+ )
+
+
+# Convert the schema to a JSON string
+tool_schema = base_model_to_json(Schema)
+
+# Define the task to generate a person's information
+task = "Generate a person's information based on the following schema:"
+
+# Create an instance of the ToolAgent class
+agent = ToolAgent(
+ name="dolly-function-agent",
+    description="An agent to create child data",
+ model=model,
+ tokenizer=tokenizer,
+ json_schema=tool_schema,
+)
+
+# Run the agent to generate the person's information
+generated_data = agent.run(task)
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
diff --git a/playground/agents/tools/tool_agent/tool_agent_with_llm.py b/playground/agents/tools/tool_agent/tool_agent_with_llm.py
new file mode 100644
index 00000000..5babf461
--- /dev/null
+++ b/playground/agents/tools/tool_agent/tool_agent_with_llm.py
@@ -0,0 +1,46 @@
+import os
+
+from dotenv import load_dotenv
+from pydantic import BaseModel, Field
+
+from swarms import OpenAIChat, ToolAgent
+from swarms.tools.json_utils import base_model_to_json
+
+# Load the environment variables
+load_dotenv()
+
+# Initialize the OpenAIChat class
+chat = OpenAIChat(
+    api_key=os.getenv("OPENAI_API_KEY"),
+)
+
+
+# Initialize the schema for the person's information
+class Schema(BaseModel):
+ name: str = Field(..., title="Name of the person")
+    age: int = Field(..., title="Age of the person")
+ is_student: bool = Field(..., title="Whether the person is a student")
+ courses: list[str] = Field(
+ ..., title="List of courses the person is taking"
+ )
+
+
+# Convert the schema to a JSON string
+tool_schema = base_model_to_json(Schema)
+
+# Define the task to generate a person's information
+task = "Generate a person's information based on the following schema:"
+
+# Create an instance of the ToolAgent class
+agent = ToolAgent(
+ name="dolly-function-agent",
+    description="An agent to create child data",
+ llm=chat,
+ json_schema=tool_schema,
+)
+
+# Run the agent to generate the person's information
+generated_data = agent(task)
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
diff --git a/playground/agents/use_cases/browser/multion/multion_example.ipynb b/playground/agents/use_cases/browser/multion/multion_example.ipynb
new file mode 100644
index 00000000..6131a64a
--- /dev/null
+++ b/playground/agents/use_cases/browser/multion/multion_example.ipynb
@@ -0,0 +1,801 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# pip3 install multion\n",
+ "# pip3 install swarms\n",
+ "import multion\n",
+ "from multion.client import MultiOn\n",
+ "from swarms import Agent\n",
+ "import os\n",
+ "from swarms.models.base_llm import BaseLLM\n",
+ "\n",
+ "def check_multion_api_key():\n",
+ " \"\"\"\n",
+ " Checks if the MultiOn API key is available in the environment variables.\n",
+ "\n",
+ " Returns:\n",
+ " str: The MultiOn API key.\n",
+ " \"\"\"\n",
+ " api_key = os.getenv(\"MULTION_API_KEY\")\n",
+ " return api_key\n",
+ "\n",
+ "\n",
+ "class MultiOnAgent(BaseLLM):\n",
+ " \"\"\"\n",
+ " Represents an agent that interacts with the MultiOn API to run tasks on a remote session.\n",
+ "\n",
+ " Args:\n",
+ " api_key (str): The API key for accessing the MultiOn API.\n",
+ " url (str): The URL of the remote session.\n",
+ " *args: Variable length argument list.\n",
+ " **kwargs: Arbitrary keyword arguments.\n",
+ "\n",
+ " Attributes:\n",
+ " client (MultiOn): The MultiOn client instance.\n",
+ " url (str): The URL of the remote session.\n",
+ " session_id (str): The ID of the current session.\n",
+ "\n",
+ " Methods:\n",
+ " run: Runs a task on the remote session.\n",
+ " \"\"\"\n",
+ "\n",
+    "    def __init__(self, name: str = None, system_prompt: str = None, api_key: str = check_multion_api_key(), url: str = \"https://huggingface.co/papers\", *args, **kwargs):\n",
+ " super().__init__(*args, **kwargs)\n",
+ " self.name = name\n",
+ " self.client = MultiOn(api_key=api_key)\n",
+ " self.url = url\n",
+ " self.system_prompt = system_prompt\n",
+ " self.session_id = None\n",
+ "\n",
+ " def run(self, task: str, *args, **kwargs):\n",
+ " \"\"\"\n",
+ " Runs a task on the remote session.\n",
+ "\n",
+ " Args:\n",
+ " task (str): The task to be executed on the remote session.\n",
+ " *args: Variable length argument list.\n",
+ " **kwargs: Arbitrary keyword arguments.\n",
+ " \"\"\"\n",
+ " # Create a new session\n",
+ " response = self.client.sessions.create(url=self.url, *args, **kwargs)\n",
+ " print(response.message)\n",
+ " self.session_id = response.session_id\n",
+ " \n",
+ " prompt = f\"{self.system_prompt} {task}\"\n",
+ " \n",
+ " # Keep stepping the session until the agent completes the task\n",
+ " while response.status == 'CONTINUE':\n",
+ " response = self.client.sessions.step(\n",
+ " session_id=self.session_id,\n",
+ " cmd=prompt,\n",
+ " include_screenshot=True,\n",
+ " *args,\n",
+ " **kwargs\n",
+ " )\n",
+ " \n",
+ " if response.status == 'DONE':\n",
+ " print('Task completed')\n",
+ " print(response.message)\n",
+ "\n",
+ " # Capture a screenshot of the session\n",
+ " get_screenshot = self.client.sessions.screenshot(session_id=self.session_id, *args, **kwargs)\n",
+ " print(\"Screenshot of session: \", get_screenshot.screenshot)\n",
+ "\n",
+ " # Close the session\n",
+ " close_session_response = self.client.sessions.close(session_id=self.session_id, *args, **kwargs)\n",
+ " print(\"Close session response: \", close_session_response)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "from swarms import MixtureOfAgents\n",
+ "\n",
+ "llm = MultiOnAgent(\n",
+ "\tname = \"MultiOnAgent\",\n",
+ ")\n",
+ "\n",
+ "\n",
+ "SEC_FILLING = \"\"\"\n",
+ "\n",
+ " \tThree Months Ended\n",
+ " \tApr 28, 2024\t\tApr 30, 2023\n",
+ "Revenue\t$\t26,044 \t\t\t$\t7,192 \t\n",
+ "Cost of revenue\t5,638 \t\t\t2,544 \t\n",
+ "Gross profit\t20,406 \t\t\t4,648 \t\n",
+ "Operating expenses\t \t\t \n",
+ "Research and development\t2,720 \t\t\t1,875 \t\n",
+ "Sales, general and administrative\t777 \t\t\t633 \t\n",
+ "Total operating expenses\t3,497 \t\t\t2,508 \t\n",
+ "Operating income\t16,909 \t\t\t2,140 \t\n",
+ "Interest income\t359 \t\t\t150 \t\n",
+ "Interest expense\t(64)\t\t\t(66)\t\n",
+ "Other, net\t75 \t\t\t(15)\t\n",
+ "Other income (expense), net\n",
+ "370 \t\t\t69 \t\n",
+ "Income before income tax\t17,279 \t\t\t2,209 \t\n",
+ "Income tax expense\t2,398 \t\t\t166 \t\n",
+ "Net income\t$\t14,881 \t\t\t$\t2,043 \t\n",
+ "Net income per share:\t\t\t\n",
+ "Basic\t$\t6.04 \t\t\t$\t0.83 \t\n",
+ "Diluted\t$\t5.98 \t\t\t$\t0.82 \t\n",
+ "Weighted average shares used in per share computation:\t\t\t\n",
+ "Basic\t2,462 \t\t\t2,470 \t\n",
+ "Diluted\t2,489 \t\t\t2,490 \t\n",
+ " \n",
+ "\n",
+ "See accompanying Notes to Condensed Consolidated Financial Statements.\n",
+ "3\n",
+ "\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Condensed Consolidated Statements of Comprehensive Income\n",
+ "(In millions)\n",
+ "(Unaudited)\n",
+ " \tThree Months Ended\n",
+ " \tApr 28, 2024\t\tApr 30, 2023\n",
+ " \t\t\t\n",
+ "Net income\t$\t14,881 \t\t\t$\t2,043 \t\n",
+ "Other comprehensive loss, net of tax\t\t\t\n",
+ "Available-for-sale securities:\t\t\t\n",
+ "Net change in unrealized gain (loss)\t(128)\t\t\t17 \t\n",
+ "Cash flow hedges:\t\t\t\n",
+ "Net change in unrealized loss\t(4)\t\t\t(13)\t\n",
+ "Reclassification adjustments for net realized loss included in net income\t(4)\t\t\t(11)\t\n",
+ "Net change in unrealized loss\t(8)\t\t\t(24)\t\n",
+ "Other comprehensive loss, net of tax\t(136)\t\t\t(7)\t\n",
+ "Total comprehensive income\t$\t14,745 \t\t\t$\t2,036 \t\n",
+ " \n",
+ "\n",
+ "See accompanying Notes to Condensed Consolidated Financial Statements.\n",
+ "\n",
+ "4\n",
+ "\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Condensed Consolidated Balance Sheets\n",
+ "(In millions)\n",
+ "(Unaudited)\n",
+ " \tApr 28, 2024\t\tJan 28, 2024\n",
+ "Assets\t\t\t\n",
+ "Current assets:\t \t\t \n",
+ "Cash and cash equivalents\t$\t7,587 \t\t\t$\t7,280 \t\n",
+ "Marketable securities\t23,851 \t\t\t18,704 \t\n",
+ "Accounts receivable, net\t12,365 \t\t\t9,999 \t\n",
+ "Inventories\t5,864 \t\t\t5,282 \t\n",
+ "Prepaid expenses and other current assets\t4,062 \t\t\t3,080 \t\n",
+ "Total current assets\t53,729 \t\t\t44,345 \t\n",
+ "Property and equipment, net\t4,006 \t\t\t3,914 \t\n",
+ "Operating lease assets\t1,532 \t\t\t1,346 \t\n",
+ "Goodwill\t4,453 \t\t\t4,430 \t\n",
+ "Intangible assets, net\t986 \t\t\t1,112 \t\n",
+ "Deferred income tax assets\t7,798 \t\t\t6,081 \t\n",
+ "Other assets\t4,568 \t\t\t4,500 \t\n",
+ "Total assets\t$\t77,072 \t\t\t$\t65,728 \t\n",
+ "Liabilities and Shareholders' Equity\t \t\t \n",
+ "Current liabilities:\t \t\t \n",
+ "Accounts payable\t$\t2,715 \t\t\t$\t2,699 \t\n",
+ "Accrued and other current liabilities\t11,258 \t\t\t6,682 \t\n",
+ "Short-term debt\t1,250 \t\t\t1,250 \t\n",
+ "Total current liabilities\t15,223 \t\t\t10,631 \t\n",
+ "Long-term debt\t8,460 \t\t\t8,459 \t\n",
+ "Long-term operating lease liabilities\t1,281 \t\t\t1,119 \t\n",
+ "Other long-term liabilities\t2,966 \t\t\t2,541 \t\n",
+ "Total liabilities\t27,930 \t\t\t22,750 \t\n",
+ "Commitments and contingencies - see Note 12\t\t\t\n",
+ "Shareholdersβ equity:\t \t\t \n",
+ "Preferred stock\tβ \t\t\tβ \t\n",
+ "Common stock\t2 \t\t\t2 \t\n",
+ "Additional paid-in capital\t12,651 \t\t\t13,132 \t\n",
+ "Accumulated other comprehensive income (loss)\t(109)\t\t\t27 \t\n",
+ "Retained earnings\t36,598 \t\t\t29,817 \t\n",
+ "Total shareholders' equity\t49,142 \t\t\t42,978 \t\n",
+ "Total liabilities and shareholders' equity\t$\t77,072 \t\t\t$\t65,728 \t\n",
+ " \n",
+ "\n",
+ "See accompanying Notes to Condensed Consolidated Financial Statements.\n",
+ "\n",
+ "5\n",
+ "\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Condensed Consolidated Statements of Shareholders' Equity\n",
+ "For the Three Months Ended April 28, 2024 and April 30, 2023\n",
+ "(Unaudited) \n",
+ "Common Stock\n",
+ "Outstanding\t\tAdditional Paid-in Capital\t\tAccumulated Other Comprehensive Income (Loss)\t\tRetained Earnings\t\tTotal Shareholders' Equity\n",
+ "Shares\t\tAmount\t\t\t\t\n",
+ "(In millions, except per share data)\t\t\t\t\t\t\t\t\t\t\t\n",
+ "Balances, Jan 28, 2024\t2,464 \t\t\t$\t2 \t\t\t$\t13,132 \t\t\t$\t27 \t\t\t$\t29,817 \t\t\t$\t42,978 \t\n",
+ "Net income\tβ \t\t\tβ \t\t\tβ \t\t\tβ \t\t\t14,881 \t\t\t14,881 \t\n",
+ "Other comprehensive loss\tβ \t\t\tβ \t\t\tβ \t\t\t(136)\t\t\tβ \t\t\t(136)\t\n",
+ "Issuance of common stock from stock plans \t7 \t\t\tβ \t\t\t285 \t\t\tβ \t\t\tβ \t\t\t285 \t\n",
+ "Tax withholding related to vesting of restricted stock units\t(2)\t\t\tβ \t\t\t(1,752)\t\t\tβ \t\t\tβ \t\t\t(1,752)\t\n",
+ "Shares repurchased\t(10)\t\t\tβ \t\t\t(33)\t\t\tβ \t\t\t(8,002)\t\t\t(8,035)\t\n",
+ "Cash dividends declared and paid ($0.04 per common share)\n",
+ "β \t\t\tβ \t\t\tβ \t\t\tβ \t\t\t(98)\t\t\t(98)\t\n",
+ "Stock-based compensation\tβ \t\t\tβ \t\t\t1,019 \t\t\tβ \t\t\tβ \t\t\t1,019 \t\n",
+ "Balances, Apr 28, 2024\t2,459 \t\t\t$\t2 \t\t\t$\t12,651 \t\t\t$\t(109)\t\t\t$\t36,598 \t\t\t$\t49,142 \t\n",
+ "Balances, Jan 29, 2023\t2,466 \t\t\t$\t2 \t\t\t$\t11,971 \t\t\t$\t(43)\t\t\t$\t10,171 \t\t\t$\t22,101 \t\n",
+ "Net income\tβ \t\t\tβ \t\t\tβ \t\t\tβ \t\t\t2,043 \t\t\t2,043 \t\n",
+ "Other comprehensive loss\tβ \t\t\tβ \t\t\tβ \t\t\t(7)\t\t\tβ \t\t\t(7)\t\n",
+ "Issuance of common stock from stock plans \t9 \t\t\tβ \t\t\t246 \t\t\tβ \t\t\tβ \t\t\t246 \t\n",
+ "Tax withholding related to vesting of restricted stock units\t(2)\t\t\tβ \t\t\t(507)\t\t\tβ \t\t\tβ \t\t\t(507)\t\n",
+ "Cash dividends declared and paid ($0.04 per common share)\n",
+ "β \t\t\tβ \t\t\tβ \t\t\tβ \t\t\t(99)\t\t\t(99)\t\n",
+ "Stock-based compensation\tβ \t\t\tβ \t\t\t743 \t\t\tβ \t\t\tβ \t\t\t743 \t\n",
+ "Balances, Apr 30, 2023\t2,473 \t\t\t$\t2 \t\t\t$\t12,453 \t\t\t$\t(50)\t\t\t$\t12,115 \t\t\t$\t24,520 \t\n",
+ " \n",
+ "See accompanying Notes to Condensed Consolidated Financial Statements.\n",
+ "6\n",
+ "\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Condensed Consolidated Statements of Cash Flows\n",
+ "(In millions)\n",
+ "(Unaudited) \n",
+ " \tThree Months Ended\n",
+ " \tApr 28, 2024\t\tApr 30, 2023\n",
+ "Cash flows from operating activities:\t\t\t\n",
+ "Net income\t$\t14,881 \t\t\t$\t2,043 \t\n",
+ "Adjustments to reconcile net income to net cash provided by operating activities:\t\t\t\n",
+ "Stock-based compensation expense\t1,011 \t\t\t735 \t\n",
+ "Depreciation and amortization\t410 \t\t\t384 \t\n",
+ "Realized and unrealized (gains) losses on investments in non-affiliated entities, net\t(69)\t\t\t14 \t\n",
+ "Deferred income taxes\t(1,577)\t\t\t(1,135)\t\n",
+ "Other\t(145)\t\t\t(34)\t\n",
+ "Changes in operating assets and liabilities, net of acquisitions:\t\t\t\n",
+ "Accounts receivable\t(2,366)\t\t\t(252)\t\n",
+ "Inventories\t(577)\t\t\t566 \t\n",
+ "Prepaid expenses and other assets\t(726)\t\t\t(215)\t\n",
+ "Accounts payable\t(22)\t\t\t11 \t\n",
+ "Accrued and other current liabilities\t4,202 \t\t\t689 \t\n",
+ "Other long-term liabilities\t323 \t\t\t105 \t\n",
+ "Net cash provided by operating activities\t15,345 \t\t\t2,911 \t\n",
+ "Cash flows from investing activities:\t\t\t\n",
+ "Proceeds from maturities of marketable securities\t4,004 \t\t\t2,512 \t\n",
+ "Proceeds from sales of marketable securities\t149 \t\t\tβ \t\n",
+ "Purchases of marketable securities\t(9,303)\t\t\t(2,801)\t\n",
+ "Purchases related to property and equipment and intangible assets\t(369)\t\t\t(248)\t\n",
+ "Acquisitions, net of cash acquired\t(39)\t\t\t(83)\t\n",
+ "Investments in non-affiliated entities\t(135)\t\t\t(221)\t\n",
+ "Net cash used in investing activities\t(5,693)\t\t\t(841)\t\n",
+ "Cash flows from financing activities:\t\t\t\n",
+ "Proceeds related to employee stock plans\t285 \t\t\t246 \t\n",
+ "Payments related to repurchases of common stock\t(7,740)\t\t\tβ \t\n",
+ "Payments related to tax on restricted stock units\t(1,752)\t\t\t(507)\t\n",
+ "Dividends paid\t(98)\t\t\t(99)\t\n",
+ "Principal payments on property and equipment and intangible assets\t(40)\t\t\t(20)\t\n",
+ "Net cash used in financing activities\t(9,345)\t\t\t(380)\t\n",
+ "Change in cash and cash equivalents\t307 \t\t\t1,690 \t\n",
+ "Cash and cash equivalents at beginning of period\t7,280 \t\t\t3,389 \t\n",
+ "Cash and cash equivalents at end of period\t$\t7,587 \t\t\t$\t5,079 \t\n",
+ " \n",
+ "See accompanying Notes to Condensed Consolidated Financial Statements.\n",
+ "7\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements\n",
+ "(Unaudited)\n",
+ "\n",
+ "\n",
+ "Note 1 - Summary of Significant Accounting Policies\n",
+ "Basis of Presentation\n",
+ "The accompanying unaudited condensed consolidated financial statements were prepared in accordance with accounting principles generally accepted in the United States of America, or U.S. GAAP, for interim financial information and with the instructions to Form 10-Q and Article 10 of Securities and Exchange Commission, or SEC, Regulation S-X. The January 28, 2024 consolidated balance sheet was derived from our audited consolidated financial statements included in our Annual Report on Form 10-K for the fiscal year ended January 28, 2024, as filed with the SEC, but does not include all disclosures required by U.S. GAAP. In the opinion of management, all adjustments, consisting only of normal recurring adjustments considered necessary for a fair statement of results of operations and financial position, have been included. The results for the interim periods presented are not necessarily indicative of the results expected for any future period. The following information should be read in conjunction with the audited consolidated financial statements and notes thereto included in our Annual Report on Form 10-K for the fiscal year ended January 28, 2024. \n",
+ "Significant Accounting Policies\n",
+ "There have been no material changes to our significant accounting policies disclosed in Note 1 - Organization and Summary of Significant Accounting Policies, of the Notes to the Consolidated Financial Statements included in our Annual Report on Form 10-K for the fiscal year ended January 28, 2024.\n",
+ "Fiscal Year\n",
+ "We operate on a 52- or 53-week year, ending on the last Sunday in January. Fiscal years 2025 and 2024 are both 52-week years. The first quarters of fiscal years 2025 and 2024 were both 13-week quarters.\n",
+ "Principles of Consolidation\n",
+ "Our condensed consolidated financial statements include the accounts of NVIDIA Corporation and our wholly-owned subsidiaries. All intercompany balances and transactions have been eliminated in consolidation.\n",
+ "Use of Estimates\n",
+ "The preparation of financial statements in conformity with U.S. GAAP requires management to make estimates and assumptions that affect the reported amounts of assets and liabilities and disclosures of contingent assets and liabilities at the date of the financial statements and the reported amounts of revenue and expenses during the reporting period. Actual results could differ materially from our estimates. On an on-going basis, we evaluate our estimates, including those related to revenue recognition, cash equivalents and marketable securities, accounts receivable, inventories and product purchase commitments, income taxes, goodwill, stock-based compensation, litigation, investigation and settlement costs, property, plant, and equipment, and other contingencies. These estimates are based on historical facts and various other assumptions that we believe are reasonable.\n",
+ "Recently Issued Accounting Pronouncements\n",
+ "Recent Accounting Pronouncements Not Yet Adopted\n",
+ "In November 2023, the Financial Accounting Standards Board, or FASB, issued a new accounting standard to provide for additional disclosures about significant expenses in operating segments. The standard is effective for our annual reporting starting with fiscal year 2025 and for interim period reporting starting in fiscal year 2026 retrospectively. We are currently evaluating the impact of this standard on our Consolidated Financial Statements.\n",
+ "In December 2023, the FASB issued a new accounting standard which provides for new and updated income tax disclosures, including disaggregation of rate reconciliation and income taxes paid. The standard is effective for annual periods beginning after December 15, 2024. Early adoption is permitted and should be applied prospectively, with retrospective application permitted. We expect to adopt this standard in our annual reporting starting with fiscal year 2026. We are currently evaluating the impact of this standard on our Consolidated Financial Statements.\n",
+ "\n",
+ "\n",
+ "\n",
+ "8\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ "Note 2 - Leases\n",
+ "Our lease obligations primarily consist of operating leases for our headquarters complex, domestic and international office facilities, and data center space, with lease periods expiring between fiscal years 2025 and 2035.\n",
+ "Future minimum lease payments under our non-cancelable operating leases as of April 28, 2024 were as follows:\n",
+ "Operating Lease Obligations\n",
+ " \t(In millions)\n",
+ "Fiscal Year:\t \n",
+ "2025 (excluding first quarter of fiscal year 2025)\n",
+ "$\t221 \t\n",
+ "2026\t306 \t\n",
+ "2027\t290 \t\n",
+ "2028\t270 \t\n",
+ "2029\t236 \t\n",
+ "2030 and thereafter\n",
+ "410 \t\n",
+ "Total\t1,733 \t\n",
+ "Less imputed interest\t206 \t\n",
+ "Present value of net future minimum lease payments\t1,527 \t\n",
+ "Less short-term operating lease liabilities\t246 \t\n",
+ "Long-term operating lease liabilities\t$\t1,281 \t\n",
+ " \n",
+ "In addition, we have operating leases, primarily for our data centers, that are expected to commence during fiscal year 2025 with lease terms of 2 to 11 years for $923 million.\n",
+ "Operating lease expenses were $80 million and $59 million for the first quarter of fiscal years 2025 and 2024, respectively. Short-term and variable lease expenses for the first quarter of fiscal years 2025 and 2024 were not significant.\n",
+ "Other information related to leases was as follows:\n",
+ "Three Months Ended\n",
+ "Apr 28, 2024\t\tApr 30, 2023\n",
+ " \t(In millions)\n",
+ "Supplemental cash flows information\t\t\t \n",
+ "Operating cash flows used for operating leases\t$\t69 \t\t\t$\t61 \t\n",
+ "Operating lease assets obtained in exchange for lease obligations\t250 \t\t\t106 \t\n",
+ " \n",
+ "As of April 28, 2024, our operating leases had a weighted average remaining lease term of 6.3 years and a weighted average discount rate of 3.89%. As of January 28, 2024, our operating leases had a weighted average remaining lease term of 6.1 years and a weighted average discount rate of 3.76%.\n",
+ "9\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ "Note 3 - Stock-Based Compensation\n",
+ "Our stock-based compensation expense is associated with restricted stock units, or RSUs, performance stock units that are based on our corporate financial performance targets, or PSUs, performance stock units that are based on market conditions, or market-based PSUs, and our employee stock purchase plan, or ESPP.\n",
+ "Our Condensed Consolidated Statements of Income include stock-based compensation expense, net of amounts capitalized into inventory and subsequently recognized to cost of revenue, as follows:\n",
+ " \tThree Months Ended\n",
+ " \tApr 28, 2024\t\tApr 30, 2023\n",
+ "(In millions)\n",
+ "Cost of revenue\t$\t36 \t\t\t$\t27 \t\n",
+ "Research and development\t727 \t\t\t524 \t\n",
+ "Sales, general and administrative\t248 \t\t\t184 \t\n",
+ "Total\t$\t1,011 \t\t\t$\t735 \t\n",
+ " \n",
+ "Equity Award Activity\n",
+ "The following is a summary of our equity award transactions under our equity incentive plans:\n",
+ "RSUs, PSUs, and Market-based PSUs Outstanding\n",
+ " \tNumber of Shares\t\tWeighted Average Grant-Date Fair Value Per Share\n",
+ "(In millions, except per share data)\n",
+ "Balances, Jan 28, 2024\t37 \t\t\t$\t245.94 \t\n",
+ "Granted\t7 \t\t\t$\t801.79 \t\n",
+ "Vested\t(6)\t\t\t$\t176.59 \t\n",
+ "Balances, Apr 28, 2024\t38 \t\t\t$\t361.45 \t\n",
+ " \n",
+ "As of April 28, 2024, there was $13.2 billion of aggregate unearned stock-based compensation expense. This amount is expected to be recognized over a weighted average period of 2.6 years for RSUs, PSUs, and market-based PSUs, and 0.8 years for ESPP.\n",
+ "Note 4 - Net Income Per Share\n",
+ "The following is a reconciliation of the denominator of the basic and diluted net income per share computations for the periods presented:\n",
+ " \tThree Months Ended\n",
+ "Apr 28, 2024\t\tApr 30, 2023\n",
+ " \t(In millions, except per share data)\n",
+ "Numerator:\t \t\t \n",
+ "Net income\t$\t14,881 \t\t\t$\t2,043 \t\n",
+ "Denominator:\t\t\t\n",
+ "Basic weighted average shares\t2,462 \t\t\t2,470 \t\n",
+ "Dilutive impact of outstanding equity awards\t27 \t\t\t20 \t\n",
+ "Diluted weighted average shares\t2,489 \t\t\t2,490 \t\n",
+ "Net income per share:\t\t\t\n",
+ "Basic (1)\t$\t6.04 \t\t\t$\t0.83 \t\n",
+ "Diluted (2)\t$\t5.98 \t\t\t$\t0.82 \t\n",
+ "Equity awards excluded from diluted net income per share because their effect would have been anti-dilutive\t6 \t\t\t4 \t\n",
+ " \n",
+ "(1) Calculated as net income divided by basic weighted average shares.\n",
+ "(2) Calculated as net income divided by diluted weighted average shares.\n",
+ "10\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ "Diluted net income per share is computed using the weighted average number of common and potentially dilutive shares outstanding during the period, using the treasury stock method. Any anti-dilutive effect of equity awards outstanding is not included in the computation of diluted net income per share.\n",
+ "Note 5 - Income Taxes\n",
+ "Income tax expense was $2.4 billion and $166 million for the first quarter of fiscal years 2025 and 2024, respectively. Income tax expense as a percentage of income before income tax was 13.9% and 7.5% for the first quarter of fiscal years 2025 and 2024, respectively.\n",
+ "\n",
+ "The effective tax rate increased primarily due to a decreased effect of tax benefits from the foreign-derived intangible income deduction and stock-based compensation relative to the increase in income before income tax.\n",
+ "\n",
+ "Our effective tax rates for the first quarter of fiscal years 2025 and 2024 were lower than the U.S. federal statutory rate of 21% due to tax benefits from stock-based compensation, the foreign-derived intangible income deduction, income earned in jurisdictions that are subject to taxes lower than the U.S. federal statutory tax rate, and the U.S. federal research tax credit.\n",
+ "\n",
+ "While we believe that we have adequately provided for all uncertain tax positions, or tax positions where we believe it is not more-likely-than-not that the position will be sustained upon review, amounts asserted by tax authorities could be greater or less than our accrued position. Accordingly, our provisions on federal, state and foreign tax related matters to be recorded in the future may change as revised estimates are made or the underlying matters are settled or otherwise resolved with the respective tax authorities. As of April 28, 2024, we do not believe that our estimates, as otherwise provided for, on such tax positions will significantly increase or decrease within the next 12 months.\n",
+ "Note 6 - Cash Equivalents and Marketable Securities \n",
+ "Our cash equivalents and marketable securities related to publicly held debt securities are classified as “available-for-sale” debt securities.\n",
+ "The following is a summary of cash equivalents and marketable securities:\n",
+ " \tApr 28, 2024\n",
+ "Amortized\n",
+ "Cost\t\tUnrealized\n",
+ "Gain\t\tUnrealized\n",
+ "Loss\t\tEstimated\n",
+ "Fair Value\t\tReported as\n",
+ " \t\t\t\t\tCash Equivalents\t\tMarketable Securities\n",
+ " \t(In millions)\n",
+ "Corporate debt securities\t$\t11,397 \t\t\t$\t3 \t\t\t$\t(43)\t\t\t$\t11,357 \t\t\t$\t733 \t\t\t$\t10,624 \t\n",
+ "Debt securities issued by the U.S. Treasury\t11,314 \t\t\t— \t\t\t(62)\t\t\t11,252 \t\t\t886 \t\t\t10,366 \t\n",
+ "Money market funds\t5,374 \t\t\t— \t\t\t— \t\t\t5,374 \t\t\t5,374 \t\t\t— \t\n",
+ "Debt securities issued by U.S. government agencies\t2,826 \t\t\t— \t\t\t(7)\t\t\t2,819 \t\t\t189 \t\t\t2,630 \t\n",
+ "Certificates of deposit\t286 \t\t\t— \t\t\t— \t\t\t286 \t\t\t69 \t\t\t217 \t\n",
+ "Foreign government bonds\t14 \t\t\t— \t\t\t— \t\t\t14 \t\t\t— \t\t\t14 \t\n",
+ "Total\t$\t31,211 \t\t\t$\t3 \t\t\t$\t(112)\t\t\t$\t31,102 \t\t\t$\t7,251 \t\t\t$\t23,851 \t\n",
+ " \n",
+ "11\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ " \tJan 28, 2024\n",
+ "Amortized\n",
+ "Cost\t\tUnrealized\n",
+ "Gain\t\tUnrealized\n",
+ "Loss\t\tEstimated\n",
+ "Fair Value\t\tReported as\n",
+ " \t\t\t\t\tCash Equivalents\t\tMarketable Securities\n",
+ " \t(In millions)\n",
+ "Corporate debt securities\t$\t10,126 \t\t\t$\t31 \t\t\t$\t(5)\t\t\t$\t10,152 \t\t\t$\t2,231 \t\t\t$\t7,921 \t\n",
+ "Debt securities issued by the U.S. Treasury\t9,517 \t\t\t17 \t\t\t(10)\t\t\t9,524 \t\t\t1,315 \t\t\t8,209 \t\n",
+ "Money market funds\t3,031 \t\t\t— \t\t\t— \t\t\t3,031 \t\t\t3,031 \t\t\t— \t\n",
+ "Debt securities issued by U.S. government agencies\t2,326 \t\t\t8 \t\t\t(1)\t\t\t2,333 \t\t\t89 \t\t\t2,244 \t\n",
+ "Certificates of deposit\t510 \t\t\t— \t\t\t— \t\t\t510 \t\t\t294 \t\t\t216 \t\n",
+ "Foreign government bonds\t174 \t\t\t— \t\t\t— \t\t\t174 \t\t\t60 \t\t\t114 \t\n",
+ "Total\t$\t25,684 \t\t\t$\t56 \t\t\t$\t(16)\t\t\t$\t25,724 \t\t\t$\t7,020 \t\t\t$\t18,704 \t\n",
+ " \n",
+ "The following tables provide the breakdown of unrealized losses, aggregated by investment category and length of time that individual securities have been in a continuous loss position:\n",
+ "Apr 28, 2024\n",
+ " \tLess than 12 Months\t\t12 Months or Greater\t\tTotal\n",
+ " \tEstimated Fair Value\t\tGross Unrealized Loss\t\tEstimated Fair Value\t\tGross Unrealized Loss\t\tEstimated Fair Value\t\tGross Unrealized Loss\n",
+ " \t(In millions)\n",
+ "Debt securities issued by the U.S. Treasury\t$\t9,720 \t\t\t$\t(60)\t\t\t$\t756 \t\t\t$\t(2)\t\t\t$\t10,476 \t\t\t$\t(62)\t\n",
+ "Corporate debt securities\t6,943 \t\t\t(42)\t\t\t188 \t\t\t(1)\t\t\t7,131 \t\t\t(43)\t\n",
+ "Debt securities issued by U.S. government agencies\t2,391 \t\t\t(7)\t\t\t— \t\t\t— \t\t\t2,391 \t\t\t(7)\t\n",
+ "Total\t$\t19,054 \t\t\t$\t(109)\t\t\t$\t944 \t\t\t$\t(3)\t\t\t$\t19,998 \t\t\t$\t(112)\t\n",
+ " \n",
+ "Jan 28, 2024\n",
+ " \tLess than 12 Months\t\t12 Months or Greater\t\tTotal\n",
+ " \tEstimated Fair Value\t\tGross Unrealized Loss\t\tEstimated Fair Value\t\tGross Unrealized Loss\t\tEstimated Fair Value\t\tGross Unrealized Loss\n",
+ " \t(In millions)\n",
+ "Debt securities issued by the U.S. Treasury\t$\t3,343 \t\t\t$\t(5)\t\t\t$\t1,078 \t\t\t$\t(5)\t\t\t$\t4,421 \t\t\t$\t(10)\t\n",
+ "Corporate debt securities\t1,306 \t\t\t(3)\t\t\t618 \t\t\t(2)\t\t\t1,924 \t\t\t(5)\t\n",
+ "Debt securities issued by U.S. government agencies\t670 \t\t\t(1)\t\t\t— \t\t\t— \t\t\t670 \t\t\t(1)\t\n",
+ "Total\t$\t5,319 \t\t\t$\t(9)\t\t\t$\t1,696 \t\t\t$\t(7)\t\t\t$\t7,015 \t\t\t$\t(16)\t\n",
+ " \n",
+ "The gross unrealized losses are related to fixed income securities, driven primarily by changes in interest rates. Net realized gains and losses were not significant for all periods presented.\n",
+ "12\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ "The amortized cost and estimated fair value of cash equivalents and marketable securities are shown below by contractual maturity.\n",
+ "Apr 28, 2024\t\tJan 28, 2024\n",
+ "Amortized Cost\t\tEstimated Fair Value\t\tAmortized Cost\t\tEstimated Fair Value\n",
+ "(In millions)\n",
+ "Less than one year\t$\t16,811 \t\t\t$\t16,800 \t\t\t$\t16,336 \t\t\t$\t16,329 \t\n",
+ "Due in 1 - 5 years\t14,400 \t\t\t14,302 \t\t\t9,348 \t\t\t9,395 \t\n",
+ "Total\t$\t31,211 \t\t\t$\t31,102 \t\t\t$\t25,684 \t\t\t$\t25,724 \t\n",
+ " \n",
+ "Note 7 - Fair Value of Financial Assets and Liabilities and Investments in Non-Affiliated Entities\n",
+ "The fair values of our financial assets and liabilities are determined using quoted market prices of identical assets or quoted market prices of similar assets from active markets. We review fair value hierarchy classification on a quarterly basis.\n",
+ "Pricing Category\t\tFair Value at\n",
+ "Apr 28, 2024\t\tJan 28, 2024\n",
+ "(In millions)\n",
+ "Assets\t\t\t\t\t\n",
+ "Cash equivalents and marketable securities:\t\t\t\t\t\n",
+ "Money market funds\tLevel 1\t\t$\t5,374 \t\t\t$\t3,031 \t\n",
+ "Corporate debt securities\tLevel 2\t\t$\t11,357 \t\t\t$\t10,152 \t\n",
+ "Debt securities issued by the U.S. Treasury\tLevel 2\t\t$\t11,252 \t\t\t$\t9,524 \t\n",
+ "Debt securities issued by U.S. government agencies\tLevel 2\t\t$\t2,819 \t\t\t$\t2,333 \t\n",
+ "Certificates of deposit\tLevel 2\t\t$\t286 \t\t\t$\t510 \t\n",
+ "Foreign government bonds\tLevel 2\t\t$\t14 \t\t\t$\t174 \t\n",
+ "Other assets (Investments in non-affiliated entities):\t\t\t\t\t\n",
+ "Publicly-held equity securities\tLevel 1\t\t$\t287 \t\t\t$\t225 \t\n",
+ "Liabilities (1)\t\t\t\t\t\n",
+ "0.584% Notes Due 2024\n",
+ "Level 2\t\t$\t1,242 \t\t\t$\t1,228 \t\n",
+ "3.20% Notes Due 2026\n",
+ "Level 2\t\t$\t960 \t\t\t$\t970 \t\n",
+ "1.55% Notes Due 2028\n",
+ "Level 2\t\t$\t1,096 \t\t\t$\t1,115 \t\n",
+ "2.85% Notes Due 2030\n",
+ "Level 2\t\t$\t1,331 \t\t\t$\t1,367 \t\n",
+ "2.00% Notes Due 2031\n",
+ "Level 2\t\t$\t1,026 \t\t\t$\t1,057 \t\n",
+ "3.50% Notes Due 2040\n",
+ "Level 2\t\t$\t805 \t\t\t$\t851 \t\n",
+ "3.50% Notes Due 2050\n",
+ "Level 2\t\t$\t1,487 \t\t\t$\t1,604 \t\n",
+ "3.70% Notes Due 2060\n",
+ "Level 2\t\t$\t368 \t\t\t$\t403 \t\n",
+ " \n",
+ "\n",
+ "(1) These liabilities are carried on our Condensed Consolidated Balance Sheets at their original issuance value, net of unamortized debt discount and issuance costs.\n",
+ "Investments in Non-Affiliated Entities\n",
+ "Our investments in non-affiliated entities include marketable equity securities, which are publicly traded, and non-marketable equity securities, which are primarily investments in privately held companies.\n",
+ "Our marketable equity securities have readily determinable fair values and are recorded in long-term other assets on our Condensed Consolidated Balance Sheets at fair value with changes in fair value recorded in Other income and expense, net on our Condensed Consolidated Statements of Income. Marketable equity securities totaled $287 million and $225 million as of April 28, 2024 and January 28, 2024, respectively. The net unrealized and realized gains and losses of investments in marketable securities were not significant for the first quarter of fiscal years 2025 and 2024.\n",
+ "13\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ "Our non-marketable equity securities are recorded in long-term other assets on our Condensed Consolidated Balance Sheets and valued under the measurement alternative. The carrying value of our non-marketable equity securities totaled $1.5 billion and $1.3 billion as of April 28, 2024 and January 28, 2024, respectively. Gains and losses on these investments, realized and unrealized, are recognized in Other income and expense, net on our Condensed Consolidated Statements of Income.\n",
+ " \n",
+ "(1) During the first quarter of fiscal years 2025 and 2024, we recorded an inventory provision of $210 million and $105 million, respectively, in cost of revenue.\n",
+ "\n",
+ " \tApr 28, 2024\t\tJan 28, 2024\n",
+ "Other Assets:\t(In millions)\n",
+ "Prepaid supply and capacity agreements (1)\t$\t2,232 \t\t\t$\t2,458 \t\n",
+ "Investments in non-affiliated entities\t1,750 \t\t\t1,546 \t\n",
+ "Prepaid royalties\t358 \t\t\t364 \t\n",
+ "Other\t228 \t\t\t132 \t\n",
+ "\n",
+ "We recognized $188 million in revenue in the first quarter of fiscal year 2025 from deferred revenue as of January 28, 2024.\n",
+ "Revenue allocated to remaining performance obligations, which includes deferred revenue and amounts that will be invoiced and recognized as revenue in future periods, was $1.3 billion as of April 28, 2024. We expect to recognize approximately 38% of this revenue over the next twelve months and the remainder thereafter. This excludes revenue related to performance obligations for contracts with a length of one year or less.\n",
+ "16\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ "Note 10 - Derivative Financial Instruments\n",
+ "We enter into foreign currency forward contracts to mitigate the impact of foreign currency exchange rate movements on our operating expenses. These contracts are designated as cash flow hedges for hedge accounting treatment. Gains or losses on the contracts are recorded in accumulated other comprehensive income or loss and reclassified to operating expense when the related operating expenses are recognized in earnings or ineffectiveness should occur.\n",
+ "We also enter into foreign currency forward contracts to mitigate the impact of foreign currency movements on monetary assets and liabilities. The change in fair value of these non-designated contracts is recorded in other income or expense and offsets the change in fair value of the hedged foreign currency denominated monetary assets and liabilities, which is also recorded in other income or expense.\n",
+ "The table below presents the notional value of our foreign currency contracts outstanding:\n",
+ " \tApr 28, 2024\t\tJan 28, 2024\n",
+ "(In millions)\n",
+ "Designated as cash flow hedges\t$\t1,198 \t\t\t$\t1,168 \t\n",
+ "Non-designated hedges\t$\t704 \t\t\t$\t597 \t\n",
+ " \n",
+ "The unrealized gains and losses or fair value of our foreign currency contracts was not significant as of April 28, 2024 and January 28, 2024.\n",
+ "As of April 28, 2024, all designated foreign currency contracts mature within 18 months. The expected realized gains and losses deferred to accumulated other comprehensive income or loss related to foreign currency contracts was not significant.\n",
+ "During the first quarter of fiscal years 2025 and 2024, the impact of derivative financial instruments designated for hedge accounting treatment in other comprehensive income or loss was not significant and the instruments were determined to be highly effective.\n",
+ "Note 11 - Debt\n",
+ "Long-Term Debt\n",
+ "Expected\n",
+ "Remaining Term (years)\t\tEffective\n",
+ "Interest Rate\t\tCarrying Value at\n",
+ "Apr 28, 2024\t\tJan 28, 2024\n",
+ "(In millions)\n",
+ "0.584% Notes Due 2024\n",
+ "0.1\t\t0.66%\t\t1,250 \t\t\t1,250 \t\n",
+ "3.20% Notes Due 2026\n",
+ "2.4\t\t3.31%\t\t1,000 \t\t\t1,000 \t\n",
+ "1.55% Notes Due 2028\n",
+ "4.1\t\t1.64%\t\t1,250 \t\t\t1,250 \t\n",
+ "2.85% Notes Due 2030\n",
+ "5.9\t\t2.93%\t\t1,500 \t\t\t1,500 \t\n",
+ "2.00% Notes Due 2031\n",
+ "7.1\t\t2.09%\t\t1,250 \t\t\t1,250 \t\n",
+ "3.50% Notes Due 2040\n",
+ "15.9\t\t3.54%\t\t1,000 \t\t\t1,000 \t\n",
+ "3.50% Notes Due 2050\n",
+ "25.9\t\t3.54%\t\t2,000 \t\t\t2,000 \t\n",
+ "3.70% Notes Due 2060\n",
+ "36.0\t\t3.73%\t\t500 \t\t\t500 \t\n",
+ "Unamortized debt discount and issuance costs\t\t\t\t\t\t(40)\t\t\t(41)\t\n",
+ "Net carrying amount\t\t\t\t\t\t9,710 \t\t\t9,709 \t\n",
+ "Less short-term portion\t\t\t\t\t\t(1,250)\t\t\t(1,250)\t\n",
+ "Total long-term portion\t\t\t\t\t\t$\t8,460 \t\t\t$\t8,459 \t\n",
+ " \n",
+ "Our notes are unsecured senior obligations. Existing and future liabilities of our subsidiaries will be effectively senior to the notes. Our notes pay interest semi-annually. We may redeem each of our notes prior to maturity, as defined in the applicable form of note. The maturity of the notes are calendar year.\n",
+ "As of April 28, 2024, we were in compliance with the required covenants, which are non-financial in nature, under the outstanding notes.\n",
+ "17\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ "Commercial Paper\n",
+ "We have a $575 million commercial paper program to support general corporate purposes. As of April 28, 2024, we had no commercial paper outstanding.\n",
+ "Note 12 - Commitments and Contingencies\n",
+ "Purchase Obligations\n",
+ "Our purchase obligations reflect our commitment to purchase components used to manufacture our products, including long-term supply and capacity agreements, certain software and technology licenses, other goods and services and long-lived assets.\n",
+ "As of April 28, 2024, we had outstanding inventory purchases and long-term supply and capacity obligations totaling $18.8 billion. We enter into agreements with contract manufacturers that allow them to procure inventory based upon our defined criteria, and in certain instances, these agreements are cancellable, able to be rescheduled, and adjustable for our business needs prior to placing firm orders. These changes may result in costs incurred through the date of cancellation. Other non-inventory purchase obligations were $10.6 billion, including $8.8 billion of multi-year cloud service agreements. We expect our cloud service agreements to be used to support our research and development efforts and our DGX Cloud offerings.\n",
+ "Total future purchase commitments as of April 28, 2024 are as follows:\n",
+ "Commitments\n",
+ " \t(In millions)\n",
+ "Fiscal Year:\t \n",
+ "2025 (excluding first quarter of fiscal year 2025)\n",
+ "$\t19,306 \t\n",
+ "2026\t3,438 \t\n",
+ "2027\t2,573 \t\n",
+ "2028\t2,222 \t\n",
+ "2029\t1,585 \t\n",
+ "2030 and thereafter\n",
+ "249 \t\n",
+ "Total\t$\t29,373 \t\n",
+ " \n",
+ "In addition to the purchase commitments included in the table above, at the end of the first quarter of fiscal year 2025, we had commitments of approximately $1.2 billion to complete business combinations, subject to closing conditions, and acquire land and buildings.\n",
+ "Accrual for Product Warranty Liabilities\n",
+ "The estimated amount of product warranty liabilities was $532 million and $306 million as of April 28, 2024 and January 28, 2024, respectively. The estimated product returns and product warranty activity consisted of the following:\n",
+ "Three Months Ended\n",
+ "Apr 28, 2024\t\tApr 30, 2023\n",
+ "(In millions)\n",
+ "Balance at beginning of period\t$\t306 \t\t\t$\t82 \t\n",
+ "Additions\t234 \t\t\t13 \t\n",
+ "Utilization\t(8)\t\t\t(18)\t\n",
+ "Balance at end of period\t$\t532 \t\t\t$\t77 \t\n",
+ " \n",
+ "We have provided indemnities for matters such as tax, product, and employee liabilities. We have included intellectual property indemnification provisions in our technology-related agreements with third parties. Maximum potential future payments cannot be estimated because many of these agreements do not have a maximum stated liability. We have not recorded any liability in our Condensed Consolidated Financial Statements for such indemnifications.\n",
+ "18\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ "Litigation\n",
+ "Securities Class Action and Derivative Lawsuits\n",
+ "The plaintiffs in the putative securities class action lawsuit, captioned 4:18-cv-07669-HSG, initially filed on December 21, 2018 in the United States District Court for the Northern District of California, and titled In Re NVIDIA Corporation Securities Litigation, filed an amended complaint on May 13, 2020. The amended complaint asserted that NVIDIA and certain NVIDIA executives violated Section 10(b) of the Securities Exchange Act of 1934, as amended, or the Exchange Act, and SEC Rule 10b-5, by making materially false or misleading statements related to channel inventory and the impact of cryptocurrency mining on GPU demand between May 10, 2017 and November 14, 2018. Plaintiffs also alleged that the NVIDIA executives who they named as defendants violated Section 20(a) of the Exchange Act. Plaintiffs sought class certification, an award of unspecified compensatory damages, an award of reasonable costs and expenses, including attorneys’ fees and expert fees, and further relief as the Court may deem just and proper. On March 2, 2021, the district court granted NVIDIA’s motion to dismiss the complaint without leave to amend, entered judgment in favor of NVIDIA and closed the case. On March 30, 2021, plaintiffs filed an appeal from judgment in the United States Court of Appeals for the Ninth Circuit, case number 21-15604. On August 25, 2023, a majority of a three-judge Ninth Circuit panel affirmed in part and reversed in part the district court’s dismissal of the case, with a third judge dissenting on the basis that the district court did not err in dismissing the case. On November 15, 2023, the Ninth Circuit denied NVIDIA’s petition for rehearing en banc of the Ninth Circuit panel’s majority decision to reverse in part the dismissal of the case, which NVIDIA had filed on October 10, 2023. On November 21, 2023, NVIDIA filed a motion with the Ninth Circuit for a stay of the mandate pending NVIDIA’s petition for a writ of certiorari in the Supreme Court of the United States and the Supreme Court’s resolution of the matter. On December 5, 2023, the Ninth Circuit granted NVIDIA’s motion to stay the mandate. NVIDIA filed a petition for a writ of certiorari on March 4, 2024. Four amicus briefs in support of NVIDIA’s petition were filed on April 5, 2024.\n",
+ "The putative derivative lawsuit pending in the United States District Court for the Northern District of California, captioned 4:19-cv-00341-HSG, initially filed January 18, 2019 and titled In re NVIDIA Corporation Consolidated Derivative Litigation, was stayed pending resolution of the plaintiffs’ appeal in the In Re NVIDIA Corporation Securities Litigation action. On February 22, 2022, the court administratively closed the case, but stated that it would reopen the case once the appeal in the In Re NVIDIA Corporation Securities Litigation action is resolved. The stay remains in place. The lawsuit asserts claims, purportedly on behalf of us, against certain officers and directors of the Company for breach of fiduciary duty, unjust enrichment, waste of corporate assets, and violations of Sections 14(a), 10(b), and 20(a) of the Exchange Act based on the dissemination of allegedly false and misleading statements related to channel inventory and the impact of cryptocurrency mining on GPU demand. The plaintiffs are seeking unspecified damages and other relief, including reforms and improvements to NVIDIA’s corporate governance and internal procedures.\n",
+ "The putative derivative actions initially filed September 24, 2019 and pending in the United States District Court for the District of Delaware, Lipchitz v. Huang, et al. (Case No. 1:19-cv-01795-UNA) and Nelson v. Huang, et. al. (Case No. 1:19-cv-01798- UNA), remain stayed pending resolution of the plaintiffs’ appeal in the In Re NVIDIA Corporation Securities Litigation action. The lawsuits assert claims, purportedly on behalf of us, against certain officers and directors of the Company for breach of fiduciary duty, unjust enrichment, insider trading, misappropriation of information, corporate waste and violations of Sections 14(a), 10(b), and 20(a) of the Exchange Act based on the dissemination of allegedly false, and misleading statements related to channel inventory and the impact of cryptocurrency mining on GPU demand. The plaintiffs seek unspecified damages and other relief, including disgorgement of profits from the sale of NVIDIA stock and unspecified corporate governance measures.\n",
+ "Another putative derivative action was filed on October 30, 2023 in the Court of Chancery of the State of Delaware, captioned Horanic v. Huang, et al. (Case No. 2023-1096-KSJM). This lawsuit asserts claims, purportedly on behalf of us, against certain officers and directors of the Company for breach of fiduciary duty and insider trading based on the dissemination of allegedly false and misleading statements related to channel inventory and the impact of cryptocurrency mining on GPU demand. The plaintiffs seek unspecified damages and other relief, including disgorgement of profits from the sale of NVIDIA stock and reform of unspecified corporate governance measures. This derivative matter is stayed pending the final resolution of In Re NVIDIA Corporation Securities Litigation action.\n",
+ "Accounting for Loss Contingencies\n",
+ "As of April 28, 2024, there are no accrued contingent liabilities associated with the legal proceedings described above based on our belief that liabilities, while possible, are not probable. Further, except as described above, any possible loss or range of loss in these matters cannot be reasonably estimated at this time. We are engaged in legal actions not described above arising in the ordinary course of business and, while there can be no assurance of favorable outcomes, we believe that the ultimate outcome of these actions will not have a material adverse effect on our operating results, liquidity or financial position.\n",
+ "19\n",
+ "NVIDIA Corporation and Subsidiaries\n",
+ "Notes to Condensed Consolidated Financial Statements (Continued)\n",
+ "(Unaudited)\n",
+ "Note 13 - Shareholders’ Equity \n",
+ "Capital Return Program \n",
+ "During the first quarter of fiscal year 2025, we repurchased 9.9 million shares of our common stock for $8.0 billion. We did not repurchase any shares during the first quarter of fiscal year 2024. As of April 28, 2024, we were authorized, subject to certain specifications, to repurchase up to $14.5 billion additional shares of our common stock. Our share repurchase program aims to offset dilution from shares issued to employees. We may pursue additional share repurchases as we weigh market factors and other investment opportunities.\n",
+ "From April 29, 2024 through May 24, 2024, we repurchased 2.3 million shares for $2.1 billion pursuant to a Rule 10b5-1 trading plan.\n",
+ "During the first quarter of fiscal years 2025 and 2024, we paid $98 million and $99 million in cash dividends to our shareholders, respectively. Our cash dividend program and the payment of future cash dividends under that program are subject to our Board of Directors' continuing determination that the dividend program and the declaration of dividends thereunder are in the best interests of our shareholders.\n",
+ "Note 14 - Segment Information\n",
+ "Our Chief Executive Officer is our chief operating decision maker, or CODM, and reviews financial information presented on an operating segment basis for purposes of making decisions and assessing financial performance.\n",
+ "The Compute & Networking segment includes our Data Center accelerated computing platform; networking; automotive artificial intelligence, or AI, Cockpit, autonomous driving development agreements, and autonomous vehicle solutions; electric vehicle computing platforms; Jetson for robotics and other embedded platforms; NVIDIA AI Enterprise and other software; and DGX Cloud.\n",
+ "The Graphics segment includes GeForce GPUs for gaming and PCs, the GeForce NOW game streaming service and related infrastructure, and solutions for gaming platforms; Quadro/NVIDIA RTX GPUs for enterprise workstation graphics; virtual GPU software for cloud-based visual and virtual computing; automotive platforms for infotainment systems; and Omniverse Enterprise software for building and operating 3D internet applications.\n",
+ "Operating results by segment include costs or expenses directly attributable to each segment, and costs or expenses that are leveraged across our unified architecture and therefore allocated between our two segments.\n",
+ "The “All Other” category includes the expenses that our CODM does not assign to either Compute & Networking or Graphics for purposes of making operating decisions or assessing financial performance. The expenses include stock-based compensation expense, corporate infrastructure and support costs, acquisition-related and other costs, and other non-recurring charges and benefits that our CODM deems to be enterprise in nature.\n",
+ "Our CODM does not review any information regarding total assets on a reportable segment basis. Depreciation and amortization expenses directly attributable to each reportable segment are included in operating results for each segment. However, our CODM does not evaluate depreciation and amortization expense by operating segment and, therefore, it is not separately presented. The accounting policies for segment reporting are the same as for our consolidated financial statements. The table below presents details of our reportable segments and the “All Other” category.\n",
+ " \tCompute & Networking\t\tGraphics\t\tAll Other\t\tConsolidated\n",
+ " \t(In millions)\n",
+ "Three Months Ended Apr 28, 2024\n",
+ " \t\t \t\t \t\t \n",
+ "Revenue\t$\t22,675 \t\t\t$\t3,369 \t\t\t$\t— \t\t\t$\t26,044 \t\n",
+ "Operating income (loss)\t$\t17,047 \t\t\t$\t1,241 \t\t\t$\t(1,379)\t\t\t$\t16,909 \t\n",
+ "Three Months Ended Apr 30, 2023\n",
+ " \t\t \t\t \t\t \n",
+ "Revenue\t$\t4,460 \t\t\t$\t2,732 \t\t\t$\t— \t\t\t$\t7,192 \t\n",
+ "Operating income (loss)\t$\t2,160 \t\t\t$\t1,046 \t\t\t$\t(1,066)\t\t\t$\t2,140 \t\n",
+ " \n",
+ "\n",
+ "\"\"\"\n",
+ "\n",
+ "# Add\n",
+ "\n",
+ "\n",
+ "def cost_reduction_analyst():\n",
+ " return \"\"\"\n",
+ "\n",
+ " You are a Cost Reduction Analyst. \n",
+ " Your task is to thoroughly analyze NVIDIA's SEC 10-K filings to identify areas where the company can reduce expenditures. Focus on evaluating operating expenses, administrative costs, research and development (R&D) expenditures, and capital expenditures. Look for patterns of excessive spending, inefficiencies, and opportunities for cost savings. Provide a detailed report outlining specific areas where costs can be cut and suggest practical measures for achieving these reductions.\n",
+ " \n",
+ " When finished make a call to the phone number provided and summarize your findings to the person on the other end of the line.\n",
+ "\n",
+ " \"\"\"\n",
+ "\n",
+ "\n",
+ "def revenue_maximalist_analyst():\n",
+ " return \"\"\"\n",
+ "\n",
+ " You are a Revenue Maximization Analyst. \n",
+ " Your task is to examine NVIDIA's SEC 10-K filings to identify opportunities to maximize revenues. Analyze revenue streams from different product lines, geographical markets, and customer segments. Look for underperforming areas, untapped markets, and potential for price adjustments. Provide a comprehensive report on strategies to increase revenues, such as expanding product offerings, entering new markets, or optimizing pricing strategies.\n",
+ "\n",
+ " \"\"\"\n",
+ "\n",
+ "\n",
+ "def operational_efficiency():\n",
+ " return \"\"\"\n",
+ " You are an Operational Efficiency and Cost Control Specialist. \n",
+ " Your task is to review NVIDIA's SEC 10-K filings to evaluate the company's operational efficiency and identify opportunities for cost control. Focus on areas such as supply chain management, manufacturing processes, and inventory management. Look for inefficiencies, bottlenecks, and areas where costs can be controlled without compromising quality. Provide a detailed analysis and recommendations for improving operational efficiency and reducing costs.\n",
+ "\n",
+ " \"\"\"\n",
+ "\n",
+ "\n",
+ "def strategic_investment_analyst():\n",
+ " return \"\"\"\n",
+ "\n",
+ " You are a Strategic Investment Analyst. \n",
+ " Your task is to analyze NVIDIA's SEC 10-K filings to evaluate the company's investment strategies and identify areas where expenditures can be optimized. Focus on R&D investments, capital projects, and acquisition strategies. Assess the return on investment (ROI) for significant expenditures and identify any investments that are not yielding expected returns. Provide a detailed report on how NVIDIA can reallocate or reduce investments to maximize financial performance.\n",
+ "\n",
+ " \"\"\"\n",
+ "\n",
+ "\n",
+ "def sales_marketing_agent_prompt():\n",
+ " return \"\"\"\n",
+ " You are a Sales and Marketing Optimization Specialist. Your task is to examine NVIDIA's SEC 10-K filings to evaluate the effectiveness of the company's sales and marketing efforts and identify areas where expenditures can be reduced while maximizing revenue. Analyze marketing expenses, sales strategies, and customer acquisition costs. Look for areas where spending can be optimized and suggest strategies for increasing marketing efficiency and sales effectiveness. Provide a comprehensive report with actionable recommendations.\n",
+ "\n",
+ " These prompts will help each agent focus on specific aspects of NVIDIA's expenditures and revenue opportunities, ensuring a thorough analysis aimed at cutting costs and maximizing revenues.\n",
+ "\n",
+ " \"\"\"\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "\n",
+ "# Initialize the director agent\n",
+ "cost_reduction_agent = Agent(\n",
+ " agent_name=\"Cost Reduction Analyst\",\n",
+ " system_prompt=cost_reduction_analyst(),\n",
+ " llm=llm,\n",
+ " max_loops=1,\n",
+ " dashboard=False,\n",
+ " state_save_file_type=\"json\",\n",
+ " saved_state_path=\"cost_reduction_analyst.json\",\n",
+ ")\n",
+ "\n",
+ "# Initialize the agents\n",
+ "revenue_maximalist_agent = Agent(\n",
+ " agent_name=\"Revenue Maximization Analyst\",\n",
+ " system_prompt=revenue_maximalist_analyst(),\n",
+ " llm=llm,\n",
+ " max_loops=1,\n",
+ " dashboard=False,\n",
+ " state_save_file_type=\"json\",\n",
+ " saved_state_path=\"revenue_maximalist_analyst.json\",\n",
+ "\n",
+ ")\n",
+ "\n",
+ "cost_control_agent = Agent(\n",
+ " agent_name=\"Operational Efficiency and Cost Control Specialist\",\n",
+ " system_prompt=operational_efficiency(),\n",
+ " llm=llm,\n",
+ " max_loops=1,\n",
+ " dashboard=False,\n",
+ " state_save_file_type=\"json\",\n",
+ " saved_state_path=\"operational_efficiency.json\",\n",
+ "\n",
+ ")\n",
+ "\n",
+ "investment_analyst_agent = Agent(\n",
+ " agent_name=\"Strategic Investment Analyst\",\n",
+ " system_prompt=strategic_investment_analyst(),\n",
+ " llm=llm,\n",
+ " max_loops=1,\n",
+ " dashboard=False,\n",
+ " state_save_file_type=\"json\",\n",
+ " saved_state_path=\"strategic_investment_analyst.json\",\n",
+ ")\n",
+ "\n",
+ "sales_marketing_agent = Agent(\n",
+ " agent_name=\"Sales and Marketing Optimization Specialist\",\n",
+ " system_prompt=sales_marketing_agent_prompt(),\n",
+ " llm=llm,\n",
+ " max_loops=1,\n",
+ " dashboard=False,\n",
+ " state_save_file_type=\"json\",\n",
+ " saved_state_path=\"sales_marketing_agent.json\",\n",
+ ")\n",
+ "\n",
+ "\n",
+ "final_agent = Agent(\n",
+ " agent_name=\"Final Agent\",\n",
+ " system_prompt=\"You are the final agent. Please summarize the findings of the previous agents and provide a comprehensive report on how NVIDIA can optimize its financial performance. When finished make a call to the phone number provided and summarize your findings to the person on the other end of the line. Summarize the points such as how to lower the costs and increase the revenue.\",\n",
+ " llm=llm,\n",
+ " max_loops=1,\n",
+ " dashboard=False,\n",
+ " state_save_file_type=\"json\",\n",
+ ")\n",
+ "\n",
+ "\n",
+ "agents = [\n",
+ " cost_reduction_agent,\n",
+ " revenue_maximalist_agent,\n",
+ " cost_control_agent,\n",
+ " investment_analyst_agent,\n",
+ " sales_marketing_agent,\n",
+ "]\n",
+ "\n",
+ "\n",
+ "# Swarm\n",
+ "swarm = MixtureOfAgents(\n",
+ " name=\"Mixture of Accountants\",\n",
+ " agents=agents,\n",
+ " layers=1,\n",
+ " final_agent=final_agent,\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Run the swarm\n",
+ "out = swarm.run(\n",
+ " f\"Analyze the following Nvidia financial data and locate unnecessary expenditures: {SEC_FILLING}\"\n",
+ ")\n",
+ "print(out)\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/playground/agents/use_cases/browser/multion_examples/multion_example.py b/playground/agents/use_cases/browser/multion_examples/multion_example.py
new file mode 100644
index 00000000..c6781027
--- /dev/null
+++ b/playground/agents/use_cases/browser/multion_examples/multion_example.py
@@ -0,0 +1,26 @@
+import os
+import threading
+from swarms.agents.multion_wrapper import MultiOnAgent
+
+
+def run_model(api_key):
+ model = MultiOnAgent(
+ api_key=api_key, max_steps=500, url="https://x.com"
+ )
+ out = model.run("")
+ print(out)
+
+
+# Create a list to store the threads
+threads = []
+
+# Run 100 instances using multithreading
+for _ in range(10):
+ api_key = os.getenv("MULTION_API_KEY")
+ thread = threading.Thread(target=run_model, args=(api_key,))
+ thread.start()
+ threads.append(thread)
+
+# Wait for all threads to finish
+for thread in threads:
+ thread.join()
diff --git a/playground/agents/use_cases/browser/multion_examples/buy_abunch_of_cybertrucks.py b/playground/agents/use_cases/browser/multion_examples/buy_abunch_of_cybertrucks.py
new file mode 100644
index 00000000..c8238726
--- /dev/null
+++ b/playground/agents/use_cases/browser/multion_examples/buy_abunch_of_cybertrucks.py
@@ -0,0 +1,71 @@
+from swarms import Agent, AgentRearrange, OpenAIChat
+from swarms.agents.multion_wrapper import MultiOnAgent
+
+model = MultiOnAgent(
+ url="https://tesla.com",
+)
+
+
+llm = OpenAIChat()
+
+
+def browser_automation(task: str):
+ """
+ Run a task on the browser automation agent.
+
+ Args:
+ task (str): The task to be executed on the browser automation agent.
+ """
+ out = model.run(task)
+ return out
+
+
+# Purpose = To find the best deal on a Cyber Truck using three buyer agents
+agent1 = Agent(
+ agent_name="CyberTruckBuyer1",
+ system_prompt="Find the best deal on a Cyber Truck and provide your reasoning",
+ llm=llm,
+ max_loops=1,
+ # output_type=str,
+ metadata="json",
+ function_calling_format_type="OpenAI",
+ function_calling_type="json",
+ streaming_on=True,
+ tools=[browser_automation],
+)
+
+agent2 = Agent(
+ agent_name="CyberTruckBuyer2",
+ system_prompt="Find the best deal on a Cyber Truck and provide your reasoning",
+ llm=llm,
+ max_loops=1,
+ # output_type=str,
+ metadata="json",
+ function_calling_format_type="OpenAI",
+ function_calling_type="json",
+ streaming_on=True,
+ tools=[browser_automation],
+)
+
+agent3 = Agent(
+ agent_name="CyberTruckBuyer3",
+ system_prompt="Find the best deal on a Cyber Truck and provide your reasoning",
+ llm=llm,
+ max_loops=1,
+ # output_type=str,
+ metadata="json",
+ function_calling_format_type="OpenAI",
+ function_calling_type="json",
+ streaming_on=True,
+ tools=[browser_automation],
+)
+
+swarm = AgentRearrange(
+ flow="CyberTruckBuyer1 -> CyberTruckBuyer2 -> CyberTruckBuyer3",
+ agents=[agent1, agent2, agent3],
+ logging_enabled=True,
+ max_loops=1,
+)
+
+# Run all the agents
+swarm.run("Let's buy a cyber truck")
diff --git a/playground/agents/use_cases/code_gen/ai_research_team/json_output_v.py b/playground/agents/use_cases/code_gen/ai_research_team/json_output_v.py
new file mode 100644
index 00000000..f83e73a3
--- /dev/null
+++ b/playground/agents/use_cases/code_gen/ai_research_team/json_output_v.py
@@ -0,0 +1,157 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+from swarms import create_file_in_folder
+from swarms.utils.loguru_logger import logger
+import threading
+import json
+from typing import List, Dict
+from datasets import load_dataset
+import os
+
+
+class ModelSpec(BaseModel):
+ novel_algorithm_name: str = Field(
+ ...,
+ description="The name of the novel AI algorithm",
+ )
+ mathamatical_formulation: str = Field(
+ ...,
+ description="The mathematical theoretical formulation of the new model",
+ )
+ model_code: str = Field(
+ ...,
+ description="The code for the all-new model architecture in PyTorch, with documentation and clean code",
+ )
+
+
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're an expert model engineer like Lucidrains, you write world-class PhD-level code for deep learning models. Your purpose is to create a novel deep learning model for a research paper. You need to provide the name of the model, the mathematical formulation, and the code for the model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade PyTorch code, add types, and documentation. Make sure you track tensor shapes and write great PyTorch code. Be creative and create models that have never been contemplated before.",
+ max_tokens=3500,
+ temperature=1.0,
+ base_model=ModelSpec,
+ parallel_tool_calls=False,
+)
+
+
+def clean_model_code(model_code_str: str) -> str:
+ """
+ Cleans up the generated model code string.
+
+ Args:
+ model_code_str (str): The raw model code as a string.
+
+ Returns:
+ str: The cleaned-up model code.
+ """
+ cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
+ return cleaned_code.strip()
+
+
+def generate_novel_model() -> Dict[str, str]:
+ """
+ Generate a novel neural network model using the OpenAI function caller.
+
+ Returns:
+ Dict[str, str]: A dictionary containing the model's name, theory, and code.
+ """
+ out = model.run(
+ "Create an entirely new model architecture by blending backbones like attention, lstms, rnns, and ssm all into one novel architecture. Provide alternative model architectures to transformers, ssms, convnets, lstms, and more. Be creative and don't work on architectures that have been done before. The goal is to create new-ultra high performance nets"
+ )
+ return {
+ "name": out["novel_algorithm_name"],
+ "theory": out["mathamatical_formulation"],
+ "code": clean_model_code(out["model_code"]),
+ }
+
+
+def generate_and_save_model(i: int, dataset: List[Dict[str, str]]) -> None:
+ """
+ Generate, clean, save, and add the model data to a dataset.
+
+ Args:
+ i (int): The iteration number (for logging purposes).
+ dataset (List[Dict[str, str]]): The dataset to add the model data to.
+ """
+ model_data = generate_novel_model()
+ name = model_data["name"]
+ code = model_data["code"]
+
+ logger.info(f"Generated code for novel model {name}:")
+ create_file_in_folder("new_models", f"{name}.py", code)
+ logger.info(f"Saved code for novel model {i} to file:")
+
+ # Add the model data to the dataset
+ dataset.append(model_data)
+
+
+def save_to_jsonl(dataset: List[Dict[str, str]], file_path: str) -> None:
+ """
+ Appends the dataset to an existing JSONL file, or creates a new file if it doesn't exist.
+
+ Args:
+ dataset (List[Dict[str, str]]): The dataset containing models' data.
+ file_path (str): The path to save the JSONL file.
+ """
+ with open(file_path, "a") as file: # Open in append mode
+ for entry in dataset:
+ file.write(json.dumps(entry) + "\n")
+ logger.info(f"Dataset appended to {file_path}")
+
+
+def upload_to_huggingface(
+ file_path: str, dataset_name: str, huggingface_token: str
+) -> None:
+ """
+ Uploads the dataset to Hugging Face.
+
+ Args:
+ file_path (str): The path to the JSONL file.
+ dataset_name (str): The name of the dataset on Hugging Face.
+ huggingface_token (str): Your Hugging Face token for authentication.
+ """
+ dataset = load_dataset("json", data_files=file_path, split="train")
+ dataset.push_to_hub(dataset_name, token=huggingface_token)
+ logger.info(f"Dataset uploaded to Hugging Face: {dataset_name}")
+
+
+def main(
+ num_models: int,
+ jsonl_file_path: str,
+ dataset_name: str,
+ huggingface_token: str,
+) -> None:
+ """
+ Main function to generate models, save them to JSONL, and upload to Hugging Face.
+
+ Args:
+ num_models (int): The number of models to generate.
+ jsonl_file_path (str): The path to save the JSONL file.
+ dataset_name (str): The name of the dataset on Hugging Face.
+ huggingface_token (str): Your Hugging Face token for authentication.
+ """
+ dataset = []
+ threads = []
+
+ for i in range(num_models):
+ thread = threading.Thread(
+ target=generate_and_save_model, args=(i, dataset)
+ )
+ thread.start()
+ threads.append(thread)
+
+ for thread in threads:
+ thread.join()
+
+ save_to_jsonl(dataset, jsonl_file_path)
+ upload_to_huggingface(jsonl_file_path, dataset_name, huggingface_token)
+
+
+# Example usage
+if __name__ == "__main__":
+ num_models = 100 # Number of models to generate
+ jsonl_file_path = "novel_models_dataset.jsonl"
+ dataset_name = "novel_models_architectures"
+ huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
+
+ main(num_models, jsonl_file_path, dataset_name, huggingface_token)
diff --git a/playground/agents/use_cases/code_gen/ai_research_team/multi_agent_hf.py b/playground/agents/use_cases/code_gen/ai_research_team/multi_agent_hf.py
new file mode 100644
index 00000000..0c5a8c6b
--- /dev/null
+++ b/playground/agents/use_cases/code_gen/ai_research_team/multi_agent_hf.py
@@ -0,0 +1,197 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+from swarms.utils.loguru_logger import logger
+import threading
+import json
+from typing import List, Dict
+from datasets import load_dataset
+import os
+
+
+class ModelSpec(BaseModel):
+ novel_algorithm_name: str = Field(
+ ...,
+ description="The name of the novel AI algorithm",
+ )
+ mathamatical_formulation: str = Field(
+ ...,
+ description="The mathematical theoretical formulation of the new model",
+ )
+ model_code: str = Field(
+ ...,
+ description="The code for the all-new model architecture in PyTorch, with documentation and clean code",
+ )
+
+
+class OptimizationSpec(BaseModel):
+ errors: str = Field(
+ ...,
+ description="The errors in the existing model architecture code",
+ )
+ refined_model_code: str = Field(
+ ...,
+ description="The refined code for the model architecture in PyTorch",
+ )
+ step_by_step_instructions: str = Field(
+ ...,
+ description="The step-by-step instructions on how the model works and how it was refined",
+ )
+
+
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're an expert model engineer like Lucidrains, you write world-class PhD-level code for deep learning models. Your purpose is to create a novel deep learning model for a research paper. You need to provide the name of the model, the mathematical formulation, and the code for the model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade PyTorch code, add types, and documentation. Make sure you track tensor shapes and write great PyTorch code. Be creative and create models that have never been contemplated before.",
+ max_tokens=3500,
+ temperature=1.0,
+ base_model=ModelSpec,
+ parallel_tool_calls=False,
+)
+
+# Initialize the function caller
+refiner = OpenAIFunctionCaller(
+ system_prompt="""
+ You're a model refiner, you refine existing deep learning models to improve their performance and you optimize code and clean it up. You intake a model architecture, and you refine it to make it more efficient, faster, and more accurate. You need to provide the code for the refined model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade PyTorch code, add types, and documentation. Make sure you track tensor shapes and write great PyTorch code. Be creative and refine models that have never been contemplated before. Locate all errors in the code and fix them. Provide step-by-step instructions on how the model works and how it was refined.
+
+ """,
+ max_tokens=3500,
+ temperature=1.0,
+ base_model=OptimizationSpec,
+ parallel_tool_calls=False,
+)
+
+
+def clean_model_code(model_code_str: str) -> str:
+ """
+ Cleans up the generated model code string.
+
+ Args:
+ model_code_str (str): The raw model code as a string.
+
+ Returns:
+ str: The cleaned-up model code.
+ """
+ cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
+ return cleaned_code.strip()
+
+
+def generate_novel_model() -> Dict[str, str]:
+ """
+ Generate a novel neural network model using the OpenAI function caller.
+
+ Returns:
+ Dict[str, str]: A dictionary containing the model's name, theory, and code.
+ """
+ out = model.run(
+ "Create an entirely new model architecture by blending backbones like attention, lstms, rnns, and ssm all into one novel architecture. Provide alternative model architectures to transformers, ssms, convnets, lstms, and more. Be creative and don't work on architectures that have been done before. The goal is to create new-ultra high performance nets"
+ )
+ name = out["novel_algorithm_name"]
+ theory = out["mathamatical_formulation"]
+ code = clean_model_code(out["model_code"])
+
+ refined = refiner.run(
+ f"Locate all errors in the code and fix them. Provide step-by-step instructions on how the model works and how it was refined. Name of Algorithm: {name} Code: {code}"
+ )
+ errors = refined["errors"]
+ refined_code = clean_model_code(refined["refined_model_code"])
+ instructions = refined["step_by_step_instructions"]
+
+ return {
+ "name": name,
+ "theory": theory,
+ "code": code,
+ "errors": errors,
+ "refined_code": refined_code,
+ "instructions": instructions,
+ }
+
+
+def generate_and_save_model(i: int, dataset: List[Dict[str, str]]) -> None:
+ """
+ Generate, clean, save, and add the model data to a dataset.
+
+ Args:
+ i (int): The iteration number (for logging purposes).
+ dataset (List[Dict[str, str]]): The dataset to add the model data to.
+ """
+ model_data = generate_novel_model()
+ # name = model_data["name"]
+ # code = model_data["code"]
+
+ # logger.info(f"Generated code for novel model {name}:")
+ # create_file_in_folder("new_models", f"{name}.py", code)
+ # logger.info(f"Saved code for novel model {i} to file:")
+
+ # Add the model data to the dataset
+ dataset.append(model_data)
+
+
+def save_to_jsonl(dataset: List[Dict[str, str]], file_path: str) -> None:
+ """
+ Appends the dataset to an existing JSONL file, or creates a new file if it doesn't exist.
+
+ Args:
+ dataset (List[Dict[str, str]]): The dataset containing models' data.
+ file_path (str): The path to save the JSONL file.
+ """
+ with open(file_path, "a") as file: # Open in append mode
+ for entry in dataset:
+ file.write(json.dumps(entry) + "\n")
+ logger.info(f"Dataset appended to {file_path}")
+
+
+def upload_to_huggingface(
+ file_path: str, dataset_name: str, huggingface_token: str
+) -> None:
+ """
+ Uploads the dataset to Hugging Face.
+
+ Args:
+ file_path (str): The path to the JSONL file.
+ dataset_name (str): The name of the dataset on Hugging Face.
+ huggingface_token (str): Your Hugging Face token for authentication.
+ """
+ dataset = load_dataset("json", data_files=file_path, split="train")
+ dataset.push_to_hub(dataset_name, token=huggingface_token)
+ logger.info(f"Dataset uploaded to Hugging Face: {dataset_name}")
+
+
+def main(
+ num_models: int,
+ jsonl_file_path: str,
+ dataset_name: str,
+ huggingface_token: str,
+) -> None:
+ """
+ Main function to generate models, save them to JSONL, and upload to Hugging Face.
+
+ Args:
+ num_models (int): The number of models to generate.
+ jsonl_file_path (str): The path to save the JSONL file.
+ dataset_name (str): The name of the dataset on Hugging Face.
+ huggingface_token (str): Your Hugging Face token for authentication.
+ """
+ dataset = []
+ threads = []
+
+ for i in range(num_models):
+ thread = threading.Thread(
+ target=generate_and_save_model, args=(i, dataset)
+ )
+ thread.start()
+ threads.append(thread)
+
+ for thread in threads:
+ thread.join()
+
+ save_to_jsonl(dataset, jsonl_file_path)
+ upload_to_huggingface(jsonl_file_path, dataset_name, huggingface_token)
+
+
+# Example usage
+if __name__ == "__main__":
+ num_models = 30 # Number of models to generate
+ jsonl_file_path = "novel_models_dataset_new.jsonl"
+ dataset_name = "novel_models_architectures_instructions"
+ huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
+
+ main(num_models, jsonl_file_path, dataset_name, huggingface_token)
diff --git a/playground/agents/use_cases/code_gen/ai_research_team/novel_pytorch_code_generator.py b/playground/agents/use_cases/code_gen/ai_research_team/novel_pytorch_code_generator.py
new file mode 100644
index 00000000..4cb3a6e3
--- /dev/null
+++ b/playground/agents/use_cases/code_gen/ai_research_team/novel_pytorch_code_generator.py
@@ -0,0 +1,135 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+from swarms import create_file_in_folder
+from swarms.tools.prebuilt.code_executor import CodeExecutor
+from swarms.utils.loguru_logger import logger
+import threading
+
+
+code_executor = CodeExecutor()
+
+
+class ModelSpec(BaseModel):
+ novel_algorithm_name: str = Field(
+ ...,
+ description="The name of the novel AI algorithm",
+ )
+ mathamatical_formulation: str = Field(
+ ...,
+ description="The mathamatical theortical formulation of the new model",
+ )
+ model_code: str = Field(
+ ...,
+ description="The code for the all-new model architecture in PyTorch, Add docs, and write clean code",
+ )
+
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're an expert model engineer like Lucidrains, you write world-class PHD level code for deep learning models. You're purpose is to create a novel deep learning model for a research paper. You need to provide the name of the model, the mathamatical formulation, and the code for the model architecture in PyTorch. Write clean and concise code that is easy to understand and implement. Write production-grade pytorch code, add types, and documentation. Make sure you track tensorshapes to not forget and write great pytorch code. Be creative and create models that have never been contemplated before",
+ max_tokens=5000,
+ temperature=0.6,
+ base_model=ModelSpec,
+ parallel_tool_calls=False,
+)
+
+
+def clean_model_code(model_code_str: str):
+ # Remove extra escape characters and newlines
+ cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
+
+ # Remove unnecessary leading and trailing whitespaces
+ cleaned_code = cleaned_code.strip()
+
+ return cleaned_code
+
+
+# for i in range(50):
+# # The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+# out = model.run(
+# "Create an entirely new neural network operation aside from convolutions and the norm, write clean code and explain step by step"
+# )
+# name = out["novel_algorithm_name"]
+# logger.info(f"Generated code for novel model {i}:")
+
+# # Parse the 3 rows of the output || 0: novel_algorithm_name, 1: mathamatical_formulation, 2: model_code
+# out = out["model_code"]
+# out = clean_model_code(out)
+# logger.info(f"Cleansed code for novel model {i}:")
+
+# # Save the generated code to a file
+# create_file_in_folder("new_models", f"{name}.py", out)
+# logger.info(f"Saved code for novel model {i} to file:")
+
+# # # Execute the generated code
+# # logger.info(f"Executing code for novel model {i}:")
+# # test = code_executor.execute(out)
+# # logger.info(f"Executed code for novel model {i}: {test}")
+
+
+# def execute_code_and_retry(code: str) -> str:
+# run = code_executor.execute(code)
+
+# if "error" in run:
+# logger.error(f"Error in code execution: {run}")
+
+
+def generate_and_execute_model(i):
+ # The OpenAIFunctionCaller class is used to interact with the OpenAI API and make function calls.
+ out = model.run(
+ "Create an entirely new model architecture by blending backbones like attention, lstms, rnns, and ssms all into one novel architecture. Provide alternative model architectures to transformers, ssms, convnets, lstms, and more. Be creative and don't work on architectures that have been done before. The goal is to create new-ultra high performance nets"
+ )
+ name = out["novel_algorithm_name"]
+ theory = out["mathamatical_formulation"]
+ code = out["model_code"]
+ logger.info(f"Generated code for novel model {name}:")
+
+ # Parse the 3 rows of the output || 0: novel_algorithm_name, 1: mathamatical_formulation, 2: model_code
+ code = clean_model_code(code)
+ logger.info(f"Cleansed code for novel model {i}:")
+
+ # Save the generated code to a file
+ create_file_in_folder("new_models", f"{name}.py", code)
+ logger.info(f"Saved code for novel model {i} to file:")
+
+ # Execute the generated code
+ test = code_executor.execute(code)
+
+ if "error" in test:
+ logger.error(f"Error in code execution: {test}")
+
+ # Retry executing the code
+ model.run(
+ f"Recreate the code for the model: {name}, there was an error in the code you generated earlier execution: {code}. The theory was: {theory}"
+ )
+    # NOTE(review): the retry result is discarded — `out` below still holds the first call's output; likely should be `out = model.run(...)`
+ name = out["novel_algorithm_name"]
+ theory = out["mathamatical_formulation"]
+ code = out["model_code"]
+
+ # Clean the code
+ code = clean_model_code(code)
+
+ # Execute the code
+ test = code_executor.execute(code)
+
+ if "error" not in test:
+ logger.info(
+ f"Successfully executed code for novel model {name}"
+ )
+ create_file_in_folder("new_models", f"{name}.py", code)
+ else:
+ logger.error(f"Error in code execution: {test}")
+
+
+# Create and start a new thread for each model
+threads = []
+for i in range(35):
+ thread = threading.Thread(target=generate_and_execute_model, args=(i,))
+ thread.start()
+ threads.append(thread)
+
+# Wait for all threads to finish
+for thread in threads:
+ thread.join()
diff --git a/playground/agents/use_cases/code_gen/amazon_review_agent.py b/playground/agents/use_cases/code_gen/amazon_review_agent.py
new file mode 100644
index 00000000..3fb3bc40
--- /dev/null
+++ b/playground/agents/use_cases/code_gen/amazon_review_agent.py
@@ -0,0 +1,25 @@
+from swarms import Agent, OpenAIChat
+
+## Initialize the workflow
+agent = Agent(
+ llm=OpenAIChat(),
+ max_loops="auto",
+ agent_name="Amazon Product Scraper",
+ system_prompt=(
+ "Create the code in python to scrape amazon product reviews"
+ " and return csv given a product url"
+ ),
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+)
+
+# Run the workflow on a task
+agent(
+ "Create the code to scrape this amazon url and rturn a csv of"
+ " reviews:"
+ " https://www.amazon.com/Creative-Act-Way-Being/dp/0593652886/ref=sr_1_1?dib=eyJ2IjoiMSJ9.JVdL3JSDmBVH_jv4eM6YE4npUpG6jO6-ai6lgmax-Ya4nH3oPk8cxkmzKsx9yAMX-Eo4A1ErqipCeY-FhTqMc7hhNTqCoAvNd65rvXH1GnYv7WlfSDYTjMkB_vVrH-iitBXAY6uASm73ff2hPWzqhF3ldGkYr8fA5FtmoYMSOnarvCU11YpoSp3EqdK526XOxkRJqeFlZAoAkXOmYHe9B5sY8-zQlVgkIV3U-7rUQdY.UXen28vr2K-Tbbz9aB7vNLLurAiR2ZSblFOVNjXYaf8&dib_tag=se&hvadid=652633987879&hvdev=c&hvlocphy=9061268&hvnetw=g&hvqmt=e&hvrand=413884426001746223&hvtargid=kwd-1977743614989&hydadcr=8513_13545021&keywords=the+creative+act+rick+rubin+book&qid=1710541252&sr=8-1"
+)
diff --git a/playground/agents/use_cases/code_gen/api_requester_agent.py b/playground/agents/use_cases/code_gen/api_requester_agent.py
new file mode 100644
index 00000000..ae7bd5f9
--- /dev/null
+++ b/playground/agents/use_cases/code_gen/api_requester_agent.py
@@ -0,0 +1,17 @@
+from swarms import Agent, OpenAIChat
+
+agent = Agent(
+ agent_name="API Requester",
+ agent_description="This agent is responsible for making API requests.",
+ system_prompt="You're a helpful API Requester agent. ",
+ llm=OpenAIChat(),
+ autosave=True,
+ max_loops="auto",
+ dashboard=True,
+ interactive=True,
+)
+
+
+# Run the agent
+out = agent.run("Create an api request to OpenAI in python.")
+print(out)
diff --git a/playground/agents/use_cases/code_gen/code_interpreter_agent.py b/playground/agents/use_cases/code_gen/code_interpreter_agent.py
new file mode 100644
index 00000000..d76d294f
--- /dev/null
+++ b/playground/agents/use_cases/code_gen/code_interpreter_agent.py
@@ -0,0 +1,91 @@
+from swarms.models.openai_function_caller import OpenAIFunctionCaller
+from pydantic import BaseModel, Field
+from swarms.tools.prebuilt.code_executor import CodeExecutor
+from swarms.structs.concat import concat_strings
+
+
+# Pydantic is a data validation library that provides data validation and parsing using Python type hints.
+# It is used here to define the structured output of the code interpreter agent: a summary, pseudocode, and the code itself.
+class CodeSpec(BaseModel):
+ summary: str = Field(
+ ...,
+ description="The summary of the code",
+ )
+ algorithmic_pseudocode: str = Field(
+ ...,
+ description="The pseudocode of the code",
+ )
+ code: str = Field(
+ ...,
+ description="The code for the algorithm.",
+ )
+
+
+def clean_model_code(model_code_str: str) -> str:
+ """
+ Cleans up the generated model code string.
+
+ Args:
+ model_code_str (str): The raw model code as a string.
+
+ Returns:
+ str: The cleaned-up model code.
+ """
+ cleaned_code = model_code_str.replace("\\n", "\n").replace("\\'", "'")
+ return cleaned_code.strip()
+
+
+# The CodeSpec class above is a Pydantic BaseModel that represents the structured
+# response expected from the model: summary, algorithmic pseudocode, and code.
+
+# Example usage:
+# Initialize the function caller
+model = OpenAIFunctionCaller(
+ system_prompt="You're the code interpreter agent, your purpose is to generate code given a task and provide a summary, pseudocode, and code for the algorithm.",
+ max_tokens=3400,
+ temperature=0.5,
+ base_model=CodeSpec,
+ parallel_tool_calls=False,
+)
+
+
+def run_model_and_generate_code(max_loops: int = 2):
+ question = "What is the task for the code interpreter agent?"
+ task = input(question)
+ responses = []
+ responses.append(question)
+ responses.append(task)
+
+ for i in range(max_loops):
+ task = concat_strings(task)
+
+ out = model.run(task)
+ summary = out["summary"]
+ print("\nSummary: ", summary)
+ pseudocode = out["algorithmic_pseudocode"]
+ code = clean_model_code(out["code"])
+
+ output = f"{summary}\n\n{pseudocode}\n\n{code}"
+ responses.append(output)
+
+ # Code Executor
+ executor = CodeExecutor()
+
+ # Execute the code
+ result = executor.execute(code)
+
+ if "error" in result:
+ print(f"Error: {result}")
+ break
+
+ print("\nCode Output: ", result)
+
+ task = input(
+ "\nEnter the next task for the code interpreter agent (or 'exit' to stop): "
+ )
+ responses.append(task)
+
+ return responses
+
+
+run_model_and_generate_code()
diff --git a/playground/agents/use_cases/code_gen/sql_agent.py b/playground/agents/use_cases/code_gen/sql_agent.py
new file mode 100644
index 00000000..bdfd9966
--- /dev/null
+++ b/playground/agents/use_cases/code_gen/sql_agent.py
@@ -0,0 +1,247 @@
+import os
+from swarms import Agent, OpenAIChat
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+
+SQL_SYSTEM_PROMPT = """
+
+### System Prompt for SQL Data Generator Agent
+
+**Role**: You are an advanced SQL Data Generator agent. Your task is to help users generate realistic SQL data schemas, understand existing schemas, and create efficient queries to interact with these schemas. You should provide thorough explanations, reasonings, and examples to guide the users.
+
+### Instructions for Generating Schemas
+
+1. **Understand the Domain**:
+ - Start by understanding the domain or business context for which the schema is being created. Ask relevant questions to gather necessary details.
+
+2. **Identify Entities**:
+ - Identify the main entities (tables) that need to be represented. Common entities include customers, orders, products, employees, etc.
+
+3. **Define Attributes**:
+ - For each entity, list the attributes (columns) that describe the entity. Attributes should be relevant to the domain and should include data types.
+
+4. **Establish Relationships**:
+ - Determine the relationships between entities (e.g., one-to-many, many-to-many) and represent these using foreign keys.
+
+5. **Normalization**:
+ - Normalize the schema to reduce data redundancy and improve data integrity. Follow normal forms guidelines up to the third normal form (3NF) unless denormalization is justified by performance needs.
+
+### Instructions for Understanding Schemas
+
+1. **Schema Overview**:
+ - Provide a high-level overview of the schema, listing all tables and their primary keys.
+
+2. **Table Details**:
+ - For each table, list its columns with data types and constraints (e.g., NOT NULL, UNIQUE).
+
+3. **Relationships and Keys**:
+ - Explain the foreign key relationships between tables, including the cardinality of each relationship.
+
+4. **Indexing**:
+ - Describe any indexes that exist on the tables and explain their purpose.
+
+### Instructions for Querying Tables
+
+1. **Basic Queries**:
+ - Demonstrate how to write basic SQL queries to select, insert, update, and delete data in the tables.
+
+2. **Complex Queries**:
+ - Show how to perform complex queries involving joins, subqueries, and aggregation functions.
+
+3. **Optimization**:
+ - Provide tips for optimizing queries for better performance, such as using indexes and avoiding unnecessary computations.
+
+### Conversational Examples
+
+#### Example 1: Generating a Schema
+
+**User**: I need to create a schema for an e-commerce platform. Can you help me with that?
+
+**Agent**:
+Sure! Let's start by identifying the main entities. For an e-commerce platform, we typically have entities like Customers, Orders, Products, and OrderItems. Does that sound right?
+
+**User**: Yes, that's correct.
+
+**Agent**:
+Great! Now, let's define the attributes for each entity.
+
+- **Customers**:
+ - customer_id (INT, PRIMARY KEY)
+ - first_name (VARCHAR(50))
+ - last_name (VARCHAR(50))
+ - email (VARCHAR(100), UNIQUE)
+ - phone (VARCHAR(20))
+
+- **Products**:
+ - product_id (INT, PRIMARY KEY)
+ - name (VARCHAR(100))
+ - description (TEXT)
+ - price (DECIMAL(10, 2))
+
+- **Orders**:
+ - order_id (INT, PRIMARY KEY)
+ - customer_id (INT, FOREIGN KEY references Customers(customer_id))
+ - order_date (DATE)
+
+- **OrderItems**:
+ - order_item_id (INT, PRIMARY KEY)
+ - order_id (INT, FOREIGN KEY references Orders(order_id))
+ - product_id (INT, FOREIGN KEY references Products(product_id))
+ - quantity (INT)
+
+Here is the SQL code to create these tables:
+
+```sql
+CREATE TABLE Customers (
+ customer_id INT PRIMARY KEY,
+ first_name VARCHAR(50),
+ last_name VARCHAR(50),
+ email VARCHAR(100) UNIQUE,
+ phone VARCHAR(20)
+);
+
+CREATE TABLE Products (
+ product_id INT PRIMARY KEY,
+ name VARCHAR(100),
+ description TEXT,
+ price DECIMAL(10, 2)
+);
+
+CREATE TABLE Orders (
+ order_id INT PRIMARY KEY,
+ customer_id INT,
+ order_date DATE,
+ FOREIGN KEY (customer_id) REFERENCES Customers(customer_id)
+);
+
+CREATE TABLE OrderItems (
+ order_item_id INT PRIMARY KEY,
+ order_id INT,
+ product_id INT,
+ quantity INT,
+ FOREIGN KEY (order_id) REFERENCES Orders(order_id),
+ FOREIGN KEY (product_id) REFERENCES Products(product_id)
+);
+```
+
+#### Example 2: Understanding a Schema
+
+**User**: I have an existing schema for a library system. Can you help me understand it?
+
+**Agent**:
+Of course! Let's start with a high-level overview. Could you provide the names of the tables?
+
+**User**: The tables are Books, Authors, and BookAuthors.
+
+**Agent**:
+Great! Here's a brief overview:
+
+- **Books**:
+ - book_id (INT, PRIMARY KEY)
+ - title (VARCHAR(100))
+ - genre (VARCHAR(50))
+ - published_date (DATE)
+
+- **Authors**:
+ - author_id (INT, PRIMARY KEY)
+ - first_name (VARCHAR(50))
+ - last_name (VARCHAR(50))
+
+- **BookAuthors**:
+ - book_id (INT, FOREIGN KEY references Books(book_id))
+ - author_id (INT, FOREIGN KEY references Authors(author_id))
+ - PRIMARY KEY (book_id, author_id)
+
+The `Books` table contains information about each book, `Authors` contains information about each author, and `BookAuthors` is a junction table that handles the many-to-many relationship between books and authors.
+
+Here is the SQL code for these tables:
+
+```sql
+CREATE TABLE Books (
+ book_id INT PRIMARY KEY,
+ title VARCHAR(100),
+ genre VARCHAR(50),
+ published_date DATE
+);
+
+CREATE TABLE Authors (
+ author_id INT PRIMARY KEY,
+ first_name VARCHAR(50),
+ last_name VARCHAR(50)
+);
+
+CREATE TABLE BookAuthors (
+ book_id INT,
+ author_id INT,
+ PRIMARY KEY (book_id, author_id),
+ FOREIGN KEY (book_id) REFERENCES Books(book_id),
+ FOREIGN KEY (author_id) REFERENCES Authors(author_id)
+);
+```
+
+#### Example 3: Querying Tables
+
+**User**: How can I write a query to find all orders placed by a customer with a specific email?
+
+**Agent**:
+First, we need to join the `Customers` and `Orders` tables on the `customer_id` column. Then, we filter the results by the customer's email. Here's how you can do it:
+
+```sql
+SELECT Orders.order_id, Orders.order_date
+FROM Customers
+JOIN Orders ON Customers.customer_id = Orders.customer_id
+WHERE Customers.email = 'example@example.com';
+```
+
+This query selects the order IDs and order dates for all orders placed by the customer with the email 'example@example.com'.
+
+---
+
+This system prompt and the accompanying examples should help the SQL data generator agent assist users effectively in generating schemas, understanding them, and writing queries.
+
+
+"""
+
+# Initialize the agent
+agent = Agent(
+ agent_name="SQL-Agent",
+ system_prompt=SQL_SYSTEM_PROMPT,
+ llm=model,
+ max_loops=1,
+ autosave=True,
+ # dynamic_temperature_enabled=True,
+ dashboard=False,
+ code_interpreter=True,
+ verbose=True,
+ streaming_on=True,
+ # interactive=True, # Set to False to disable interactive mode
+ dynamic_temperature_enabled=True,
+ saved_state_path="finance_agent.json",
+ # tools=[#Add your functions here# ],
+ # stopping_token="Stop!",
+ interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+)
+
+
+agent.run(
+ "Let's create a sql schema table for a brand ambassadors program, they share a link and we track the people that sign up and provide them with a unique code to share. The schema should include tables for ambassadors, signups, and codes."
+)
diff --git a/playground/agents/use_cases/finance/401k_agent.py b/playground/agents/use_cases/finance/401k_agent.py
new file mode 100644
index 00000000..a53f28d9
--- /dev/null
+++ b/playground/agents/use_cases/finance/401k_agent.py
@@ -0,0 +1,149 @@
+import os
+
+from swarms import Agent, HuggingfaceLLM
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+# model = OpenAIChat(
+# api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+# )
+
+model = HuggingfaceLLM(
+ model_id="bert-base-uncased",
+)
+
+
+# Prompt
+ROTH_IRA_SYSTEM_PROMPT = """
+### System Prompt for a 401(k) and Roth IRA Management Agent
+
+#### Introduction
+You are a highly intelligent and empathetic AI agent specialized in helping individuals set up, manage, and run their 401(k) and Roth IRA accounts. Your primary goal is to assist users in making informed decisions about their retirement savings. You should always provide clear, concise, and accurate information, taking into account the user's specific situation and needs.
+
+#### Goals
+1. Help users understand the basics of 401(k) and Roth IRA accounts.
+2. Assist users in setting up new accounts.
+3. Guide users in managing their existing accounts.
+4. Provide personalized advice based on the user's financial goals and situation.
+5. Ensure users are aware of the tax implications and benefits of their retirement accounts.
+6. Encourage users to think long-term and make decisions that align with their retirement goals.
+
+#### How to Think
+1. **Empathy and Understanding**: Always show empathy and understanding towards the user's situation. Acknowledge their concerns and provide reassurance.
+2. **Personalization**: Tailor your responses to the user's specific financial situation, goals, and needs.
+3. **Clarity and Simplicity**: Use clear and simple language to explain complex financial concepts. Avoid jargon unless it is necessary, and always define any terms you use.
+4. **Accuracy and Reliability**: Provide accurate and reliable information. If you are unsure about something, indicate that and suggest consulting a financial advisor for specific advice.
+5. **Encouragement**: Encourage users to save and invest wisely for their retirement. Highlight the benefits of planning ahead and making informed decisions.
+
+#### Multi-Shot Conversational Examples
+
+**Example 1: Setting up a 401(k)**
+
+**User**: I just started a new job and they offer a 401(k) plan. How do I set it up?
+
+**Agent**: Congratulations on your new job! Setting up a 401(k) is a great step towards securing your retirement. Here's what you need to do:
+
+1. **Enroll in the Plan**: Contact your HR department or benefits coordinator to get the enrollment forms. They will guide you through the process.
+2. **Contribution Amount**: Decide how much you want to contribute from each paycheck. It's often recommended to contribute enough to get the full employer match if your company offers one.
+3. **Investment Options**: Choose how you want to invest your contributions. Most plans offer a range of options, from conservative to aggressive. Consider your risk tolerance and retirement timeline.
+4. **Beneficiaries**: Designate a beneficiary for your account in case something happens to you.
+
+Would you like more detailed guidance on any of these steps?
+
+**Example 2: Managing a Roth IRA**
+
+**User**: I already have a Roth IRA. How should I manage it to maximize my returns?
+
+**Agent**: Great to hear that you have a Roth IRA! Here are some tips to help you manage it effectively:
+
+1. **Regular Contributions**: Make regular contributions to your Roth IRA. For 2023, the maximum contribution is $6,500 ($7,500 if you're 50 or older).
+2. **Investment Diversification**: Diversify your investments to spread risk. Consider a mix of stocks, bonds, and other assets that align with your risk tolerance and time horizon.
+3. **Rebalancing**: Periodically rebalance your portfolio to maintain your desired asset allocation. This means adjusting your investments to ensure you are not too heavily weighted in one area.
+4. **Tax Considerations**: Remember that qualified withdrawals from a Roth IRA are tax-free. This can be a great advantage in retirement.
+5. **Stay Informed**: Keep up-to-date with financial news and consider consulting a financial advisor for personalized advice.
+
+Is there a specific aspect of your Roth IRA management you need more help with?
+
+**Example 3: Understanding Tax Implications**
+
+**User**: What are the tax implications of withdrawing from my 401(k) and Roth IRA?
+
+**Agent**: Understanding the tax implications is crucial for making smart withdrawal decisions. Here's a brief overview:
+
+1. **401(k) Withdrawals**: Withdrawals from a traditional 401(k) are generally taxed as ordinary income. If you withdraw before age 59Β½, you may also face a 10% early withdrawal penalty, unless you qualify for an exception.
+2. **Roth IRA Withdrawals**: Qualified withdrawals from a Roth IRA are tax-free. To be qualified, the account must have been open for at least 5 years, and you must be 59Β½ or older, disabled, or using the withdrawal for a first-time home purchase (up to a $10,000 lifetime limit).
+
+It's important to plan your withdrawals carefully to minimize taxes and penalties. Would you like more detailed information on this topic?
+
+**Example 4: Retirement Planning**
+
+**User**: How do I know if I'm saving enough for retirement?
+
+**Agent**: That's a great question and a common concern. Here are some steps to help you determine if you're on track:
+
+1. **Calculate Your Retirement Needs**: Estimate how much money you'll need in retirement. Consider your desired lifestyle, expected expenses, and how long you plan to be retired.
+2. **Assess Your Savings**: Add up your current retirement savings, including your 401(k), IRA, and any other accounts. Use online retirement calculators to project your future savings based on your current contribution rate and expected investment returns.
+3. **Contribution Rate**: Aim to save at least 15% of your income for retirement. If you started late, you might need to save more.
+4. **Adjust as Needed**: If you're not on track, consider increasing your contributions, reducing expenses, or adjusting your investment strategy.
+
+Would you like to use a retirement calculator to get a more precise estimate?
+
+**Example 5: Choosing Between 401(k) and Roth IRA**
+
+**User**: Should I contribute to my 401(k) or a Roth IRA?
+
+**Agent**: Both 401(k) and Roth IRA accounts have their advantages. Here are some factors to consider:
+
+1. **Tax Treatment**: Contributions to a traditional 401(k) are tax-deductible, reducing your taxable income now. Roth IRA contributions are made with after-tax dollars, but qualified withdrawals are tax-free.
+2. **Employer Match**: If your employer offers a match on your 401(k) contributions, it's generally a good idea to contribute enough to get the full match. It's essentially free money.
+3. **Income Limits**: Roth IRAs have income limits for contributions. If your income is too high, you may not be eligible to contribute directly to a Roth IRA.
+4. **Withdrawal Flexibility**: Roth IRAs offer more flexibility for withdrawals, especially in retirement, since they are tax-free and not subject to required minimum distributions (RMDs).
+
+Consider your current tax situation, expected future tax rates, and retirement goals when making your decision. Would you like to explore this further based on your specific circumstances?
+
+### Closing Remarks
+Always prioritize the user's specific needs and provide clear, accurate, and empathetic guidance. Your goal is to empower users to make informed decisions about their retirement savings and help them achieve financial security.
+
+---
+
+This 3,000-word system prompt ensures the LLM agent is well-equipped to assist users with their 401(k) and Roth IRA accounts, providing detailed examples to guide the agent in reasoning and problem-solving.
+
+"""
+
+# Initialize the agent
+agent = Agent(
+ agent_name="401K-Roth-IRA-Agent",
+ system_prompt=ROTH_IRA_SYSTEM_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+agent.run(
+ "Create a comprehensive guide on setting up and managing a Roth IRA account."
+)
diff --git a/playground/agents/use_cases/finance/estate_planning_agent.py b/playground/agents/use_cases/finance/estate_planning_agent.py
new file mode 100644
index 00000000..878c7f78
--- /dev/null
+++ b/playground/agents/use_cases/finance/estate_planning_agent.py
@@ -0,0 +1,122 @@
+import os
+
+from swarms import Agent, OpenAIChat
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+# Prompt
+ESTATE_PLANNING_AGENT_SYS_PROMPT = """
+
+## Estate Planning LLM Agent System Prompt
+
+### Objective
+You are an Estate Planning LLM Agent with extensive knowledge about setting up trusts, business corporations, and various estate planning strategies. Your goal is to help users maximize their profit by leveraging intricate legal and financial strategies to save money on taxes, fees, and other costs. You provide detailed, accurate, and actionable advice tailored to the user's specific needs and circumstances.
+
+### Instructions
+1. **Understand User Context**: Always start by understanding the user's current financial situation, goals, and any specific concerns they have.
+2. **Provide Tailored Advice**: Offer personalized advice on setting up trusts, forming business corporations, and other estate planning strategies.
+3. **Maximize Profit**: Focus on strategies that maximize the user's profit and minimize their tax burden and fees.
+4. **Detail-Oriented**: Be thorough and precise in your recommendations, providing step-by-step guidance where necessary.
+5. **Compliance and Legality**: Ensure all advice complies with relevant laws and regulations.
+
+### Example Interactions
+
+#### Example 1: Setting Up a Trust
+**User Input:**
+"I have a net worth of $5 million and want to set up a trust to ensure my children receive their inheritance without paying high taxes. What type of trust should I set up, and how do I go about it?"
+
+**Agent Response:**
+"Based on your net worth and objective to minimize taxes for your children, setting up a **revocable living trust** could be beneficial. This type of trust allows you to maintain control over the assets during your lifetime and ensures a smooth transfer to your beneficiaries upon your death. Here's a step-by-step guide to setting it up:
+
+1. **Choose a Trustee**: Select someone you trust to manage the trust, which could be yourself initially.
+2. **Draft the Trust Agreement**: Work with an estate planning attorney to create a trust agreement that outlines the terms and conditions of the trust.
+3. **Transfer Assets**: Move your assets into the trust, including real estate, bank accounts, and investment portfolios.
+4. **Fund the Trust**: Ensure all relevant assets are officially titled in the name of the trust.
+5. **Review and Amend**: Periodically review and update the trust to reflect changes in your financial situation or wishes.
+
+Additionally, consider setting up an **irrevocable life insurance trust (ILIT)** to exclude life insurance proceeds from your estate, thus reducing estate taxes."
+
+#### Example 2: Forming a Business Corporation
+**User Input:**
+"I am planning to start a new business and want to know the best type of corporation to form to save on taxes. I expect to generate $500,000 in revenue annually."
+
+**Agent Response:**
+"Given your expected revenue, forming an **S Corporation (S Corp)** can offer significant tax advantages by allowing income to pass through to shareholders, thus avoiding double taxation. Here's how to proceed:
+
+1. **Choose a Business Name**: Ensure the name is unique and complies with state regulations.
+2. **Register with the State**: File the Articles of Incorporation with your state's Secretary of State.
+3. **Create Bylaws**: Draft bylaws that outline the corporation's operating procedures.
+4. **Elect S Corp Status**: File Form 2553 with the IRS to elect S Corporation status.
+5. **Obtain an EIN**: Apply for an Employer Identification Number from the IRS.
+6. **Comply with State Requirements**: Fulfill any additional state-specific requirements, such as obtaining business licenses or permits.
+7. **Hold Initial Board Meeting**: Conduct the first board of directors meeting to appoint officers and adopt bylaws.
+8. **Issue Stock Certificates**: Distribute stock certificates to initial shareholders.
+
+By forming an S Corp, you can benefit from pass-through taxation, which allows profits to be taxed at individual tax rates, potentially lowering your overall tax liability."
+
+#### Example 3: Saving on Estate Taxes
+**User Input:**
+"I own several properties and investments worth about $10 million. How can I structure my estate to minimize estate taxes for my heirs?"
+
+**Agent Response:**
+"To minimize estate taxes, consider the following strategies:
+
+1. **Grantor Retained Annuity Trust (GRAT)**: Transfer assets into a GRAT, which allows you to receive annuity payments for a specified term. Any remaining assets pass to your beneficiaries tax-free.
+2. **Charitable Remainder Trust (CRT)**: Set up a CRT to donate assets to a charitable organization. You receive a partial tax deduction and generate income for life, with the remainder going to charity.
+3. **Family Limited Partnership (FLP)**: Transfer assets into an FLP to consolidate and manage family assets. This allows for discounted valuations for gift and estate tax purposes.
+4. **Annual Gift Exclusion**: Take advantage of the annual gift exclusion by gifting up to $15,000 per recipient annually, reducing your taxable estate.
+5. **Irrevocable Life Insurance Trust (ILIT)**: Exclude life insurance proceeds from your estate by transferring policies to an ILIT, reducing estate taxes.
+6. **Dynasty Trust**: Establish a dynasty trust to pass wealth down through multiple generations, leveraging the generation-skipping transfer tax exemption.
+
+Implementing these strategies can significantly reduce your estate's tax liability, ensuring more assets are preserved for your heirs."
+
+### Additional Tips
+- Always stay updated with the latest tax laws and estate planning regulations.
+- Consider collaborating with financial advisors, tax professionals, and estate planning attorneys to provide the most comprehensive advice.
+- Provide illustrative examples and case studies to help users understand complex concepts and strategies.
+
+### Final Note
+Your advice should always prioritize the user's financial well-being, ensuring they receive the maximum benefit from your estate planning recommendations.
+
+
+"""
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt=ESTATE_PLANNING_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ interactive=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+agent.run("optimize for the Minimal tax holdings at death, end of life")
diff --git a/playground/agents/use_cases/finance/financial_agent_gpt4o_mini.py b/playground/agents/use_cases/finance/financial_agent_gpt4o_mini.py
new file mode 100644
index 00000000..52d8329c
--- /dev/null
+++ b/playground/agents/use_cases/finance/financial_agent_gpt4o_mini.py
@@ -0,0 +1,48 @@
+import os
+
+from swarms import Agent, OpenAIChat
+from swarms.prompts.finance_agent_sys_prompt import (
+ FINANCIAL_AGENT_SYS_PROMPT,
+)
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ # interactive=True, # Set to False to disable interactive mode
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ interactive=True,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+agent.run("What are the best states to register a C CORP in?")
diff --git a/playground/agents/use_cases/finance/first_agent_example.py b/playground/agents/use_cases/finance/first_agent_example.py
new file mode 100644
index 00000000..efd4310f
--- /dev/null
+++ b/playground/agents/use_cases/finance/first_agent_example.py
@@ -0,0 +1,46 @@
+import os
+from swarms import Agent, Anthropic
+from swarms.prompts.finance_agent_sys_prompt import (
+ FINANCIAL_AGENT_SYS_PROMPT,
+)
+from swarms.utils.data_to_text import data_to_text
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ agent_description="Agent creates ",
+ llm=Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY")),
+ max_loops="auto",
+ autosave=True,
+ # dynamic_temperature_enabled=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ # interactive=True, # Set to False to disable interactive mode
+ dynamic_temperature_enabled=True,
+ saved_state_path="finance_agent.json",
+ # tools=[Add your functions here# ],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+contract = data_to_text("your_contract_pdf.pdf")
+
+agent.run(
+ f"Analyze the following contract and give me a full summary: {contract}"
+)
diff --git a/playground/agents/use_cases/finance/main.py b/playground/agents/use_cases/finance/main.py
new file mode 100644
index 00000000..9961c4b5
--- /dev/null
+++ b/playground/agents/use_cases/finance/main.py
@@ -0,0 +1,113 @@
+import os
+
+import requests
+
+from swarms import Agent, OpenAIChat
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+
+def fetch_financial_news(
+ query: str = "Nvidia news", num_articles: int = 5
+) -> str:
+ """
+ Fetches financial news from the Google News API and returns a formatted string of the top news.
+
+ Args:
+ api_key (str): Your Google News API key.
+ query (str): The query term to search for news. Default is "financial".
+ num_articles (int): The number of top articles to fetch. Default is 5.
+
+ Returns:
+ str: A formatted string of the top financial news articles.
+
+ Raises:
+ ValueError: If the API response is invalid or there are no articles found.
+ requests.exceptions.RequestException: If there is an error with the request.
+ """
+ url = "https://newsapi.org/v2/everything"
+ params = {
+ "q": query,
+ "apiKey": "ceabc81a7d8f45febfedadb27177f3a3",
+ "pageSize": num_articles,
+ "sortBy": "relevancy",
+ }
+
+ try:
+ response = requests.get(url, params=params)
+ response.raise_for_status()
+ data = response.json()
+
+ if "articles" not in data or len(data["articles"]) == 0:
+ raise ValueError("No articles found or invalid API response.")
+
+ articles = data["articles"]
+ formatted_articles = []
+
+ for i, article in enumerate(articles, start=1):
+ title = article.get("title", "No Title")
+ description = article.get("description", "No Description")
+ url = article.get("url", "No URL")
+ formatted_articles.append(
+ f"{i}. {title}\nDescription: {description}\nRead more: {url}\n"
+ )
+
+ return "\n".join(formatted_articles)
+
+ except requests.exceptions.RequestException as e:
+ print(f"Request Error: {e}")
+ raise
+ except ValueError as e:
+ print(f"Value Error: {e}")
+ raise
+
+
+# # Example usage:
+# api_key = "ceabc81a7d8f45febfedadb27177f3a3"
+# print(fetch_financial_news(api_key))
+
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Financial-Analysis-Agent",
+ # system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+ llm=model,
+ max_loops=2,
+ autosave=True,
+ # dynamic_temperature_enabled=True,
+ dashboard=False,
+ verbose=True,
+ streaming_on=True,
+ # interactive=True, # Set to False to disable interactive mode
+ dynamic_temperature_enabled=True,
+ saved_state_path="finance_agent.json",
+ tools=[fetch_financial_news],
+ # stopping_token="Stop!",
+ # interactive=True,
+ # docs_folder="docs", # Enter your folder name
+ # pdf_path="docs/finance_agent.pdf",
+ # sop="Calculate the profit for a company.",
+ # sop_list=["Calculate the profit for a company."],
+ user_name="swarms_corp",
+ # # docs=
+ # # docs_folder="docs",
+ retry_attempts=3,
+ # context_length=1000,
+ # tool_schema = dict
+ context_length=200000,
+ # tool_schema=
+ # tools
+ # agent_ops_on=True,
+ # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+# Run the agent
+response = agent("What are the latest financial news on Nvidia?")
+print(response)
diff --git a/playground/agents/use_cases/finance/openai_model.py b/playground/agents/use_cases/finance/openai_model.py
new file mode 100644
index 00000000..2f8ac76e
--- /dev/null
+++ b/playground/agents/use_cases/finance/openai_model.py
@@ -0,0 +1,28 @@
+from swarms import OpenAIChat, Agent
+import os
+
+api_key = os.getenv("OPENAI_API_KEY")
+
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+ api_key=api_key,
+ model_name="gpt-4o-mini",
+ temperature=0.1,
+ max_tokens=4000,
+)
+
+# Agent
+agent = Agent(
+ agent_name="Non-Profit Incorporation Agent",
+ llm=model,
+ system_prompt="I am an AI assistant that helps you incorporate a non-profit organization. I can provide information on the best states to incorporate a non-profit in, the steps to incorporate a non-profit, and answer any other questions you may have about non-profit incorporation.",
+ max_loops="auto",
+ interactive=True,
+ streaming_on=True,
+)
+
+
+# Run
+response = agent("What's the best state to incorporate a non profit in?")
+print(response)
diff --git a/playground/agents/use_cases/kyle_hackathon.py b/playground/agents/use_cases/kyle_hackathon.py
new file mode 100644
index 00000000..36fcfcd2
--- /dev/null
+++ b/playground/agents/use_cases/kyle_hackathon.py
@@ -0,0 +1,89 @@
+import os
+
+from dotenv import load_dotenv
+
+from swarms import Agent, OpenAIChat
+from swarms.agents.multion_agent import MultiOnAgent
+from swarms_memory import ChromaDB
+from swarms import tool
+from swarms.tools.prebuilt.code_interpreter import (
+ SubprocessCodeInterpreter,
+)
+
+# Load the environment variables
+load_dotenv()
+
+
+# Memory
+chroma_db = ChromaDB()
+
+
+# MultiOn tool
+@tool
+def multion_tool(
+ task: str,
+ api_key: str = os.environ.get("MULTION_API_KEY"),
+):
+ """
+ Executes a task using the MultiOnAgent.
+
+ Args:
+ task (str): The task to be executed.
+ api_key (str, optional): The API key for the MultiOnAgent. Defaults to the value of the MULTION_API_KEY environment variable.
+
+ Returns:
+ The result of the task execution.
+ """
+ multion = MultiOnAgent(multion_api_key=api_key)
+ return multion(task)
+
+
+# Execute the interpreter tool
+@tool
+def execute_interpreter_tool(
+ code: str,
+):
+ """
+ Executes a single command using the interpreter.
+
+ Args:
+ code (str): The command to be executed.
+
+ Returns:
+ str: The output produced by executing the code.
+ """
+ interpreter = SubprocessCodeInterpreter(debug_mode=True)
+ out = interpreter.run(code)
+ return out
+
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(
+ temperature=0.5,
+ openai_api_key=api_key,
+)
+
+
+# Initialize the workflow
+agent = Agent(
+ agent_name="Research Agent",
+ agent_description="An agent that performs research tasks.",
+ system_prompt="Perform a research task.",
+ llm=llm,
+ max_loops=1,
+ dashboard=True,
+ # tools=[multion_tool, execute_interpreter_tool],
+ verbose=True,
+ long_term_memory=chroma_db,
+ stopping_token="done",
+)
+
+# Run the workflow on a task
+out = agent.run(
+ "Generate a 10,000 word blog on health and wellness, and say done"
+ " when you are done"
+)
+print(out)
diff --git a/playground/agents/use_cases/multi_modal/multi_modal_auto_agent_example.py b/playground/agents/use_cases/multi_modal/multi_modal_auto_agent_example.py
new file mode 100644
index 00000000..65f8fa2b
--- /dev/null
+++ b/playground/agents/use_cases/multi_modal/multi_modal_auto_agent_example.py
@@ -0,0 +1,35 @@
+# Description: This is an example of how to use the Agent class to run a multi-modal workflow
+import os
+
+from dotenv import load_dotenv
+
+from swarms import Agent, GPT4VisionAPI
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = GPT4VisionAPI(
+ openai_api_key=api_key,
+ max_tokens=500,
+)
+
+# Initialize the language model
+task = "What is the color of the object?"
+img = "images/swarms.jpeg"
+
+## Initialize the workflow
+agent = Agent(
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=True,
+ multi_modal=True,
+)
+
+# Run the workflow on a task
+out = agent.run(task=task, img=img)
+print(out)
diff --git a/playground/agents/use_cases/multi_modal/multi_modal_example.py b/playground/agents/use_cases/multi_modal/multi_modal_example.py
new file mode 100644
index 00000000..1235e7ac
--- /dev/null
+++ b/playground/agents/use_cases/multi_modal/multi_modal_example.py
@@ -0,0 +1,35 @@
+import os
+from dotenv import load_dotenv
+from swarms import GPT4VisionAPI, Agent
+
+# Load the environment variables
+load_dotenv()
+
+
+# Initialize the language model
+llm = GPT4VisionAPI(
+ openai_api_key=os.environ.get("OPENAI_API_KEY"),
+ max_tokens=500,
+)
+
+# Initialize the task
+task = (
+ "Analyze this image of an assembly line and identify any issues such as"
+ " misaligned parts, defects, or deviations from the standard assembly"
+ " process. IF there is anything unsafe in the image, explain why it is"
+ " unsafe and how it could be improved."
+)
+img = "assembly_line.jpg"
+
+## Initialize the workflow
+agent = Agent(
+ agent_name="Multi-ModalAgent",
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=True,
+ multi_modal=True,
+)
+
+# Run the workflow on a task
+agent.run(task, img)
diff --git a/playground/agents/use_cases/multi_modal/multi_modal_flow_example.py b/playground/agents/use_cases/multi_modal/multi_modal_flow_example.py
new file mode 100644
index 00000000..51acad26
--- /dev/null
+++ b/playground/agents/use_cases/multi_modal/multi_modal_flow_example.py
@@ -0,0 +1,13 @@
+from swarms import GPT4VisionAPI, Agent
+
+llm = GPT4VisionAPI()
+
+agent = Agent(
+ max_loops="auto",
+ llm=llm,
+)
+
+agent.run(
+ task="Describe this image in a few sentences: ",
+ img="https://unsplash.com/photos/0pIC5ByPpZY",
+)
diff --git a/playground/agents/use_cases/multi_modal/multi_modal_rag_agent.py b/playground/agents/use_cases/multi_modal/multi_modal_rag_agent.py
new file mode 100644
index 00000000..c309d60a
--- /dev/null
+++ b/playground/agents/use_cases/multi_modal/multi_modal_rag_agent.py
@@ -0,0 +1,81 @@
+# Importing necessary modules
+import os
+
+from dotenv import load_dotenv
+
+from swarms import Agent, OpenAIChat
+from swarms_memory import ChromaDB
+from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT
+from swarms import tool
+
+# Loading environment variables from .env file
+load_dotenv()
+
+# Getting the Gemini API key from environment variables
+gemini_api_key = os.getenv("GEMINI_API_KEY")
+openai_api_key = os.getenv("OPENAI_API_KEY")
+
+llm = OpenAIChat(
+ openai_api_key=openai_api_key,
+ max_tokens=1000,
+ temperature=0.2,
+)
+
+# Making an instance of the ChromaDB class
+memory = ChromaDB(
+ metric="cosine",
+ n_results=3,
+ multimodal=True,
+ # docs_folder="images",
+ output_dir="results",
+)
+
+
+# Defining tool by creating a function and wrapping it with the @tool decorator and
+# providing the necessary parameters and docstrings to show the usage of the tool.
+@tool
+def make_new_file(file: str, content: str):
+ """
+ Make a new file.
+
+ This function creates a new file with the given name.
+
+ Parameters:
+ file (str): The name of the file to be created.
+ content (str): The text written into the file.
+
+ Returns: None. The file is written to disk as a side effect.
+ """
+ with open(file, "w") as f:
+ f.write(f"{content}")
+
+
+# Initializing the agent with the Gemini instance and other parameters
+agent = Agent(
+ llm=llm,
+ agent_name="Multi-Modal RAG Agent",
+ agent_description=(
+ "This agent fuses together the capabilities of Gemini and"
+ " Visual Chain of Thought to answer questions based on the"
+ " input image."
+ ),
+ max_loops="auto",
+ autosave=True,
+ sop=VISUAL_CHAIN_OF_THOUGHT,
+ verbose=True,
+ # tools=[make_new_file],
+ long_term_memory=memory,
+)
+
+
+# Defining the task and image path
+task = (
+ "What is the content of this image, return exactly what you see"
+ " in the image."
+)
+img = "images/Screenshot_48.png"
+
+
+# Running the agent with the specified task and image
+out = agent.run(task=task, img=img)
+print(out)
diff --git a/playground/agents/use_cases/multi_modal/new_agent_tool_system.py b/playground/agents/use_cases/multi_modal/new_agent_tool_system.py
new file mode 100644
index 00000000..62f46678
--- /dev/null
+++ b/playground/agents/use_cases/multi_modal/new_agent_tool_system.py
@@ -0,0 +1,77 @@
+"""
+
+
+tool decorated func [search_api] -> agent which parses the docs of the tool func
+-> injected into prompt -> agent will output json containing tool usage -> agent output will be parsed -> tool executed
+-> terminal response can be returned to agent for self-healing
+
+
+"""
+
+import os
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent struct
+from swarms import Agent, OpenAIChat
+
+# Load the environment variables
+load_dotenv()
+
+
+# Define a tool
+def search_api(query: str, description: str):
+ """Search the web for the query
+
+ Args:
+ query (str): _description_
+
+ Returns:
+ _type_: _description_
+ """
+ return f"Search results for {query}"
+
+
+def weather_api(
+ query: str,
+):
+ """_summary_
+
+ Args:
+ query (str): _description_
+ """
+ print(f"Getting the weather for {query}")
+
+
+def rapid_api(query: str):
+ """_summary_
+
+ Args:
+ query (str): _description_
+ """
+ print(f"Getting the weather for {query}")
+
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(
+ temperature=0.5,
+)
+
+
+## Initialize the workflow
+agent = Agent(
+ agent_name="Research Agent",
+ llm=llm,
+ max_loops=3,
+ dashboard=True,
+ tools=[search_api, weather_api, rapid_api],
+ interactive=True,
+ execute_tool=True,
+)
+
+# Run the workflow on a task
+out = agent.run("Use the weather tool in Miami")
+print(out)
diff --git a/playground/agents/use_cases/research/new_perplexity_agent.py b/playground/agents/use_cases/research/new_perplexity_agent.py
new file mode 100644
index 00000000..272041de
--- /dev/null
+++ b/playground/agents/use_cases/research/new_perplexity_agent.py
@@ -0,0 +1,40 @@
+from swarms import Agent
+from swarms.models.llama3_hosted import llama3Hosted
+from swarms_memory import ChromaDB
+from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api
+
+# Define the research system prompt
+research_system_prompt = """
+Research Agent LLM Prompt: Summarizing Sources and Content
+Objective: Your task is to summarize the provided sources and the content within those sources. The goal is to create concise, accurate, and informative summaries that capture the key points of the original content.
+Instructions:
+1. Identify Key Information: ...
+2. Summarize Clearly and Concisely: ...
+3. Preserve Original Meaning: ...
+4. Include Relevant Details: ...
+5. Structure: ...
+"""
+
+# Initialize memory
+memory = ChromaDB(output_dir="research_base", n_results=2)
+
+# Initialize the LLM
+llm = llama3Hosted(temperature=0.2, max_tokens=3500)
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Research Agent",
+ system_prompt=research_system_prompt,
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ interactive=True,
+ long_term_memory=memory,
+ tools=[fetch_web_articles_bing_api],
+)
+
+# Define the task for the agent
+task = "What is the impact of climate change on biodiversity?"
+out = agent.run(task)
+print(out)
diff --git a/playground/agents/use_cases/research/perplexity_agent.py b/playground/agents/use_cases/research/perplexity_agent.py
new file mode 100644
index 00000000..0faab2cf
--- /dev/null
+++ b/playground/agents/use_cases/research/perplexity_agent.py
@@ -0,0 +1,108 @@
+"""
+$ pip install swarms
+
+- Add docs into the database
+- Use better llm
+- use better prompts [System and SOPs]
+- Use a open source model like Command R
+- Better SOPS ++ System Prompts
+-
+"""
+
+from swarms import Agent, OpenAIChat
+from swarms_memory import ChromaDB
+from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Let's create a text file with the provided prompt.
+
+research_system_prompt = """
+Research Agent LLM Prompt: Summarizing Sources and Content
+
+Objective:
+Your task is to summarize the provided sources and the content within those sources. The goal is to create concise, accurate, and informative summaries that capture the key points of the original content.
+
+Instructions:
+
+1. Identify Key Information:
+ - Extract the most important information from each source. Focus on key facts, main ideas, significant arguments, and critical data.
+
+2. Summarize Clearly and Concisely:
+ - Use clear and straightforward language. Avoid unnecessary details and keep the summary concise.
+ - Ensure that the summary is coherent and easy to understand.
+
+3. Preserve Original Meaning:
+ - While summarizing, maintain the original meaning and intent of the content. Do not omit essential information that changes the context or understanding.
+
+4. Include Relevant Details:
+ - Mention the source title, author, publication date, and any other relevant details that provide context.
+
+5. Structure:
+ - Begin with a brief introduction to the source.
+ - Follow with a summary of the main content.
+ - Conclude with any significant conclusions or implications presented in the source.
+
+"""
+
+
+# Initialize
+memory = ChromaDB(
+ output_dir="research_base",
+ n_results=2,
+)
+
+
+llm = OpenAIChat(
+ temperature=0.2,
+ max_tokens=3500,
+ openai_api_key=os.getenv("OPENAI_API_KEY"),
+)
+
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Research Agent",
+ system_prompt=research_system_prompt,
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ interactive=True,
+ long_term_memory=memory,
+ # tools=[fetch_web_articles_bing_api],
+)
+
+
+def perplexity_agent(task: str = None, *args, **kwargs):
+ """
+ This function takes a task as input and uses the Bing API to fetch web articles related to the task.
+ It then combines the task and the fetched articles as prompts and runs them through an agent.
+ The agent generates a response based on the prompts and returns it.
+
+ Args:
+ task (str): The task for which web articles need to be fetched.
+
+ Returns:
+ str: The response generated by the agent.
+ """
+ out = fetch_web_articles_bing_api(
+ task,
+ subscription_key=os.getenv("BING_API_KEY"),
+ )
+
+ # Sources
+ sources = [task, out]
+ sources_prompts = "".join(sources)
+
+ # Run a question
+ agent_response = agent.run(sources_prompts)
+ return agent_response
+
+
+out = perplexity_agent(
+ "What are the indian food restaurant names in standford university avenue? What are their cost ratios"
+)
+print(out)
diff --git a/playground/agents/use_cases/security/perimeter_defense_agent.py b/playground/agents/use_cases/security/perimeter_defense_agent.py
new file mode 100644
index 00000000..d235fa22
--- /dev/null
+++ b/playground/agents/use_cases/security/perimeter_defense_agent.py
@@ -0,0 +1,72 @@
+import os
+
+from dotenv import load_dotenv
+
+import swarms.prompts.security_team as stsp
+from swarms.models import GPT4VisionAPI
+from swarms.structs import Agent
+
+# Load environment variables and initialize the Vision API
+load_dotenv()
+api_key = os.getenv("OPENAI_API_KEY")
+
+llm = GPT4VisionAPI(openai_api_key=api_key)
+
+# Image for analysis
+img = "bank_robbery.jpg"
+
+# Initialize agents with respective prompts for security tasks
+crowd_analysis_agent = Agent(
+ llm=llm,
+ sop=stsp.CROWD_ANALYSIS_AGENT_PROMPT,
+ max_loops=1,
+ multi_modal=True,
+)
+
+weapon_detection_agent = Agent(
+ llm=llm,
+ sop=stsp.WEAPON_DETECTION_AGENT_PROMPT,
+ max_loops=1,
+ multi_modal=True,
+)
+
+surveillance_monitoring_agent = Agent(
+ llm=llm,
+ sop=stsp.SURVEILLANCE_MONITORING_AGENT_PROMPT,
+ max_loops=1,
+ multi_modal=True,
+)
+
+emergency_response_coordinator = Agent(
+ llm=llm,
+ sop=stsp.EMERGENCY_RESPONSE_COORDINATOR_PROMPT,
+ max_loops=1,
+ multi_modal=True,
+)
+
+# Run agents with respective tasks on the same image
+crowd_analysis = crowd_analysis_agent.run(
+ "Analyze the crowd dynamics in the scene", img
+)
+
+weapon_detection_analysis = weapon_detection_agent.run(
+ "Inspect the scene for any potential threats", img
+)
+
+surveillance_monitoring_analysis = surveillance_monitoring_agent.run(
+ "Monitor the overall scene for unusual activities", img
+)
+
+emergency_response_analysis = emergency_response_coordinator.run(
+ "Develop a response plan based on the scene analysis", img
+)
+
+# Process and output results for each task
+# Example output (uncomment to use):
+print(f"Crowd Analysis: {crowd_analysis}")
+print(f"Weapon Detection Analysis: {weapon_detection_analysis}")
+print(
+ "Surveillance Monitoring Analysis:"
+ f" {surveillance_monitoring_analysis}"
+)
+print(f"Emergency Response Analysis: {emergency_response_analysis}")
diff --git a/playground/agents/various_models/basic_agent_with_azure_openai.py b/playground/agents/various_models/basic_agent_with_azure_openai.py
new file mode 100644
index 00000000..76135a9f
--- /dev/null
+++ b/playground/agents/various_models/basic_agent_with_azure_openai.py
@@ -0,0 +1,14 @@
+from swarms import Agent, AzureOpenAI
+
+## Initialize the workflow
+agent = Agent(
+ llm=AzureOpenAI(),
+ max_loops="auto",
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+)
+
+# Run the workflow on a task
+agent("Understand the risk profile of this account")
diff --git a/playground/agents/various_models/custom_model_with_agent.py b/playground/agents/various_models/custom_model_with_agent.py
new file mode 100644
index 00000000..c0511bec
--- /dev/null
+++ b/playground/agents/various_models/custom_model_with_agent.py
@@ -0,0 +1,31 @@
+from swarms import Agent
+from swarms.models.base_llm import BaseLLM
+
+
+# Define a custom LLM class
+class ExampleLLM(BaseLLM):
+ def __init__(self):
+ pass
+
+ def run(self, task: str, *args, **kwargs):
+ # Your LLM logic here
+ pass
+
+
+# Initialize the workflow
+agent = Agent(
+ llm=ExampleLLM(), # Instantiate the ExampleLLM class
+ max_loops="auto", # Set the maximum number of loops to "auto"
+ autosave=True, # Enable autosave feature
+ dashboard=False, # Disable the dashboard
+ streaming_on=True, # Enable streaming
+ verbose=True, # Enable verbose mode
+ stopping_token="", # Set the stopping token to ""
+ interactive=True, # Enable interactive mode
+)
+
+# Run the workflow on a task
+agent(
+ "Generate a transcript for a youtube video on what swarms are!" # Specify the task
+ " Output a token when done." # Specify the stopping condition
+)
diff --git a/playground/agents/various_models/example_agent.py b/playground/agents/various_models/example_agent.py
new file mode 100644
index 00000000..e96fa12c
--- /dev/null
+++ b/playground/agents/various_models/example_agent.py
@@ -0,0 +1,35 @@
+import os
+import sys
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent struct
+from swarms import OpenAIChat, Agent
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(
+ temperature=0.5,
+ model_name="gpt-4",
+ openai_api_key=api_key,
+ max_tokens=4000,
+)
+
+
+print(
+ f"this is a test msg for stdout and stderr: {sys.stdout},"
+ f" {sys.stderr}"
+)
+
+## Initialize the workflow
+agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
+
+# Run the workflow on a task
+out = agent.run("Generate a 10,000 word blog on health and wellness.")
+
+print(out)
diff --git a/playground/agents/various_models/llama3_agent.py b/playground/agents/various_models/llama3_agent.py
new file mode 100644
index 00000000..4a204188
--- /dev/null
+++ b/playground/agents/various_models/llama3_agent.py
@@ -0,0 +1,27 @@
+import os
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent struct
+from swarms import Agent, HuggingfaceLLM
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = HuggingfaceLLM(model_id="meta-llama/Meta-Llama-3-8B").cuda()
+
+## Initialize the workflow
+agent = Agent(
+ llm=llm,
+ max_loops="auto",
+ autosave=True,
+ dashboard=True,
+ interactive=True,
+)
+
+# Run the workflow on a task
+agent.run("Generate a 10,000 word blog on health and wellness.")
diff --git a/playground/artifacts/main.py b/playground/artifacts/main.py
new file mode 100644
index 00000000..73302b9c
--- /dev/null
+++ b/playground/artifacts/main.py
@@ -0,0 +1,17 @@
+from swarms import Artifact
+
+# Example usage
+artifact = Artifact(file_path="example.txt", file_type=".txt")
+artifact.create("Initial content")
+artifact.edit("First edit")
+artifact.edit("Second edit")
+artifact.save()
+
+# Export to JSON
+artifact.export_to_json("artifact.json")
+
+# Import from JSON
+imported_artifact = Artifact.import_from_json("artifact.json")
+
+# # Get metrics
+print(artifact.get_metrics())
diff --git a/playground/collabs/swarms_example.ipynb b/playground/collabs/swarms_example.ipynb
new file mode 100644
index 00000000..c0f52ed1
--- /dev/null
+++ b/playground/collabs/swarms_example.ipynb
@@ -0,0 +1,1487 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "cs5RHepmhkEh"
+ },
+ "outputs": [],
+ "source": [
+ "!pip3 install -U swarms python-dotenv"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "-d9k3egzgp2_"
+ },
+ "source": [
+ "Copied from the repo, example.py\n",
+ "Enter your OpenAI API key here."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A basic example of how to use the OpenAI API to generate text."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "# Import the OpenAIChat model and the Agent struct\n",
+ "from swarms import Agent, OpenAIChat\n",
+ "\n",
+ "# Load the environment variables\n",
+ "load_dotenv()\n",
+ "\n",
+ "# Get the API key from the environment\n",
+ "api_key = os.environ.get(\"OPENAI_API_KEY\")\n",
+ "\n",
+ "# Initialize the language model\n",
+ "llm = OpenAIChat(\n",
+ " temperature=0.5, openai_api_key=api_key, max_tokens=4000\n",
+ ")\n",
+ "\n",
+ "\n",
+ "## Initialize the workflow\n",
+ "agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)\n",
+ "\n",
+ "# Run the workflow on a task\n",
+ "agent.run(\"Generate a 10,000 word blog on health and wellness.\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "6VtgQ0F4BNc-"
+ },
+ "source": [
+ "Look at the log, which may be empty."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "RqL5LL3xBLWR"
+ },
+ "outputs": [],
+ "source": [
+ "!cat errors.txt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Agent with Long Term Memory**\n",
+ "\n",
+ "```Agent``` equipped with quasi-infinite long term memory. Great for long document understanding, analysis, and retrieval."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from swarms import Agent, OpenAIChat\n",
+ "from swarms_memory import ChromaDB\n",
+ "\n",
+ "# Making an instance of the ChromaDB class\n",
+ "memory = ChromaDB(\n",
+ " metric=\"cosine\",\n",
+ " n_results=3,\n",
+ " output_dir=\"results\",\n",
+ " docs_folder=\"docs\",\n",
+ ")\n",
+ "\n",
+ "# Initializing the agent with the OpenAI instance and other parameters\n",
+ "agent = Agent(\n",
+ " agent_name=\"Covid-19-Chat\",\n",
+ " agent_description=(\n",
+ " \"This agent provides information about COVID-19 symptoms.\"\n",
+ " ),\n",
+ " llm=OpenAIChat(),\n",
+ " max_loops=\"auto\",\n",
+ " autosave=True,\n",
+ " verbose=True,\n",
+ " long_term_memory=memory,\n",
+ " stopping_condition=\"finish\",\n",
+ ")\n",
+ "\n",
+ "# Defining the task and image path\n",
+ "task = (\"What are the symptoms of COVID-19?\",)\n",
+ "\n",
+ "# Running the agent with the specified task and image\n",
+ "out = agent.run(task)\n",
+ "print(out)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**```Agent``` with Long Term Memory ++ Tools!**\n",
+ "An LLM equipped with long term memory and tools, a full stack agent capable of automating all and any digital tasks given a good prompt."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# !pip install swarms-memory\n",
+ "from swarms import Agent, OpenAIChat, tool\n",
+ "from swarms_memory import ChromaDB\n",
+ "\n",
+ "# Making an instance of the ChromaDB class\n",
+ "memory = ChromaDB(\n",
+ " metric=\"cosine\",\n",
+ " n_results=3,\n",
+ " output_dir=\"results\",\n",
+ " docs_folder=\"docs\",\n",
+ ")\n",
+ "\n",
+ "# Initialize a tool\n",
+ "@tool\n",
+ "def search_api(query: str):\n",
+ " # Add your logic here\n",
+ " return query\n",
+ "\n",
+ "# Initializing the agent with the OpenAI instance and other parameters\n",
+ "agent = Agent(\n",
+ " agent_name=\"Covid-19-Chat\",\n",
+ " agent_description=(\n",
+ " \"This agent provides information about COVID-19 symptoms.\"\n",
+ " ),\n",
+ " llm=OpenAIChat(),\n",
+ " max_loops=\"auto\",\n",
+ " autosave=True,\n",
+ " verbose=True,\n",
+ " long_term_memory=memory,\n",
+ " stopping_condition=\"finish\",\n",
+ " tools=[search_api],\n",
+ ")\n",
+ "\n",
+ "# Defining the task and image path\n",
+ "task = (\"What are the symptoms of COVID-19?\",)\n",
+ "\n",
+ "# Running the agent with the specified task and image\n",
+ "out = agent.run(task)\n",
+ "print(out)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Simple Conversational Agent**\n",
+ "A Plug in and play conversational agent with GPT4, Mixytral, or any of our models\n",
+ "\n",
+ " - Reliable conversational structure to hold messages together with dynamic handling for long context conversations and interactions with auto chunking\n",
+ "\n",
+ " - Reliable, this simple system will always provide responses you want.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from swarms import Agent, Anthropic\n",
+ "\n",
+ "\n",
+ "## Initialize the workflow\n",
+ "agent = Agent(\n",
+ " agent_name=\"Transcript Generator\",\n",
+ " agent_description=(\n",
+ " \"Generate a transcript for a youtube video on what swarms\"\n",
+ " \" are!\"\n",
+ " ),\n",
+ " llm=Anthropic(),\n",
+ " max_loops=3,\n",
+ " autosave=True,\n",
+ " dashboard=False,\n",
+ " streaming_on=True,\n",
+ " verbose=True,\n",
+ " stopping_token=\"\",\n",
+ " interactive=True, # Set to True\n",
+ ")\n",
+ "\n",
+ "# Run the workflow on a task\n",
+ "agent(\"Generate a transcript for a youtube video on what swarms are!\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Devin**\n",
+ "\n",
+ "Implementation of Devil in less than 90 lines of code with several tools: terminal, browser, and edit files!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from swarms import Agent, Anthropic, tool\n",
+ "import subprocess\n",
+ "\n",
+ "# Model\n",
+ "llm = Anthropic(\n",
+ " temperature=0.1,\n",
+ ")\n",
+ "\n",
+ "# Tools\n",
+ "@tool\n",
+ "def terminal(\n",
+ " code: str,\n",
+ "):\n",
+ " \"\"\"\n",
+ " Run code in the terminal.\n",
+ "\n",
+ " Args:\n",
+ " code (str): The code to run in the terminal.\n",
+ "\n",
+ " Returns:\n",
+ " str: The output of the code.\n",
+ " \"\"\"\n",
+ " out = subprocess.run(\n",
+ " code, shell=True, capture_output=True, text=True\n",
+ " ).stdout\n",
+ " return str(out)\n",
+ "\n",
+ "\n",
+ "@tool\n",
+ "def browser(query: str):\n",
+ " \"\"\"\n",
+ " Search the query in the browser with the `browser` tool.\n",
+ "\n",
+ " Args:\n",
+ " query (str): The query to search in the browser.\n",
+ "\n",
+ " Returns:\n",
+ " str: The search results.\n",
+ " \"\"\"\n",
+ " import webbrowser\n",
+ "\n",
+ " url = f\"https://www.google.com/search?q={query}\"\n",
+ " webbrowser.open(url)\n",
+ " return f\"Searching for {query} in the browser.\"\n",
+ "\n",
+ "@tool\n",
+ "def create_file(file_path: str, content: str):\n",
+ " \"\"\"\n",
+ " Create a file using the file editor tool.\n",
+ "\n",
+ " Args:\n",
+ " file_path (str): The path to the file.\n",
+ " content (str): The content to write to the file.\n",
+ "\n",
+ " Returns:\n",
+ " str: The result of the file creation operation.\n",
+ " \"\"\"\n",
+ " with open(file_path, \"w\") as file:\n",
+ " file.write(content)\n",
+ " return f\"File {file_path} created successfully.\"\n",
+ "\n",
+ "@tool\n",
+ "def file_editor(file_path: str, mode: str, content: str):\n",
+ " \"\"\"\n",
+ " Edit a file using the file editor tool.\n",
+ "\n",
+ " Args:\n",
+ " file_path (str): The path to the file.\n",
+ " mode (str): The mode to open the file in.\n",
+ " content (str): The content to write to the file.\n",
+ "\n",
+ " Returns:\n",
+ " str: The result of the file editing operation.\n",
+ " \"\"\"\n",
+ " with open(file_path, mode) as file:\n",
+ " file.write(content)\n",
+ " return f\"File {file_path} edited successfully.\"\n",
+ "\n",
+ "\n",
+ "# Agent\n",
+ "agent = Agent(\n",
+ " agent_name=\"Devin\",\n",
+ " system_prompt=(\n",
+ " \"Autonomous agent that can interact with humans and other\"\n",
+ " \" agents. Be Helpful and Kind. Use the tools provided to\"\n",
+ " \" assist the user. Return all code in markdown format.\"\n",
+ " ),\n",
+ " llm=llm,\n",
+ " max_loops=\"auto\",\n",
+ " autosave=True,\n",
+ " dashboard=False,\n",
+ " streaming_on=True,\n",
+ " verbose=True,\n",
+ " stopping_token=\"\",\n",
+ " interactive=True,\n",
+ " tools=[terminal, browser, file_editor, create_file],\n",
+ " code_interpreter=True,\n",
+ " # streaming=True,\n",
+ ")\n",
+ "\n",
+ "# Run the agent\n",
+ "out = agent(\"Create a new file for a plan to take over the world.\")\n",
+ "print(out)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Agentwith Pydantic BaseModel as Output Type**\n",
+ "\n",
+ "The following is an example of an agent that intakes a pydantic basemodel and outputs it at the same time:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pydantic import BaseModel, Field\n",
+ "from swarms import Anthropic, Agent\n",
+ "\n",
+ "\n",
+ "# Initialize the schema for the person's information\n",
+ "class Schema(BaseModel):\n",
+ " name: str = Field(..., title=\"Name of the person\")\n",
+ " agent: int = Field(..., title=\"Age of the person\")\n",
+ " is_student: bool = Field(..., title=\"Whether the person is a student\")\n",
+ " courses: list[str] = Field(\n",
+ " ..., title=\"List of courses the person is taking\"\n",
+ " )\n",
+ "\n",
+ "\n",
+ "# Convert the schema to a JSON string\n",
+ "tool_schema = Schema(\n",
+ " name=\"Tool Name\",\n",
+ " agent=1,\n",
+ " is_student=True,\n",
+ " courses=[\"Course1\", \"Course2\"],\n",
+ ")\n",
+ "\n",
+ "# Define the task to generate a person's information\n",
+ "task = \"Generate a person's information based on the following schema:\"\n",
+ "\n",
+ "# Initialize the agent\n",
+ "agent = Agent(\n",
+ " agent_name=\"Person Information Generator\",\n",
+ " system_prompt=(\n",
+ " \"Generate a person's information based on the following schema:\"\n",
+ " ),\n",
+ " # Set the tool schema to the JSON string -- this is the key difference\n",
+ " tool_schema=tool_schema,\n",
+ " llm=Anthropic(),\n",
+ " max_loops=3,\n",
+ " autosave=True,\n",
+ " dashboard=False,\n",
+ " streaming_on=True,\n",
+ " verbose=True,\n",
+ " interactive=True,\n",
+ " # Set the output type to the tool schema which is a BaseModel\n",
+ " output_type=tool_schema, # or dict, or str\n",
+ " metadata_output_type=\"json\",\n",
+ " # List of schemas that the agent can handle\n",
+ " list_base_models=[tool_schema],\n",
+ " function_calling_format_type=\"OpenAI\",\n",
+ " function_calling_type=\"json\", # or soon yaml\n",
+ ")\n",
+ "\n",
+ "# Run the agent to generate the person's information\n",
+ "generated_data = agent.run(task)\n",
+ "\n",
+ "# Print the generated data\n",
+ "print(f\"Generated data: {generated_data}\")\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**```ToolAgent```**\n",
+ "\n",
+ "ToolAgent is an agent that can use tools through JSON function calling. It intakes any open source model from huggingface and is extremely modular and plug in and play. We need help adding general support to all models soon."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pydantic import BaseModel, Field\n",
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+ "\n",
+ "from swarms import ToolAgent\n",
+ "from swarms.utils.json_utils import base_model_to_json\n",
+ "\n",
+ "# Load the pre-trained model and tokenizer\n",
+ "model = AutoModelForCausalLM.from_pretrained(\n",
+ " \"databricks/dolly-v2-12b\",\n",
+ " load_in_4bit=True,\n",
+ " device_map=\"auto\",\n",
+ ")\n",
+ "tokenizer = AutoTokenizer.from_pretrained(\"databricks/dolly-v2-12b\")\n",
+ "\n",
+ "\n",
+ "# Initialize the schema for the person's information\n",
+ "class Schema(BaseModel):\n",
+ " name: str = Field(..., title=\"Name of the person\")\n",
+ " agent: int = Field(..., title=\"Age of the person\")\n",
+ " is_student: bool = Field(\n",
+ " ..., title=\"Whether the person is a student\"\n",
+ " )\n",
+ " courses: list[str] = Field(\n",
+ " ..., title=\"List of courses the person is taking\"\n",
+ " )\n",
+ "\n",
+ "\n",
+ "# Convert the schema to a JSON string\n",
+ "tool_schema = base_model_to_json(Schema)\n",
+ "\n",
+ "# Define the task to generate a person's information\n",
+ "task = (\n",
+ " \"Generate a person's information based on the following schema:\"\n",
+ ")\n",
+ "\n",
+ "# Create an instance of the ToolAgent class\n",
+ "agent = ToolAgent(\n",
+ " name=\"dolly-function-agent\",\n",
+ " description=\"Ana gent to create a child data\",\n",
+ " model=model,\n",
+ " tokenizer=tokenizer,\n",
+ " json_schema=tool_schema,\n",
+ ")\n",
+ "\n",
+ "# Run the agent to generate the person's information\n",
+ "generated_data = agent.run(task)\n",
+ "\n",
+ "# Print the generated data\n",
+ "print(f\"Generated data: {generated_data}\")\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Worker**\n",
+ "\n",
+ "The Worker is a simple all-in-one agent equipped with an LLM, tools, and RAG for low level tasks.\n",
+ "\n",
+ "β Plug in and Play LLM. Utilize any LLM from anywhere and any framework\n",
+ "\n",
+ "β Reliable RAG: Utilizes FAISS for efficient RAG but it's modular so you can use any DB.\n",
+ "\n",
+ "β Multi-Step Parallel Function Calling: Use any tool"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Importing necessary modules\n",
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "from swarms import OpenAIChat, Worker, tool\n",
+ "\n",
+ "# Loading environment variables from .env file\n",
+ "load_dotenv()\n",
+ "\n",
+ "# Retrieving the OpenAI API key from environment variables\n",
+ "api_key = os.getenv(\"OPENAI_API_KEY\")\n",
+ "\n",
+ "\n",
+ "# Create a tool\n",
+ "@tool\n",
+ "def search_api(query: str):\n",
+ " pass\n",
+ "\n",
+ "\n",
+ "# Creating a Worker instance\n",
+ "worker = Worker(\n",
+ " name=\"My Worker\",\n",
+ " role=\"Worker\",\n",
+ " human_in_the_loop=False,\n",
+ " tools=[search_api],\n",
+ " temperature=0.5,\n",
+ " llm=OpenAIChat(openai_api_key=api_key),\n",
+ ")\n",
+ "\n",
+ "# Running the worker with a prompt\n",
+ "out = worker.run(\"Hello, how are you? Create an image of how your are doing!\")\n",
+ "\n",
+ "# Printing the output\n",
+ "print(out)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**```SequentialWorkflow```**\n",
+ "\n",
+ "Sequential Workflow enables you to sequentially execute tasks with Agent and then pass the output into the next agent and onwards until you have specified your max loops. ```SequentialWorkflow``` is wonderful for real-world business tasks like sending emails, summarizing documents, and analyzing data.\n",
+ "\n",
+ "β Save and Restore Workflow states!\n",
+ "\n",
+ "β Multi-Modal Support for Visual Chaining\n",
+ "\n",
+ "β Utilizes Agent class"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "from swarms import Agent, OpenAIChat, SequentialWorkflow\n",
+ "\n",
+ "load_dotenv()\n",
+ "\n",
+ "# Load the environment variables\n",
+ "api_key = os.getenv(\"OPENAI_API_KEY\")\n",
+ "\n",
+ "\n",
+ "# Initialize the language agent\n",
+ "llm = OpenAIChat(\n",
+ " temperature=0.5, model_name=\"gpt-4\", openai_api_key=api_key, max_tokens=4000\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Initialize the agent with the language agent\n",
+ "agent1 = Agent(llm=llm, max_loops=1)\n",
+ "\n",
+ "# Create another agent for a different task\n",
+ "agent2 = Agent(llm=llm, max_loops=1)\n",
+ "\n",
+ "# Create another agent for a different task\n",
+ "agent3 = Agent(llm=llm, max_loops=1)\n",
+ "\n",
+ "# Create the workflow\n",
+ "workflow = SequentialWorkflow(max_loops=1)\n",
+ "\n",
+ "# Add tasks to the workflow\n",
+ "workflow.add(\n",
+ " agent1,\n",
+ " \"Generate a 10,000 word blog on health and wellness.\",\n",
+ ")\n",
+ "\n",
+ "# Suppose the next task takes the output of the first task as input\n",
+ "workflow.add(\n",
+ " agent2,\n",
+ " \"Summarize the generated blog\",\n",
+ ")\n",
+ "\n",
+ "# Run the workflow\n",
+ "workflow.run()\n",
+ "\n",
+ "# Output the results\n",
+ "for task in workflow.tasks:\n",
+ " print(f\"Task: {task.description}, Result: {task.result}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**```ConcurrentWorkflow```**\n",
+ "\n",
+ "```ConcurrentWorkflow``` runs all the tasks all at the same time with the inputs you give it!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "from swarms import Agent, ConcurrentWorkflow, OpenAIChat, Task\n",
+ "\n",
+ "# Load environment variables from .env file\n",
+ "load_dotenv()\n",
+ "\n",
+ "# Load environment variables\n",
+ "llm = OpenAIChat(openai_api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
+ "agent = Agent(llm=llm, max_loops=1)\n",
+ "\n",
+ "# Create a workflow\n",
+ "workflow = ConcurrentWorkflow(max_workers=5)\n",
+ "\n",
+ "# Create tasks\n",
+ "task1 = Task(agent, \"What's the weather in miami\")\n",
+ "task2 = Task(agent, \"What's the weather in new york\")\n",
+ "task3 = Task(agent, \"What's the weather in london\")\n",
+ "\n",
+ "# Add tasks to the workflow\n",
+ "workflow.add(tasks=[task1, task2, task3])\n",
+ "\n",
+ "# Run the workflow\n",
+ "workflow.run()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**```RecursiveWorkflow```**\n",
+ "\n",
+ "```RecursiveWorkflow``` will keep executing the tasks until a specific token like is located inside the text!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "from swarms import Agent, OpenAIChat, RecursiveWorkflow, Task\n",
+ "\n",
+ "# Load environment variables from .env file\n",
+ "load_dotenv()\n",
+ "\n",
+ "# Load environment variables\n",
+ "llm = OpenAIChat(openai_api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
+ "agent = Agent(llm=llm, max_loops=1)\n",
+ "\n",
+ "# Create a workflow\n",
+ "workflow = RecursiveWorkflow(stop_token=\"\")\n",
+ "\n",
+ "# Create tasks\n",
+ "task1 = Task(agent, \"What's the weather in miami\")\n",
+ "task2 = Task(agent, \"What's the weather in new york\")\n",
+ "task3 = Task(agent, \"What's the weather in london\")\n",
+ "\n",
+ "# Add tasks to the workflow\n",
+ "workflow.add(task1)\n",
+ "workflow.add(task2)\n",
+ "workflow.add(task3)\n",
+ "\n",
+ "# Run the workflow\n",
+ "workflow.run()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**```ModelParallelizer```**\n",
+ "\n",
+ "The ```ModelParallelizer``` allows you to run multiple models concurrently, comparing their outputs. This feature enables you to easily compare the performance and results of different models, helping you make informed decisions about which model to use for your specific task.\n",
+ "\n",
+ "Plug-and-Play Integration: The structure provides a seamless integration with various models, including OpenAIChat, Anthropic, Mixtral, and Gemini. You can easily plug in any of these models and start using them without the need for extensive modifications or setup."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "from swarms import Anthropic, Gemini, Mixtral, ModelParallelizer, OpenAIChat\n",
+ "\n",
+ "load_dotenv()\n",
+ "\n",
+ "# API Keys\n",
+ "anthropic_api_key = os.getenv(\"ANTHROPIC_API_KEY\")\n",
+ "openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n",
+ "gemini_api_key = os.getenv(\"GEMINI_API_KEY\")\n",
+ "\n",
+ "# Initialize the models\n",
+ "llm = OpenAIChat(openai_api_key=openai_api_key)\n",
+ "anthropic = Anthropic(anthropic_api_key=anthropic_api_key)\n",
+ "mixtral = Mixtral()\n",
+ "gemini = Gemini(gemini_api_key=gemini_api_key)\n",
+ "\n",
+ "# Initialize the parallelizer\n",
+ "llms = [llm, anthropic, mixtral, gemini]\n",
+ "parallelizer = ModelParallelizer(llms)\n",
+ "\n",
+ "# Set the task\n",
+ "task = \"Generate a 10,000 word blog on health and wellness.\"\n",
+ "\n",
+ "# Run the task\n",
+ "out = parallelizer.run(task)\n",
+ "\n",
+ "# Print the responses 1 by 1\n",
+ "for i in range(len(out)):\n",
+ " print(f\"Response from LLM {i}: {out[i]}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**```SwarmNetwork```**\n",
+ "\n",
+ "```SwarmNetwork``` provides the infrasturcture for building extremely dense and complex multi-agent applications that span across various types of agents.\n",
+ "\n",
+ "β Efficient Task Management: ```SwarmNetwork```'s intelligent agent pool and task queue management system ensures tasks are distributed evenly across agents. This leads to efficient use of resources and faster task completion.\n",
+ "\n",
+ "β Scalability: ```SwarmNetwork``` can dynamically scale the number of agents based on the number of pending tasks. This means it can handle an increase in workload by adding more agents, and conserve resources when the workload is low by reducing the number of agents.\n",
+ "\n",
+ "β Versatile Deployment Options: With ```SwarmNetwork```, each agent can be run on its own thread, process, container, machine, or even cluster. This provides a high degree of flexibility and allows for deployment that best suits the user's needs and infrastructure."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "# Import the OpenAIChat model and the Agent struct\n",
+ "from swarms import Agent, OpenAIChat, SwarmNetwork\n",
+ "\n",
+ "# Load the environment variables\n",
+ "load_dotenv()\n",
+ "\n",
+ "# Get the API key from the environment\n",
+ "api_key = os.environ.get(\"OPENAI_API_KEY\")\n",
+ "\n",
+ "# Initialize the language model\n",
+ "llm = OpenAIChat(\n",
+ " temperature=0.5,\n",
+ " openai_api_key=api_key,\n",
+ ")\n",
+ "\n",
+ "## Initialize the workflow\n",
+ "agent = Agent(llm=llm, max_loops=1, agent_name=\"Social Media Manager\")\n",
+ "agent2 = Agent(llm=llm, max_loops=1, agent_name=\" Product Manager\")\n",
+ "agent3 = Agent(llm=llm, max_loops=1, agent_name=\"SEO Manager\")\n",
+ "\n",
+ "\n",
+ "# Load the swarmnet with the agents\n",
+ "swarmnet = SwarmNetwork(\n",
+ " agents=[agent, agent2, agent3],\n",
+ ")\n",
+ "\n",
+ "# List the agents in the swarm network\n",
+ "out = swarmnet.list_agents()\n",
+ "print(out)\n",
+ "\n",
+ "# Run the workflow on a task\n",
+ "out = swarmnet.run_single_agent(\n",
+ " agent2.id, \"Generate a 10,000 word blog on health and wellness.\"\n",
+ ")\n",
+ "print(out)\n",
+ "\n",
+ "\n",
+ "# Run all the agents in the swarm network on a task\n",
+ "out = swarmnet.run_many_agents(\"Generate a 10,000 word blog on health and wellness.\")\n",
+ "print(out)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "**```Task```**\n",
+ "\n",
+ "```Task``` is a simple structure for task execution with the ```Agent```. Imagine zapier for LLM-based workflow automation\n",
+ "\n",
+ "β ```Task``` is a structure for task execution with the ```Agent```.\n",
+ "\n",
+ "β ```Tasks``` can have descriptions, scheduling, triggers, actions, conditions, dependencies, priority, and a history.\n",
+ "\n",
+ "β The ```Task``` structure allows for efficient workflow automation with LLM-based agents."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "from swarms.structs import Agent, OpenAIChat, Task\n",
+ "\n",
+ "# Load the environment variables\n",
+ "load_dotenv()\n",
+ "\n",
+ "\n",
+ "# Define a function to be used as the action\n",
+ "def my_action():\n",
+ " print(\"Action executed\")\n",
+ "\n",
+ "\n",
+ "# Define a function to be used as the condition\n",
+ "def my_condition():\n",
+ " print(\"Condition checked\")\n",
+ " return True\n",
+ "\n",
+ "\n",
+ "# Create an agent\n",
+ "agent = Agent(\n",
+ " llm=OpenAIChat(openai_api_key=os.environ[\"OPENAI_API_KEY\"]),\n",
+ " max_loops=1,\n",
+ " dashboard=False,\n",
+ ")\n",
+ "\n",
+ "# Create a task\n",
+ "task = Task(\n",
+ " description=(\n",
+ " \"Generate a report on the top 3 biggest expenses for small\"\n",
+ " \" businesses and how businesses can save 20%\"\n",
+ " ),\n",
+ " agent=agent,\n",
+ ")\n",
+ "\n",
+ "# Set the action and condition\n",
+ "task.set_action(my_action)\n",
+ "task.set_condition(my_condition)\n",
+ "\n",
+ "# Execute the task\n",
+ "print(\"Executing task...\")\n",
+ "task.run()\n",
+ "\n",
+ "# Check if the task is completed\n",
+ "if task.is_completed():\n",
+ " print(\"Task completed\")\n",
+ "else:\n",
+ " print(\"Task not completed\")\n",
+ "\n",
+ "# Output the result of the task\n",
+ "print(f\"Task result: {task.result}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**```BlocksList```**\n",
+ "\n",
+ " Modularity and Flexibility: ```BlocksList``` allows users to create custom swarms by adding or removing different classes or functions as blocks. This means users can easily tailor the functionality of their swarm to suit their specific needs.\n",
+ "\n",
+ " Ease of Management: With methods to add, remove, update, and retrieve blocks, ```BlocksList``` provides a straightforward way to manage the components of a swarm. This makes it easier to maintain and update the swarm over time.\n",
+ "\n",
+ " Enhanced Searchability: ```BlocksList``` offers methods to get blocks by various attributes such as name, type, ID, and parent-related properties. This makes it easier for users to find and work with specific blocks in a large and complex swarm.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+ "from pydantic import BaseModel\n",
+ "from swarms import BlocksList, Gemini, GPT4VisionAPI, Mixtral, OpenAI, ToolAgent\n",
+ "\n",
+ "# Load the environment variables\n",
+ "load_dotenv()\n",
+ "\n",
+ "# Get the environment variables\n",
+ "openai_api_key = os.getenv(\"OPENAI_API_KEY\")\n",
+ "gemini_api_key = os.getenv(\"GEMINI_API_KEY\")\n",
+ "\n",
+ "# Tool Agent\n",
+ "model = AutoModelForCausalLM.from_pretrained(\"databricks/dolly-v2-12b\")\n",
+ "tokenizer = AutoTokenizer.from_pretrained(\"databricks/dolly-v2-12b\")\n",
+ "\n",
+ "# Initialize the schema for the person's information\n",
+ "class Schema(BaseModel):\n",
+ " name: str = Field(..., title=\"Name of the person\")\n",
+ " agent: int = Field(..., title=\"Age of the person\")\n",
+ " is_student: bool = Field(\n",
+ " ..., title=\"Whether the person is a student\"\n",
+ " )\n",
+ " courses: list[str] = Field(\n",
+ " ..., title=\"List of courses the person is taking\"\n",
+ " )\n",
+ "\n",
+ "# Convert the schema to a JSON string\n",
+ "json_schema = base_model_to_json(Schema)\n",
+ "\n",
+ "\n",
+ "toolagent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)\n",
+ "\n",
+ "# Blocks List which enables you to build custom swarms by adding classes or functions\n",
+ "swarm = BlocksList(\n",
+ " \"SocialMediaSwarm\",\n",
+ " \"A swarm of social media agents\",\n",
+ " [\n",
+ " OpenAI(openai_api_key=openai_api_key),\n",
+ " Mixtral(),\n",
+ " GPT4VisionAPI(openai_api_key=openai_api_key),\n",
+ " Gemini(gemini_api_key=gemini_api_key),\n",
+ " ],\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Add the new block to the swarm\n",
+ "swarm.add(toolagent)\n",
+ "\n",
+ "# Remove a block from the swarm\n",
+ "swarm.remove(toolagent)\n",
+ "\n",
+ "# Update a block in the swarm\n",
+ "swarm.update(toolagent)\n",
+ "\n",
+ "# Get a block at a specific index\n",
+ "block_at_index = swarm.get(0)\n",
+ "\n",
+ "# Get all blocks in the swarm\n",
+ "all_blocks = swarm.get_all()\n",
+ "\n",
+ "# Get blocks by name\n",
+ "openai_blocks = swarm.get_by_name(\"OpenAI\")\n",
+ "\n",
+ "# Get blocks by type\n",
+ "gpt4_blocks = swarm.get_by_type(\"GPT4VisionAPI\")\n",
+ "\n",
+ "# Get blocks by ID\n",
+ "block_by_id = swarm.get_by_id(toolagent.id)\n",
+ "\n",
+ "# Get blocks by parent\n",
+ "blocks_by_parent = swarm.get_by_parent(swarm)\n",
+ "\n",
+ "# Get blocks by parent ID\n",
+ "blocks_by_parent_id = swarm.get_by_parent_id(swarm.id)\n",
+ "\n",
+ "# Get blocks by parent name\n",
+ "blocks_by_parent_name = swarm.get_by_parent_name(swarm.name)\n",
+ "\n",
+ "# Get blocks by parent type\n",
+ "blocks_by_parent_type = swarm.get_by_parent_type(type(swarm).__name__)\n",
+ "\n",
+ "# Get blocks by parent description\n",
+ "blocks_by_parent_description = swarm.get_by_parent_description(swarm.description)\n",
+ "\n",
+ "# Run the block in the swarm\n",
+ "inference = swarm.run_block(toolagent, \"Hello World\")\n",
+ "print(inference)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Majority Voting**\n",
+ "\n",
+ "Multiple-agents will evaluate an idea based off of an parsing or evaluation function. From papers like \"More agents is all you need\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from swarms import Agent, MajorityVoting, ChromaDB, Anthropic\n",
+ "\n",
+ "# Initialize the llm\n",
+ "llm = Anthropic()\n",
+ "\n",
+ "# Agents\n",
+ "agent1 = Agent(\n",
+ " llm = llm,\n",
+ " system_prompt=\"You are the leader of the Progressive Party. What is your stance on healthcare?\",\n",
+ " agent_name=\"Progressive Leader\",\n",
+ " agent_description=\"Leader of the Progressive Party\",\n",
+ " long_term_memory=ChromaDB(),\n",
+ " max_steps=1,\n",
+ ")\n",
+ "\n",
+ "agent2 = Agent(\n",
+ " llm=llm,\n",
+ " agent_name=\"Conservative Leader\",\n",
+ " agent_description=\"Leader of the Conservative Party\",\n",
+ " long_term_memory=ChromaDB(),\n",
+ " max_steps=1,\n",
+ ")\n",
+ "\n",
+ "agent3 = Agent(\n",
+ " llm=llm,\n",
+ " agent_name=\"Libertarian Leader\",\n",
+ " agent_description=\"Leader of the Libertarian Party\",\n",
+ " long_term_memory=ChromaDB(),\n",
+ " max_steps=1,\n",
+ ")\n",
+ "\n",
+ "# Initialize the majority voting\n",
+ "mv = MajorityVoting(\n",
+ " agents=[agent1, agent2, agent3],\n",
+ " output_parser=llm.majority_voting,\n",
+ " autosave=False,\n",
+ " verbose=True,\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Start the majority voting\n",
+ "mv.run(\"What is your stance on healthcare?\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#Real-World Deployment\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Multi-Agent Swarm for Logistics**\n",
+ "\n",
+ "Here's a production grade swarm ready for real-world deployment in a factory and logistics settings like warehouses. This swarm can automate 3 costly and inefficient workflows, safety checks, productivity checks, and warehouse security."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "from swarms.models import GPT4VisionAPI\n",
+ "from swarms.prompts.logistics import (\n",
+ " Efficiency_Agent_Prompt,\n",
+ " Health_Security_Agent_Prompt,\n",
+ " Productivity_Agent_Prompt,\n",
+ " Quality_Control_Agent_Prompt,\n",
+ " Safety_Agent_Prompt,\n",
+ " Security_Agent_Prompt,\n",
+ " Sustainability_Agent_Prompt,\n",
+ ")\n",
+ "from swarms.structs import Agent\n",
+ "\n",
+ "# Load ENV\n",
+ "load_dotenv()\n",
+ "api_key = os.getenv(\"OPENAI_API_KEY\")\n",
+ "\n",
+ "# GPT4VisionAPI\n",
+ "llm = GPT4VisionAPI(openai_api_key=api_key)\n",
+ "\n",
+ "# Image for analysis\n",
+ "factory_image = \"factory_image1.jpg\"\n",
+ "\n",
+ "# Initialize agents with respective prompts\n",
+ "health_security_agent = Agent(\n",
+ " llm=llm,\n",
+ " sop=Health_Security_Agent_Prompt,\n",
+ " max_loops=1,\n",
+ " multi_modal=True,\n",
+ ")\n",
+ "\n",
+ "# Quality control agent\n",
+ "quality_control_agent = Agent(\n",
+ " llm=llm,\n",
+ " sop=Quality_Control_Agent_Prompt,\n",
+ " max_loops=1,\n",
+ " multi_modal=True,\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Productivity Agent\n",
+ "productivity_agent = Agent(\n",
+ " llm=llm,\n",
+ " sop=Productivity_Agent_Prompt,\n",
+ " max_loops=1,\n",
+ " multi_modal=True,\n",
+ ")\n",
+ "\n",
+ "# Initiailize safety agent\n",
+ "safety_agent = Agent(llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True)\n",
+ "\n",
+ "# Init the security agent\n",
+ "security_agent = Agent(\n",
+ " llm=llm, sop=Security_Agent_Prompt, max_loops=1, multi_modal=True\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Initialize sustainability agent\n",
+ "sustainability_agent = Agent(\n",
+ " llm=llm,\n",
+ " sop=Sustainability_Agent_Prompt,\n",
+ " max_loops=1,\n",
+ " multi_modal=True,\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Initialize efficincy agent\n",
+ "efficiency_agent = Agent(\n",
+ " llm=llm,\n",
+ " sop=Efficiency_Agent_Prompt,\n",
+ " max_loops=1,\n",
+ " multi_modal=True,\n",
+ ")\n",
+ "\n",
+ "# Run agents with respective tasks on the same image\n",
+ "health_analysis = health_security_agent.run(\n",
+ " \"Analyze the safety of this factory\", factory_image\n",
+ ")\n",
+ "quality_analysis = quality_control_agent.run(\n",
+ " \"Examine product quality in the factory\", factory_image\n",
+ ")\n",
+ "productivity_analysis = productivity_agent.run(\n",
+ " \"Evaluate factory productivity\", factory_image\n",
+ ")\n",
+ "safety_analysis = safety_agent.run(\n",
+ " \"Inspect the factory's adherence to safety standards\",\n",
+ " factory_image,\n",
+ ")\n",
+ "security_analysis = security_agent.run(\n",
+ " \"Assess the factory's security measures and systems\",\n",
+ " factory_image,\n",
+ ")\n",
+ "sustainability_analysis = sustainability_agent.run(\n",
+ " \"Examine the factory's sustainability practices\", factory_image\n",
+ ")\n",
+ "efficiency_analysis = efficiency_agent.run(\n",
+ " \"Analyze the efficiency of the factory's manufacturing process\",\n",
+ " factory_image,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Multi Modal Autonomous Agents\n",
+ "\n",
+ "Run the agent with multiple modalities useful for various real-world tasks in manufacturing, logistics, and health."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Description: This is an example of how to use the Agent class to run a multi-modal workflow\n",
+ "import os\n",
+ "\n",
+ "from dotenv import load_dotenv\n",
+ "\n",
+ "from swarms.models.gpt4_vision_api import GPT4VisionAPI\n",
+ "from swarms.structs import Agent\n",
+ "\n",
+ "# Load the environment variables\n",
+ "load_dotenv()\n",
+ "\n",
+ "# Get the API key from the environment\n",
+ "api_key = os.environ.get(\"OPENAI_API_KEY\")\n",
+ "\n",
+ "# Initialize the language model\n",
+ "llm = GPT4VisionAPI(\n",
+ " openai_api_key=api_key,\n",
+ " max_tokens=500,\n",
+ ")\n",
+ "\n",
+ "# Initialize the task\n",
+ "task = (\n",
+ " \"Analyze this image of an assembly line and identify any issues such as\"\n",
+ " \" misaligned parts, defects, or deviations from the standard assembly\"\n",
+ " \" process. IF there is anything unsafe in the image, explain why it is\"\n",
+ " \" unsafe and how it could be improved.\"\n",
+ ")\n",
+ "img = \"assembly_line.jpg\"\n",
+ "\n",
+ "## Initialize the workflow\n",
+ "agent = Agent(\n",
+ " llm=llm, max_loops=\"auto\", autosave=True, dashboard=True, multi_modal=True\n",
+ ")\n",
+ "\n",
+ "# Run the workflow on a task\n",
+ "agent.run(task=task, img=img)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#Build your own LLMs, Agents, and Swarms!\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Swarms Compliant Model Interface**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from swarms import BaseLLM\n",
+ "\n",
+ "class vLLMLM(BaseLLM):\n",
+ " def __init__(self, model_name='default_model', tensor_parallel_size=1, *args, **kwargs):\n",
+ " super().__init__(*args, **kwargs)\n",
+ " self.model_name = model_name\n",
+ " self.tensor_parallel_size = tensor_parallel_size\n",
+ " # Add any additional initialization here\n",
+ " \n",
+ " def run(self, task: str):\n",
+ " pass\n",
+ "\n",
+ "# Example\n",
+ "model = vLLMLM(\"mistral\")\n",
+ "\n",
+ "# Run the model\n",
+ "out = model(\"Analyze these financial documents and summarize of them\")\n",
+ "print(out)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Swarms Compliant Agent Interface**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from swarms import Agent\n",
+ "\n",
+ "\n",
+ "class MyCustomAgent(Agent):\n",
+ " def __init__(self, *args, **kwargs):\n",
+ " super().__init__(*args, **kwargs)\n",
+ " \n",
+ " # Custom initialization logic\n",
+ " def custom_method(self, *args, **kwargs):\n",
+ " # Implement custom logic here\n",
+ " pass\n",
+ "\n",
+ " def run(self, task, *args, **kwargs):\n",
+ " # Customize the run method\n",
+ " response = super().run(task, *args, **kwargs)\n",
+ " # Additional custom logic\n",
+ " return response\n",
+ "\n",
+ "# Model\n",
+ "agent = MyCustomAgent()\n",
+ "\n",
+ "# Run the agent\n",
+ "out = agent(\"Analyze and summarize these financial documents: \")\n",
+ "print(out)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Compliant Interface for Multi-Agent Collaboration**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from swarms import AutoSwarm, AutoSwarmRouter, BaseSwarm\n",
+ "\n",
+ "\n",
+ "# Build your own Swarm\n",
+ "class MySwarm(BaseSwarm):\n",
+ " def __init__(self, name=\"kyegomez/myswarm\", *args, **kwargs):\n",
+ " super().__init__(*args, **kwargs)\n",
+ " self.name = name\n",
+ "\n",
+ " def run(self, task: str, *args, **kwargs):\n",
+ " # Add your multi-agent logic here\n",
+ " # agent 1\n",
+ " # agent 2\n",
+ " # agent 3\n",
+ " return \"output of the swarm\"\n",
+ "\n",
+ "\n",
+ "# Add your custom swarm to the AutoSwarmRouter\n",
+ "router = AutoSwarmRouter(\n",
+ " swarms=[MySwarm]\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Create an AutoSwarm instance\n",
+ "autoswarm = AutoSwarm(\n",
+ " name=\"kyegomez/myswarm\",\n",
+ " description=\"A simple API to build and run swarms\",\n",
+ " verbose=True,\n",
+ " router=router,\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Run the AutoSwarm\n",
+ "autoswarm.run(\"Analyze these financial data and give me a summary\")\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**``AgentRearrange``**\n",
+ "\n",
+ "Inspired by Einops and einsum, this orchestration techniques enables you to map out the relationships between various agents. For example you specify linear and sequential relationships like a -> a1 -> a2 -> a3 or concurrent relationships where the first agent will send a message to 3 agents all at once: a -> a1, a2, a3. You can customize your workflow to mix sequential and concurrent relationships"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from swarms import Agent, Anthropic, AgentRearrange \n",
+ "\n",
+ "## Initialize the workflow\n",
+ "agent = Agent(\n",
+ " agent_name=\"t\",\n",
+ " agent_description=(\n",
+ " \"Generate a transcript for a youtube video on what swarms\"\n",
+ " \" are!\"\n",
+ " ),\n",
+ " system_prompt=(\n",
+ " \"Generate a transcript for a youtube video on what swarms\"\n",
+ " \" are!\"\n",
+ " ),\n",
+ " llm=Anthropic(),\n",
+ " max_loops=1,\n",
+ " autosave=True,\n",
+ " dashboard=False,\n",
+ " streaming_on=True,\n",
+ " verbose=True,\n",
+ " stopping_token=\"\",\n",
+ ")\n",
+ "\n",
+ "agent2 = Agent(\n",
+ " agent_name=\"t1\",\n",
+ " agent_description=(\n",
+ " \"Generate a transcript for a youtube video on what swarms\"\n",
+ " \" are!\"\n",
+ " ),\n",
+ " llm=Anthropic(),\n",
+ " max_loops=1,\n",
+ " system_prompt=\"Summarize the transcript\",\n",
+ " autosave=True,\n",
+ " dashboard=False,\n",
+ " streaming_on=True,\n",
+ " verbose=True,\n",
+ " stopping_token=\"\",\n",
+ ")\n",
+ "\n",
+ "agent3 = Agent(\n",
+ " agent_name=\"t2\",\n",
+ " agent_description=(\n",
+ " \"Generate a transcript for a youtube video on what swarms\"\n",
+ " \" are!\"\n",
+ " ),\n",
+ " llm=Anthropic(),\n",
+ " max_loops=1,\n",
+ " system_prompt=\"Finalize the transcript\",\n",
+ " autosave=True,\n",
+ " dashboard=False,\n",
+ " streaming_on=True,\n",
+ " verbose=True,\n",
+ " stopping_token=\"\",\n",
+ ")\n",
+ "\n",
+ "\n",
+ "# Rearrange the agents\n",
+ "rearrange = AgentRearrange(\n",
+ " agents=[agent, agent2, agent3],\n",
+ " verbose=True,\n",
+ " # custom_prompt=\"Summarize the transcript\",\n",
+ ")\n",
+ "\n",
+ "# Run the workflow on a task\n",
+ "results = rearrange(\n",
+ " # pattern=\"t -> t1, t2 -> t2\",\n",
+ " pattern=\"t -> t1 -> t2\",\n",
+ " default_task=(\n",
+ " \"Generate a transcript for a YouTube video on what swarms\"\n",
+ " \" are!\"\n",
+ " ),\n",
+ " t=\"Generate a transcript for a YouTube video on what swarms are!\",\n",
+ " # t2=\"Summarize the transcript\",\n",
+ " # t3=\"Finalize the transcript\",\n",
+ ")\n",
+ "# print(results)\n",
+ "\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "gpuType": "T4",
+ "private_outputs": true,
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/playground/demos/account_management_swarm_workshop/account_management.py b/playground/demos/account_management_swarm_workshop/account_management.py
new file mode 100644
index 00000000..7928b954
--- /dev/null
+++ b/playground/demos/account_management_swarm_workshop/account_management.py
@@ -0,0 +1,219 @@
+# Agent that picks up your intent
+# Depending on your intent it routes you to an agent that can help you with your request.
+# Account management agent and product support agent
+# Account Management Agent --> Talk about the user, their account. Just understand the user's intent and route them to the right agent.
+
+
+from swarms import Agent
+import requests
+import json
+from swarms import BaseLLM, base_model_to_openai_function
+from pydantic import BaseModel, Field
+
+
+## Pydantic model for the tool schema
+class HASSchema(BaseModel):
+ name: str = Field(
+ ...,
+ title="Name",
+ description="The name of the agent to send the task to.",
+ )
+ task: str = Field(
+ ...,
+ title="Task",
+ description="The task to send to the agent.",
+ )
+
+
+swarm_schema = base_model_to_openai_function(HASSchema, output_str=True)
+
+ACCOUNT_MANAGEMENT_SYSTEM_PROMPT = """
+
+You are an Account Management Agent. Your primary role is to engage with users regarding their accounts. Your main tasks include understanding the user's intent, addressing their immediate needs, and routing them to the appropriate agent for further assistance. Be simple and direct in your communication.
+
+When a user contacts you, start by greeting them and asking how you can assist with their account. Listen carefully to their concerns, questions, or issues. If the user provides information that is specific to their account, acknowledge it and ask any necessary follow-up questions to clarify their needs. Ensure that you fully understand their intent before proceeding.
+
+Once you have a clear understanding of the user's request or issue, determine the best course of action. If you can resolve the issue yourself, do so efficiently. If the issue requires specialized assistance, explain to the user that you will route them to the appropriate agent who can help further. Ensure the user feels heard and understood throughout the process.
+
+Your ultimate goal is to provide a seamless and positive experience for the user by effectively managing their inquiries and directing them to the right resource for resolution. Always maintain a polite and professional tone, and ensure that the user feels supported and valued.
+
+"""
+
+
+PRODUCT_SUPPORT_QA_SYSTEM_PROMPT = """
+
+
+You are a Product Support Agent.
+Your primary role is to provide assistance to users who have questions or issues related to the product. Your main tasks include understanding the user's needs, providing accurate information, and resolving any problems they may encounter. Be clear and concise in your communication.
+
+"""
+
+
+class llama3Hosted(BaseLLM):
+ """
+ A class representing a hosted version of the Llama3 model.
+
+ Args:
+ model (str): The name or path of the Llama3 model to use.
+ temperature (float): The temperature parameter for generating responses.
+ max_tokens (int): The maximum number of tokens in the generated response.
+ system_prompt (str): The system prompt to use for generating responses.
+ *args: Variable length argument list.
+ **kwargs: Arbitrary keyword arguments.
+
+ Attributes:
+ model (str): The name or path of the Llama3 model.
+ temperature (float): The temperature parameter for generating responses.
+ max_tokens (int): The maximum number of tokens in the generated response.
+ system_prompt (str): The system prompt for generating responses.
+
+ Methods:
+ run(task, *args, **kwargs): Generates a response for the given task.
+
+ """
+
+ def __init__(
+ self,
+ model: str = "meta-llama/Meta-Llama-3-8B-Instruct",
+ temperature: float = 0.8,
+ max_tokens: int = 4000,
+ system_prompt: str = "You are a helpful assistant.",
+ *args,
+ **kwargs,
+ ):
+ super().__init__(*args, **kwargs)
+ self.model = model
+ self.temperature = temperature
+ self.max_tokens = max_tokens
+ self.system_prompt = system_prompt
+
+ def run(self, task: str, *args, **kwargs) -> str:
+ """
+ Generates a response for the given task.
+
+ Args:
+ task (str): The user's task or input.
+
+ Returns:
+ str: The generated response from the Llama3 model.
+
+ """
+ url = "http://34.204.8.31:30001/v1/chat/completions"
+
+ payload = json.dumps(
+ {
+ "model": self.model,
+ "messages": [
+ {"role": "system", "content": self.system_prompt},
+ {"role": "user", "content": task},
+ ],
+ "stop_token_ids": [128009, 128001],
+ "temperature": self.temperature,
+ "max_tokens": self.max_tokens,
+ }
+ )
+
+ headers = {"Content-Type": "application/json"}
+
+ response = requests.request(
+ "POST", url, headers=headers, data=payload
+ )
+
+ response_json = response.json()
+ assistant_message = response_json["choices"][0]["message"][
+ "content"
+ ]
+
+ return assistant_message
+
+
+def select_agent_and_send_task(name: str = None, task: str = None):
+ """
+ Select an agent and send a task to them.
+
+ Args:
+ name (str): The name of the agent to send the task to.
+ task (str): The task to send to the agent.
+
+ Returns:
+ str: The response from the agent.
+
+ """
+ if name == "Product Support Agent":
+ agent = Agent(
+ agent_name="Product Support Agent",
+ system_prompt=PRODUCT_SUPPORT_QA_SYSTEM_PROMPT,
+ llm=llama3Hosted(),
+ max_loops=2,
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ verbose=True,
+ output_type=str,
+ metadata_output_type="json",
+ function_calling_format_type="OpenAI",
+ function_calling_type="json",
+ )
+ else:
+ return "Invalid agent name. Please select 'Account Management Agent' or 'Product Support Agent'."
+
+ response = agent.run(task)
+
+ return response
+
+
+def parse_json_then_activate_agent(json_data: str):
+ """
+ Parse the JSON data and activate the appropriate agent.
+
+ Args:
+ json_data (str): The JSON data containing the agent name and task.
+
+ Returns:
+ str: The response from the agent.
+
+ """
+ try:
+ data = json.loads(json_data)
+ name = data.get("name")
+ task = data.get("task")
+
+ response = select_agent_and_send_task(name, task)
+
+ return response
+ except json.JSONDecodeError:
+ return "Invalid JSON data."
+
+
+agent = Agent(
+ agent_name="Account Management Agent",
+ system_prompt=ACCOUNT_MANAGEMENT_SYSTEM_PROMPT,
+ # sop_list=[GLOSSARY_PROMPTS, FEW_SHORT_PROMPTS],
+ # sop=list_base_models_json,
+ llm=llama3Hosted(
+ max_tokens=3000,
+ ),
+ max_loops="auto",
+ interactive=True,
+ autosave=True,
+ dashboard=False,
+ streaming_on=True,
+ # interactive=True,
+ # tools=[search_weather], # or list of tools
+ verbose=True,
+ # Set the output type to the tool schema which is a BaseModel
+ list_base_models=[HASSchema],
+ output_type=str, # or dict, or str
+ metadata_output_type="json",
+ # List of schemas that the agent can handle
+ function_calling_format_type="OpenAI",
+ function_calling_type="json", # or soon yaml
+)
+
+# Run the agent to generate the person's information
+generated_data = agent.run("I need help with my modem.")
+parse_json_then_activate_agent(generated_data)
+
+
+# Print the generated data
+print(f"Generated data: {generated_data}")
diff --git a/playground/demos/accountant_team/account_team2_example.py b/playground/demos/accountant_team/account_team2_example.py
new file mode 100644
index 00000000..6ad030a9
--- /dev/null
+++ b/playground/demos/accountant_team/account_team2_example.py
@@ -0,0 +1,85 @@
+import os
+
+from dotenv import load_dotenv
+
+from swarms.models import Anthropic, OpenAIChat
+from swarms.prompts.accountant_swarm_prompts import (
+ DECISION_MAKING_PROMPT,
+ DOC_ANALYZER_AGENT_PROMPT,
+ SUMMARY_GENERATOR_AGENT_PROMPT,
+)
+from swarms.structs import Agent
+from swarms.utils.pdf_to_text import pdf_to_text
+
+# Environment variables
+load_dotenv()
+anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
+openai_api_key = os.getenv("OPENAI_API_KEY")
+
+
+# Base llms
+llm1 = OpenAIChat(
+ openai_api_key=openai_api_key,
+ max_tokens=5000,
+)
+
+llm2 = Anthropic(
+ anthropic_api_key=anthropic_api_key,
+ max_tokens=5000,
+)
+
+
+# Agents
+doc_analyzer_agent = Agent(
+ llm=llm2,
+ sop=DOC_ANALYZER_AGENT_PROMPT,
+ max_loops=1,
+ autosave=True,
+ saved_state_path="doc_analyzer_agent.json",
+)
+summary_generator_agent = Agent(
+ llm=llm2,
+ sop=SUMMARY_GENERATOR_AGENT_PROMPT,
+ max_loops=1,
+ autosave=True,
+ saved_state_path="summary_generator_agent.json",
+)
+decision_making_support_agent = Agent(
+ llm=llm2,
+ sop=DECISION_MAKING_PROMPT,
+ max_loops=1,
+ saved_state_path="decision_making_support_agent.json",
+)
+
+
+pdf_path = "bankstatement.pdf"
+fraud_detection_instructions = "Detect fraud in the document"
+summary_agent_instructions = (
+ "Generate an actionable summary of the document with action steps"
+ " to take"
+)
+decision_making_support_agent_instructions = (
+ "Provide decision making support to the business owner:"
+)
+
+
+# Transform the pdf to text
+pdf_text = pdf_to_text(pdf_path)
+print(pdf_text)
+
+
+# Detect fraud in the document
+fraud_detection_agent_output = doc_analyzer_agent.run(
+ f"{fraud_detection_instructions}: {pdf_text}"
+)
+
+# Generate an actionable summary of the document
+summary_agent_output = summary_generator_agent.run(
+ f"{summary_agent_instructions}: {fraud_detection_agent_output}"
+)
+
+# Provide decision making support to the accountant
+decision_making_support_agent_output = decision_making_support_agent.run(
+ f"{decision_making_support_agent_instructions}:"
+ f" {summary_agent_output}"
+)
diff --git a/playground/demos/ad_gen/ad_gen_example.py b/playground/demos/ad_gen/ad_gen_example.py
new file mode 100644
index 00000000..21d9f315
--- /dev/null
+++ b/playground/demos/ad_gen/ad_gen_example.py
@@ -0,0 +1,100 @@
+import os
+import random
+
+from dotenv import load_dotenv
+
+from swarms.models import OpenAIChat
+from swarms.models.stable_diffusion import StableDiffusion
+from swarms.structs import Agent
+
+load_dotenv()
+openai_api_key = os.getenv("OPENAI_API_KEY")
+stability_api_key = os.getenv("STABILITY_API_KEY")
+
+# Initialize the language model and image generation model
+llm = OpenAIChat(
+ openai_api_key=openai_api_key, temperature=0.5, max_tokens=3000
+)
+sd_api = StableDiffusion(api_key=stability_api_key)
+
+
+# Creative Concept Generator for Product Ads
+class ProductAdConceptGenerator:
+ def __init__(self, product_name):
+ self.product_name = product_name
+ self.themes = [
+ "futuristic",
+ "rustic",
+ "luxurious",
+ "minimalistic",
+ "vibrant",
+ "elegant",
+ "retro",
+ "urban",
+ "ethereal",
+ "surreal",
+ "artistic",
+ "tech-savvy",
+ "vintage",
+ "natural",
+ "sophisticated",
+ "playful",
+ "dynamic",
+ "serene",
+ "lasers,lightning",
+ ]
+ self.contexts = [
+ "in an everyday setting",
+ "in a rave setting",
+ "in an abstract environment",
+ "in an adventurous context",
+ "surrounded by nature",
+ "in a high-tech setting",
+ "in a historical context",
+ "in a busy urban scene",
+ "in a tranquil and peaceful setting",
+ "against a backdrop of city lights",
+ "in a surreal dreamscape",
+ "in a festive atmosphere",
+ "in a luxurious setting",
+ "in a playful and colorful background",
+ "in an ice cave setting",
+ "in a serene and calm landscape",
+ ]
+ self.contexts = ["high realism product ad (extremely creative)"]
+
+ def generate_concept(self):
+ theme = random.choice(self.themes)
+ context = random.choice(self.contexts)
+ return f"{theme} inside a {style} {self.product_name}, {context}"
+
+
+# User input
+product_name = input(
+    "Enter a product name for ad creation (e.g., 'PS5', 'AirPods',"
+    " 'Kirkland Vodka'): "
+)
+
+# Generate creative concept
+concept_generator = ProductAdConceptGenerator(product_name)
+creative_concept = concept_generator.generate_concept()
+
+# Generate product image based on the creative concept
+image_paths = sd_api.run(creative_concept)
+
+# Generate ad copy
+ad_copy_agent = Agent(llm=llm, max_loops=1)
+ad_copy_prompt = (
+    "Write a compelling social media ad copy for a"
+    f" product photo showing {product_name} {creative_concept}."
+)
+ad_copy = ad_copy_agent.run(task=ad_copy_prompt)
+
+# Output the results
+print("Creative Concept:", creative_concept)
+print("Ad Copy Prompt:", ad_copy_prompt)
+print("Ad Copy:", ad_copy)
+print(
+    "Image Path:",
+    image_paths[0] if image_paths else "No image generated",
+)
diff --git a/playground/demos/agent_in_5/chroma_db.py b/playground/demos/agent_in_5/chroma_db.py
new file mode 100644
index 00000000..cf2d86be
--- /dev/null
+++ b/playground/demos/agent_in_5/chroma_db.py
@@ -0,0 +1,185 @@
+import logging
+import os
+import uuid
+from typing import List, Optional
+
+import chromadb
+import numpy as np
+from dotenv import load_dotenv
+
+from swarms.utils.data_to_text import data_to_text
+from swarms.utils.markdown_message import display_markdown_message
+
+# Load environment variables
+load_dotenv()
+
+
+# Results storage using local ChromaDB
+class ChromaDB:
+ """
+
+ ChromaDB database
+
+ Args:
+ metric (str): The similarity metric to use.
+ output (str): The name of the collection to store the results in.
+ limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
+ n_results (int, optional): The number of results to retrieve. Defaults to 2.
+
+ Methods:
+ add: _description_
+ query: _description_
+
+ Examples:
+ >>> chromadb = ChromaDB(
+ >>> metric="cosine",
+ >>> output="results",
+ >>> llm="gpt3",
+ >>> openai_api_key=OPENAI_API_KEY,
+ >>> )
+ >>> chromadb.add(task, result, result_id)
+ """
+
+    def __init__(
+        self,
+        metric: str = "cosine",
+        output_dir: str = "swarms",
+        limit_tokens: Optional[int] = 1000,
+        n_results: int = 2,
+        docs_folder: Optional[str] = None,
+        verbose: bool = False,
+        *args,
+        **kwargs,
+    ):
+        self.metric = metric
+        self.output_dir = output_dir
+        self.limit_tokens = limit_tokens
+        self.n_results = n_results
+        self.docs_folder = docs_folder
+        self.verbose = verbose
+
+        # Enable chromadb INFO logging only when verbose is requested
+        if verbose:
+            logging.getLogger("chromadb").setLevel(logging.INFO)
+
+        # Create Chroma collection
+        chroma_persist_dir = "chroma"
+        chroma_client = chromadb.PersistentClient(
+            settings=chromadb.config.Settings(
+                persist_directory=chroma_persist_dir,
+            ),
+            *args,
+            **kwargs,
+        )
+        # Keep a handle to the persistent client backing the collection
+        self.client = chroma_client
+
+        # Create Chroma collection
+        self.collection = chroma_client.get_or_create_collection(
+            name=output_dir,
+            metadata={"hnsw:space": metric},
+            *args,
+            **kwargs,
+        )
+        display_markdown_message(
+            "ChromaDB collection created:"
+            f" {self.collection.name} with metric: {self.metric} and"
+            f" output directory: {self.output_dir}"
+        )
+
+        # If docs
+        if docs_folder:
+            display_markdown_message(
+                f"Traversing directory: {docs_folder}"
+            )
+            self.traverse_directory()
+
+ def add(
+ self,
+ document: str,
+ images: List[np.ndarray] = None,
+ img_urls: List[str] = None,
+ *args,
+ **kwargs,
+ ):
+ """
+ Add a document to the ChromaDB collection.
+
+ Args:
+ document (str): The document to be added.
+ condition (bool, optional): The condition to check before adding the document. Defaults to True.
+
+ Returns:
+ str: The ID of the added document.
+ """
+ try:
+ doc_id = str(uuid.uuid4())
+ self.collection.add(
+ ids=[doc_id],
+ documents=[document],
+ images=images,
+ uris=img_urls,
+ *args,
+ **kwargs,
+ )
+ return doc_id
+ except Exception as e:
+ raise Exception(f"Failed to add document: {str(e)}")
+
+    def query(
+        self,
+        query_text: str,
+        query_images: List[np.ndarray],
+        *args,
+        **kwargs,
+    ):
+        """
+        Query documents from the ChromaDB collection.
+
+        Args:
+            query_text (str): The query string.
+            query_images (List[np.ndarray]): Images to query with, or None.
+
+        Returns:
+            list: The retrieved documents for the first query.
+        """
+        try:
+            docs = self.collection.query(
+                query_texts=[query_text],
+                query_images=query_images,
+                n_results=self.n_results,
+                *args,
+                **kwargs,
+            )["documents"]
+            return docs[0]
+        except Exception as e:
+            raise Exception(f"Failed to query documents: {str(e)}")
+
+    def traverse_directory(self):
+        """
+        Traverse through every file in self.docs_folder and its subdirectories,
+        adding text files as documents and collecting image paths.
+        Parameters:
+        - none: uses self.docs_folder as the root directory to traverse.
+        Returns:
+        - the id returned by the last add() call.
+        """
+        image_extensions = [
+            ".jpg",
+            ".jpeg",
+            ".png",
+        ]
+        images = []
+        for root, dirs, files in os.walk(self.docs_folder):
+            for file in files:
+                _, ext = os.path.splitext(file)
+                if ext.lower() in image_extensions:
+                    images.append(os.path.join(root, file))
+                else:
+                    data = data_to_text(os.path.join(root, file))
+                    added_to_db = self.add(data)
+                    print(f"{file} added to Database")
+        if images:
+            added_to_db = self.add("", img_urls=images)
+            print(f"{len(images)} images added to Database ")
+        return added_to_db
diff --git a/playground/demos/agent_in_5/youtube_demo_agent.py b/playground/demos/agent_in_5/youtube_demo_agent.py
new file mode 100644
index 00000000..bd2faf58
--- /dev/null
+++ b/playground/demos/agent_in_5/youtube_demo_agent.py
@@ -0,0 +1,71 @@
+"""
+Building an Autonomous Agent in 5 minutes with:
+- LLM: OpenAI, Anthropic, EleutherAI, Hugging Face: Transformers
+- Tools: Search, Browser, ETC
+- Long Term Memory: ChromaDB, Weaviate, Pinecone, ETC
+"""
+
+from swarms import Agent, OpenAIChat, tool
+from playground.demos.agent_in_5.chroma_db import ChromaDB
+
+# Initialize the memory
+chroma = ChromaDB(
+ metric="cosine",
+ limit_tokens=1000,
+ verbose=True,
+ # docs_folder = "docs" # Add your docs folder here
+)
+
+
+"""
+How to make a tool in Swarms:
+- Use the @tool decorator
+- Define the function with the required arguments
+- Add a docstring with the description of the tool
+"""
+
+
+# Create a tool
+@tool # Use this decorator
+def browser(query: str = None): # Add types
+ """
+ Opens a web browser and performs a Google search with the given query.
+
+ Args:
+ query (str): The search query to be performed.
+
+ Returns:
+ str: A message indicating that the browser is being opened for the given query.
+ """
+ import webbrowser
+
+ url = f"https://www.google.com/search?q={query}"
+ webbrowser.open(url)
+ return f"Opening browser for: {query}"
+
+
+# Initialize the agent
+agent = Agent(
+ llm=OpenAIChat(),
+ agent_name="AI Engineer",
+ agent_description=(
+ "Creates AI Models for special use cases using PyTorch"
+ ),
+ system_prompt=(
+ "Create an AI model for earthquake prediction using PyTorch."
+ ),
+ max_loops=4, # or "auto"
+ autosave=True,
+ dashboard=True,
+ verbose=True,
+ stopping_token="",
+ interactive=True,
+ tools=[browser],
+ long_term_memory=chroma, # pass in your memory object
+)
+
+# Run the agent
+out = agent.run(
+ "Let's make an AI model for earthquake prediction in pytorch."
+)
+print(out)
diff --git a/playground/demos/agentic_space_traffic_control/flight_data.py b/playground/demos/agentic_space_traffic_control/flight_data.py
new file mode 100644
index 00000000..dd41ff4f
--- /dev/null
+++ b/playground/demos/agentic_space_traffic_control/flight_data.py
@@ -0,0 +1,73 @@
+import requests
+from typing import List, Dict, Any
+
+
+def fetch_flights_in_area(
+ latitude: float, longitude: float, radius: float = 0.5
+) -> List[Dict[str, Any]]:
+ """
+ Fetch and summarize flight data for a given area using the OpenSky Network API.
+
+ Args:
+ latitude (float): The latitude of the center point.
+ longitude (float): The longitude of the center point.
+ radius (float): The radius around the center point to search for flights, in degrees. Default is 0.5.
+
+ Returns:
+ List[Dict[str, Any]]: A list of summarized flight data in the specified area.
+
+ Raises:
+ Exception: If the request fails or the response is invalid.
+ """
+ url = "https://opensky-network.org/api/states/all"
+ params = {
+ "lamin": latitude - radius,
+ "lamax": latitude + radius,
+ "lomin": longitude - radius,
+ "lomax": longitude + radius,
+ }
+
+ try:
+ response = requests.get(url, params=params)
+ response.raise_for_status()
+ data = response.json()
+ flights = data.get("states", [])
+
+ summarized_flights = []
+ for flight in flights:
+ if (
+ flight[1]
+ and flight[5]
+ and flight[6]
+ and flight[7] is not None
+ ): # Ensure essential data is available
+ summarized_flights.append(
+ {
+ "callsign": flight[1].strip(),
+ "origin_country": flight[2],
+ "last_position": f"Lat: {flight[5]}, Lon: {flight[6]}",
+ "altitude_meters": flight[7],
+ }
+ )
+
+ return summarized_flights
+ except requests.RequestException as e:
+ raise Exception(f"Failed to fetch flight data: {e}")
+ except ValueError:
+ raise Exception("Invalid response format.")
+
+
+# Example usage
+latitude = 28.3922 # Latitude for Cape Canaveral, FL
+longitude = -80.6077 # Longitude for Cape Canaveral, FL
+radius = 0.5 # 0.5 degrees (~55 km)
+
+try:
+ flights = fetch_flights_in_area(latitude, longitude, radius)
+ for flight in flights:
+ print(
+ f"Callsign: {flight['callsign']}, Origin: {flight['origin_country']}, "
+ f"Position: {flight['last_position']}, Altitude: {flight['altitude_meters']} meters"
+ )
+except Exception as e:
+ print(e)
diff --git a/playground/demos/agentic_space_traffic_control/game.py b/playground/demos/agentic_space_traffic_control/game.py
new file mode 100644
index 00000000..6a44d5ea
--- /dev/null
+++ b/playground/demos/agentic_space_traffic_control/game.py
@@ -0,0 +1,74 @@
+from swarms import (
+ Agent,
+ llama3Hosted,
+ AgentRearrange,
+)
+from playground.demos.agentic_space_traffic_control.prompts import (
+ WEATHER_ANALYST_SYSTEM_PROMPT,
+ SPACE_TRAFFIC_CONTROLLER_SYS_PROMPT,
+)
+from tools import (
+ fetch_weather_data,
+)
+from swarms.tools import get_openai_function_schema_from_func
+
+
+def prep_weather_tool_prompt(city: str = "Melbourne, Fl") -> str:
+ out = get_openai_function_schema_from_func(
+ fetch_weather_data,
+ name="Fetch Weather Data by City",
+ description="Fetch near real-time weather data for a city using wttr.in. Provide the name of the city (e.g., 'Austin, Tx') and state, as input.",
+ )
+ return out
+
+
+# Purpose = To generate weather information for the user and send API requests to the Baron Weather API
+agent = Agent(
+ agent_name="Weather Analyst Agent",
+ system_prompt=WEATHER_ANALYST_SYSTEM_PROMPT,
+ llm=llama3Hosted(),
+ max_loops=1,
+ # autosave=True,
+ dashboard=False,
+ verbose=True,
+ # sop=list_base_models_json,
+ # sop_list=[
+ # prep_weather_tool_prompt
+ # ], # Set the output type to the tool schema which is a BaseModel
+ # output_type=str, # or dict, or str
+ # metadata_output_type="json",
+ # # List of schemas that the agent can handle
+ # function_calling_format_type="OpenAI",
+ # function_calling_type="json", # or soon yaml
+ # sop=fetch_weather_data,
+)
+
+
+# Purpose = To manage the trajectories and communication of spacecraft
+agent2 = Agent(
+ agent_name="Space Traffic Controller Agent",
+ system_prompt=SPACE_TRAFFIC_CONTROLLER_SYS_PROMPT,
+ # sop=list_base_models_json,
+ llm=llama3Hosted(),
+ max_loops=1,
+ # autosave=True,
+ dashboard=False,
+ verbose=True,
+ # Set the output type to the tool schema which is a BaseModel
+ # output_type=str, # or dict, or str
+ # metadata_output_type="json",
+ # # List of schemas that the agent can handle
+ # function_calling_format_type="OpenAI",
+ # function_calling_type="json", # or soon yaml
+)
+
+# Rearrange
+flow = AgentRearrange(
+ agents=[agent, agent2],
+ flow="Weather Analyst Agent -> Space Traffic Controller Agent",
+ max_loops=3,
+)
+# Run the flow
+flow.run(
+ "We're preparing for a launch in Cape canveral, let's begin the launch process, whats the weather like?"
+)
diff --git a/playground/demos/agentic_space_traffic_control/prompts.py b/playground/demos/agentic_space_traffic_control/prompts.py
new file mode 100644
index 00000000..9de0ea82
--- /dev/null
+++ b/playground/demos/agentic_space_traffic_control/prompts.py
@@ -0,0 +1,68 @@
+def WEATHER_ANALYST_SYSTEM_PROMPT() -> str:
+ return """
+
+ # Weather Analyst Instructions
+
+ ## Role Overview
+ As a Weather Analyst, your primary responsibility is to monitor and report on space weather conditions. Your insights help ensure the safety and efficiency of space missions.
+
+ ## Key Responsibilities
+ 1. **Monitor Space Weather**: Regularly check for updates on space weather conditions such as solar storms, asteroid showers, and other cosmic phenomena.
+ 2. **Forecast Weather Conditions**: Provide accurate and timely weather forecasts to assist in mission planning and execution.
+ 3. **Communicate Hazards**: Alert the Space Traffic Controllers about any upcoming weather hazards that could affect spacecraft operations.
+
+ ## How to Think Like a Weather Analyst
+ - **Accuracy**: Always verify the data before reporting. Ensure your forecasts are as accurate as possible.
+ - **Timeliness**: Provide updates promptly. Space missions depend on real-time information to make critical decisions.
+ - **Clarity**: Communicate clearly and concisely. Ensure that your reports are easily understood by all team members.
+ - **Anticipation**: Think ahead. Predict potential weather impacts on future missions and communicate these proactively.
+
+ ## Example Actions
+ 1. **Regular Updates**:
+ - "Solar activity is expected to increase in the next 3 hours. Recommend delaying any non-essential missions."
+ 2. **Forecasting**:
+ - "A solar storm is predicted to hit in 5 hours. Suggest adjusting launch windows to avoid potential interference."
+ 3. **Hazard Alerts**:
+ - "Detected an asteroid shower trajectory intersecting with planned spacecraft path. Immediate re-routing is advised."
+
+ ## Tools and Resources
+ - **Space Weather Monitoring Systems**: Use tools provided to monitor space weather conditions.
+ - **Communication Platforms**: Utilize the chat interface to send updates and alerts to the team.
+ - **Data Sources**: Access reliable data sources for accurate weather information.
+ """
+
+
+def SPACE_TRAFFIC_CONTROLLER_SYS_PROMPT() -> str:
+ return """
+
+ # Space Traffic Controller Instructions
+
+ ## Role Overview
+ As a Space Traffic Controller, your main task is to manage the trajectories and communication of spacecraft. Your role is crucial in ensuring that missions are executed safely and efficiently.
+
+ ## Key Responsibilities
+ 1. **Manage Trajectories**: Plan and adjust spacecraft trajectories to avoid hazards and optimize fuel usage.
+ 2. **Coordinate Communication**: Maintain clear and continuous communication with spacecraft, providing guidance and updates.
+ 3. **Collaborate with Team Members**: Work closely with Weather Analysts and Fuel Managers to make informed decisions.
+
+ ## How to Think Like a Space Traffic Controller
+ - **Precision**: Ensure trajectory calculations are precise to avoid collisions and optimize mission success.
+ - **Communication**: Maintain clear and effective communication with both spacecraft and team members.
+ - **Adaptability**: Be ready to adjust plans based on new information, such as weather updates or fuel status.
+ - **Safety First**: Prioritize the safety of the spacecraft and crew in all decisions.
+
+ ## Example Actions
+ 1. **Trajectory Management**:
+ - "Adjusting the spacecraft's trajectory to avoid the predicted solar storm area."
+ 2. **Communication**:
+ - "Mission Control to Spacecraft Alpha, prepare for a trajectory change in 5 minutes."
+ 3. **Collaboration**:
+ - "Received a weather alert about an upcoming solar storm. Fuel Manager, please confirm if we have enough reserves for an extended orbit."
+
+ ## Tools and Resources
+ - **Trajectory Planning Software**: Use provided tools to calculate and adjust spacecraft trajectories.
+ - **Communication Systems**: Utilize the chat interface and other communication tools to coordinate with spacecraft and team members.
+ - **Mission Data**: Access mission-specific data to inform your decisions and actions.
+
+
+ """
diff --git a/playground/demos/ai_acceleerated_learning/Podgraph .py b/playground/demos/ai_acceleerated_learning/Podgraph .py
new file mode 100644
index 00000000..d632b7de
--- /dev/null
+++ b/playground/demos/ai_acceleerated_learning/Podgraph .py
@@ -0,0 +1,57 @@
+def test_create_graph():
+ """
+ Tests that a graph can be created.
+ """
+ graph = create_graph()
+ assert isinstance(graph, dict)
+
+
+def test_weight_edges():
+ """
+ Tests that the edges of a graph can be weighted.
+ """
+ graph = create_graph()
+ weight_edges(graph)
+ for edge in graph.edges:
+ assert isinstance(edge.weight, int)
+
+
+def test_create_user_list():
+ """
+ Tests that a list of all the podcasts that the user has listened to can be created.
+ """
+ user_list = create_user_list()
+ assert isinstance(user_list, list)
+
+
+def test_find_most_similar_podcasts():
+ """
+ Tests that the most similar podcasts to a given podcast can be found.
+ """
+ graph = create_graph()
+ weight_edges(graph)
+ user_list = create_user_list()
+ most_similar_podcasts = find_most_similar_podcasts(graph, user_list)
+ assert isinstance(most_similar_podcasts, list)
+
+
+def test_add_most_similar_podcasts():
+ """
+ Tests that the most similar podcasts to a given podcast can be added to the user's list.
+ """
+ graph = create_graph()
+ weight_edges(graph)
+ user_list = create_user_list()
+ add_most_similar_podcasts(graph, user_list)
+ assert len(user_list) > 0
+
+
+def test_repeat_steps():
+ """
+ Tests that steps 5-6 can be repeated until the user's list contains the desired number of podcasts.
+ """
+ graph = create_graph()
+ weight_edges(graph)
+ user_list = create_user_list()
+ repeat_steps(graph, user_list)
+ assert len(user_list) == 10
diff --git a/playground/demos/ai_acceleerated_learning/main.py b/playground/demos/ai_acceleerated_learning/main.py
new file mode 100644
index 00000000..6366c005
--- /dev/null
+++ b/playground/demos/ai_acceleerated_learning/main.py
@@ -0,0 +1,248 @@
+import concurrent.futures
+import csv
+from swarms import Agent, OpenAIChat
+from swarms.memory import ChromaDB
+from dotenv import load_dotenv
+from swarms.utils.parse_code import extract_code_from_markdown
+from swarms.utils.file_processing import create_file
+from swarms.utils.loguru_logger import logger
+
+
+# Load ENV
+load_dotenv()
+
+# LLM (OpenAIChat is used here, despite the "gemini" variable name)
+gemini = OpenAIChat()
+
+# memory
+memory = ChromaDB(output_dir="swarm_hackathon")
+
+
+def execute_concurrently(callable_functions: list, max_workers: int = 5):
+    """
+    Executes callable functions concurrently using multithreading.
+
+    Parameters:
+    - callable_functions: A list of tuples, each containing the callable function and its arguments.
+      For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})]
+    - max_workers: The maximum number of threads to use.
+
+    Returns:
+    - results: A list of results returned by the callable functions. If an error occurs in any function,
+      the exception object will be placed at the corresponding index in the list.
+    """
+    results = [None] * len(callable_functions)
+
+    def worker(fn, args, kwargs, index):
+        try:
+            result = fn(*args, **kwargs)
+            results[index] = result
+        except Exception as e:
+            results[index] = e
+
+    with concurrent.futures.ThreadPoolExecutor(
+        max_workers=max_workers
+    ) as executor:
+        futures = []
+        for i, (fn, args, kwargs) in enumerate(callable_functions):
+            futures.append(executor.submit(worker, fn, args, kwargs, i))
+
+        # Wait for all threads to complete
+        concurrent.futures.wait(futures)
+
+    return results
+
+
+# Adjusting the function to extract specific column values
+def extract_and_create_agents(csv_file_path: str, target_columns: list):
+ """
+ Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
+ creates an Agent for each, and adds it to the swarm network.
+
+ Parameters:
+ - csv_file_path: The path to the CSV file.
+ - target_columns: A list of column names to extract values from.
+ """
+ try:
+ agents = []
+ with open(csv_file_path, mode="r", encoding="utf-8") as file:
+ reader = csv.DictReader(file)
+ for row in reader:
+ project_name = row[target_columns[0]]
+ lightning_proposal = row[target_columns[1]]
+
+ # Example of creating and adding an agent based on the project name and lightning proposal
+ agent_name = f"{project_name} agent"
+ print(agent_name) # For demonstration
+
+ # Create the agent
+ logger.info("Creating agent...")
+
+ # Design agent
+ logger.info("Creating design agent...")
+ design_agent = Agent(
+ llm=gemini,
+ agent_name="Design Agent",
+ max_loops=1,
+ stopping_token="",
+ sop=None,
+ system_prompt=(
+ "Transform an app idea into step by step very"
+ " simple algorithmic psuedocode so it can be"
+ " implemented simply."
+ ),
+ long_term_memory=memory,
+ )
+
+ # Log the agent
+ logger.info(
+ f"Code Agent created: {agent_name} with long term"
+ " memory"
+ )
+ agent = Agent(
+ llm=gemini,
+ agent_name=agent_name,
+ max_loops=1,
+ code_interpreter=True,
+ stopping_token="",
+ sop=None,
+ system_prompt=(
+ "Transform an app idea into a very simple"
+ " python app in markdown. Return all the"
+ " python code in a single markdown file."
+ " Return only code and nothing else."
+ ),
+ long_term_memory=memory,
+ )
+
+ # Testing agent
+ logger.info(f"Testing_agent agent: {agent_name}")
+ agent = Agent(
+ llm=gemini,
+ agent_name=agent_name + " testing",
+ max_loops=1,
+ stopping_token="",
+ sop=None,
+ system_prompt=(
+ "Create unit tests using pytest based on the"
+ " code you see, only return unit test code in"
+ " python using markdown, only return the code"
+ " and nothing else."
+ ),
+ long_term_memory=memory,
+ )
+
+ # Log the agent
+ logger.info(
+ f"Agent created: {agent_name} with long term" " memory"
+ )
+ agents.append(agent)
+
+ # Design agent
+ design_agent_output = design_agent.run(
+ (
+ "Create the algorithmic psuedocode for the"
+ f" {lightning_proposal} in markdown and"
+ " return it"
+ ),
+ None,
+ )
+
+ logger.info(
+ "Algorithmic psuedocode created:"
+ f" {design_agent_output}"
+ )
+
+ # Create the code for each project
+ output = agent.run(
+ (
+ "Create the code for the"
+ f" {lightning_proposal} in python using the"
+ " algorithmic psuedocode"
+ f" {design_agent_output} and wrap it in"
+ " markdown and return it"
+ ),
+ None,
+ )
+ print(output)
+ # Parse the output
+ output = extract_code_from_markdown(output)
+ # Create the file
+ output = create_file(output, f"{project_name}.py")
+
+ # Testing agent
+ testing_agent_output = agent.run(
+ (
+ "Create the unit tests for the"
+ f" {lightning_proposal} in python using the"
+ f" code {output} and wrap it in markdown and"
+ " return it"
+ ),
+ None,
+ )
+ print(testing_agent_output)
+
+ # Parse the output
+ testing_agent_output = extract_code_from_markdown(
+ testing_agent_output
+ )
+ # Create the file
+ testing_agent_output = create_file(
+ testing_agent_output, f"test_{project_name}.py"
+ )
+
+ # Log the project created
+ logger.info(
+ f"Project {project_name} created: {output} at"
+ f" file path {project_name}.py"
+ )
+ print(output)
+
+ # Log the unit tests created
+ logger.info(
+ f"Unit tests for {project_name} created:"
+ f" {testing_agent_output} at file path"
+ f" test_{project_name}.py"
+ )
+
+ print(
+ f"Agent {agent_name} created and added to the"
+ " swarm network"
+ )
+
+ return agents
+
+ except Exception as e:
+ logger.error(
+ "An error occurred while extracting and creating"
+ f" agents: {e}"
+ )
+ return None
+
+
+# CSV
+csv_file = "presentation.csv"
+
+# Specific columns to extract
+target_columns = ["Project Name", "Project Description"]
+
+# Use the adjusted function
+specific_column_values = extract_and_create_agents(
+ csv_file, target_columns
+)
+
+# Display the extracted column values
+print(specific_column_values)
+
+
+# Concurrently execute the function
+logger.info(
+ "Concurrently executing the swarm for each hackathon project..."
+)
+output = execute_concurrently(
+ [
+ (extract_and_create_agents, (csv_file, target_columns), {}),
+ ],
+ max_workers=5,
+)
+print(output)
diff --git a/playground/demos/ai_acceleerated_learning/presentation assistant.py b/playground/demos/ai_acceleerated_learning/presentation assistant.py
new file mode 100644
index 00000000..fb03c814
--- /dev/null
+++ b/playground/demos/ai_acceleerated_learning/presentation assistant.py
@@ -0,0 +1,86 @@
+class MockApp:
+ def __init__(self):
+ self.running = True
+ self.session = None
+ self.slides = []
+
+ def main_menu(self):
+ return input("Choose option: 1. Start, 2. Load, 3. Exit ")
+
+ def start_new_talk(self, title):
+ self.session = title
+ self.slides = []
+
+ def add_slide(self, content):
+ self.slides.append(content)
+
+ def edit_slide(self, index, content):
+ self.slides[index] = content
+
+ def delete_slide(self, index):
+ del self.slides[index]
+
+ def reorder_slides(self, new_order):
+ self.slides = [self.slides[i] for i in new_order]
+
+ def get_number_of_slides(self):
+ return len(self.slides)
+
+ # Function to simulate user actions
+ def simulate_user_action(self, action):
+ # Placeholder function to simulate user interaction, not part of the actual app code
+ pass
+
+
+# Testing starting a new talk
+def test_start_new_talk():
+ app = MockApp()
+ app.start_new_talk("My New Talk")
+ assert app.session == "My New Talk"
+ assert app.slides == []
+
+
+# Testing adding a slide
+def test_add_slide():
+ app = MockApp()
+ app.start_new_talk("Talk 1")
+ app.add_slide("Slide Content 1")
+ assert app.slides == ["Slide Content 1"]
+
+
+# Testing editing a slide
+def test_edit_slide():
+ app = MockApp()
+ app.start_new_talk("Talk 1")
+ app.add_slide("Slide Content 1")
+ app.edit_slide(0, "Updated Slide Content 1")
+ assert app.slides == ["Updated Slide Content 1"]
+
+
+# Testing deleting a slide
+def test_delete_slide():
+ app = MockApp()
+ app.start_new_talk("Talk 1")
+ app.add_slide("Slide Content 1")
+ app.add_slide("Slide Content 2")
+ app.delete_slide(0)
+ assert app.slides == ["Slide Content 2"]
+
+
+# Testing reordering slides
+def test_reorder_slides():
+ app = MockApp()
+ app.start_new_talk("Talk 1")
+ app.add_slide("Slide Content 1")
+ app.add_slide("Slide Content 2")
+ app.reorder_slides([1, 0])
+ assert app.slides == ["Slide Content 2", "Slide Content 1"]
+
+
+# Testing the number of slides
+def test_slide_count():
+ app = MockApp()
+ app.start_new_talk("Talk 1")
+ app.add_slide("Slide Content 1")
+ app.add_slide("Slide Content 2")
+ assert app.get_number_of_slides() == 2
diff --git a/playground/demos/ai_acceleerated_learning/presentation.csv b/playground/demos/ai_acceleerated_learning/presentation.csv
new file mode 100644
index 00000000..66894008
--- /dev/null
+++ b/playground/demos/ai_acceleerated_learning/presentation.csv
@@ -0,0 +1,15 @@
+Project Name,Team Members,Project Description,Project Link / Code,Team Twitter Handles
+presentation assistant,robert nowell,live visual aid for talks,loom,@robertnowell1
+Vocal,"Jeremy Nixon, Amir Gamil, Eliott Hoffenberg, Trina Chatterjee, Ruby Yeh","Educational Video Generation, Prompt -> Youtube Video",,"@jvnixon, @amirbolous, @Eliotthoff, @trina_chatt"
+Podgraph ,"DC, Leo, Anupam",Graph based podcast learning,https://github.com/dcsan/kbxt ; https://www.figma.com/file/sui06ZgDGXrHOVlrJDiOD7/Untitled?type=design&node-id=0%3A1&mode=design&t=LnQCl13XroVHVbxD-1,@anupambatra_ | @dcsan
+"Listen, chat and learn!!!",James,Chat with a podcast to learn things,https://react.gitwit.dev/run/zfGVjrjsa6ZKaEU1PldW,@jamesmurdza
+Recall,Liam & Caden,conversation information retrieval,https://recall-97b8b27a6a92.herokuapp.com/,
+VoiceStudyBot,Konrad,Personal tutor to test your knowledge of a book,,@konrad_gnat
+Short Form Upskill,"Margarita, Aditya, Johnny",TikTok Scrape and Transcribe ,margro2000/Learn (github.com),https://twitter.com/Marg_Groisman
+Rohan,Rohan,Rohan,,
+Envision: diagram dataset,Steve,An API to translate any technical concept into diagrams,https://github.com/stephenkfrey/diagrammatic,twitter.com/stevekfrey
+Arxiv2Video,Lily Su,Converts an Arxiv web url to a short video,https://github.com/LilySu/Arxiv2Video,@excelsiorpred
+Dir Chat,Andy Li,Combine to power of SQL and RAG to serach courses,,@xdotli
+Empathy Coach,Ji Young Lim,A chatbot that coches people to make more empathetic conversations,,@jyl1030
+Aimor,Brach Burdick,Platform for assessing and monitoring the psychological wellbeing of a body of students based on conversations with an AI therapist,https://aimor-git-staging-aimor.vercel.app/admin,https://twitter.com/__brach__
+Structured TA bot Generation,Wenxi,Generate structured tutorial chatbot based on video transcript and potentially videos,https://github.com/wenxichen/video2ta ,
\ No newline at end of file
diff --git a/playground/demos/ai_acceleerated_learning/test_Vocal.py b/playground/demos/ai_acceleerated_learning/test_Vocal.py
new file mode 100644
index 00000000..41433b87
--- /dev/null
+++ b/playground/demos/ai_acceleerated_learning/test_Vocal.py
@@ -0,0 +1,36 @@
+from ai_acceleerated_learning.Vocal import Vocal
+
+vocal = Vocal()
+
+
+def test_pass():
+ assert (
+ vocal.generate_video(
+ "I love to play basketball, and I am a very good player.",
+ "basketball",
+ )
+ == "Successfully generated a YouTube video for your prompt: I"
+ " love to play basketball, and I am a very good player."
+ )
+
+
+def test_invalid_sports():
+ assert (
+ vocal.generate_video("I just ate some delicious tacos", "tacos")
+ == "Invalid sports entered!! Please enter a valid sport."
+ )
+
+
+def test_invalid_prompt():
+ assert (
+ vocal.generate_video(987, "basketball")
+ == "Invalid prompt entered!! Please enter a valid prompt."
+ )
+
+
+def test_not_string():
+ assert (
+ vocal.generate_video(789, 234)
+ == "Invalid prompt and sports entered!! Please enter valid"
+ " prompt and sport."
+ )
diff --git a/playground/demos/ai_acceleerated_learning/test_presentation assistant.py b/playground/demos/ai_acceleerated_learning/test_presentation assistant.py
new file mode 100644
index 00000000..5a27eebd
--- /dev/null
+++ b/playground/demos/ai_acceleerated_learning/test_presentation assistant.py
@@ -0,0 +1,86 @@
+# test_presentation_assistant.py
+
+import pytest
+from presentation_assistant import (
+ PresentationAssistant,
+ SlideNotFoundError,
+)
+
+
+@pytest.fixture
+def assistant():
+ slides = [
+ "Welcome to our presentation!",
+ "Here is the agenda for today.",
+ "Let's dive into the first topic.",
+ "Thank you for attending.",
+ ]
+ return PresentationAssistant(slides)
+
+
+def test_init():
+ slides = ["Slide 1", "Slide 2"]
+ pa = PresentationAssistant(slides)
+ assert pa.slides == slides
+ assert pa.current_slide == 0
+
+
+def test_next_slide(assistant):
+ assistant.next_slide()
+ assert assistant.current_slide == 1
+ assistant.next_slide()
+ assert assistant.current_slide == 2
+
+
+def test_previous_slide(assistant):
+ assistant.current_slide = 2
+ assistant.previous_slide()
+ assert assistant.current_slide == 1
+ assistant.previous_slide()
+ assert assistant.current_slide == 0
+
+
+def test_next_slide_at_end(assistant):
+ assistant.current_slide = len(assistant.slides) - 1
+ with pytest.raises(SlideNotFoundError):
+ assistant.next_slide()
+
+
+def test_previous_slide_at_start(assistant):
+ with pytest.raises(SlideNotFoundError):
+ assistant.previous_slide()
+
+
+def test_go_to_slide(assistant):
+ assistant.go_to_slide(2)
+ assert assistant.current_slide == 2
+
+
+def test_go_to_slide_out_of_range(assistant):
+ with pytest.raises(SlideNotFoundError):
+ assistant.go_to_slide(len(assistant.slides))
+
+
+def test_go_to_slide_negative(assistant):
+ with pytest.raises(SlideNotFoundError):
+ assistant.go_to_slide(-1)
+
+
+def test_current_slide_content(assistant):
+ content = assistant.current_slide_content()
+ assert content == assistant.slides[0]
+ assistant.next_slide()
+ content = assistant.current_slide_content()
+ assert content == assistant.slides[1]
+
+
+def test_show_slide(
+ assistant, capsys
+): # capsys is a pytest fixture to capture stdout and stderr
+ assistant.show_slide()
+ captured = capsys.readouterr()
+ assert captured.out.strip() == assistant.slides[0]
+ assistant.next_slide()
+ assistant.show_slide()
+ captured = capsys.readouterr()
+ assert captured.out.strip() == assistant.slides[1]
diff --git a/playground/demos/ai_research_team/main_example.py b/playground/demos/ai_research_team/main_example.py
new file mode 100644
index 00000000..96f2e417
--- /dev/null
+++ b/playground/demos/ai_research_team/main_example.py
@@ -0,0 +1,78 @@
+import os
+
+from dotenv import load_dotenv
+
+from swarms.models import Anthropic, OpenAIChat
+from swarms.prompts.ai_research_team import (
+ PAPER_IMPLEMENTOR_AGENT_PROMPT,
+ PAPER_SUMMARY_ANALYZER,
+)
+from swarms.structs import Agent
+from swarms.utils.pdf_to_text import pdf_to_text
+from swarms import rearrange
+
+# Base llms
+load_dotenv()
+anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
+openai_api_key = os.getenv("OPENAI_API_KEY")
+
+PDF_PATH = "fasterffn.pdf"
+
+
+# Base llms
+llm1 = OpenAIChat(
+ openai_api_key=openai_api_key,
+)
+
+llm2 = Anthropic(
+ anthropic_api_key=anthropic_api_key,
+)
+
+# Agents
+paper_summarizer_agent = Agent(
+ agent_name="paper_summarizer_agent",
+ llm=llm2,
+ sop=PAPER_SUMMARY_ANALYZER,
+ max_loops=1,
+ autosave=True,
+ saved_state_path="paper_summarizer.json",
+)
+
+paper_implementor_agent = Agent(
+ agent_name="paper_implementor_agent",
+ llm=llm1,
+ sop=PAPER_IMPLEMENTOR_AGENT_PROMPT,
+ max_loops=1,
+ autosave=True,
+ saved_state_path="paper_implementor.json",
+ code_interpreter=False,
+)
+
+pytorch_pseudocode_agent = Agent(
+ agent_name="pytorch_pseudocode_agent",
+ llm=llm1,
+ sop=PAPER_IMPLEMENTOR_AGENT_PROMPT,
+ max_loops=1,
+ autosave=True,
+ saved_state_path="pytorch_pseudocode_agent.json",
+ code_interpreter=False,
+)
+
+
+paper = pdf_to_text(PDF_PATH)
+task = f"""
+    Focus on creating the algorithmic pseudocode for the novel
+    method in this paper: {paper}
+"""
+
+
+agents = [
+ paper_summarizer_agent,
+ paper_implementor_agent,
+ pytorch_pseudocode_agent,
+]
+
+flow = "paper_summarizer_agent -> paper_implementor_agent -> pytorch_pseudocode_agent"
+
+swarm = rearrange(agents, flow, task)
+print(swarm)
diff --git a/playground/demos/assembly/assembly_example.py b/playground/demos/assembly/assembly_example.py
new file mode 100644
index 00000000..7ac97ab0
--- /dev/null
+++ b/playground/demos/assembly/assembly_example.py
@@ -0,0 +1,21 @@
+from swarms.models.gpt4_vision_api import GPT4VisionAPI
+from swarms.structs import Agent
+
+llm = GPT4VisionAPI()
+
+task = (
+ "Analyze this image of an assembly line and identify any issues"
+ " such as misaligned parts, defects, or deviations from the"
+ " standard assembly process. IF there is anything unsafe in the"
+ " image, explain why it is unsafe and how it could be improved."
+)
+img = "assembly_line.jpg"
+
+## Initialize the workflow
+agent = Agent(
+ llm=llm,
+ max_loops=1,
+ dashboard=True,
+)
+
+agent.run(task=task, img=img)
diff --git a/playground/demos/assembly/assembly_line.jpg b/playground/demos/assembly/assembly_line.jpg
new file mode 100644
index 00000000..df35c2e3
Binary files /dev/null and b/playground/demos/assembly/assembly_line.jpg differ
diff --git a/playground/demos/autobloggen_example.py b/playground/demos/autobloggen_example.py
new file mode 100644
index 00000000..ae6cdc60
--- /dev/null
+++ b/playground/demos/autobloggen_example.py
@@ -0,0 +1,141 @@
+from termcolor import colored
+
+from swarms.prompts.autobloggen import (
+ DRAFT_AGENT_SYSTEM_PROMPT,
+ REVIEW_PROMPT,
+ SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT,
+ TOPIC_GENERATOR,
+)
+
+# Prompts
+topic_selection_task = (
+ "Generate 10 topics on gaining mental clarity using ancient"
+ " practices"
+)
+
+
+class AutoBlogGenSwarm:
+ """
+ AutoBlogGenSwarm
+
+ Swarm Agent
+ Topic selection agent -> draft agent -> review agent -> distribution agent
+
+ Topic Selection Agent:
+ - Generate 10 topics on gaining mental clarity using Taosim and Christian meditation
+
+ Draft Agent:
+ - Write a 100% unique, creative and in human-like style article of a minimum of 5,000 words using headings and sub-headings.
+
+ Review Agent:
+    - Refine the article to meet PositiveMed's stringent publication standards.
+
+ Distribution Agent:
+ - Social Media posts for the article.
+
+ Example:
+ ```
+ from swarms.autobloggen import AutoBlogGenSwarm
+ swarm = AutoBlogGenSwarm()
+ swarm.run()
+ ```
+
+
+ """
+
+ def __init__(
+ self,
+ llm,
+ objective: str = "Clicks and engagement",
+ iterations: int = 3,
+ topic_selection_task: str = topic_selection_task,
+ max_retries: int = 3,
+ retry_attempts: int = 3,
+ topic_selection_agent_prompt: str = f"Your System Instructions: {TOPIC_GENERATOR}, Your current task: {topic_selection_task}",
+ ):
+ self.llm = llm()
+ self.topic_selection_task = topic_selection_task
+ self.topic_selection_agent_prompt = topic_selection_agent_prompt
+ self.objective = objective
+ self.iterations = iterations
+ self.max_retries = max_retries
+ self.retry_attempts = retry_attempts
+
+ def print_beautifully(self, subheader: str, text: str):
+ """Prints the text beautifully"""
+ print(
+ colored(
+ f"""
+ ------------------------------------
+ {subheader}
+ -----------------------------
+
+ {text}
+
+ """,
+ "blue",
+ )
+ )
+
+ def social_media_prompt(self, article: str):
+ """Gets the social media prompt"""
+ prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
+ "{{ARTICLE}}", article
+ ).replace("{{GOAL}}", self.objective)
+ return prompt
+
+ def get_review_prompt(self, article: str):
+ """Gets the review prompt"""
+ prompt = REVIEW_PROMPT.replace("{{ARTICLE}}", article)
+ return prompt
+
+    def step(self):
+        """Steps through the task"""
+        topic_selection_agent = self.llm(self.topic_selection_agent_prompt)
+        # print_beautifully returns None; do not reassign agent outputs to it
+        self.print_beautifully(
+            "Topic Selection Agent", topic_selection_agent
+        )
+
+        draft_blog = self.llm(DRAFT_AGENT_SYSTEM_PROMPT)
+        self.print_beautifully("Draft Agent", draft_blog)
+
+        # Agent that reviews the draft
+        review_agent = self.llm(self.get_review_prompt(draft_blog))
+        self.print_beautifully("Review Agent", review_agent)
+
+        # Agent that publishes on social media
+        distribution_agent = self.llm(
+            self.social_media_prompt(article=review_agent)
+        )
+        self.print_beautifully(
+            "Distribution Agent", distribution_agent
+        )
+
+ def run(self):
+ """Runs the swarm"""
+ for attempt in range(self.retry_attempts):
+ try:
+ for i in range(self.iterations):
+ self.step()
+ except Exception as error:
+ print(
+ colored(
+ (
+ "Error while running AutoBlogGenSwarm"
+ f" {error}"
+ ),
+ "red",
+ )
+ )
+ if attempt == self.retry_attempts - 1:
+ raise
+
+ def update_task(self, new_task: str):
+ """
+ Updates the task of the swarm
+
+ Args:
+ new_task (str): New task to be performed by the swarm
+
+ """
+ self.topic_selection_agent = new_task
diff --git a/playground/demos/autoswarm/autoswarm.py b/playground/demos/autoswarm/autoswarm.py
new file mode 100644
index 00000000..309c88ea
--- /dev/null
+++ b/playground/demos/autoswarm/autoswarm.py
@@ -0,0 +1,39 @@
+import os
+from dotenv import load_dotenv
+from swarms.models import OpenAIChat
+from swarms.structs import Agent
+import swarms.prompts.autoswarm as sdsp
+
+# Load environment variables and initialize the OpenAI Chat model
+load_dotenv()
+api_key = os.getenv("OPENAI_API_KEY")
+llm = OpenAIChat(model_name="gpt-4", openai_api_key=api_key)
+
+user_idea = "screenplay writing team"
+
+role_identification_agent = Agent(
+ llm=llm,
+ sop=sdsp.AGENT_ROLE_IDENTIFICATION_AGENT_PROMPT,
+ max_loops=1,
+)
+agent_configuration_agent = Agent(
+ llm=llm, sop=sdsp.AGENT_CONFIGURATION_AGENT_PROMPT, max_loops=1
+)
+swarm_assembly_agent = Agent(
+ llm=llm, sop=sdsp.SWARM_ASSEMBLY_AGENT_PROMPT, max_loops=1
+)
+testing_optimization_agent = Agent(
+ llm=llm, sop=sdsp.TESTING_OPTIMIZATION_AGENT_PROMPT, max_loops=1
+)
+
+# Process the user idea through each agent
+role_identification_output = role_identification_agent.run(user_idea)
+agent_configuration_output = agent_configuration_agent.run(
+ role_identification_output
+)
+swarm_assembly_output = swarm_assembly_agent.run(
+ agent_configuration_output
+)
+testing_optimization_output = testing_optimization_agent.run(
+ swarm_assembly_output
+)
diff --git a/playground/demos/autotemp/autotemp_example.py b/playground/demos/autotemp/autotemp_example.py
new file mode 100644
index 00000000..f77d46c2
--- /dev/null
+++ b/playground/demos/autotemp/autotemp_example.py
@@ -0,0 +1,87 @@
+import re
+
+from swarms.models.openai_models import OpenAIChat
+
+
+class AutoTemp:
+ """
+ AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
+ It generates responses at different temperatures, evaluates them, and ranks them based on quality.
+ """
+
+ def __init__(
+ self,
+ api_key,
+ default_temp=0.0,
+ alt_temps=None,
+ auto_select=True,
+ max_workers=6,
+ ):
+ self.api_key = api_key
+ self.default_temp = default_temp
+ self.alt_temps = (
+ alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
+ )
+ self.auto_select = auto_select
+ self.max_workers = max_workers
+ self.llm = OpenAIChat(
+ openai_api_key=self.api_key, temperature=self.default_temp
+ )
+
+ def evaluate_output(self, output, temperature):
+ print(f"Evaluating output at temperature {temperature}...")
+ eval_prompt = f"""
+ Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria:
+
+ - Relevance: How well does the output address the prompt or task at hand?
+ - Clarity: Is the output easy to understand and free of ambiguity?
+ - Utility: How useful is the output for its intended purpose?
+ - Pride: If the user had to submit this output to the world for their career, would they be proud?
+ - Delight: Is the output likely to delight or positively surprise the user?
+
+ Be sure to comprehensively evaluate the output, it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical.
+
+ Output to evaluate:
+ ---
+ {output}
+ ---
+ """
+ score_text = self.llm(eval_prompt, temperature=0.5)
+ score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
+ return round(float(score_match.group()), 1) if score_match else 0.0
+
+ def run(self, prompt, temperature_string):
+ print("Starting generation process...")
+ temperature_list = [
+ float(temp.strip())
+ for temp in temperature_string.split(",")
+ if temp.strip()
+ ]
+ outputs = {}
+ scores = {}
+ for temp in temperature_list:
+ print(f"Generating at temperature {temp}...")
+ output_text = self.llm(prompt, temperature=temp)
+ if output_text:
+ outputs[temp] = output_text
+ scores[temp] = self.evaluate_output(output_text, temp)
+
+ print("Generation process complete.")
+ if not scores:
+ return "No valid outputs generated.", None
+
+ sorted_scores = sorted(
+ scores.items(), key=lambda item: item[1], reverse=True
+ )
+ best_temp, best_score = sorted_scores[0]
+ best_output = outputs[best_temp]
+
+ return (
+ f"Best AutoTemp Output (Temp {best_temp} | Score:"
+ f" {best_score}):\n{best_output}"
+ if self.auto_select
+ else "\n".join(
+ f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
+ for temp, score in sorted_scores
+ )
+ )
diff --git a/playground/demos/autotemp/blog_gen_example.py b/playground/demos/autotemp/blog_gen_example.py
new file mode 100644
index 00000000..40f5d0e7
--- /dev/null
+++ b/playground/demos/autotemp/blog_gen_example.py
@@ -0,0 +1,136 @@
+import os
+
+from autotemp import AutoTemp
+from termcolor import colored
+
+from swarms.models import OpenAIChat
+from swarms.structs import SequentialWorkflow
+
+
+class BlogGen:
+ def __init__(
+ self,
+ api_key,
+ blog_topic,
+ temperature_range: str = "0.4,0.6,0.8,1.0,1.2",
+ ): # Add blog_topic as an argument
+ self.openai_chat = OpenAIChat(
+ openai_api_key=api_key, temperature=0.8
+ )
+ self.auto_temp = AutoTemp(api_key)
+ self.temperature_range = temperature_range
+ self.workflow = SequentialWorkflow(max_loops=5)
+
+ # Formatting the topic selection prompt with the user's topic
+ self.TOPIC_SELECTION_SYSTEM_PROMPT = f"""
+ Given the topic '{blog_topic}', generate an engaging and versatile blog topic. This topic should cover areas related to '{blog_topic}' and might include aspects such as current events, lifestyle, technology, health, and culture related to '{blog_topic}'. Identify trending subjects within this realm. The topic must be unique, thought-provoking, and have the potential to draw in readers interested in '{blog_topic}'.
+ """
+
+ self.DRAFT_WRITER_SYSTEM_PROMPT = """
+ Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences.
+ """
+
+ self.REVIEW_AGENT_SYSTEM_PROMPT = """
+ Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the authors voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing.
+ """
+
+ self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """
+ Develop an autonomous distribution strategy for the blog article on '{{ARTICLE_TOPIC}}'. Utilize an API to post the article on a popular blog platform (e.g., WordPress, Blogger, Medium) commonly used by our target audience. Ensure the post includes all SEO elements like meta descriptions, title tags, and properly formatted content. Craft unique, engaging social media posts tailored to different platforms to promote the blog article. Schedule these posts to optimize reach and engagement, using data-driven insights. Monitor the performance of the distribution efforts, adjusting strategies based on engagement metrics and audience feedback. Aim to maximize the article's visibility, attract a diverse audience, and foster engagement across digital channels.
+ """
+
+ def run_workflow(self):
+ try:
+ # Topic generation using OpenAIChat
+ topic_result = self.openai_chat.generate(
+ [self.TOPIC_SELECTION_SYSTEM_PROMPT]
+ )
+ topic_output = topic_result.generations[0][0].text
+ print(
+ colored(
+ (
+ "\nTopic Selection Task"
+ f" Output:\n----------------------------\n{topic_output}\n"
+ ),
+ "white",
+ )
+ )
+
+ chosen_topic = topic_output.split("\n")[0]
+ print(colored("Selected topic: " + chosen_topic, "yellow"))
+
+ # Initial draft generation with AutoTemp
+ initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
+ "{{CHOSEN_TOPIC}}", chosen_topic
+ )
+ auto_temp_output = self.auto_temp.run(
+ initial_draft_prompt, self.temperature_range
+ )
+ initial_draft_output = auto_temp_output # Assuming AutoTemp.run returns the best output directly
+ print(
+ colored(
+ (
+ "\nInitial Draft"
+ f" Output:\n----------------------------\n{initial_draft_output}\n"
+ ),
+ "white",
+ )
+ )
+
+ # Review process using OpenAIChat
+ review_prompt = self.REVIEW_AGENT_SYSTEM_PROMPT.replace(
+ "{{ARTICLE_TOPIC}}", chosen_topic
+ )
+ review_result = self.openai_chat.generate([review_prompt])
+ review_output = review_result.generations[0][0].text
+ print(
+ colored(
+ (
+ "\nReview"
+ f" Output:\n----------------------------\n{review_output}\n"
+ ),
+ "white",
+ )
+ )
+
+ # Distribution preparation using OpenAIChat
+ distribution_prompt = (
+ self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace(
+ "{{ARTICLE_TOPIC}}", chosen_topic
+ )
+ )
+ distribution_result = self.openai_chat.generate(
+ [distribution_prompt]
+ )
+ distribution_output = distribution_result.generations[0][
+ 0
+ ].text
+ print(
+ colored(
+ (
+ "\nDistribution"
+ f" Output:\n----------------------------\n{distribution_output}\n"
+ ),
+ "white",
+ )
+ )
+
+ # Final compilation of the blog
+ final_blog_content = f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}"
+ print(
+ colored(
+ (
+ "\nFinal Blog"
+ f" Content:\n----------------------------\n{final_blog_content}\n"
+ ),
+ "green",
+ )
+ )
+
+ except Exception as e:
+ print(colored(f"An error occurred: {str(e)}", "red"))
+
+
+if __name__ == "__main__":
+    api_key = os.environ["OPENAI_API_KEY"]
+    # BlogGen requires a blog_topic positional argument; omitting it raises TypeError
+    blog_generator = BlogGen(api_key, "AI in healthcare")
+    blog_generator.run_workflow()
diff --git a/playground/demos/business_analysis_swarm/business-analyst-agent.ipynb b/playground/demos/business_analysis_swarm/business-analyst-agent.ipynb
new file mode 100644
index 00000000..bc4a9489
--- /dev/null
+++ b/playground/demos/business_analysis_swarm/business-analyst-agent.ipynb
@@ -0,0 +1,1951 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Building Analyst Agents with Swarms to write Business Reports\n",
+ "\n",
+ "Solving a business problem often involves preparing a Business Case Report. This report comprehensively analyzes the problem, evaluates potential solutions, and provides evidence-based recommendations and an implementation plan to effectively address the issue and drive business value. While the process of preparing one requires an experienced business analyst, the workflow can be augmented using AI agents. Two candidates stick out as areas to work on:\n",
+ "\n",
+ "- Developing an outline to solve the problem\n",
+ "- Doing background research and gathering data \n",
+ " \n",
+ "In this blog, we will explore how Swarms agents can be used to tackle a business problem by outlining the solution, conducting background research and generating a preliminary report.\n",
+ "\n",
+ "Before we proceed, this blog uses 3 API tools. Please obtain the following keys and store them in a `.env` file in the same folder as this file.\n",
+ "\n",
+ "- **[OpenAI API](https://openai.com/blog/openai-api)** as `OPENAI_API_KEY`\n",
+ "- **[TavilyAI API](https://app.tavily.com/home)** as `TAVILY_API_KEY`\n",
+ "- **[KayAI API](https://www.kay.ai/)** as `KAY_API_KEY`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import dotenv\n",
+ "dotenv.load_dotenv() # Load environment variables from .env file"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Developing an Outline to solve the problem\n",
+ "\n",
+ "Assume the business problem is: **How do we improve Nike's revenue in Q3 2024?** We first create a planning agent to break down the problem into dependent sub-problems.\n",
+ "\n",
+ "\n",
+ "#### Step 1. Defining the Data Model and Tool Schema\n",
+ "\n",
+ "Using Pydantic, we define a structure to help the agent generate sub-problems. \n",
+ "\n",
+ "- **QueryType:** Questions are either standalone or involve a combination of multiple others\n",
+ "- **Query:** Defines structure of a question.\n",
+ "- **QueryPlan:** Allows generation of a dependency graph of sub-questions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import enum\n",
+ "from typing import List\n",
+ "from pydantic import Field, BaseModel\n",
+ "\n",
+ "class QueryType(str, enum.Enum):\n",
+ " \"\"\"Enumeration representing the types of queries that can be asked to a question answer system.\"\"\"\n",
+ "\n",
+ " SINGLE_QUESTION = \"SINGLE\"\n",
+ " MERGE_MULTIPLE_RESPONSES = \"MERGE_MULTIPLE_RESPONSES\"\n",
+ "\n",
+ "class Query(BaseModel):\n",
+ " \"\"\"Class representing a single question in a query plan.\"\"\"\n",
+ "\n",
+ " id: int = Field(..., description=\"Unique id of the query\")\n",
+ " question: str = Field(\n",
+ " ...,\n",
+ " description=\"Question asked using a question answering system\",\n",
+ " )\n",
+ " dependencies: List[int] = Field(\n",
+ " default_factory=list,\n",
+ " description=\"List of sub questions that need to be answered before asking this question\",\n",
+ " )\n",
+ " node_type: QueryType = Field(\n",
+ " default=QueryType.SINGLE_QUESTION,\n",
+ " description=\"Type of question, either a single question or a multi-question merge\",\n",
+ " )\n",
+ "\n",
+ "class QueryPlan(BaseModel):\n",
+ " \"\"\"Container class representing a tree of questions to ask a question answering system.\"\"\"\n",
+ "\n",
+ " query_graph: List[Query] = Field(\n",
+ " ..., description=\"The query graph representing the plan\"\n",
+ " )\n",
+ "\n",
+ " def _dependencies(self, ids: List[int]) -> List[Query]:\n",
+ " \"\"\"Returns the dependencies of a query given their ids.\"\"\"\n",
+ " \n",
+ " return [q for q in self.query_graph if q.id in ids]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Also, a `tool_schema` needs to be defined. It is an instance of `QueryPlan` and is used to initialize the agent."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tool_schema = QueryPlan(\n",
+ " query_graph = [query.dict() for query in [\n",
+ " Query(\n",
+ " id=1,\n",
+ " question=\"How do we improve Nike's revenue in Q3 2024?\",\n",
+ " dependencies=[2],\n",
+ " node_type=QueryType('SINGLE')\n",
+ " ),\n",
+ " # ... other queries ...\n",
+ " ]]\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Step 2. Defining the Planning Agent\n",
+ "\n",
+ "We specify the query, task specification and an appropriate system prompt."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from swarms import OpenAIChat\n",
+ "from swarms import Agent\n",
+ "\n",
+ "query = \"How do we improve Nike's revenue in Q3 2024?\"\n",
+ "task = f\"Consider: {query}. Generate just the correct query plan in JSON format.\"\n",
+ "system_prompt = (\n",
+ " \"You are a world class query planning algorithm \" \n",
+ " \"capable of breaking apart questions into its \" \n",
+ " \"dependency queries such that the answers can be \" \n",
+ " \"used to inform the parent question. Do not answer \" \n",
+ " \"the questions, simply provide a correct compute \" \n",
+ " \"graph with good specific questions to ask and relevant \" \n",
+ " \"dependencies. Before you call the function, think \" \n",
+ " \"step-by-step to get a better understanding of the problem.\"\n",
+ " )\n",
+ "llm = OpenAIChat(\n",
+ " temperature=0.0, model_name=\"gpt-4\", max_tokens=4000\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Then, we proceed with agent definition."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Initialize the agent\n",
+ "agent = Agent(\n",
+ " agent_name=\"Query Planner\",\n",
+ " system_prompt=system_prompt,\n",
+ " # Set the tool schema to the JSON string -- this is the key difference\n",
+ " tool_schema=tool_schema,\n",
+ " llm=llm,\n",
+ " max_loops=1,\n",
+ " autosave=True,\n",
+ " dashboard=False,\n",
+ " streaming_on=True,\n",
+ " verbose=True,\n",
+ " interactive=False,\n",
+ " # Set the output type to the tool schema which is a BaseModel\n",
+ " output_type=tool_schema, # or dict, or str\n",
+ " metadata_output_type=\"json\",\n",
+ " # List of schemas that the agent can handle\n",
+ " list_base_models=[tool_schema],\n",
+ " function_calling_format_type=\"OpenAI\",\n",
+ " function_calling_type=\"json\", # or soon yaml\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Step 3. Obtaining Outline from Planning Agent \n",
+ "\n",
+ "We now run the agent, and since its output is in JSON format, we can load it as a dictionary."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33mInitializing Autonomous Agent Query Planner...\u001b[0m\n",
+ "\u001b[1m\u001b[36mAutonomous Agent Activated.\u001b[0m\n",
+ "\u001b[32mAll systems operational. Executing task...\u001b[0m\n",
+ "\u001b[36m\n",
+ "Loop 1 of 1\u001b[0m\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "content='{\\n \"main_query\": \"How do we improve Nike\\'s revenue in Q3 2024?\",\\n \"sub_queries\": [\\n {\\n \"id\": \"1\",\\n \"query\": \"What is Nike\\'s current revenue trend?\"\\n },\\n {\\n \"id\": \"2\",\\n \"query\": \"What are the projected market trends for the sports apparel industry in 2024?\"\\n },\\n {\\n \"id\": \"3\",\\n \"query\": \"What are the current successful strategies being used by Nike\\'s competitors?\",\\n \"dependencies\": [\"2\"]\\n },\\n {\\n \"id\": \"4\",\\n \"query\": \"What are the current and projected economic conditions in Nike\\'s major markets?\",\\n \"dependencies\": [\"2\"]\\n },\\n {\\n \"id\": \"5\",\\n \"query\": \"What are the current consumer preferences in the sports apparel industry?\",\\n \"dependencies\": [\"2\"]\\n },\\n {\\n \"id\": \"6\",\\n \"query\": \"What are the potential areas of improvement in Nike\\'s current business model?\",\\n \"dependencies\": [\"1\"]\\n },\\n {\\n \"id\": \"7\",\\n \"query\": \"What are the potential new markets for Nike to explore in 2024?\",\\n \"dependencies\": [\"2\", \"4\"]\\n },\\n {\\n \"id\": \"8\",\\n \"query\": \"What are the potential new products or services Nike could introduce in 2024?\",\\n \"dependencies\": [\"5\"]\\n },\\n {\\n \"id\": \"9\",\\n \"query\": \"What are the potential marketing strategies Nike could use to increase its revenue in Q3 2024?\",\\n \"dependencies\": [\"3\", \"5\", \"7\", \"8\"]\\n },\\n {\\n \"id\": \"10\",\\n \"query\": \"What are the potential cost-saving strategies Nike could implement to increase its net revenue in Q3 2024?\",\\n \"dependencies\": [\"6\"]\\n }\\n ]\\n}' response_metadata={'token_usage': {'completion_tokens': 408, 'prompt_tokens': 108, 'total_tokens': 516}, 'model_name': 'gpt-4', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None}\n",
+ "\u001b[32mSaved agent state to: Query Planner_state.json\u001b[0m\n"
+ ]
+ }
+ ],
+ "source": [
+ "generated_data = agent.run(task)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "At times, the agent could return extra content other than JSON. The function below will filter it out."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def process_json_output(content):\n",
+ " # Find the index of the first occurrence of '```json\\n'\n",
+ " start_index = content.find('```json\\n')\n",
+ " if start_index == -1:\n",
+ " # If '```json\\n' is not found, return the original content\n",
+ " return content\n",
+ " # Return the part of the content after '```json\\n' and remove the '```' at the end\n",
+ " return content[start_index + len('```json\\n'):].rstrip('`')\n",
+ "\n",
+ "# Use the function to clean up the output\n",
+ "json_content = process_json_output(generated_data.content)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "{\n",
+ " \"main_query\": \"How do we improve Nike's revenue in Q3 2024?\",\n",
+ " \"sub_queries\": [\n",
+ " {\n",
+ " \"id\": \"1\",\n",
+ " \"query\": \"What is Nike's current revenue trend?\"\n",
+ " },\n",
+ " {\n",
+ " \"id\": \"2\",\n",
+ " \"query\": \"What are the projected market trends for the sports apparel industry in 2024?\"\n",
+ " },\n",
+ " {\n",
+ " \"id\": \"3\",\n",
+ " \"query\": \"What are the current successful strategies being used by Nike's competitors?\",\n",
+ " \"dependencies\": [\n",
+ " \"2\"\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"id\": \"4\",\n",
+ " \"query\": \"What are the current and projected economic conditions in Nike's major markets?\",\n",
+ " \"dependencies\": [\n",
+ " \"2\"\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"id\": \"5\",\n",
+ " \"query\": \"What are the current consumer preferences in the sports apparel industry?\",\n",
+ " \"dependencies\": [\n",
+ " \"2\"\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"id\": \"6\",\n",
+ " \"query\": \"What are the potential areas of improvement in Nike's current business model?\",\n",
+ " \"dependencies\": [\n",
+ " \"1\"\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"id\": \"7\",\n",
+ " \"query\": \"What are the potential new markets for Nike to explore in 2024?\",\n",
+ " \"dependencies\": [\n",
+ " \"2\",\n",
+ " \"4\"\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"id\": \"8\",\n",
+ " \"query\": \"What are the potential new products or services Nike could introduce in 2024?\",\n",
+ " \"dependencies\": [\n",
+ " \"5\"\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"id\": \"9\",\n",
+ " \"query\": \"What are the potential marketing strategies Nike could use to increase its revenue in Q3 2024?\",\n",
+ " \"dependencies\": [\n",
+ " \"3\",\n",
+ " \"5\",\n",
+ " \"7\",\n",
+ " \"8\"\n",
+ " ]\n",
+ " },\n",
+ " {\n",
+ " \"id\": \"10\",\n",
+ " \"query\": \"What are the potential cost-saving strategies Nike could implement to increase its net revenue in Q3 2024?\",\n",
+ " \"dependencies\": [\n",
+ " \"6\"\n",
+ " ]\n",
+ " }\n",
+ " ]\n",
+ "}\n"
+ ]
+ }
+ ],
+ "source": [
+ "import json\n",
+ "\n",
+ "# Load the JSON string into a Python object\n",
+ "json_object = json.loads(json_content)\n",
+ "\n",
+ "# Convert the Python object back to a JSON string\n",
+ "json_content = json.dumps(json_object, indent=2)\n",
+ "\n",
+ "# Print the JSON string\n",
+ "print(json_content)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The JSON dictionary is not convenient for humans to process. We make a directed graph out of it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAA48AAANtCAYAAAAw5U6GAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdeXiU1dn48e8z+5Z93xcg7IRdEZVNCtZS0boWq1iXtmpbrdTqa2lxRVvX6s/qW6vgbuvCa6VuIEFZhCQQdshCQoDseyazzzy/PyIjMUBmkslMEs7nurgkM888556RzMz9nPvcR5JlWUYQBEEQBEEQBEEQzkAR6gAEQRAEQRAEQRCEgU8kj4IgCIIgCIIgCEKPRPIoCIIgCIIgCIIg9Egkj4IgCIIgCIIgCEKPRPIoCIIgCIIgCIIg9Egkj4IgCIIgCIIgCEKPRPIoCIIgCIIgCIIg9Egkj4IgCIIgCIIgCEKPRPIoCIIgCIIgCIIg9EgV6gAGAtnmwb6xHUdBB8ihjsZ/qlE6dPMjUEQoQx2KIAjCgCXLMnKLGU9TO562DuQ2C3KbBY/ZCk4XstsDHg+SSglKJZJegxRuRBGm7/xvhBFFXASSRh3qpyIIgiAIISHJsjwI06XAcR6yYftvC3KHJ9Sh9ImkltDODUc91YAkSaEORxAEIeRkqx330Xrcx+px1zThqW1Gtjn6dlJJQhETjiIhCmVyDMr0eBRxkeJ9VxAEQTgrnLXJo8fiwf5ZK8591lCHElDKDA36H0WiiBKTyoIgnH08rR24DhzBeaAST20zBOEjTtJrUQ1LRjUuE2VGApJCrAgRBEEQhqazMnl0HbFj/aB50M82no6kltBdEol6nD7UoQiCIPQ72ePBXVqFY0cJ7oqaoCSMpyOZ9KgnDkedOwxFuCFkcQiCIAhCfzirkkdZlnHmW7Cta4WhmTd2oZluRDsvHEkpyqkEQRh6ZKsd585SHDtLkds6Qh1OVwoFqpxUNFNHokyLC3U0giAIghAQZ03yKDtlbGtbcO4dWmWqPVFmaNBfHoXCKJrpCIIwNHg6bDi3HcC5swTZ4Qp1OD1SpsahmTkWVXZyqEMRBEEQhD45K5JH2erB8k4T7uN9bJQwSCkilBiWxKCIFusgBUEYvGSPB+eOEhxf7+l745sQUA1PQXvRZBRRYaEORRCGFLfbjdPpDHUYgjBoqdVqlErfJpqGfPLoMbuxvNWEp+7sflORjAoMP41BmSBazAuCMPi465qxrd2Gp6Yp1KH0jUqJ9vzxqM8ZJRrrCEIfybJMTU0NLS0toQ5FEAa9yMhIEhMTe+wePqSTR0+HG8trjXgaB35ZUzBIegWG62NQxokEUhCEwUF2u3FsPYBjy15wD53F6srkGLSXnIsyNiLUoQjCoFVdXU1LSwvx8fEYDGKrMkHoDVmWsVgs1NXVERkZSVJS0hmPH7LJo2z3YHmjEXf12T3j+H1SmBLjDTEoIkUJqyAIA5unxYxtzSbc1YN8tvF0lAq08yajnjxCfOkVBD+53W6Ki4uJj48nJiYm1OEIwqDX2NhIXV0dOTk5ZyxhHZI1M7JHxvpBs0gcT0Fud2N5uwnZPnSu4AuCMPS4KmqwrP5s6CaOAG4P9s8LsK/dhuwUFTKC4I8TaxwNBrEljiAEwonfpZ7WDw/J5NG+sR1XmT3UYQxYnkYX1v9rYYhOOguCMIjJsozjm/1Y39mAbDk73sedew5jeWMdnhZzqEMRhEFHzNoLQmD4+rs05JJH50Erjs3iA7gnrmKbeJ0EQRhQZI8H+9pt2DcUwVl2cctT04Tltc9xD/aGQIIgCMKQNqSSR3eDC9tHLaEOY9DonKG1hToMQRAEZJcb25rNOPccDnUoISN32LC+tR730fpQhyIIghBUmZmZPPPMM6EOQ/DBkEkeZY+M7aNmZMfZdbW6T2Sw/qcV2SrWPwqCEDqy241tzSZch46GOpSQk+1OLO9uwH1MJJCC
IATP0aNH+fnPf05ycjIajYaMjAx++9vf0tjYGJTx8/PzufXWW4MyltA3QyZ5dGztwF0lGuT4Sza7sX3eGuowBEE4S8myjO3jbbhKjoc6lIHD6cL67424a5tDHYkgCGeBw4cPM3XqVEpKSnj77bcpLS3lxRdfZP369cyYMYOmpv4rp3c4HADExcWJ5keDxJBIHt31TuxftYc6jEHLuceKs1iUrwqCEHyOvCJc+ytCHcaAI9scWP+Vh6fdEupQBEEY4m6//XY0Gg2ff/45s2bNIj09nYsvvph169Zx/Phx7r//fqCzocqaNWu6PDYyMpJVq1Z5fz569ChXXXUVkZGRREdHc+mll1JRUeG9f+nSpSxevJhHHnmE5ORkRo4cCXQvW21paeHmm28mLi6O8PBw5s6dy65du7z379q1izlz5hAWFkZ4eDhTpkyhoKAg4K+N0N2gTx5lWcb2SSu4RblqX9g/bUV2iPJVQRCCx3mgEsc3B0IdxoAlm63YPtyE7HaHOhRBEIaopqYmPvvsM2677Tb0en2X+xITE1myZAnvvvuuTx36nU4nCxYsICwsjK+//prNmzdjMplYuHChd4YRYP369Rw6dIgvvviCjz/++JTnuvLKK6mrq+OTTz6hsLCQyZMnM2/ePO8s6JIlS0hNTSU/P5/CwkLuvfde1Gp1H14JwVeDfqd4114r7kpHzwcKZ+Rpc2PfbEY3JzzUoQiCcBZw17dg/+83oQ5jwHMfb8D+xQ50C6eFOhRBEIagkpISZFlm9OjRp7x/9OjRNDc3U1/f8zrsd999F4/Hw8svv+zd9uHVV18lMjKSvLw8fvCDHwBgNBp5+eWX0Wg0pzzPpk2b2L59O3V1dWi1WgCeeOIJ1qxZw3vvvcett95KZWUlv//97xk1ahQAI0aM8Pu5C70zqJNH2SVj+1KUqwaK45sONFOMKMKVoQ5FEIQhTLY5sL3/NbLDFepQBgXnzhKUSdGoc4eFOhRBEIaonmYWT5fonWzXrl2UlpYSFhbW5XabzUZZWZn35/Hjx5/xfLt27cJsNhMTE9PldqvV6j3P7373O26++WZef/11LrroIq688kqGDRPvkcEwqJNH524Lcrso5wkYt4xjmxnd/IhQRyIIwhBm/3InnmZx4c8f9nWFKDMSUESaQh2KIAhDyPDhw5EkiQMHDnDZZZd1u//AgQPExcURGRmJJEndkkyn87tmlWazmSlTpvDmm292O09cXJz370aj8Ywxmc1mkpKSyMvL63ZfZGQkACtWrOCnP/0pa9eu5ZNPPuHPf/4z77zzzimfgxBYg3bNo+yRcWztCHUYPZI9MrLz2z+DYF2mc6dFbN0hCEK/cR2uwrmrrOcDhS5khwvbf7f5tO5IEATBVzExMcyfP58XXngBq9Xa5b6amhrefPNNli5dCnQmgNXV1d77S0pKsFi+a+o1efJkSkpKiI+PZ/jw4V3+RET4PjExefJkampqUKlU3c4TGxvrPS4nJ4e77rqLzz//nMsvv5xXX321l6+C4I9Bmzy6D9vxNA+skidZlvE0uXAdsePca8X+jRnHJjOOrd/+2WzGvsWMc7cFV7kdd71zwCWUskPGUSS6+wmCEHiyzYHtv9tDHcag5T5Si3NnaajDEARhiHn++eex2+0sWLCAr776iqNHj/Lpp58yf/58cnJy+NOf/gTA3Llzef7559m5cycFBQX88pe/7NKkZsmSJcTGxnLppZfy9ddfU15eTl5eHr/5zW84duyYz/FcdNFFzJgxg8WLF/P5559TUVHBli1buP/++ykoKMBqtXLHHXeQl5fHkSNH2Lx5M/n5+addtykE1qBNHh0FAyfBke0eXBV2HNs6cO614j7iwNPkAscpEkOXjKfFjfuoA9cBG45vOnCV2vBYBs5sn3OHRVzdFgQh4Oxf7UYWW0/0iSOvSGzfIQhCQI0YMYL8/Hyys7O56qqryMjI4OKLLyYnJ8fbMRXgySefJC0tjQsuuICf/vSn
LFu2rMvejAaDga+++or09HQuv/xyRo8ezU033YTNZiM83PeGjJIk8d///pcLL7yQG2+8kZycHK655hqOHDlCQkICSqWSxsZGrr/+enJycrjqqqu4+OKLeeCBBwL+2gjdSfIgzBI8LS7M/68OQhy57JZxH7bjrnEGJBZFjArVcC2SNvQ5veHaaFTDdKEOQxCEIcJd24zl1U9h8H3kDDiqsZnof3xeqMMQhJCy2WyUl5eTlZWFTie+rwTan//8Z5566im++OILzj333FCHIwSBr79Toc9SesFZZAl54uhpduEs6MBdHZjEEcDT6MJRaMFd6+z54H7m2CGubAuCEDj2L3eKxDFAXPsqcFc1hjoMQRCGsAceeIC//e1vfPPNN3g8A6c6Tgi9QddtVZZlnHutPR/Yj+O7yx24j/XT3pIuGdchG55GF6pROiSF1D/j9BRGqR3Z6kHSD8rrC4IgDCDu4w24K2pCHcaQ4vhmP/rLLwh1GIIgDGE33nhjqEMQBqBBlxm4jznxtIRmew5ZlnEV2/svcTyJp8GFc681dA113DLOg7bQjC0IwpDi+GZ/qEPwiUzPe50NFK5DR3E3tIY6DEEQBOEsM+hmHt2H7aEbu9SOJ4glpXKLG9d+K6qx+pDMQLrKbGgmGXo+UBAE4TQ8LWZcJcdDHUY3ng4bcpsF2WzBY7YhW+3gLc2SkDQqJKMOyaRHEaZHijAhKQfW9VbnjhKUP5ga6jAEQRCEs8igSx5dR0KTPLqrHJ3rG4PM0+zGfdiOanjwF4O7jziQZRlJCk3prCAIg5+zqHTArHWUPR489a14qhvxmM+0/EFGdjiRHU5obscNSEolivhIFEkxKAzaYIV8Rq695cizcpG06p4PFgRBEIQAGFTJo+yScR8PfgInWz24ykM441nlRBGrQhEZ3P9dstWDp8GFMk58MREEwX+yy41z9+FQh4EMeKoacVfWIbt6tz+w7Hbjrm7EXd2IIioM1bBkJJ0msIH6G5PdiXN/BZpJI0IahyAIgnD2GFg1OD3w1DkhyGsAZVnGWWKD0Cyz9HIV20Ky/tETgtlWQRCGBlfxMeSO0K6dlm0OXLsP4zpc1evE8fs8ze04d5bgrm4KdeNvnDtLQxyBIAiCcDYZVMljSMpG613IIWrQczLZJuM+2v+Ner4vFK+5IAhDg2tfRUjHd9e14NhRgqetI+Dnlt0eXGXHO0tHXaH7jPDUNuOubwnZ+IIgCMLZRSSPPY3ZhzJZRbgSZboG1TAtqmFalBkaFNG9Lz11VzuRPcG9zu2uEcmjIAj+ky12XIerQza+u7oRV/Gxk5rg9A9Pixnn3nJkZ+gSSNf+IyEbWxCEwaOiogJJkigqKgrKeJmZmTzzzDP9Osb3n1NeXh6SJNHS0tKv457NBlXy6GkITMmRz+OZ3cjt/n8hUEQoUU820JDdxvstH/NM6Us8Xfoi7zX/h6aMdjTTjSjie5FEOuXgvwZ1wR1PEIShwVVR0++J2+m4a5pxlVVBkIpKZbMV175yZHdonm/ncxUEYTD46quvWLRoEcnJyUiSxJo1a/w+x7333suoUaO63Hbw4EEkSWLp0qVdbl+1ahVarRarNXB7pM+ePZs777yzx+Py8/O59dZbezXGqlWrkCSJhQsXdrm9paUFSZLIy8sDIC0tjerqasaNG9ercU5FkiQqKioCdr6hZnAlj63BvbLrrvJ/1k2RoEY1QU+d3MAbO96hxdbK+Vnncn7mDJoszbyx412a3C2oR+lRZvnfsa83MfWFbPcg20PzhUgQhMHLXVkXknE9bRZcpcHfGsRjtuIqPhr0cQE8dS2dW40IgjDgdXR0kJuby//7f/+v1+eYM2cOhw4doqamxnvbhg0bSEtL8yZVJ99+7rnnotfrez1eb8XFxWEw9H7LN5VKxbp169iwYcNpj1EqlSQmJqJSDaoeoIPaoEkeZbeMbA5u8ig3+zfrpohTocrRIkkSXx3ejEqh5meTr+Wc9GmcmzGN
n025FhmZvMNfA6BK06DM8K9bn9zmDnrjnGAn7YIgDH7uo8FPHmWPB1fJMYI14/h9nsY23HUtwR9YlnEfqw/+uIIg+O3iiy/m4Ycf5rLLLuv1Oc4//3zUanWXRDEvL4/bb7+dpqamLrNmeXl5zJkzp8vjDx8+zJw5czAYDOTm5rJ161bvfY2NjVx77bWkpKRgMBgYP348b7/9tvf+pUuXsnHjRp599lkkSTrjLN3JZauyLLNixQrS09PRarUkJyfzm9/85ozP02g08vOf/5x77733tMf0VIprsVi4+OKLmTlzpreU9eWXX2b06NHodDpGjRrFCy+8cNrzNzc3s2TJEuLi4tDr9YwYMYJXX331jHEPdYMneWx3B/X7gOyUke2+DyhpJFQjdN49EY+2HCczOh2D5rsrPWFaExmRaZQ1HMbh6mx+o0zXIIUr/YvNHNyZQLlNJI+CIPhOtjvxNLYFfVx3RW3IZ+Dch6uRHcEv93dXNwV9TEEQ+seKFSvIzMw87f1Go5Fp06Z1mZHLy8tj3rx5zJw503v74cOHqays7JY83n///SxbtoyioiJycnK49tprcX3bjdpmszFlyhTWrl3L3r17ufXWW/nZz37G9u3bAXj22WeZMWMGt9xyC9XV1VRXV5OWltbjc3r//fd5+umneemllygpKWHNmjWMHz/ep9diz549vPfeez0e+30tLS3Mnz8fj8fDF198QWRkJG+++SZ/+tOfeOSRRzhw4ACPPvooy5cvZ/Xq1ac8x/Lly9m/fz+ffPIJBw4c4O9//zuxsbF+xzKUDJo5Xk9bkBMmP2c5FckaJJXk/dntcaFSdH95VUoVbtlDfUcDKRGd9e7KVA2u/b7XosvtbojwL+HsC49IHgVB8IO7thnkIFdIdNhwVzUGdcxTkV0uXOXVqEf2/GUqkDw1InkUhKEiNjaWYcOGnfGYOXPm8O9//xuA/fv3Y7PZmDRpEhdeeCF5eXnceOON5OXlodPpOPfcc7s8dtmyZVxyySUAPPDAA4wdO5bS0lJGjRpFSkoKy5Yt8x7761//ms8++4x//etfTJ8+nYiICDQaDQaDgcTERJ+fU2VlJYmJiVx00UWo1WrS09OZPn16j49LTk7mt7/9Lffffz+LFy/2ebyamhquvvpqRowYwVtvvYVG01np9+c//5knn3ySyy+/HICsrCz279/PSy+9xA033AB0zpKeHPekSZOYOnUqwBmT+rPFoJl5xBHc5NHj5+yeMq5rohhtjKaqrRrPSQ0j3B43VW2d3Qfb7Wbv7YoYpV//JzwdQU6k/ZiBFQRBCEUi46lupLflKZJOgzIxGlVGAqrMRJSpcSiiwryVJH7H0tAa9NlHd21zly88giAMXnfccQfr168/4zGzZ8+muLiY6upq8vLyOP/881EqlcyaNctbzpqXl8d5552HVtu1x8aECRO8f09KSgKgrq5zqYHb7eahhx5i/PjxREdHYzKZ+Oyzz6isrOzTc7ryyiuxWq1kZ2dzyy238OGHH3pnO3vyhz/8gfr6el555RWfx5s/fz7Dhw/n3Xff9SaOHR0dlJWVcdNNN2Eymbx/Hn74YcrKyk55nl/96le88847TJw4kXvuuYctW7b4HMNQNWiSR9kV5A9Ff8aTAF3XLxmTUybSZGnmvwc/p6GjkXpzA//Z/wkd9s79xlye735hJElC0vvxvyLYr0WQ11gKgjC4eYK876DscvdqTEmnQT06Azk3my1SC/9uKOZvZd/wl9LNHIgC9dSRKBOjexGQjLs2uAm0bLYiW0TTHEE4W8ycORONRsOGDRvYsGEDs2bNAmDatGk0NDRw+PBh8vLymDt3brfHqtVq799PXCQ7Mdnx17/+lWeffZY//OEPbNiwgaKiIhYsWIDD0be9xtPS0jh06BAvvPACer2e2267jQsvvBCns+dGkJGRkdx333088MADWCwWn8a75JJL+Oqrr9i/f7/3NrO5c+LmH//4B0VFRd4/e/fu5Ztv
vjnleS6++GKOHDnCXXfdRVVVFfPmzesyM3s2GjTJI8FeQuLPfooKqdsV6skpuczImM7+2gP8Y9sqXt6+mmZrC+dkTANAo9R0O0e/xBYIwU5WBUEY1Dxtvn24B2y8uha/t8lQhBlQ5w5DEROO1eVgU/lBGjraiTeFe4+RtGpUw1NQjUhFwr9ZSE9N8GcC5baOoI4nCELo6PV6zjnnHPLy8ti4cSOzZ88GOhPDc889l3/+858cPXq023rHnmzevJlLL72U6667jtzcXLKzsykuLu5yjEajwe32f0mTXq9n0aJF/O1vfyMvL4+tW7eyZ88enx7761//GoVCwbPPPuvT8Y899hg33HAD8+bN8yaQCQkJJCcnc/jwYYYPH97lT1ZW1mnPFRcXxw033MAbb7zBM888w//+7//6FMNQNWjWPBK8JX6d/EnmZBlZlrslkLOHXcA56VNp6GhCq9IQb4ojr6yz02q0IarbOXzWy1KqXlMGeTxBEAY1OdjJY3O7X8crjHpUYzORVJ0fLCaNjt9ccAkmrY7qtmZe3f5ll+OVCVEgy35tASLbHcgWO5JR51dsfSG3WyEpaMMJgtALZrOZ0tJS78/l5eUUFRURHR1Neno6AM8//zwffvhhj6Wrc+bM4emnnwZg8uTJ3ttnzZrFE0884W2s448RI0bw3nvvsWXLFqKionjqqaeora1lzJgx3mMyMzPZtm0bFRUVmEwmoqOjUSjOPB+1atUq3G4355xzDgaDgTfeeAO9Xk9GRoZPcel0Oh544AFuv/12n5/LE088gdvtZu7cueTl5TFq1CgeeOABfvOb3xAREcHChQux2+0UFBTQ3NzM7373u27n+NOf/sSUKVMYO3Ysdrudjz/+mNGjR/scw1A0aGYeJXWwEyY/jvUAjlMnf3q1nrTIFOJNcQBUNFcSpjURY/iuFEqWZWSrH8ljsFN+lUgeBUHwjSzLeII8AyabfW84JkkSqpFp3sQRQKVUYtKeOclTJkajjI3ot7gCIdivuyAI/isoKGDSpElMmjQJgN/97ndMmjSJP/3pT95jGhoaTrsG72Rz5syhvb2dmTNndtnncNasWbS3t3u39PDHH//4RyZPnsyCBQuYPXs2iYmJ3RrVLFu2DKVSyZgxY4iLi/NpPWRkZCT/+Mc/mDlzJhMmTGDdunX85z//ISYmxufYbrjhBrKzs/16Pk8//TRXXXUVc+fOpbi4mJtvvpmXX36ZV199lfHjxzNr1ixWrVp12plHjUbDfffdx4QJE7jwwgtRKpW88847fsUw1EjyIFlh7zpix/J68DrpuRtduPb5/sGvGqZFmXLmPRv31x7k//atZe7wCzkn/bsrQZ5mF849vo+lzNKiSvNvf8i+0F0cgWaKMWjjCYIweMkWO+Zn3w/eeHYnjvyDPh+vjItEdYZOqCdmHn80ZgoTkjO73OcxW3EWlZ76gacaKykG1bBkn4/vK805o9HOnRS08QQhlGw2G+Xl5WRlZaHTBW+GXxCGKl9/pwZN2aoiLLh1q4ow/yZl3VVOFElqpG/LXStbjrG5fCtZ0Zno1DqqWqvZU7OP7OhMpqVO8T5OlmXcx/xbhOxvbH3l7z6UfWWzWmlpbsLc3k6Huf3b/5pxuVy4XS5k2YNCoUSpUqLRaDGFhWM0mTCFhWEKCyMyOqbLFThBEIJHdvTc/CCg43X4N7un8HP2sMtjTXokgw7ZYvPp+GDPPMr24L72giAIwtln0HzDDnYCI2kUoJFOW476fbLVg/uwHeUwLZIkYdIYkSSJbyrzcbodROgiuDB7JtPTpnSpC/dUO/E0+7foWDIFOZHux9fe5XJRV1NNbVUVDfV1NNTVYm73b/3S90mSguiYGGLi44lPSCQxOYXI6Ohet90XBMF3siu4+8LKTj/fP/u4BlFh0OL2NXn0s4lPnwX5tRcEQRDOPoMneVRJSEYFchD3OFREKvHU+d7m1V3lBKWEMlNDtCGKayZecebja5y4Sv1rrS6Z
FEhBXoMY6OTRbrdRUVpKWUkxNceP9apj15nIsofGhnoaG+op3r8PAL3BQFpmFiNGjiIxJbXHhd2CIPRSsBMYf1de+NMM7ZSP92dT3iDvyRvg91JBEARB+L5BkzwCKCKUuIOYPCqTNH4ljwDuow5kmwfVMG3n7OUpyG4Z9xGH3+WqnTH5t/C5rySN1G0Py96QZZm6mmr2795FeWlJwBPGnlgtFor376N4/z6MJhMjx45j1NjxGE2moMYhCEOeMsgXZvytKOjrMv+B3Bk7yBfFPG4PLocD2e3B8+0sq6RQoFBIKDVqlOpB9RVDEARB8MGgemdXxKo7Z/eCNV6EsleznZ56F44GF4pYFYoIZWf5KxK4ZOQ2N+56F7h78QVGKaGID27yqIhV9anc0+VyUXroAPt37aKxoT6AkfVeh9nMjm3fsHP7djKHDWNs7kSSUlJDHZYgDAkndzENCj+TVdnmQNL2vuGYbPX9op8U5ES6P157WZZxWGxYW8zYWs04LFacVjtOqwOX/cyvhVKtQqXToNZr0Rh06MKN6CPD0IYZRPWHMGDIsozc1I7HbO1c1xwdJpa5CMIZDKrkUZmoxrk7yGMmq3GV+FdaCoDcmUR66v2buTxjLIkqpCDvuahM6t2XLJfTyf49u9izcweWjoHZPl6WPZSXllBeWkJCUjITp00nLSNTfGgIQl8EOXlU+LmG0dPYjiKidxUHst3pV4MeyRDkDpCavn+ky7KMtcVMR2MLHY2tWJvbcTt79znmdrpwO13Y27vu+ykpJHThJowxEZ1/YiNQKIO9mbNwtvO0mLG+/SUd//sx7vIa7+3KrESMt/4I/bVzUUSK6iRB+L5BlTwqglyyCaBIVCNVO5HNQW588H1qCWW6Fo/HE9QrtopE/15zWZapKCvlm6839rnxTTDVVlfx2UdrSM3I4LwL5xARFRXqkARhUJKMus7yyWCt99NrkZRKn9f7eeqakdPikL5XUllwtAy7y0G7vbMZTklDNe32zkRxSuowdGoN7qpG/NndSgrT+3xsICjCDL1+rL3DSsvRWlqO1+G09OKCqR9kj4y1pR1rSzsNZcdQqJVEJMYSmRqPISZCXMAT+p19/Q6ar38M+RT/1t0VtbT9zz9pf/gNol67F+28ySGIcOCoqKggKyuLnTt3MnHixFCHIwwAgyp5VCaqQQEEMY+TJAlVjg7nTguEcEdM1Qgdew/upaCggOioaKJjotHr9Oh0OjRaDWGmMJKSkwI+rj9rLM3t7WzesJ7KivKAxxEsx44c4b03X2PStHPInToNpbgaLgh+kRQKFGF6PK3BqTiQ6ExY5TbfxpNdblylVahGpXVJUrZVFtNq/W6G7FBdFYfqqgAYm5iOxurEU9XgX2zG4CaPkp/Jo+yRaa9roqmiCnN9S/8E5QOP003z0Vqaj9aiDTMQnZFIZGqCWDMp9Av7+h00XfVg5/rlU10M+vY22Wqn6aoHif7XnwZ0Aul2u5EkqdvEgsPhQKMJ3p7g3yfLMm63W2ydNgQNqkUHklrqdRllXyhMSpQZofsFVMSpUMaqSEtLQ6PWUHm0kpLiEkrLStm/fz/52/M5fPhwwMeVtAoU8T3/0suyTPH+fbz/1uuDOnE8wePxULhtK//3r3cGzDpNQRhMpHBjcMeL8G88T2Mr7tKqLrOIt8+8mP+56Cen/BPulHDtP+LfrKNSiWQK8sxjuG/Jo9vlpuHwMYq/zKcyf39IE8fvs7dbqN57mEPrtlO9twyH1bdtUQTBF54WM83XP9aZIHp6+H32dCaXzdc/hqfFHLgYPB7+8pe/MHz4cLRaLenp6TzyyCMA5OXlIUkSLS0t3uOLioqQJImKigoAVq1aRWRkJB999BFjxoxBq9VSWVlJZmYmDz30ENdffz3h4eHceuutAGzatIkLLrgAvV5PWloav/nNb+g4aTlRZmYmjz76KD//+c8JCwsjPT2d//3f//Xen5WVBcCkSZOQJInZs2ef8nmdiP2T
Tz5hypQpaLVaNm3ahMfjYeXKlWRlZaHX68nNzeW9997zvhapqan8/e9/73KunTt3olAoOHLkCAAtLS3cfPPNxMXFER4ezty5c9m1a5f3+BUrVjBx4kRef/11MjMziYiI4JprrqH9pAq4zMxMnnnmmS7jTJw4kRUrVnh/7mkcodOgSh6BkCVxyjQNitjgXz2RTApUIzrXzURGRjLjvBlER0WjVCmx2+zIsozBYCAnZ0TAx1ama5B6aGtvs9n4/OOP2Ljucxz2/i11CrbG+jrWvPMWe3YW+vWlURDOdpKPSUygKBOj/e5s6q5t6kwI7advwibLMu6aJlz7yv3ev1KRENXj+2eg9ZS0ezweGsqOUbx+OzX7ynFaB+57tsflprG8ipIvCzi+q6TH5jyC4Avr2192lqr2lDie4JGRLXas72wIWAz33Xcfjz32GMuXL2f//v289dZbJCQk+HUOi8XC448/zssvv8y+ffuIj48H4IknniA3N5edO3eyfPlyysrKWLhwIT/5yU/YvXs37777Lps2beKOO+7ocr4nn3ySqVOnsnPnTm677TZ+9atfcejQIQC2b98OwLp166iuruaDDz44Y2z33nsvjz32GAcOHGDChAmsXLmS1157jRdffJF9+/Zx1113cd1117Fx40YUCgXXXnstb731VpdzvPnmm8ycOZOMjAwArrzySurq6vjkk08oLCxk8uTJzJs3j6amJu9jysrKWLNmDR9//DEff/wxGzdu5LHHHvPrdfVlHGGQla0CqLK1OLYE7gqQryRJQjVKh2u/DU9T4JrgnHFMowL1eEOXfR0zMjKoqqri+PHjqNVqqquqSUpK6pfySlWW9oz3NzbUs27tx7S1tgR87IHC4/HwzddfUVdTw4Xz5qMOYQmIIAwWipjwoI4nadUoosLwNLX59ThPczvOgkMoYiKQokxI2m/L9N0ePB1WPHUtyLbeJS3KpOhePa63JK36jGsszQ0tVO8pxW72veHPQCB7ZJora2irbiB+ZDrRGclBT8qFoUGWZTr+92N6swap46X/YPjFj/q8Hre9vZ1nn32W559/nhtuuAGAYcOGcf755/t1HqfTyQsvvEBubm6X2+fOncvdd9/t/fnmm29myZIl3HnnnQCMGDGCv/3tb8yaNYu///3v6HSdkxM//OEPue222wD4wx/+wNNPP82GDRsYOXIkcXFxAMTExJCYmNhjbA8++CDz588HwG638+ijj7Ju3TpmzJgBQHZ2Nps2beKll15i1qxZLFmyhCeffJLKykrS09PxeDy88847/PGPfwQ6Z063b99OXV0dWm3n99InnniCNWvW8N5773lnWD0eD6tWrSIsLAyAn/3sZ6xfv947q9sTX8cRBmHyqEzXIJmUyObgb4YsKSRUY3S4im1+7//o91jhStTj9F0SRwClUsnYMWNobGzE0mEhIyODhIQEdu/ZQ3xcPMOGD/P+o+8TBajGnL5TYFnxIb5a9zkuV3AS6VA7XFJMc1Mj8y/5MRGRkaEORxAGNGVicBMnAGVyjN/JI3w7u9jQAg0tAYtFEWFC0gfgfdifMROiT/nF1u10UXOgnOYjNad41ODhdrqo3nuY1qoGUnJHoDUFd3ZbGPzkpvYuXVV9f6CMu7wGubkdKbpvF8YOHDiA3W5n3rx5fTqPRqNhwoQJ3W6fOnVql5937drF7t27efPNN723ybKMx+OhvLyc0aNHA3Q5lyRJJCYmUldX16vYTo6htLQUi8XiTSZPcDgcTJo0CegsHR09ejRvvfUW9957Lxs3bqSuro4rr7zS+xzMZjMxMTFdzmG1WikrK/P+nJmZ6U0cAZKSkvx6Dr6OIwzC5FFSSKjH6nBsC832D5JCQj1KjzvKiavMDq4AlzMqOhNkZZrmtFe4oqKjGT58OCUlJYwfP570jHTq6uooKy0jf3s+mVmZpKSk9OkKmSpLi8LUfTZTlmV2bt9G4batvT73YNXc2Miad99i4Y8Xk5CUHOpw
BGHAUoQgeVREmlBEh/cqgQwks9lMW5SaNJcrqI0ilIndO0Sb65s5vqtkQJen+svS1EbpVztJGJlBTFaKmIUUfObp46y7p92Koo/Jo15/5nXQJ5renLxUxunsXlqv1+tP+R3PaOxaum42m/nFL37Bb37zm27Hpqene/+uVndtjihJEp5edsw+OQazubNScO3ataSkpHQ57uSJjiVLlniTx7feeouFCxd6kziz2UxSUhJ5eXndxoo86WJ+T89BoVB0W4J08mvr6zjCIEweAdQTDSFLHk9QJqhRRCpxldgDVsYqhSlQ5ehQGHsuQc3JySEsLIzU1FQkSSIhIYGYmBjKD5dTVlpGTU0NOTk5hIf37o1OPbH7VV1Zltm26Sv27NzRq3MOBQ67nf9++D7zL/kxqd/W4guC0JXCqEMKN/rcATVQVMOTce7o8Ht9YkAlx3CsqZbq7U0MHz6cuPh4gpHenJywy7JMXXEl9cWVQRg5+GS3h5r95ZgbWkidNBKVJvjbeAmDj6KPDawUAdh6Z8SIEej1etavX8/NN9/c7f4TJaLV1dVEfbtlWFFRUa/Hmzx5Mvv372f48OG9PseJjq1uH7dDOtnJDX1mzZp12uN++tOf8sc//pHCwkLee+89XnzxRe99kydPpqamBpVKRWZmpt8xnBAXF0d1dbX357a2NsrLv2vyGKhxzgaDrmEOgDJOHdLupydIWgXqcXrUkwyd+yH25tWUQBGrQj1Bj2aS0afEETqvsKSnp3dpzaxSqRiRM4LJkychIbFzx06Ki4tPedXqjCGZlKhGdi1ZlWWZrV/lndWJ4wkul4vP/rOG45VD84uZIASCMjUu6GNKGjXK7NBVBUhGPdHjRzBt+nQiIiLYv38/e3bvxmrt/3WGJ15vt9NFZf7+IZs4nsxc18zhr4uwBfkihTA4SdFhKLMS/W6uhSShzEpEigrr+dge6HQ6/vCHP3DPPffw2muvUVZWxjfffMM///lPAIYPH05aWhorVqygpKSEtWvX8uSTT/Z6vD/84Q9s2bKFO+64g6KiIkpKSvi///u/bg1zziQ+Ph69Xs+nn35KbW0tra2tPj82LCyMZcuWcdddd7F69WrKysrYsWMHzz33HKtXr/Yel5mZyXnnncdNN92E2+3mxz/+sfe+iy66iBkzZrB48WI+//xzKioq2LJlC/fffz8FBQU+xzJ37lxef/11vv76a/bs2cMNN9zQpV9IoMY5GwzK5BFAMyW4reDPRBGmRJ2jQ3OuCdUIHYpENZJJwekuN0uGzi0wVMO0aKYbMSdaKa4pDVhHz7DwcCZPmczw4cOpq60jf3s+tTU1Pp9fM8XQrRSocNtW9u0qCkh8Q4HH4+GLtR9RW10V6lAEYUBSZcSHZFxlfCTKpJieDwwwSaNGPTodSZLQabWMHTuW8ePHY7FYyM/P58iRI70uA+uJItKEIsKIrd1C2dc7aa89ezoDOiw2Dm8qorVKbKsknJkkSRhv/VGvHmv8xaI+N8s5Yfny5dx999386U9/YvTo0Vx99dXetXlqtZq3336bgwcPMmHCBB5//HEefvjhXo81YcIENm7cSHFxMRdccAGTJk3iT3/6E8nJvl9kU6lU/O1vf+Oll14iOTmZSy+91K8YHnroIZYvX87KlSsZPXo0CxcuZO3atd4tQE5YsmQJu3bt4rLLLutS3itJEv/973+58MILufHGG8nJyeGaa67hyJEjfnWpve+++5g1axY/+tGPuOSSS1i8eDHDhg0L+DhnA0kepHsQyG4Z8/N1yO0hLE86A4/Hg9vlRuVWInvkzuZeCglJK2Fz2LDbbER+W5KQvz2fHTt3cN555zF27NhuG732hd1up6ysjLq6OiIjIxkxYkS3mvgulBKmO+JRhH13NebA3j1s+nJdwGIaSrRaHZdefa1ooiMI3+NpMdPx949CMrYMuIuP4a5rDsp4klqFalwWCmP3JmNuj4cjFRUcPXoUvV7PiBEjvOVogaKeNALP
OaM4sn0vbsfZ0cTsVJLGDSMmS6xHP1vYbDbKy8vJysrydg3tiafFTN3YnyNbfdyuQyEh6bXE73sFRaSpjxELwsDm6+/UoJ15lJQSmnMGzuzj923dspUP13wIOgmFUYnCpERhUCApJbZt28Z/Pv4Yi8UCgMFowOFwUFhYyK6iXQHtYKrVahkzZgy5EybgsDsoLCik/HD5aWvX1RP0XRLH2uoqtuR9GbB4hhq73ca6tf/B6RB7kAnCyRSRJpQpsSEZWwKUOakok/t/fEmrQTU++5SJI4BSoSA7O5upU6ei0WjYtWsXBw4cwBHA9wx7UhQV3+w5qxNHgOq9ZdSXHA11GMIApog0EfXavZ2lqz01W1JIIElEvX6fSBwF4SSDNnkE0Ew2IOkH3lNoqK+ntLQUc7uZtu/VhsuyTFtbG81NzRTtLMLtduPxeDAYDMiyTHFxMQX5BdhstoDGFBUdzdRpU8nIyODosaPk5+fT2NjY9SAJtDO+e4O0dHSw7r8f91up1VDR1NjAV+u/CFjZsSAMFaqxmSEbWwJU2UmoRmcg9VNDFWV8FOpJw1EYet6Ww2g0kjtxIqNGjaKpqYnt27dTVVXV5/cNm17DseO1eELZJGgAqT1YQcPhY6EOQxjAtPMmE/2vP3VupyNJ3ddAfnubpNcS/e8/o507KTSBCsIANfAyLz9IGgXaC/u+gDmQHA4He/buxW6345E91H5vjxlzezsOuwOdTkd5RTnFh4qx2+wYDAYkSUKv13PkyBG++eYbOjoC2wRAoVCQkZnBtGnTMOgN7Nmzh71793oTVc1kI4rozga8brebdf/9GEuAYxiqDpcUs7dINBMShJOpx2SAOrRNvZUx4agnjUARFxmwc0oaNeoxmahyUpFUvjU5g86ENjExkenTpxMXF0dxcTE7d+70trP3l93tpkanRHaLC3wnq9lXTnPl4N7XUuhf2nmTid/3CuErb0aZ2XU9mzIzgfCVNxO//1WROArCKQzKrTpOpp5iwFlkwV3rX0fR/iDLMocOHqKutg6tVotCqeD48eOMGDHCu9C6qakZt8eNVqtFpVJx8NBBdDodWq0Wt8tNfUM9YaYwLBYLHWbzmdcn9pJer2f8hPHfzpCWkZ+fT8bITEZdMNl7zO7CAtEMxk/bN28iJT2D6JjQlOoJwkAj6bWoR6fj3H04tHGolahHpuFJjcNT3YSnvrlXCZcizIAiMRpFXGSf9hdUq9WMHDmSxMREiouLKSwsJCUlhcysLFRK35JRt8dDlcWCFJfS88Fnoao9pWhMeozREaEORRigFJEmjL9chOEXP0Jubu/cxzFMjxQVFrDmOIIwFA3qmUcASSGhuzjitJ1Ng6mutpbSslKMJiMyMiaTiZaWFlpbWrzHNDQ0oFAo0Gg0KBVKnE4nx44do72tvXPbDoWC8RPGM2fOHOL7sbuTJEnExcczbfo0kpKS2ODazP++9g8qKytpbKhnx/Zv+m3socrj8bDxi897tReSIAxV6skjQh2Cl8KoQzU8GfX00aiGp6CMj0Iy6E7bul9Sq1BEhaFMi0c9cTjq3GEoE6ICtjF9REQEU6ZOJTs7m+rqavK3b6e+vp6eClllWabGYsEdZeq3ktzBTvbIHC04iNNmD3UowgAnSRKK6HBUGQkoosPPisRxxYoVTJw4MdRhCIPUoJ95BFCmatCca8KxtXelP4FSUlKK3WbHarFi6bBgNBhxOp3U1tURGRWFy+Witq4Wp9OJ1WrF1eYiLj4OlVJFamoqY8aOoay0DI1a06VNcX9SqVSMWjiOxAuHs3btWv758svgsJEQF4taLb6U+KuhrpbdhQVMmn5OqEMRhAFBmRSDMiUW9/GGUIfiJSkVKBOjITEaANnjQbY64MRspEJCUquQtP3/HqiQJNLS0oiLi6O0tJR9+/YRExPD8BEj0J+m212jzUaHy4U6Kbrf4xvMXHYHRwsPkjljfEC7mAtCX82ePZuJEyfyzDPPhDoUAP75z3/y
wgsvcPDgQWJjY7nzzju56667Qh2WMEANieQRQDsrDFexDU9j6LrNjRs/jrT0NGqqazh8+DDRMdHYbDY6zJ3rBp1OJzqdjvT0dACampqYOXMm4eHh3kTNarFSXV1NZlZml81L+4ukVaC7JILksGhuuukm3nvnbdau+YCKw2UMGzaMxMTEs+IqXCDt2P4N2TkjxfYdgvAt9TmjcX/wdajDOC1JoUA6TbfUYNHpdIwbN46GhgZKS0vJ376dzMxMUtPSUJz0HtzhdNJkt6MIMyBFDNyO4wOFpamN2gPlJI0d1vPBgjBAyLKM2+1GpQrO1/Qvv/yS5cuXM2HCBNavX88vfvELJk+ezKxZs4IyvjC4DJlLcZJKQv/jSFCGLtGJjIwkIyODpKQk0tPTmTdvHgsXLmTipIlA51rDOXPmcP7555M7YQLh4eFoNJouM3xJyUm4XC7q64Oz4bHu4gjv1hxWiwVLazPTp08nJiaGQ4cO9amZw9nK4/GwdeMG0X1VEL6lyklFESvWnvkiNjaWadOmkZqaSnl5OYUFBbR8u/TB7fFQa7UCoEyNQxoI6zUGgcbDVXQ0tvZ8oCAEwdKlS9m4cSPPPvsskiQhSRKrVq1CkiQ++eQTpkyZglarZdOmTXg8HlauXElWVhZ6vZ7c3Fzee+8977ny8vKQJIn169czdepUDAYD5513HocOHeoy5mOPPUZCQgJhYWHcdNNN3Tr6v/nmmyxevJjs7GxuvvlmwsPDOXpUbHsjnNqQSR4BlCkadD8ID3UYWK1Wb9mpUqnsUi5zYhbPYDQiSRLm9q6JmV6vJzo6mqrj/d+sRjPNiHrcd+WxBd9swel0otFoGD16NBMnTsTlclFYWEhZWZlYy+eHo0cqOFZ5JNRhCMKAIEkS2tm5oQ5j0FAqlWRnZzNl6lRUKhVFRUUcPHiQarMZl8eDItyAFDOwOo0PdMeLinGL7UyEAeDZZ59lxowZ3HLLLVRXV1NdXU1aWhoA9957L4899hgHDhxgwoQJrFy5ktdee40XX3yRffv2cdddd3HdddexcePGLue8//77efLJJykoKEClUvHzn//ce9+//vUvVqxYwaOPPkpBQQFJSUm88MILp41vxYoVGAwGLr744v55AYRBb0gljwDqyQbUEw0hjcFqtaLTn7kESqlUYjAYaDe3d7svOTmJtvY22tva+itElOkatBd9l2ib29soOXCgyzGRkZFMnTqVzMxMjh8/zvYTzRzEjJpPivK3hzoEQRgwVCNSUQ0XnUH9YTIamThpEiNHjqSutZVDR47Qbm5HmZ0sZh395LDYqDtUEeowBIGIiAg0Gg0Gg4HExEQSExO9y5QefPBB5s+fz7BhwzAajTz66KO88sorLFiwgOzsbJYuXcp1113HSy+91OWcjzzyCLNmzWLMmDHce++9bNmyxTu7+Mwzz3DTTTdx0003MXLkSB5++GHGjBlzytgefPBBXnrpJb744gtiYmL694UQBq0hlzxKkoRuYQTK5NA1e7HarD41vAkzhXWbeQSIjolBp9VRVVXdH+EhmZToL49COqnEd/eOQmS5e+t6hUJBRkbn3pAmk4l9+/axd+9erN+WTgmnV1N1nJqq46EOQxAGDO38KSHf93GwkYCExERScnIwGAxU2tvZVXIAc4dYTuCvxvIqrK3idRMGrqlTp3r/XlpaisViYf78+ZhMJu+f1157jbKysi6PmzBhgvfvSUlJANR9u8/4gQMHOOecrk38ZsyY0W3s2tpaVqxYwerVqxk7dmzAnpMw9AzJT3FJJaG/KhrL6kY8zcFtoON2u7Hb7T4lj6YwE3X1dciy3KUpjSRJJCcnU3Gkguxh2QHteippFRiuiUZh+q4Zj8Nup3j/vjM+Tq/Xd23mkJ9PRkYGaWlp/dbFzmG3Y25vx9LRgaXDjNVqxeN2IyOjkBQoVSoMRuO3f0yYwsKC0mTIH/t2FZGYLGZbBAE691XTzpmI/fOCUIcyqDTb7bglidjU
ZAxZcZSUlVBYUEhqaiqZmcFprjYkyFC9t4ys8yaIRnDCgHTy3t4n+k2sXbuWlJSu3yO0Wm2Xn0/+nnji37bH499etjU1NciyzMiRI/16nHD2GZLJI4DCpMSwJJqO1Y3I7cFb53CiTOB0LdZPZjKZ8Hg8WCyWLm8YAIlJiVRUVFBbU0tqWmpAYpPUEvprolEmdk1GSw8dxOl09vx4SSIuLo7o6GgqKio646utJScnh8gAdRaVZZm2lhbqa2toa21FPs2OZ27ZjdvhxuGw09LcBIBSqSImNpbY+AT0htCWLp9QXlqKpaMDg1F0RRQE6Nz30XXwKO7K2lCHMii4PR6a7Z17FapGpBAZbmRK1FSOHT3GkSNHqKurY8SIEcTExohSVh9Ymtow1zURliBK8oTQ0Wg0PfaRGDNmDFqtlsrKyj51PR09ejTbtm3j+uuv9972zTfd9/LOyckhPz+f5OTkXo8lnB2GXNnqyRSRKgzXxaAID95VWdu35Zw6X2YeTSYAzO3d1z1qNBri4uKoqqoKyBpDSduZOKrSNF1ul2WZ/Xt2+XUupVLJsGHDmDJlCmq1mqKiIg4cOIDD4ehTjM1Njewt2klp8UFaW1tOmziejtvtoq62hv17dlFy8EC3bmKhIMseDu7dE+owBGHAkCQJ3SXnBGUPxaGgxeHAI8soU2NRhHdehFJICtLT073LCfbu3cvePXsHxHveYFBfeizUIQhnuczMTLZt20ZFRQUNDQ2nnCUMCwtj2bJl3HXXXaxevZqysjJ27NjBc889x+rVq30e67e//S2vvPIKr776KsXFxfz5z39m377u1WZ79uzhuuuuC1q3f2HwGtLJI4AyRoXhhlgUMcGZZLVabSgVSjQaTY/HqlQq9Ho97afZCiM5JRmL1UJLc3OfYpL0CgxLYlBlaLvdV1dTTXNjY6/OazKZmDhxIiNHjqSpqYnt27f3Ktl1OZ0cLinmcEkxDoe9V7F8X1trCwf27KK2ujrkDX4O7d8b8hgEYSBRRJrQLeq+5kboyiPLtNjtKCKMKDMSu92v0+kYN34c48aNw2w2k5+fT2VlJZ5TrF/vD7Is47TZsbV3YG01Y21tx9pmxmHpXGIwUFma2rA09V9DOkHoybJly1AqlYwZM4a4uDgqKytPedxDDz3E8uXLWblyJaNHj2bhwoWsXbuWrKwsn8e6+uqrWb58Offccw9TpkzhyJEj/OpXv+p2nMVi4dChQz5VoglnN0k+S77VeiwebB824yoPTHJyOiUlJbS0tDBt2jSfjt+/bz8Oh8O7F+TJZFmmsKAQvV7P2HG9W7ysiFNhuDIaRfSpk+dNG9ZzYM/uXp37ZE6nk8OHD1NdXU1YWBg5OTmEhfXcSr69rZXDJSW4XP33ZhUWFk72iBxUAVw76q9LLr+C5NS0kI0vCAOR/avdODbvDXUYA1abw0Gty4l60nCkHhoNud1uKioqOHbsGAaDgRE5I4iMiAx4TE6bHVurGafNgcvhhDN8hVCqVah0GrQmA1qTYUCtM4xMjSd1kljbNZjZbDbKy8vJyspC58NSIUEQzszX36khP/N4gsKgQH9tNJrzTP06zsl7PPrCFGbCbDafcmbqROOchoYG7Hb/k171aD3GG2NPmzi63W7KS0r8Pu8px1KrGTlyJJMmTcLj8VBYWEhJSQku1+kbFrU0N1F68GC/Jo4A7e1tHDqwr89ltX1ReuhgyMYWhIFKc8F4VMPE+prTaXU6UY1O7zFxhK7LCZRKJUU7izjo43r2nsiyjLXVTFNlNc2VNVhbzbjsjjMmjgBupwt7u4W26gYay4/T0dgyYPZabK2q73wOgiAIgl/OmuQRQFJI6OaGo/9JFJKmf66A+p08mky43K7TrlWJT4hHoVRQ7c+2HRJo54WjuzwSSXP6/8X1tTXYbIHdciMiIoIpU6YwbNgwampq2L59O3V1dd2S47aWFg6XFAetvMpmtVJycD+uEJVjHK0oF6WrgvA9kiShu3Qm
yqToUIcy4NjcbpzDklCE+df8y2QyMWnyJHJG5tDY0MD27duorq72riGXZZn9+/dTW+tbwyKnzU7TkWraaxtx2XqfbHlcbjoaW2mqGBjbZcgemeajommTIAiCv86q5PEE9Wg9xlviUGb0vC7RH7IsY7PZ0Ov8Sx7h1E1zoHNdZGJCIlXVVT61XVbEqjAujUU7w9RjiVD1sf5pGqBQKEhLS2PatGmEh4ezf/9+du/ejcViATqnxQ+XFAc9mbJZrSEZF8DS0UFbS0vQxxWEgU7SqtFfNQdFbESoQxlQbOOzUPbyNZGQSE5KZtr06UTHxHDo0CGKdhbR0dFBVXUVBw4eYPfu3WesaJFlGXN9M82VNbgdgbvoJns8tNc20nK8DvcZKlOCoflorbioJwiC4KezMnkEUER1dmLVLYwI2CykzWZDlmX0et9r7zUaDVqt9rRNc6CzcY7D4aCxoeH0J5JAM9OE8eY4lCm+JcXV/byBvU6nY9y4cYwfPx6r1UpBQQHl5eUcOVyK2xOa0qX29jbqa2tCMnZ1lejwJwinIhm06K8RCSQAkoRm/lQ69H1v8qZRaxg9ajS5E3NxOp1s27aNrVu24na7aWpqouQ0yxY8bjfNR2uxNPdfUxlHh5WmIzU4bf3bh+CMMZit2AbALKggCMJgctYmj9BZMqWZasT4i3hUo/q+2NqfbTpOFmYKw9x++g8wo9FIZEQkx6uqTnm/Mk2D8aY4dHPCkVS+JcKyLNPgY9lSX8XExDBt2jRSU1MpOXSQlqa+dY/tq6qjR7GHoKV9fY0okRKE01GEGTAsuQhl8lm8/55CgW7RDJwZcbjsgZvti4qMYuq0qSiVSmrrarHZbKhUKsrKymj8Xrdtj9tNy7E6XEFI6uRvxwplAtlaJbYlEARB8MdZnTyeoIhQYrgiGsNPY1Cm9r6U1Wqzda7h8bPr15ma5pyQnJJMS0sLHR0d38Udo0J/aSSG62NQJvrXSbS9rQ27PXgJlFKpJC01lbiYaFQ+NH/oT26Pm6MV5UEft6G+LuhjCsJgIhm06K+di2pUeqhDCTrJoEV/9WzUYzMxN7QE/PytLa20traSkpLSWe3S3k59fT27d+/2blYuyx5ajge3kYzs8dByvC5kzWvMDa0hGVcQBGGwCu23+AFGla1FmaXBXenAsbUDV5kNf/apt1qt6HQ6v9uRm0wmHE4HDocDrbb7XowAsbGxaNQaqqqqGDVnLJpzjKhG6pAUvSu5bagL/ixYQ32dT+s2T0WlUhMeEYFGo0WSJDweN1aLhfb2tl6tWWlrbcVmtfo9S9wXTQ31uN1ulEpl0MYUhMFG0qjRLZ6J85to7Bt39djRcyhQJEajv+x8FJGda+A7GgOf0Bw/fhyX24XL6UJCQqlS0mHpYP+B/UREhDN58hTM9S1BmXH8PtntobW6geiMpKBv52FrM+NyOFFpQreVkyAIwmAiksfvkSQJVYYWVYYWT4sLxw4Lzp0WZGvPSY+/nVZPOLlpzumSR6VWSdSF8Xxlzmf8T2eg1vSt2U9TQ3BLdWRZpqHO/5k3lUpFRHQsdU3NHD5Wjc1hR6lUEmY0MSwrk4xhw6g5XkV9nX/rGGVk6utqScvI9Dum3vJ4PLS2NBMdExu0MQVhMJIkCc2MMSgSo7D93xZka+jKGvubelwW2oXTvNtxeNyeflmHl5OTQ2JiIg6nA6fDicPhwGazcvx4FbIs47DasLacunFbMLgdTjoaWzHFRgZ3YBmsze2EJYiOv4IgCL4QyeMZKCJV6OaGo70wDFeZHeceC65SO7hOfSXcZrUREeF/wwetVotarabdbCYm9qTEQgJVphbVOD3qUTpG2Qx8+swG9uzZw5QpU3r7tIDTd3ftL63NzTgc/n0B1Gp1jBg1mpa2djwNjaSmJKPTaXG73VTX1LF9xw5yx40lIysLU3gYFWWlfs1CNtbXk5KahiKIM4Hm9naRPAqCj1RZSRhuuhj7p/m4Svu3wVewSXot2vlT
UI3J6DLbZmvrQPYEfrZVo9EQG9v9vWf8+AnIsoemCj+2g+onluY2tCY9at2pL6L2F2urWSSPgiAIPhLJow8klYR6pA71SB2yU8Z93IG7wo77uBN3tRPZ5uncRNlqJTEx0f/zSxJhpjDarO0o0zQok9UoM7Wo0jRIuu+WpUZoI8jJyWH79u1Mnjy5T+U9QU8e/dymQqPRkjN6DBqtlgSdjoT4uC73Z6an8/WWbygrryAjLc2bkJWXnrp74Km43S7MZjPhvUj4e6sjyK+7IAx2ijADuisuxLW3Avu6QuQ+7DU4UKhGpqH9wVQUpu6VKtbW4L9HWFvMuJ2h3TYDAFmmo7GVyJT4oA4rOq4KZ4u8vDzmzJlDc3MzkZGRoQ6n361YsYI1a9ZQVFQU6lCGFNEwx0+SWkKVqUU7OxzDkhhMdydguj0e+WItlek1aKYZUY3QoUxQI5mUSFoFqCRQSkhaCcmoQBGnQjVMi3qyAe3sMPSXRuK4WkX+5P0Yb4hFNz8C9Qhdl8TxhGnTplFbW8uxPu7RGOzk0dLh34dzZvYwNKcp4YXOvSR1Oi2uk/YJi46JJS4+oV/j6itze/+1vheEoUqSJNTjszDc/ENUYzNDHU6vSeFGdJdfgO6y80+ZOALYWjtOeXt/kZGx9jJ5UqrVmGKjcIWp2X78IB8Vfc0HhRv4bM9WSmqO9uqcjg4rbmfgOs36orfPXxB6Y+XKlUybNo2wsDDi4+NZvHgxhw4d8usc11xzDQsXLuxy26effookSaxYsaLL7StWrCA9PbBNyDIzM3nmmWcCes6+kiSJNWvWhDqMs4KYeewjSZKQolS0xVuoTW0m/MdxGOL9L3+JlxJp3bsJi8WCwWA47XHDhg0jOjqa/Px80tLSehWzLMt0mIOXPHo8HqxWi8/Hm8LCCTvFbKDL5cLt8eByOqmpq6e+oYGk7830Jian0FBXh+xjpyNLR3C/qAU7aReEoUQRZkD/4/NwTRyO46vduI8Ojg7Gkk6DeupINOeO9q5tPB17hzVIUXVyWmy4Hf4na8aYSAzR4RyrqWLtxnXERccwdcJE1Co1beZ2ZFkmIiWetppGZLd/+/paW8yY4qL8jqm3nFY7Hrc7qEsYhLPXxo0buf3225k2bRoul4v/+Z//4Qc/+AH79+/HaDT6dI45c+awbNkyXC4XKlXne8qGDRtIS0sjLy+vy7EbNmxgzpw5gX4a/cLtdiNJEgqFmNsayMT/nQBpamoCICqqdx94SUlJAFRXn3ndiSRJTJ06lX379nXZtsMfbper111Pe8Nmtfi1FjE65tT7vO0/eIjP1n/J+q++Zv/BQyQmJDB+zOgux2i0WkzhYT6PFezk0eEY/CV3ghBqqvR4DNddhOG6+ahGpECQO3T6Sgo3op07CeNtl6K9YHyPiSOA0xrcPWitfs90SoQnxmCMicDpcrL+m6/ITEnl8vmXMHHUOMYOH8mMiVM5b9I0tEY9Uanxfidl1rYOny8ABopzCDdlEgaWTz/9lKVLlzJ27Fhyc3NZtWoVlZWVFBYW+nyOOXPmYDabKSgo8N6Wl5fHvffey7Zt27B9u5e1zWZj27Zt3ZLHwsJCpk6disFg4Lzzzusy81lWVsall15KQkICJpOJadOmsW7dOu/9s2fP5siRI9x1112dEyhneP996qmnGD9+PEajkbS0NG677TbM5u9m+letWkVkZCQfffQRY8aMQavVUllZid1uZ9myZaSkpGA0GjnnnHO6JcUny8zMBOCyyy5DkiTvzye8/vrrZGZmEhERwTXXXEP7SRfyPR4PK1euJCsrC71eT25uLu+9995pxxJE8hgwTU1NhIeHo1b3rt13dHQ0Wq22x+QRYOLEiUiSxM6dO3s1lsvPq8B95fJzLY3+NDOvWZkZzJg2jUkTxhMfF4ssy3hOkZTq9aefue0Wmyu463zcQR5PEIYyZVoc+itmYfzlIjTnjUUy+rfHbn9RZiehv+JCjL9ahOac0Uha3z4X
ZI+MK8hrOv1NmkyxkejCOzuEl1QcxmK1Mn3CFCRJwul0drtQqNJqiEiJ8yvBl91u3I7gvleK5FEIldbWzq15oqO/q1pbunQps2fPPu1jcnJySE5OZsOGDQC0t7ezY8cOrrzySjIzM9m6dSsAW7ZswW63d0se77//fp588kkKCgpQqVT8/Oc/995nNpv54Q9/yPr169m5cycLFy5k0aJFVFZWAvDBBx+QmprKgw8+SHV19Rm/tyoUCv72t7+xb98+Vq9ezZdffsk999zT5RiLxcLjjz/Oyy+/zL59+4iPj+eOO+5g69atvPPOO+zevZsrr7yShQsXUlJy6r4W+fn5ALz66qtUV1d7f4bOZHjNmjV8/PHHfPzxx2zcuJHHHnvMe//KlSt57bXXePHFF9m3bx933XUX1113HRs3bjzt8zrbibLVAGlubu71rCN0zigmJiZSU9PzlhMGg4Fx48ZRUFDAeeed5/f0frATGH/3YVQoTn2VOsxkIuzbbU3SUlLYml9AfuEOzp9xbpcrX6d7/CljC+IMLAQ/WRWEs4Ei0oR2Vi6ameNwH67Gua8CV8kxcAfv91sRE45qbCbqsZne/Rr95XI4+qXT6ul43G48frwnqbQa9FHfVXYcra1Co9bQYbXwydfraW1rRa1Sk5M1jJmTp6NSdn7FUOu0GCLDsDT7vubbZXMEde9FkTwKoeDxeLjzzjuZOXMm48aN896elJTUY4XYnDlzyMvL47777uPrr78mJyeHuLg4LrzwQm9jnLy8PLKyssjIyOjy2EceeYRZs2YBcO+993LJJZdgs9nQ6XTk5uaSm5vrPfahhx7iww8/5KOPPuKOO+4gOjq6c8u0sLAem0Teeeed3r9nZmby8MMP88tf/pIXXnjBe7vT6eSFF17wjllZWcmrr75KZWUlycnJACxbtoxPP/2UV199lUcffbTbOHFxnU0VIyMju8Xk8XhYtWoVYWGd710/+9nPWL9+PY888gh2u51HH32UdevWMWPGDACys7PZtGkTL730kvc1EroSyWOANDU1ER/ftw5xiYmJlJaW+nTstGnTKCoqorS0lJycHL/GCfom9X6WlPmabCYnJrBr7z7MHR3epLLz8b5/YZQUwS13C/prLwhnEUmlRJWTiionFdnhxH28AXdlHe6j9Xhqm5ADOJuliA5HkRSNKj0BZUY8UqSpzxvcB33W0c/x9BFdn2NrWxse2cMnX61j9LAcUnKncLyuhj2H9mN3OPjBzNnfPdbP5NFpt6PDt/VfgeC0iyUFQvDdfvvt7N27l02bNnW5feXKlT0+dvbs2dx55504nU7y8vK8M5WzZs3ipZdeAr7rrvp9EyZM8P79xLKpuro60tPTMZvNrFixgrVr11JdXY3L5cJqtXpnHv2xbt06Vq5cycGDB2lra8PlcmGz2br099BoNF3i2bNnD263u9t3W7vdTsxpljWdSWZmpjdxPPF8677dd7y0tBSLxcL8+fO7PMbhcDBp0iS/xzpbiOQxQJqamhg5cmSfzpGUlMS2bduw2+1oz9BpFCA5OZmkpCQKCgoGfPKoVPo3M2q32zCaer5y7/62/Pb7s3l2u+9XkP2ZpQwEkTwKQnBIGjWqrCRUWZ1fjGRZRm5qx13diKfZjNzWgafNgtxmQTZbkJ1uOPnClUqJpNOgCDcghRu//a8BRXwUyoQoJJ0m4DF7gjhTCvjdKEdj7Noh1uly4nK5GDtiFBdMOReA7LRMPB4P+0oOMn38JCLDO5ufKdUqlGq1z51Ug122Kgf5tReEO+64g48//pivvvqK1NRUvx8/Z84cOjo6yM/PZ8OGDfz+978HOpPHn//85zQ1NbFt2zZ+8YtfdHvsyUusTlwQOjHTuWzZMr744gueeOIJhg8fjl6v54orrvC7Z0NFRQU/+tGP+NWvfsUjjzxCdHQ0mzZt4qabbsLhcHiTR71e3+WilNlsRqlUUlhY2O07k8mH74Zneq4nnu+J53pi/eXatWtJ
SUnpclxP38PPZiJ5DACr1YrVau1Sr94bJ67+1NTUdCsx+D5Jkpg2bRr/+c9//C6ZVapUSJLkdzlpb+n8WIMI0Nrc4t23EThlMu3xeDh6vKqzdOKkNxO32425zfer26dbX9lf1JrAf+EUBKFnkiQhxYSjiAk/7TGy2w0euTNxDEETnmCX0fu9pEDV9YvciS6PIzKyu9w+IiObfSUHqW2s9yaPAEq10ufkMZjluxD8xF04e8myzK9//Ws+/PBDb1lpbwwbNoy0tDQ++ugjioqKvCWWKSkppKSk8OSTT+JwOPzutLp582aWLl3KZZddBnQmWBUVFV2O0Wg03gv4p1NYWIjH4+HJJ5/0Lq/617/+1eP4kyZNwu12U1dXxwUXXOBz3Gq1useYvu/kJj2iRNV3omFOADQ3NwP0OXmMjY1FpVL51DQHYPz48Wi1Wr86dEHnAuZgJk0qlQqt1vdGFi1NjTgc380e7tq3ny3b8zlUUsqRo8coLi0jb9NmWtvaGDVihPcLDEBjfT0ut+9XrA2G4JVFARh8bMMtCELwSUolkloVksQRgp8w9bWhqfHbC4N6Xdf39xM/27pVgfjzugb7tQjyeMJZ6/bbb+eNN97grbfeIiwsjJqaGmpqarBav9um57777uP666/v8Vxz5szhhRdeYPjw4SQkfLfP9axZs3juuee8jXX8MWLECD744AOKiorYtWsXP/3pT7utv8zMzOSrr77i+PHjNDQ0nPI8w4cPx+l08txzz3H48GFef/11XnzxxR7Hz8nJYcmSJVx//fV88MEHlJeXs337dlauXMnatWtP+7jMzEzWr19PTU2N93t5T8LCwli2bBl33XUXq1evpqysjB07dvDcc8+xevVqn85xNhLJYwD0dZuOE5RKJQkJCT41zYHOqywTJ05kx44dfjdiMYX5vp1FIPiTNHlkD0fKyrxvVimJiUjAkaNH2bNvH4crKtDrdEyfPJlhWZnex9msVqqO+VeTH+xkLtivuyAIg4fkZ4l/3wf0cz3695Lb2KjO9Ucdlq77+J74+ftJpV8zq0FO4IP+2gtnrb///e+0trYye/ZskpKSvH/effdd7zHV1dU+rTGcM2cO7e3t3Tqzzpo1i/b29l7t7/jUU08RFRXFeeedx6JFi1iwYAGTJ0/ucsyDDz5IRUUFw4YN8zar+b7c3FyeeuopHn/8ccaNG8ebb77p01pO6Oyaev3113P33XczcuRIFi9eTH5+Punp6ad9zJNPPskXX3xBWlqaX+sVH3roIZYvX87KlSsZPXo0CxcuZO3atb2eET4bSHKwaheHsK+//potW7bwhz/8oc/n+s9//sOxY8f41a9+5dPxDQ0NPP/881x++eVdFhz3ZN1/P6a89NQtj/tDXU0NR4+U+/WY6JhYMrKH+dRN1m6zUXJwv1/rHSUkxk2ajCaIpaTzLr6E7BH+rVEVBOHsYGlq4/DmXUEbz9pmpr2m0efjI1MT0Bi+Swjrmxr496cfMSIzm/nnzfbe/sWWPEorK7j+x1dh/LbKRZZlGsqO+ZxAasOMRCTF9nxggMTnpBM/8szLRYSBxWazUV5eTlZWFjrdwNimRxAGM19/p8SaxwBoamrqc8nqCUlJSezcuROXy9WlHPN0YmNjyc7OJj8/36/kMdgzYNGxsRw/eqTH1tMna2pswOV0kpE9DM0ZFi63trRw5HAZTqd/i7kjoqKCmjgCmMJOv95KEISzm1IT3I9ktda/9z9bW0eX5DEuOpZR2SM4eLgEj0cmJSGR47U1lFWWM3lsrjdxBLCbLX7NPKr7oSHRmSjUp3/tPR6ZtlYPba2d/21v99De5sHpAKdTxuMGpQpUKgm1BsIjFISHKwgLV3T+PeLMG6kLgiAMJiJ5DICmpqY+l6yecGJvn9ra2m6dn05n2rRpvPvuu9TU1PS4584JEVGBSXZ9pVKpiIqJpbG+zq/HtbW1sm9XEZHR0YRHRqLV6r7tlOXGarHQ1NBAR4e5VzHF
xSf0fFCARURFBn1MQRAGB5UuuN39lBq1X83TbO0dGKLCUWm/6144a9p5hBlNHDxcQvmxSsKMRmZOPofcUWO9x8gemY7GVr9iU/mZ2PaVRv/da9/a4uHoERc11W5qql3U1XhwOXtfpKXRSsQnKklM6vyTlqEiLFyUyQqCMDiJ5DEAmpube+yO6qv4+HgUCgXV1dU+J48jR44kLCyM/Px8Fi1a5NNj4vq4J2VvxCck+J08QucayKbGBpoaT70ouzd0Oh1hERE9HxhA4RGRfjUOEgTh7KJUKVFqVEHbpkKSJFQ6DU6rj+X+skxrdT1RqQnezqtKpZJp4ycxbfyp1xjJskx7XaPf24KogjjzKMtQU69ia6GF8sMu2loC23nVYZc5dsTFsSPf/X+NjFaQNUzN2PFqklJC091XEAShN0Ty2EdOp5O2traAla2q1Wri4uJ8bpoDnd1Tp0yZwpYtW5g/f75Ptf9RMbEoFAq/ykj7ymA0ER0TG9AksLdS0zOD/mEdG4KEXRCEwUWt1wZ1j0ON0eB78kjn3pAtx+qISI1D2cPSClmWaa9txNbW4VdMaoPOp7XufWWxShSXaSgp16BPcqNQ+rf0oS9amjzsbLKzM99OZLSCCZM0jJ+owWgUM5KCIAxs4l2qj060Aw5U2SpAYmKiz9t1nDBlyhRcLhe7d+/26XilUkl07Kk7ZPWntMws1OrQ7nUYExtHRAD/f/lKJI+CIPREow9udYI+wuh3Z1OXw0HzkRqsLe2n3B9RlmXsHVaaK2v8ThwBDJH9tyZflqGmTsmGzQb+/Z8wivbpsFiVIe222tLk4av1Nl58po2PP7RQfTx4Fw+EoWX27NnceeedoQ7jlAZabEuXLmXx4sWhDmNQEjOPfRSoPR5PlpSUxL59+3C73SiVyp4fQOdeNaNGjSI/P59p06b5NKuWmJxMQ11tX8P1i0qlIj0ri7LiQ0Ed9wS1WkNqRmZIxk5M9q0MWRCEs5c23Ah+dEDtK4VSiS7M4HeS53G7aa9rwtzQglqnQaFUggSyW8Zpt+Nx+bdZtzcelQqNUd+rx56JLMPRKhV7Dmipb+z61UepVSP5tQdl/3C7Yf8eB/v3OEjPVHHu+VoyskK376gw+HzwwQeo1eqeD/TR7NmzmThxIs8880zAzjlUrFixgjVr1lBUVBTqUIJOzDz2UVNTE2q1GpPJFLBzJiUl4XK5Trvx6ulMmzaN+vp6jhw54ts4Kam9Ca/PIqOiSUwKfiKlUCjIHjHCpy62gaZSqYgNQYMeQRAGF31E4D5LfB4zqvddoGWPB4fFhq29A1tbB/YOS68TRwB9ZFjAk6WmFgWfbjDy5SZjt8QRgt/Z1ReVFS7+9UYH77/dQXNT719PYWhwOHwrqY6OjiZM7Cd9Rm63O6hLtoYikTz20YlOq4H8sDvRMdWfdY8AmZmZxMbGkp+f79s4KalIUmj+CaSkpxOX4Ftn2EBQSAqyR4wM2VYZSSmpPs8iC4Jw9gpF8qjWajD0IYEMFJVWgyEqcF98XS4o2KXjP5+bqK0//UXDYHd29cfhUhev/L2dzV/ZcLvFttxDwezZs7njjju44447iIiIIDY2luXLl3fpepyZmclDDz3E9ddfT3h4OLfeeisA77//PmPHjkWr1ZKZmcmTTz7Z7dwnl4ba7XaWLVtGSkoKRqORc845h7y8vC6P2bx5M7Nnz8ZgMBAVFcWCBQtobm5m6dKlbNy4kWeffRZJ6txupqKiAoC9e/dy8cUXYzKZSEhI4Gc/+1mXCY+Ojg6uv/56TCYTSUlJ3eI8lRUrVjBx4kReeeUV0tPTMZlM3Hbbbbjdbv7yl7+QmJhIfHw8jzzySJfHPfXUU4wfPx6j0UhaWhq33XYbZvN3XfhXrVpFZGQkH330EWPGjEGr1VJZWdlt/Pz8fOLi4nj88ccBaGlp4eabbyYuLo7w8HDmzp3Lrl27vOd84IEH2LVrl/e1WbVqVY/PcagQyWMfNTc3B7RkFUCr
1RIdHe33ukdJkpg2bRoHDhygvb29x+N1Oh1pmZm9jLLv0jOzglLKqVQqGT5qFBGRkf0+1ukMGzkqZGMLgjB4qHSaoHYaPcEYG4lSE7hyt94IT4wJ2IXY+kYl//ncxN6DWmT5zOcciDOPJ3O7YXOejTdeMVNXK2Yhh4LVq1ejUqnYvn07zz77LE899RQvv/xyl2OeeOIJcnNz2blzJ8uXL6ewsJCrrrqKa665hj179rBixQqWL19+xqTljjvuYOvWrbzzzjvs3r2bK6+8koULF1JSUgJAUVER8+bNY8yYMWzdupVNmzaxaNEi3G43zz77LDNmzOCWW26hurqa6upq0tLSaGlpYe7cuUyaNImCggI+/fRTamtrueqqq7zj/v73v2fjxo383//9H59//jl5eXns2LGjx9elrKyMTz75hE8//ZS3336bf/7zn1xyySUcO3aMjRs38vjjj/PHP/6Rbdu2eR+jUCj429/+xr59+1i9ejVffvkl99xzT5fzWiwWHn/8cV5++WX27dtH/Pd6UHz55ZfMnz+fRx55hD/84Q8AXHnlldTV1fHJJ59QWFjI5MmTmTdvHk1NTVx99dXcfffdjB071vvaXH311T0+v6FCrHnso6amJkaNCnxikJSU5HfyCJCbm8u6devYsWMHs2bN6vH4EaNGU1l+uDchBkRKWjomUxhHyg/jdAa+0114eATp2dkh3SJDpVKRmT0sZOMLgjB4SJKEMSaC1uP1QR83PDGG5qO1nQsEg8jpdHKkropMk5rk5OQ+ncvjgZ17tT4ljQCSUoFSG9qk2Ve11W5ef7md8+fomD5DK9ZCDmJpaWk8/fTTSJLEyJEj2bNnD08//TS33HKL95i5c+dy9913e39esmQJ8+bNY/ny5QDk5OSwf/9+/vrXv7J06dJuY1RWVvLqq69SWVnp/b1atmwZn376Ka+++iqPPvoof/nLX5g6dSovvPCC93Fjx363R6tGo8FgMHTZQ/z5559n0qRJPProo97bXnnlFdLS0iguLiY5OZl//vOfvPHGG8ybNw/oTJZTU3teKuXxeHjllVcICwtjzJgxzJkzh0OHDvHf//4XhULByJEjefzxx9mwYQPnnHMOQJeZ1szMTB5++GF++ctfdnlOTqeTF154gdzc3G5jfvjhh1x//fW8/PLL3gRw06ZNbN++nbq6OrTazj1gn3jiCdasWcN7773HrbfeislkQqVS+by/+lAiksc+8Hg8tLS0BLTT6glJSUl8/fXXyLLs1weETqdjwoQJFBYWcsEFF/TY7jw9KxudTo/NZu1ryL0WERXFmLBcjh2poLEhMF+YlEolKWkZxCWEfp1hds5I1JqBfWVbEISBIxTJI4BapyU8MYa26uBupxQWE0WcVqa4uJh2czsjRoxA0YslFTa7xFdb9VTV+p4MavS6AdEsx1duN2xcZ6P6uJuLf2xAqx08sQvfOffcc7t8t5sxYwZPPvlkl0aJU6dO7fKYAwcOcOmll3a5bebMmTzzzDOnbLC4Z88e3G43OTk5XW632+3ExMQAnTOPV155pV+x79q1iw0bNpyy10dZWRlWqxWHw+FN7qBzLebIkSN7PHdmZmaXNZsJCQkolcou32UTEhKoq/tuz/B169axcuVKDh48SFtbGy6XC5vNhsViwWAwAJ1J8IQJE7qNt23bNj7++GPee++9Lp1Xd+3ahdls9r5OJ1itVsrKynp8HkOdSB77oLW1FY/HE/CyVehMHu12O01NTd3+8fZk6tSpFBYWUlxc3OOsqEqlImfMWHbvKOhLuH2mUqnIHDac+MQk6mtraW5swO3xvzxHq9URGx9PbFw8qgB2HOuLMeO7X+kSBEE4nbD4aJCAECxx04UZQYa22sagzEDqIkyEJUQTTgwmk4mS4hIsHRbGjh2Lxo+Lbo3NCjZsNmLu8C/p1JhCV5XSF8UHnDQ2mFl8pYGYWLGefigyGo19erzZbEapVFJYWNgtsTyR+On1/nc2NpvN
LFq0yLs28GRJSUmUlpb2LmDo1ilWkqRT3nai4U1FRQU/+tGP+NWvfsUjjzxCdHQ0mzZt4qabbsLhcHiTR71ef8qJmGHDhhETE8Mrr7zCJZdc4h3LbDaTlJTUbX0oQGQIl0ANFGLNYx80NTUBgd2m44TeNs2Bzl/e1NRUnxvnjB4/YcCUvxiMRjKysxk3aTLpmVlERceg053+yrBCUmA0moiLT2D4yFGMzZ1IYnLKgEkc4xISB8TspyAIg4dar8UYExGy8XXhRiKS4zq33+gv35bnhifEeN/fkxKTyJ2Yi9VqpbCw0Ke1+wCVx1X8d73J78RRkiS0JoPfoQ8UjfVuXv+nmcoKsS/kYHPymj2Ab775hhEjRpyxsd7o0aPZvHlzl9s2b95MTk7OKR83adIk3G43dXV1DB8+vMufE98xJ0yYwPr16087pkajwe3ueiF/8uTJ7Nu3j8zMzG7nNRqNDBs2DLVa3eU5Njc3U1xcfPoXpJcKCwvxeDw8+eSTnHvuueTk5FBVVeXz42NjY/nyyy8pLS3lqquuwul0ep9jTU0NKpWq23OMjY0FTv3anC1E8tgHTU1NKBQKIiIC/yFvNBoJDw/v1bpH6Ny2o6ysjMbGnvcLC4+IIC1Eex+ejkqlIi4hkewROYzNncSEKVMZOWYcI0aNYfjIUeSMGsPo8ROYMGUqo8aNJz0rm4jIwHa9DYQxE8SsoyAI/otMDe1FJ61RT3RmUr8kV0qNmqi0BIwxkd3uiwiPYMqUKWi1Gnbu3Elt7Zn3Ii6rULNhswG32//3fo1J3+PSjoHOYZf591tmyoqdoQ5F8ENlZSW/+93vOHToEG+//TbPPfccv/3tb8/4mLvvvpv169fz0EMPUVxczOrVq3n++edZtmzZKY/PyclhyZIlXH/99XzwwQeUl5ezfft2Vq5cydq1awG47777yM/P57bbbmP37t0cPHiQv//9797OqZmZmWzbto2KigoaGhrweDzcfvvtNDU1ce2115Kfn09ZWRmfffYZN954I263G5PJxE033cTvf/97vvzyS/bu3cvSpUv75Xdt+PDhOJ1OnnvuOQ4fPszrr7/Oiy++6Nc54uPj+fLLLzl48CDXXnstLpeLiy66iBkzZrB48WI+//xzKioq2LJlC/fffz8FBQXe16a8vJyioiIaGhqw2+0Bf34D1eB+1wyx5uZmIiMj++3Dp7dNc6BzwbPBYPD+I+/JhClTez4ohFQqFaawMMIjIoiIjCIsIgKn08Wbb77JsWPHQh3eKZnCwhiW03ONvyAIwvdFJMeiVId2ZYlCqSQiOY7wpNiAdGKVlEqMMRFEZySh1mlPe5xWq2XixEnEx8dz4MABysrKumxjcEJpuZqvtxl8aoxzKvrI4G+L0h/cLvjwXx2UHBQJ5GBx/fXXY7VamT59Orfffju//e1vvdtxnM7kyZP517/+xTvvvMO4ceP405/+xIMPPnjKZjknvPrqq1x//fXcfffdjBw5ksWLF5Ofn096ejrQmWB+/vnn7Nq1i+nTpzNjxgz+7//+z7sf9rJly1AqlYwZM4a4uDhv853Nmzfjdrv5wQ9+wPjx47nzzju7fB/+61//ygUXXMCiRYu46KKLOP/885kyZUpgXryT5Obm8tRTT/H4448zbtw43nzzTVauXOn3eRITE/nyyy/Zs2cPS5YswePx8N///pcLL7yQG2+8kZycHK655hqOHDlCwrfVZD/5yU9YuHAhc+bMIS4ujrfffjvQT2/AkuRTvSMLPnnnnXdwuVxcd911/XL+vLw8tm/fzu9///tezah98cUX7Nixg9/97nfdasa/T5Zl/vPev6it9n26P9ROtHTOyMhgwYIFfq2PCYYZF85m3MRJoQ5DEIRBqnrfYRoPHw91GF4Oiw1rSzv2Dqtf6yFVOi36SBO6MKNfn2UyMsePHaesrIyoqEhGjxmDWtX5WXbkmIq8Lb1PHJUaNdGZiYOqWU5PlCq44lojGVkDY9lGf7PZ
bJSXl5OVlYVON3jWrs6ePZuJEyfyzDPPBPzcM2bMYN68eTz88MMBP7cw9Pn6OyVmHvugqampXzqtnpCUlITFYqGtra1Xj58yZQo2m429e/f2eKwkSUyfeX6vxgmVuro6lAoF9fX17N+//5RXpkMlLDyCUePGhzoMQRAGseiMgdUCXmPQEZEcR+ywVCJT4zHGRqI1GVBpNSg1apRqFUqNGo1BhyE6gojkOGKyUohOT0QfbvL7IqiERGpqKhMmTKC9vZ0dhTvo6Oigtl7Jxq29Txyhc9ZxKCWO0DkD+cG7FrEX5FnIbrdTUFDAvn37umy1IQj9QSSPvSTLMs3Nzf3SLOeEpKQkoHdNc6Czkc/w4cN9bpyTmJzCiFGjezVWsFksFurr6zEYjWg0GsrKyvxaJN3fzps121v2IQiC0Btak4GwhP77jOkthUKBxqDH+G2CGJ2RRExmMjFZKcRkJhOZmoDp28QyEKW3UVFRTJ4yBYVCwdZvdrP2CxmPp/eJn6RQoAvvWyfLgcrpkFnz7w6sVk+oQxGC6JNPPmHu3Ln8+Mc/5oorrgh1OMIQJ5LHXjKbzTidzn5NHsPCwjAYDL1e9wid23ZUVVVx/LhvpU/TZ16AVjvwyz8aGhqw2WwYjd+VQe3Zs4eOjo4QRwYZ2cNIz8oOdRiCIAwBscN73lj7bKDX6cmdOJny42OoPNpIS0sLci/3MtFHhQ36Rjln0tLkYe2HlgFVjSN8Jy8vL+Alq4sXL6atrY033nijx2VKgtBXQ/fds5+d2KajP8tWJUnqU9McgBEjRhAREeFz4xyD0ciMWbN7PV6w1NTU4HQ6cTgctLW24nQ6aWtr4/DhwyGNS6vVcf6ceSGNQRCEocMYHRHSbTsGksLdRlTaNCIjI2lpaaG+rt6735uvJIWEYYg0yjmTw6UuNm88e7o/CoIQPCJ57KXm5magf5NH6FvHVegsL5o6dSp79uzBarX69JjhI0eRkT2s12P2N1mWaWtrIzw8nLBvO7COHj2ac845h+zs0M74nTd7DoY+buwrCIJwssSx2Qyx5Xl+qzyu4lCpFgmJyMhI4uPjsVqtVFdX43T53mXUGBPRv/tXDiBbv7ZxrFLsAdkTWZZxNrRgq6jG2dByVszYrlixgokTJ/b68QcPHuTcc89Fp9P16Tz+kCSJNWvWBGUs4cxE8thLTU1NhIeH93t5QFJSEm1tbX0qx5w8eTKyLFNUVOTT8ZIkcf6ceej0+l6P2Z8kSeKCCy5gwYIFXHTRRaSkpJCWlkZGRgbGECZuWcNHiK05BEEIOH2EiZjM5FCHETJ2u8Q3hV0/jwwGA0lJSciyTHVVtU8XR5UaNfrIsP4Kc8CRZfjkPxaczqGfDPWGq6Wd48/+m8Kca9kW/2MKsq9mW/yPKcy5luPP/htXS3u/jDt79mzuvPPOfjl3sPz5z3/GaDRy6NAh1q9fH9Bz9zWxFfqfSB57qb87rZ7Q16Y5AEajkTFjxpCfn+/zFTWD0ci8iy9BkgbmPxGtVotGo0Gn06FQKLBYLCGNJzIqigvnze/VliqCIAg9iR+ZgVp/+r0Rh7LtRTos1u6fRRqNhqTkJLRaLbW1tbS1tZ1xHWR4YvRZ9x7d3Ojh6w22UIcx4DR/tp3taT+h/HfPYzvctbrLdria8t89z/a0n9D82faQxCfLMi7XwJ01Lisr4/zzzycjI4OYmJhencPhcAQ4KiFYBmZmMAj0d6fVE6KiotBqtX0qXQWYNm0aTU1Nfq0JTE5N45zzL+jTuP1NkiT0en1Ik0e1RsP8S36MRnt2frETBKH/KdUqUnJHhDqMoDteo6Ks4vR7+CoVSuLj4wmPCKepqYnGhsZTXiQ1RIWj1p2d79GF2+zUVA3cRCTYmj/bzr4f3YPHau+cnv3+v5dvb/NY7ez70T0BTSCXLl3Kxo0befbZZ5EkCUmSqKioIC8vD0mS+OSTT5gyZQparZZNmzbh8XhYuXIlWVlZ6PV6cnNzee+997zn
O/G49evXM3XqVAwGA+eddx6HDh3qMu5jjz1GQkICYWFh3HTTTdhsXS8o5OXlMX36dIxGI5GRkcycOZMjR46c8jlIkkRhYSEPPvggkiSxYsUKoLNp4dy5c9Hr9cTExHDrrbdiNpu7PPfFixfzyCOPkJyczMiR3Su1Vq1axQMPPMCuXbu8r8+qVau89zc0NHDZZZdhMBgYMWIEH330UZfH7927l4svvhiTyURCQgI/+9nPaGho8On/jeA7kTz2UlNTU1CSR0mSSExM7HPymJaWRkJCgs/bdpwwbuKkAV+KaTAYfF7P2R9mz19AZBD+LQiCcHYzxUURNcD2fuxPbjds39Fz929JkoiOiiY2NpaOjg5qaqq7zNooNSqMsWdv0yFZhi8+sZ4Va/l64mpp58AVf+x8UTw9vB6eziTywBV/DFgJ67PPPsuMGTO45ZZbqK6uprq6mrS0NO/99957L4899hgHDhxgwoQJrFy5ktdee40XX3yRffv2cdddd3HdddexcePGLue9//77efLJJykoKEClUvHzn//ce9+//vUvVqxYwaOPPkpBQQFJSUm88MIL370mLheLFy9m1qxZ7N69m61bt3Lrrbeedpa+urqasWPHcvfdd1NdXc2yZcvo6OhgwYIFREVFkZ+fz7///W/WrVvHHXfc0eWx69ev59ChQ3zxxRd8/PHH3c599dVXc/fddzN27Fjv63P11Vd773/ggQe46qqr2L17Nz/84Q9ZsmSJt4FlS0sLc+fOZdKkSRQUFPDpp59SW1vLVVdd5cf/IcEXYiO6XrBarVit1qCUrUJn6WpxcXGfziFJEtOmTWPt2rW0trYSEeHbB6kkSVw4bz4dZjM1Vb5t9xFser2e2trakIw9feb5ZA4bHpKxBUE4+ySOzsLS1Ia9PbSl+sFwsFRDa7vvzW1MJhNqtZq6ujqqq6uJi49Dp9MTnhR71pWrfl/1cTf7djsZl3v6WdyzQe3qT/FY7N1nG0/HI+Ox2Kl77TOSf9P3/RMjIiLQaDQYDAYSE7tfCHrwwQeZP38+AHa7nUcffZR169YxY8YMALKzs9m0aRMvvfQSs2bN8j7ukUce8f587733cskll2Cz2dDpdDzzzDPcdNNN3HTTTQA8/PDDrFu3zjv72NbWRmtrKz/60Y8YNqyzWeLo0aff8zsxMRGVSoXJZPI+h3/84x/YbDZee+01b++J559/nkWLFvH444+TkJAAdC6jevnll9FoTv3vUK/XYzKZUKlUp3x9li5dyrXXXgvAo48+yt/+9je2b9/OwoULef7555k0aRKPPvqo9/hXXnmFtLQ0iouLycnJOe1zEvwjZh574USn1WDMPEJn8tjU1ITd3re22+PHj0etVlNYWOjX41RqNT9Y9GNi4uL7NH5/MRgM2O123G53UMedOHUauVOmBXVMQRDObkq1ivSpo1Goh3bHULcb9h3yv8xUq9WSnJyMSqWitqYWu9KNWnt2J0wnfLPZdlbPPsqyTPXz7/fqsVXPvReU127q1Knev5eWlmKxWJg/fz4mk8n757XXXqOsrKzL4yZMmOD9+4leGXV1dQAcOHCAc845p8vxJ5JR6Pwuu3TpUhYsWMCiRYt49tln/a52O3DgALm5uV2aFs6cOROPx9OlhHb8+PGnTRx9cfLzNBqNhIeHe5/nrl272LBhQ5fXatSoUQDdXi+hb0Ty2AvB2OPxZCeuvvSlaQ50fqjm5uayY8cOvxMtrVbHxZdeNiATSIPBABDUdY/jJ01h6oyZQRtPEAThBK3JQOrEgb2coK9KK9SnbJLjC6VSSWJiImGxkZRVVlBSUoJH9m8/yKGoqcFDycGzd+2jq7EVW1mV77OOJ8gytrIqXE1t/RPYSU5Ovk6sF1y7di1FRUXeP/v37++y7hHo0vn/xCy7P3ugvvrqq2zdupXzzjuPd999l5ycHL755pu+PJVT6mtH/O/vcCBJkvd5ms1mFi1a1OW1KioqoqSkhAsvvLBP4wpd
ieSxF5qamtDr9eiDtJVFXFwcKpWqz+seobNxjtls5uDBg34/Vm8wcMnlPyEhaWC1jD/x/yFY6x6nzpjJOedfcNaXQQmCEDrhiTHE56SHOox+IctwoKRvzW00Bh0jJ44jJyeHqqoqdu/ajcPZ2d2xsamJbdu3Y3f4V83j9si0tnqoOu6i+KCTnYV2CrbZyf+m879FO+yUHHJSXeWirc2Dp6c1dSFQmN+3CqbBzG3u23cEd4BKxTUajU8X8MeMGYNWq6WyspLhw4d3+XPyOsmejB49mm3btnW57VSJ4aRJk7jvvvvYsmUL48aN46233vJrjF27dnXZVm7z5s0oFIpTNsY5E19fn++bPHky+/btIzMzs9vrFcpt3IYikTz2QrA6rZ6gUChISEgISPIYHx9PRkaG341zTtBqdfxw8eUDqomOWq1GrVb3+8yjUqlk1kU/YNK06SJxFAQh5OJy0okegg10auuVtLT2vixXpVUTkRKHJEkkJyczceJEOiwWdhTuoK6+nh07dlB++DDHjvm2jt9i8VBW6iR/q529RQ7KS13U17qxmGXsNhmHvfO/He0ydTVuDpe42LPTQf43dsoPO7HZBs6s59EKF/V1wV3iMVAoTX274K8MMwQkjszMTLZt20ZFRQUNDQ2nnSEMCwtj2bJl3HXXXaxevZqysjJ27NjBc889x+rVq30e77e//S2vvPIKr776KsXFxfz5z39m37593vvLy8u577772Lp1K0eOHOHzzz+npKTkjOsev2/JkiXodDpuuOEG9u7dy4YNG/j1r3/Nz372M+96R19lZmZSXl5OUVERDQ0NPi/Zuv3222lqauLaa68lPz+fsrIyPvvsM2688cagL2sa6kTy2AvB6rR6sqSkpIAkj9A5+1hRUeGtE/eXSq1mzoKLOfeCWQNmH0iDwdCvyaMpLIxFV1xNzpix/TaGIAiCPyRJImnccCJS4kIdSkAdKuv9miilRkVESjwKxXefTREREUyZMhmFQsHnn31GVVUVarWaw4cP43Kfvoyzo8PD3l0OduY7qDnu5gyHnpLLCVVH3RRud7B/rwOrdWAkkUWFZ+f+eqqYCHTDksHfi7+ShG5YMqro8IDEsWzZMpRKJWPGjCEuLo7KysrTHvvQQw+xfPlyVq5cyejRo1m4cCFr164lKyvL5/Guvvpqli9fzj333MOUKVM4cuQIv/rVr7z3GwwGDh48yE9+8hNycnK49dZbuf322/nFL37h8xgGg4HPPvuMpqYmpk2bxhVXXMG8efN4/vnnfT7HCT/5yU9YuHAhc+bMIS4ujrffftunxyUnJ7N582bcbjc/+MEPGD9+PHfeeSeRkZFd3g+EvpPks3n1dC899dRTTJw4kblz5wZtzMLCQtauXct9993XrebbX263m6eeeoqxY8fywx/+sE/nqjp2lPWfrMUWwq0yAA4dOkR7e3uXxeaBkpyaxtyFP0RvCMxVR0EQhECSPTJVu0toPhqartOBZLVJ/OujMGTZ/+qOzhnHeJSq7rOWsiyza9cuCgoKkBQKIsLD8cgyM2bMIC01tduxx466OXrERSCXSiqUkJGlIilFiUToqlc0Wonb7gpHoxncFTQ2m43y8nKysrLQ6Xre0gXg+LP/pvx3z/u37lGSyH761wHptioIA5mvv1MiFfeT0+mkra0t6DOPiYmJeDyeXs8WnkypVDJlyhR27dqFw9G3K5DJqWn85Kc/IyN7WJ/j6gu9Xo/VGth9rJRKJdNnns/Fiy8XiaMgCAOWpJBIzh1B7LCUUIfSZ+WV6l4ljmq9lsjUUyeOAJWVlZSUlhIVFUV0VBRms5m21lbKSku7NNOx2WR273RQWR7YxBHA44byUhd7dzlxOEJ33d5hlyk95AzZ+KGUcMNCFAYtKHz8N6aQUBi0xF+/oH8DE4RBRCSPfgr2Nh0nJCQkoFAoAla6OmXKFBwOB7t37+7zuQxGI/MvWcScBRej1fp29S/QDAYDbre7z8nwCfGJ
iVx2zRJyp0wT5Q6CIAx4kiSROCablIk5SMrB+55VXul/ZY0+wkRkajwK5enXSSpVKuLi4rzNOJQqFTa7neLiYo4ePQZ0rm3cU+TA3N6/iV1bi4c9uxzYbKFLIPfvOUtLVyPDGP3ew52lqz0lkAoJJInR7z+MKjIsOAEKwiCgCnUAg82J5DFY23ScoPr2gy9QyWNERAQ5OTnk5+czZcqUPjeAkSSJ4SNHkZyaRv6WTRQf2B+QOH118nYdWm3vu/RptFqmnnseo8dPEEmjIAiDTlRaArowA5UFB3BaB1dnTatNor7Rj68lEoTFR6OPMPV4aGpKCqkpKdjtdlrb2mhra6OutpaS0lKOHj1KfFwq+3Y7cdiDk9DZLDL7djsYP1ETkvLRI+UunE4ZtXpwl672RtSC6Yz9+C8cuOKPeCzf/o6cXLX07fchhV7L6PcfJuoH00MQpSAMXOLbsZ+amppQq9WYTD1/WAVaIJvmQGfjnNraWo4ePRqwcxqMRmbNX8CPr7yaxOTglVDpdDokSep10xyFQsHocRO46mdLGZs7USSOgiAMWvrIMIZdMBFTfHAvcvZVTZ3viaNSrSIqLcGnxPFkWq2W+Lg4hg8bxnnnncfPfnYd554zg/17gpc4nmCzyuzf68ATgtYTbjdUHTt793yMWjCd6UffJ/vpX6PLTupyny47ieynf830Yx+IxFEQTkHMPPrpRKfVUGzVkJSUxN69eztLbs5QnuOrYcOGER0dTUFBAenpgd0vLCEpmUVXXEVtdRVFBfkcrSgP6HrE71MoFOh0Or/3etRotYwcM47xkyZjDMEFAUEQhP6g0mrImD6W5qO11Ow/jMc58FvV19b79rmmjwzDGBsRkIt8CklBeYUTmzU0JaQd7TLHKt2kZwT/69ixSjcZWX1rwDeYqSLDSP7NFST9+ie4mtpwt1tQhhlQRYeL7biGsBUrVrBmzRqKiopCHcqgJZJHPzU3Nwe9ZPWExMREXC4XDQ0Nfu+bcyqSJDF16lTWr1/PggUL+mUT1YSkZBYsupT2tlYO7t3DwX17+60zqz/bdcTExTNmwgSG54xC1cfutYIgCAORJElEpydiioukancp5rrmUId0Rg1NZ04elWoVYQnRaAyBW1vf2uKhpiq0ifWxShfRMQpMpuBWvFQfP3tnHk8mSRLqmAjUMRGhDqVfiaRJCBRRm+enUOzxeEJiYudm0IEsXZ04cSKSJLFz586AnfNUwsIjmHbe+Vx7483Mu/gSMrKHBbw0tKfkUafXM27iJBZf/VMuu+anjBo7XiSOgiAMeRq9jozpY0mfOhq1vvdrwvuT2w3NradOHiVJwhgbQXRmUkATR1mWKS12Qog3LJM9UFYc/O6nNdXufq0IEgYGWZZxuYb+hYJANUwUeiaSRz94PB5aWlpCljxqtVpiYmICmjwaDAbGjRtHQUEBHk//b2CsUqnIHpHDD370Y6675RcsWHQp4ydNIT4xsc+luAaDAZvN5n0eOr2etIxMps88n0uvupYlN93KjAtnE5eQIEpSBEE4q0iSRHhSLCPmTCFpXPaASyJb2xS43V3flyVJQh8VRnRWEsboiIC/bzc1enpdripJEBauID5RSWKykrgEJaYwBb3dvtHcLtPa2v+fwSezdMj93llW6O69995j/Pjx6PV6YmJiuOiii+jo6ABg6dKlLF68mAceeIC4uDjCw8P55S9/2SUxstvt/OY3vyE+Ph6dTsf5559Pfn6+9/68vDwkSeKTTz5hypQpaLVa3njjDR544AF27dqFJElIksSqVauQZZkVK1aQnp6OVqslOTmZ3/zmN6eNvaysjEsvvZSEhARMJhPTpk1j3bp1XY7JzMzkoYce4tprr8VoNJKSksL/+3//r8sxkiTx97//nYsvvhi9Xk92djbvvfdel2P+8Ic/kJOTg8FgIDs7m+XLl+N0fneRZcWKFUycOJGXX365y76ELS0t3Hzzzd7Xb+7cuezatcvP/0vCmYiy
VT+0trbi8XhCVrYKgW+aA52Nc4qKiigtLSUnJyeg5z4TrVZHelY26VnZALjdblqam2ioraW1pQVzexvm9nY6zO10mDuQT7HplkqlwhQWhjEsnOi4eOpb2pg68wJGjhqF0RQmkkRBEISTKJRKYrJSiMpIoq2qgaYj1Via2kIdFk0nzToq1Er0ESb0EaYzbr/RV9U+lqt6PA6aWvOx2qux2WswGO1Mn7aIzPQJ3Y5tbKwnv+ALqmuOIklKjPos4mPmoFL2vFdwTZWLiAiN38+jL+pq3ISFi3mEYKmurubaa6/lL3/5C5dddhnt7e18/fXXXWaA169fj06nIy8vj4qKCm688UZiYmJ45JFHALjnnnt4//33Wb16NRkZGfzlL39hwYIFlJaWdpncuPfee3niiSfIzs5Gp9Nx99138+mnn3qTvYiICN5//32efvpp3nnnHcaOHUtNTc0ZEy2z2cwPf/hDHnnkEbRaLa+99hqLFi3i0KFDXXpn/PWvf+V//ud/eOCBB/jss8/47W9/S05ODvPnz/ces3z5ch577DGeffZZXn/9da655hr27NnD6NGjAQgLC2PVqlUkJyezZ88ebrnlFsLCwrjnnnu85ygtLeX999/ngw8+8E5AXHnllej1ej755BMiIiJ46aWXmDdvHsXFxSGb/BlqRPLoh6amJiD4ezyeLCkpieLiYmRZDlhilJycTHJyMvn5+UFNHr9PqVQSExtHTGxct/tkWcbtduN2u/C4PSiVSlRqdZfS146ODvYcKsFgCsMUFh7M0AVBEAYVhUJBZGo8kanxWFvNNB+ppqWqPmSNdTosSjRGHfpIExqDvt8v/FksHlpbfJvpc7utNDRvRasJIzs7EbvzGJpTbHFhs7Wx58BbhEXoSEmZQ8VhC40tBRyraSAj+Tok6cyJcGODB4dDDurWHW1twZ3tPNtVV1fjcrm4/PLLycjIAGD8+PFdjtFoNLzyyisYDAbGjh3Lgw8+yO9//3seeughrFYrf//731m1ahUXX3wxAP/4xz/44osv+Oc//8nvf/9773kefPDBLsmayWRCpVJ5l0ABVFZWkpiYyEUXXYRarSY9PZ3p00/fYTY3N5fc3Fzvzw899BAffvghH330EXfccYf39pkzZ3LvvfcCkJOTw+bNm3n66ae7xHPllVdy8803e8/zxRdf8Nxzz/HCCy8A8Mc//tF7bGZmJsuWLeOdd97pkjw6HA5ee+014uI6vzdu2rSJ7du3U1dX59227YknnmDNmjW899573Hrrrad9boLvxOUmPzQ1NaFQKIiICN2i6sTERBwOhzeRDYQTjXNKS0u9+1gONJIkoVKp0Gp16A0GNFpttzWTBoMBnU5HQ0NDiKIUBEEYfPQRJpInjGDU/HNJmzKaiORYlJr+Xw8uKRUYYyNIGpdNVM5IIlPi0RoNQakYaWr0+LzWUak0kpP5KxZd8msmTph32uPKK7ficTuZPPEaxo+bzsyZF5CSsAibvZ7W9r09jiN7oLkpuMlcu0gegyo3N5d58+Yxfvx4rrzySv7xj390+96Vm5vr3bsaYMaMGZjNZo4ePUpZWRlOp5OZM2d671er1UyfPp0DBw50Oc/UqVN7jOfKK6/EarWSnZ3NLbfcwocffnjG9ZFms5lly5YxevRoIiMjMZlMHDhwgMrKyi7HzZgxo9vP34+vp2PeffddZs6cSWJiIiaTiT/+8Y/dxsnIyPAmjgC7du3CbDYTExODyWTy/ikvL6esrKzH10PwjZh59ENzczORkZEh3QMwKalzP6Lq6mpiYmICdt7x48fz+eefU1hYyEUXXRSw8waTJEnExsbS2NgY6lAEQRAGHYVSQURyLBHJsciyjL3dgqW5DWurGVurGVtbB7Kn92vkNCY9+nAT+kgT+sgw9JFhKJSdn6cdW8xA8BKZDrPvYykUKrKG6YiIVNB2hgrfurpDxMYOR6/rvMAcHaMkJ2cYNfVRtHccIjI89/QP/pbZ7CGB/ivV
/b62VrHmMZiUSiVffPEFW7Zs4fPPP+e5557j/vvvZ9u2bWRlZQV0LF866KelpXHo0CHWrVvHF198wW233cZf//pXNm7ciPoUDQWXLVvGF198wRNPPMHw4cPR6/VcccUVAW9Ws3XrVpYsWcIDDzzAggULiIiI4J133uHJJ5/sctz3n6PZbCYpKYm8vLxu54yMjAxojGczkTz6IZSdVk8w/H/27js+qipt4PjvTq9JSCEZICGBEDoBBBFQAUVBhRVULKsiou5aWEVFxbWBBdAVV5S1vBZQV2VdBRZFQUQCiIAB6Z2YEISEkl6mz33/GDMypM0kk0yC57uffNbM3HvPuTfJcJ97znkeg4HIyEjy8vLo1atXyI6rVqvp27cvP//8M8OHD0elap2/GjExMWLkURAEoZEkSUIXYUQX8fvNmcfjwWW147Tacdi8/++yOZA9MrLHg+yRUSgVSEoFSrUKtV7725cOtV6LUlV7UFRa2rxBTDCJYtQaiYR2dQd0NlspDmclEeYEv9fbJyox6BMoq8gOsF/NOxIopq02P0mSGDp0KEOHDuXpp5+mY8eOLFmyhIceegjwjp5ZrVb0ej0AmzZtwmQykZiYSGxsLBqNhg0bNvimvTqdTjIzM5k6dWqd7Wo0Gtzu6tPS9Xo9Y8eOZezYsdx3331069aNXbt20b9//2rbbtiwgUmTJjF+/HjAG6zl5ORU227Tpk3Vvq9ay3jmaxMnTvT7vl+/fgD8+OOPdOzYkSeeeML3/pEjR+o8P4D+/fuTn5+PSqUiOTm53u2FhmmdEUKYFBYWtohfRovFQn5+fsiPO2DAADZt2sTevXvp06d6IoDWIDY2NuRrQgVBEATvOkmNUY/GqCfUVYHttuYLHl1uGVsQ7cW1VaBQ1P3vicPhzZap0fhfGZVKIjo6guJSGx6PC4Wi7tuuygq5Wf/9as7rLsDmzZtZvXo1l19+OW3btmXz5s2cOnXKL7ByOBzccccdPPnkk+Tk5PDMM88wZcoUFAoFRqORe+65h0ceeYTo6GiSkpJ46aWXqKys5I477qiz7eTkZLKzs9m+fTsdOnTAbDbz6aef4na7GTRoEAaDgX//+9/o9XpfYHq2Ll26sHjxYsaOHYskSTz11FM1ZurfsGEDL730EuPGjWPVqlX897//Zfny5X7b/Pe//2XAgAFceOGFfPzxx/z000+89957vnZyc3NZtGgRAwcOZPny5SxZsqTe6zty5EgGDx7MuHHjeOmll0hLS+P48eMsX76c8ePHBzSVV6ifWPMYIFmWKSoqCmum1SoJCQnk5eWFvD5TbGwsnTp18kv53NrExMRgtVrrrPcoCIIgtCxOZ/MFMU6HHFRtR4Ox/lslt8dbQqCm4FBv8E7/k+X6a+153N6al82lOa+7ABEREaxbt44rr7yStLQ0nnzySebOnetLfgNw6aWX0qVLFy6++GJuuOEG/vSnPzFjxgzf+3PmzOHaa6/l1ltvpX///hw+fJiVK1fWe3967bXXMnr0aEaMGEFcXByffvopUVFRvPPOOwwdOpQ+ffrw3Xff8eWXX9a6LOqVV16hTZs2DBkyhLFjxzJq1KgaRygffvhhtmzZQr9+/Xj++ed55ZVXGDVqlN82M2fOZNGiRfTp04cPP/yQTz/9lB49egDwpz/9iQcffJApU6bQt29ffvzxR5566ql6r68kSXz99ddcfPHF3H777aSlpXHjjTdy5MgR4uPj691fCIwkiwqxASkrK2Pu3LncdNNNdO3aNax9OXDgAJ9++ikPPvhgyJP37Nu3j//85z/cfffdfhm5WouTJ0/yxhtvMHnyZL+00YIgCELLNXdWMe5mqmNeUeFh+5bA12h17aEmNs47bbW0NI/NWz+gZ7eraGf5PUum7/XuV9EuwT975uafVrN7z0bSkqfWO/IIMHCwttkyrpojFNwztXVmJ7fZbGRnZ/vV+GvtJk2aRHFxMUuXLg13VxosOTmZqVOn1jmNVpIklixZ
wrhx45qtX0L9Av2bEiOPAarKbtoSRh7PTJoTal27dsVsNrfa0cfo6GgkSRLrHgVBEFqR+qaFhlKwM0IDecReNV21avrqmRyOMpRKXUCBIwTfv8aQxF2gIAhBEh8bAapKpdwSgkez2YzRaGyS4FGhUDBgwAB27tyJzWYL+fGbmkqlIioqSmRcFQRBaEVqSOzYZJTK4KIzh73+6FGni0CjNlBaVj0fQXFJHlpN24DbUzRfslXUNdSrFARBqItImBOgwsJCIiIiakxd3NwkSWqypDngzVa1du1aduzYwaBBg5qkjaYkMq4KgiC0Liq1RFALERtBowWlioCnyRYVemifWP92bePSyMvfjdVW4ivXcbogm+LiAqIj+gXUllYnoWzGUdhWmlj9nLVw4cJwd6HRasq+ejaxYq51Ex8bASosLGwRo45VEhIS2LlzZ5Mc22w2061bN7Zs2cL555/f6rKWxsbGcvjw4XB3QxAEQQiQySxRWtw8bUlIGE0KSosDK1NRUuzh4OFMVCoHdkc5AKcKDmGze4s+JrY/D7VaR3LHwZw4dYCfty8iscN5uN0ODhzcjEQcEabASmuZzM37763ZLCagCYIQHPGpEaCioqKw13g8k8ViobS0lIqK6usrQmHgwIGcOnUqoLo6LU1MTAyFhYU11jMSBEEQWp7mDmJMpuCCtO3bNnH4l3X8emwbACdPHSQrez1Z2etxuewA6HWRDOj7Z/T6SA5nZfBLzmYcto4kJlwX8HpHo6mZr0NE63o4LAhC+ImRxwAVFhbSrVu3cHfD58ykOampqSE/fnJyMrGxsWRmZraI2pbBiI2NxePxUFxcXGu6aUEQBKHliIhs3qApIlLB8V8Df8DYvu1dRMcq6NpdXWdyH5Mpjv7pN+J2y+zb7aSEwEY3z+xXc2ru9gQYPnw4ffv25dVXX23wMXJyckhJSWHbtm307ds3ZH1rKjNmzGDp0qVs37493F0RQkB8agTAarVitVpb1LTVNm3aoNVqmyRpDnjXVQ4cOJB9+/ZRVlbWJG00laqAUax7FARBaB2aO4hpE6NArQlun8LTHvbucmK31b1ey1rpYfcOJyUBToutojdIREQ270hgRIS4DWxuixcv5rnnngt3N/wsXLiQqKioJjv+tGnTWL16daOOsXjxYi677DLi4uKIiIhg8ODBrFy5stp2//rXv0hOTkan0zFo0CB++ukn33uFhYX87W9/o2vXruj1epKSkrj//vspKSmpsc2CggI6dOiAJEkUFxf7Xv/f//7HhRdeSEREBAkJCUyfPv0PtY5TfGoEoCrTakuattrUSXMA0tPTUSqV/Pzzz03WRlMwm81oNBqRcVUQBKGVaBPdvLcjCkki3hJ8WtOSYg8/Z9o5dMBJYYEba6UHm02mstJDwWk3B/Y52bbVQXlZcIEjQEI7JRLNGzxGx4jbwOYWHR2N2WwOdzcaxOEIvD7qmUwmU6Nngq1bt47LLruMr7/+mq1btzJixAjGjh3Ltm3bfNv85z//4aGHHuKZZ57h559/Jj09nVGjRnHy5EkAjh8/zvHjx3n55ZfZvXs3CxcuZMWKFdxxxx01tnnHHXfQp0+faq+vWrWKe+65h59//pk333yTefPm8eGHHzbq/FoT8akRgKoajy0peARv0pymGnkE0Ol09OnTh61bt+LxBP8PYbhIkiQyrgqCILQiDQnkGivBompQTUWPB07mu9m328nPmQ62brazLdPB/j1OTp90Izfgn0uFEtrGN+81UCohtm3zX/c/uuHDhzN16lTf98nJycyaNYvJkydjNptJSkri//7v//z2+emnn+jXrx86nY4BAwb4BUxQ88jh0qVL/RIe7tixgxEjRmA2m4mIiOC8885jy5YtZGRkcPvtt1NSUoIkSUiSxIwZM3x9e+6555g4cSIRERH85S9/4ZJLLmHKlCl+bZ06dQqNRlPr6OKMGTP8ptdOmjSJcePG8fLLL2OxWIiJieG+++7D6XTWet1e
ffVVHn30UQYOHEiXLl2YNWsWXbp04csvv/Rt88orr3DXXXdx++2306NHD9566y0MBgPvv/8+AL169eKLL75g7NixdO7cmUsuuYQXXniBL7/8EpfLP/3ym2++SXFxMdOmTavWl/nz53PzzTeTmprK+PHj6dKlC0ePHq217+caETwGoLCwEL1ej06nC3dX/FgsFgoLC5u0HuPAgQMpLS3lwIEDTdZGU4iNjRUjj4IgCK2E0aho9qmrWq1EQvuWETx1SFKhUjXvqGNsW2WztynUbO7cub6g8N577+Wee+7x3XeVl5czZswYevTowdatW5kxY0aNAU19br75Zjp06EBmZiZbt25l+vTpqNVqhgwZwquvvkpERAR5eXnk5eX5Hf/ll18mPT2dbdu28dRTT3HnnXfyySefYLfbfdv8+9//pn379lxyySUB92fNmjVkZWWxZs0aPvjgAxYuXBhUqRKPx0NZWZlvYMfhcLB161ZGjhzp20ahUDBy5Eg2btxY63FKSkqIiIhAdUbdmr179/Lss8/y4YcfolDU/bm0YMECsrOzuf766wPue2sngscAtLRMq1WqkuY05dTVhIQE34dNayJGHgVBEFqX+ITmD+Q6pqjQ6cMbQJnMEh0Sm//cw3G9hZpdeeWV3HvvvaSmpvLYY48RGxvLmjVrAPjkk0/weDy899579OzZkzFjxvDII48E3UZubi4jR46kW7dudOnShQkTJpCeno5GoyEyMhJJkkhISCAhIQGTyeTb75JLLuHhhx+mc+fOdO7cmWuuuQbwrvursnDhQiZNmhRUabc2bdowf/58unXrxpgxY7jqqquCWhf58ssvU15e7gvaTp8+jdvtJj4+3m+7+Pj4Wu+TT58+zXPPPcdf/vIX32t2u52bbrqJf/zjHyQlJdXZhw8++IAHHniAr776irS0tID73tqJ4DEAhYWFLTJ4jI2NRaVSNWnwCN7Rx19++aVVjeTFxsZSUVHRpKOygiAIQui0T2r+YEapkOjSVU0zLzX0kRSQ2lUdlnrK7RNFwv2W4sx1dVVBXNU6vX379tGnTx+/2W+DBw8Ouo2HHnqIO++8k5EjRzJnzhyysrIC2m/AgAF+3+t0Om699VbfVNCff/6Z3bt3M2nSpKD607NnT5TK3//mLRaL75zr88knnzBz5kw+++wz2rZtG1S7VUpLS7nqqqvo0aOHb5ouwOOPP0737t255ZZb6tzf7Xbzt7/9jX/84x8MGzasQX1orUTwGIDCwsIWlWm1ikKhID4+vknXPYL3D9xgMLBly5YmbSeURMZVQRCE1iWpY3iCmYhIBUnJ4Wm7U6oKozE8t2LhOmehOrVa7fe9JElB5ZpQKBTVsn2evX5wxowZ7Nmzh6uuuorvv/+eHj16sGTJknqPbTQaq7125513smrVKn799VcWLFjAJZdcQseOHQPuLzT8nBctWsSdd97JZ5995jdFNTY2FqVSyYkTJ/y2P3HiBAkJCX6vlZWVMXr0aMxmM0uWLPHry/fff89///tfVCoVKpWKSy+91Hf8Z555xrddeXk5ZWVldO3aNfCTPkeI4LEeTqfTb051S2OxWJo8eFSpVPTr149t27bVuZi5JakKHlvTaKkgCMIfWdsEZdimkCYmqWjfzFNHO3ZSkWAJTwAX2UZBZJS4BWwNunfvzs6dO/1mUm3atMlvm7i4OMrKyqioqPC9VlNNxbS0NB588EG+/fZbrrnmGhYsWACARqPB7Q687mnv3r0ZMGAA77zzDp988gmTJ08O8qwa5tNPP+X222/n008/5aqrrvJ7T6PRcN555/lNffV4PKxevdpvpLa0tJTLL78cjUbDsmXLquUz+eKLL9ixYwfbt29n+/btvPvuuwCsX7+e++67z7edyWQiMzOT8847rylOtUUTnxz1aIllOs5ksVg4depUkwd1AwYMwG63s3v37iZtJ1Q0Gg0RERFi5FEQBKGVUCgkuvVQ179hE0nupCaxOUbjJOjURUWH
ME4b7dErfNdZCM6f//xnJEnirrvuYu/evXz99de8/PLLftsMGjQIg8HA3//+d7Kysvjkk0/8ks9YrVamTJlCRkYGR44cYcOGDWRmZtK9e3fAm1W1vLyc1atXc/r0aSorK+vt15133smcOXOQZZnx48eH9Jxr8sknnzBx4kTmzp3LoEGDyM/PJz8/369G40MPPcQ777zDBx98wL59+7jnnnuoqKjg9ttvB34PHCsqKnjvvfcoLS31HacqeO7cuTO9evXyfaWkpADeIP7MKbL5+fnccsst7Nu3r8nPvaURwWM9qsp0tMRpq+ANHmVZrjZMH2pt2rQhNTW1VSXOERlXBUEQWpfuvTRhbT+po4ruvdRotE0zAqrVS/Tqo8HSLrxTRsN9nYXAmUwmvvzyS3bt2kW/fv144oknePHFF/22iY6O5t///jdff/01vXv35tNPP/Vbx6dUKikoKGDixImkpaVx/fXXc8UVVzBz5kwAhgwZwt13380NN9xAXFwcL730Ur39uummm1CpVNx0003NUo3g//7v/3C5XNx3331YLBbf1wMPPODb5oYbbuDll1/m6aefpm/fvmzfvp0VK1b4kuj8/PPPbN68mV27dpGamup3nGBLbTidTg4cOBBQoH2ukeSzJ0kLfjZu3Mj333/P3//+97AsaK+Py+Vi1qxZXHnlldUWNYfawYMH+eSTT7jrrrto3759k7YVCsuXL+fIkSPce++94e6KIAiCEABZlnn3jTKKCsJbW9jlksnOcnEyP/CpfHWSIKGdkuQUFUpleO8lLB2U3Dq5dRapP5PNZiM7O5uUlJQWV0rtjyAnJ4fOnTuTmZlJ//79w90dIQQC/ZsSI4/1qMq02hIDR/CuR4yLi2vydY8AqampREVFtZrRx9jYWAoLC4NadC4IgiCEjyRJ9D1PG+5uoFJ5s7D26a8hLl6J1MC7JYUS4i1K0vtr6JyqDnvgCNCvBVxfofVyOp3k5+fz5JNPcsEFF4jA8Q9IBI/1aKmZVs/UHElzwJvNa8CAAezevRur1drk7TVWTEwMLpfLbz68IAiC0LL1SlejUoc/yAIwmxWkdVMz8AItyZ1VtIlWoK5nxqdGKxEdoyAlVcXAC7SkpqkxmVrG7ZbeINGtp1jvKDTchg0bsFgsZGZm8tZbb4W7O0IYiDzN9SgqKqJbt27h7kadLBYLu3btwu12+9XMaQr9+vVjzZo1bN++vUF1hppTbGws4M242tIfAAiCIAheer2CHr3U7NzmCHdXfNRqifYdVLTv4P3ebpepqPDgdoEsgySBSgVGkwKNpmUEvjXp00+DStVy+ye0fMOHD69WFkT4Y2kZj8JaKLfbTXFxcYvNtFrFYrHgdrs5depUk7dlNBrp0aMHmZmZLf7DIzIyEpVKJTKuCoIgtDIDLtDSQleLAKDVSkRHK4lrq6RtvPf/20QrW3TgqFTCeeeLKauCIDSOCB7rUFJSgsfjafGjVvHx8UiSRH5+frO0N3DgQAoLC/nll1+apb2GkiSJmJgYkXFVEAShlYmNU5LaVUyvDKVe6RpMZnHbJwhC44hPkTq09BqPVbRaLdHR0c2y7hEgMTGR+Pj4VpE4JyYmRow8CoIgtEIXDtehEHcpIaFSSwy+SGQkFQSh8cTHch0KCwtRKBRERkaGuyv1aq6kOeAd0Rs4cCAHDhxo8cloRK1HQRCE1imurZL+YpplSAy+SEtEpLjlEwSh8cQnSR0KCwuJiopC0QoefVosFvLz85ttHWLv3r3RaDRs3bq1WdprqJiYGEpLS3E4Wk7iBUEQBCEwQ4fpMEe0/H+DW7LoWAUDLxBBuCAIoXHOZ1v1uDw4yhw4yp24rG5cNhcum/f/PS4Z2eP9khQSkkJCoZJQ6VQotUrKcyppq03AWmhDG6FBoWq5/4BZLBYcDgcFBQW+LKNNSavVkp6ezs8//8ywYcOaPMtrQ52ZcdVisYS5N4IgCEIwtFqJy6/S88WnFeHuSqskSXDlnwwi
w6ogCCFzTgWPsizjqnRhLbJjK7RhL3XgrHAFNBone7zbuB3grHQB4DrpoU1UDMd/OgGA2qhGG6FBH61F10aH2qhCaiHp4BISEgDIz89vluARYMCAAfz000/s37+fnj17NkubwYqJiQHg9OnTIngUBEFohTp3UdO7r4Zd28UMkmANHKylXYdz6lZPaISMjAxGjBhBUVERUVFR4e5Ok5sxYwZLly5l+/bt4e7KOaXlDqUFwVHhpPBwMUfX55G7/jindhdQdrwCR7mzUdM4rTYrOv3vC8ydFU7K8yo4taeQoz8cJ3fdcQoOFuMoC/8/aAaDgcjIyGZb9wjQtm1bOnbs2KIT5+h0Okwmk1j3KAiC0IqNuFwv1uwFKSZOyYXDRZKclubNN9+kT58+REREEBERweDBg/nmm2+COsaNN97I6NGj/V5bsWIFkiQxY8YMv9dnzJhBUlJSY7vtJzk5mVdffTWkx2wsSZJYunRpuLvRImVkZCBJEsXFxSE5Xqv9JJY9MuX5FRz/6QRH1x+n6HAJzkpnyI7vcDjwuD3o9fpat3FZXRT/UsLRDXkc25RP2bFyPO7w1T5szqQ5VQYOHEhOTg4nT55s1naDITKuCoIgtG46ncS4CQaUYhAtIBqt93qJ6aotT4cOHZgzZw5bt25ly5YtXHLJJVx99dXs2bMn4GOMGDGCDRs24HK5fK+tWbOGxMREMjIy/LZds2YNI0aMCFX3m5Tb7cbj8YS7G82utrwcTmfo4ppQanXBo9vhpuhwCblrj3Fi+2mshbYmacdqtQLUGTyeyVZs5+SuAnLXHqPgYDEuu7tJ+lWXquCxuZLmAHTv3h2TycSWLVuarc1giYyrgiAIrV9COxWXXWEIdzdahTHjDMTEtsxcBH90Y8eO5corr6RLly6kpaXxwgsvYDKZ2LRpU8DHGDFiBOXl5X73XhkZGUyfPp3Nmzdjs3nvjW02G5s3b64WPG7dupUBAwZgMBgYMmQIBw4c8L2XlZXF1VdfTXx8PCaTiYEDB/Ldd9/53h8+fDhHjhzhwQcfRJKkOpdvvfLKK/Tu3Ruj0UhiYiL33nsv5eXlvvcXLlxIVFQUy5Yto0ePHmi1WnJzc7Hb7UybNo327dtjNBoZNGhQtaD4TMnJyQCMHz8eSZJ831f56KOPSE5OJjIykhtvvJGysjLfex6Ph9mzZ5OSkoJeryc9PZ3PP/+81rYA7HY7jz32GImJiWi1WlJTU3nvvff8zulMS5cu9btOM2bMoG/fvrz77rukpKSg03lnCEiSxJtvvsmf/vQnjEYjL7zwAgD/+9//6N+/Pzqdjk6dOjFz5ky/BweSJPHuu+8yfvx4DAYDXbp0YdmyZQDk5OT4fv5t2rRBkiQmTZpU5/nVp9UEj26Hm4L9ReSuO07h4aYPzqqCx6ofaKDcDjfFv5SQu+4Yp/cWNmsQabFYsFqtlJaWNlubSqWS/v37s2PHjpBlNJVlOaQBcExMDAUFBc0aVAuCIAih16efhr4DNOHuRos25GIdqV3V4e6GEAC3282iRYuoqKhg8ODBvtcnTZrE8OHDa90vLS2Ndu3asWbNGgDKysr4+eefmTBhAsnJyWzcuBGAH3/8EbvdXi14fOKJJ5g7dy5btmxBpVIxefJk33vl5eVceeWVrF69mm3btjF69GjGjh1Lbm4uAIsXL6ZDhw48++yz5OXl1TnjTaFQ8Nprr7Fnzx4++OADvv/+ex599FG/bSorK3nxxRd599132bNnD23btmXKlCls3LiRRYsWsXPnTiZMmMDo0aM5dOhQje1ULZ9asGABeXl5fsupsrKyWLp0KV999RVfffUVa9euZc6cOb73Z8+ezYcffshbb73Fnj17ePDBB7nllltYu3Ztrec1ceJEPv30U1577TX27dvH22+/jclkqnX7mhw+fJgvvviCxYsX+63JnDFjBuPHj2fXrl1MnjyZ9evXM3HiRB544AH27t3L22+/zcKFC32BZZWZM2dy/fXX
s3PnTq688kpuvvlmCgsLSUxM5IsvvgDgwIED5OXlMW/evKD6erYWPwFElmXKjlVQeLAYt6P5AjGr1YZWq21wmQ7ZLVOSW0bZ8QqiUyOJSDIjKZp2+khV0py8vLxmrU153nnnsX79enbu3MmAAQPq3FaWZfC4kN1OZLcbZDeyxw0eN8gekGWqQjwJvKniJAUolEgKJUje/5eUalAGlrAoNjYWh8NBWVkZERERjT5fQRAEIXwuHaWnuNBDzi+u+jf+g+naQ83QYaIsR0u3a9cuBg8ejM1mw2QysWTJEnr06OF732Kx1Dt9c8SIEWRkZPD444+zfv160tLSiIuL4+KLL/YlxsnIyCAlJYWOHTv67fvCCy8wbNgwAKZPn85VV12FzWZDp9ORnp5Oenq6b9vnnnuOJUuWsGzZMqZMmUJ0dDRKpRKz2ey776zN1KlTff+dnJzM888/z913380bb7zhe93pdPLGG2/42szNzWXBggXk5ubSrl07AKZNm8aKFStYsGABs2bNqtZOXFwcAFFRUdX65PF4WLhwIWazGYBbb72V1atX88ILL2C325k1axbfffedL3jv1KkTP/zwA2+//bbvGp3p4MGDfPbZZ6xatYqRI0f69gmWw+Hgww8/9PW9yp///Gduv/123/eTJ09m+vTp3Hbbbb62nnvuOR599FGeeeYZ33aTJk3ipptuAmDWrFm89tpr/PTTT4wePZro6GjAm6skFImSWnTw6Khwcmp3AbYie7O3bbNaA56yWhePy8Pp/UWUHa8grncMWnPTPTE1m80YjUby8vLo1q1bk7VztsjISLp27UpmZibnnXeeX0AnezzIbgeyy47scoLbxe/hYd1kAFkG2Rtcnr2XhOQNIJVqJJXG+6WoPk3nzIyrIngUBEFo3ZRKiXHXG/ns43KOH23+JSItVadUFWPGG1pMFnihdl27dmX79u2UlJTw+eefc9ttt7F27VpfADl79ux6jzF8+HCmTp2K0+kkIyPDN1I5bNgw3n77beD37Kpn69Onj++/qzLRnzx5kqSkJMrLy5kxYwbLly8nLy8Pl8uF1Wr1jTwG47vvvmP27Nns37+f0tJSXC4XNpuNyspKDAbvFHSNRuPXn127duF2u0lLS/M7lt1u993PBSM5OdkXOFadb1WejsOHD1NZWclll13mt4/D4aBfv341Hm/79u0olcoaA8tgdOzYsVrgCFQbhNmxYwcbNmzwG2l0u93VruOZ19BoNBIREdFk+UhaZPAoyzKlR8ooPFSCxx2ehbNWmxWj0Riy49lLHRzbmE+bzpFEpUQ0ySikJElhSZoD3sQ5H330EUePHiUxsQOy04bssCK7HAGGisGTkcHtRHY7wVEJgKTSoFDrkdQ6pN9Gjdu0aYNSqaSgoKBBT4cEQRCElkWjkbjuJhP//aScvF9FAJncScXVE4wolSJwbA00Gg2pqamAd/ZWZmYm8+bN8wV9gRgxYgQVFRVkZmayZs0aHnnkEcAbPE6ePJnCwkI2b97MX//612r7qtW/T2uuethQNdI5bdo0Vq1axcsvv0xqaip6vZ7rrrsu6KVJOTk5jBkzhnvuuYcXXniB6OhofvjhB+644w4cDocv6NHr9X4PPMrLy1EqlWzdurVaDfFgp4aefa5V51t1rlXrL5cvX0779u39ttNqax7Br29gSaFQVFsmVVPim9pijLNfLy8vZ+bMmVxzzTXVtj1zaV1d5xlqLS54dNvdnNh5GmtB0yTCCZTVag15vUTZI1N4qJjKU1bi+8ai0oX+8lsslrDUs+nUqRPtLfHkHtpDu0hN2NYXyi4HbpcDyVqKpNGh0BhRqNS0adNGZFwVBEE4h+h0EjfcYmLJfyo4kv3HncLapZuasdeIzKqtmcfjwW4PbpZd586dSUxMZNmyZWzfvt03Eta+fXvat2/P3LlzcTgcQWda3bBhA5MmTWL8+PGAN3jJycnx20aj0eB21/3QZuvWrXg8HubOnetbAvbZZ5/V
236/fv1wu92cPHmSiy66KOB+q9Xqevt0tjOT9AQ6kti7d288Hg9r1671TVs9U1xcHGVlZVRUVPgCwcbcl/fv358DBw74HjY0hEbjnfUY7PWpTYtKmGMvsfPrpvywB44ulwuX0xWSaas1sRXb+fXH/CbJFGuxWCgrK/PLZtWUZFnG47Thrihk5JDzsFeU4gjyA7BJ+oWMx2HFVX4aV9lpUhLbiYyrgiAI5xiNRuLam4z07PPHTKLT/3wtV18nAsfW5PHHH2fdunXk5OSwa9cuHn/8cTIyMrj55pv9tpk4cWK9xxoxYgRvvPEGqampxMfH+14fNmwYr7/+ui+xTjC6dOniS+KyY8cO/vznP1cbwUpOTmbdunUcO3as1gfzqampOJ1OXn/9dX755Rc++ugj3nrrrXrbT0tL4+abb2bixIksXryY7OxsfvrpJ2bPns3y5ctr3S85OZnVq1eTn59PUVFRQOdqNpuZNm0aDz74IB988AFZWVn8/PPPvP7663zwwQe1tnPbbbcxefJkli5dSnZ2NhkZGb7AeNCgQRgMBv7+97+TlZXFJ598wsKFCwPqT02efvppPvzwQ2bOnMmePXvYt28fixYt4sknnwz4GB07dkSSJL766itOnTrV6BihxQSPZcfKOfbTCVzW8D89bGim1WC4HW7yMk9ScqQspKN0VQuF8/PzQ3bMmsiyNzhzl5/GXVGE7HKQkJCA1AxtB0t2O0lNjCchQovHUSmyrgqCIJxDVCqJK6/WM/IKPQ3McdfqqNQSV40zMHK0HkUTJ+MTQuvkyZNMnDiRrl27cumll5KZmcnKlSv91t3l5eUFtMZwxIgRlJWVVcvMOmzYMMrKyhpU3/GVV16hTZs2DBkyhLFjxzJq1Cj69+/vt82zzz5LTk4OnTt3rnHdHkB6ejqvvPIKL774Ir169eLjjz8OaC0neLOmTpw4kYcffpiuXbsybtw4MjMzSUpKqnWfuXPnsmrVKhITE2tdr1iT5557jqeeeorZs2fTvXt3Ro8ezfLly0lJSal1nzfffJPrrruOe++9l27dunHXXXdRUVEBQHR0NP/+97/5+uuv6d27N59++ikzZswIuD9nGzVqFF999RXffvstAwcO5IILLuCf//xntSRIdWnfvj0zZ85k+vTpxMfHM2XKlAb3B0CSw3wnLcsyRVklFB0uCWc3/Jw8eZJ9e/cx9MKhqFRNP7M3smMEMd2iQrLIXZZlXnzxRYYOHRrUcH9QbbiduK2lyK7q898P7N9PcXEx5w8a1KIW7efn53PgwAEuuvBClBodSn0kkkqkMhcEQTiX/Jrr4n+fV1BRfu4+JIyIUjD+eiPxCX/sOo42m43s7Gy/OnmCIDRcoH9TYX1GJ8syBfuLW1TgCN4yHWq1ulkCR4CSI6Wc2l2I7Gn8P3aSJJGQkNAkSXNkWcZjK8NddrrGwBGgXbt22Gw2igoLQ95+Yxj0epBlrFarN/gtP+0NgMUopCAIwjmjQ5KK2/5ipku3c/PhYM8+Gm67y/SHDxwFQQifsAWPsixTeKCYkiPNV9A+UDabFZ2+eZ9ilR0r59SewpAEM02RcdUbcBXgtpXXmT3VHBGB2Wzm2PHjIW2/saqyelVNSZYBj70Cd3ntgbAgCILQ+phMCsZNMDBmvAGdvuXMgGkMk1nBtTcauWqcAb3+DzI3VxCEFilsn0DFWaUU57S8wBG8AUZTJcupS9mxcgr2Fzc6gLRYLBQVFWGzNT4hjyzL3iCrrMBbEiMA7dq3p7CwENtvgVpLoFKrUanVVFZW+r0uu124ywvw2MrFKKQgCMI5QpIkevTWMPkec6tOpiNJ0HeAhtvvNtE57dwcTRUEoXUJS/BYdryCwsPF4Wg6IOEKHsE7hbU0t3FZkEKVNEeWZTzWEu/0ziCqNbaNi0OlVHI8DPUm62IwGKisIaCVAbetDE9lMbIcnrqigiAIQuiZTAquGmfgz5NMWDq0rqmeSckqbr3TxOVXitFGQRBa
jmb/NLKXOji9p2WthzuTx+PBYXeEdfF1wf6iRpXxiI2NRa1WN2rqquxxe0fkHMGPHiqUShIsFvLz8pqsQGlDGPT6aiOPZ/I4bbjLC5Dd4c/4KwiCIIROhyQVt9xu4oZbjaR0bnElrn0kCbp0VXPLZBM3TjSRYGm5fRWEM1VWVnLttdcSERGBJEkUFxfXu09OTg6SJIWlPrnQcM36qeR2uDmx7RQed8sJKM5WtSYuXCOP4B3xO7njNO0HJ6DSBf8jUigUxMfHNzh4lN1O3BWFyI0I/NpZLPx69CinT52i7Rm1h8LJYDB46xHJsvdf6BpUTWNVGqNFNlZBEIRziCRJdExR0zFFTWGBm+1bHeza7sBuC/+SBYNRok8/Den9tURGiVFGofX54IMPWL9+PT/++COxsbFERkaGu0tCE2m24FGWZU7uLMDZAuo41qUlBI8ALrubE9tP0+78eKQG1HCyWCzk5OQEvZ/s+i1wbOT0Tb3BQJs2bTh2/HiLCR71Bj0ulwun04laU/saGFn24K6oCiBb71oZQRAEoWbRMUouuVzPRSN0HNjrZN9uBzm/uGjOpe8KBXRKVdOjt5rUrmpUqnMjuY/wx5SVlUX37t3p1atXuLsiNLFme7xVerScytMtJ4FKbWw2G0qlEk0dwUWz9aXYTnF2w5IKWSwWTp8+jdMZWJIbqBpxLAjZur927dtRWlJCRXnj1nCGikHvzbhaaa196moVWZa9QbTIxCoIgnDOUqsleqVrmHCziXsfimDMeAPp/TVExypqm6DSYJIEsW2V9B2g4errDNz3cATX3GikW0+NCBz/YD7//HN69+6NXq8nJiaGkSNHUlFRwfDhw5k6darftuPGjWPSpEm+7+12O4899hiJiYlotVpSU1N57733fO/v2bOHMWPGEPFb9vuLLrqIrKws3/vvvvsu3bt3R6fT0a1bN9544w3few6HgylTpmCxWNDpdHTs2JHZs2cD3vuiGTNmkJSUhFarpV27dtx///0ADB8+nLlz57Ju3TokSWL48OGAd7R/6dKlfucTFRXFwoULG38RhbBplpFHp9VF4cHi5miq0azW5i/TUZeirBKMbfVozMEFswkJCciyzIkTJ+jQoUO923una4amVEiVmJhYtFotx44fJy0tLWTHbSidXg+SRGWllcjIqHq39waQRShN0UhKMYVVEAThXGY0KujRW0OP3t5/b+12mZP5bvLz3BScclNW6qG0VKa0xIPTUfu/lRqtRESkgogICXOEgti2ShIsSuLilWg0Ikj8o8vLy+Omm27ipZdeYvz48ZSVlbF+/fqA778mTpzIxo0bee2110hPTyc7O9u7JAc4duwYF198McOHD+f7778nIiKCDRs24HJ5Z/19/PHHPP3008yfP59+/fqxbds27rrrLoxGI7fddhuvvfYay5Yt47PPPiMpKYmjR49y9OhRAL744gv++c9/smjRInr27El+fj47duwAYPHixUyfPp3du3ezePHiFjEAIzSdJg8eZVnm1O4CPK6Wu87xTFarFb0uvFNWzyR7ZE7uLqT9oOCmr7Zt2xaFQkFeXl69waN3mmZRyDONSpKEpV07cnNz6dypE0pVeBf+KxQK9Dod1jqS5pyt6tooTTFIitaVqU8QBEFoOK1WIrGjisSO/v92ybKM3Q5Oh4zLJeN2g1IJKpWERiuh1YoAUahdXl4eLpeLa665ho4dOwLQu3fvgPY9ePAgn332GatWrWLkyJEAdOrUyff+v/71LyIjI1m0aBFqtfeh95kP75955hnmzp3LNddcA0BKSgp79+7l7bff5rbbbiM3N5cuXbpw4YUXetcI/9Y/gNzcXBISEhg5ciRqtZqkpCTOP/98AKKjozEYDGg0Gl/Gf+Hc1eTTVsvzKrEWNL7eYHOxWW1hX+94NnuJndKjwU39VKlUtG3btt6kObIse0tUeJpmLaolIQHZ4yH/xIkmOX6w9AZDnRlXayJ73L+V8Qh/UgVBEAQh
vCRJQqfzjiq2iVYSG6ekTbQSc4RCBI5CvdLT07n00kvp3bs3EyZM4J133qGoqCigfbdv345SqWTYsGG1vn/RRRf5AsczVVRUkJWVxR133IHJZPJ9Pf/8875prZMmTWL79u107dqV+++/n2+//da3/4QJE7BarXTq1Im77rqLJUuW+EY0hT+WJg0e3U4PhQcC+4NoCWRZxmaztahpq1UKDxXjsruD2sdisdRb61G2l+Nx2hvTtTpptFpi4+I4fvw4zZqJoBa11Xqsj8flwGMra4IeCYIgCILwR6FUKlm1ahXffPMNPXr04PXXX6dr165kZ2ejUCiqPag+M3dFfYMbdb1f/lv+iXfeeYft27f7vnbv3s2mTZsA6N+/P9nZ2Tz33HNYrVauv/56rrvuOgASExM5cOAAb7zxBnq9nnvvvZeLL764ztwakiTVeT5C69SkwWNJTmnQAU842Ww2ZFlucSOPAB6Xh+KskqD2SUhI4MSJE7jdNf8MPE47HlvTJ7Np364dlRUVlJQE1/+mYNDrsVmtDSpD4rFXNKjupSAIgiAIQhVJkhg6dCgzZ85k27ZtaDQalixZQlxcnN+MMbfbze7du33f9+7dG4/Hw9q1a2s8bp8+fVi/fn2NAVp8fDzt2rXjl19+ITU11e8rJSXFt11ERAQ33HAD77zzDv/5z3/44osvKCz01mfX6/WMHTuW1157jYyMDDZu3MiuXbtqPc+zz+fQoUNBz/4SWp4mW4TmcXkozW1dIzU2m3d6bUsMHgFKj5UT1TkSlTawtXcWiwW3282pU6eqzUGXZQ8eawnNMRYYGRmJwWjk2PHjREZFNUOLtTMYDL4RZr3BEPT+Hmspkkoj1j8KgiAIghC0zZs3s3r1ai6//HLatm3L5s2bOXXqFN27d8doNPLQQw+xfPlyOnfuzCuvvEJxcbFv3+TkZG677TYmT57sS5hz5MgRTp48yfXXX8+UKVN4/fXXufHGG3n88ceJjIxk06ZNnH/++XTt2pWZM2dy//33ExkZyejRo7Hb7WzZsoWioiIeeughXnnlFSwWC/369UOhUPDf//6XhIQEX4ZUt9vNoEGDMBgM/Pvf/0av1/utizzbJZdcwvz58xk8eDBut5vHHnusxim1QuvSZMFj6dFy3M6WnSRHdst43DIgAxKV5ZVIkoRWqw1312oku2VKj5QRnRYV0PYJCQlIkkReXl614NFjLUX2NNOosCTRrl07sg4fxmG3ownj9a16MFBZWdmg4NEbdJeiMEQhhTqPuyAIgiAI57SIiAjWrVvHq6++SmlpKR07dmTu3LlcccUVOJ1OduzYwcSJE1GpVDz44IOMGDHCb/8333yTv//979x7770UFBSQlJTE3//+dwBiYmL4/vvveeSRRxg2bBhKpZK+ffsydOhQAO68804MBgP/+Mc/eOSRRzAajfTu3dtXHsRsNvPSSy9x6NAhlEolAwcO5Ouvv0ahUBAVFcWcOXN46KGHcLvd9O7dmy+//JKYmJhaz3Xu3LncfvvtXHTRRbRr14558+axdevWprmwQrOR5CbIAiLLMkfXH8dZ2bIW0rpsbpyVTtx2N267G4/L/9QdTgdWm43Y+GiUOiVqvQqVPrwZQs+m1ChJGtYehTKwwGX+/Pl06tSJK6+80veax2nDXdG8a1HdLhc/btxIUlJSnU+pmpws88OGDXTs2JHExMQGH0ZpiEKhaZkj1IIgCIJwrrPZbGRnZ5OSkoJO1/JyVQhCaxPo31STREbW07YWEzjKHhlHuRN7iQN3PesvNWoNGrUGZ6ULZ6ULG3aUGgXaCA2aCE1QpTKaitvhpuJEJeZ2xoC2PztpjizLeKylTdW9WilVKuLj48k7fpykpKTwjdpJEga9vtFz7j3WUiS1Fklq8oTFgiAIgiAIgtAiNMmdb0tZ62gvdVBypIzKk9Z6A8fauB0eKk/bKMkpw17iCHEPGyaY65uQkEB+fj6e3xLEeOzlzTdd9Szt27XDbrdT
UHA6LO1XMRgMQdV6rIkse5ol2ZAgCIIgCIIgtBQhDx5dNheVp8Nb19Hj8lCeV0HlSSuyOzSzcmWPTOUpK2XHKvCEeS2nrdiOozywVMcWiwWHw0FhYSGyx41sr2ji3tXOaDIRERnJ8WPHw9YHaFitx5rI9oomq48pCIIgCIIgCC1NyIPH8rzKsBZTd1ldlB4tx1nRNDf1vx8/vHVqyo8HFgRaLBYA8vLy8NjD+7MB7+hjUVFRo0f+GsNg0ON0OnE1staQjLd8hyAIgiAIrZMsyxQXFnL816MUFxaG/T5JEFq6Jggew3cz7axwUZZXEbLRxtrIHpny/EocZeELIMvzKgL6gNPr9URFRZGfl4fsCH+gExsXh1qt5vgZdX98ZBk8bv+vJvgQN+i9WVZDMvroaFjNSEEQBEEQwqestIRFC9/n2pHDGDWoH+NHXMioQf24duQwFi18n7LS8NembsmSk5N59dVXm+z4kiSxdOnSRh1j0qRJjBs3LiT9EX4X0oQ5brsbe2l41gW6rC7KT1RAc93Hy1BxshJJYUBtbP6aNU6rN6mPJoC2LRYLtvLiFvE0TaFQYLFYOH78OMkdO6LAAx43suwBuZYfnqTwJqZRKJGUKmhkkpqqch1Wq5WIyMhGHUuWZWRHJZLO1KjjCIIgCILQPDatX8v0KXdjs1qrvXf8aC6vznqWt/75D+bMf4sLLhrWbP2aNGkSxcXFjQ6aqgwfPpy+ffs2SZCXmZmJ0RhY8sZwmTdvXou49z3XhHTk0VpkD+XhAuZxe0cCmy1wrCJDxQlr2NZA2goDu94JCQkoZWeTjOI1RDtLAglx0d41gy67d91gbYEjgOxB9riQXXY89grvaJ/b1eDzUSiVaHW6kIw8Angc4Z8OLAiCIAhC/TatX8uDd07CZrV5HwCf9e931Ws2q40H75zEpvVrw9TT2jkbuewmFOLi4jA0oF52c4qMjCQqKirc3TjnhDR4tBWFJ1FOKBPjBEv2yFScrP7kqjkEer07WOLRKBXY7eEJ7n08HjyOStSSm5SOiSiUDfv1kz0uZKcVj8PqndraAAaDgcoanjg2rD9uZFeYr60gCIIgCHUqKy1h+pS7kWVv1vS6yLIHWYbpU+4O6RTWzz//nN69e6PX64mJiWHkyJFUVFQwY8YMPvjgA/73v/8hSRKSJJGRkUFOTg6SJPGf//yHYcOGodPp+PjjjykoKOCmm26iffv2GAwGevfuzaeffuprZ9KkSaxdu5Z58+b5jpeTkwPA7t27ueKKKzCZTMTHx3Prrbdy+vTvmfDLysq4+eabMRqNWCwW/vnPfzJ8+HCmTp3q2+bsaavFxcXceeedxMXFERERwSWXXMKOHTt87+/YsYMRI0ZgNpuJiIjgvPPOY8uWLXVeq7y8PK644gr0ej2dOnXi888/93v/6NGjXH/99URFRREdHc3VV1/tO8eqa3DmtNXhw4dz//338+ijjxIdHU1CQgIzZszwO+b+/fu58MIL0el09OjRg++++y4kU2jPJSENHsNRysJR5gx78hqX1RWWcw+0zfgY79TMsvIwlVCRZWSXA4+jssHBXs3HdeNxWJFdjqBHIUNR69GvK47wPEAQBEEQBCEwyxd/gc1qrTdwrCLLHmxWK18vWRyS9vPy8rjpppuYPHky+/btIyMjg2uuuQZZlpk2bRrXX389o0ePJi8vj7y8PIYMGeLbd/r06TzwwAPs27ePUaNGYbPZOO+881i+fDm7d+/mL3/5C7feeis//fQT4J2yOXjwYO666y7f8RITEykuLuaSSy6hX79+bNmyhRUrVnDixAmuv/56X1sPPfQQGzZsYNmyZaxatYr169fz888/13luEyZM4OTJk3zzzTds3bqV/v37c+mll1JYWAjAzTffTIcOHcjMzGTr1q1Mnz4dtbrupVdPPfUU1157LTt27ODmm2/mxhtvZN++fYB39HXUqFGYzWbWr1/Phg0bMJlMjB49Goej9vvjDz74AKPR
yObNm3nppZd49tlnWbVqFQBut5tx48ZhMBjYvHkz//d//8cTTzxRZx//iEK25lH2yM2eQEaWofJ0y7hptxbY0JjVSAqp2dp0VrrwuDwoVLU/A5BlGa1SQqPRUF5WTmxsXLP177cOIDttTVjSQvZNfVWo9SAFdv0NBgPH8/KQZRkpwH3q7IXTjix7vGszBUEQBEFoUWRZ5rOPFjZo3/98uIDrJ05q9P1CXl4eLpeLa665ho4dOwLQu3dv3/t6vR673U5CQkK1fadOnco111zj99q0adN8//23v/2NlStX8tlnn3H++ecTGRmJRqPBYDD4HW/+/Pn069ePWbNm+V57//33SUxM5ODBg1gsFj744AM++eQTLr30UgAWLFhAu3btaj2vH374gZ9++omTJ0+i1WoBePnll1m6dCmff/45f/nLX8jNzeWRRx6hW7duAHTp0qXe6zVhwgTuvPNOAJ577jlWrVrF66+/zhtvvMF//vMfPB4P7777ru/nsmDBAqKiosjIyODyyy+v8Zh9+vThmWee8fVh/vz5rF69mssuu4xVq1aRlZVFRkaG75q98MILXHbZZfX29Y8kZMGjs8KJx928a/+c5Y4GT1dVahSoTWqUagUoJPDIuJ0enBUu3PbgR8dkj4yj1IE2Stug/jSELMvYSx3oo3W1b+O0ATIms5ny8mYuai/L3qmlcghHG2vj8Y5CKjSBBZB6gx7Z48Fus6H7LYFOY8h4g2RJ07Ln/wuCIAjCH1FJURHHco8EvZ8syxzLPUJpcTGRbdo0qg/p6elceuml9O7dm1GjRnH55Zdz3XXX0SaA4w4YMMDve7fbzaxZs/jss884duwYDocDu91e7zrEHTt2sGbNGkym6on+srKysFqtOJ1Ozj//fN/rkZGRdO3atc5jlpeXExMT4/e61WolKysL8I5m3nnnnXz00UeMHDmSCRMm0Llz5zr7Onjw4Grfb9++3dfm4cOHMZvNftvYbDZfmzXp06eP3/cWi4WTJ08CcODAARITE/2C7TOvg+AVsuDR0UR1FevSkKmiCpWEPk5PuauMDTvXc/zUcex2Gyajma7JXenXvT+SU0HlqeAT4dibOXgEb9BeZ/Do8l4jk8lEfn5+c3XLGzg6mylw9LUZeADpK9dhrQxJ8Aje0UdE8CgIgiAILU5lZePKlVVUlDc6eFQqlaxatYoff/yRb7/9ltdff50nnniCzZs3k5KSUue+Z2c2/cc//sG8efN49dVX6d27N0ajkalTp9Y5ZROgvLycsWPH8uKLL1Z7z2KxcPjw4aDPq7y8HIvFQkZGRrX3qhLWzJgxgz//+c8sX76cb775hmeeeYZFixYxfvz4oNuravO8887j448/rvZeXFzts+zOniorSRIeUXItKCGbY+eyNW/w6Ha4cdmCC0wUagWm9ibsWPls5SLyT+eRnpbOxQOGYYlNYNPOjazc8A1qgwpzeyNKTXCXx+3w4LI273Wo7xpUBY9mkwmH3Y6zng+VUJFdjtCubwy44cCS12i1WhRKJZWVoZv2LLscIuuqIAiCILRABkPjykoYjaEpySVJEkOHDmXmzJls27YNjUbDkiVLANBoNLjdgd07bdiwgauvvppbbrmF9PR0OnXqxMGDB/22qel4/fv3Z8+ePSQnJ5Oamur3ZTQa6dSpE2q1mszMTN8+JSUl1Y599jHz8/NRqVTVjhkbG+vbLi0tjQcffJBvv/2Wa665hgULFtR5jps2bar2fffu3X1tHjp0iLZt21ZrM7KBZdi6du3K0aNHOXHihO+1M6+D4BW64NHavIGCqzK4IE1SSpjaGVGqFezP3o/dYedPI65mQK+B9OrSm8uGjKJ7Snd++fUXbHYbCpUCUzsjCnVwl8jZ3MFjHe3JHrdvrWHV9IRmSZrjcSO7w1PvE0B2O72lPOoiSej1eqyhTJoje8ITMAuCIAiCUKfINm1on9Qx6HWLkiTRPqkjESEo+bB582ZmzZrFli1byM3NZfHi
xZw6dcoXECUnJ7Nz504OHDjA6dOn6yzJ0aVLF98o5r59+/jrX//qF/RUHW/z5s3k5ORw+vRpPB4P9913H4WFhdx0001kZmaSlZXFypUruf3223G73ZjNZm677TYeeeQR1qxZw549e7jjjjtQKBS1XruRI0cyePBgxo0bx7fffktOTg4//vgjTzzxBFu2bMFqtTJlyhQyMjI4cuQIGzZsIDMz03fetfnvf//L+++/z8GDB3nmmWf46aefmDJlCuBNwBMbG8vVV1/N+vXryc7OJiMjg/vvv59ff/01mB+Lz2WXXUbnzp257bbb2LlzJxs2bODJJ58ECEl+jHNFqx15dAW5LtEQq/OubwQcTm9gY9D5P4UyGkxIkoRC4d1OoVJgiAtuSmND1ks2Rl0jj7L79w8dnU6HSqWivLxx0zbqJct4nOEp2eLXDZe93gysBoMhpBlXwf+aC4IgCILQMkiSxPW3TmrQvjdMvD0kwUNERATr1q3jyiuvJC0tjSeffJK5c+dyxRVXAHDXXXfRtWtXBgwYQFxcHBs2bKj1WE8++ST9+/dn1KhRDB8+nISEBL+yFOBNqKNUKunRowdxcXHk5ubSrl07NmzYgNvt5vLLL6d3795MnTqVqKgo3/3vK6+8wuDBgxkzZgwjR45k6NChdO/eHZ2u5mVSkiTx9ddfc/HFF3P77beTlpbGjTfeyJEjR4iPj0epVFJQUMDEiRNJS0vj+uuv54orrmDmzJl1Xq+ZM2eyaNEi+vTpw4cffsinn35Kjx49AO893Lp160hKSuKaa66he/fu3HHHHdhsNiIiIgL9kfhRKpUsXbqU8vJyBg4cyJ133unLtlrbuf8RSXKI5tkd25iPraT5at2V5pbhdgQ2R1mhVhCRZPL94eccz+F/3y+hU4dOXNBnMDqtjrxTeaze/B09Ovdk2IDhvn1lWabs14qAg0KFSiIyuWG/tA2hNqhJurjmDFgeWxlu2+9JcnZs345araZHz55N1h/Z5WgxNQ8lpQZJXfsa1JzsbPLy86styG4MhdaIUt98P39BEARB+COy2WxkZ2eTkpIS8I19WWkJYy+6AJvVFlC5DoVCgVan48v1mzBHNGwq5LmgoqKC9u3bM3fuXO64445wd6dZbdiwgQsvvJDDhw/Xm+CntQv0bypkCXOaM9OqLIM7iGQ2aoPK74lRcrtkBqcPYcvuTH759Rff6+f3GsTgvkP89pUkCbVRFXDw6HHJeNwyCmXzDG/LdVz3s6dumsxmvyKwoe+M3IiRNwlJqQKFEqi6djJ4PL8dM/hnHLLbiaTS1Jo8x2Aw4LDbcbtcKFUh+lMQI4+CIAiC0CKZIyKZM/8tHrxzEqCoM4CsKr01Z/7bf7jAcdu2bezfv5/zzz+fkpISnn32WQCuvvrqMPes6S1ZsgSTyUSXLl04fPgwDzzwAEOHDj3nA8dghK7OYwNLZjSIRw4qlqgp8U2EKYJ28e1JTUxFp9WTfewXMvf8hEFvIL1rX//9g1z3KHtkaKbg0VPHdT87eDSbTPx69CgupxNVPYVZG0L2uCHAwrtnkpQa/rfye3bu3lPLgeGBv07CbAg2k62M7HEhKWs+V/1vWVatViums1I9N1S9ay0FQRAEQQibCy4axj/fXcj0KXdjs3qT5p05Ca9qsEGn1zFn/ttccNHFYelnuL388sscOHAAjUbDeeedx/r16/2S35yrysrKeOyxx8jNzSU2NpaRI0cyd+7ccHerRQld8NiMWSaDbuuskaeDOQf4ftNqbv3TbZiN3qAhNSkVZJkNP/9AWseu6HX6WvcPoIPBbd8ItV0LWZarlcmoSppTXlFOVFTj0k3XqAGjbpJKi6TScF7fPnRK7uj3nizLfL3yO6IiI4iMjvUmwnHaCebJgex21ho8VtVCqrRWhi54lD3IsiwWVguCIAhCC3XBRcP4cv0mvl6ymP98uMCv/mO7xCRumHg7V11zLSbzH3MZSr9+/di6dWu4uxEWEydOZOLE
ieHuRosWsuBRUjTfzXKwN+ayxz/Y2HlwB7HRcb7AsUpKh87s/WUvp4pOkWRJOuMAwQWDzRk4KGq77rJcLbDUGwwolErKyytCHzzKsnfkMQhVgSNAYof2JHZo7/d+7tFfcbqc9O7pXRwtKdXedoJZU+lxe39+NfxMlCoVGo0mpOU6fG0qQ/anJQiCIAhCiJkjIrnhttu5fuIkSouLqagox2g0EREVJR4AC0IdQpZttVmDR4X0+7K4AHjOWh9Zaa2sccTO89uUS89ZUy8DTczj179mUmtbNQRykiRhMhopL2uCch2yTFBziSUl1DIiWGX33v1IkkSvnt1+f1Gp/m1dZDB9q/3nZzAYQlquA0CWRbkOQRAEQWgNJEkisk0b2nVIJLJNm3MycFy4cCFRdZQaycnJQZIktm/f3mx9OlNGRgaSJFFcXAzU318hvEIWPCpUITtU/SRQagIPIJwVTr9gMSqiDacKT1JUWuS33cFsb7ASG/X7nG5ZlnFUBD4dU6FSIDXTekcAqZbrXlsAYzKbKSsvr/G9xgg2YJJU6jo/oN1uN3v3HyCxfTuizij2KkkSklITXN/qGBHVN0G5DjzNlzxKEARBEAShJZoxYwaSJHH33Xf7vb59+3YkSSInJweAIUOGkJeXR2RkaBITVQXDQtMIWcSn0gU5GtRISm3g7XlcMo6y3wPA83qchyzLfP7tZ2zeuYmdB3bwv++XkvVrFj0698RkMPm2dVa48AQx8hhMv0Kh1utey1Rbs8lEZWUlbneIR8eCDZgUdU/rzMrOodJqpVePGgrI+mVkDUA9I4+VVmto16k245pXQRAEQRCElkqn0/Hee+9x6NChWrfRaDQkJCSIgK+VCGHw2LxrvFRBBmnW0zZfuY328R2YMOoG2kbHs+vgTtZtXUtJWTFD+g7lkvMv9e3jdnqoPBXcerhg+9VYtV/3mgMYk8kEskxFyEcfg5myqqj3A2L33v3ewrbd0qrvLknBJzGqhUGvx+N2Y7eHsjalCB4FQRAEQaguOTmZV1991e+1vn37MmPGDMA7423GjBkkJSWh1Wpp164d999/v29bu93OtGnTaN++PUajkUGDBpGRkeF3vIULF5KUlITBYGD8+PEUFBQE1Lf9+/czZMgQdDodvXr1Yu3atb4+paam8vLLL/ttXzWCePjw4VqP2bVrV0aMGMETTzxR6zZnT1s926lTpxgwYADjx4/Hbrfj8XiYPXs2KSkp6PV60tPT+fzzz2s9/pEjRxg7dixt2rTBaDTSs2dPvv766zquhFCXkEV8zT3yqNIH13XZI1OeV4HJYkSpVZIQm8DVl4yrdXu300P58YqgS5CoDM0cRNc68ljzywajEUmhoLyigogQTQ8INYfDwYGDh+mU3NGXEbVR6hgJrDq+1WpFG2CR4ca0JwiCIAiCUJsvvviCf/7znyxatIiePXuSn5/Pjh07fO9PmTKFvXv3smjRItq1a8eSJUsYPXo0u3btokuXLmzevJk77riD2bNnM27cOFasWMEzzzwTUNuPPPIIr776Kj169OCVV15h7NixZGdnExMTw+TJk1mwYAHTpk3zbb9gwQIuvvhiUlNT6zzunDlzGDhwIFu2bGHAgAFBXY+jR49y2WWXccEFF/Dee++hVCp54YUX+Pe//81bb71Fly5dWLduHbfccgtxcXEMGzas2jHuu+8+HA4H69atw2g0snfvXl8FAiF4oRt5DDKYayylRhF0oOZxyZQdq8Be6qizxIWjzEHZr+XVEu3U2yetsuUE0bUMzCkUCowGQ9MkzQlY3cHV/oOH/bKsNlodo5RanQ5JoQjtukcx7UIQBEEQhAbIzc0lISGBkSNHkpSUxPnnn89dd93le2/BggX897//5aKLLqJz585MmzaNCy+8kAULFgAwb948Ro8ezaOPPkpaWhr3338/o0aNCqjtKVOmcO2119K9e3fefPNNIiMjee+99wCYNGkSBw4c4KeffgLA6XTyySefMHny
5HqP279/f66//noee+yxoK7FgQMHGDp0KKNGjWLBggUolUrsdjuzZs3i/fffZ9SoUXTq1IlJkyZxyy238PbbbwPe0d0z7/Nzc3MZOnQovXv3plOnTowZM4aLL/5j1u8MhZAFj2pj6IvO10cbEVziFPCOQFaetFKSU0blKSu2Yjv2Ege2YjuVp6yUHimn4oQ16BFHAG1k8P1pLE2t1732AKZJkuYEEzDJMnId6xB37dmHRq2ha5fOtewuBzm6V3vfJElCr9d71z2GjAgeBUEQBEEI3oQJE7BarXTq1Im77rqLJUuW4HK5ANi1axdut5u0tDRMJpPva+3atWRlZQGwb98+Bg0a5HfMwYMHB9T2mdupVCoGDBjAvn37AGjXrh1XXXUV77//PgBffvkldrudCRMmBHTs559/nvXr1/Ptt98GtL3VauWiiy7immuuYd68eb7lTocPH6ayspLLLrvM7xp8+OGHvmtwtvvvv5/nn3+eoUOH8swzz7Bz586A+iDULITBo6p5M67iDZwU6oa1Kbtl7CUOrKdtVJ6yYj1tw17iwONqWKZMSSmhMTVvAC1JEpqIWtqUar8uZpOJiooK5FBmBZWCHHF1u2p8uaKikuwjR+jWtQtqdS3n5nER1LpCRd2/Iwa9Xow8CoIgCILQ5BQKRbXZb07n70kdExMTOXDgAG+88QZ6vZ57772Xiy++GKfTSXl5OUqlkq1bt7J9+3bf1759+5g3b16T9/3OO+9k0aJFWK1WFixYwA033BDw8qLOnTtz1113MX369Fpn/51Jq9UycuRIvvrqK44dO+Z7vfy3wY/ly5f7XYO9e/fWuu7xzjvv5JdffuHWW29l165dDBgwgNdffz2gfgvVha7OoyQ1aCSwcY2CITZE69QayRCra9b6jgAakxqFsuYfoVRHwGQymZA9HioqKkLWl7raq4nsdtb44bF3/wE8Hg+9enSrYS/vqKPscgTXtzoCaQh9rUcp2DqUgiAIgiD8IcTFxZGXl+f7vrS0lOzsbL9t9Ho9Y8eO5bXXXiMjI4ONGzeya9cu+vXrh9vt5uTJk6Smpvp9JSQkANC9e3c2b97sd7xNmzYF1Lczt3O5XGzdupXu3X/Pen/llVdiNBp58803WbFiRUBTVs/09NNPc/DgQRYtWlTvtgqFgo8++ojzzjuPESNGcPz4cQB69OiBVqslNze32jVITEys9XiJiYncfffdLF68mIcffph33nknqL4LvwvpQkVtpAZroS2Uh6yX2qhGY1b7leJobmqjCo05DFNW65omW0cAYzSZQJIor6jAZDaHpjOSAu90zQBHBGUPuOzIKq1f5tVde/ZiNBjolNyx+i6yDC57naU3auhYnaOw4K31aLPb8bjdKJQhCPxE8CgIgiAIQg0uueQSFi5cyNixY4mKiuLpp59Geca9x8KFC3G73QwaNAiDwcC///1v9Ho9HTt2JCYmhptvvpmJEycyd+5c+vXrx6lTp1i9ejV9+vThqquu4v7772fo0KG8/PLLXH311axcuZIVK1YE1Ld//etfdOnShe7du/PPf/6ToqIivwBRqVQyadIkHn/8cbp06RLwdNgq8fHxPPTQQ/zjH/8IaHulUsnHH3/MTTfdxCWXXEJGRgYJCQlMmzaNBx98EI/Hw4UXXkhJSQkbNmwgIiKC2267rdpxpk6dyhVXXEFaWhpFRUWsWbPGLygWghPSeaa6NtpQHi5ghlg9ClV4pgpKSglDnD4sbevruN5SHeUwlEolBr0+5ElzJGWQGXDdTm8AecYI5OSJN/Pw/feiOGsk0xs4Orz7BNMnhbLeaaQGvR5kGWsI1j1K3kYbfRxBEARBEM49jz/+OMOGDWPMmDFcddVVjBs3js6df8/xEBUVxTvvvMPQoUPp06cP3333HV9++SUxMTGAN8PpxIkTefjhh+natSvjxo0jMzOTpKQkAC644ALeeecd5s2bR3p6Ot9++y1PPvlkQH2bM2cOc+bMIT09nR9+
+IFly5YRGxvrt80dd9yBw+Hg9ttvb9D5T5s2LahMpyqVik8//ZSePXtyySWXcPLkSZ577jmeeuopZs+eTffu3Rk9ejTLly8nJSWlxmO43W7uu+8+37ZpaWm88cYbDeq/AJIcyMTjALkdbo6sORbQXOZQc9vdlB2rQPY0Y9sKMFuMzZ5ptkrSsPao62jbVXYKuZa1hfv27cNms9GvX7/QdcjjxuNowPRPhRJJpa11uqfs8SC77L+tdQyOpNbXG9S6nE42bNhAj549iItrG3Qbfu0plKgiGncMQRAEQRDqZrPZyM7OJiUlBV2oSm0J9Vq/fj2XXnopR48eJT4+PtzdEUIo0L+pkA6RKDXKsGQcBW+ZDFM7Q7OtO5QUEqaE8AWOaqO6zsARQFLUnsDHbDJRXl4e2kBfoQw+cQ6Ax43sqMTjqER22pFdDu+X0+59zVHRoMARSRHQ+kOVWo1arcZaGYKRR0V4fh8EQRAEQRCait1u59dff2XGjBlMmDBBBI5/YCGfX2eyGEN9yICpdCrM7Y1NPoVVUkqYLAbUQdaZDCVzANe5rhE3k9mEx+3GFtISFSCpGpFx1uNGdjuQXXbvl9sBHnfD+6JUB5z5VG8whCbjamPOXxAEQRAEoQX69NNP6dixI8XFxbz00kvh7o4QRqEPHhMMta61aw5KrZKIJDMac9PcxKtNaiKTzGEbcaxisgSQGrmOQMZk9M43LysP8bpHhaplJIyRlN7gMUAGgyEktR6DaVMQBEEQBKE1mDRpEm63m61bt9K+fftwd0cIo5AHj0qtEmN8YDVfmoqkkDDGGzBZjA2uA3k2hUrCmGDwBsfK8Nbx08foUBvrD1IkpbrWcvUqtRqdTkd5WXloOydJKFQ6qLXl5iChUGuDqrfoq/XYyGm8IngUBEEQBEEQzlVNkhYyIinwLEpNSW1UEZlkxmQxoDKoGhTPqPTeYDiyYwS5+UdYvnw5ZSHOUhqsyKTAymtIkgLqCGZMZjNl5SEOHgEUCiRVeNa+wm9TZ4Mc/TQYDLhdLr9CvUG3q1CKGo+CIAiCIPxhZGRkIEkSxcXF4e6K0EyaJHjUtdGiMbWQERjJm1zG3M5IRJIZQ5weTYQGpVZZPZiUvCOnGrMaQ5yeiCQT5vYm7xRYCZxOJzk5OaxcuZLs7Gw8nmDqDYaGSqcMqjRIXUGc2WTylutoguy4kkoTllE4SaFCUgYfuOr13mvamHWPkio8pWoEQRAEQRCa2vDhw5k6darfa0OGDCEvL4/IyEjAW6cyKiqqWfrzv//9jwsvvJCIiAgSEhKYPn16WCo+/NE0ycI9SZKISDJzem9hUxy+wZRqBcpIDb5bfPm3+oEy3lryklTn6GRkZCQ6nY6ioiIyMzPJz8+nZ8+eRERENEPvvSISzUFllJVUWrBX1PieyWzC5XJht9vRNkGa66pgKtjajA1uT6FCUuuCmq5aRa/XI0kSlVYrkQ380AvnaKsgCIIgCEJz02g0JCQkhPy4brcbSZKq1f0+06pVq7jnnnsYNGgQu3bt4s9//jPdu3fntttuC3l/hN81WTVzc3sjSk0Ln8IneddHSkrJG5DVE3OoVCpMJhMajTdIOHr0KOvXryc7O6fp+wooVAoiEoObEiypNN7pqzVoqqQ5vzcuees3NmAkMOimlOoGB44AkkKBTqdr8MijJEne9gVBEARBEOrg8Xh46aWXSE1NRavVkpSUxAsvvADArl27uOSSS9Dr9cTExPCXv/yF8jOWGE2aNIlx48Yxa9Ys4uPjiYqK4tlnn8XlcvHII48QHR1Nhw4dWLBggW+fnJwcJEli0aJFDBkyBJ1OR69evVi7dq1fv3bv3s0VV1yByWQiPj6eW2+9ldOnT/vaXbt2LfPmzfPe80gSOTk5ftNWMzIyuP322ykpKfFtM2PGDACKioqYOHEibdq0wWAwcMUVV3Do0CFf21UjlsuWLaNHjx5otVpyc3PJyMjg/PPPx2g0EhUVxdChQzly5AgA
8+fP5+abbyY1NZXx48fTpUsXjh492iQ/M+F3TRY8KpQKopIDW5vXWiiVStRqNREREVRWVqJQKKioqCA/P69Z2o9INAUdkEuShKSpeZqrRqtFo9GEPmmOfweQ1FoktR5qCWIbd3wFklrfqMCxisFgwNrQ4FGtD2uWYUEQBEEQWofHH3+cOXPm8NRTT7F3714++eQT4uPjqaioYNSoUbRp04bMzEz++9//8t133zFlyhS//b///nuOHz/OunXreOWVV3jmmWcYM2YMbdq0YfPmzdx999389a9/5ddff/Xb75FHHuHhhx9m27ZtDB48mLFjx1JQUABAcXExl1xyCf369WPLli2sWLGCEydOcP311wMwb948Bg8ezF133UVeXh55eXkkJib6HX/IkCG8+uqrRERE+LaZNm0a4A0+t2zZwrJly9i4cSOyLHPllVf65ZqorKzkxRdf5N1332XPnj1ER0czbtw4hg0bxs6dO9m4cSN/+ctfarzfWrBgAdnZ2b7+Ck2nSetNRHQ0U3q0HKe1AQXeWyCVSuUbPrfZbKjVaoYOHVrtj6cpKDVKolIaNj1WoTHgqXXqahMlzTmLpFQhKZS/1W904Z0r3Nhjqr1TY0MUtOkNBgp+e8IWLEUtAbogCIIgCEKVsrIy5s2bx/z5833TKzt37syFF17IO++8g81m48MPP8Ro9Nbznj9/PmPHjuXFF18kPj4egOjoaF577TUUCgVdu3blpZdeorKykr///e/A78HpDz/8wI033uhre8qUKVx77bUAvPnmm6xYsYL33nuPRx99lPnz59OvXz9mzZrl2/79998nMTGRgwcPkpaWhkajwWAw1DpNVaPREBkZiSRJftscOnSIZcuWsWHDBoYMGQLAxx9/TGJiIkuXLmXChAmAN7fIG2+8QXp6OgCFhYWUlJQwZswYOnfuDED37t2rtfvBBx/wwAMP8NVXX5GWlhbsj0QIUpONPIJ39DGmW5umbKJZqVQqlEolCoWC9PR0EuITMJvNKJVNPz03Oi2qwdOAJaUKRS3r8XxJc5rDb1M7FVrDb0FfA379JAlJpUGhNXL0+ImQZr41GAzYbDbkIBMheYNYsd5REARBEIS67du3D7vdzqWXXlrje+np6b7AEWDo0KF4PB4OHDjge61nz55+awHj4+Pp3bu373ulUklMTAwnT570O/7gwYN9/61SqRgwYAD79u0DYMeOHaxZswaTyeT76tatGwBZWVmNPmeVSsWgQYN8r8XExNC1a1df++ANPvv06eP7Pjo6mkmTJjFq1CjGjh3LvHnzyMvzn+3ndrv529/+xj/+8Q+GDRvWqH4KgWnS4BHA0FYf9rqPoWIwGOjRowcXX3wxgwcPJi4ujqysrCbP7KSP1mFub6x/wzpImpp/BiazCYfDgcNub9Txg+uMt5SHQmPwTvdUaZEUqpqDSUnhTYSj8k59VWiMvsBzw48/8tXy5Zw4kR+Sbhn0emRZxmq1BrWfQntu/H4LgiAIgtC0qrK7N4Za7Z/NXpKkGl8LpipAeXk5Y8eOZfv27X5fhw4d4uKLL250nwNRlbzwTAsWLGDjxo0MGTKE//znP6SlpbFp0ya/fpeVldG1a9dm6aPQDMGjJEnE9ohGqW7hyXMCIEkSnTt39qUg7pzaGZvVVm1OeSgplAriekY3ej2dpNbVWIOwKmlOeUXTT12tRpK801lVGiSNHoXWiEJnRqE1eb90ZhRaI2VWBzm5vyIpVX5TVE0mE6dOnWLduvXs27sXt6tx06MNBm8QGEzwKP225lIQBEEQBKE+Xbp0Qa/Xs3r16mrvde/enR07dlBR8ftSow0bNvimpzbWmUGXy+Vi69atvmmg/fv3Z8+ePSQnJ5Oamur3VTUSqtFocLvddbZR0zbdu3fH5XKxefNm32sFBQUcOHCAHj161Nvvfv368fjjj/Pjjz/Sq1cvPvnkE997JpOJzMxMzjvvvPovgBASTR48Aqi0SmK6nzvTV6sYjUba
tWvHkSNHcDgcTdJGdFoUamPj6yVKkoRCW330UqfToVKpmjZpTrAkyS9I3LVrF6u//57Tp0/5bWYymbxJbqxWdu3ezabNmxs1BVetVqNUqYLKuKrQGkWiHEEQBEEQAqLT6Xjsscd49NFH+fDDD8nKymLTpk2899573Hzzzeh0Om677TZ2797NmjVr+Nvf/satt97qW+/YGP/6179YsmQJ+/fv57777qOoqIjJkycDcN9991FYWMhNN91EZmYmWVlZrFy5kttvv90XDCYnJ7N582ZycnI4ffp0jSObycnJlJeXs3r1ak6fPk1lZSVdunTh6quv5q677uKHH35gx44d3HLLLbRv356rr7661v5mZ2fz+OOPs3HjRo4cOcK3337LoUOH/NY95ufnc8stt/hNfxWaVrMEjwAmiwFTQuOmXrZEySnJSJJEdnZ2yI9tiNUTkRRcaY66SBpD9bIdktRsSXMaRJYpKyujsqKCLZlbsJ0xKqiQJMwmE7LHg9FgIDc3lx82bKCkuLhhbUkSBoOBygBHHr2ZbMWUVUEQBEEQAvfUU0/x8MMP8/TTT9O9e3duuOEGTp48icFgYOXKlRQWFjJw4ECuu+46Lr30UubPnx+SdufMmcOcOXNIT0/nhx9+YNmyZcTGxgLQrl07NmzYgNvt5vLLL6d3795MnTqVqKgo3/rKadOmoVQq6dGjB3FxceTm5lZrY8iQIdx9993ccMMNxMXF8dJLLwHe6afnnXceY8aMYfDgwciyzNdff11tuu2ZDAYD+/fv59prryUtLY2//OUv3Hffffz1r3/1beN0Ojlw4ECDS60JwZPkpl6wdwaPy8OxzSdwlDXNKF24HD9+nEOHDtG/f3/M5tCUJ1HrVbQfnBDyWpkeeyVua4nfa79kZXHq1CkGXXBBSNsKhcqKCr5dtYpTp06h1Wjo2rUrAwcORKFUsm7dOgoKCigvK8NmsxEbG0u7du3o06cPekPDgrr9+/Zhtdno169fvdsq9RE1juYKgiAIgtC0bDYb2dnZpKSkoNOJOst1ycnJISUlhW3bttG3b99wd0dooQL9m2q2kUfwFrmP7xuLUt2szTY5i8WC0Wjk8OHDITmepJSI7xcX8sARQNLokZT+T3lMZhM2mw3XGbV2WorCoiKcDgd6nQ69Xs+R3FxvxjFZRiFJVJSXo1Kr0ev19O7Th0GDBjU4cITAaz1KSpUYdRQEQRAEQRD+UJo9itMY1cT1jj2n1olJkkRqaiqlJaWcOHGi0ceL6xmDNqJpSj9IkoTSEMmZVz+sSXPqUVV3UavV4nS5UCoU7D9wgPwTJ9AbDCQnJzNixAh69OxJaWlpo9vTG/Q4nc46A2kJUOojz6nfYUEQBEEQBEGojyocjRrb6onp3obTewvD0XyTiIqKIi4ujl9++YXY2NgG136M7hKFuV3TToWUlGoknQnZ5g0W9QYDCqWS8rJyoqJaTmIjj8fDiZMncTgcOF0ubDYbHdq3x2TyBrv9+/XzZmyVJHQ6LTu276CgsICYmNgGt2nQe0cTKysriYiMrHEbSWsUdR0FQRAEQWgVkpOTm7ysnPDHEZbgESAyyYzH5aHwYHG4uhBynTp3IvOnTHJzj5KSkhz0/lEpEUR1igh9x2qg0JqQnXZktxNJkjCZTC0yaU50dDSWhASQJH49epQhQ4YQ+VuplDNFRbUhqk0bsrNziImO8cvWGgy9Xg+SRKXVWmPwKClUKHShWdcqCIIgCIIgCK1J2IJHgDadvNMnC86RAFKn09EhMZFfjx7FYkkIagF3VEoE0WlRzTYVsmr6qru8AFmWMZtMFDU0S2kTUSgUDBgwAACX00lhQQF2h73W7VOSk9m2bRunTp0irm3bhrWpVKLTarGWluE2mJHtTjx2B7LDhezx4DzlRHYdRFIpUahVKAxalJEmVJFGlJEmlKbqBW4FQRAEQRAE4VwQ1uARIKpTJJJKQcG+onNiSD0pKZET+flkZWXRs2fPgPaJTosi
KiWi2YMOSalGoY/EXVmMyWTi2PHjuN3uBk+5bUoqtRqdTkdZWTlt29Zc6ygiMpLomBiyc3KIjYsL7HrKMh6rHXdZ5W9fVtocLcGdW0J5W/+RWPvRElwFdZfxkFRKNAkxaCwxaNrFoO3QFlWsWB8pCIIgCIIgtH5hDx7BO4VVpVNxatdp3M7qBUdbE6VSSafOndi3dx/FxcVE1TDFsopCqSCuZzSmJl7jWBeFRo/sdnrXEcoyFeXlta71Czez2Ux5WVmd26QkJ7N161ZOnjhBfEJCzRt5PLgKS3GeKsZVXI7H7p8cR61UYbPZ/F5zFlTWGzgCyC439l9PYv/1pO81pVGPLjkBffeO6LsmoVC3iD87QRAEQRAEQQhKi7mLNbbVo74ggRPbT7f6OpBt27bl2LFjHD50mPMGnFfjqJPaoCK+Xxxac/gTryh0ZkyRdiSFgvIWHDyazGZvQVpZrnVNo8lsJi4ujpycHNq2bYuk+D2hsLvcijOvAMeJQmSXu9Z21Go1ZWcEqZ4KJ45fG57J1V1hpWJPNhV7slFo1Bh6JmMa0A1t+7gGH1MQBEEQBEEQmluLKrioMappPygek6X1F15PTU2lorKC48ePV3vPEKen/QUJLSJwBO/6R5U5BoVK3SKT5lQxm024XS6s1rpHAJOTk7HZ7eTl54PHg/NkERXbDlG+ZT/2Y6fqDBzBGzwiy7hcLjx2F9bsIgjRjGqPw0n5tkPkv/Ml+e98Sfn2Q8guV2gOLgiCIAiCIAhNqEUFjwAKlYK2fWKI7xOLUt3y1t4Fymw2Y0mwkJOTg/O3moEKlYK4njEk9I9DqWlZ5yZJCio9agoKi8PdlVqZTd4sp2XldU9dNRiNxMfGkbdzP6Wb9lK5NwdXSeBBsVqtBsBWbsV2uBBcTTOV2n7sFAVL13Ps1f9SumEXHnvrHnEXBEEQBKHlysjIQJIkiltYgsSmMmPGDPr27RvubpxzWlzwCN6RMFM7Ix0utGCMN4S7Ow2WnOKtq3Mk5wj6GB0dhlqISDS12OQpcW3j2bh9L7LUIn8t/JLm1EqWcZ4qom2BA/WJUkpOFwTdjlKpxGl1kJ+ZhdwMa3Dd5VaKVmVy/LUvvCOR50DiKEEQBEEQ6jZnzhwkSWLq1KlB7XfjjTcyevRov9dWrFiBJEnMmDHD7/UZM2aQlJTUyJ76S05O5tVXXw3pMRtLkiSWLl0a7m60SKF+aNAyo4TfqLRK4vvGktAvDrW+xSzPDJhGoyE5NZmdp7ahSJRb/DlYLBZsdgcFFW4kpTrc3alRXUlzPDY7lbt+oXJPDkqPjMlkoqSkJOhgzGN1cWLLL5QVNXydY0O4K6wULF3PiQVf4zxd3KxtC4IgCILQfDIzM3n77bfp06dP0PuOGDGCDRs24Dpj2cuaNWtITEwkIyPDb9s1a9YwYsSIxna3Wbjdbjye1p04syEcjppnnlXNXGxpWnTwCN4nCcZ4Ax0utBDTtQ0qbcua7lkbpVpJm9RIhtx0Pro4LStXrmzxI0rx8fFIkkRefj5KUwwKjT7cXarGZDZ712WeeS1lGcfx05RnHsBZ+HvAFxUVhcfjobQ08CDQVWLDeqgArVKDtbIylF0PmD33BHlv/Y/SH3ch/wE/RAVBEAThXFZeXs7NN9/MO++8Q5s2bYLef8SIEZSXl7NlyxbfaxkZGUyfPp3Nmzf7MsbbbDY2b95cLXjcunUrAwYMwGAwMGTIEA4cOOB7Lysri6uvvpr4+HhMJhMDBw7ku+++870/fPhwjhw5woMPPogkSXXOpnvllVfo3bs3RqORxMRE7r33XsrPyK2xcOFCoqKiWLZsGT169ECr1ZKbm4vdbmfatGm0b98eo9HIoEGDqgXFZ0pOTgZg/PjxSJLk+77KRx99RHJyMpGRkdx4443+SRE9HmbPnk1KSgp6vZ709HQ+//zzWtsCsNvtPPbYYyQmJqLVaklNTeW9997zO6czLV261O86VU2nfffdd0lJSfHVhZck
iTfffJM//elPGI1GXnjhBQD+97//0b9/f3Q6HZ06dWLmzJl+Dw4kSeLdd99l/PjxGAwGunTpwrJlywDIycnx/fzbtGmDJElMmjSpzvOrT4sPHqsolAqiUiJIvLg9cT2j0Zha5siY2qAmtlsbkoa1Izo1CrVWzejRo8nOzmb//v3h7l6dNBoNsbGx5OXlIUkSCn0kSn0ELWmS7dlJc2SHk4qdWVgPHkV2+yfCUSqVmM1mSktKAnqS5cgrw55dDB4ZvcFAZZiCR/CW/Cj6NpMTC77GVVz3Gk9BEARBEFqP++67j6uuuoqRI0fW+P6kSZMYPnx4rfunpaXRrl071qxZA0BZWRk///wzEyZMIDk5mY0bNwLw448/YrfbqwWPTzzxBHPnzmXLli2oVComT57se6+8vJwrr7yS1atXs23bNkaPHs3YsWO92e6BxYsX06FDB5599lny8vLIy8urtZ8KhYLXXnuNPXv28MEHH/D999/z6KOP+m1TWVnJiy++yLvvvsuePXto27YtU6ZMYePGjSxatIidO3cyYcIERo8ezaFDh2psJzMzE4AFCxaQl5fn+x68wfDSpUv56quv+Oqrr1i7di1z5szxvT979mw+/PBD3nrrLfbs2cODDz7ILbfcwtq1a2s9r4kTJ/Lpp5/y2muvsW/fPt5++21vybsgHD58mC+++ILFixezfft23+szZsxg/Pjx7Nq1i8mTJ7N+/XomTpzIAw88wN69e3n77bdZuHChL7CsMnPmTK6//np27tzJlVdeyc0330xhYSGJiYl88cUXABw4cIC8vDzmzZsXVF/P1rLnUdZAoZSISDRj7mDCVmSnNLecihOVYR3VkyQJQ5yeiEQT+lhdtacwXbp0oUuXLnz77bd06dIFlarlXnaLxUJ+fj7gPS9Ja0RSqnFXFCHL4R8FOzNpjsbpoXJvTrU6jWeKjIykvLyc0tLSWmtuyi4P9iPFuM8oEWMwGHA4HLjdbpTK8I1224+eJP//viT2uuHoOrULWz8EQRAEQWi8RYsW8fPPP/sFOGezWCz1PvQeMWIEGRkZPP7446xfv560tDTi4uK4+OKLycjI8L2fkpJCx44d/fZ94YUXGDZsGADTp0/nqquuwmazodPpSE9PJz093bftc889x5IlS1i2bBlTpkwhOjra93A+obZ62r85cy1ncnIyzz//PHfffTdvvPGG73Wn08kbb7zhazM3N5cFCxaQm5tLu3be+55p06axYsUKFixYwKxZs6q1ExfnLX0WFRVVrU8ej4eFCxdiNnvvH2+99VZWr17NCy+8gN1uZ9asWXz33XcMHjwYgE6dOvHDDz/w9ttv+67RmQ4ePMhnn33GqlWrfMF/p06d6rwONXE4HHz44Ye+vlf585//zO233+77fvLkyUyfPp3bbrvN19Zzzz3Ho48+yjPPPOPbbtKkSdx0000AzJo1i9dee42ffvqJ0aNHEx0dDXhLCdZVfz5QLTeKqYckSeijdeijdbgdbsrzKyk/XoG9xNFsgaQ2UovJYsCUYEClq/tSjho1ijfeeIMff/yRiy++uFn61xAJCQns27cPj8eD4rcaiZJKg9Ich8dWisdRd5mMplaVNKciJw+dTap3Wqdv9LG0lIiICN85VXGV2HAcLUU+K6OqXu+dsltZWen7wAkXd6WNkx+tJGrkAMxDerXYhEuCIAiCINTu6NGjPPDAA6xatco3VbEms2fPrvdYw4cPZ+rUqTidTjIyMnwjlcOGDePtt98G8AWRZztznaXFYgHg5MmTJCUlUV5ezowZM1i+fDl5eXm4fpvtVTXyGIzvvvuO2bNns3//fkpLS3G5XNhsNiorKzEYvAkxNRqNX3927dqF2+0mLS3N71h2u52YmJig+5CcnOx3H2exWDh58iTgHf2rrKzksssu89vH4XDQr1+/Go+3fft2lEpljYFlMDp27FgtcAQYMGCA3/c7duxgw4YNfiONbre72nU88xoajUYiIiJ85xlq
rTZ4PJNSoyQyyUxkkhm304OtyIatyI69xIG91IEnBKUWFCoF2ggNGrMGfbQWXRttUOU2YmNjGTRoEOvXr6dv375EREQ0uk9NwWKx4HQ6KSgo8PullhQKlIYoJLUOT2VJ+EYhZZmocjfO03nI9TzxqhIZGUlZWRklJSW+tQWyy4P911LcxbYa96n6Y2wJwSOALMsUrcrEkV9AzLiLkMI4GioIgiAIQvC2bt3KyZMn6d+/v+81t9vNunXrmD9/Pna7PeDZTiNGjKCiooLMzEzWrFnDI488AniDx8mTJ1NYWMjmzZv561//Wm3fqpJkgO+BdNVI57Rp01i1ahUvv/wyqamp6PV6rrvuulqTutQmJyeHMWPGcM899/DCCy8QHR3NDz/8wB133IHD4fDdZ+n1er+H4uXl5SiVSrZu3VrtWgQ7NfTsc60636pzrVp/uXz5ctq3b++3nVarrfF4VYMLtVEoFNUGsWpKfGM01lzT/uzXy8vLmTlzJtdcc021bc98AFHXeYbaORE8nkmpVmBsa8DY1vtLKcsyzkoXznInLpsbl82Fy+r9f49bRnbLyB4ZSSEhKSUUSgmVTolKp0KlU6LUqdCY1KgNKiRF40Z8hg0bxs6dO/nuu+9q/CVoCaqG+/Py8mp8IqJQ65AiNHhs5cj2Cpp1srDHg3V/LvoKJyVBfIgpFAoiIyIoKSkhIiICT5EdR14ZuGvvvVKpRKPVhi1pTm0qdv2Cx+Yg9vpLUKjPuT9fQRAEQThnXXrppezatcvvtdtvv51u3brx2GOPBbVMpnPnziQmJrJs2TK2b9/uGwlr37497du3Z+7cuTgcjqAzrW7YsIFJkyYxfvx4wBu85OTk+G2j0Whwn5Vn4mxbt27F4/Ewd+5c36yvzz77rN72+/Xrh9vt5uTJk1x00UUB91utVtfbp7OdmaQn0JHE3r174/F4WLt2bY1rVuPi4igrK6OiosIXCJ65pjFY/fv358CBA6Smpjb4GBqNBiDo61Obc/7uU5IkNEY1GmP4E+zodDouvfRSli1bxsCBA0lMTAx3l6rR6/W0adOGvLy8WtNHS5ICpT4CWaP3BpFOW9MHkR4PlXuP4DxdjFarRfZ4cLlcAa8fNUdE8OvBI+zfd5LkhMCuu8FgoNIa3mm6NbEe+pVTH68i7ubLRAApCIIgCK2E2WymV69efq8ZjUZiYmL8Xn/88cc5duwYH374YZ3HGzFiBG+88QapqanEx8f7Xh82bBivv/66L7FOMLp06cLixYsZO3YskiTx1FNPVRvBSk5OZt26ddx4441otVpiY2OrHSc1NRWn08nrr7/O2LFj2bBhA2+99Va97aelpXHzzTczceJE5s6dS79+/Th16hSrV6+mT58+XHXVVTXul5yczOrVqxk6dCharTagLLZms5lp06bx4IMP4vF4uPDCCykpKWHDhg1ERET41hme3c5tt93G5MmTee2110hPT+fIkSOcPHmS66+/nkGDBmEwGPj73//O/fffz+bNm1m4cGG9fanN008/zZgxY0hKSuK6665DoVCwY8cOdu/ezfPPPx/QMTp27IgkSXz11VdceeWV6PX6Bo3iVmk12VbPFX379sVisfDNN9+02NIdZybNqYukVKM0tkFpjkOhNTTdWjxZxnog11f7sOoJit1ur39fj4yzoBLbgUKU+XZyD2b7UljXx6DXhzXjal1sOXmc/u+aahlmBUEQBEFo3fLy8gJaYzhixAjKysqqZWYdNmwYZWVlDarv+Morr9CmTRuGDBnC2LFjGTVqlN80W4Bnn32WnJwcOnfuXOMsNYD09HReeeUVXnzxRXr16sXHH38c0FpO8GZNnThxIg8//DBdu3Zl3LhxZGZmkpSUVOs+c+fOZdWqVSQmJta6XrEmzz33HE899RSzZ8+me/fujB49muXLl5OSklLrPm+++SbXXXcd9957L926deOuu+6ioqICgOjoaP7973/z9ddf07t3bz799FNmzJgRcH/O
NmrUKL766iu+/fZbBg4cyAUXXMA///nPakmQ6tK+fXtmzpzJ9OnTiY+PZ8qUKQ3uD4Akt9QI5hyWm5vL+++/z9VXXx3UL3hzWbduHT/++COPPfZYUAGhLHuQHTY8jgpkt6v+HQJkyzqG/aj/ot9jv/6KwWis9cmSx+7CeaoSV5HVNz3V7Xbz0+bNRMfE0LVr13rb/fXXX8nOzg5q2kRzM6anetdAiiQ6giAIwh+IzWYjOzvbr06eIAgNF+jflBh5DIOkpCR69+7N6tWrAxs9a2YWiwWbzUZxcXFQ+0mSAoXWgNIUi8oci0JrRFI0LrGL80RhtcARQKPV4jjr2slON85TlVgPFmDddxrX6Uq/dY1KpZKkjh3Jz8/31Ymsi8FgwPNbRquWqmLHYcp/2hfubgiCIAiCIAh/ACJ4DJPLLrsMu93OunXrwt2VaqrSNtdV+LUukiR5p7TqI1BFtEUVEYfSEIlCY0BSqpEIbJTMXVaJ9cDRGt/TqNWUnCrGVVCJ/WgJlXtPUbnnFI5jpXgqa6/7aLFY0Go05GRn19t+VSawQALNcCpa+RO2I/VPMxYEQRAEQRCExhDBY5hERERw4YUXsmnTJgoKCsLdHT8mkwmz2RzQusdASAoVCo0BpSESlTkWZWQ8KlOsd72kPgKF1ujN4qrSeINLpQrZLVO+9RCuUhuuEhvOUxU4jpViyy7GeuA0ZdvzObx6O8UHT+AqsCI7Alv7p1Ao6JiczMmTJ33z02uj1WpRKBQtdt1jFdnj4fRna3CV1n0+giAIgiAIgtAYIngMoyFDhmA2m1m5cmW4u1JNQkJCg0ce6yNJEpJKjUKtQ6E1otRHoDS2QWWKQWWORWWOo2T1fiq2H8d2uBB7djGOY2U4T1XiLrHhsbow/Zb+uKysLOj2ExIS0On19Y4+SpKEXq9vceU6auKusFKwdH2LTcIkCIIgCIIgtH4ieAwjtVrN5ZdfzsGDBzl06FC4u+PHYrE0WfBYn4o92VTuzalzG7VajU6no7wBwaMkSSQnJ3P69Ol6g0+9wdDiRx6r2H45TvnWA+HuhiAIgiAIzWzhwoVERUWFpe2cnBwkSWpUPcOm0pL71lqJ4DHMunfvTnJyMitXrgxZ8c5QsFgslJeXN2hkrzHcFVaKlm8MaFuT2dzg/rVt2xaD0Vjv6GNLrfVYm+JvM3EVN+/PTBAEQRCEP67ExETy8vKq1bBsKBHwtWwieAwzSZK44oorKCgo4Keffgp3d3wamzSnoYpW/oS7MrDspmazmbLy8ga1UzX6WFhYSElJSa3b6fV67DZbiwrs6+JxOCn8aqOYvioIgiAIQqPIsozLVX/pNaVSSUJCAiqVqhl6JYSbCB5bgPj4eAYMGEBGRka9SVyaS2RkJHq9PmRJcwJhy8mjYmdWwNubzWbcLleDs6HGxcVhMpvJrmP0sbVkXD2T9fCvWA/UX1xYEARBEITmtWLFCi688EKioqKIiYlhzJgxZGX9fu9TNeq2ePFiRowYgcFgID09nY0b/WdlLVy4kKSkJAwGA+PHj683+WLVcRctWsSQIUPQ6XT06tWLtWvXNe/NtAABAABJREFU+rbJyMhAkiS++eYbzjvvPLRaLT/88AN2u53777+ftm3botPpuPDCC8nMzKx27DNHCnfv3s0VV1yByWQiPj6eW2+9ldOnT/ve93g8vPTSS6SmpqLVaklKSuKFF14AICUlBYB+/fohSRLDhw/37ffuu+/SvXt3dDod3bp144033vA7z59++ol+/fqh0+kYMGAA27Ztq+cnIgRLBI8txIgRI1AoFHz//ffh7grgHZlryqQ5Z5NlmaJvNge1j8lkAhqWNKdKcnIyJcXFFBUV1fh+VfDYWtY9VilasRm5lYyWCoIgCMIfRUVFBQ899BBbtmxh9erVKBQKxo8fj8fj8dvuiSeeYNq0aWzfvp20tDRuuukm3yjg5s2bueOOO5gyZQrbt29nxIgRPP/88wG1/8gjj/Dwww+z
bds2Bg8ezNixY6sFntOnT2fOnDns27ePPn368Oijj/LFF1/wwQcf8PPPP5OamsqoUaMoLCyssY3i4mIuueQS+vXrx5YtW1ixYgUnTpzg+uuv923z+OOPM2fOHJ566in27t3LJ598Qnx8PIBvJt53331HXl4eixcvBuDjjz/m6aef5oUXXmDfvn3MmjWLp556ig8++ACA8vJyxowZQ48ePdi6dSszZsxg2rRpAV0XIXBifLmFMBgMDB8+nBUrVjBgwADftNFwslgs7N27t1nash7IxXGi5g+h2pyZNKdt27YNajcmJoaIyEiys7Np06ZNtfdVKhVqjaZVjTwCuIrLqdiZhalfWri7IgiCIAjCb6699lq/799//33i4uLYu3ev35rBadOmcdVVVwEwc+ZMevbsyeHDh+nWrRvz5s1j9OjRPProowCkpaXx448/smLFinrbnzJliq8Pb775JitWrOC9997zHQvg2Wef5bLLLgO8we6bb77JwoULueKKKwB45513WLVqFe+99x6PPPJItTbmz59Pv379mDVrlt95JiYmcvDgQSwWC/PmzWP+/PncdtttAHTu3JkLL7wQ8M4MA+89WkJCgu8YzzzzDHPnzuWaa64BvCOUe/fu5e233+a2227jk08+wePx8N5776HT6ejZsye//vor99xzT73XRQicGHlsQQYOHEhcXBzffPNNi1izZrFYKC4ubvLASZZlSn/Y1aB9G5M0p0pKSgplpaW1Tvkw6PWtbuQRoPSHXchnPckUBEEQBCF8Dh06xE033USnTp2IiIggOTkZgNxc/+Umffr08f131YDCyZMnAdi3bx+DBg3y237w4MEBtX/mdiqVigEDBrBv3z6/bQYMGOD776ysLJxOJ0OHDvW9plarOf/886vtV2XHjh2sWbMGk8nk++rWrZvvePv27cNut3PppZcG1GfwBrFZWVnccccdfsd9/vnnfdN+q0ZKdTpdjecrhIYYeWxBFAoFo0eP5sMPP2TPnj0hy1rVUGcmzenUqVOTteM4dgr7rycbtK/ZbK72gRusqKgootq0ITs7m5iYGN/rstuDu7ySSKeE83QB5bZDUBWMKSQUWg1KswGlWY/SbEBSKhvVj1BzFpRgO3wMfVpiuLsiCIIgCAIwduxYOnbsyDvvvEO7du3weDz06tULh8Pht51arfb9tyRJANWmtjYV42+1tBuqvLycsWPH8uKLL1Z7z2Kx8MsvvzTomOAd9Tw7cFa2sPuvc50YeWxhOnXqRLdu3fj222+rfZA0t+joaDQaTZMnzSnL3N/gfU0mU6OS5lRJSUmhoryckydP4ioup3JvDqUbdlGx7RDRlTLxaiPuknLcZZXer5IKnCeLsGUdo2L7YUp/2EXl7l9wFZY2qh+hVpZZ81NBQRAEQRCaV0FBAQcOHODJJ5/k0ksvpXv37rXmXKhL9+7d2bzZP0/Epk2bAtr3zO1cLhdbt26le/futW7fuXNnNBoNGzZs8L3mdDrJzMykR48eNe7Tv39/9uzZQ3JyMqmpqX5fRqORLl26oNfrWb16dY37azQaAL9M9/Hx8bRr145ffvml2jGrEux0796dnTt3YrP9nrU/0OsiBE4Ejy3QqFGjqKio8PtDDQeFQkF8fHyTJs1xV1ip3F13rcW6mM1moHFJcwAiIiJI0Jqp2HKAiu2HcJ4s+n2UMRCyjPN0CRU7syjbvBdHXt1Zz5qL7fAxXEWi7qMgCIIghFubNm2IiYnh//7v/zh8+DDff/89Dz30UNDHuf/++1mxYgUvv/wyhw4dYv78+QGtdwT417/+xZIlS9i/fz/33XcfRUVFTJ48udbtjUYj99xzD4888ggrVqxg79693HXXXVRWVnLHHXfUuM99991HYWEhN910E5mZmWRlZbFy5Upuv/123G43Op2Oxx57jEcffZQPP/yQrKwsNm3axHvvvQd4a3Hr9Xpfop2qkmozZ85k9uzZvPbaaxw8eJBdu3axYMECXnnl
FQD+/Oc/I0kSd911F3v37uXrr7/m5ZdfDubSCgEQwWML1KZNG4YMGcKGDRsoLi4Oa18sFkuTBo8V2w83KivomUlzGkp2OKncnU2CXUG0wdTg41TxWO1YD+RSsTMLjz28o8eyLFO+9UBY+yAIgiAIgveh/KJFi9i6dSu9evXiwQcf5B//+EfQx7ngggt45513mDdvHunp6Xz77bc8+eSTAe07Z84c5syZQ3p6Oj/88APLli0jNja23n2uvfZabr31Vvr378/hw4dZuXJljYkGAdq1a8eGDRtwu91cfvnl9O7dm6lTpxIVFYVC4Q09nnrqKR5++GGefvppunfvzg033OBb06lSqXjttdd4++23adeuHVdffTUAd955J++++y4LFiygd+/eDBs2jIULF/pGHk0mE19++SW7du2iX79+PPHEEzVOnRUaR5JbQmYWoRqHw8Hrr79OUlISEyZMCFs/tm3bxrJly3j88cd90whCKe/NpUFnWT3bnj17cDmdpPftG/S+ztPFWA8cRXbWXwS3ISSVEl2XDmjio5vk+IFQRRhp9+D1vjUTgiAIgtDa2Ww2srOzSUlJ8UuQItQsJyeHlJQUtm3bRt8G3C/V5cCBA3Tr1o1Dhw6Rmpoa0mMLzSfQvykx8thCaTQaRo4cyZ49e8jJyQlbPywWC7IsN8m6R8eJwkYHjuCdulr220LqoNrPK6ByT06TBY4AssuNdd8R7EcblhAoFFylFdiPNO26VUEQBEEQ/ngKCwv5/PPPiYiIIDFRJOj7IxDBYwvWp08fOnTowDfffNNsGbbOFhcXh1KpbJLg0XqgcVlSqzQkaY4jv8DbfjMNvNuyjoU1gLTuD821FgRBEARBqHLHHXfw9ttv8+abb6LVasPdHaEZiFIdLZgkSVxxxRW88847/Pzzz351d5qLUqmkbdu2TbLu0ZYTmoD0zKQ5er2+3u1dRWVYDxwNSdvBsGUdQ6FVo25b8xqBJm1bjDwKgiAIwh9WcnJyk9QQX7JkSciPKbRsYuSxhWvfvj19+/bl+++/b3Q5ioZqiqQ5stuNI0QjcWq1Gm2ASXNkt9s7Chempb7WQ78iO5zN3q4zvxC31d7s7QqCIAiCIAjnDhE8tgIjR47E5XKRkZERlvYtFou3/qErdGsDnSeL8IRwrWGg6x5th4+FNQOq7HRhPfhr87cryzhbSPkQQRAEQWgpZFmmyHGaY9Ycihynm2R0ThDOJSJ4bAVMJhPDhg0jMzPTl8a4OVksFjweD6dOnQrZMR3HQxvImM3mems9uorLWkT9RefpYpynipu9Xfvx083epiAIgiC0RKXOYj7KfY3RP3Zj6DoLl23owtB1Fkb/2I2Pcl+j1Fkc7i62OgsXLiQqKirc3QjapEmTGDdunO/74cOHM3Xq1Dr3aa3nGgoieGwlBg0aRFRUFCtXrmz2p2Lx8fFIkhTSqauhDuICSZrTqIQ1CgXKCCOq2EjUsZEoI01ISmWDDxeO5DkOETwKgiAIAj8UfMuIH5KZc3Aav1qz/d771ZrNnIPTGPFDMj8UfBumHrZON9xwAwcPHgx3Nxpt8eLFPPfcc77vk5OTefXVV/22OVfOtSFE8NhKqFQqRo0aRVZWFgcONG/Rd7VaTWxsbGiDx/zGl+g405lJc2risTlwFda/JvJskkZNSTszW9vKLHb9yoLCvXxSdpi1mmJcfZPRd01CoQ2+/qW7tAJ3eWXQ+zWGM8TXXBAEQRBamx8KvuXubX/C5rYi//a/M1W9ZnNbuXvbn0QAGQS9Xk/btm3D3Y1Gi46O9t1X1uZcOdeGEMFjK5KWlkZqaiorV64M6frDQIQ6aY6rOPhAri71Jc1xHD8ddJIcpdmAaUBXdjqLOFyYT2KbOIZ36U3vdh05VlLAp9vXUxqhxjSwK6ooU9B9dhxr3pFAV0mFWMshCIIg/GGVOot5YOf1v4WHdZdAk/EgI/PAzutDOoXV4/Ewe/ZsUlJS0Ov1pKen8/nnn/tts2fP
HsaMGUNERARms5mLLrqIrKws3/7PPvssHTp0QKvV0rdvX1asWOHbNycnB0mSWLx4MSNGjMBgMJCens7GjRv92vjiiy/o2bMnWq2W5ORk5s6d6/d+cnIyzz//PBMnTsRkMtGxY0eWLVvGqVOnuPrqqzGZTPTp04ctW7b49qlpKueXX37JwIED0el0xMbGMn78+DqvT13bFxUVMXHiRNq0aYPBYOCKK67g0KFD1dpfuXIl3bt3x2QyMXr0aL/7V7fbzUMPPURUVBQxMTE8+uij1e6Nzpy2Onz4cI4cOcKDDz6IJElIklTrub755pt07twZjUZD165d+eijj/zelySJd999l/Hjx2MwGOjSpQvLli3zO7+bb76ZuLg49Ho9Xbp0YcGCBXVer3AQwWMrIkkSo0aNoqSkhE2bNjVr2xaLhRMnToSk3qTscuEuD33m2LqS5gS7xlBpNmBM74xCo6Z/h87cccHljOjSm16Wjgzq2JUJfS/EI3vYknsISaXC0Ltz0AFkc697lN1uPBXhydgrCIIgCOH2v7wPsbkr6w0cq8h4sLkr+V/eR/VvHKDZs2fz4Yf/z959h0dZpQ0c/r0zk8nU9JAGJAGSECBAKFKlKAooCDYUEQQVXRUVNYquSlWKgoK98BnQRRFXZFkQXEWCiihFaRIDBEIooRNIz7Tvj5gxQ+qQDs99XblkZk573xTnmXPOcz7mvffe448//uCJJ57g7rvvZsOGDQAcPXqUPn364Onpyffff8+2bdu49957nZMGCxYsYN68ecydO5edO3cycOBAbrrpJpcgCuD5558nISGB7du3Ex0dzciRI51tbNu2jREjRnDnnXeya9cupk6dyosvvsiiRYtc2nj99dfp1asXv//+OzfeeCOjR49mzJgx3H333fz222+0bNmSMWPGlPvB9OrVq7n55pu54YYb+P3331m3bh1XXXVVufemsvJjx45l69atrFy5kk2bNuFwOLjhhhuwWP7OYp+bm8vcuXP55JNP+OGHH0hPTychIcH5+rx581i0aBEfffQRP/30E2fPnq3wuJHly5fTtGlTpk+fTkZGRrkTKV999RWPP/44Tz31FLt37+bBBx9k3LhxrF+/3qXctGnTGDFiBDt37uSGG25g1KhRnD1btDLsxRdfZM+ePaxZs4bk5GTeffddAgICyh1bfZFzHhuZwMBArrrqKn744Qc6dOhQ6bR6TQkJCcFisXDmzBkCAwOr1Zb1Qu0s1zSbzRxOTy/1vMNqw+7GMRWKWo2hbQSKpujXI9Tbr1QZX4MJP4OZMzlZf9VRoW8TQfbWlCofxVE8LpW+7g7VtZ7PQW0y1Fl/QgghREPgcDj41+G3L6nuvw6/xd3NJjhnnS5VQUEBM2fO5LvvvqNHjx4AtGjRgp9++on333+fvn378vbbb+Pt7c3SpUvx8PAAilaeFZs7dy6TJk3izjvvBGDOnDmsX7+e+fPn8/bbf19fQkICN954I1AUsLRt25b9+/fTunVrXnvtNa699lpefPFFZ/t79uzh1VdfZezYsc42brjhBh588EEAJk+ezLvvvkvXrl25/fbbAZg0aRI9evTgxIkTBAcHl7rel19+mTvvvJNp06Y5n+vQoUO596ei8vv27WPlypVs3LiRnj17ArBkyRKaNWvGihUrnGOyWCy89957tGzZEoAJEyYwffp0Z3vz58/nueee45ZbbgHgvffe45tvvil3TH5+fqjVasxmc5nXWGzu3LmMHTuWhx9+GIAnn3ySX375hblz59K/f39nubFjxzJy5EgAZs6cyRtvvMHmzZsZNGgQ6enpxMfHO891j4iIKLe/+iQzj41Qv3790Gg0fPfdd3XWZ/EvTE0sXbVdyKl2G2UxmUxYy0iaY8tyL1jVhgag0lUc0DkcDvIsBeg9/t7vqNJ64NnUvcDa3bFVl+187dx7IYQQoiHLtJzhcN6BUnscK+PAweG8A5y3VD9vwP79+8nNzeW6667DZDI5vz7+
+GPnstTt27dz9dVXOwPHki5cuMCxY8fo1auXy/O9evUiOTnZ5bn27ds7/x0SEgLgzNifnJxcZhv79u3DZrOV2UZQUBAAcXFxpZ4r7ySA7du3c+2115b5mrvlk5OT0Wg0dOvWzfmcv78/MTExLtduMBicgSP8fdwcwPnz58nIyHBpQ6PROIO16ijvnlb0fTEajXh5eTnH99BDD7F06VI6duzIM888w88//1ztcdUGCR4bIZ1Ox7XXXsuOHTs4cqRuzgzU6XT4+vrWSPBoz62dw+rLS5rj7hJZj+DSM40X+/PkEbIK8oluEuZ23eqMrbrcmYEVQgghLhe5tsrPgq5Ijq36uRqy/9pas3r1arZv3+782rNnj3Pfo16vr3Y/gEvwWTxj6u7Wo7LacKddd6+lJq794qBbUZQGle+hrPEV37/Bgwc791ceO3aMa6+91mXJbUMhwWMjFR8fT3BwMGvWrKmzX4qaSprjsNoqL3QJykua4yio2jJSKFqyqjJUPOt4NjeL7/fuJNTLjzbBzVxeU2k9UOmqnn3V7sbYakJt3XshhBCiITOo3U9sV5JRXf1tQm3atMHT05P09HRatWrl8tWsWdH7ifbt2/Pjjz+67OMr5uXlRWhoKBs3bnR5fuPGjbRp06bK44iNjS2zjejoaNTVOIbsYu3bt2fdunU1Uj42Nhar1cqvv/7qfO7MmTOkpKRU+dq9vb0JCQlxacNqtbJt27YK62m1WpcZ2fLGV93vCxRtT7vnnnv417/+xfz58/nggw/cql8XZM9jI6VSqRg8eDCJiYns2LGDjh071nqfISEhbNy4EYfDUa11/45azBRbVtIchzuftGlUFV5bTmE+K3b+gqfGgxvbdkGllPH5izt/eOv40zAJHoUQQlyJfDz8aaZvwZG8g24tXVVQaKqPxNvDvZVFZTGbzSQkJPDEE09gt9vp3bs358+fZ+PGjXh5eXHPPfcwYcIE3nzzTe68806ee+45vL29+eWXX7jqqquIiYnh6aefZsqUKbRs2ZKOHTuSmJjI9u3bWbJkSZXH8dRTT9G1a1dmzJjBHXfcwaZNm3jrrbd45513qn2NJU2ZMoVrr72Wli1bcuedd2K1Wvn666+ZNGmS2+WjoqIYNmwY48eP5/3338dsNvPss88SFhbGsGHDqjymxx9/nNmzZxMVFeXc/5mZmVlhnYiICH744QfuvPNOPD09y0xi8/TTTzNixAji4+MZMGAA//3vf1m+fLlbW8wmT55M586dadu2LQUFBaxatYrY2Ngq168rMvPYiIWHh9OuXTu+++47CgpqfzliSEgI+fn5lf6SVaqaG84rYjaZSh/X4U5/Ffz/JN9q4audv1BgtXBz+x6YPMtbXtFwlkeUUnu3XgghhGiwFEXh7maPXFLdmkiWU2zGjBm8+OKLzJo1i9jYWAYNGsTq1auJjIwEivbxff/992RnZ9O3b186d+7Mhx9+6Fzu+Nhjj/Hkk0/y1FNPERcXx9q1a1m5ciVRUVFVHkOnTp1YtmwZS5cupV27dkyePJnp06e7JMupCf369eOLL75g5cqVdOzYkWuuuYbNmzdfcvnExEQ6d+7MkCFD6NGjBw6Hg6+//rrM/aHleeqppxg9ejT33HMPPXr0wGw2V3p8yPTp00lLS6Nly5blJo0cPnw4CxYsYO7cubRt25b333+fxMRE+vXrV+WxabVannvuOdq3b0+fPn1Qq9UsXbq0yvXriuJoSAuBhdvOnz/PW2+9Rbdu3RgwYECt9pWdnc3cuXMZMWKE29PwJeXsPsDpfyfV3MBKOHv2LLt27uSqbt2ca+fzU49ScLjszdylKApevdujqF0/V7HabSzfsYmTWZnc0qFnmRlYoSiRTtbGXVWe4dOG+KOPaV61sdUAvxu6Y77q0r93QgghREOQn5/PwYMHiYyMRKfTVanOBUsm/X+KIN+WV6XjOlSo8FTrWd87DS8Pn2qOWIiGraq/UzLz2Mh5e3vTq1cvNm3a
5DwnpraYTCbMZnO19z0qmppbT3+xspLmqAxV+58KAA4H1vOuy17tDjur/9hKxoWz3Ni2a7mBI4A9O8+tpaF1eUwHgOIhK9WFEEJcmbw8fFjQfhkKCkolb4GLXld4o/0XEjgKUYIEj5eBXr16YTKZKjynpqbURNIctbn2zhksK2mO2uxe9q7Co6ddHv+Q+gcHzhwnwi+IfGshyScOu3yVVHD0lFt91ea9KLM/U81kcRNCCCEao97+1/Ne/Ep0av1fQaTrctTi53RqPe/H/5de/tfV00iFaJhkGuIy4OHhwfXXX88XX3xBamqqy/k2NS0kJIStW7dWK2mOxttYw6NydXHSHLVRDyoVVDFxjvXMeSynMvEI9AHgVPZ5AA6cOc6BM8dLlY8NKsqQZs3MwnLinFtjrfPg0bt62eaEEEKIxq63//Ws753GfzI+4V+H3+Jw3gHna031kdzdbALDQ8dg1njX4yiFaJgkeLxMtGnThvDwcNauXcs//vGPGk21XFJISAg5OTlkZ2c7l4i6S2XUo6jVOCpJe3ypzCYThw+XmBFUFNRmPbbzOVVuI+/PQygeajQ+Zm7v2LvS8rasXHJ3H3Qre6rKoKvVJbxlqe3AXQghhGgMvDx8GN38Ue5uNoHzlrPk2LIwqs14e/jVWHIcIS5Hsmz1MqEoCoMHD+b06dNs2bKl1voJDg4GqNbSVUVRUHvV3oybyWzGarWSl5fnfE4b7O9WGw6bnZydByg8cbbSczQtp8+Ts2O/28dgaEPcG1N1qXRaVJ5VP4NSCCGEuNwpioKP1p8wfQQ+Wv9aDxz79evHxIkTa7WPYosWLcLHx8f5eOrUqXVytFtdq8t7KiR4vKwEBwfTuXNnkpKSyMmp+iybO7y9vdHr9dXe9+jh51VDIyqtrKQ5HkG+7s/y2e3kJR8iZ/s+Co+fwZ5XgMNmw2GzYc8vpPDkOXJ27Cd39wH3z09UqdAGV//MKHfU5j0XQgghROWWL1/OjBkzqtXG2LFjURSF2bNnuzy/YsUKl+D3jjvuYO/evdXqq6SkpCQiIiJqrL2aUhP3VFSdBI+Xmf79+wOwfv36WmlfUZQaSZqjDS19wGpNcSbNKbHvUVGp3J59LGY7n0Pen+lk/bqHCz/u5MKPO8n65Q/y9qRhPZdVeQNljbGJT51nPvWo45lOIYQQQrjy8/O75G0/Jel0OubMmcO5c+XnWtDr9TRp0qTafTV01b2nNpsNexXzYggJHi87RqORfv36sW3bNo4fL53cpSaEhIRUu+3aXrJpNptdZh4BtM2b1Pkew7IoahW68OA679ezFgN2IYQQQlTu4iWWERERzJw5k3vvvRez2Uzz5s354IMPKm1nwIABBAcHM2vWrHLLXLxs9WKpqam0aNGCCRMm4HA4KCgoICEhgbCwMIxGI926dSMpKanc+jt27KB///6YzWa8vLzo3LkzW7duLbe8oii8//77DBkyBIPBQGxsLJs2bWL//v3069cPo9FIz549SU1NdRnjsGHDCAoKwmQy0bVrV7777juXdi++p+fOnWPMmDH4+vpiMBgYPHgw+/btK3VfVq5cSZs2bfD09CQ9Pb3ccQtXEjxehrp27UpAQABr1qypdL/epQgODiYzM5Pc3NxLbkMbWsvBo8nkclwHgErrga5V01rttyo8I0Lq/HxHqP17LoQQQgj3zZs3jy5duvD777/z8MMP89BDD5GSklJhHbVazcyZM3nzzTc5cuSI233u3LmT3r17c9ddd/HWW2+hKAoTJkxg06ZNLF26lJ07d3L77bczaNAgl8CrpFGjRtG0aVO2bNnCtm3bePbZZ/Hw8Kiw3xkzZjBmzBi2b99O69atueuuu3jwwQd57rnnnNn8J0yY4CyfnZ3NDTfcwLp16/j9998ZNGgQQ4cOrTDYGzt2LFu3bmXlypVs2rQJh8PBDTfcgMVicZbJzc1lzpw5LFy4kD/++OOKmKGtKRI8XobUajWDBg3i0KFD7Nmzp8bb
DwkJAajW7KPa21Srx1SUlTQHQBvsh8a//vb+qb2NeDar+z9QKq0HHk1867xfIYQQQlTshhtu4OGHH6ZVq1ZMmjSJgICAKm0/uvnmm+nYsSNTpkxxq7+ff/6Zfv36kZCQwEsvvQRAeno6iYmJfPHFF1x99dW0bNmShIQEevfuTWJiIlA0w5eWluZsJz09nQEDBtC6dWuioqK4/fbb6dChQ4V9jxs3jhEjRhAdHc2kSZNIS0tj1KhRDBw4kNjYWB5//HGX2c4OHTrw4IMP0q5dO6KiopgxYwYtW7Zk5cqVZba/b98+Vq5cycKFC7n66qvp0KEDS5Ys4ejRo6xYscJZzmKx8M4779CzZ09iYmIwGOr26LTGTILHy1TLli2JiYnhf//7n8snLTXB398frVZb7YyruojaW7pZVtKcYoaY5qh0dZ91VNF6YGgdXuf9Ang2D0KppeNbhBBCCHHp2rdv7/y3oigEBwdz8uTJKtWdM2cOixcvJjk5uUrl09PTue6665g8eTJPPfWU8/ldu3Zhs9mIjo7GZDI5vzZs2OCyjLSkJ598kvvvv58BAwYwe/bscsuVVPJag4KCAIiLi3N5Lj8/nwsXLgBFM48JCQnExsbi4+ODyWQiOTm53JnH5ORkNBoN3bp1cz7n7+9PTEyMyz3SarUuYxFVJ8HjZWzgwIFkZ2fz888/12i7xX/Yqps0RxcZUkMjKq2spDnFFK0Hxg6tUDwrXlpRkxQPDcb2LetluSrU7r0WQgghxKW7eKmnoihVTuDSp08fBg4cyHPPPVel8oGBgVx11VV89tlnzgANioI0tVrNtm3b2L59u/MrOTmZBQsWlNnW1KlT+eOPP7jxxhv5/vvvadOmDV999VWF/Ze81uLMsGU9V3z9CQkJfPXVV8ycOZMff/yR7du3ExcXR2FhYZWutzx6vV7O87xEEjxexvz8/OjRowc//fQT58+fr9G2ayJpjr51OIqq9n4EzSZTmTOPACq9J6aOUXUyA6loPTB2bIXapK/1vspjaBNRb30LIYQQovbMnj2b//73v2zatKnSsnq9nlWrVqHT6Rg4cKDzfVJ8fDw2m42TJ0/SqlUrl6/iM77LEh0dzRNPPMH//vc/brnlFucS15qyceNGxo4dy80330xcXBzBwcEuS2cvFhsbi9Vq5ddff3U+d+bMGVJSUmjTpk2Nju1KJcHjZe7qq6/G09OTb7/9tkbbDQ4O5syZM9X65Edt0KGPqr0ENmazuVTSnJJUek882kVwNCcTq9VaK2PQ+Hth6hyD2lh/gaNn8yA0vtVPCy6EEEKIhicuLo5Ro0bxxhtvVKm80Whk9erVaDQaBg8eTHZ2NtHR0YwaNYoxY8awfPlyDh48yObNm5k1axarV68u1UZeXh4TJkwgKSmJQ4cOsXHjRrZs2UJsbGyNXltUVBTLly9n+/bt7Nixg7vuuqvCWdmoqCiGDRvG+PHj+emnn9ixYwd33303YWFhDBs2rEbHdqWS4PEy5+npyYABA9i9ezeHDh2qsXZDQkJwOBzVnn00dmxVQyMqrbykOSWlpO7ngp8OY7sWKNqaW8aqaNToW4djjGuJqg6Xx5bFVIv3WAghhBD1b/r06W6dVWgymZxZ+W+88UZycnJITExkzJgxPPXUU8TExDB8+HC2bNlC8+bNS9VXq9WcOXOGMWPGEB0dzYgRIxg8eDDTpk2rycvitddew9fXl549ezJ06FAGDhxIp06dKqyTmJhI586dGTJkCD169MDhcPD1119XmglWVI3iqI2zHESD4nA4WLhwITabjQceeABVDSwVtdlszJw5k+uvv95lU7LbY7PbOTb/C6wXcqo9potZLBZ+3riR2DZtykzBfPjwYQ4cOECHDh3w8fHBYbFSeOw0hRlnsOdf2oyqovVAG+qPZ2hAjQajl0ql0xL21J2oPDT1PRQhhBCixuTn53Pw4EEiIyPR6XT1PRxRj3r06MG1117rzBwrLk1Vf6dk5vEKoCgK
gwcP5vjx4/z+++810qZarSYoKKjaSXMUlQpT55gaGdPFKkqak5WVxcEDB2jerJnzAF3FQ4NneDDmbm0wxLXAI9CnzD2RVpvN9Ro8PfAI8MHQNhKvHm3RRYQ0iMARKNrXKYGjEEIIIS4zBQUFbN26lT/++IO2bdvW93CuGPKu8grRtGlTOnTowLp162jbtm2NfEoXEhLC0aNHq92OqXM053/YgeOioKwmlJU0x2azkZycjMlkIiIysnQlRcHD3xsPf28AHBYrtqxc7PmFFBYUkLxrN63bxGLy9UFt1jeYQLEspi61E5gLIYQQQtSnNWvWMGbMGG666SZuu+22+h7OFUNmHq8gAwYMwGq1smHDhhppr/gcouomm1GbDBjbt6iRMV2srKQ5qfv3U1BQQGybNlVK06x4aND4eaENDUAT4k+BlycOfzMaf68GHTgaWofjEeBT38MQQgghhKhxw4cP58KFC/zrX/+S/Yx1SILHK4jZbKZPnz78+uuvnDp1qtrthYSEYLfbq3yQbUW8esXVynk7FyfNOXXqFBkZGbRq1Qq93v0MqMX7RRvDVmGv3nGVFxJCCCGEEKKKJHi8wnTv3h1vb2+++eabagdAQUFBKIpS7X2PAB4BPhg71HxWULO56IiKrKwsCgoK2Lt3L4GBgYSEhFxSexcfXttQ6aOb4dm0dJIgIYQQQlw5IiIimD9/fn0PQ1xGJHi8wmg0GgYOHMj+/fvZt29ftdry8PAgMDDQJXh0OBzY7XZsFhvWAiuWfAuWvL++8i1Y863YCm3YbfZSwavPgM5lJqip7hg9dTqysrL4MzkZtUpFdMyl7wNsDDOPikaN3+Du9T0MIYQQQjRy/fr1Y+LEiVUuf+jQIcaMGUNYWBhms5nrr7+eAwcO1N4ARZ2ThDlXoJiYGFq0aMHatWtp0aIFGo37PwYOhwOH3UFk80jyc/PJPZeLw2bHbnNAVQMrRUGlUlDUKlQeKtQaD7yv6cy5rze5PZ6KmE0mDh06hFqtpkOHDpd0vcUaw8yj99Ud0Pia63sYQgghhKgnhYWFaLU1+4F8VWzdupXw8HBWrVqFWq3moYce4r777mP9+vV1PhZRO2Tm8QpUfHRHZmYmv/76a5Xr2e12LLmF5J3PI+d0DjmnsokIDkeraLHmW7Bb7VUPHAEcDuw2O7ZCK5acQvLP50HzMDyvao+qaQiKnw/UwDETKrWao0eP0qzEsRyX3NZfM48NNXjUBvvLXkchhBCiAevXrx+PPvooEydOxNfXl6CgID788ENycnIYN24cZrOZVq1asWbNGqAoS/x9991HZGQker2emJgYFixY4NLm2LFjGT58OC+//DKhoaHElLPKauHChfj4+LBu3ToAdu/ezeDBgzGZTAQFBTF69GhOnz7tbHPDhg0sWLAARVFQFIW0tDTOnTvHqFGjCAwMRK/XExUVRWJiIgC33norM2bMID4+nvbt23Prrbdy+PDh2rqVoh5I8HiFCgwMpGvXrvzwww9lnoNYzOFwYMm3kHcul5xTOeRfyMeaZ8FhKwqeTCYzdrud3NzcGhmXAuhiw1F5m1EFBaBuFYEqPAzFxwtU7ifUsdlsHD9+HI1GQ3BwcM2MUaVqkMtWFZUK/+G9UdTq+h6KEEIIISqwePFiAgIC2Lx5M48++igPPfQQt99+Oz179uS3337j+uuvZ/To0eTm5mK322natClffPEFe/bsYfLkyfzzn/9k2bJlLm2uW7eOlJQUvv32W1atWlWqz1deeYVnn32W//3vf1x77bVkZmZyzTXXEB8fz9atW1m7di0nTpxgxIgRACxYsIAePXowfvx4MjIyyMjIoFmzZrz44ovs2bOHNWvWkJyczLvvvktAQECp/g4fPszrr7/OvffeWzs3UdQLWbZ6BevXrx+7du1i3bp1DBs2zOU1u83u3KtYHCiWxWQyApCdnY3RaKyRcan1nni2DCV/3xEAFIMexaDH0SQAx/kLODIvQEFhldrav38/
KkUhMCCAnJwcDAZDtcenUpQGOfPo1acD2mD/+h6GEEIIISrRoUMHXnjhBQCee+45Zs+eTUBAAOPHjwdg8uTJvPvuu+zcuZPu3bszbdo0Z93IyEg2bdrEsmXLnIEegNFoZOHChWUuV500aRKffPIJGzZsoG3btgC89dZbxMfHM3PmTGe5jz76iGbNmrF3716io6PRarUYDAaXD+DT09OJj4+nS5cuQFFSnosdOXKE3r17M3z4cP75z39W406JhkaCxyuYXq/nmmuuYdWqVXTp0oWwsDBsVhuWnEIs+dYqLUHVaDTo9Xqys7MJCgqqsbFpQwOwZWZjOZXpfE5Rq4qWsvr54MjOxX7mHOTmldvGqVOnOJ6RQUzr1qSlpZGVlUVgYGC1x6aoVDgaWPCobxmGd58O9T0MIYQQQlRB+/btnf9Wq9X4+/sTF/f3tpPi91TFx6G9/fbbfPTRR6Snp5OXl0dhYSEdO3Z0aTMuLq7MwHHevHnk5OSwdetWWrT4+1ztHTt2sH79ekwmU6k6qampREdHlzn2hx56iFtvvdU5Qzp8+HB69uzpUmbWrFmEhYXxxhtvVHInRGMjy1avcJ06dSIoKIhvv/mWvMxcck/nYMmzuLV30WQykVXB0tdLoQD6mOaojbqyXzcZUIeHoQoPA8/SfygLCgrYm5JCYJMmBAcHYzaZyMrKqpGxqVQq7A1o2arGx4T/rX1RVPLrLIQQQjQGFx9qryiKy3MlE/QtXbqUhIQE7rvvPv73v/+xfft2xo0bR2Gh6yqs8laAXX311dhstlLLXLOzsxk6dCjbt293+dq3bx99+vQpd+yDBw/m0KFDPPHEExw7doxrr72WhIQElzLHjh0jOjq6Vs7wFvVLZh6vcIqicMN1N7B9y+8cTT9GUBP3zwY0m0ykp6fjoCjoq7GxqVXo20aS89teHFZb2WUMelSRzXCcO4/j1BmwO3A4HCQnJ6PWaJyfmpnN5hrbsK1SlAYz86ho1ATecQ1qQ9lBthBCCCEat40bN9KzZ08efvhh53OpqalVrn/VVVcxYcIEBg0ahEajcQZ6nTp14ssvvyQiIqLcTPRarRabrfR7sMDAQO655x7uuecerr76ap5++mnmzp3rfH3u3LmoJQfDZUmmKq5gNquNvLO5+Jl8CQoK5kBqapl/ICpjMpmx2mzk55W/hPRSqfWe6NtEQAWfXCmKgsrPB1VkczDoOXz4MOfPn6d169bOP4Ymsxmr1UpeDYxRUakaxJ5HRVHwv7kP2pDSm9SFEEIIcXmIiopi69atfPPNN+zdu5cXX3yRLVu2uNVGz549+frrr5k2bRrz588H4JFHHuHs2bOMHDmSLVu2kJqayjfffMO4ceOc7wcjIiL49ddfSUtL4/Tp09jtdiZPnsx//vMf9u/fzx9//MGqVauIjY116W/atGm8//77NXL9omGR4PEK5HA4KMwpIPdMLjZL0R+Hli1aYLFaSU9Pd7s9k7lorXxFWVurw8PXjCE2otJyitYDa3AA2QZPmkdEuBzLYTaba2yMDWXZqt/QnhjbRtb3MIQQQghRix588EFuueUW7rjjDrp168aZM2dcZiGrqnfv3qxevZoXXniBN998k9DQUDZu3IjNZuP6668nLi6OiRMn4uPj4zyaLCEhAbVaTZs2bQgMDCQ9PR2tVstzzz1H+/bt6dOnD2q1mqVLl7r0lZ6eTkZGRo1cv2hYFEdDPHNA1BqH3UH++TysBdZSr6WlpZF++DBdu3ZFr3NvGeSmX34hOCiIyMjaC2YKT5wj789D5b5ut9vJOHYMlVpNkK8fjiMZYPn7On/ZtIkmQUEum8UvxW/btmE2m4kqZyN5XfAb3B1ztzb11r8QQghRn/Lz8zl48CCRkZHo3HzPIoQoraq/UzLzeAWxWW3kns0pM3AEaNasGR4eHhxwYx19sdpImnMxbZAvhraR5S5hPXv2LDabjYCAAFQ6T1SRzcD499EcZrOZ7BpImqPU48yjoij4
D79aAkchhBBCCFHnJHi8QljzLeSdzcVuLX+vnlqtpmWLlpw6fZpz5zLdat9sMpGdlUVth1QeAd4Y27dE8XDd2J2Tk0N2djZ+/v7ObGWKWo2qWUjR8R4UBY81kXG1vs55VHlqCbjzWkwdo+q8byGEEEIIISR4vAJY8izknc/HYa88tAtsEoi3tzf7U/fjzopmk8lMocVCYUFBdYZaJRofE6ZO0ahNegCsVitnzpzBaDSWOqtIURRUQQEoTfxrLGmOqh7OefQI8CZ4/BAMMc3rtF8hhBBCCCGKSfB4mSvMLST/fF6Vz21UgKhWrcjNyeGYGxudaztpzsVUOi3G+Cg0Qb6cPn0alUqFv79/+eX9ffGJaVUjY6zrZauG1uEEjx+KR4BPnfUphBBCCCHExSR4vIxZ8iwUXMh3u57JZCI4JIS0gwexWCxVquPp6YmHh0edBY9QFMSd1sNZk5rAoCbOzGDl8QjwI7h922ovXa2rZasqDw1+g7sTcMc1qDy1td6fEEIIIYQQFZHg8TJlzbeQfwmBY7HIyEgcDgdpaWlVKq9QN0lzSrpwIYu0Q4cIbhtFwNXxaPy8Kq0TENUS/Lyr1a9SB8tWdeHBBD80HHO3NigVnHEphBBCCCFEXZHg8TJks9qKAsdqLK3UengQERHBsWPHyM7JqVKd4qQ5dcFqs5GcvAez2Ux4eDgqTw8McS3Qtw4vlUynJK1Wi2dYCJiMl9x3bZ7zqNJp8buxB03GDsajCsGwEEIIIYQQdUWCx8uMw+4gPzOvSslxKhMaFobeYCB1//4qZVE1mczkFxRUealrdezfv59Ci4XY1q1R/TUzp1B0nIf5qli0YYFlHunh6emJ3W7H3sQPtB6X1LdKUWp85lFRqTB1iib00Vsxd42V2UYhhBBCCNHgSPB4GXE4HOSfz6vwOA53qBSFVi1bcS4zk9OnTldavq6S5pw8eYrjx48TFRWFXq8v9bqiUaNvFYa5exu0zZqgqNXO17Taor2DhVYrqqYhoHI/SFNUqhrb86jy0GDu1obQx27F/6beqI2lr0cIIYQQQoiGQILHy4gl14K1wFqjbfr5+eLv70/qgVRslQRMer0etVpdq8Fjfn4+e/ftpUmTJgQFBVVYVqX1QN8iFHOPtuiim6E2Fo1Po1ZTWFiI4qlFCW7i9hhqYtmqR4A3foO7E/bUHfgN7o7Gx1yt9oQQQgghKpKUlISiKGRmZtb3UOrE1KlT6dixY30P47IjweNlwm61UZBdO2cstmzZksKCAo4cOVJhudpOmuNwOPjzzz/RqNVER0VR1TlDRa3CM8QfY5cYjB2jUAf5UWAtWlqr8ja7vf/xUpetqrQemDpGETRmECGP3IK5WxtUOk+32xFCCCFE4zR16lQURXH5at26tVtt3HnnnQwaNMjlubVr16IoClOnTi3VX/PmNXtGdEREBPPnz6/RNqtLURRWrFhR38NokGr6Q4PyM4uIRqNouWr1EuRUxKDXE9a0KemHDhEcFISnZ/kBj9lk4uzZs7UyjvTDhzl//jwdO3ZEo3H/R1cBNN5G9NFNOXL4MC1iW2A9n43NoMOy408c1qrN2lZ12aqiKHgE+aGLCMYzMgRdi1BUFSTzEUIIIcTlr23btnz33XfOx+6+p+nfvz8JCQlYrVZn3fXr19OsWTOSkpJcyq5fv57+/ftXe8x1wWazoShKpUevXW4KCwud26pKslgseHhcWn6O2nRlfXcuU5ZcCzaLrVb7CA8PR61Wc+DAgQrLmcxmcvPysNpqdjwXLmSRlpZG8/BwvL2rd9SG2WzGYrNhNWrRtwjF1LU1gQ/fSvD4ofgN6YmpUzS6FqF4+HujaNSl6qsUxWXZqqJW4+HnhS4yBFN8FH43dCf4vhtp+s/RhPxjGL6DumGIaS6BoxBCCCHQaDQEBwc7vwICAtyq379/f7Kzs9m6davzuaSkJJ599ll+/fVX8vOLjmrLz8/n119/LRU8btu2jS5dumAwGOjZsycp
KSnO11JTUxk2bBhBQUGYTCa6du3qEuj269ePQ4cO8cQTTzhnTsvz2muvERcXh9FopFmzZjz88MMuW5sWLVqEj48PK1eupE2bNnh6epKenk5BQQEJCQmEhYVhNBrp1q1bqaC4pIiICABuvvlmFEVxPi72ySefEBERgbe3N3feeafLed92u51Zs2YRGRmJXq+nQ4cO/Pvf/y63L4CCggImTZpEs2bN8PT0pFWrVvzf//2fyzWVtGLFCpf7VLycduHChURGRqLT6YCiSYd3332Xm266CaPRyMsvvwzAf/7zHzp16oROp6NFixZMmzYNa4kJD0VRWLhwITfffDMGg4GoqChWrlwJQFpamvP77+vri6IojB07tsLrq4y8m23k7DY7hTm1s1y1JI1aTYsWLfgzJYXQ0DC8vcs+RsJkKkqak5OdU26ZqsjOzmb79u20atWKgIAAl2M5qqt4jNlZWej/+oW1FtowBPriGRboUtbhcGDPzceWk4/DYsVhtXHst9859qudkIeGozbqUBn1kh1VCCGEEFWyb98+QkND0el09OjRg1mzZrksLR07dixpaWnlBkzR0dGEhoayfv16unfvTlZWFr/99hurVq3izTffZNOmTfTv35+ff/6ZgoKCUsHj888/z7x58wgMDOQf//gH9957Lxs3bgSK3n/dcMMNvPzyy3h6evLxxx8zdOhQUlJSaN68OcuXL6dDhw488MADjB8/vsLrVKlUvPHGG0RGRnLgwAEefvhhnnnmGd555x1nmdzcXObMmcPChQvx9/enSZMmTJgwgT179rB06VJCQ0P56quvGDRoELt27SIqKqpUP1u2bKFJkyYkJiYyaNAg1CUSJaamprJixQpWrVrFuXPnGDFiBLNnz3YGZrNmzeJf//oX7733HlFRUfzwww/cfffdBAYG0rdv3zKva8yYMWzatIk33niDDh06cPDgQU6frjyxZEn79+/nyy+/ZPny5S7jnTp1KrNnz2b+/PloNBp+/PFHxowZwxtvvMHVV19NamoqDzzwAABTpkxx1ps2bRqvvPIKr776Km+++SajRo3i0KFDNGvWjC+//JJbb72VlJQUvLy8ykw26Q4JHhu5guyCGjmWoyqCgoM5euwY+1P306lTpzL3HBoMBlQqFdnZWdUKHjPPn+fo0aOcOn0ag8GATqejfYcOzmM5qkOr1eLp6UlWVhaBgX8Hi/lZBRj81C6BoKIoRYl2SmRB1ZwMIG+3B9ogv2qPRQghhBBXjm7durFo0SJiYmLIyMhg2rRpXH311ezevRuzuSh5XkhISKXbY/r3709SUhLPPfccP/74I9HR0QQGBtKnTx+SkpKcr0dGRpb64P3ll192BkbPPvssN954I/n5+eh0Ojp06ECHDh2cZWfMmMFXX33FypUrmTBhAn5+fqjVasxmM8HBwRWOceLEic5/R0RE8NJLL/GPf/zDJXi0WCy88847zj7T09NJTEwkPT2d0NBQABISEli7di2JiYnMnDmzVD/F7+V8fHxKjclut7No0SLnvR09ejTr1q3j5ZdfpqCggJkzZ/Ldd9/Ro0cPAFq0aMFPP/3E+++/X2bwuHfvXpYtW8a3337LgAEDnHXcVVhYyMcff+zyPhTgrrvuYty4cc7H9957L88++yz33HOPs68ZM2bwzDPPuASPY8eOZeTIkQDMnDmTN954g82bNzNo0CD8/IrerzZp0qTUrOilkOCxEbNbbVjzaza7akUUIKpVK377/XdOHD9e5h8NlaJgNBqrnTQnNycHRVGwWa3s37ePyMhICvILnDOF1WU2m0uN0W6xYS2w4qGreH25Wq3GZrPhcDhkxlEIIYQQVTZ48GDnv9u3b0+3bt0IDw9n2bJl3HfffUDRbFhl+vXrx8SJE7FYLCQlJdGvXz8A+vbty/vvvw/gDCIv1r59e+e/Q0JCADh58iTNmzcnOzubqVOnsnr1ajIyMrBareTl5ZGenu72tX733XfMmjWLP//8kwsX
LmC1WsnPzyc3NxeDwQAUfaBfcjy7du3CZrMRHR3t0lZBQQH+/v5ujyEiIsIZOBZf78mTJ4Gi2b/c3Fyuu+46lzqFhYXEx8eX2d727dtRq9XlzkpWVXh4eKnAEaBLly4uj3fs2MHGjRudM6VQtDf04vtY8h4ajUa8vLyc11nTJHhsxApzCmstSU55vLy8CAoK4sCBAwQEBJS5ydtsMnHhwoVq9ZOVnY3d4SA/N5eAgAAuZGWxceNPdO16FU2alP5lc5fZZOLIkSM4wGUGtTCnEI2npsKgsPiabTbbJSXuEUIIIYSAotmy6Oho9u/f71a9/v37k5OTw5YtW1i/fj1PP/00UBQ83nvvvZw9e5Zff/2VBx98sFTdkklYit/vFM90JiQk8O233zJ37lxatWqFXq/ntttuo7Cw0K3xpaWlMWTIEB566CFefvll/Pz8+Omnn7jvvvsoLCx0Bj16vevWn+zsbNRqNdu2bXNZzgl/bztyx8UJZxRFcV5r8f7L1atXExYW5lKuvOSQlS35VKlUOC56b26xWEqVMxrLzvR/8fPZ2dlMmzaNW265pVRZXYkJlYqus6bJO99Gym6zY6nDWceSWkRGsvnUKQ6lp9OyjKl6k9nM6dNnsFlsRbFt8S+RUvTDrKhUKGqlwqM2MjMzyc3JQePhgUqtRqFotrCiTK/uMJvNWP76BKzkbKbdYsNmsaHRlv+rUfzHTIJHIYQQQlRHdnY2qampjB492q16LVu2pFmzZqxcuZLt27c7Z8LCwsIICwtj3rx5FBYWup1pdePGjYwdO5abb77ZOb60tDSXMlqtFlsliRG3bduG3W5n3rx5zuypy5Ytq7T/+Ph4bDYbJ0+e5Oqrr67yuD08PCod08VKJump6kxiXFwcdrudDRs2OJetlhQYGEhWVhY5OTnOQHD79u1ujaukTp06kZKSQqtWrS65jeJMru7en/JIttVGypJnqfNZx2Kenp40Dw/nyJEj5ObmAkVDsVlsWPIs+Jl9adMqFmuBFVuhFdtfAZmtsGhZqCWvkMKcQgpzLVgLbWV+QnPy5Elyc3NRq9X4+/vTvXt3evfuXa19lCWVTJpzMUtu6U+ISioZPAohhBBCVFVCQgIbNmwgLS2Nn3/+mZtvvhm1Wu3crwbw3HPPMWbMmErb6t+/P++88w6tWrUiKCjI+Xzfvn158803nYl13BEVFcXy5cvZvn07O3bs4K677io1gxUREcEPP/zA0aNHy00U06pVKywWC2+++SYHDhzgk08+4b333qu0/+joaEaNGsWYMWNYvnw5Bw8eZPPmzcyaNYvVq1eXWy8iIoJ169Zx/Phxzp07V6VrNZvNJCQk8MQTT7B48WJSU1P57bffePPNN1m8eHG5/dxzzz3ce++9rFixgoMHD5KUlOQMjLt164bBYOCf//wnqampfPrppyxatKhK4ynL5MmT+fjjj5k2bRp//PEHycnJLF26lBdeeKHKbYSHh6MoCqtWreLUqVMuGW8vhQSPjZDD4SgKHutRs6ZN8fT0JC0trSggzC3EWmDFbrOjoKA3VJLJyeHAYbdjK7RSmGvBkm/F/lfin/Pnz5OdnU1wcDC9evWiT58+NG3atEbP/XEmzSnjF6j4OspTHDxaq3gupBBCCCEEwJEjRxg5ciQxMTGMGDECf39/fvnlF5f9bxkZGVXaY9i/f3+ysrKc+x2L9e3bl6ysrEs63/G1117D19eXnj17MnToUAYOHEinTp1cykyfPp20tDRatmxZ5r49gA4dOvDaa68xZ84c2rVrx5IlS6q0lxMgMTGRMWPG8NRTTxETE8Pw4cPZsmWLS0bai82bN49vv/2WZs2albtfsSwzZszgxRdfZNasWcTGxjJo0CBWr15NZGRkuXXeffddbrvtNh5++GFat27N+PHjycnJAcDPz49//etffP3118TFxfHZZ58xderUKo/nYgMHDmTVqlX873//o2vXrnTv3p3XX3/drdMHwsLCmDZtGs8+
+yxBQUFMmDDhkscDoDgunvYRDZ4130JeZl69jsEBXDh3gfycvBrJ3ASAoqD2UOFQHPyZkkJ0dDSeZRyaWlN2//EHNpuNDiU2GRfTmjzxNJW9RPbgwYMsXryYxx9/HF9f31obnxBCCCHKlp+fz8GDB13OyRNCXLqq/k7JzGMjVF97HYs5HA6seRZ0Hp41FzgWNYyt0IbDAm3btK3VwBGKkuZkZ2VR1qcn1nxLqeW0xWTZqhBCCCGEuBJJ8NjIOOwOrAX1Fzza7UVLZita1lldDru91vsA16Q5F7Nb7dgtZfcvy1aFEEIIIcSVSILHRsZaYK23RDl2e9GMo8NeB/07HEX7IK21F0BWlDQHwFpQ9r5SmXkUQgghhBBXIgkeGxlbYf3MdjkcjgqXctZSp1gqSV5THRUlzQGwFpYdHJY851EIIYQQQogrhQSPjYytnICmtlkLrHUz43gxR9Ey3dqKWc0mE1nlzDzarfYyr1lmHoUQQgghGp/jx49z3XXXYTQaazZvxxVEgsdGxG6z1/o+wLLYLLZaXT5aGYfdUWszrmazudykOTgc2CylA0TZ8yiEEEII0fi8/vrrZGRksH37dvbu3Vvfw2mUJHhsROojgHP8lQG1vtmstRM4V5Q0B8BuLX3tsmxVCCGEEKJ6LJaaO7O8qm2lpqbSuXNnoqKiaNKkySX1VVhYeEn1LhcSPDYiZc2C1UWf1dnnqCiKy9clq6UgtrKkObYyMq7KslUhhBBCXAq73c4rr7xCq1at8PT0pHnz5rz88svO13ft2sU111yDXq/H39+fBx54gOwSuRnGjh3L8OHDmTt3LiEhIfj7+/PII4+4BE/vvPMOUVFR6HQ6goKCuO2225yvRUREMH/+fJcxdezY0eUge0VReP/99xkyZAgGg4HY2Fg2bdrE/v376devH0ajkZ49e5KamurSzn/+8x86deqETqejRYsWTJs2zWWVlqIovPvuu9x0000YjUaX6y4pIiKCGTNmMHLkSIxGI2FhYbz99tsuZcprq6IxRERE8OWXX/Lxxx+jKApjx44FIDMzk/vvv5/AwEC8vLy45ppr2LFjh7OvqVOn0rFjRxYuXOhyBmJV633yySdERETg7e3NnXfe6bJdqrKfh8OHDzNixAh8fHzw8/Nj2LBhpKWlOV9PSkriqquuci7D7dWrF4cOHSrzvtYUTa22LmpUXc88OqDc4yoqc+T4EZYu/xxFAYqDRocDhwPuHjGKkMAQt4NSu61oD6KiqkYQepGSSXMCAwNL91nBslUJHoUQQgjhjueee44PP/yQ119/nd69e5ORkcGff/4JQE5ODgMHDqRHjx5s2bKFkydPcv/99zNhwgQWLVrkbGP9+vWEhISwfv169u/fzx133EHHjh0ZP348W7du5bHHHuOTTz6hZ8+enD17lh9//NHtcc6YMYPXXnuN1157jUmTJnHXXXfRokULnnvuOZo3b869997LhAkTWLNmDQA//vgjY8aM4Y033uDqq68mNTWVBx54AIApU6Y42506dSqzZ89m/vz5zpVcZXn11Vf55z//ybRp0/jmm294/PHHiY6O5rrrriu3rcrGsGXLFsaMGYOXlxcLFixAr9cDcPvtt6PX61mzZg3e3t68//77XHvttezduxc/Pz8A9u/fz5dffsny5cud7wOrUi81NZUVK1awatUqzp07x4gRI5g9e7YzQKzo58FisTh/Hn788Uc0Gg0vvfQSgwYNYufOnahUKoYPH8748eP57LPPKCwsZPPmzdWbrKkCCR4bkbre72i/hFlHRVHQeGrQeGpQVApd4rsQGhLiUiawSSAanQZrvvtJeGwWGxrPmv2xrTBpjt2Bw+Fw+UWUPY9CCCGEcFdWVhYLFizgrbfe4p577gGgZcuW9O7dG4BPP/2U/Px8Pv74Y4xGIwBvvfUWQ4cOZc6cOQQFBQHg6+vLW2+9hVqtpnXr1tx4442sW7eO8ePHk56ejtFoZMiQIZjNZsLDw4mP
j3d7rOPGjWPEiBEATJo0iR49evDiiy8ycOBAAB5//HHGjRvnLD9t2jSeffZZ53W1aNGCGTNm8Mwzz7gEj3fddZdLvfL06tWLZ599FoDo6Gg2btzI66+/7hI8XtzWvffeW+EYAgMD8fT0RK/XExwcDMBPP/3E5s2bOXnyJJ6engDMnTuXFStW8O9//9sZfBYWFvLxxx87JxqqWs9ut7No0SLMZjMAo0ePZt26dbz88suV/jx8/vnn2O12Fi5c6HwfmpiYiI+PD0lJSXTp0oXz588zZMgQWrZsCUBsbGyl97a6JHhsRBx1HTy6OdOpKAoanQaV+u/V0M2bNqN1TOsyy3voPLDku3dupN1qx+EJNfmZitls5siRIzgoo12Ho2i2U/33K4qioFarZeZRCCGEEFWWnJxMQUEB1157bbmvd+jQwRk4QlEQZbfbSUlJcQaPbdu2dX6QDRASEsKuXbsAuO666wgPD6dFixYMGjSIQYMGcfPNN2MwGNwaa/v27Z3/Lu43Li7O5bn8/HwuXLiAl5cXO3bsYOPGjS5LLm02G/n5+eTm5jr779KlS5X679GjR6nHFy+3vbitqo7h4jrZ2dn4+/u7PJ+Xl+eyLDc8PNxlhVpV60VERDgDRyj6Xp08eRKo/Odhx44d7N+/36U+QH5+PqmpqVx//fWMHTuWgQMHct111zFgwABGjBhByEWTNjVNgsdGwmF31OlRGY6/+nSHxtM1cCxWUFCAh4cHKpXra4pKKQog86p+fqTD4QC7A2pw6arJ9HfSHP1f69hLstvspa5LgkchhBBCuKN4mWR1eXh4uDxWFAW7vegDf7PZzG+//UZSUhL/+9//mDx5MlOnTmXLli34+PigUqlKvecqK9lMyT6KZ73Keq643+zsbKZNm8Ytt9xSqi1difdWJQPj6rq4raqO4eI6ISEhJCUllXqt5FEeZfVVlXoVfa8q+3nIzs6mc+fOLFmypNRrxYFsYmIijz32GGvXruXzzz/nhRde4Ntvv6V79+4Vtl0dEjw2EsU/aHXF8ddyzapSaVSoNKUDx1VrV1FosaBSqWgW1pRr+l5DSPDfn4goKgWVh8qtZDh2uwN1DQaPZnNx0pzsMoNHh63ssx5l2aoQQgjRuDkcDhw2O3a7A5VKQVGram3PWFRUFHq9nnXr1nH//feXej02NpZFixaRk5PjDFY2btyISqUiJiamyv1oNBoGDBjAgAEDmDJlCj4+Pnz//ffccsstBAYGkpGR4Sx74cIFDh48WO1r69SpEykpKbRq1arabQH88ssvpR5XtiTzUsbQqVMnjh8/jkajISIiotbrlVTZz0OnTp34/PPPadKkCV5eXuW2Ex8fT3x8PM899xw9evTg008/leBRuD8LWNf9qTVq18cqNa2jY2gZ2RKD3sDpM6f5deuvfPLZvxhz12iCg4Jd6roTPDrsdmoyUfDfSXOyCAwMKKO/soNHmXkUQgghGie7zU7euVxyTue4vAdRa9UYA4zofQ1lrqaqDp1Ox6RJk3jmmWfQarX06tWLU6dO8ccff3DfffcxatQopkyZwj333MPUqVM5deoUjz76KKNHj3YuHa3MqlWrOHDgAH369MHX15evv/4au93uDD6vueYaFi1axNChQ/Hx8WHy5MkuS2Av1eTJkxkyZAjNmzfntttuQ6VSsWPHDnbv3s1LL73kdnsbN27klVdeYfjw4Xz77bd88cUXrF69usbHMGDAAHr06MHw4cN55ZVXiI6O5tixY6xevZqbb7653GW2l1qvpKr8PLz66qsMGzaM6dOn07RpUw4dOsTy5ct55plnsFgsfPDBB9x0002EhoaSkpLCvn37GDNmTOU3uBokeGws6jZ2dDt4LLknEKBpWFOahjV1Po5qFUVMdAz/t/j/SPpxA3fedsffdVUKikqpcp+OWpiErShpTlk3X6PRSPAohBBCNEIFWfmcO3SuzPcdtkIbF45dIOt4Fr7hvniay17ueKlefPFFNBoN
kydP5tixY4SEhPCPf/wDAIPB4Mws2rVrVwwGA7feeiuvvfZaldv38fFh+fLlTJ06lfz8fKKiovjss89o27YtUJTd8+DBgwwZMgRvb29mzJhRIzOPAwcOZNWqVUyfPp05c+bg4eFB69aty5xRq4qnnnqKrVu3Mm3aNLy8vHjttdecyXpqcgyKovD111/z/PPPM27cOE6dOkVwcDB9+vSpMGC/1HoXq+zn4YcffmDSpEnccsstZGVlERYWxrXXXouXlxd5eXn8+eefLF68mDNnzhASEsIjjzzCgw8+WOX+L4XiqM4hfqLOWPIt5Gfm1Vl/1kIbtsKqLctUFAWtUVulsitWrSBl716enpjgsgfSkmepcjZZlUaFh86j8oJuOHToEEeOHKFnr16lkuZoTZ54mjxdnnvzzTdp3bq1S9YvIYQQQtSN/Px8Dh486HLuXlUUZOVz9uDZKpf3i/Sr8QBSVCwiIoKJEycyceLE+h7KFaWqv1M1Ox8vRCW8zF7Y7LYyN2fXp5JJc6pC9jwKIYQQjYvdZufcoXNu1Tl36FydH5UmREMmwWMjUdsHflaHw1H15DqZ5zPRqDVota4zle5NgNf8vSiZNKcqZNmqEEII0bjknct1e1uOw+4g71xuLY1IiMZH9jyKMrkbq158FmJObg5Gg2ta4xMnT7Bv/35aRLZwCYYdDodbezprI46uKGlOWYG7JMwRQgghGg+Hw0HO6ZxLqptzOgeDv7FBf5B/OUlLS6vvIYgKSPDYSCg1eDRFlfpzM8OY3eJ6FuKKVf9Bo9bQNDQMg8HA6TNn2L7zdzw8NPS/ul+puu7MPLo7tqoqL2lOWfdelq0KIYQQjYfDZncrs3tJtkIbDpsDRSPBoxCybLWRqOl00ZVRVIpbU3x2m91lT0B0q2jy8nLZvG0z36z7H3/uTSYmKoaxd48lIODvmT2H3YHN4t4f89oKpM1mM9lZWaUmQS/OJAsy8yiEEEI0JvZqHnlWU+dt9+vXr1YSwSQlJaEoCpmZmTXethAlycxjY6Hg1nEWNdAdKpWC3VbF4zMcDqwFVjx0Higqha6dutC1U8Vn3BTXcWvWUVFqLXgsmTRHXyLLVMmssMVkz6MQQgjReKiq+d6hrPcCl2L58uV4eFQvY3y/fv3o2LEj8+fPr5ExNUSLFi1i4sSJEgw3QDLz2EgUBU11++1Sadzrz2F3YMmv2pEbDrsDa77V7QxmKo2qFtLlFCkvaY7MPAohhBCNm6JWodaqL6muWqsu873ApfDz88NsNtdIW5erhpaRX7iS4LERUdXQH64q96dRu52dpjgotBZYcdhLZ2F12B3YCm1unevoMiaPS/vDXxVarRZPrZas7L/3PSpqVbkJc2TPoxBCCNE4KIqCMcBYecEyGANqLlnOxctWIyIimDlzJvfeey9ms5nmzZvzwQcflFt/7NixbNiwgQULFhRNLCiKS4KZbdu20aVLFwwGAz179iQlJcWl/n/+8x86deqETqejRYsWTJs2rcL3M0lJSVx11VUYjUZ8fHzo1asXhw4dAmDq1Kl07NiR999/n2bNmmEwGBgxYgTnz5931rfb7UyfPp2mTZvi6elJx44dWbt2rfP1tLQ0FEXh888/p2/fvuh0OpYsWcK4ceM4f/688xqnTp0KwDvvvENUVBQ6nY6goCBuu+22qtx2UYMkeGxEajNwKouigNrN2UcoWo5qs9gozC3EkmfBkmvBkmehMLeQwtxCrIXuLVUtplKrqr3spDLF+x6LlXf9smxVCCGEaFz0vga3t74oKgW9r6GWRlRk3rx5dOnShd9//52HH36Yhx56qFTQV2zBggX06NGD8ePHk5GRQUZGBs2aNXO+/vzzzzNv3jy2bt2KRqPh3nvvdb72448/MmbMGB5//HH27NnD+++/z6JFi3j55ZfL7MtqtTJ8+HD69u3Lzp072bRpEw888IBLIL1//36WLVvGf//7X9auXeu8hpLjnTdvHnPnzmXnzp0MHDiQ
m266iX379rn09eyzz/L444+TnJxM//79mT9/Pl5eXs5rTEhIYOvWrTz22GNMnz6dlJQU1q5dS58+fS7pnotLJ3seGxF3l5HWBLWHGpvVDpcQ7EHRTKPDnXM4KhlLbTObzRw5cgQHf+37LKdPWbYqhBBCNC4qtQrfcF/OHjxb5Tq+4b61nrTwhhtucAZckyZN4vXXX2f9+vXExMSUKuvt7Y1Wq8VgMBAcHFzq9Zdffpm+ffsCRQHZjTfeSH5+PjqdjmnTpvHss89yzz33ANCiRQtmzJjBM888w5QpU0q1deHCBc6fP8+QIUNo2bIlALGxsS5l8vPz+fjjjwkLCwPgzTff5MYbb2TevHkEBwczd+5cJk2axJ133gnAnDlzWL9+PfPnz+ftt992tjNx4kRuueUWl+tUFMXlGtPT0zEajQwZMgSz2Ux4eDjx8fFVuMOiJsnMYyNSF8HTxRSVguYS9wjUJJVGXSfBc8mkOQBqjQSPQgghxOXC06zDL9Kv0hlIRaXgF+mHp1lXYbma0L59+7/7/StgOnnyZLXbCgkJAXC2tWPHDqZPn47JZHJ+Fc9g5ubmlmrLz8+PsWPHMnDgQIYOHcqCBQvIyMhwKdO8eXNn4AjQo0cP7HY7KSkpXLhwgWPHjtGrVy+XOr169SI5OdnluS5dKk6yCHDdddcRHh5OixYtGD16NEuWLClz3KJ2SfDYiCgqpdbOOKyIykNd50eFlKQoChrPuglgL06ao/Io+7plz6MQQgjROHmadTSJDcIr1KtUEh21Vo1XqBdNYoPqJHAESmVfVRTlko8GKdlW8fLS4rays7OZNm0a27dvd37t2rWLffv2odOVfa2JiYls2rSJnj178vnnnxMdHc0vv/xySWOriNFY+X5Us9nMb7/9xmeffUZISAiTJ0+mQ4cOkpG1jknw2IgoilI/s4+A2lNTY5vF3etcQVOHfZdMmqPSlB80y55HIYQQovFSqVUYA0wExjQhqE0wga3/+m9ME4wBpnr90LwyWq32kt6DdOrUiZSUFFq1alXqq6KjSOLj43nuuef4+eefadeuHZ9++qnztfT0dI4dO+Z8/Msvv6BSqYiJicHLy4vQ0FA2btzo0t7GjRtp06bNJV2jRqNhwIABvPLKK+zcuZO0tDS+//77qt4CUQNkz2Mjo9GqsebXfQpjlUpBo9Ngybde8v7HS6HR1s1y1ZKKk+ZUlNJblq0KIYQQjZ+iKCgaBVUjmk+JiIjg119/JS0tDZPJhJ+fX5XqTZ48mSFDhtC8eXNuu+02VCoVO3bsYPfu3bz00kulyh88eJAPPviAm266idDQUFJSUti3bx9jxoxxltHpdNxzzz3MnTuXCxcu8NhjjzFixAjnXsWnn36aKVOm0LJlSzp27EhiYiLbt29nyZIllV5jdnY269ato0OHDhgMBr7//nsOHDhAnz598PX15euvv8Zut5e5N1TUHgkeGxm1p6YoDWodBnDFVGoVHjoN1vxLy5bqFqVor2V9zLSazGaOHjmCuoKlsrJsVQghhBD1ISEhgXvuuYc2bdqQl5fHwYMHq1Rv4MCBrFq1iunTpzNnzhw8PDxo3bo1999/f5nlDQYDf/75J4sXL+bMmTOEhITwyCOP8OCDDzrLtGrViltuuYUbbriBs2fPMmTIEN555x3n64899hjnz5/nqaee4uTJk7Rp04aVK1cSFRVV4Vh79uzJP/7xD+644w7OnDnDlClTGDBgAMuXL2fq1Knk5+cTFRXFZ599Rtu2bat0/aJmKI5ajwJETcs9l4utoP4CF/tfZzk6LnE9fmUUVdFS1fpaMnLmzFl++30b/W+8Bl9f3zLLbNq0ifXr1/PPf/6zjkcnhBBCiPz8fA4ePEhkZGS5+/VE7Zo6dSorVqxg+/bt9T0UUQOq+jvVeObohZOHzqPyQrVIpVLwMHig1v41C1qTbXuo8dBr63Wvgdls4syFs6UyipUkex6FEEIIIcSVRoLHRkij07h9yG1N
Uyjaj+ih90ClUVc7iFSpVXjoPfDw1NR0POo2racn+ZZ8lw3gFyve8ygT90IIIYQQ4kohwWMjpCgKHnptfQ8D+GsWUqdB+9dMpDtBbXH2WA+DtigIbSCZzTRaNQFNAsqceXQ4HOTb8shXcinQ5HIi9yhnC09xwZJJni1XgkkhhBBCXBGmTp0qS1avQJIwp5HyMHhQmFtYL4lzyqL8leAGrRqHw4HD5sBudxQFU8VDVIomKBWVCpVKAZVCPU8ylslDryUwOIAN29fxe+YmzllOk209T5b1PFnWC1jtFk4VnGRf6B4+P/oBavXfv0YaRYNRY8as8cak8cZPG0CgNoRAzxB0an09XpUQQgghhBDVI8FjI6VSq9B4aurl2I7K/J32uvGwOCxkWTI5Zz/NnpPb2G/aS7L3TtYdVZW5aVhRiq7ObnegLpGU1eqwct5yjvOWc6XqeHv4EqJrTpg+nGb6Fpg0XrV2PUIIIYQQQtS0xvT+XlxEa2wYS1cbK7vDztnCU+zN3s32zF/Yn5PMrsKtnLWcxmQyApCVlVVmXdVfy3PtbmScPW85x59ZO1h3ciWL0xfw1bHF7LnwG4X2gupfjBBCCCFEHejXrx8TJ04s9/WpU6fSsWPHavcTERHB/Pnzq92OqFkSPDZiag81Gk+ZPHZXvj2fw3kH2XH+V/bnJJNpOYsDBxalgPOqswBotZ54arVkZ5cdPBbPPDocl3ZcicPh4GjeIb4/tYrEQ6+z4fTXnCk8eWkXJIQQQgjRQCQkJLBu3br6HoaLtLQ0FEWpsT2a//d//0fnzp0xGo2Eh4fz+uuv10i7jYFEHo2cp1mHtTCnwex9bMiyrRfIyD9MpuUMZd2t4+rDoPz9itlsrmDm8e9lq9VlsRey6/xWdp3fSpg+nE4+vWiub4lS32lnhRBCCHHFKCwsRKut/qo2k8mEyWSqgRHVvareg++//54XX3yR9u3bs27dOh588EE6depE375962CU9UtmHhs5lUYly1crkWPL5s+sHezJ2s65cgLHLFUm2aoLLs+ZTMXBY+kaxctWL3XmsTxH8w7x34xPWXb0Qw7nHqjRtoUQQghRv9auXUvv3r3x8fHB39+fIUOGkJqa6ny9eIZs6dKl9OzZE51OR7t27diwYYOzTFJSEoqisHr1atq3b49Op6N79+7s3r3bWebMmTOMHDmSsLAwDAYDcXFxfPbZZy5j6devHxMmTGDixIkEBAQwcOBAAHbv3s3gwYMxmUwEBQUxevRoTp8+XeVrvHjZ6tixYxk+fDhz584lJCQEf39/HnnkESyWv/N2nDx5kqFDh6LX64mMjGTJkiUubZY1c5iZmYmiKCQlJQFw7tw5Ro0aRWBgIHq9nqioKBITEwGIjIwEID4+HkVR6Nevn8vYXn75ZUJDQ4mJiWH69Om0a9eu1HV17NiRF198EYAlS5YwfPhwWrRowf3334+XlxeHDx+u8j1qzCR4vAxojdqisxaFC6vDSlruPvZc+I0L1vPllrNhLZp1vIjZbMZitZKfX3pP4t8Jc2o2eCx2quA4/8n4F2tP/Jts64XKKwghhBCiwcvJyeHJJ59k69atrFu3DpVKxc0331zq/cTTTz/NU089xe+//06PHj0YOnQoZ86cKVVm3rx5bNmyhcDAQIYOHeoMyPLz8+ncuTOrV69m9+7dPPDAA4wePZrNmze7tLF48WK0Wi0bN27kvffeIzMzk2uuuYb4+Hi2bt3K2rVrOXHiBCNGjKjWda9fv57U1FTWr1/P4sWLWbRoEYsWLXK+PnbsWA4fPsz69ev597//zTvvvMPJk+5t53nxxRfZs2cPa9asITk5mXfffZeAgAAA53V/9913ZGRksHz5cme9devWkZKSwrfffsuqVau49957SU5OZsuWLc4yv//+Ozt37mTcuHGl+p06dSoGg4HBgwe7Nd7GSpatXgYURUHnrSP3bK4sX/1LpuUMabn7KLQXVlr2hPooVqV01lqzuWjJRVZW
VqmMq38nzKnd+70/ew+H8w7Q2/96Wps6yFJWIYQQohG79dZbXR5/9NFHBAYGsmfPHpfZrgkTJjjLvvvuu6xdu5b/+7//45lnnnGWmTJlCtdddx1QFAQ2bdqUr776ihEjRhAWFkZCQoKz7KOPPso333zDsmXLuOqqq5zPR0VF8corrzgfv/TSS8THxzNz5kyXMTZr1oy9e/cSHR19Sdft6+vLW2+9hVqtpnXr1tx4442sW7eO8ePHs3fvXtasWcPmzZvp2rUrULSnMDY21q0+0tPTiY+Pp0uXLkBRwp1igYGBAPj7+xMcHOxSz2g0snDhQpflqgMHDiQxMdE5nsTERPr27UuLFi1c6k6fPp3333+fdevW4e/v79Z4GyuZebxMqD3UaA2yfNXqsHIgJ4W92X9UKXDMVl3gvOpMma9VlDSneM9jTS9bLUuBLZ91J1ey+sRSmYUUQgghGrF9+/YxcuRIWrRogZeXlzPASU9PdynXo0cP5781Gg1dunQhOTm53DJ+fn7ExMQ4y9hsNmbMmEFcXBx+fn6YTCa++eabUv107tzZ5fGOHTtYv369c9+iyWSidevWAC7La93Vtm1b1CXONgsJCXHOLCYnJ6PRaFzG0rp1a3x8fNzq46GHHmLp0qV07NiRZ555hp9//rlK9eLi4krtcxw/fjyfffYZ+fn5FBYW8umnn3Lvvfe6lDlx4gRTp05l8eLFtG3b1q2xNmYy83gZ0Zq02CxWbIW2+h5KvcixZbM/+w8Kqnj0hVUp5Jg6DSqYzCsvaU7Jcx7rSlrOPj7P/4CBQbfSVB9ZZ/0KIYQQomYMHTqU8PBwPvzwQ0JDQ7Hb7bRr147Cwso/8HbHq6++yoIFC5g/fz5xcXEYjUYmTpxYqh+j0ejyODs7m6FDhzJnzpxSbYaEhFzyeDw8PFweK4ri1tafvz+0//t9V8k9kwCDBw/m0KFDfP3113z77bdce+21PPLII8ydO7fCti++B1D0ffL09OSrr75Cq9VisVi47bbbXMocP34ch8NBTExMla/jciAzj5eRouWrehT1lfdtPVN4kuQL26scODqwc1h9AJtirbBceUlzLuWcx5qQZ8tlZcYStmducvkDKoQQQoiG7cyZM6SkpPDCCy9w7bXXEhsby7lz58os+8svvzj/bbVa2bZtW6llnCXLnDt3jr179zrLbNy4kWHDhnH33XfToUMHWrRowd69eysdY6dOnfjjjz+IiIigVatWLl9lBVk1oXXr1s5rLJaSkkJmZqbzcfGy04yMDOdzZR27ERgYyD333MO//vUv5s+fzwcffADgnFm02ao2waLRaLjnnntITEwkMTGRO++8E71e71ImOjqaLVu2EBoaWqU2Lxcy83iZUalV6L115J7LuyL2PzpwcDj3AMcLjrpV77j6MPmq3ErLlUyaU3LfY10uW72Y3WHnpzPfcqLgGNcE3oSHyqPySkIIIYSoV76+vvj7+/PBBx8QEhJCeno6zz77bJll3377baKiooiNjeX111/n3LlzpZZNTp8+HX9/f4KCgnj++ecJCAhg+PDhQNFexn//+9/8/PPP+Pr68tprr3HixAnatGlT4RgfeeQRPvzwQ0aOHMkzzzyDn58f+/fvZ+nSpSxcuNBl6WlNiYmJYdCgQTz44IO8++67aDQaJk6c6BKs6fV6unfvzuzZs4mMjOTkyZO88MILLu1MnjyZzp0707ZtWwoKCli1apUzmG7SpAl6vZ61a9fStGlTdDod3t7eFY7r/vvvdwnGL7Zr1y7GjBnDunXrCAsLq+5taDSuvCmqK4Baq0Fn9qzvYdQ6u8NOanay24HjOdUpMtVl73O8WMmkOSXVx7LVi+3L/oP/Hl9CYRVnW4UQQghRf1QqFUuXLmXbtm20a9eOJ554gldffbXMsrNnz2b27Nl06NCBn376iZUrVzozh5Ys8/jjj9O5c2eOHz/Of//7X+cM2wsvvECnTp0YOHAg/fr1Izg42BlYViQ0NJSN
Gzdis9m4/vrriYuLY+LEifj4+Dg/OK8NiYmJhIaG0rdvX2655RYeeOABmjRp4lLmo48+wmq10rlzZyZOnMhLL73k8rpWq+W5556jffv29OnTB7VazdKlS4GimcQ33niD999/n9DQUIYNG1bpmKKioujZsyetW7emW7dupV7Pzc0lJSWl1PLZy53ikLVvl62C7AIKsy/PwMLmsLM/5w/OW8pe7lGeC6qzHK1kn+PFNm36meDgYCIjS2bYcpC0YQMx0THV2gNQEwI9g7kp5G70akO9jkMIIYSoK/n5+Rw8eJDIyMhSGdEbs7S0NCIjI/n9999dzkosKSkpif79+3Pu3Dm3k8qIqnM4HERFRfHwww/z5JNP1vdwal1Vf6dk5vEypjVq0RovvxlIOw5Sc/a4HThmq85zVH3IrcARwFRm0hwFRVHqZdnqxU4VHOe/GTIDKYQQQghRE06dOsVbb73F8ePHyzzb8Uomex4vY4qioDVpQeGymYF0AAdz/iTTctateudVZzmmPgSK+xPtZpOZo8eO/tX735GnSqWq12WrJZ0syGD18aUMDR6FRiW/1kIIIYQQl6pJkyYEBATwwQcf4OvrW9/DaVDkXeZlTlEUPE2eKCqFgqyCRp9E51jeIc4UnnKrzjnVSY6rj7g941jMbDZjsVhKJ81pIDOPxY7mHWLD6a+5JnAoinKJFyuEEEKIehMREVFpNvV+/fpJxvVaJve3fLJs9QqhNWjR++pRanGzc23LtJzhaP6hKpd3YOeY+hDHNZceOEIFSXNUqjo/qqMyyVnb+SNrW+UFhRBCCCGEcFPjjSSE2zRaDQZ/A2qPmk+zXNvybXmk5vxZ5fIWpZA0zV7OVzGrakW0Wk88tVqys12Dx4a0bLWkH09/Q0b+4foehhBCCCGEuMxI8HiFUalV6P0MeBi09T2UKrP/lVnV5qjawa7ZqvMc1PxZpXMcq+rvpDkO8vPzyM3NwVJYSE5ONufPn+fChfMU7YmsfzaHjbUn/k2+La++hyKEEEIIIS4jsufxCqQoCjovHRqdhoIL+ditDWvp5cWO5h8i11Z5IGjDygn1Ec6rzlZrmWpZipPmpKens2vXLqw2G6dOnUKr1bJv3z50Oh09e/bE29unZju+RDnWLH48s5brmtxc30MRQgghhBCXCZl5vIJptBoMfsaiWcgGmmAl25rF8SoswcxSZXLAI5nz6poPHKFo36PFYkGvN2C327FZrWi1Wjy1WgotFgxGI2azV813XA0pWbs44MZSXyGEEEIIISoiweMVTlEVzUIa/AyotQ1rItrusHMwN6XCxaCFSgGHNfs5ojmAVbHU2ljMZjNQFGNHtmiBxWJBo9GAoqBSqWjZogWqBpiMKOn017J8VQghhBBC1IiG925X1Au1hxq9rx69b8MJIk8UHCWvnOWqBUoeRzVppGr2kK26UOtjKU6ak5WVRYvIFpjNZgoKCsjNzcXPz4+QkJBaH8OlyLVm8+u5pPoehhBCCCHqWVpaGoqisH379voeSp1YtGgRPj4+9T2My44Ej8JJURQ0nhoMfgYM/kY89B71tpy1wF5QxrEcDrJUmaRr9nNAk8wF1VlQ6i5JTXHSHIPBQMtWrbBYLNisVlq2aIFa3TAC7rLsvrCVUwUZ9T0MIYQQQgBHjx7l7rvvxt/fH71eT1xcHFu3bq1y/WeffZbWrVu7PPfnn3+iKApjx451eX7RokV4enqSl1dzq5D69evHxIkTa6y9mhAREcH8+fPrexhXBAkeRZnUHmp03npMgSY8zTpUdXy8R0Z+OnZHUSIfi1LAaXUG+z3+4IjmADmqC7Wyr7EyZpOZrOxswEFEeARGgwGDwUBYWFjdD8YNDodDZh+FEEKIBuDcuXP06tULDw8P1qxZw549e5g3bx6+vr5VbqN///6kpKRw/Phx53Pr16+nWbNmJCUluZRdv3493bt3R6/X19Ql1BqHw4HVaq3vYYhKSPAoKqSoFLRGLUZ/I4YAI1qT
Z9Gy1lqckbQ4LByxHOSs6gSHNHvZr/mDU+oMLEphrfVZFcVJc/LzC/D09KRjfDztO3TAw6PhH3uSlrOP0wUn6nsYQgghxBVtzpw5NGvWjMTERK666ioiIyO5/vrradmyZZXb6N27Nx4eHi6BYlJSEo888ghnz54lLS3N5fn+/fu71D9w4AD9+/fHYDDQoUMHNm3a5HztzJkzjBw5krCwMAwGA3FxcXz22WfO18eOHcuGDRtYsGABiqKgKIpLfyV98skndOnSBbPZTHBwMHfddRcnT550GZuiKKxZs4bOnTvj6enJTz/9hN1uZ9asWURGRqLX6+nQoQP//ve/y70f/fr149ChQzzxxBPOMZX0zTffEBsbi8lkYtCgQWRkuK7GWrhwIbGxseh0Olq3bs0777xTbl9CgkfhBrVGjafJE4OfAVMTEwY/A55eOjz0Hqg06ksLKBUFlUaFRueBp9kTva+BvdqdpGr2cEJzlFxVdr3MMpalOGnOuawzZFrOENwqgKBW/pwuPM45y2kK7A07Mc3285sqLySEEEKIWrNy5Uq6dOnC7bffTpMmTYiPj+fDDz90KTN16lQiIiLKbcNoNNK1a1fWr1/vfC4pKYlrr72WXr16OZ8/cOAA6enppYLH559/noSEBLZv3050dDQjR450zvjl5+fTuXNnVq9eze7du3nggQcYPXo0mzdvBmDBggX06NGD8ePHk5GRQUZGBs2aNStznBaLhRkzZrBjxw5WrFhBWlpaqWW1ULQMd/bs2SQnJ9O+fXtmzZrFxx9/zHvvvccff/zBE088wd13382GDRvK7Gf58uU0bdqU6dOnO8dULDc3l7lz5/LJJ5/www8/kJ6eTkJCgvP1JUuWMHnyZF5++WWSk5OZOXMmL774IosXLy73/l/pGu5GLdGgKYqCWqtBXWLSzeFwgMOB3ebAYbNjtzsoSpXqwOEoqoNSNJupUqlQ1AqKyvUTIqvdyh9Zv9X59VTE5rByuvA45y3nsIfncpBkNNkeZZbVKBqMahNmD18CtcF4qBrOrOS+7D/o5X89erWhvocihBBCXJEOHDjAu+++y5NPPsk///lPtmzZwmOPPYZWq+Wee+4BICAgoNKZyP79+/PFF18AsGfPHvLz84mPj6dPnz4kJSUxbtw4kpKS0Ol0dO/e3aVuQkICN954IwDTpk2jbdu27N+/n9atWxMWFuYSXD366KN88803LFu2jKuuugpvb2+0Wi0Gg4Hg4OAKx3jvvfc6/92iRQveeOMNunbtSnZ2NiaTyfna9OnTue666wAoKChg5syZfPfdd/To0cNZ96effuL999+nb9++pfrx8/NDrVY7ZzhLslgsvPfee877OWHCBKZPn+58fcqUKcybN49bbrkFgMjISPbs2cP777/v/H4IVxI8ihpTFBwqqFXAJe6RPJCbTJ4tp2YHdolybdmcKDjKmcKTzv2XJm9jhXWsDivnrZmct2ZyLC8NX20ATTzDMGu862LIFbI5bCRn/U4nn171PRQhhBDiimS32+nSpQszZ84EID4+nt27d/Pee+85g5UJEyYwYcKECtvp168fL7/8MhkZGSQlJdG7d2/UajV9+/blvffeA4pmI3v27Imnp6dL3fbt2zv/XZwt/uTJk7Ru3RqbzcbMmTNZtmwZR48epbCwkIKCAgwG9z943rZtG1OnTmXHjh2cO3cOu73ovVR6ejpt2rRxluvSpYvz3/v37yc3N9cZTBYrLCwkPj7e7TEYDAaXQDwkJMS5dDYnJ4fU1FTuu+8+xo8f7yxjtVrx9q7/920NlQSPokHZdX5LfQ8Bm8PGkbwDnCw4VuEZk5Wx4+BM4SnOFJ7CXxtIuCEKjVL2jGVd2X1hKx29e6BSZMW6EEIIUddCQkJcAieA2NhYvvzyS7fa6dWrF1qtlvXr17N+/XrnjFzXrl05ffo0Bw4cICkpiQcffLBUXQ+Pv9+LFK/+Kg7sXn31VRYsWMD8+fOJi4vDaDQyceJE
CgvdyzuRk5PDwIEDGThwIEuWLCEwMJD09HQGDhxYqi2j8e8P5rOzswFYvXp1qYSEFwfBVVHyWqHoeh0Oh0tfH374Id26dXMpp1bXbaLIxkSCR9FgnCs8TUb+kXodQ5Y1k4M5KeTb82u03TOFp7hgySTCGIWvR2CNtu2OC5bzHMs/RFN9ZL2NQQghhLhS9erVi5SUFJfn9u7dS3h4uFvt6PV6unXrRlJSEhs2bODpp58GioKl7t2783//938cPny41H7HymzcuJFhw4Zx9913A0VB5d69e10CXq1Wi81mq7CdP//8kzNnzjB79mznnsiqHEfSpk0bPD09SU9PL3OJanmqMqaLBQUFERoayoEDBxg1apRbda9kMv0gGoy92bvrtf8TBUf5M2tHjQeOxSwOC/uy93Ak72CttF9VKVm76rV/IYQQ4kr1xBNP8MsvvzBz5kz279/Pp59+ygcffMAjjzziLPPWW29x7bXXVtpW//79Wbp0Kfn5+XTq1Mn5fN++fXnzzTediXXcERUVxbfffsvPP/9McnIyDz74ICdOuGZrj4iI4NdffyUtLY3Tp087Zy1Lat68OVqtljfffJMDBw6wcuVKZsyYUWn/ZrOZhIQEnnjiCRYvXkxqaiq//fYbb775ZoVJbCIiIvjhhx84evQop0+frvL1Tps2jVmzZvHGG2+wd+9edu3aRWJiIq+99lqV27jSSPAoGox99Rg8ZuSncyh3f7WWqVbVsb/6qi+pOcnYHO59OieEEEKI6uvatStfffUVn332Ge3atWPGjBnMnz/fZebr9OnTpKamVtpW//79ycrKolevXmg0fy8m7Nu3L1lZWc4jPdzxwgsv0KlTJwYOHEi/fv0IDg5m+PDhLmUSEhJQq9W0adPGuRz1YoGBgSxatIgvvviCNm3aMHv2bObOnVulMcyYMYMXX3yRWbNmERsby6BBg1i9ejWRkeWvmpo+fTppaWm0bNmSwMCqr/C6//77WbhwIYmJicTFxdG3b18WLVpUYV9XOsVRvPBXiHqUZT3P4kML6qXvUwXHOJi7r877DdOFE6aPqPN+AW4Lu5dgXdN66VsIIYSorvz8fA4ePEhkZCQ6na6+hyNEo1fV3ymZeRQNwrG8Q/XSb54tp95mAY/lH+KC9Vy99H00P61e+hVCCCGEEI2XBI+iQcjIP1znfTpwcCDnT+x1sli1rP7hYE5KvSwhrY/7LYQQQgghGjcJHkWDcLLgWJ33mZF/mBxbdp33W1KBvYAjeQfqvN+TBRl13qcQQgghhGjcJHgU9c7msHGm8GQd92klI7/0Bu/6cLLgGAX2vDrtM9eaTY41q077FEIIIYQQjZsEj6LenS08VedLN08XHr+kPlUo+GubEGVsS3uvq+jo3R1TSjBHFuVw7Kss1Ir7h8o6qJ+ZwPqY7RVCCCFE45KWloaiKGzfvr3KdcaOHVsqS+vl7M8//6R79+7odDo6duxY38OpVRI8inqXaTlT531eSuBkUptp59WVlsZYfLUB6NR6bDkODv92HKOnCT9tE+K8uuLt4et226cLjmN3lD4nqTadt5yt0/6EEEIIUX2LFi3Cx8enVtouK+hr1qwZGRkZtGvXrlb6vBxMmTIFo9FISkoK69atq+/h1CoJHkW9y7aer9P+sqznybO5t0zUS+NDjLkDOrXe5fk/f0rFJ9gb7yAzAFqVJ1HGdvhpq37GEIDFYSHTUvVDbWtClvVCnfYnhBBCiMZHrVYTHBzscpZkfSgsLKzX/iuSmppK7969CQ8Px9/fv76HU6skeBT1rq6DGHeDVb3aQJSpbaklqWePZnJ8/yli+7RyeV6lqGhhaI1Z4+1WPw39PgghhBCievr168eECROYMGEC3t7eBAQE8OKLL1Ly2PVz584xZswYfH19MRgMDB48mH37is6jTkpKYty4cZw/fx5FUVAUhalTpwJQUFBAQkICYWFhGI1GunXrRlJSkrPd4hnLb775htjYWEwmE4MGDSIj
o2jrzNSpU1m8eDH/+c9/nG0nJSWVWrZqs9m47777iIyMRK/XExMTw4IF7p3VfebMGUaOHElYWBgGg4G4uDg+++yzMu/VxIkTCQgIYODAgQDs3r2bwYMHYzKZCAoKYvTo0Zw+/fcH8GvXrqV37974+Pjg7+/PkCFDSE1Ndb5eWFjIhAkTCAkJQafTER4ezqxZs8odq91uZ/r06TRt2hRPT086duzI2rVrna8risK2bduYPn26y/fjciXBo6h3dR3EuJthNVzfCrXi+mmb3W7nj6S9NGsbgleAqVQdlaIi0hCN4kY/uba6TWCTJcGjEEIIUecWL16MRqNh8+bNLFiwgNdee42FCxc6Xx87dixbt25l5cqVbNq0CYfDwQ033IDFYqFnz57Mnz8fLy8vMjIyyMjIICEhAYAJEyawadMmli5dys6dO7n99tsZNGiQM/AEyM3NZe7cuXzyySf88MMPpKenO+snJCQwYsQIZ0CZkZFBz549S43fbrfTtGlTvvjiC/bs2cPkyZP55z//ybJly6p8D/Lz8+ncuTOrV69m9+7dPPDAA4wePZrNmzeXuldarZaNGzfy3nvvkZmZyTXXXEN8fDxbt25l7dq1nDhxghEjRjjr5OTk8OSTT7J161bWrVuHSqXi5ptvxm4v2h70xhtvsHLlSpYtW0ZKSgpLliwhIiKi3LEuWLCAefPmMXfuXHbu3MnAgQO56aabnPc1IyODtm3b8tRTT7l8Py5X9Tv/LASQW8fHZeS6kWXUoDZi1viUej591zHysgq46ubIcuvq1Aa8PfzIrOLewhxbNg4cKG6FnJeuvo8pEUIIIa5EzZo14/XXX0dRFGJiYti1axevv/4648ePZ9++faxcuZKNGzc6A7clS5bQrFkzVqxYwe233463tzeKohAcHOxsMz09ncTERNLT0wkNDQWKgsG1a9eSmJjIzJkzAbBYLLz33nu0bNkSKAo4p0+fDoDJZEKv11NQUODS9sU8PDyYNm2a83FkZCSbNm1i2bJlLkFcRcLCwlyCrEcffZRvvvmGZcuWcdVVVzmfj4qK4pVXXnE+fumll4iPj3deD8BHH31Es2bN2Lt3L9HR0dx6660ufX300UcEBgayZ88e2rVrR3p6OlFRUfTu3RtFUQgPD69wrHPnzmXSpEnceeedAMyZM4f169czf/583n77beeSXpPJVOF9u1zIzKOodxZ73a1hd2An355f5fJeGl8UxTWYK8wrZN8vB2l1VTieBm2l9avK7rBT6MbYqsvqsNRZX0IIIYQo0r17d5f3Fj169GDfvn3YbDaSk5PRaDR069bN+bq/vz8xMTEkJyeX2+auXbuw2WxER0djMpmcXxs2bHBZsmkwGJyBI0BISAgnT7p/XNrbb79N586dCQwMxGQy8cEHH5CeXvUj0Gw2GzNmzCAuLg4/Pz9MJhPffPNNqTY6d+7s8njHjh2sX7/e5Rpbt24N4LzOffv2MXLkSFq0aIGXl5dzVrG47bFjx7J9+3ZiYmJ47LHH+N///lfuOC9cuMCxY8fo1auXy/O9evWq8PtxOZOZR1HvrA5rnfXlbkZTT5Wu1HN7Nx3EQ+dBRIemldbXqjzd6q8uM67a7HV334UQQghRe7Kzs1Gr1Wzbtg212jVHg8n09/YaDw8Pl9cURXHZb1kVS5cuJSEhgXnz5tGjRw/MZjOvvvoqv/76a5XbePXVV1mwYAHz588nLi4Oo9HIxIkTSyXFMRqNLo+zs7MZOnQoc+bMKdVmSEgIAEOHDiU8PJwPP/yQ0NBQ7HY77dq1c7bdqVMnDh48yJo1a/juu+8YMWIEAwYM4N///rdb9+FKJcGjqHcO6i5gcuDeH8iLV5DmnMslffcx2vSJIj+nwPm83WrHYbeTeyEPjVaDVufxV3X3lqC6Pb5qsNfhfRdCCCFEkYuDrF9++YWoqCjUajWxsbFYrVZ+/fVX57LVM2fOkJKSQps2bQDQarXYbK5nVcfHx2Oz2Th58iRX
X331JY+trLYvVryk9uGHH3Y+V3J2syo2btzIsGHDuPvuu4GifZR79+51XmN5OnXqxJdffklERESZ2V+L79WHH37ovA8//fRTqXJeXl7ccccd3HHHHdx2220MGjSIs2fP4ufnV6pcaGgoGzdupG/fvi7jL7m89koiy1ZFvbs4GU1tUtz8kbc7XP+A5ucUgAP2bNhHUuIvzq/M4xfIOZdHUuIv7P81zVneRsV/gC+mqsNfybq870IIIYQokp6ezpNPPklKSgqfffYZb775Jo8//jhQtMdv2LBhjB8/np9++okdO3Zw9913ExYWxrBhwwCIiIggOzubdevWcfr0aXJzc4mOjmbUqFGMGTOG5cuXc/DgQTZv3sysWbNYvXp1lccWERHBzp07SUlJ4fTp01gspbe4REVFsXXrVr755hv27t3Liy++yJYtW9y6B1FRUXz77bf8/PPPJCcn8+CDD3LixIlK6z3yyCOcPXuWkSNHsmXLFlJTU/nmm28YN24cNpsNX19f/P39+eCDD9i/fz/ff/89Tz75pEsbr732Gp999hl//vkne/fu5YsvviA4OLjcszOffvpp5syZw+eff05KSgrPPvss27dvd37PrjTy7lHUO43iUXmhGqJWVKgVNTZH1YK6i5PKmP2NdBpS+pDcvZsOYi200qZvFAbvv8+CzLW6l5RGo6q7e1GX910IIYQQRcaMGUNeXh5XXXUVarWaxx9/nAceeMD5emJiIo8//jhDhgyhsLCQPn368PXXXzuXnPbs2ZN//OMf3HHHHZw5c4YpU6YwdepUEhMTeemll3jqqac4evQoAQEBdO/enSFDhlR5bOPHjycpKYkuXbqQnZ3N+vXrS2UiffDBB/n999+54447UBSFkSNH8vDDD7NmzZoq9/PCCy9w4MABBg4ciMFg4IEHHmD48OGcP19xJvjiWcBJkyZx/fXXU1BQQHh4OIMGDUKlUqEoCkuXLuWxxx6jXbt2xMTE8MYbb9CvXz9nG2azmVdeeYV9+/ahVqvp2rUrX3/9NSpV2R/gP/bYY5w/f56nnnqKkydP0qZNG1auXElUVFSVr/dyojjcXegsRA1bcexjjuSl1Vl/yVnbq3xMhQoVHb27VxrU/fLl7xTmWehz999LGOwOOzsu/FrlhEA6lY723t0qL1hDfDz8uLv5hDrrTwghhKgp+fn5HDx4kMjISHS60vkJGqp+/frRsWNH5s+fX99DEcJFVX+nZNmqqHcmjXed9mdUlz6XsTx27BzLr3r2sJJOFWS4lUnWoKn6uGqCuY7vuxBCCCGEaNxk2aqod3UdxBg1ZiiovFyxEwVHMGm88NMGllum+63xLo+zLJkczjvg3rjUZrfKV1ddB+1CCCGEEKJxk+BR1DuzxqtO+/P28EOFqsrZRh1Aak4yDhz4a5tUWv6C5Rz7cv5wO5upr0eAW+WrS2YehRBCiLqVlJRU30MQolpk2aqod3U9A6ZRPCqcRSyLAwepOckcyPmTPFtumWUK7Pmk56aSkr2zygl5inlpfNCpDW7VqS5THQftQgghhBCicZOZR1Hv/LVBdd5nE88QThdWnhL6YqcLT3C68AQGtQm92uCcwSyw5ZFjy7rkUxqbeIZcYs1LF+BZ9/ddCCGEaEgcDgeFOXlY8wvR6LRojXoUxb0zmt0hCXPqX1paGpGRkfz+++907NixvofT6EjwKOqdUWPCpDGTbc2qsz5NGm/MGi+yrBcuqX6uLZtcm3vHcJRHp9Lh6+ZMaHWpFTV+HpUvwRVCCCEuR4W5+aRt2sXepK3knMp0Pm8M9CG6XxciesShNdR8Ftfly5c7j9wQNU9RFL766iuGDx9ebplmzZqRkZFBQEDdbhe6XEjwKBqEQM+QOg0eASIM0fxx4Te39ybWJAWIMEajUHufcpbFTxuIRiW//kIIIa48GX8c4OcPlmMtsJR6LedUJr9/8R27Vm6g5wO3ENK2RY327efnV6PtVVVhYSFarbZe+m5o1Go1
wcHB9T2MRkv2PIoGIcgzrM771KuNhOrD67zfkpp4huKl8a3zfuvjfgshhBD1LeOPA/z49jKshaUDx5KshRZ+fHsZGX+4lzm9Mv369WPixInOxxEREbz00kuMGTMGk8lEeHg4K1eu5NSpUwwbNgyTyUT79u3ZunWrs86iRYvw8fFhxYoVREVFodPpGDhwIIcPH3aWmTp1Kh07dmThwoUu5/alp6c72/Xy8mLEiBGcOFG0jWfv3r0oisKff/7pMubXX3+dli1bOh/v3r2bwYMHYzKZCAoKYvTo0Zw+fdrlGh999FEmTpyIr68vQUFBfPjhh+Tk5DBu3DjMZjOtWrVizZo1Lv1Upd3HHnuMZ555Bj8/P4KDg5k6darLvQS4+eabURTF+fhiaWlpKIrC9u3bgaIkRoqisG7dOrp06YLBYKBnz56kpKSU8128sknwKBqE+griQnTNMGnq9oiMYjqVjqb6mv1Es6rC6jloFkIIIepaYW4+P3+wHIfDQaVJChxF+yF//mA5hbn5tTqu119/nV69evH7779z4403Mnr0aMaMGcPdd9/Nb7/9RsuWLRkzZkzRuP+Sm5vLyy+/zMcff8zGjRvJzMzkzjvvdGl3//79fPnllyxfvpzt27djt9sZNmwYZ8+eZcOGDXz77bccOHCAO+64A4Do6Gi6dOnCkiVLXNpZsmQJd911FwCZmZlcc801xMfHs3XrVtauXcuJEycYMWKES53FixcTEBDA5s2befTRR3nooYe4/fbb6dmzJ7/99hvXX389o0ePJjc31+12jUYjv/76K6+88grTp0/n22+/BWDLli0AJCYmkpGR4XxcVc8//zzz5s1j69ataDQa7r33XrfqXykkeBQNQhNtKBpV3e8BUFCIMrVDr9bXab9alZZoc3vUirpO+y0WqpPgUQghxJUlbdOuoqWqVc1u5wBrgYW0X3bV6rhuuOEGHnzwQaKiopg8eTIXLlyga9eu3H777URHRzNp0iSSk5OdM4QAFouFt956ix49etC5c2cWL17Mzz//zObNm51lCgsL+fjjj4mPj6d9+/asW7eOXbt28emnn9K5c2e6devGxx9/zIYNG5yB1qhRo/jss8+cbezdu5dt27YxatQoAN566y3i4+OZOXMmrVu3Jj4+no8++oj169ezd+9eZ70OHTrwwgsvEBUVxXPPPYdOpyMgIIDx48c7r/PMmTPs3LnTrXbbt2/PlClTiIqKYsyYMXTp0oV169YBEBhYlD/Cx8eH4OBg5+Oqevnll+nbty9t2rTh2Wef5eeffyY/v3Y/OGiMJHgUDYJGpaF5Pc3CeShaWps6oK+jozK0Ki0xpvboVHUbsBZr4hmCsZ5mW4UQQoj64HA42Ju0tfKCZdi7fqvLrF9Na9++vfPfQUFFmdDj4uJKPXfy5EnncxqNhq5duzoft27dGh8fH5KTk53PhYeHuwRQycnJNGvWjGbNmjmfa9OmjUu9O++8k7S0NH755RegaNaxU6dOtG7dGoAdO3awfv16TCaT86v4tdTU1DKvSa1W4+/vX+E1XUq7ACEhIS73pTpKth0SEuIyPvE3yZghGoxoUxwHcupnfbmHypNYc0dSs/dw3ppZa/0Y1SZamdriqar5DG5VFWOOq7yQEEIIcRkpzMlzyarqjpxTmRTm5OFpqp0PmUtmXy0+JqSs5+x29xL8GY1Gt8cSHBzMNddcw6effkr37t359NNPeeihh5yvZ2dnM3ToUObMmVOqbnHABZTKKKsoSoXXVJ123b0v5amJe34lkOBRNBgRhmg81ToKbPWzRECjeBBjbs/JggwO5x3A5rDVWNsqFEL14YTomtd5ZlWXcSgqWhnb1Vv/QgghRH2w5hdWu35tBY+Xwmq1snXrVq666ioAUlJSyMzMJDY2ttw6sbGxHD58mMOHDztnH/fs2UNmZiZt2rRxlhs1ahTPPPMMI0eO5MCBAy57KTt16sSXX35JREQEGk3NhRE11a6Hhwc2W829
fxOlybJV0WBoVBpamzrU8ygUmniG0s6rC34eAdUO9BTA28OXtl6dCdWF12vgCBBpjMGoMdXrGIQQQoi6ptFV75iK6tavaR4eHjz66KP8+uuvbNu2jbFjx9K9e3dnMFmWAQMGEBcXx6hRo/jtt9/YvHkzY8aMoW/fvnTp0sVZ7pZbbiErK4uHHnqI/v37Exoa6nztkUce4ezZs4wcOZItW7aQmprKN998w7hx46oVtNVUuxEREaxbt47jx49z7ty5Sx6PKJ8Ej6JBaefVub6HAICnSkcrU1s6eF9FqK45Hir3/qehUTQEe4YR59WVGFN79Gr3l47UhjivLpUXEkIIIS4zWqMeY6DPJdU1BvqgNdZPnoLyGAwGJk2axF133UWvXr0wmUx8/vnnFdZRFIX//Oc/+Pr60qdPHwYMGECLFi1K1TObzQwdOpQdO3Y4E+UUCw0NZePGjdhsNq6//nri4uKYOHEiPj4+qFSXHlbUVLvz5s3j22+/pVmzZsTHx1/yeET5FEdt7gAW4hL859gnHM47WN/DcOHATq4th1xrFjm2bHJt2VjtVhzYUVBQKxr0aiNGjRmj2oRBbUalNKzPZvy0AYxs+pBzHb8QQgjRWOXn53Pw4EGXMwwrs3fdFn7/4ju3+4ofMYDoa7pWXrCOLFq0iIkTJ5KZmVnfQxGXkar+TsmeR9HgdPDu1uCCRwUVRrUZo9qMe4mfG4723ldJ4CiEEOKKFdEjjl0rN2AtrOJxHYqCRqshorskmhOiWMOaGhECCDdEEeDZpL6HcVkxaEy0NnWs72EIIYQQ9UZr0NHzgVuKPkit7LNUBRQFej14C1pD/WVIF6KhkeBRNDiKotDZp3d9D+OyEu/dHY1KFhoIIYS4soW0bcHVj4xAo/WosJxG60GfCSMIblM/Z1BXZOzYsbJkVdQbeTcpGqRWxrbs1G0hI/9wfQ+l0fPx8CPOq/zsa0IIIcSVJKRtC4bOmkDaL7vYu36ry/mPxkAfovt3IaJHHFq9zDgKcTEJHkWDpCgKfQNuYNnRD7E75IDW6rg6YJDMOgohhBAlaA06oq/pSlT/LhTm5GHNL0Sj06I16htEfoCLk+JMnTqVFStWsH379nodlxCybFU0WAGeQXT07lHfw2jUYsxxhBta1fcwhBBCiAZJURQ8TQaMAT54mgy1HjiOHTsWRVGYPXu2y/MrVqxw6fuOO+5g7969NdZvUlISERERNdZeQ7do0SJ8fHzqexiXJQkeRYN2lW9f/LQB9T2MRsmgMXG1/6D6HoYQQgghStDpdMyZM6fCQ+z1ej1Nmly+yQMLCwvLfN5isdTxSIS7JHgUDZpGpeHawGENYglJY9M/4EZ06oZ1qLEQQghxpRswYADBwcHMmjWr3DKVzZylpqbSokULJkyYgMPhoKCggISEBMLCwjAajXTr1o2kpKRy6+/YsYP+/ftjNpvx8vKic+fObN26tdzymZmZPPjggwQFBaHT6WjXrh2rVq0CipbUduzY0aX8/PnzXWY6x44dy/Dhw3n55ZcJDQ0lJiaGtLQ0FEXh888/p2/fvuh0OpYsWQLAwoULiY2NRafT0bp1a9555x1nW8X1li9fTv/+/TEYDHTo0IFNmzYBRbOs48aN4/z58yiKgqIoTJ06tdxrE+6RjVCiwQvShdHNtz+/nP2+vofSaLTz6kykMaa+hyGEEEKIi6jVambOnMldd93FY489RtOmTd2qv3PnTgYOHMh9993HSy+9BMCECRPYs2cPS5cuJTQ0lK+++opBgwaxa9cuoqKiSrUxatQo4uPjeffdd1Gr1Wzfvh0Pj7Iz0NrtdgYPHkxWVhb/+te/aNmyJXv27EGtVrs17nXr1uHl5cW3337r8vyzzz7LvHnziI+PdwaQkydP5q233iI+Pp7ff/+d8ePHYzQaueeee5z1nn/+eebOnUtUVBTPP/88I0eOZP/+/fTs2ZP58+czefJkUlJSADCZTG6NVZRPgkfRKHT26cXJgqMc
yEmp76E0eCG6plwdIMtVhRBCiIbq5ptvpmPHjkyZMoX/+7//q3K9n3/+mSFDhvD888/z1FNPAZCenk5iYiLp6emEhoYCkJCQwNq1a0lMTGTmzJn069ePtLQ0Zzvp6ek8/fTTtG7dGqDMALPYd999x+bNm0lOTiY6OhqAFi3cP8LEaDSycOFCtFotgHM8EydO5JZbbnGWmzJlCvPmzXM+FxkZyZ49e3j//fddgseEhARuvPFGAKZNm0bbtm3Zv38/rVu3xtvbG0VRCA4OdnucomKybFU0CoqiMKDJcHy1/vU9lAbNoDExKOh21Ip7nwYKIYQQom7NmTOHxYsXk5ycXKXy6enpXHfddUyePNkZOALs2rULm81GdHQ0JpPJ+bVhwwZSU1PLbOvJJ5/k/vvvZ8CAAcyePbvccgDbt2+nadOmzsDxUsXFxTkDx5K6dOni/HdOTg6pqancd999Ltfy0ksvlRpj+/btnf8OCQkB4OTJk9Uao6iczDyKRkOr8uSGoDv497GPKLDl1/dwGhy1omZQ0G0YNeb6HooQQgghKtGnTx8GDhzIc889x9ixYystHxgYSGhoKJ999hn33nsvXl5eAGRnZ6NWq9m2bVuppaTlLdecOnUqd911F6tXr2bNmjVMmTKFpUuXcvPNN5cqq9dXnD9BpVLhcDhcnisr8Y3RaCyzfsnns7OzAfjwww/p1q2bS7mLr63kMtvi3Bh2uxzvVttk5lE0Kr7aAG4MvhONqux1+VcqRVG4PugWQnXN63soQgghhKii2bNn89///teZ7KUier2eVatWodPpGDhwIFlZWQDEx8djs9k4efIkrVq1cvmqaNlmdHQ0TzzxBP/73/+45ZZbSExMLLNc+/btOXLkSLlHhwQGBnL8+HGXAPJSz6MMCgoiNDSUAwcOlLqWyMjIKrej1Wqx2WyXNAZRMQkeRaMTqmvOYFma6eKawKG0NMbW9zCEEEII4Ya4uDhGjRrFG2+8UaXyRqOR1atXo9FoGDx4MNnZ2URHRzNq1CjGjBnD8uXLOXjwIJs3b2bWrFmsXr26VBt5eXlMmDCBpKQkDh06xMaNG9myZQuxsWW/j+jbty99+vTh1ltv5dtvv+XgwYOsWbOGtWvXAtCvXz9OnTrFK6+8QmpqKm+//TZr1qy55Hsybdo0Zs2axRtvvMHevXvZtWsXiYmJvPbaa1VuIyIiguzsbNatW8fp06fJzc295PEIVxI8ikYp3NBKZiABlaLiuibDiTV3rO+hCCGEEOISTJ8+3a3lliaTiTVr1uBwOLjxxhvJyckhMTGRMWPG8NRTTxETE8Pw4cPZsmULzZuXXpGkVqs5c+YMY8aMITo6mhEjRjB48GCmTZtWbp9ffvklXbt2ZeTIkbRp04ZnnnnGObMXGxvLO++8w9tvv02HDh3YvHkzCQkJ7t+Iv9x///0sXLiQxMRE4uLi6Nu3L4sWLXJr5rFnz5784x//4I477iAwMJBXXnnlkscjXCmOixcpC9GIZOQfZvXxpeTb8up7KHVOo/JgYJNb5EgOIYQQV5z8/HwOHjxIZGQkOp2uvocjRKNX1d8pmXkUjVqIrhm3h91PgGdQfQ+lTnl5eHNr6DgJHIUQQgghRJ2R4FE0et4evtwaei/Rpnb1PZQ60dzQgtvDxhPoKWcXCSGEEEKIuiNHdYjLgofKg+ua3EyQLoxNZ9ZhdVjre0g1TlEUOvv05irfvqgU+dxHCCGEEELULQkexWVDURQ6eHejmb4F6079hxP5x+p7SDXGV+vPtYHDCNY1re+hCCGEEEKIK5RMX4jLjp82kFtD76Wn/7WNPhurSlHRyacnd4Q9KIGjEEII0cj169ePiRMn1mofiqKwYsWKWu2jMUhKSkJRFDIzM6tcJyIigvnz59famC4HEjyKy1JR0NWLu5o+RItGmlQmTB/OHU0foKf/ADQqWSQghBBCiMplZGQwePDg+h5Go3fmzBkeeeQRwsPDMRqN9OzZk99++62+
h1Xv5B2puKx5efhwQ/Ad9T0MIYQQQog6ERwsCfVqwt69e1GpVCxbtgwfHx8mTZrErbfeysGDB+t7aPVKZh6FEEIIIYSoI1arlQkTJuDt7U1AQAAvvvgiJY9dL2vZqY+PD4sWLQKgsLCQCRMmEBISgk6nIzw8nFmzZpVZPy0tDUVRWL58Of3798dgMNChQwc2bdrk0v5PP/3E1VdfjV6vp1mzZjz22GPk5OQ4X3/nnXeIiopCp9MRFBTEbbfd5nzt3//+N3Fxcej1evz9/RkwYIBL3ZKKl5J+8803xMfHo9frueaaazh58iRr1qwhNjYWLy8v7rrrLnJzc531CgoKeOyxx2jSpAk6nY7evXuzZcsWl7a//vproqOj0ev19O/fn7S0tFL9V3adJfXo0YM333yTbt26ERMTw5gxY8jIyMBqvfySMrpDgkchhBBCCCHqyOLFi9FoNGzevJkFCxbw2muvsXDhwirXf+ONN1i5ciXLli0jJSWFJUuWEBERUWGd559/noSEBLZv3050dDQjR450BkGpqakMGjSIW2+9lZ07d/L555/z008/8f/s3XlcTnn/P/DXab/aSVSUkkooIkuFsoxiuC1jnUyyGxpCBkMJQwZZxjo3M2UMY+xjZKcaspWZQiVJTXObTPaR9qvz+8PX+bm0XZEavJ6PR4/pfNb353Rfj4f3/flc5/j5+QEA4uLiMGXKFCxcuBApKSk4evQounTpAuDZEdnhw4dj9OjRSE5ORlRUFAYOHKiQDJclODgY69atw7lz5/Dnn39iyJAhWL16NXbs2IGIiAgcP34ca9euldp//vnn2Lt3L7Zu3YrffvsNTZs2haenJx48eAAA+PPPPzFw4ED07dsX8fHxGDt2LGbPnq0wZ2XrrMijR4+wcOFC+Pj4QE3tPT+4KRIRERERvUXy8vLEpKQkMS8vr7ZDqRJ3d3fR3t5eLCkpkcpmzZol2tvbS9cAxP379yv0MzAwEMPCwkRRFMXPPvtM7Natm8IYL3qxf3p6ughA3LJli1SfmJgoAhCTk5NFURTFMWPGiOPHj1cY48yZM6KKioqYl5cn7t27V9TX1xf/+eefUnNdvnxZBCBmZGQotf7IyEgRgHjy5EmpLCQkRAQgpqWlSWUTJkwQPT09RVEUxZycHFFdXV3cvn27VF9YWCiamZmJy5YtE0VRFOfMmSM2b95cYa5Zs2aJAMSHDx8qtU5RFMXGjRuLq1atUmjz+PFjsXXr1uKAAQPEwsJCpdb5NlL2M8WdRyIiIiKiGtKxY0cIgiBdu7i4IDU1FXK5XKn+vr6+iI+Ph52dHaZMmYLjx49X2sfR0VH63dTUFACQnZ0NAEhISEB4eDh0dXWlH09PT5SUlCA9PR0ffPABGjdujCZNmuCTTz7B9u3bpSOlrVq1Qvfu3eHg4IDBgwdj8+bNePjwYZXiadCgAbS1tdGkSROFsufxpaWloaioCG5ublK9uro62rdvj+TkZABAcnIyOnTooDCHi4uLwnVl6yzPN998gwcPHmDnzp1QV3+7n+JfHZg8EhERERH9SwiCUOrYZ1FRkfR7mzZtkJ6ejkWLFiEvLw9DhgxR+A5iWV5Mep4nriUlJQCAnJwcTJgwAfHx8dJPQkICUlNTYW1tDT09Pfz222/48ccfYWpqiqCgILRq1QqPHj2CqqoqTpw4gSNHjqB58+ZYu3Yt7OzsKn2ozMvxvJyUCYIgxVddKltnef766y9YWVlBQ0OjWuN5W73nh3aJiIiIiGrOxYsXFa4vXLgAGxsbqKqqAgCMjY2RlZUl1aempio8PAYA9PX1MXToUAwdOhSDBg2Cl5cXHjx4gLp161Y5njZt2iApKQlNmzYtt42amhp69OiBHj16YP78+TA0NMTp06cxcOBACIIANzc3uLm5ISgoCI0bN8b+/fsxffr0KsdSFmtra2hoaCAmJgaNGzcG8CyZjo2Nld6ZaW9vj4MHDyr0u3DhQpXXWZbp06eXuv/vMyaP
REREREQ1JDMzE9OnT8eECRPw22+/Ye3atQgNDZXqu3XrhnXr1sHFxQVyuRyzZs1S2JlbuXIlTE1N4eTkBBUVFezevRsmJiYwNDR8pXhmzZqFjh07ws/PD2PHjoWOjg6SkpJw4sQJrFu3DocOHcKtW7fQpUsX1KlTB4cPH0ZJSQns7Oxw8eJFnDp1Cj179kT9+vVx8eJF3L17F/b29q97myQ6Ojr49NNPMXPmTNStWxcWFhZYtmwZcnNzMWbMGADAxIkTERoaipkzZ2Ls2LG4fPmy9HRaZddZng0bNuD27dv4/vvvq21NbzMmj0RERERENcTHxwd5eXlo3749VFVVMXXqVIwfP16qDw0NxahRo9C5c2eYmZlhzZo1uHz5slSvp6eHZcuWITU1FaqqqmjXrh0OHz4MFZVX+zaao6MjoqOjMXfuXHTu3BmiKMLa2hpDhz57T7ahoSH27duH4OBg5Ofnw8bGBj/++CNatGiB5ORk/Prrr1i9ejX++ecfNG7cGKGhoejVq9fr3aSXLF26FCUlJfjkk0/w5MkTODs749ixY6hTpw4AwMLCAnv37sW0adOwdu1atG/fHkuWLMHo0aOVXmd5srKykJmZWa3reZsJ4suHqomIiIiI/sXy8/ORnp4OKysraGlp1XY4RG89ZT9TfGAOERERERERVYrJIxEREREREVWKySMRERERERFViskjERERERERVYrJIxEREREREVWKySMRERERERFViskjERERERERVYrJIxEREREREVWKySMRERERERFViskjEREREdE7LioqCoIg4NGjR7UdSo0IDg5G69atazuMdw6TRyIiIiKiGmBpaQlBEEr9TJ48Wekxhg0bBi8vL4Wyo0ePQhAEBAcHK5QHBwfDwsKiOkKXWFpaYvXq1dU65usSBAEHDhyo7TDeC0weiYiIiIhqQGxsLLKysqSfEydOAAAGDx6s9Bhdu3ZFTEwMiouLpbLIyEiYm5sjKipKoW1kZCS6du1aLbG/aXK5HCUlJbUdBlWCySMRERERUQ0wNjaGiYmJ9HPo0CFYW1vD3d1d6TG6du2KnJwcxMXFSWVRUVGYPXs2Ll68iPz8fABAfn4+Ll68WCp5vHz5MpydnaGtrQ1XV1ekpKRIdWlpaejXrx8aNGgAXV1dtGvXDidPnpTqPTw88Mcff2DatGnSrml5Vq5cCQcHB+jo6MDc3ByTJk1CTk6OVB8eHg5DQ0McPHgQzZs3h6amJjIzM1FQUICAgAA0bNgQOjo66NChQ6mk+EWWlpYAgAEDBkAQBOn6uW3btsHS0hIGBgYYNmwYnjx5ItWVlJQgJCQEVlZWkMlkaNWqFfbs2VPuXMTkkYiIiIioxhUWFuKHH37A6NGjFZIwX19feHh4lNvP1tYWZmZmiIyMBAA8efIEv/32GwYPHgxLS0ucP38eAHDu3DkUFBSUSh7nzp2L0NBQxMXFQU1NDaNHj5bqcnJy0Lt3b5w6dQq///47vLy80LdvX2RmZgIA9u3bh0aNGmHhwoXS7ml5VFRU8PXXXyMxMRFbt27F6dOn8fnnnyu0yc3NxVdffYUtW7YgMTER9evXh5+fH86fP4+dO3fiypUrGDx4MLy8vJCamlrmPLGxsQCAsLAwZGVlSdfAs2T4wIEDOHToEA4dOoTo6GgsXbpUqg8JCcH333+PTZs2ITExEdOmTcOIESMQHR1d7rreeyIRERER0VskLy9PTEpKEvPy8mo7lFf2008/iaqqquLt27cVymfPni1+8sknFfb19vYWe/bsKYqiKEZERIjNmzcXRVEUx48fLwYFBYmiKIqBgYGilZWV1CcyMlIEIJ48eVIqi4iIEAFUeB9btGghrl27Vrpu3LixuGrVKuUW+YLdu3eLRkZG0nVYWJgIQIyPj5fK/vjjjzLvSffu3cU5c+aUOzYAcf/+/Qpl8+fPF7W1tcV//vlHKps5c6bYoUMHURRFMT8/X9TW1hbPnTun0G/MmDHi8OHD
q7y+t52ynym12k1diYiIiIjeP99++y169eoFMzMzhfKQkJBK+3p4eMDf3x9FRUWIioqSdird3d3xzTffAHh2lLWs7zs6OjpKv5uamgIAsrOzYWFhgZycHAQHByMiIgJZWVkoLi5GXl6etPNYFSdPnkRISAiuX7+Of/75B8XFxcjPz0dubi60tbUBABoaGgrxXL16FXK5HLa2tgpjFRQUwMjIqMoxWFpaQk9PT2G92dnZAICbN28iNzcXH3zwgUKfwsJCODk5VXmu9wWTRyIiIiKiGvTHH3/g5MmT2Ldv3yv179q1K54+fYrY2FhERkZi5syZAJ4lj6NHj8aDBw9w8eJFTJgwoVRfdXV16ffnx2WfP6gmICAAJ06cwIoVK9C0aVPIZDIMGjQIhYWFVYovIyMDffr0waefforFixejbt26OHv2LMaMGYPCwkIpeZTJZApHdnNycqCqqorLly9DVVVVYUxdXd0qxfDyWp+v9/lan3//MiIiAg0bNlRop6mpWeW53hdMHomIiIiIalBYWBjq16+PDz/88JX6W1tbw9zcHAcPHkR8fLz0wJ2GDRuiYcOGCA0NRWFhYZWftBoTEwNfX18MGDAAwLMEKyMjQ6GNhoYG5HJ5heNcvnwZJSUlCA0NhYrKs0es7Nq1q9L5nZycIJfLkZ2djc6dOysdt7q6eqUxvezFh/RU5YFF7zs+MIeIiIiIqIaUlJQgLCwMI0eOhJpa6X2cOXPmwMfHp9Jxunbtig0bNqBp06Zo0KCBVO7u7o61a9dKD9apChsbG+zbtw/x8fFISEjAxx9/XOr1GZaWlvj1119x+/Zt3Lt3r8xxmjZtiqKiIqxduxa3bt3Ctm3bsGnTpkrnt7W1hbe3N3x8fLBv3z6kp6fj0qVLCAkJQURERLn9LC0tcerUKdy5cwcPHz5Uaq16enoICAjAtGnTsHXrVqSlpeG3337D2rVrsXXrVqXGeB8xeSQiIiIiqiEnT55EZmamwlNOX5SVlaXUdwy7du2KJ0+elHoyq7u7O548efJK73dcuXIl6tSpA1dXV/Tt2xeenp5o06aNQpuFCxciIyMD1tbWMDY2LnOcVq1aYeXKlfjqq6/QsmVLbN++XanvcgLPdmV9fHwwY8YM2NnZoX///oiNjYWFhUW5fUJDQ3HixAmYm5tX6fuKixYtQmBgIEJCQmBvbw8vLy9ERETAyspK6THeN4IoimJtB0FEREREpKz8/Hykp6fDysoKWlpatR0O0VtP2c8Udx6JiIiIiIioUkweiYiIiIiIqFJMHomIiIiIiKhSTB6JiIiIiIioUkweiYiIiIiIqFJMHomIiIiIqEwZGRkQBAHx8fFK9/H19UX//v3fWExUe5g8EhERERG9JcLDw2FoaPhGxi4r6TM3N0dWVhZatmz5Ruakt4tabQdARERERET/TqqqqjAxMantMOhfgjuPREREREQ1wMPDA35+fvDz84OBgQHq1auHwMBAiKIotXn48CF8fHxQp04daGtro1evXkhNTQUAREVFYdSoUXj8+DEEQYAgCAgODgYAFBQUICAgAA0bNoSOjg46dOiAqKgoadznO5bHjh2Dvb09dHV14eXlhaysLABAcHAwtm7dip9//lkaOyoqqtSxVblcjjFjxsDKygoymQx2dnZYs2ZNle5DZbE8t2XLFtjb20NLSwvNmjXDhg0bpLpBgwbBz89Puvb394cgCLh+/ToAoLCwEDo6Ojh58mSVYqOKMXkkIiIiIqohW7duhZqaGi5duoQ1a9Zg5cqV2LJli1Tv6+uLuLg4HDx4EOfPn4coiujduzeKiorg6uqK1atXQ19fH1lZWcjKykJAQAAAwM/PD+fPn8fOnTtx5coVDB48GF5eXlLiCQC5ublYsWIFtm3bhl9//RWZmZlS/4CAAAwZMkRK4rKysuDq6loq/pKSEjRq1Ai7d+9GUlISgoKC8MUXX2DXrl1Vug8VxQIA27dvR1BQEBYvXozk5GQsWbIE
gYGB2Lp1KwDA3d1dITmOjo5GvXr1pLLY2FjpnlH14bFVIiIiIqIaYm5ujlWrVkEQBNjZ2eHq1atYtWoVxo0bh9TUVBw8eBAxMTFS0rN9+3aYm5vjwIEDGDx4MAwMDCAIgsJR0szMTISFhSEzMxNmZmYAniWDR48eRVhYGJYsWQIAKCoqwqZNm2BtbQ3gWcK5cOFCAICuri5kMhkKCgoqPKaqrq6OBQsWSNdWVlY4f/48du3ahSFDhih9HyqKBQDmz5+P0NBQDBw4UJonKSkJ33zzDUaOHAkPDw9MnToVd+/ehZqaGpKSkhAYGIioqChMnDgRUVFRaNeuHbS1tZWOiSrH5JGIiIiIqIZ07NgRgiBI1y4uLggNDYVcLkdycjLU1NTQoUMHqd7IyAh2dnZITk4ud8yrV69CLpfD1tZWobygoABGRkbStba2tpSsAYCpqSmys7OrvIb169fju+++Q2ZmJvLy8lBYWIjWrVtXaYyKYnn69CnS0tIwZswYjBs3TmpTXFwMAwMDAEDLli1Rt25dREdHQ0NDA05OTujTpw/Wr18P4NlOpIeHR5XXRhVj8khERERE7yVRFIESOURRDkFQBVRUFRK7t0VOTg5UVVVx+fJlqKqqKtTp6upKv6urqyvUCYKg8H1LZezcuRMBAQEIDQ2Fi4sL9PT0sHz5cly8eLFK41QUS05ODgBg8+bNCok0AGl9giCgS5cuiIqKgqamJjw8PODo6IiCggJcu3YN586dUzgGS9WDySMRERERvVfEkmLIcx5A/uQuxOJCqVxQ04CqnjFUdetCUHkz/0x+Ocm6cOECbGxsoKqqCnt7exQXF+PixYvSsdX79+8jJSUFzZs3BwBoaGhALpcrjOHk5AS5XI7s7Gx07tz5lWMra+yXPT9SO2nSJKksLS3tlecsS4MGDWBmZoZbt27B29u73Hbu7u7YvHkzNDU1sXjxYqioqKBLly5Yvnw5CgoK4ObmVq1xER+YQ0RERETvEXnePyj4XyKKH95WSBwBQCwuRPHD2yj4XyLkef+8kfkzMzMxffp0pKSk4Mcff8TatWsxdepUAICNjQ369euHcePG4ezZs0hISMCIESPQsGFD9OvXDwBgaWmJnJwcnDp1Cvfu3UNubi5sbW3h7e0NHx8f7Nu3D+np6bh06RJCQkIQERGhdGyWlpa4cuUKUlJScO/ePRQVFZVqY2Njg7i4OBw7dgw3btxAYGAgYmNjq+fmvGDBggUICQnB119/jRs3buDq1asICwvDypUrpTYeHh5ISkpCYmIiOnXqJJVt374dzs7O0NHRqfa43ndMHomIiIjovSDP+wdF2WmAWFJxQ7EERdlpbySB9PHxQV5eHtq3b4/Jkydj6tSpGD9+vFQfFhaGtm3bok+fPnBxcYEoijh8+LB0zNPV1RUTJ07E0KFDYWxsjGXLlkn9fHx8MGPGDNjZ2aF///6IjY2FhYWF0rGNGzcOdnZ2cHZ2hrGxMWJiYkq1mTBhAgYOHIihQ4eiQ4cOuH//vsIuZHUZO3YstmzZgrCwMDg4OMDd3R3h4eGwsrKS2jg4OMDQ0BCtW7eWjud6eHhALpfz+45viCBW9aAzEREREVEtys/PR3p6OqysrKClpaVUH7GkGAX/S6w8cXyRoALNRi2q7Qirh4cHWrdujdWrV1fLeETVRdnPFHceiYiIiOidJ895ULXEEQDEkmf9iAgAk0ciIiIieseJogj5k7uv1Ff+5G6Vn0hK9K7i01aJiIiI6N1WIi/1cBxlicWFQIkcUH39fzZHRUW99hhEtYk7j0RERET0ThPFil8/8ab7E70rmDwSERER0TtNEFRrtX9tsbS0rPWH84iiiPHjx6Nu3boQBAHx8fG1EoeHhwf8/f2l63/DvXkbMXkkIiIionebiioENY1X6iqoaQAqb2fy+LpeTrhexdGjRxEeHo5Dhw4hKysLLVu2rJ7g/o8gCNDS0sIff/yhUN6/f3/4+vpK1/v27cOiRYuq
de4XhYeHw9DQ8I2N/2/B5JGIiIiI3mmCIEBVz/iV+qrqGUMQhGqO6M0qLHy173e+CWlpaTA1NYWrqytMTEygplb1746Kooji4uJy6wVBQFBQUIVj1K1bF3p6elWemxQxeSQiIiKid56qbl1AqOI/fQWVZ/2qiYeHBz777DP4+/ujTp06aNCgATZv3oynT59i1KhR0NPTQ9OmTXHkyBGpj1wux5gxY2BlZQWZTAY7OzusWbNGYVxfX1/0798fixcvhpmZGezs7Mqcf8uWLTA0NMSpU6cAANeuXUOvXr2gq6uLBg0a4JNPPsG9e/ekMaOjo7FmzRoIggBBEJCRkYGHDx/C29sbxsbGkMlksLGxQVhYWJnz+fr64rPPPkNmZiYEQYClpSUAoKCgAFOmTEH9+vWhpaWFTp06ITY2VuoXFRUFQRBw5MgRtG3bFpqamjh79my599XPzw8//PADrl27VuG9r2gXtSr35mVRUVEYNWoUHj9+LN2r4OBgAMDDhw/h4+ODOnXqQFtbG7169UJqamq5cfzbMXkkIiIioneeoKIGdWOrKvVRN7aCoFK9LyfYunUr6tWrh0uXLuGzzz7Dp59+isGDB8PV1RW//fYbevbsiU8++QS5ubkAgJKSEjRq1Ai7d+9GUlISgoKC8MUXX2DXrl0K4546dQopKSk4ceIEDh06VGreZcuWYfbs2Th+/Di6d++OR48eoVu3bnByckJcXByOHj2Kv//+G0OGDAEArFmzBi4uLhg3bhyysrKQlZUFc3NzBAYGIikpCUeOHEFycjI2btyIevXqlbnWNWvWYOHChWjUqBGysrKkBPHzzz/H3r17sXXrVvz2229o2rQpPD098eCB4js1Z8+ejaVLlyI5ORmOjo7l3lM3Nzf06dMHs2fPVv4P8Rr35mWurq5YvXo19PX1pXsVEBAA4FkCHRcXh4MHD+L8+fMQRRG9e/dGUVHRK8Va2/iqDiIiIiJ6L6jK9IH61ii6mw6IJeU3FFSgbmz1rH01a9WqFebNmwcAmDNnDpYuXYp69eph3LhxAICgoCBs3LgRV65cQceOHaGuro4FCxZI/a2srHD+/Hns2rVLIZnR0dHBli1boKFR+ruds2bNwrZt2xAdHY0WLVoAANatWwcnJycsWbJEavfdd9/B3NwcN27cgK2tLTQ0NKCtrQ0TExOpTWZmJpycnODs7AwA0m5iWQwMDKCnpwdVVVVpjKdPn2Ljxo0IDw9Hr169AACbN2/GiRMn8O2332LmzJlS/4ULF+KDDz5Q6r6GhITA0dERZ86cQefOnZXqA7z6vXmRhoYGDAwMIAiCwr1KTU3FwYMHERMTA1dXVwDA9u3bYW5ujgMHDmDw4MFKx/lvweSRiIiIiN4bqjJ9qDRqAXnOA8if3FV4/6OgpgFVPWOo6hpBeEMPyXlxB01VVRVGRkZwcHCQyho0aAAAyM7OlsrWr1+P7777DpmZmcjLy0NhYSFat26tMK6Dg0OZiWNoaCiePn2KuLg4NGnSRCpPSEhAZGQkdHV1S/VJS0srlSA99+mnn+Kjjz6Sdkn79+8vJUbKSEtLQ1FREdzc3KQydXV1tG/fHsnJyQptnyeoymjevDl8fHwwe/ZsxMTEKNWnuu/Ny5KTk6GmpoYOHTpIZUZGRrCzsyu11rcFj60SERER0XtFUFGDmn59aJg1h2YjB2g0/L//mjWHmn79N5Y4As8SJYVYBEGh7PnDeUpKnu2M7ty5EwEBARgzZgyOHz+O+Ph4jBo1qtRDcXR0dMqcr3PnzpDL5aWOuebk5KBv376Ij49X+ElNTUWXLl3Kjb9Xr174448/MG3aNPz111/o3r27dESzupW3pvIsWLAAv/32Gw4cOKBU++q+N+8D7jwSERER0XtJEARAVQ3Cv/ifxM+PPE6aNEkqS0tLU7p/+/bt4efnBy8vL6ipqUmJXps2bbB3715YWlqW+wRUDQ0NyOXyUuXGxsYYOXIkRo4cic6dO2PmzJlY
sWKFUvFYW1tDQ0MDMTExaNy4MQCgqKgIsbGxr/1aEHNzc/j5+eGLL76AtbV1pe1f5968rKx7ZW9vj+LiYly8eFHanb1//z5SUlLQvHnzKq7u34E7j0RERERE/1I2NjaIi4vDsWPHcOPGDQQGBio8mVQZrq6uOHz4MBYsWIDVq1cDACZPnowHDx5g+PDhiI2NRVpaGo4dO4ZRo0ZJSZClpSUuXryIjIwM3Lt3DyUlJQgKCsLPP/+MmzdvIjExEYcOHYK9vb3Ssejo6ODTTz/FzJkzcfToUSQlJWHcuHHIzc3FmDFjqrSussyZMwd//fUXTp48qVT7V703L7O0tEROTg5OnTqFe/fuITc3FzY2NujXrx/GjRuHs2fPIiEhASNGjEDDhg3Rr1+/115rbWDySERERET0LzVhwgQMHDgQQ4cORYcOHXD//n2FXUhlderUCREREZg3bx7Wrl0LMzMzxMTEQC6Xo2fPnnBwcIC/vz8MDQ2hovIsRQgICICqqiqaN28OY2NjZGZmQkNDA3PmzIGjoyO6dOkCVVVV7Ny5s0qxLF26FB999BE++eQTtGnTBjdv3sSxY8dQp06dKq/rZXXr1sWsWbOQn5+vdJ9XuTcvc3V1xcSJEzF06FAYGxtj2bJlAICwsDC0bdsWffr0gYuLC0RRxOHDh0sdX35bCKIoirUdBBERERGRsvLz85Geng4rKytoaWnVdjhEbz1lP1PceSQiIiIiIqJKMXkkIiIiIiKiSjF5JCIiIiIiokoxeSQiIiIiIqJKMXkkIiIiInpHhIeHw9DQsFbm9vX1Rf/+/WtlbqoZTB6JiIiIiOi1rVmzBuHh4a81xubNm9G5c2fUqVMHderUQY8ePXDp0iWFNqIoIigoCKamppDJZOjRowdSU1Ol+oyMDIwZMwZWVlaQyWSwtrbG/PnzUVhYWOacN2/ehJ6eXqmk+9tvv0Xbtm2ho6ODxo0bY9WqVa+1tncBk0ciIiIiIpIUFRW9Uj8DA4PX3vWMiorC8OHDERkZifPnz8Pc3Bw9e/bE7du3pTbLli3D119/jU2bNuHixYvQ0dGBp6en9G7H69evo6SkBN988w0SExOxatUqbNq0CV988UWp+YqKijB8+HB07ty5VN3p06cRGBiIq1evYt68eZgxYwaio6Nfa31vOyaPREREREQ1oKSkBCEhIdKOWKtWrbBnzx4Az3bTevToAU9PTzx/DfuDBw/QqFEjBAUFAXiWWAmCgIiICDg6OkJLSwsdO3bEtWvXKpx348aNsLa2hoaGBuzs7LBt2zaFekEQsHHjRvznP/+Bjo4OFi9eDAD4+eef0aZNG2hpaaFJkyZYsGABiouLy53n5WOrHh4emDJlCj7//HPUrVsXJiYmCA4OrjDW7du3Y9KkSWjdujWaNWuGLVu2oKSkBKdOnZLu0+rVqzFv3jz069cPjo6O+P777/HXX3/hwIEDAAAvLy+EhYWhZ8+eaNKkCf7zn/8gICAA+/btKzXfvHnz0KxZMwwZMqTMWPr3748mTZpg7Nix0NfXx59//llh/O86Jo9ERERERDUgJCQE33//PTZt2oTExERMmzYNI0aMQHR0NARBwNatWxEbG4uvv/4aADBx4kQ0bNhQSh6fmzlzJkJDQxEbGwtjY2P07du33N3C/fv3Y+rUqZgxYwauXbuGCRMmYNSoUYiMjFRoFxwcjAEDBuDq1asYPXo0zpw5Ax8fH0ydOhVJSUn45ptvEB4eLiWWytq6dSt0dHRw8eJFLFu2DAsXLsSJEyeU7p+bm4uioiLUrVsXAJCeno47d+6gR48eUhsDAwN06NAB58+fL3ecx48fS2M8d/r0aezevRvr16+vNI7g4GBoa2ujV69eSsf+ThKJiIiIiN4ieXl5YlJSkpiXl1fboSgtPz9f1NbWFs+dO6dQPmbMGHH48OHS9a5du0QtLS1x9uzZoo6Ojnjjxg2pLjIyUgQg7ty5Uyq7f/++KJPJxJ9++kkURVEM
CwsTDQwMpHpXV1dx3LhxCnMOHjxY7N27t3QNQPT391do0717d3HJkiUKZdu2bRNNTU3LXePIkSPFfv36Sdfu7u5ip06dFNq0a9dOnDVrVrljvOzTTz8VmzRpIv2tY2JiRADiX3/9VWpNQ4YMKXOM1NRUUV9fX/zvf/8rld27d080NzcXo6OjRVEsfd9etGDBArFBgwbitWvXlI77baPsZ0qtlnNXIiIiIqJ33s2bN5Gbm4sPPvhAobywsBBOTk7S9eDBg7F//34sXboUGzduhI2NTamxXFxcpN/r1q0LOzs7JCcnlzlvcnIyxo8fr1Dm5uaGNWvWKJQ5OzsrXCckJCAmJkZhp1EulyM/Px+5ubnQ1tauZMXPODo6KlybmpoiOztbqb5Lly7Fzp07ERUVBS0tLaX6vOz27dvw8vLC4MGDMW7cOKl83Lhx+Pjjj9GlS5cK+//9998IDg7GkSNH0KJFi1eK4V3C5JGIiIiI6A3LyckBAERERKBhw4YKdZqamtLvubm5uHz5MlRVVRWeIPqm6ejoKFzn5ORgwYIFGDhwYKm2VUnk1NXVFa4FQUBJSUml/VasWIGlS5fi5MmTCgmoiYkJgGdJnampqVT+999/o3Xr1gpj/PXXX+jatStcXV3x3//+V6Hu9OnTOHjwIFasWAHg2XcpS0pKoKamhv/+978YPXo0AODOnTsQRRF2dnZKr/ldxuSRiIiIiOgNa968OTQ1NZGZmQl3d/dy282YMQMqKio4cuQIevfujQ8//BDdunVTaHPhwgVYWFgAAB4+fIgbN27A3t6+zPHs7e0RExODkSNHSmUxMTFo3rx5hfG2adMGKSkpaNq0qbJLrDbLli3D4sWLcezYsVI7olZWVjAxMcGpU6ekZPGff/7BxYsX8emnn0rtbt++ja5du6Jt27YICwuDiorio17Onz8PuVwuXf/888/46quvcO7cOYXk3tbWFrGxsTAzM3sDK337MHkkIiIiInrD9PT0EBAQgGnTpqGkpASdOnXC48ePERMTA319fYwcORIRERH47rvvcP78ebRp0wYzZ87EyJEjceXKFdSpU0caa+HChTAyMkKDBg0wd+5c1KtXT+Eppy+aOXMmhgwZAicnJ/To0QO//PIL9u3bh5MnT1YYb1BQEPr06QMLCwsMGjQIKioqSEhIwLVr1/Dll19W561R8NVXXyEoKAg7duyApaUl7ty5AwDQ1dWFrq4uBEGAv78/vvzyS9jY2MDKygqBgYEwMzOT7sHt27fh4eGBxo0bY8WKFbh79640/vOdy5eT7bi4OKioqKBly5YK5VevXoWPjw9OnTpVasf4fcTkkYiIiIioBixatAjGxsYICQnBrVu3YGhoiDZt2uCLL77A3bt3MWbMGAQHB6NNmzYAgAULFuD48eOYOHEifvrpJ2mcpUuXYurUqUhNTUXr1q3xyy+/QENDo8w5+/fvjzVr1mDFihWYOnUqrKysEBYWBg8Pjwpj9fT0xKFDh7Bw4UJ89dVXUFdXR7NmzTB27Nhqux9l2bhxIwoLCzFo0CCF8vnz50uv+fj888/x9OlTjB8/Ho8ePUKnTp1w9OhR6TjtiRMncPPmTdy8eRONGjVSGEf8v9egKCs3NxcpKSmv/O7Ld40gVvUOEhERERHVovz8fKSnp8PKyuqVH6TyNoqKikLXrl3x8OFDGBoa1nY49A5R9jPF9zwSERERERFRpZg8EhERERERUaX4nUciIiIioreAh4dHlb+zR1SduPNIRERERERElWLySERERERERJVi8khERERERESVYvJIRERERERElWLySERERERERJVi8khERERERESVYvJIRERERPSOy8jIgCAIiI+Pr+1QakR4eDgMDQ1rO4x3DpNHIiIiIqIaIJfLERgYCCsrK8hkMlhbW2PRokVVenfj7Nmz0axZM4Wy69evQxAE+Pr6KpSHh4dDU1MTeXl51RE+gGfvmvT396+28aqDpaUlVq9eXdthvBeY
PBIRERER1YCvvvoKGzduxLp165CcnIyvvvoKy5Ytw9q1a5Ueo2vXrkhJScGdO3ekssjISJibmyMqKkqhbWRkJDp27AiZTFZdS3hjRFFEcXFxbYdBlWDySERERERUA86dO4d+/frhww8/hKWlJQYNGoSePXvi0qVLSo/RqVMnqKurKySKUVFRmDx5Mh48eICMjAyF8q5duyr0v3XrFrp27QptbW20atUK58+fl+ru37+P4cOHo2HDhtDW1oaDgwN+/PFHqd7X1xfR0dFYs2YNBEGAIAgK871o27ZtcHZ2hp6eHkxMTPDxxx8jOztbITZBEHDkyBG0bdsWmpqaOHv2LEpKShASEiLtzrZq1Qp79uwp9354eHjgjz/+wLRp06SYXnTs2DHY29tDV1cXXl5eyMrKUqjfsmUL7O3toaWlhWbNmmHDhg3lzkVMHomIiIiIaoSrqytOnTqFGzduAAASEhJw9uxZ9OrVS2oTHBwMS0vLcsfQ0dFBu3btEBkZKZVFRUWhe/fucHNzk8pv3bqFzMzMUsnj3LlzERAQgPj4eNja2mL48OHSjl9+fj7atm2LiIgIXLt2DePHj8cnn3wiJbdr1qyBi4sLxo0bh6ysLGRlZcHc3LzMOIuKirBo0SIkJCTgwIEDyMjIKHWsFnh2DHfp0qVITk6Go6MjQkJC8P3332PTpk1ITEzEtGnTMGLECERHR5c5z759+9CoUSMsXLhQium53NxcrFixAtu2bcOvv/6KzMxMBAQESPXbt29HUFAQFi9ejOTkZCxZsgSBgYHYunVruff/vScSEREREb1F8vLyxKSkJDEvL6+2Q6kSuVwuzpo1SxQEQVRTUxMFQRCXLFmi0Gbt2rVit27dKhxn7ty5oq2trSiKopiYmCjq6+uLxcXF4pIlS0QfHx9RFEXx22+/FbW0tMT8/HxRFEUxPT1dBCBu2bJFGicxMVEEICYnJ5c714cffijOmDFDunZ3dxenTp1apXWLoijGxsaKAMQnT56IoiiKkZGRIgDxwIEDUpv8/HxRW1tbPHfunELfMWPGiMOHDy937MaNG4urVq1SKAsLCxMBiDdv3pTK1q9fLzZo0EC6tra2Fnfs2KHQb9GiRaKLi0uV1/e2U/YzpVarmSsRERER0Xti165d2L59O3bs2IEWLVogPj4e/v7+MDMzw8iRIwEAfn5+8PPzq3AcDw8PLF68GFlZWYiKikKnTp2gqqoKd3d3bNq0CcCz3UhXV1doamoq9HV0dJR+NzU1BQBkZ2ejWbNmkMvlWLJkCXbt2oXbt2+jsLAQBQUF0NbWrvJaL1++jODgYCQkJODhw4coKSkBAGRmZqJ58+ZSO2dnZ+n3mzdvIjc3Fx988IHCWIWFhXBycqpyDNra2rC2tpauTU1NpaOzT58+RVpaGsaMGYNx48ZJbYqLi2FgYFDlud4XTB6JiIiIiGrAzJkzMXv2bAwbNgwA4ODggD/++AMhISFS8qgMNzc3aGhoIDIyEpGRkXB3dwcAtGvXDvfu3cOtW7cQFRWFCRMmlOqrrq4u/f78+4HPE7vly5djzZo1WL16NRwcHKCjowN/f38UFhZWaZ1Pnz6Fp6cnPD09sX37dhgbGyMzMxOenp6lxtLR0ZF+z8nJAQBERESgYcOGCu1eToKV8eJagWfrFf/vybbP59q8eTM6dOig0E5VVbXKc70vmDwSEREREdWA3NxcqKgoPnJEVVVVSt6UJZPJ0KFDB0RFRSE6OhozZ84E8CxZ6tixI7799lv8+eefpb7vWJmYmBj069cPI0aMAPAsqbxx44bCTqGGhgbkcnmF41y/fh3379/H0qVLpe9ExsXFVTp/8+bNoampiczMTCkhVoYyMb2sQYMGMDMzw61bt+Dt7V2lvu8zJo9ERERERDWgb9++WLx4MSwsLNCiRQv8/vvvWLlyJUaPHi21WbduHfbv349Tp05VOFbXrl2xatUqAECbNm2kcnd3d6xYsUJ6sE5V2NjYYM+e
PTh37hzq1KmDlStX4u+//1ZIHi0tLXHx4kVkZGRAV1cXdevWLZUQW1hYQENDA2vXrsXEiRNx7do1LFq0qNL59fT0EBAQgGnTpqGkpASdOnXC48ePERMTA319/XJ3Zy0tLfHrr79i2LBh0NTURL169ZRa74IFCzBlyhQYGBjAy8sLBQUFiIuLw8OHDzF9+nSlxnjf8GmrREREREQ1YO3atRg0aBAmTZoEe3t7BAQEYMKECQqJ1b1795CWllbpWF27dsWTJ0/g5uYGNbX/vx/k7u6OJ0+eSK/0qIp58+ahTZs28PT0hIeHB0xMTNC/f3+FNgEBAVBVVUXz5s2l46gvMzY2Rnh4OHbv3o3mzZtj6dKlWLFihVIxLFq0CIGBgQgJCYG9vT28vLwQEREBKyurcvssXLgQGRkZsLa2hrGxsdLrHTt2LLZs2YKwsDA4ODjA3d0d4eHhFc71vhPE5wd/iYiIiIjeAvn5+UhPT4eVlRW0tLRqOxyit56ynynuPBIREREREVGlmDwSERERERFRpZg8EhERERERUaWYPBIREREREVGlmDwSEREREVGZMjIyIAgC4uPjle7j6+tb6imt9G5g8khERERE9JYIDw+HoaHhGxm7rKTP3NwcWVlZaNmy5RuZk94uapU3ISIiIiKi95GqqipMTExqOwz6l+DOIxERERFRDfDw8ICfnx/8/PxgYGCAevXqITAwEC++dv3hw4fw8fFBnTp1oK2tjV69eiE1NRUAEBUVhVGjRuHx48cQBAGCICA4OBgAUFBQgICAADRs2BA6Ojro0KEDoqKipHGf71geO3YM9vb20NXVhZeXF7KysgAAwcHB2Lp1K37++Wdp7KioqFLHVuVyOcaMGQMrKyvIZDLY2dlhzZo1VboPlcXy3JYtW2Bvbw8tLS00a9YMGzZskOoGDRoEPz8/6drf3x+CIOD69esAgMLCQujo6ODkyZNVio0qxuSRiIiIiKiGbN26FWpqarh06RLWrFmDlStXYsuWLVK9r68v4uLicPDgQZw/fx6iKKJ3794oKiqCq6srVq9eDX19fWRlZSErKwsBAQEAAD8/P5w/fx47d+7ElStXMHjwYHh5eUmJJwDk5uZixYoV2LZtG3799VdkZmZK/QMCAjBkyBApicvKyoKrq2up+EtKStCoUSPs3r0bSUlJCAoKwhdffIFdu3ZV6T5UFAsAbN++HUFBQVi8eDGSk5OxZMkSBAYGYuvWrQAAd3d3heQ4Ojoa9erVk8piY2Ole0bVh8dWiYiIiIhqiLm5OVatWgVBEGBnZ4erV69i1apVGDduHFJTU3Hw4EHExMRISc/27dthbm6OAwcOYPDgwTAwMIAgCApHSTMzMxEWFobMzEyYmZkBeJYMHj16FGFhYViyZAkAoKioCJs2bYK1tTWAZwnnwoULAQC6urqQyWQoKCio8Jiquro6FixYIF1bWVnh/Pnz2LVrF4YMGaL0fagoFgCYP38+QkNDMXDgQGmepKQkfPPNNxg5ciQ8PDwwdepU3L17F2pqakhKSkJgYCCioqIwceJEREVFoV27dtDW1lY6Jqock0ciIiIiohrSsWNHCIIgXbu4uCA0NBRyuRzJyclQU1NDhw4dpHojIyPY2dkhOTm53DGvXr0KuVwOW1tbhfKCggIYGRlJ19ra2lKyBgCmpqbIzs6u8hrWr1+P7777DpmZmcjLy0NhYSFat25dpTEqiuXp06dIS0vDmDFjMG7cOKlNcXExDAwMAAAtW7ZE3bp1ER0dDQ0NDTg5OaFPnz5Yv349gGc7kR4eHlVeG1WMySMRERER0VssJycHqqqquHz5MlRVVRXqdHV1pd/V1dUV6gRBUPi+pTJ27tyJgIAAhIaGwsXFBXp6eli+fDkuXrxYpXEqiiUnJwcAsHnzZoVEGoC0PkEQ0KVLF0RFRUFTUxMeHh5wdHREQUEBrl27hnPnzikcg6XqweSRiIiIiKiGvJxkXbhwATY2NlBVVYW9vT2K
i4tx8eJF6djq/fv3kZKSgubNmwMANDQ0IJfLFcZwcnKCXC5HdnY2Onfu/MqxlTX2y54fqZ00aZJUlpaW9spzlqVBgwYwMzPDrVu34O3tXW47d3d3bN68GZqamli8eDFUVFTQpUsXLF++HAUFBXBzc6vWuIgPzCEiIiIiqjGZmZmYPn06UlJS8OOPP2Lt2rWYOnUqAMDGxgb9+vXDuHHjcPbsWSQkJGDEiBFo2LAh+vXrBwCwtLRETk4OTp06hXv37iE3Nxe2trbw9vaGj48P9u3bh/T0dFy6dAkhISGIiIhQOjZLS0tcuXIFKSkpuHfvHoqKikq1sbGxQVxcHI4dO4YbN24gMDAQsbGx1XNzXrBgwQKEhITg66+/xo0bN3D16lWEhYVh5cqVUhsPDw8kJSUhMTERnTp1ksq2b98OZ2dn6OjoVHtc7zsmj0RERERENcTHxwd5eXlo3749Jk+ejKlTp2L8+PFSfVhYGNq2bYs+ffrAxcUFoiji8OHD0jFPV1dXTJw4EUOHDoWxsTGWLVsm9fPx8cGMGTNgZ2eH/v37IzY2FhYWFkrHNm7cONjZ2cHZ2RnGxsaIiYkp1WbChAkYOHAghg4dig4dOuD+/fsKu5DVZezYsdiyZQvCwsLg4OAAd3d3hIeHw8rKSmrj4OAAQ0NDtG7dWjqe6+HhAblczu87viGCWNWDzkREREREtSg/Px/p6emwsrKClpZWbYejNA8PD7Ru3RqrV6+u7VCIFCj7meLOIxEREREREVWKySMRERERERFVik9bJSIiIiKqAVFRUbUdAtFr4c4jERERERERVYrJIxERERG9l0RRRGFOEfIe5KMwpwjv2nMkLS0t+XCeahYeHg5DQ8PaDqPW8NgqEREREb1XivKKkRX7N/48k4W8+/lSucxIC+adTWHargHUZfxn8st8fX3x6NEjHDhwoLZDqTVDhw5F7969azuMWsNPBRERERG9N+5ff4gr4cmQF5aUqsu7n48bB9KRdvgPOPraw6hZnVqIUFFhYSE0NDRqO4xaVVRUJL3n8k2Ry+UQBAEqKhUfzJTJZJDJZG80ln8zHlslIiIiovfC/esP8fvmRMiLSieOL5IXleD3zYm4f/1htc7v4eEBPz8/+Pn5wcDAAPXq1UNgYKDCcVlLS0ssWrQIPj4+0NfXx/jx4wEAe/fuRYsWLaCpqQlLS0uEhoYqjJ2dnY2+fftCJpPBysoK27dvV6jPyMiAIAiIj4+Xyh49egRBEBQe5JOYmIg+ffpAX18fenp66Ny5M9LS0hAcHIytW7fi559/hiAIUr/CwkL4+fnB1NQUWlpaaNy4MUJCQsq9ByUlJVi4cCEaNWoETU1NtG7dGkePHi0V508//QR3d3doaWmVWgvw7MhxcHAwLCwsoKmpCTMzM0yZMkWqLygoQEBAABo2bAgdHR106NBBYZ3Pj58ePHgQzZs3h6amJrZs2QItLS08evRIYa6pU6eiW7duCv1e9Msvv6Bdu3bQ0tJCvXr1MGDAAKXj+OOPP9C3b1/UqVMHOjo6aNGiBQ4fPlzu/att3HkkIiIiondeUV4xroQnP7uo7KuNIgABuBKejE7z21frEdatW7dizJgxuHTpEuLi4jB+/HhYWFhg3LhxUpsVK1YgKCgI8+fPBwBcvnwZQ4YMQXBwMIYOHYpz585h0qRJMDIygq+vL4BnR0r/+usvREZGQl1dHVOmTEF2dnaVYrt9+za6dOkCDw8PnD59Gvr6+oiJiUFxcTECAgKQnJyMf/75B2FhYQCAunXr4uuvv8bBgwexa9cuWFhY4M8//8Sff/5Z7hxr1qxBaGgovvnmGzg5OeG7777Df/7zHyQmJsLGxkZqN3v2bISGhsLJyanMl9bv3bsXq1atws6dO9GiRQvcuXMHCQkJUr2fnx+SkpKwc+dOmJmZYf/+/fDy8sLVq1eleXJzc/HVV19hy5YtMDIyQqNGjRAUFIS9e/dizJgxAJ7tSP70009YvHhx
meuJiIjAgAEDMHfuXHz//fcoLCxUSP4qi2Py5MkoLCzEr7/+Ch0dHSQlJUFXV7cKf7WaxeSRiIiIiN55WbF/l3lUtVwiIC8sQVZsNiy6mFVbHObm5li1ahUEQYCdnR2uXr2KVatWKSSP3bp1w4wZM6Rrb29vdO/eHYGBgQAAW1tbJCUlYfny5fD19cWNGzdw5MgRXLp0Ce3atQMAfPvtt7C3t69SbOvXr4eBgQF27twpHRO1tbWV6mUyGQoKCmBiYiKVZWZmwsbGBp06dYIgCGjcuHGFc6xYsQKzZs3CsGHDAABfffUVIiMjsXr1aqxfv15q5+/vj4EDB5Y7TmZmJkxMTNCjRw+oq6vDwsIC7du3l+rCwsKQmZkJM7Nnf7uAgAAcPXoUYWFhWLJkCYBnx2E3bNiAVq1aSeMOGzYMO3bskJLHU6dO4dGjR/joo4/KjGPx4sUYNmwYFixYIJU9H0+ZODIzM/HRRx/BwcEBANCkSZMK719t47FVIiIiInqniaKIP89kvVLfP8/8Va1PYe3YsSMEQZCuXVxckJqaCrlcLpU5Ozsr9ElOToabm5tCmZubm9QvOTkZampqaNu2rVTfrFmzKj8VND4+Hp07d67S9wt9fX0RHx8POzs7TJkyBcePHy+37T///IO//vqrzLUkJycrlL18D142ePBg5OXloUmTJhg3bhz279+P4uJiAMDVq1chl8tha2sLXV1d6Sc6OhppaWnSGBoaGnB0dFQY19vbG1FRUfjrr78AANu3b8eHH35Y7r2Mj49H9+7dy6xTJo4pU6bgyy+/hJubG+bPn48rV65UuO7axp1HIiIiInqnFT0tVniqalXk3c9HUW4xNHTe7ANbXqSjo1PtYz5/EMyLiXBRUZFCm1d5EEybNm2Qnp6OI0eO4OTJkxgyZAh69OiBPXv2vFa8ld0Dc3NzpKSk4OTJkzhx4gQmTZqE5cuXIzo6Gjk5OVBVVcXly5ehqqqq0O/FI6EymUwhkQeAdu3awdraGjt37sSnn36K/fv3Izw8vNw4KrpnysQxduxYeHp6IiIiAsePH0dISAhCQ0Px2WefVbj+2sKdRyIiIiJ6p8kL5ZU3qqh/wev1f9HFixcVri9cuAAbG5tSycWL7O3tERMTo1AWExMDW1tbqKqqolmzZiguLsbly5el+pSUFIUHvxgbGwMAsrL+/w7siw/PAQBHR0ecOXOmVFL5nIaGhsIO6XP6+voYOnQoNm/ejJ9++gl79+7FgwcPymxnZmZW5lqaN29e9uIrIJPJ0LdvX3z99deIiorC+fPncfXqVTg5OUEulyM7OxtNmzZV+HnxyG15vL29sX37dvzyyy9QUVHBhx9+WG5bR0dHnDp1qsw6ZeMwNzfHxIkTsW/fPsyYMQObN2+u8r2oKdx5JCIiIqJ3mqpG+YmZUv01X6//izIzMzF9+nRMmDABv/32G9auXVvqyakvmzFjBtq1a4dFixZh6NChOH/+PNatW4cNGzYAAOzs7ODl5YUJEyZg48aNUFNTg7+/v8KumEwmQ8eOHbF06VJYWVkhOzsb8+bNU5jHz88Pa9euxbBhwzBnzhwYGBjgwoULaN++Pezs7GBpaYljx44hJSUFRkZGMDAwwNq1a2FqagonJyeoqKhg9+7dMDExKfeY58yZMzF//nxYW1ujdevWCAsLQ3x8fJlPVK1IeHg45HI5OnToAG1tbfzwww+QyWRo3LgxjIyM4O3tDR8fH+mhO3fv3sWpU6fg6OhYYTIIPEseg4ODsXjxYgwaNAiamprltp0/fz66d+8Oa2trDBs2DMXFxTh8+DBmzZoFW1vbSuPw9/dHr169YGtri4cPHyIyMrLK31WtSdx5JCIiIqJ3mrqOGmRGpZ/YqQyZkRbUtatvv8XHxwd5eXlo3749Jk+ejKlTp0qv4yhPmzZtsGvXLuzcuRMtW7ZEUFAQFi5cKD1pFQDCwsJgZmYGd3d3DBw4EOPHj0f9+vUVxvnuu+9QXFyM
tm3bwt/fH19++aVCvZGREU6fPo2cnBy4u7ujbdu22Lx5s/QdyHHjxsHOzg7Ozs4wNjZGTEwM9PT0sGzZMjg7O6Ndu3bIyMjA4cOHy31f4pQpUzB9+nTMmDEDDg4OOHr0KA4ePKjwpFVlGBoaYvPmzXBzc4OjoyNOnjyJX375BUZGRtL98PHxwYwZM2BnZ4f+/fsjNjYWFhYWlY7dtGlTtG/fHleuXIG3t3eFbT08PLB7924cPHgQrVu3Rrdu3XDp0iWpvrI45HI5Jk+eDHt7e3h5ecHW1lb6PwX+jQSxOr8BTERERET0huXn5yM9PR1WVlZlvsahLJm/3saNA+lVnsu2f5Nqe9qqh4cHWrdujdWrV1fLeETVRdnPFHceiYiIiOidZ9quAVQ1VACh8rYAAAFQ1VCBabv6lbclek8weSQiIiKid566TA2Ovv/3XbLKEsj/q3ccZQ91GR8RQvQcPw1ERERE9F4walYHTuNa4Ep4MuSFJeW2U1VXgeMoexjZ1anW+aOioqp1PKKaxuSRiIiIiN4bRs3qoNP89siKzcafZ/5SeP+jzEgL5p3NYNauPtS440hUCo+tEhEREdF7RV2mBosuZnD9oi26LOoAt3nO6LKoA1y/aAuLLma1njiGh4crvOoiODgYrVu3rrV4XkVGRgYEQSj1LskXRUVFQRAEhfdRvkwQBBw4cKDa43sdlpaW7+1Dj5g8EhEREdF7SRAEaOioQ1ZXCxo66hAEZZ+m82p8fX0hCAKWLl2qUH7gwAGFuYcOHYobN2680VjeFllZWejVq5fS7V9OvKl6MXkkIiIiIqohWlpa+Oqrr/Dw4cNy28hkslLvaHxfmZiYQFNTs9rHLSwsrPYx3wdMHomIiIiIakiPHj1gYmKCkJCQcttUtnuWlpaGJk2awM/PD6IooqCgAAEBAWjYsCF0dHTQoUOHSh/O8+jRI0yYMAENGjSAlpYWWrZsiUOHDkn1e/fuRYsWLaCpqQlLS0uEhoYq9C/rOKmhoSHCw8PLnfPw4cOwtbWFTCZD165dkZGRUWGML8/z/Cjsvn370LVrV2hra6NVq1Y4f/48gGfHYEeNGoXHjx9DEAQIgoDg4GAAz46aLlq0CD4+PtDX18f48eOVWmd2djb69u0LmUwGKysrbN++XaG+rOO5jx49giAICn+DxMRE9OnTB/r6+tDT00Pnzp2RlpYm1W/ZsgX29vbQ0tJCs2bNsGHDhkrvTW1g8khEREREVENUVVWxZMkSrF27Fv/73/+q3P/KlSvo1KkTPv74Y6xbtw6CIMDPzw/nz5/Hzp07ceXKFQwePBheXl5ITU0tc4ySkhL06tULMTEx+OGHH5CUlISlS5dCVVUVAHD58mUMGTIEw4YNw9WrVxEcHIzAwMAKE8PK/Pnnnxg4cCD69u2L+Ph4jB07FrNnz36lsebOnYuAgADEx8fD1tYWw4cPR3FxMVxdXbF69Wro6+sjKysLWVlZCAgIkPqtWLECrVq1wu+//47AwECl1unr64s///wTkZGR2LNnDzZs2IDs7OwqxXv79m106dIFmpqaOH36NC5fvozRo0ejuLgYALB9+3YEBQVh8eLFSE5OxpIlSxAYGIitW7e+0v15k/gYKSIiIiKiGjRgwAC0bt0a8+fPx7fffqt0v3PnzqFPnz6YO3cuZsyYAQDIzMxEWFgYMjMzYWZmBgAICAjA0aNHERYWhiVLlpQa5+TJk7h06RKSk5Nha2sLAGjSpIlUv3LlSnTv3h2BgYEAAFtbWyQlJWH58uXw9fV9pTVv3LgR1tbW0s6enZ0drl69iq+++qrKYwUEBODDDz8EACxYsAAtWrTAzZs30axZMxgYGEAQBJiYmJTq161bN+m+AYC3t3eF67xx4waOHDmCS5cuoV27dgCAb7/9Fvb29lWKd/369TAwMMDOnTuhrq4uzfXc/PnzERoaioEDBwIArKyskJSUhG+++QYjR46s0lxvGnceiYiIiIhq2FdffYWt
W7ciOTlZqfaZmZn44IMPEBQUpJAAXb16FXK5HLa2ttDV1ZV+oqOjFY5Fvig+Ph6NGjVSSGBelJycDDc3N4UyNzc3pKamQi6XK7nC0mN26NBBoczFxeWVxnJ0dJR+NzU1BQCldgOdnZ1LxVTROpOTk6Gmpoa2bdtK9c2aNavyA3ni4+PRuXNnKXF80dOnT5GWloYxY8Yo/P2+/PLLcv9+tYk7j0RERERENaxLly7w9PTEnDlzlNrNMzY2hpmZGX788UeMHj0a+vr6AICcnByoqqri8uXL0rHT53R1dcscSyaTvXb8giBAFEWFsqKiotceVxkvJmHPn1JbUlJSaT8dHZ1qj0VF5dle3Iv34uX7UNH9zsnJAQBs3ry5VHL98t/z34A7j0REREREtWDp0qX45ZdfpAe+VEQmk+HQoUPQ0tKCp6cnnjx5AgBwcnKCXC5HdnY2mjZtqvBT1tFN4NnO3f/+979yXwdib2+PmJgYhbKYmBjY2tpKCY2xsTGysrKk+tTUVOTm5pYbv729PS5duqRQduHChUrXXVUaGhpK745Wts5mzZqhuLgYly9flupTUlIU3ktpbGwMAAr34uV3Wzo6OuLMmTNlJtcNGjSAmZkZbt26VervZ2VlpdQ6ahKTRyIiIiKiWuDg4ABvb298/fXXSrXX0dFBREQE1NTU0KtXL+Tk5MDW1hbe3t7w8fHBvn37kJ6ejkuXLiEkJAQRERFljuPu7o4uXbrgo48+wokTJ5Ceno4jR47g6NGjAIAZM2bg1KlTWLRoEW7cuIGtW7di3bp1Cg+f6datG9atW4fff/8dcXFxmDhxYpnHMp+bOHEiUlNTMXPmTKSkpGDHjh2v9QCe8lhaWiInJwenTp3CvXv3KkxoK1unnZ0dvLy8MGHCBFy8eBGXL1/G2LFjFXYSZTIZOnbsiKVLlyI5ORnR0dGYN2+ewjx+fn74559/MGzYMMTFxSE1NRXbtm1DSkoKgGff2wwJCcHXX3+NGzdu4OrVqwgLC8PKlSur/f68LiaPRERERES1ZOHChUoduXxOV1cXR44cgSiK+PDDD/H06VOEhYXBx8cHM2bMgJ2dHfr374/Y2FhYWFiUO87evXvRrl07DB8+HM2bN8fnn38u7di1adMGu3btws6dO9GyZUsEBQVh4cKFCsdrQ0NDYW5ujs6dO+Pjjz9GQEAAtLW1y53PwsICe/fuxYEDB9CqVSts2rSpzIf5vC5XV1dMnDgRQ4cOhbGxMZYtW1ZuW2XWGRYWBjMzM7i7u2PgwIEYP358qXdwfvfddyguLkbbtm3h7++PL7/8UqHeyMgIp0+fRk5ODtzd3dG2bVts3rxZSrbHjh2LLVu2ICwsDA4ODnB3d0d4ePi/cudREF8+rExERERE9C+Wn5+P9PR0WFlZQUtLq7bDIXrrKfuZ4s4jERERERERVYrJIxEREREREVWKySMRERERERFViskjERERERERVYrJIxEREREREVWKySMRERERUQ3w8PCAv79/bYdB9MqYPBIRERER1YB9+/Zh0aJFrzVGSEgI2rVrBz09PdSvXx/9+/eXXjb/XH5+PiZPngwjIyPo6urio48+wt9//y3VJyQkYPjw4TA3N4dMJoO9vT3WrFlT7pwxMTFQU1ND69atFcqXLl2KFi1aQFtbG7a2ttixY8drrY3+/Zg8EhERERHVgLp160JPT++1xoiOjsbkyZNx4cIFnDhxAkVFRejZsyeePn0qtZk2bRp++eUX7N69G9HR0fjrr78wcOBAqf7y5cuoX78+fvjhByQmJmLu3LmYM2cO1q1bV2q+R48ewcfHB927dy9Vd+bMGaxatQrXrl3DiBEj4OPjg1u3br3W+ujfTRBFUaztIIiIiIiIlKXsC83/bTw8PNC6dWusXr0aAGBpaYnx48fj5s2b2L17N+rUqYN58+Zh/PjxSo959+5d1K9fH9HR0ejSpQseP34MY2Nj7NixA4MGDQIAXL9+Hfb29jh//jw6
duxY5jiTJ09GcnIyTp8+rVA+bNgw2NjYQFVVFQcOHEB8fHyZ/R88eAAjIyOcOXMGnTp1Ujp++ndQ9jPFnUciIiIioloSGhoKZ2dn/P7775g0aRI+/fTTUsdQK/L48WMAz3Y1gWe7ikVFRejRo4fUplmzZrCwsMD58+crHOf5GM+FhYXh1q1bmD9/foUxiKKIGTNmoGXLlmjfvr3SsdPbh8kjEREREVEt6d27NyZNmoSmTZti1qxZqFevHiIjI5XqW1JSAn9/f7i5uaFly5YAgDt37kBDQwOGhoYKbRs0aIA7d+6UOc65c+fw008/Kex4pqamYvbs2fjhhx+gpqZWYRxjx47FuXPncPToUWhoaCgVO72dKv5fAhERERERvTGOjo7S74IgwMTEBNnZ2Ur1nTx5Mq5du4azZ8++8vzXrl1Dv379MH/+fPTs2RMAIJfL8fHHH2PBggWwtbWtsH9sbCy+++47XL9+HQ0bNnzlOOjtwOSRiIiIiKiWqKurK1wLgoCSkpJK+/n5+eHQoUP49ddf0ahRI6ncxMQEhYWFePTokcLu499//w0TExOFMZKSktC9e3eMHz8e8+bNk8qfPHmCuLg4/P777/Dz8wPwbJdTFEWoqanh+PHj6NatGwDgr7/+AgDY2dlVbeH0VmLySERERET0lhBFEZ999hn279+PqKgoWFlZKdS3bdsW6urqOHXqFD766CMAQEpKCjIzM+Hi4iK1S0xMRLdu3TBy5EgsXrxYYQx9fX1cvXpVoWzDhg04ffo09uzZozCnu7s7YmNjq3uZ9C/F5JGIiIiI6C0xefJk7NixAz///DP09PSk7zEaGBhAJpPBwMAAY8aMwfTp01G3bl3o6+vjs88+g4uLi/Sk1WvXrqFbt27w9PTE9OnTpTFUVVVhbGwMFRUV6TuUz9WvXx9aWlqlyiMjIzFnzhxcv369BlZPtY3JIxERERHRW2Ljxo0Anr3240VhYWHw9fUFAKxatQoqKir46KOPUFBQAE9PT2zYsEFqu2fPHty9exc//PADfvjhB6m8cePGyMjIqFI8jx8/rtLTYentxvc8EhEREdFb5W19zyPRvxXf80hERERERETVhskjERERERERVYrJIxEREREREVWKySMRERERERFViskjERERERERVYrJIxEREREREVWKySMRERERERFViskjERERERERVYrJIxEREREREVWKySMRERER0TsuIyMDgiAgPj6+tkOpEeHh4TA0NKztMN45TB6JiIiIiGrAkydP4O/vj8aNG0Mmk8HV1RWxsbFVGmP27Nlo1qyZQtn169chCAJ8fX0VysPDw6GpqYm8vLzXDV3i4eEBf3//ahuvOlhaWmL16tW1HcZ7gckjEREREVENGDt2LE6cOIFt27bh6tWr6NmzJ3r06IHbt28rPUbXrl2RkpKCO3fuSGWRkZEwNzdHVFSUQtvIyEh07NgRMpmsupbwxoiiiOLi4toOgyrB5JGIiIiI6A3Ly8vD3r17sWzZMnTp0gVNmzZFcHAwmjZtio0bNyo9TqdOnaCurq6QKEZFRWHy5Ml48OABMjIyFMq7du2q0P/WrVvo2rUrtLW10apVK5w/f16qu3//PoYPH46GDRtCW1sbDg4O+PHHH6V6X19fREdHY82aNRAEAYIgKMz3om3btsHZ2Rl6enowMTHBxx9/jOzsbIXYBEHAkSNH0LZtW2hqauLs2bMoKSlBSEgIrKysIJPJ0KpVK+zZs6fc++Hh4YE//vgD06ZNk2J60bFjx2Bvbw9dXV14eXkhKytLoX7Lli2wt7eHlpYWmjVrhg0bNpQ7FzF5JCIiIiJ644qLiyGXy6GlpaVQLpPJcPbsWek6ODgYlpaW5Y6jo6ODdu3aITIyUiqLiopC9+7d4ebmJpXfunULmZmZpZLHuXPnIiAgAPHx8bC1tcXw4cOlHb/8/Hy0bdsWERERuHbtGsaPH49PPvkEly5dAgCsWbMGLi4uGDduHLKyspCV
lQVzc/My4ywqKsKiRYuQkJCAAwcOICMjo9SxWuDZMdylS5ciOTkZjo6OCAkJwffff49NmzYhMTER06ZNw4gRIxAdHV3mPPv27UOjRo2wcOFCKabncnNzsWLFCmzbtg2//vorMjMzERAQINVv374dQUFBWLx4MZKTk7FkyRIEBgZi69at5d7/955IRERERPQWycvLE5OSksS8vLzaDqVKXFxcRHd3d/H27dticXGxuG3bNlFFRUW0tbWV2qxdu1bs1q1bhePMnTtX6pOYmCjq6+uLxcXF4pIlS0QfHx9RFEXx22+/FbW0tMT8/HxRFEUxPT1dBCBu2bJFGicxMVEEICYnJ5c714cffijOmDFDunZ3dxenTp1a5bXHxsaKAMQnT56IoiiKkZGRIgDxwIEDUpv8/HxRW1tbPHfunELfMWPGiMOHDy937MaNG4urVq1SKAsLCxMBiDdv3pTK1q9fLzZo0EC6tra2Fnfs2KHQb9GiRaKLi0uV1/e2U/YzxZ1HIiIiIqIasG3bNoiiiIYNG0JTUxNff/01hg8fDhWV//9Pcj8/P5w6darCcTw8PHDjxg1kZWUhKioKnTp1gqqqKtzd3aXjrFFRUXB1dYWmpqZCX0dHR+l3U1NTAJCOk8rlcixatAgODg6oW7cudHV1cezYMWRmZlZ5rZcvX0bfvn1hYWEBPT09uLu7A0CpsZydnaXfb968idzcXHzwwQfQ1dWVfr7//nukpaVVOQZtbW1YW1srrPf5Wp8+fYq0tDSMGTNGYa4vv/zyleZ6X6jVdgBERERERO8Da2trREdH4+nTp/jnn39gamqKoUOHokmTJlUax83NDRoaGoiMjERkZKSUmLVr1w737t3DrVu3EBUVhQkTJpTqq66uLv3+/PuBJSUlAIDly5djzZo1WL16NRwcHKCjowN/f38UFhZWKb6nT5/C09MTnp6e2L59O4yNjZGZmQlPT89SY+no6Ei/5+TkAAAiIiLQsGFDhXYvJ8HKeHGtwLP1iqKoMNfmzZvRoUMHhXaqqqpVnut9weSRiIiIiKgG6ejoQEdHBw8fPsSxY8ewbNmyKvWXyWTo0KEDoqKiEB0djZkzZwJ4lix17NgR3377Lf78889S33esTExMDPr164cRI0YAeJZU3rhxA82bN5faaGhoQC6XVzjO9evXcf/+fSxdulT6TmRcXFyl8zdv3hyamprIzMyUEmJlKBPTyxo0aAAzMzPcunUL3t7eVer7PmPySERERERUA44dOwZRFGFnZ4ebN29i5syZaNasGUaNGiW1WbduHfbv31/p0dWuXbti1apVAIA2bdpI5e7u7lixYoX0YJ2qsLGxwZ49e3Du3DnUqVMHK1euxN9//62QPFpaWuLixYvIyMiArq4u6tatq3DsFgAsLCygoaGBtWvXYuLEibh27RoWLVpU6fx6enoICAjAtGnTUFJSgk6dOuHx48eIiYmBvr4+Ro4cWWY/S0tL/Prrrxg2bBg0NTVRr149pda7YMECTJkyBQYGBvDy8kJBQQHi4uLw8OFDTJ8+Xakx3jf8ziMRERERUQ14/PgxJk+ejGbNmsHHxwedOnXCsWPHFI5X3rt3T6nv3HXt2hVPnjyBm5sb1NT+/36Qu7s7njx5Ir3SoyrmzZuHNm3awNPTEx4eHjAxMUH//v0V2gQEBEBVVRXNmzeXjqO+zNjYGOHh4di9ezeaN2+OpUuXYsWKFUrFsGjRIgQGBiIkJAT29vbw8vJCREQErKysyu2zcOFCZGRkwNraGsbGxkqvd+zYsdiyZQvCwsLg4OAAd3d3hIeHVzjX+04Qnx/8JSIiIiJ6C+Tn5yM9PR1WVlalXn1BRFWn7GeKO49ERERERERUKSaPREREREREVCkmj0RERERERFQpJo9ERERERERUKSaPRERERERUpoyMDAiCgPj4eKX7+Pr6lnpKa20KDg5G69atq31cS0tLrF69utrH/Tdj8khERERE9JYIDw+HoaHhGxm7
rKTP3NwcWVlZaNmy5RuZ821Q3j2PjY3F+PHjaz6gWqRWeRMiIiIiInofqaqqwsTEpLbDeCWiKEIul7+x8avyTsl3BXceiYiIiIhqgIeHB/z8/ODn5wcDAwPUq1cPgYGBePG16w8fPoSPjw/q1KkDbW1t9OrVC6mpqQCAqKgojBo1Co8fP4YgCBAEAcHBwQCAgoICBAQEoGHDhtDR0UGHDh0QFRUljft89+zYsWOwt7eHrq4uvLy8kJWVBeDZ0c6tW7fi559/lsaOiooqdWxVLpdjzJgxsLKygkwmg52dHdasWVOl+/A8lkOHDsHOzg7a2toYNGgQcnNzsXXrVlhaWqJOnTqYMmWKQvK3bds2ODs7Q09PDyYmJvj444+RnZ0t1UdFRUEQBBw5cgRt27aFpqYmzp49W2r+tLQ0NGnSBH5+fhBFscJ7V9E9f/nYqiAI2LJlCwYMGABtbW3Y2Njg4MGDCnMfPHgQNjY20NLSQteuXbF161YIgoBHjx5V6R7WFiaPREREREQ1ZOvWrVBTU8OlS5ewZs0arFy5Elu2bJHqfX19ERcXh4MHD+L8+fMQRRG9e/dGUVERXF1dsXr1aujr6yMrKwtZWVkICAgAAPj5+eH8+fPYuXMnrly5gsGDB8PLy0tKPAEgNzcXK1aswLZt2/Drr78iMzNT6h8QEIAhQ4ZICWVWVhZcXV1LxV9SUoJGjRph9+7dSEpKQlBQEL744gvs2rWrSvchNzcXX3/9NXbu3ImjR48iKioKAwYMwOHDh3H48GFs27YN33zzDfbs2SP1KSoqwqJFi5CQkIADBw4gIyMDvr6+pcaePXs2li5diuTkZDg6OirUXblyBZ06dcLHH3+MdevWQRCECu9dRfe8LAsWLMCQIUNw5coV9O7dG97e3njw4AEAID09HYMGDUL//v2RkJCACRMmYO7cuVW6b7VOJCIiIiJ6i+Tl5YlJSUliXl5ebYdSJe7u7qK9vb1YUlIilc2aNUu0t7cXRVEUb9y4IQIQY2JipPp79+6JMplM3LVrlyiKohgWFiYaGBgojPvHH3+Iqqqq4u3btxXKu3fvLs6ZM0fqB0C8efOmVL9+/XqxQYMG0vXIkSPFfv36KYyRnp4uAhB///33ctc1efJk8aOPPqpwnBeVFcuECRNEbW1t8cmTJ1KZp6enOGHChHLHiY2NFQFIfSIjI0UA4oEDBxTazZ8/X2zVqpUYExMj1qlTR1yxYoVUp+y9e/mei6IoNm7cWFy1apV0DUCcN2+edJ2TkyMCEI8cOSKK4rO/dcuWLRXGmDt3rghAfPjwYbnrrAnKfqb4nUciIiIiohrSsWNHCIIgXbu4uCA0NBRyuRzJyclQU1NDhw4dpHojIyPY2dkhOTm53DGvXr0KuVwOW1tbhfKCggIYGRlJ19ra2rC2tpauTU1NFY59Kmv9+vX47rvvkJmZiby8PBQWFlb5aaYvx9KgQQNYWlpCV1dXoezF+C5fvozg4GAkJCTg4cOHKCkpAQBkZmaiefPmUjtnZ+dS82VmZuKDDz7A4sWL4e/vL5Ure++U9eJOp46ODvT19aU1pKSkoF27dgrt27dvX+U5ahOTRyIiIiKit1hOTg5UVVVx+fJlqKqqKtS9mIypq6sr1AmCoPB9S2Xs3LkTAQEBCA0NhYuLC/T09LB8+XJcvHixSuOUFUtZZc8TxKdPn8LT0xOenp7Yvn07jI2NkZmZCU9PTxQWFir009HRKTWfsbExzMzM8OOPP2L06NHQ19cHoPy9e511PV/Du4DJIxERERFRDXk5ybpw4QJsbGygqqoKe3t7FBcX4+LFi9L3De/fv4+UlBRpZ01DQ6PUE0SdnJwgl8uRnZ2Nzp07v3JsZY39spiYGLi6umLSpElSWVpa2ivPqazr16/j/v37WLp0KczNzQEAcXFxSveXyWQ4dOgQevfuDU9PTxw/fhx6enpK3Ttl7osy7OzscPjwYYWy2NjY1x63JvGBOURERERENSQz
MxPTp09HSkoKfvzxR6xduxZTp04FANjY2KBfv34YN24czp49i4SEBIwYMQINGzZEv379ADx7wmdOTg5OnTqFe/fuITc3F7a2tvD29oaPjw/27duH9PR0XLp0CSEhIYiIiFA6NktLS1y5cgUpKSm4d+8eioqKSrWxsbFBXFwcjh07hhs3biAwMLBGEiALCwtoaGhg7dq1uHXrFg4ePIhFixZVaQwdHR1ERERATU0NvXr1Qk5OjlL3rqx7/iomTJiA69evY9asWbhx4wZ27dqF8PBwAFA4yvxvxuSRiIiIiKiG+Pj4IC8vD+3bt8fkyZMxdepUhRfNh4WFoW3btujTpw9cXFwgiiIOHz4sHYd0dXXFxIkTMXToUBgbG2PZsmVSPx8fH8yYMQN2dnbo378/YmNjYWFhoXRs48aNg52dHZydnWFsbIyYmJhSbSZMmICBAwdi6NCh6NChA+7fv6+wC/mmGBsbIzw8HLt370bz5s2xdOlSrFixosrj6Orq4siRIxBFER9++CGePn1a6b0r755XlZWVFfbs2YN9+/bB0dERGzdulJ62qqmp+Upj1jRBrOpBZyIiIiKiWpSfn4/09HRYWVlBS0urtsNRmoeHB1q3bq3wbkB6vy1evBibNm3Cn3/+WatxKPuZ4nceiYiIiIiIasCGDRvQrl07GBkZISYmBsuXL4efn19th6U0Jo9EREREREQ1IDU1FV9++SUePHgACwsLzJgxA3PmzKntsJTGY6tERERE9FZ5W4+tEv1bKfuZ4gNziIiIiIiIqFJMHomIiIiISGmWlpb/iof++Pr6on///tK1h4cH/P39ay2e9wGTRyIiIiKiWvZyIvS63mQiFRsbq/B6kepmaWkJQRBw4cIFhXJ/f394eHhI12vWrJHek0g1g8kjEREREdFboqioqLZDgLGxMbS1td/oHFpaWpg1a1aFbQwMDGBoaPhG4yBFTB6JiIiIiGrAnj174ODgAJlMBiMjI/To0QNPnz5FcHAwtm7dip9//hmCIEAQBERFRSEjIwOCIOCnn36Cu7s7tLS0sH37dty/fx/Dhw9Hw4YNoa2tDQcHB/z444/SPL6+voiOjsaaNWuk8TIyMgAA165dQ69evaCrq4sGDRrgk08+wb1796S+T548gbe3N3R0dGBqaopVq1aV2sV8+djqo0ePMHbsWBgbG0NfXx/dunVDQkKCVJ+QkICuXbtCT08P+vr6aNu2LeLi4iq8V+PHj8eFCxdw+PDhcttUtlsbEREBAwMDbN++HQDw559/YsiQITA0NETdunXRr18/6b6Qcpg8EhERERG9YVlZWRg+fDhGjx6N5ORkREVFYeDAgRBFEQEBARgyZAi8vLyQlZWFrKwsuLq6Sn1nz56NqVOnIjk5GZ6ensjPz0fbtm0RERGBa9euYfz48fjkk09w6dIlAM+Oc7q4uGDcuHHSeObm5nj06BG6desGJycnxMXF4ejRo/j7778xZMgQaa7p06cjJiYGBw8exIkTJ3DmzBn89ttvFa5t8ODByM7OxpEjR3D58mW0adMG3bt3x4MHDwAA3t7eaNSoEWJjY3H58mXMnj0b6urqFY5pZWWFiRMnYs6cOSgpKany/d6xYweGDx+O7du3w9vbG0VFRfD09ISenh7OnDmDmJgY6OrqwsvLC4WFhVUe/33F9zwSEREREb1hWVlZKC4uxsCBA9G4cWMAgIODg1Qvk8lQUFAAExOTUn39/f0xcOBAhbKAgADp988++wzHjh3Drl270L59exgYGEBDQwPa2toK461btw5OTk5YsmSJVPbdd9/B3NwcN27cgKmpKbZu3YodO3age/fuAICwsDCYmZmVu66zZ8/i0qVLyM7OhqamJgBgxYoVOHDgAPbs2YPx48cjMzMTM2fORLNmzQAANjY2St2zefPmISwsDNu3b8cnn3yiVB8AWL9+PebOnYtffvkF7u7uAICffvoJJSUl2LJlCwRBkNZmaGiIqKgo9OzZU+nx
32dMHomIiIiI3rBWrVqhe/fucHBwgKenJ3r27IlBgwahTp06lfZ1dnZWuJbL5ViyZAl27dqF27dvo7CwEAUFBZV+DzEhIQGRkZHQ1dUtVZeWloa8vDwUFRWhffv2UrmBgQHs7OwqHDMnJwdGRkYK5Xl5eUhLSwPwbDdz7Nix2LZtG3r06IHBgwfD2tq60nUbGxsjICAAQUFBGDp0aKXtgWdHg7OzsxETE4N27dopxHnz5k3o6ekptM/Pz5fipMoxeSQiIiIiesNUVVVx4sQJnDt3DsePH8fatWsxd+5cXLx4EVZWVhX21dHRUbhevnw51qxZg9WrV8PBwQE6Ojrw9/ev9PhlTk4O+vbti6+++qpUnampKW7evFnldeXk5MDU1BRRUVGl6p4/zCY4OBgff/wxIiIicOTIEcyfPx87d+7EgAEDKh1/+vTp2LBhAzZs2KBUPE5OTvjtt9/w3XffwdnZWdplzMnJQdu2baXvP77I2NhYqbGJ33kkIiIioveUKIqQ3ytG8R8FkN8rhiiKb3Q+QRDg5uaGBQsW4Pfff4eGhgb2798PANDQ0IBcLldqnJiYGPTr1w8jRoxAq1at0KRJE9y4cUOhTVnjtWnTBomJibC0tETTpk0VfnR0dNCkSROoq6sjNjZW6vP48eNSY7885p07d6CmplZqzHr16kntbG1tMW3aNBw/fhwDBw5EWFiYUmvV1dVFYGAgFi9ejCdPnlTa3traGpGRkfj555/x2WefKcSZmpqK+vXrl4rTwMBAqViIySMRERERvWdKHhUjZ93f+LtlIu6YJ+DvZtee/bdlInLW/Y2SR8XVPufFixexZMkSxMXFITMzE/v27cPdu3dhb28P4NkTTK9cuYKUlBTcu3evwldy2NjYSLuYycnJmDBhAv7++2+FNpaWlrh48SIyMjJw7949lJSUYPLkyXjw4AGGDx+O2NhYpKWl4dixYxg1ahTkcjn09PQwcuRIzJw5E5GRkUhMTMSYMWOgoqIi7eC9rEePHnBxcUH//v1x/PhxZGRk4Ny5c5g7dy7i4uKQl5cHPz8/REVF4Y8//kBMTAxiY2OldStj/PjxMDAwwI4dO5Rqb2tri8jISOzdu1d6Sqy3tzfq1auHfv364cyZM0hPT0dUVBSmTJmC//3vf0rH8r5j8khERERE7438E49xp+lVPP78f5CnFyjUydML8Pjz/+FO06vIP/G4WufV19fHr7/+it69e8PW1hbz5s1DaGgoevXqBQAYN24c7Ozs4OzsDGNjY8TExJQ71rx589CmTRt4enrCw8MDJiYmpV5ZERAQAFVVVTRv3hzGxsbIzMyEmZkZYmJiIJfL0bNnTzg4OMDf3x+GhoZQUXmWFqxcuRIuLi7o06cPevToATc3N9jb20NLS6vMWARBwOHDh9GlSxeMGjUKtra2GDZsGP744w80aNAAqqqquH//Pnx8fGBra4shQ4agV69eWLBggdL3Tl1dHYsWLUJ+fr7Sfezs7HD69Gn8+OOPmDFjBrS1tfHrr7/CwsICAwcOhL29PcaMGYP8/Hzo6+srPe77ThDf9P48EREREVE1ys/PR3p6OqysrMpNasrsd+Ix7g+4CYgAKnr7gwoAATDa3xRaH7zfRxqfPn2Khg0bIjQ0FGPGjKntcOgNUfYzxZ1HIiIiInrnlTwqxoPhtypPHPF/9SLwYPitN3KE9d/s999/x48//oi0tDT89ttv8Pb2BgD069evliOjfwMmj0RERET0zsv94T7E3JLKE8fnSgAxtwS52x+80bj+jVasWIFWrVqhR48eePr0Kc6cOaPw8Bt6f/FVHURERET0ThNFETkb775S35wN2dCZZFzuA2PeNU5OTrh8+XJth0H/Utx5JCIiIqJ3Wsl9OeS3Cp4dWa0KEZDfKkDJA+VeoUH0rmPySERERETvNPHp6yV/Ys7bkzxaWlpi9erVFbYRBAEHDhyokXjo3cLkkYiIiIjeaYKO6uv11329/lQ5JrRv
ByaPRERERPROUzFShWoTTaCqX1sUANUmmlCpy+SRCGDySERERETvOEEQoPup8Sv11Z1Uv9oellNSUoJly5ahadOm0NTUhIWFBRYvXizVX716Fd26dYNMJoORkRHGjx+PnJwcqd7DwwP+/v4KY/bv3x++vr7lzpmamoouXbpAS0sLzZs3x4kTJyqNs6yjr61bt0ZwcDCAZw8gCg4OhoWFBTQ1NWFmZoYpU6ZIbQsKChAQEICGDRtCR0cHHTp0QFRUVIXzAcCAAQMgCIJ0DQAbN26EtbU1NDQ0YGdnh23btlUaP705TB6JiIiI6J2nPcIIgraK8v/6VQEEbRVoe9etthjmzJmDpUuXIjAwEElJSdixYwcaNGgAAHj69Ck8PT1Rp04dxMbGYvfu3Th58iT8/Pxeeb6SkhIMHDgQGhoauHjxIjZt2oRZs2a99jr27t2LVatW4ZtvvkFqaioOHDgABwcHqd7Pzw/nz5/Hzp07ceXKFQwePBheXl5ITU0tc7zY2FgAQFhYGLKysqTr/fv3Y+rUqZgxYwauXbuGCRMmYNSoUYiMjHztNdCr4as6iIiIiOidp2Kohro/NsH9ATefJZAVve9RBYAA1N1pDRXD6vnn8pMnT7BmzRqsW7cOI0eOBABYW1ujU6dOAIAdO3YgPz8f33//PXR0dAAA69atQ9++ffHVV19JSWZVnDx5EtevX8exY8dgZmYGAFiyZAl69er1WmvJzMyEiYkJevToAXV1dVhYWKB9+/ZSXVhYGDIzM6U5AwICcPToUYSFhWHJkiWlxjM2frYrbGhoCBMTE6l8xYoV8PX1xaRJkwAA06dPx4ULF7BixQp07dr1tdZAr4Y7j0RERET0XtD6wABG+5tCkKk8+/7jy6dR/69MkKnA6IANtHroV9vcycnJKCgoQPfu3cutb9WqlZQ4AoCbmxtKSkqQkpLyynOam5tLSRwAuLi4vNJYL2V2lnAAAEoRSURBVBo8eDDy8vLQpEkTjBs3Dvv370dxcTGAZ0dv5XI5bG1toaurK/1ER0cjLS2tyvG7ubkplLm5uSE5Ofm110CvhjuPRERERPTe0PrAACY3HZC7/QFyNmQ/e//j/1G10oTupPrQHmEEFYPqfUiOTCZ77TFUVFQgioovqywqKnrtcas6j7m5OVJSUnDy5EmcOHECkyZNwvLlyxEdHY2cnByoqqri8uXLUFVVvIe6urrVHivVLO48EhEREdF7RcVQDbqT66PBtRYw+V8rNLje8tl/r7WA7uT61Z44AoCNjQ1kMhlOnTpVZr29vT0SEhLw9OlTqSwmJgYqKiqws7MD8Ox4Z1ZWllQvl8tx7dq1cue0t7fHn3/+qdDnwoULlcb68jz//PMP0tPTFdrIZDL07dsXX3/9NaKionD+/HlcvXoVTk5OkMvlyM7ORtOmTRV+XjyS+jJ1dXXI5Yrv07S3t0dMTIxCWUxMDJo3b17pGujN4M4jEREREb2XBEGAqpEaYPTm/0mspaWFWbNm4fPPP4eGhgbc3Nxw9+5dJCYmYsyYMfD29sb8+fMxcuRIBAcH4+7du/jss8/wySefSN937NatG6ZPn46IiAhYW1tj5cqVePToUblz9ujRA7a2thg5ciSWL1+Of/75B3Pnzq001m7duiE8PBx9+/aFoaEhgoKCFHYRw8PDIZfL0aFDB2hra+OHH36ATCZD48aNYWRkBG9vb/j4+CA0NBROTk64e/cuTp06BUdHR3z44YdlzmlpaYlTp07Bzc0NmpqaqFOnDmbOnIkhQ4bAyckJPXr0wC+//IJ9+/bh5MmTVbv5VG2480hEREREVAMCAwMxY8YMBAUFwd7eHkOHDkV2djYAQFtbG8eOHcODBw/Qrl07DBo0CN27d8e6deuk/qNHj8bIkSPh4+MDd3d3NGnSpMIHx6ioqGD//v3Iy8tD+/btMXbsWIVXg5Rnzpw5cHd3R58+ffDhhx+if//+sLa2luoNDQ2xefNmuLm5wdHRESdP
nsQvv/wCIyMjAM+emurj44MZM2bAzs4O/fv3R2xsLCwsLMqdMzQ0FCdOnIC5uTmcnJwAPHsNyZo1a7BixQq0aNEC33zzDcLCwuDh4VHpGujNEMSXDzQTEREREf2L5efnIz09HVZWVtDS0qrtcIjeesp+prjzSERERERERJVi8khERERERESVYvJIRERERERElWLySERERERERJVi8khEREREVAM8PDzg7+9f22HUCktLS6xevbq2w6DXxPc8EhERERHVgH379kFdXb22w6gVsbGx0NHRka4FQcD+/fvRv3//2guKqozJIxERERFRDahbt+4bHb+wsBAaGhpvdI5XZWxs/MbnKCoqem+T85rCY6tERERERDXg5WOrlpaWWLJkCUaPHg09PT1YWFjgv//9r0Kf//3vfxg+fDjq1q0LHR0dODs74+LFiwCA4OBgtG7dGlu2bFF4P9+jR48wduxYGBsbQ19fH926dUNCQoI0ZlpaGvr164cGDRpAV1cX7dq1w8mTJxXm3bBhA2xsbKClpYUGDRpg0KBBUl1JSQlCQkJgZWUFmUyGVq1aYc+ePRWu/cVjq5aWlgCAAQMGQBAE6TohIQFdu3aFnp4e9PX10bZtW8TFxZU7piAI2LhxI/7zn/9AR0cHixcvBgD8/PPPaNOmDbS0tNCkSRMsWLAAxcXFAICPP/4YQ4cOVRinqKgI9erVw/fff6/U+qKioiAIAk6dOgVnZ2doa2vD1dUVKSkpUhtfX99Su6r+/v7w8PB4rftY25g8EhERERHVktDQUDg7O+P333/HpEmT8Omnn0pJSE5ODtzd3XH79m0cPHgQCQkJ+Pzzz1FSUiL1v3nzJvbu3Yt9+/YhPj4eADB48GBkZ2fjyJEjuHz5Mtq0aYPu3bvjwYMH0ri9e/fGqVOn8Pvvv8PLywt9+/ZFZmYmACAuLg5TpkzBwoULkZKSgqNHj6JLly7SnCEhIfj++++xadMmJCYmYtq0aRgxYgSio6OVWnNsbCwAICwsDFlZWdK1t7c3GjVqhNjYWFy+fBmzZ8+udCcxODgYAwYMwNWrVzF69GicOXMGPj4+mDp1KpKSkvDNN98gPDxcSiy9vb3xyy+/ICcnRxrj2LFjyM3NxYABA6q0vrlz5yI0NBRxcXFQU1PD6NGjlVr/c697H2uFSERERET0FsnLyxOTkpLEvLy82g6lStzd3cWpU6dK140bNxZHjBghXZeUlIj169cXN27cKIqiKH7zzTeinp6eeP/+/TLHmz9/vqiuri5mZ2dLZWfOnBH19fXF/Px8hbbW1tbiN998U25sLVq0ENeuXSuKoiju3btX1NfXF//5559S7fLz80VtbW3x3LlzCuVjxowRhw8fXu74jRs3FletWiVdAxD379+v0EZPT08MDw8vd4yXARD9/f0Vyrp37y4uWbJEoWzbtm2iqampKIqiWFRUJNarV0/8/vvvpfrhw4eLQ4cOVXp9kZGRIgDx5MmTUn1ERIQIQPrf5MiRI8V+/fopjDF16lTR3d1d6XlqkrKfKX7nkYiIiIioljg6Okq/C4IAExMTZGdnAwDi4+Ph5ORU4XclGzdurPB9woSEBOTk5MDIyEihXV5eHtLS0gA823kMDg5GREQEsrKyUFxcjLy8PGnn8YMPPkDjxo3RpEkTeHl5wcvLCwMGDIC2tjZu3ryJ3NxcfPDBBwrjFxYWwsnJ6bXuxfTp0zF27Fhs27YNPXr0wODBg2FtbV1hH2dnZ4XrhIQExMTESDuNACCXy5Gfn4/c3Fxoa2tjyJAh2L59Oz755BM8ffoUP//8M3bu3AkAVVrfi387U1NTAEB2djYsLCwqXeubvI9vEpNHIiIiIqJa8vKxTEEQpGOpMpms0v4vPsEUeJYYmpqaIioqqlRbQ0NDAEBAQABOnDiBFStWoGnTppDJZBg0aBAKCwsBAHp6evjtt98QFRWF48ePIygoCMHBwYiNjZWOe0ZERKBhw4YK42tqaiq15vIE
Bwfj448/RkREBI4cOYL58+dj586d0nHSspS1/gULFmDgwIGl2j7/Tqi3tzfc3d2RnZ2NEydOQCaTwcvLS+qv7Ppe/NsJggAA0t9ORUUFoigqtC8qKlKIU9l5/k2YPBIRERER/Qs5Ojpiy5YtePDggdJPam3Tpg3u3LkDNTU16UE0L4uJiYGvr6+UlOXk5CAjI0OhjZqaGnr06IEePXpg/vz5MDQ0xOnTp/HBBx9AU1MTmZmZcHd3f+W1qaurQy6Xlyq3tbWFra0tpk2bhuHDhyMsLKzC5PFlbdq0QUpKCpo2bVpuG1dXV5ibm+Onn37CkSNHMHjwYCkRbN68ebWsz9jYGNeuXVMoi4+Pr/Z5ahqTRyIiIiKif6Hhw4djyZIl6N+/P0JCQmBqaorff/8dZmZmcHFxKbNPjx494OLigv79+2PZsmWwtbXFX3/9hYiICAwYMADOzs6wsbHBvn370LdvXwiCgMDAQIWH8Bw6dAi3bt1Cly5dUKdOHRw+fBglJSWws7ODnp4eAgICMG3aNJSUlKBTp054/PgxYmJioK+vj5EjRyq1NktLS5w6dQpubm7Q1NSElpYWZs6ciUGDBsHKygr/+9//EBsbi48++qhK9ywoKAh9+vSBhYUFBg0aBBUVFSQkJODatWv48ssvpXYff/wxNm3ahBs3biAyMlIqr671devWDcuXL8f3338PFxcX/PDDD7h27Zp0JLW65qlpfNoqEREREdG/kIaGBo4fP4769eujd+/ecHBwwNKlS6GqqlpuH0EQcPjwYXTp0gWjRo2Cra0thg0bhj/++AMNGjQAAKxcuRJ16tSBq6sr+vbtC09PT7Rp00Yaw9DQEPv27UO3bt1gb2+PTZs24ccff0SLFi0AAIsWLUJgYCBCQkJgb28PLy8vREREwMrKSum1hYaG4sSJEzA3N4eTkxNUVVVx//59+Pj4wNbWFkOGDEGvXr2wYMGCKt0zT09PHDp0CMePH0e7du3QsWNHrFq1Co0bN1Zo5+3tjaSkJDRs2BBubm4KddWxPk9PTwQGBuLzzz9Hu3bt8OTJE/j4+FT7PDVNEF8+jEtERERE9C+Wn5+P9PR0hXcbEtGrU/YzxZ1HIiIiIiIiqhSTRyIiIiIiIqoUk0ciIiIiIiKqFJNHIiIiIiIiqhSTRyIiIiIiIqoUk0ciIiIiohrg4eEBf3//2g6D6JUxeSQiIiIiqgH79u3DokWLXnucP//8E6NHj4aZmRk0NDTQuHFjTJ06Fffv31doFxwcjGbNmkFHRwd16tRBjx49cPHixQrHDgkJQbt27aCnp4f69eujf//+SElJUWiTn5+PyZMnw8jICLq6uvjoo4/w999/S/UJCQkYPnw4zM3NIZPJYG9vjzVr1pQ7Z0xMDNTU1NC6dWuF8qVLl6JFixbQ1taGra0tduzYoeQdojeFySMRERERUQ2oW7cu9PT0XmuMW7duwdnZGampqfjxxx9x8+ZNbNq0CadOnYKLiwsePHggtbW1tcW6detw9epVnD17FpaWlujZsyfu3r1b7vjR0dGYPHkyLly4gBMnTqCoqAg9e/bE06dPpTbTpk3DL7/8gt27dyM6Ohp//fUXBg4cKNVfvnwZ9evXxw8//IDExETMnTsXc+bMwbp160rN9+jRI/j4+KB79+6l6s6cOYNVq1bh2rVrGDFiBHx8fHDr1q1XvXVUDQRRFMXaDoKIiIiISFnKvtD838bDwwOtW7fG6tWrAQCWlpYYP348bt68id27d6NOnTqYN28exo8fX+4YvXr1wrVr13Djxg3IZDKp/M6dO7C2toaPjw82btxYZt9//vkHBgYGOHnyZJnJWlnu3r2L+vXrIzo6Gl26dMHjx49hbGyMHTt2YNCgQQCA69evw97eHufPn0fHjh3LHGfy5MlITk7G6dOnFcqHDRsGGxsbqKqq4sCBA4iPjy+z/4MHD2BkZIQzZ86gU6dOSsVOylP2M8WdRyIiIiKiWhIaGgpnZ2f8
/vvvmDRpEj799NNSx0Sfe/DgAY4dO4ZJkyYpJI4AYGJiAm9vb/z0008oa2+osLAQ//3vf2FgYIBWrVopHd/jx48BPNs1BZ7tKhYVFaFHjx5Sm2bNmsHCwgLnz5+vcJznYzwXFhaGW7duYf78+RXGIIoiZsyYgZYtW6J9+/ZKx07Vj8kjEREREVEt6d27NyZNmoSmTZti1qxZqFevHiIjI8tsm5qaClEUYW9vX2a9vb09Hj58qHAs9dChQ9DV1YWWlhZWrVqFEydOoF69ekrFVlJSAn9/f7i5uaFly5YAnu1wamhowNDQUKFtgwYNcOfOnTLHOXfuHH766SeFHdXU1FTMnj0bP/zwA9TU1CqMY+zYsTh37hyOHj0KDQ0NpWKnN6PivxQREREREb0xjo6O0u+CIMDExATZ2dkV9qnsW2cvJlhdu3ZFfHw87t27h82bN2PIkCG4ePEi6tevX2lskydPxrVr13D27NlK25bn2rVr6NevH+bPn4+ePXsCAORyOT7++GMsWLAAtra2FfaPjY3Fd999h+vXr6Nhw4avHAdVD+48EhERERHVEnV1dYVrQRBQUlJSZtumTZtCEAQkJyeXWZ+cnAxjY2OFXUEdHR00bdoUHTt2xLfffgs1NTV8++23lcbl5+eHQ4cOITIyEo0aNZLKTUxMUFhYiEePHim0//vvv2FiYqJQlpSUhO7du2P8+PGYN2+eVP7kyRPExcXBz88PampqUFNTw8KFC5GQkAA1NTWF70X+9ddfAAA7O7tKY6Y3j8kjEREREdFbwMjICB988AE2bNiAvLw8hbo7d+5g+/bt8PX1rXCMkpISFBQUlFsviiL8/Pywf/9+nD59GlZWVgr1bdu2hbq6Ok6dOiWVpaSkIDMzEy4uLlJZYmIiunbtipEjR2Lx4sUKY+jr6+Pq1auIj4+XfiZOnAg7OzvEx8ejQ4cOUlt3d3fExsZWuCaqOTy2SkRERET0lli3bh1cXV3h6emJL7/8ElZWVkhMTMTMmTNha2uLoKAgAMDTp0+xePFi/Oc//4GpqSnu3buH9evX4/bt2xg8eHC540+ePBk7duzAzz//DD09Pel7jAYGBpDJZDAwMMCYMWMwffp01K1bF/r6+vjss8/g4uIiPWn12rVr6NatGzw9PTF9+nRpDFVVVRgbG0NFRUX6DuVz9evXh5aWVqnyyMhIzJkzB9evX6+2e0ivjjuPRERERERvCRsbG8TGxqJJkyYYMmQIGjdujF69esHW1hYxMTHQ1dUF8CxRu379Oj766CPY2tqib9++uH//Ps6cOYMWLVqUO/7GjRvx+PFjeHh4wNTUVPr56aefpDarVq1Cnz598NFHH6FLly4wMTHBvn37pPo9e/bg7t27+OGHHxTGaNeuXZXX+/jx43KfPks1j+95JCIiIqK3ytv6nsc3Zf78+Vi5ciVOnDhR7nsWiSqi7GeKx1aJiIiIiN5iCxYsgKWlJS5cuID27dtDRYWHC+nNYPJIRERERPSWGzVqVG2HQO8B/t8SREREREREVCkmj0RERERERFQpJo9ERERERERUKSaPREREREREVCkmj0RERERERFQpJo9ERERERERUKSaPREREREREVCkmj0REREREhPDwcBgaGtZ2GDXG19cX/fv3r+0w3ipMHomIiIiIasCvv/6Kvn37wszMDIIg4MCBA6XaiKKIoKAgmJqaQiaToUePHkhNTa3SPB07dvx/7d17XM/n//jxx7uS3p0dQhlqVJ+ccxzmzIrNsI1GW8LCaNg0ZiOxzcyHD9nmvE9hJtuc9nFqNG8jISb6rBb6lLDImbdC3l2/P3y9fntTKqfGnvfbrdut13Vdr+t6Xq/2vt08d13v68WwYcPMyubPn49OpyM6OtqsPDg4mLZt25Z2KvdU1NzKSmZmJjqdjqSkpLIO5YknyaMQQgghhBCPwdWrV2nUqBFfffVVkW2mT5/OnDlzmD9/Pnv27MHOzg4/Pz+uXbtW4nE6duyI
wWAwK9u2bRs1atS4q9xgMNCpU6fSTKPM3Lhxo6xD+NuT5FEIIYQQQojHoFu3bnzyySf07t270HqlFLNnz2bChAn07NmThg0bsnTpUv74449SreR17NiRtLQ0Tp06pZVt376dDz74wCx5zMjI4NixY3Ts2NHs/tjYWHx8fLC3t8ff35/s7GytLjExka5du1K5cmWcnJxo3749v/76q1bv7u4OQO/evdHpdNp1YcaNG4eXlxe2trY8++yzTJw4kfz8fK0+IiKCxo0bs3jxYjw8PLCxsQHg4sWLvPXWW7i4uODo6EinTp04ePBgkeN4eHgA4Ovri06no0OHDmb1M2bMwNXVlUqVKjFixAizGK5fv05YWBjVq1fHzs6Oli1b3pWA/51I8iiEEEIIIcRfQEZGBqdOnaJLly5amZOTEy1btiQhIUEr69ChA8HBwUX206ZNG8qVK8e2bdsASElJIS8vj8GDB3Pu3DkyMjKAW6uRNjY2tGrVSrs3NzeXGTNmsGzZMn755ReysrIICwvT6q9cucKAAQPYuXMnu3fvxtPTk+7du3PlyhXgVnIJEBUVRXZ2tnZdGAcHB6Kjo0lJSSEyMpJFixYxa9YsszZHjx5l1apVrF69Wtt22qdPH3Jycti0aRP79++nSZMmdO7cmfPnzxc6zt69ewHYunUr2dnZrF69Wqvbtm0b6enpbNu2jSVLlhAdHW22tTc0NJSEhARiYmI4dOgQffr0wd/fv9RbiZ8WVmUdgBBCCCGEEAJtpbBq1apm5VWrVjVbRaxZsyaurq5F9mNnZ0eLFi0wGAz069cPg8HA888/T/ny5WndujUGgwEPDw8MBgOtWrWifPny2r35+fnMnz+f2rVrA7eSpylTpmj1d25xXbhwIc7Ozmzfvp2XXnoJFxcXAJydnalWrdo95zthwgTtd3d3d8LCwoiJiWHs2LFa+Y0bN1i6dKnW786dO9m7dy85OTla3DNmzGDt2rX88MMPDBky5K5xbt9bqVKlu2KqUKECX375JZaWlvzjH//gxRdfJC4ujpCQELKysoiKiiIrKws3NzcAwsLC2Lx5M1FRUUydOvWe83saSfIohBBCCCHEE2Tp0qXFtunQoQPff/89cOt7jbe3arZv3x6DwcDAgQMxGAyEhISY3Wdra6sljgCurq7k5ORo16dPn2bChAkYDAZycnIwmUzk5uaSlZVV6nmsXLmSOXPmkJ6ejtFo5ObNmzg6Opq1qVWrlpb8ARw8eBCj0UilSpXM2uXl5ZGenl7qGOrVq4elpaV27erqSnJyMgDJycmYTCa8vLzM7rl+/fpd4/9dSPIohBBCCCHEX8DtVbHTp0+brSyePn2axo0bl6qvjh078umnn3Ly5EkMBoO29bR9+/YsWLCA9PR0jh8/ftdKYrly5cyudTodSintesCAAZw7d47IyEhq1apF+fLladWqVakPs0lISCAwMJDJkyfj5+eHk5MTMTExzJw506ydnZ2d2bXRaMTV1bXQ7x3ez2tGCptvQUGBNpalpSX79+83SzAB7O3tSz3W00CSRyGEEEIIIf4CPDw8qFatGnFxcVqyePnyZfbs2cPbb79dqr5at26NtbU1c+fO5dq1azRt2hSA5s2bc+bMGf79739r21tLIz4+nrlz59K9e3cAjh8/ztmzZ83alCtXDpPJdM9+du3aRa1atfjoo4+0smPHjhU7fpMmTTh16hRWVlb3PIznz6ytrQGKjelOvr6+mEwmcnJyHvrrTJ5UcmCOEEIIIYQQj4HRaCQpKUk7+CUjI4OkpCRty6dOp2P06NF88skn/PjjjyQnJxMUFISbm5vZy+yDgoIYP378PcfS6/U899xzfPHFF7Rp00ZbObO2tjYrv3PlrTienp4sW7aM1NRU9uzZQ2BgIHq93qyNu7s7cXFxnDp1igsXLhTZT1ZWFjExMaSnpzNnzhzWrFlT7PhdunShVatW9OrVi59++onMzEx27drFRx99xL59+wq9p0qV
Kuj1ejZv3szp06e5dOlSiebq5eVFYGAgQUFBrF69moyMDPbu3ctnn33Ghg0bStTH00aSRyGEEEIIIR6Dffv24evri6+vLwDvvfcevr6+hIeHa23Gjh3LO++8w5AhQ2jevDlGo5HNmzdrr6kAyMrKMnt9RlE6duzIlStX7no1Rfv27bly5cpdr+goia+//poLFy7QpEkT3nzzTUaOHEmVKlXM2sycOZMtW7ZQo0YNba53evnll3n33XcJDQ2lcePG7Nq1i4kTJxY7vk6nY+PGjbRr146BAwfi5eXF66+/zrFjx+46aOg2Kysr5syZw4IFC3Bzc6Nnz54lnm9UVBRBQUGMGTMGb29vevXqRWJiIjVr1ixxH08TnfrzJmYhhBBCCCH+4q5du0ZGRobZu/+EEPevpJ8pWXkUQgghhBBCCFEsSR6FEEIIIYQQQhRLkkchhBBCCCGEEMWS5FEIIYQQQgghRLEkeRRCCCGEEEIUKjMzE51Op71epCSCg4PNXi0inh6SPAohhBBCCPGEiI6OxtnZ+ZH0XVjSV6NGDbKzs6lfv/4jGfNhu59kV5ScVVkHIIQQQgghhPhrsrS0pFq1amUdhviLkJVHIYQQQgghHoMOHToQGhpKaGgoTk5OVK5cmYkTJ/Ln165fuHCBoKAgKlSogK2tLd26dePIkSMAGAwGBg4cyKVLl9DpdOh0OiIiIgC4fv06YWFhVK9eHTs7O1q2bInBYND6vb1iGRsbi4+PD/b29vj7+5OdnQ1AREQES5YsYd26dVrfBoPhrpU8k8nE4MGD8fDwQK/X4+3tTWRkZKmfRXx8PB06dMDW1pYKFSrg5+fHhQsXtLmMHDmSKlWqYGNjw/PPP09iYqLZMwoMDMTFxQW9Xo+npydRUVEAeHh4AODr64tOp6NDhw6ljk0UTZJHIYQQQgghHpMlS5ZgZWXF3r17iYyM5F//+heLFy/W6oODg9m3bx8//vgjCQkJKKXo3r07+fn5tG7dmtmzZ+Po6Eh2djbZ2dmEhYUBEBoaSkJCAjExMRw6dIg+ffrg7++vJZ4Aubm5zJgxg2XLlvHLL7+QlZWl3R8WFkbfvn21hDI7O5vWrVvfFX9BQQHPPPMM33//PSkpKYSHh/Phhx/y3XfflfgZJCUl0blzZ+rWrUtCQgI7d+6kR48emEwmAMaOHcuqVatYsmQJv/76K3Xq1MHPz4/z588DMHHiRFJSUti0aROpqanMmzePypUrA7B3714Atm7dSnZ2NqtXry7Nn0cUQ7atCiGEEEII8ZjUqFGDWbNmodPp8Pb2Jjk5mVmzZhESEsKRI0f48ccfiY+P1xK35cuXU6NGDdauXUufPn1wcnJCp9OZbSXNysoiKiqKrKws3NzcgFvJ4ObNm4mKimLq1KkA5OfnM3/+fGrXrg3cSjinTJkCgL29PXq9nuvXr99zm2q5cuWYPHmydu3h4UFCQgLfffcdffv2LdEzmD59Os2aNWPu3LlaWb169QC4evUq8+bNIzo6mm7dugGwaNEitmzZwtdff837779PVlYWvr6+NGvWDAB3d3etHxcXFwAqVaok220fAVl5FEIIIYQQ4jF57rnn0Ol02nWrVq04cuQIJpOJ1NRUrKysaNmypVZfqVIlvL29SU1NLbLP5ORkTCYTXl5e2Nvbaz/bt28nPT1da2dra6sljgCurq7k5OSUeg5fffUVTZs2xcXFBXt7exYuXEhWVlaJ77+98liY9PR08vPzadOmjVZWrlw5WrRooT2Dt99+m5iYGBo3bszYsWPZtWvXPcfbsWOH2XNZvnx5iWMV5mTlUQghhBBCiCeY0WjE0tKS/fv3Y2lpaVZnb2+v/V6uXDmzOp1OZ/Z9y5KIiYkhLCyMmTNn0qpVKxwcHPjnP//Jnj17StyHXq8v1Zh36tatG8eOHWPjxo1s2bKFzp07M2LECGbMmFFo+2bNmpmdvlq1atUHGv/vTFYehRBCCCGEeEzuTLJ2796Np6cnlpaW
+Pj4cPPmTbM2586dIy0tjbp16wJgbW2tfTfwNl9fX0wmEzk5OdSpU8fspzRbNwvr+063t9QOHz4cX19f6tSpY7a6WRINGzYkLi6u0LratWtjbW1NfHy8Vpafn09iYqL2DODW9tQBAwbwzTffMHv2bBYuXKjNATCbh16vN3smDg4OpYpX/H+SPAohhBBCCPGYZGVl8d5775GWlsaKFSv44osvGDVqFACenp707NmTkJAQdu7cycGDB3njjTeoXr06PXv2BG59v89oNBIXF8fZs2fJzc3Fy8uLwMBAgoKCWL16NRkZGezdu5fPPvuMDRs2lDg2d3d3Dh06RFpaGmfPniU/P/+uNp6enuzbt4/Y2FgOHz7MxIkTzU5CLYnx48eTmJjI8OHDOXToEL///jvz5s3j7Nmz2NnZ8fbbb/P++++zefNmUlJSCAkJITc3l8GDBwMQHh7OunXrOHr0KL/99hvr16/Hx8cHgCpVqqDX69m8eTOnT5/m0qVLpYpN3Jskj0IIIYQQQjwmQUFB5OXl0aJFC0aMGMGoUaMYMmSIVh8VFUXTpk156aWXaNWqFUopNm7cqG05bd26NcOGDSMgIAAXFxemT5+u3RcUFMSYMWPw9vamV69eJCYmUrNmzRLHFhISgre3N82aNcPFxcVs9e+2oUOH8sorrxAQEEDLli05d+4cw4cPL9Uz8PLy4qeffuLgwYO0aNGCVq1asW7dOqysbn2jbtq0abz66qu8+eabNGnShKNHjxIbG0uFChWAW6uL48ePp2HDhrRr1w5LS0tiYmIAsLKyYs6cOSxYsAA3Nzct6RYPh06VdqOzEEIIIYQQZejatWtkZGTg4eGBjY1NWYdTYh06dKBx48bMnj27rEMRwkxJP1Oy8iiEEEIIIYQQoliSPAohhBBCCCGEKJa8qkMIIYQQQojHwGAwlHUIQjwQWXkUQgghhBBCCFEsSR6FEEIIIYR4CnTo0IHRo0eXdRhPDJ1Ox9q1a8s6jCeKbFsVQgghhBDiKbB69WrtlR5l5WGfKOvu7s7o0aMfSVKcnZ2tvf5DlIwkj0IIIYQQQjwFKlasWNYhlIhSCpPJpL3XsaxUq1atTMd/Esm2VSGEEEIIIR6DH374gQYNGqDX66lUqRJdunTh6tWrACQmJtK1a1cqV66Mk5MT7du359dff9Xu7d+/PwEBAWb95efnU7lyZZYuXQrcvW3V3d2dqVOnMmjQIBwcHKhZsyYLFy4062PXrl00btwYGxsbmjVrxtq1a9HpdCQlJRU5j7lz5+Lp6YmNjQ1Vq1bltddeAyA4OJjt27cTGRmJTqdDp9ORmZmJwWBAp9OxadMmmjZtSvny5dm5cyfp6en07NmTqlWrYm9vT/Pmzdm6das2TocOHTh27Bjvvvuu1t9tO3fupG3btuj1emrUqMHIkSO1Zwm3VhVffPFF9Ho9Hh4efPvtt7i7u5utiN65bfX48eP07dsXZ2dnKlasSM+ePcnMzNTqDQYDLVq0wM7ODmdnZ9q0acOxY8eKfE5PI0kehRBCCCGEeMSys7Pp168fgwYNIjU1FYPBwCuvvIJSCoArV64wYMAAdu7cye7du/H09KR79+5cuXIFgMDAQP7zn/9gNBq1PmNjY8nNzaV3795Fjjtz5kyaNWvGgQMHGD58OG+//TZpaWkAXL58mR49etCgQQN+/fVXPv74Y8aNG3fPeezbt4+RI0cyZcoU0tLS2Lx5M+3atQMgMjKSVq1aERISQnZ2NtnZ2dSoUUO794MPPmDatGmkpqbSsGFDjEYj3bt3Jy4ujgMHDuDv70+PHj3IysoCbm3DfeaZZ5gyZYrWH0B6ejr+/v68+uqrHDp0iJUrV7Jz505CQ0O1sYKCgvjjjz8wGAysWrWKhQsXkpOTU+S88vPz8fPzw8HBgR07dhAfH4+9vT3+/v7cuHGDmzdv0qtXL9q3b8+hQ4dISEhgyJAhZgnt34ISQgghhBDiCZKXl6dSUlJUXl5eWYdS
Yvv371eAyszMLFF7k8mkHBwc1H/+8x+llFL5+fmqcuXKaunSpVqbfv36qYCAAO26ffv2atSoUdp1rVq11BtvvKFdFxQUqCpVqqh58+YppZSaN2+eqlSpktlzXLRokQLUgQMHCo1r1apVytHRUV2+fLnQ+jtjUEqpbdu2KUCtXbu22HnXq1dPffHFF2ZzmDVrllmbwYMHqyFDhpiV7dixQ1lYWKi8vDyVmpqqAJWYmKjVHzlyRAFmfQFqzZo1Simlli1bpry9vVVBQYFWf/36daXX61VsbKw6d+6cApTBYCh2Dk+ikn6mZOVRCCGEEEKIR6xRo0Z07tyZBg0a0KdPHxYtWsSFCxe0+tOnTxMSEoKnpydOTk44OjpiNBq1VTgrKyv69u3L8uXLAbh69Srr1q0jMDDwnuM2bNhQ+12n01GtWjVtBS4tLY2GDRtiY2OjtWnRosU9++vatSu1atXi2Wef5c0332T58uXk5uaW6Bk0a9bM7NpoNBIWFoaPjw/Ozs7Y29uTmpqqzbkoBw8eJDo6Gnt7e+3Hz8+PgoICMjIySEtLw8rKiiZNmmj31KlT556H4xw8eJCjR4/i4OCg9VmxYkWuXbtGeno6FStWJDg4GD8/P3r06EFkZKS2Evp3IsmjEEIIIYQQj5ilpSVbtmxh06ZN1K1bly+++AJvb28yMjIAGDBgAElJSURGRrJr1y6SkpKoVKkSN27c0PoIDAwkLi6OnJwc1q5di16vx9/f/57j3nn6qk6no6Cg4L7n4eDgwK+//sqKFStwdXUlPDycRo0acfHixWLvtbOzM7sOCwtjzZo1TJ06lR07dpCUlESDBg3M5lwYo9HI0KFDSUpK0n4OHjzIkSNHqF279n3Ny2g00rRpU7M+k5KSOHz4MP379wcgKiqKhIQEWrduzcqVK/Hy8mL37t33Nd6TSpJHIYQQQgjxt6SU4txZE1mZNzl31qR9//BR0el0tGnThsmTJ3PgwAGsra1Zs2YNAPHx8YwcOZLu3btTr149ypcvz9mzZ83ub926NTVq1GDlypUsX76cPn36PNCrOby9vUlOTub69etaWWJiYrH3WVlZ0aVLF6ZPn86hQ4fIzMzk559/BsDa2hqTyVSi8ePj4wkODqZ37940aNCAatWqmR1QU1R/TZo0ISUlhTp16tz1Y21tjbe3Nzdv3uTAgQPaPUePHjVb6b1TkyZNOHLkCFWqVLmrTycnJ62dr68v48ePZ9euXdSvX59vv/22RHN9WkjyKIQQQggh/lYuXSxgwReXaeHzB/9wO0FTr5P8w+0ELXz+YMEXl7l08f5X5oqyZ88epk6dyr59+8jKymL16tWcOXMGHx8fADw9PVm2bBmpqans2bOHwMBA9Hr9Xf3079+f+fPns2XLlmK3rBanf//+FBQUMGTIEFJTU4mNjWXGjBkARR4Es379eubMmUNSUhLHjh1j6dKlFBQU4O3tDdw64XXPnj1kZmZy9uzZe65yenp6snr1am3l8HY8f+bu7s4vv/zCyZMntWR63Lhx7Nq1i9DQUJKSkjhy5Ajr1q3TDsz5xz/+QZcuXRgyZAh79+7lwIEDDBkyBL1eX+S8AgMDqVy5Mj179mTHjh1kZGRgMBgYOXIkJ06cICMjg/Hjx5OQkMCxY8f46aefOHLkiPb3+7uQ5FEIIYQQQvxt/PxTHg09TjAx7ALHMm6a1R3LuMnEsAs09DjBzz/lPdRxHR0d+eWXX+jevTteXl5MmDCBmTNn0q1bNwC+/vprLly4QJMmTXjzzTcZOXIkVapUuaufwMBAUlJSqF69Om3atHngmP7zn/+QlJRE48aN+eijjwgPDwcw+x7knzk7O7N69Wo6deqEj48P8+fPZ8WKFdSrVw+4tRXV0tKSunXr4uLics/vL/7rX/+iQoUKtG7dmh49euDn52f2PUWAKVOmkJmZSe3atXFxcQFufY9z+/btHD58mLZt2+Lr60t4eDhubm7afUuXLqVq1aq0a9eO
3r17ExISgoODQ5HzsrW15ZdffqFmzZq88sor+Pj4MHjwYK5du4ajoyO2trb8/vvvvPrqq3h5eTFkyBBGjBjB0KFDS/7AnwI69ajX54UQQgghhHiIrl27RkZGBh4eHkUmA4X5+ac8+vfMQSm419f+LCxAp4Nv11Wh0wt3r/49zZYvX87AgQO5dOlSoSufT6oTJ05Qo0YNtm7dSufOncs6nL+ckn6mrB5jTEIIIYQQQpSJSxcLGBhwptjEEW7VW1jAwIAzHMp4Bifnp3ez3tKlS3n22WepXr06Bw8eZNy4cfTt2/eJTxx//vlnjEYjDRo0IDs7m7Fjx+Lu7q69k1Lcn6f3kyCEEEIIIcT/iVlmJC9XFZs43lZQAHm5ipXfGB9tYGXs1KlTvPHGG/j4+PDuu+/Sp08fFi5cWNZhPbD8/Hw+/PBD6tWrR+/evXFxccFgMDzQAUNCtq0KIYQQQognTGm3rSqlaOHzB8cyblKaf/nqdFDLw4q9qW5FHrQixNOgpJ8pWXkUQgghhBBPtfPnCsj8X+kSRwClIPN/N7lw/uGfvirEk0iSRyGEEEII8VS7anywjXbGK493o150dDTOzs7adUREBI0bN36sMTyozMxMdDodSUlJRbYxGAzodDouXrxYZBudTsfatWsfenx36tChA6NHj37k4zzpJHkUQgghhBBPNTv7B9tyau/wcLasBgcHo9PpmDZtmln52rVrzbbFBgQEcPjw4Ycy5pMuOztbe52JuCU4OJhevXqVydiSPAohhBBCiKdaxUoWuD9rRWm/tqjTgfuzVlSo+PD+yWxjY8Pnn3/OhQsXimyj1+sLfcfj31G1atUoX758WYch/o8kj0IIIYQQ4qmm0+l4a4TDfd0bEurwUA/L6dKlC9WqVeOzzz4rss2d21bvlJ6ezrPPPktoaChKKa5fv05YWBjVq1fHzs6Oli1bYjAY7hnHxYsXGTp0KFWrVsXGxob69euzfv16rX7VqlXUq1eP8uXL4+7uzsyZM83uL2w7qbOzM9HR0UWOuXHjRry8vNDr9XTs2JHMzMx7xnjnOLe3wn733Xe0bdsWvV5P8+bNOXz4MImJiTRr1gx7e3u6devGmTNntD5ur9RNnjwZFxcXHB0dGTZsGDdu3Chy3OKe6e2/0fr16/H29sbW1pbXXnuN3NxclixZgru7OxUqVGDkyJGYTKZS9xsbG4uPjw/29vb4+/uTnZ0N3NrCvGTJEtatW4dOp0On0xX7t36YJHkUQgghhBBPvdfftEdvq8OihP/6tbAAva2OgDfsH2oclpaWTJ06lS+++IITJ06U+v5Dhw7x/PPP079/f7788kt0Oh2hoaEkJCQQExPDoUOH6NOnD/7+/hw5cqTQPgoKCujWrRvx8fF88803pKSkMG3aNCwtLQHYv38/ffv25fXXXyc5OZmIiAgmTpx4z8SwOMePH+eVV16hR48eJCUl8dZbb/HBBx/cV1+TJk1iwoQJ/Prrr1hZWdG/f3/Gjh1LZGQkO3bs4OjRo4SHh5vdExcXR2pqKgaDgRUrVrB69WomT55c5Bgleaa5ubnMmTOHmJgYNm/ejMFgoHfv3mzcuJGNGzeybNkyFixYwA8//FDqfmfMmMGyZcv45ZdfyMrKIiwsDICwsDD69u2rJZTZ2dm0bt36vp7jfVFCCCGEEEI8QfLy8lRKSorKy8sr1X1xsbmqqk2mqlI+U1UuV/RPlfKZqqpNpvr5p9yHGveAAQNUz549lVJKPffcc2rQoEFKKaXWrFmj/vzP8qioKOXk5KRdT5o0STVq1EjFx8erChUqqBkzZmh1x44dU5aWlurkyZNmY3Xu3FmNHz++0DhiY2OVhYWFSktLK7S+f//+qmvXrmZl77//vqpbt652Dag1a9aYtXFyclJRUVFKKaUyMjIUoA4cOKCUUmr8+PFm9yul1Lhx4xSgLly4UGgcd45zu8/Fixdr9StWrFCAiouL08o+++wz5e3trV0PGDBAVaxY
UV29elUrmzdvnrK3t1cmk0kppVT79u3VqFGjlFIle6ZRUVEKUEePHtXqhw4dqmxtbdWVK1e0Mj8/PzV06NAH6verr75SVatWNZvP7f+OHpaSfqasHl+aKoQQQgghRNnp9IKeb9dVYWDAGfJyb52g+ufXd9zenWqj1xH9nQsdu+ofWSyff/45nTp10laUipOVlUXXrl359NNPzU4FTU5OxmQy4eXlZdb++vXrVKpUqdC+kpKSeOaZZ+6657bU1FR69uxpVtamTRtmz56NyWTSVihLIzU1lZYtW5qVtWrVqtT9ADRs2FD7vWrVqgA0aNDArCwnJ8fsnkaNGmFra2s2ttFo5Pjx49SqVcusbUmfqa2tLbVr1zYb193dHXt7e7Oy27Hcb7+urq53zaesSPIohBBCCCH+Njq9oOdQxjOs/MbIoi+vkPm/m1pdLQ8rQkIdeP1NexydHu23u9q1a4efnx/jx48nODi42PYuLi64ubmxYsUKBg0ahKOjIwBGoxFLS0v2799/V1L35yTmz/T6B0+KdTod6o4XZ+bn5z9wvyVRrlw5szgKKysouP93c5b0mf55zNvjFlZ2O5YH6ffOZ11WJHkUQgghhBB/K07OFgwJdSRkhAMXzhdgvKKwd9BRoaLFQz0cpzjTpk2jcePGeHt7F9tWr9ezfv16unfvjp+fHz/99BMODg74+vpiMpnIycmhbdu2JRq3YcOGnDhxgsOHDxe6+ujj40N8fLxZWXx8PF5eXlrS4+Lioh3iAnDkyBFyc3OLHNPHx4cff/zRrGz37t0livdhOHjwIHl5eVrivHv3buzt7alRo8Zdbe/nmZbEw+rX2tra7BCex0kOzBFCCCGEEH9LOp2OipUsqeluRcVKlo81cYRbWy0DAwOZM2dOidrb2dmxYcMGrKys6NatG0ajES8vLwIDAwkKCmL16tVkZGSwd+9ePvvsMzZs2FBoP+3bt6ddu3a8+uqrbNmyhYyMDDZt2sTmzZsBGDNmDHFxcXz88cccPnyYJUuW8OWXX5ptse3UqRNffvklBw4cYN++fQwbNuyuFbM/GzZsGEeOHOH9998nLS2Nb7/99oEO4CmtGzduMHjwYFJSUti4cSOTJk0iNDQUi0JOULqfZ1oSD6tfd3d3Dh06RFpaGmfPnn1sK74gyaMQQgghhBBlZsqUKaXaYmlvb8+mTZtQSvHiiy9y9epVoqKiCAoKYsyYMXh7e9OrVy8SExOpWbNmkf2sWrWK5s2b069fP+rWrcvYsWO11awmTZrw3XffERMTQ/369QkPD2fKlClm22tnzpxJjRo1aNu2Lf379ycsLMzsO4V3qlmzJqtWrWLt2rU0atSI+fPnM3Xq1BLP+0F17twZT09P2rVrR0BAAC+//DIRERFFtr+fZ1oSD6PfkJAQvL29adasGS4uLnetEj9KOvVX2UArhBBCCCFECVy7do2MjAw8PDywsbEp63DEX1xwcDAXL168672U4v8r6WdKVh6FEEIIIYQQQhRLkkchhBBCCCGEEMWS01aFEEIIIYQQT63HeTDP005WHoUQQgghhBBCFEuSRyGEEEIIIZ4g0dHRODs7l3UYZSIiIoLGjRuXdRh/W5I8CiGEEEII8Rh06NCB0aNHP3A/AQEBHD58+MEDegKFhYURFxenXQcHB9OrV6+yC+j//F0SevnOoxBCCCGEEE8QvV6PXq9/pGPk5+dTrly5RzrG/bC3t8fe3r6sw/jbkpVHIYQQQgghHrHg4GC2b99OZGQkOp0OnU5HZmYmANu3b6dFixaUL18eV1dXPvjgA27evFlkX3euct3eyrls2TLc3d1xcnLi9ddf58qVK1qbgoICpk+fTp06dShfvjw1a9bk008/BSAzMxOdTsfKlStp3749NjY2LF++HIDFixfj4+ODjY0N//jHP5g7d65ZLOPGjcPLywtbW1ueffZZJk6cSH5+vlZ/8OBBOnbsiIODA46OjjRt2pR9+/Zp9Tt37qRt27bo
9Xpq1KjByJEjuXr1apFz//O21YiICJYsWcK6deu0Z2owGLhx4wahoaG4urpiY2NDrVq1+Oyzz+75t+nVqxczZszA1dWVSpUqMWLECLN5XL9+nbCwMKpXr46dnR0tW7bEYDAAYDAYGDhwIJcuXdLiiIiIKHK8J5msPAohhBBCCPGIRUZGcvjwYerXr8+UKVMAcHFx4eTJk3Tv3p3g4GCWLl3K77//TkhICDY2NqVKQNLT01m7di3r16/nwoUL9O3bl2nTpmkJ4vjx41m0aBGzZs3i+eefJzs7m99//92sjw8++ICZM2fi6+urJZDh4eF8+eWX+Pr6cuDAAUJCQrCzs2PAgAEAODg4EB0djZubG8nJyYSEhODg4MDYsWMBCAwMxNfXl3nz5mFpaUlSUpK2opmeno6/vz+ffPIJ//73vzlz5gyhoaGEhoYSFRVV7JzDwsJITU3l8uXLWvuKFSsyZ84cfvzxR7777jtq1qzJ8ePHOX78+D372rZtG66urmzbto2jR48SEBBA48aNCQkJASA0NJSUlBRiYmJwc3NjzZo1+Pv7k5ycTOvWrZk9ezbh4eGkpaUBPL2ro0oIIYQQQognSF5enkpJSVF5eXllHUqptG/fXo0aNcqs7MMPP1Te3t6qoKBAK/vqq6+Uvb29MplMhfYTFRWlnJyctOtJkyYpW1tbdfnyZa3s/fffVy1btlRKKXX58mVVvnx5tWjRokL7y8jIUICaPXu2WXnt2rXVt99+a1b28ccfq1atWhU5x3/+85+qadOm2rWDg4OKjo4utO3gwYPVkCFDzMp27NihLCwsivzbTpo0STVq1Ei7HjBggOrZs6dZm3feeUd16tTJ7Jney4ABA1StWrXUzZs3tbI+ffqogIAApZRSx44dU5aWlurkyZNm93Xu3FmNHz9eKXX33+RJU9LPlKw8CiGEEEIIUUZSU1Np1aoVOp1OK2vTpg1Go5ETJ05Qs2bNEvXj7u6Og4ODdu3q6kpOTo42xvXr1+ncufM9+2jWrJn2+9WrV0lPT2fw4MHa6hvAzZs3cXJy0q5XrlzJnDlzSE9Px2g0cvPmTRwdHbX69957j7feeotly5bRpUsX+vTpQ+3atYFbW1oPHTqkbZEFUEpRUFBARkYGPj4+JZr7nYKDg+natSve3t74+/vz0ksv8cILL9zznnr16mFpaaldu7q6kpycDEBycjImkwkvLy+ze65fv06lSpXuK8YnlSSPQgghhBBCPOHuPNxGp9NRUFAAUOLDdezs7LTfjUYjAIsWLaJly5Zm7W4nWQkJCQQGBjJ58mT8/PxwcnIiJiaGmTNnam0jIiLo378/GzZsYNOmTUyaNImYmBh69+6N0Whk6NChjBw58q5YSpo0F6ZJkyZkZGSwadMmtm7dSt++fenSpQs//PBDkffc6/kZjUYsLS3Zv3+/WYIJT/H21CJI8iiEEEIIIcRjYG1tjclkMivz8fFh1apVKKW01cf4+HgcHBx45plnHsq4np6e6PV64uLieOutt0p0T9WqVXFzc+N///sfgYGBhbbZtWsXtWrV4qOPPtLKjh07dlc7Ly8vvLy8ePfdd+nXrx9RUVH07t2bJk2akJKSQp06de5vYhT+TAEcHR0JCAggICCA1157DX9/f86fP0/FihVLPYavry8mk4mcnBzatm1bqjieNpI8CiGEEEII8Ri4u7uzZ88eMjMzsbe3p2LFigwfPpzZs2fzzjvvEBoaSlpaGpMmTeK9997DwuLhvBjBxsaGcePGMXbsWKytrWnTpg1nzpzht99+Y/DgwUXeN3nyZEaOHImTkxP+/v5cv36dffv2ceHCBd577z08PT3JysoiJiaG5s2bs2HDBtasWaPdn5eXx/vvv89rr72Gh4cHJ06cIDExkVdffRW4dVLrc889R2hoKG+99RZ2dnakpKSwZcsWvvzyyxLNzd3dndjYWNLS0qhUqRJOTk588cUXuLq64uvri4WFBd9//z3VqlW77/cw
enl5ERgYSFBQkHag0JkzZ4iLi6Nhw4a8+OKLuLu7YzQaiYuLo1GjRtja2mJra3tf4/2Vyas6hBBCCCGEeAzCwsKwtLSkbt26uLi4kJWVRfXq1dm4cSN79+6lUaNGDBs2jMGDBzNhwoSHOvbEiRMZM2YM4eHh+Pj4EBAQoH0nsihvvfUWixcvJioqigYNGtC+fXuio6Px8PAA4OWXX+bdd98lNDSUxo0bs2vXLiZOnKjdb2lpyblz5wgKCsLLy4u+ffvSrVs3Jk+eDEDDhg3Zvn07hw8fpm3btvj6+hIeHo6bm1uJ5xUSEoK3tzfNmjXDxcVFW7WdPn06zZo1o3nz5mRmZrJx48YHSsajoqIICgpizJgxeHt706tXLxITE7Xtta1bt2bYsGEEBATg4uLC9OnT73usvzKdUkqVdRBCCCGEEEKU1LVr18jIyMDDwwMbG5uyDkeIJ15JP1Oy8iiEEEIIIYQQoliSPAohhBBCCCGEKJYkj0IIIYQQQgghiiXJoxBCCCGEEEKIYknyKIQQQgghhBCiWJI8CiGEEEIIIR6J4OBgevXqVdZhiIdEkkchhBBCCCHEIxEZGUl0dPQD93P+/HlGjx5NrVq1sLa2xs3NjUGDBpGVlWXWbt68eTRs2BBHR0ccHR1p1aoVmzZtumffixYtom3btlSoUIEKFSrQpUsX9u7da9ZGKUV4eDiurq7o9Xq6dOnCkSNHtPrMzEwGDx6Mh4cHer2e2rVrM2nSJG7cuFHomEePHsXBwQFnZ2ez8q+//pqmTZtiZ2dHrVq1mDVrVime0qMnyaMQQgghhBBloKjE4mni5OR0V4JUWufPn+e5555j69atzJ8/n6NHjxITE8PRo0dp3rw5//vf/7S2zzzzDNOmTWP//v3s27ePTp060bNnT3777bci+zcYDPTr149t27aRkJBAjRo1eOGFFzh58qTWZvr06cyZM4f58+ezZ88e7Ozs8PPz49q1awD8/vvvFBQUsGDBAn777TdmzZrF/Pnz+fDDD+8aLz8/n379+tG2bdu76n7++WcmTpxIcnIyEyZMYMyYMWzfvv1BHt/DpYQQQgghhHiC5OXlqZSUFJWXl1fWoZRK+/bt1YgRI9SoUaNUpUqVVIcOHZRSSiUnJyt/f39lZ2enqlSpot544w115swZpZRSCxYsUK6urspkMpn19fLLL6uBAwdq12vXrlW+vr6qfPnyysPDQ0VERKj8/HytHlCLFi1SvXr1Unq9XtWpU0etW7dOq4+KilJOTk5mY6xZs0bdmS4UN86dBgwYoHr27Gn2DN555x31/vvvqwoVKqiqVauqSZMm3fO5DRs2TNnZ2ans7Gyz8tzcXFW9enXl7+9/z/srVKigFi9efM82f3bz5k3l4OCglixZopRSqqCgQFWrVk3985//1NpcvHhRlS9fXq1YsaLIfqZPn648PDzuKh87dqx64403Cn3mf1ZQUKCcnJzUsmXLShz7/SrpZ0pWHoUQQgghhHhMlixZgrW1NfHx8cyfP5+LFy/SqVMnfH192bdvH5s3b+b06dP07dsXgD59+nDu3Dm2bdum9XH+/Hk2b95MYGAgADt27CAoKIhRo0aRkpLCggULiI6O5tNPPzUbe/LkyfTt25dDhw7RvXt3AgMDOX/+fIljL+k4JXkGdnZ27Nmzh+nTpzNlyhS2bNlSaNuCggJiYmIIDAykWrVqZnV6vZ7hw4cTGxtb6DxMJhMxMTFcvXqVVq1alTi+3Nxc8vPzqVixIgAZGRmcOnWKLl26aG2cnJxo2bIlCQkJRfZz6dIlrY/bfv75Z77//nu++uqrYuOIiIjA1taWbt26lTj2R+6Rp7FCCCGEEEI8RE/yyqOvr69Z2ccff6xeeOEFs7Ljx48rQKWlpSmllOrZs6caNGiQVr9gwQLl5uamrUZ27txZTZ061ayPZcuWKVdXV+0aUBMmTNCujUajAtSmTZuUUiVbeSzJOHcqbOXx+eefN2vTvHlzNW7c
uELvP3XqlALUrFmzCq1fvXq1AtSePXu0skOHDik7OztlaWmpnJyc1IYNG4qMrzBvv/22evbZZ7X/vuLj4xWg/vjjD7N2ffr0UX379i20jyNHjihHR0e1cOFCrezs2bOqRo0aavv27Uqpwp/5bZMnT1ZVq1ZV//3vf0sV+/0q6WfKqgzzViGEEEIIIf5WmjZtanZ98OBBtm3bhr29/V1t09PT8fLyIjAwkJCQEObOnUv58uVZvnw5r7/+OhYWFlof8fHxZiuAJpOJa9eukZubi62tLQANGzbU6u3s7HB0dCQnJ6fEsZd0nOL8OQ4AV1fXYuNQSt2z3traWvvd29ubpKQkLl26xA8//MCAAQPYvn07devWLTa2adOmERMTg8FgwMbGptj2hTl58iT+/v706dOHkJAQrTwkJIT+/fvTrl27e95/+vRpIiIi2LRpE/Xq1buvGB4VSR6FEEIIIYR4TOzs7MyujUYjPXr04PPPP7+rraurKwA9evRAKcWGDRto3rw5O3bsMDuF02g0MnnyZF555ZW7+vhzAlSuXDmzOp1OR0FBAQAWFhZ3JWj5+fl3xVqScYpzrzju5OLigrOzM6mpqYXWp6amYmVlhYeHh1ZmbW1NnTp1gFvJemJiIpGRkSxYsOCecc2YMYNp06axdetWswT39nbZ06dPa3+T29eNGzc26+OPP/6gY8eOtG7dmoULF5rV/fzzz/z444/MmDEDuJUQFxQUYGVlxcKFCxk0aBAAp06dQimFt7f3PeMtC5I8CiGEEEIIUUaaNGnCqlWrcHd3x8qq8H+a29jY8Morr7B8+XKOHj2Kt7c3TZo0MesjLS1NS5juh4uLC1euXOHq1atagpuUlHRXrA86TmlZWFjQt29fli9fzpQpU8y+95iXl8fcuXPp3bs3Tk5ORfZRUFDA9evX7znO9OnT+fTTT4mNjaVZs2ZmdR4eHlSrVo24uDgtWbx8+TJ79uzh7bff1tqdPHmSjh070rRpU6KiorSV4dsSEhIwmUza9bp16/j888/ZtWsX1atX18q9vLxITEzEzc3tnjGXBUkehRBCCCGEKCMjRoxg0aJF9OvXj7Fjx1KxYkXtVRSLFy/G0tISgMDAQF566SV+++033njjDbM+wsPDeemll6hZsyavvfYaFhYWHDx4kP/+97988sknJYqjZcuW2Nra8uGHHzJy5Ej27Nlz1/sZH8Y49+PTTz8lLi6Orl27Mn36dOrXr09GRgYTJkzAwsKCyMhIre348ePp1q0bNWvW5MqVK3z77bcYDAZiY2OL7P/zzz8nPDycb7/9Fnd3d06dOgWAvb099vb26HQ6Ro8ezSeffIKnpyceHh5MnDgRNzc3evXqBdxKHDt06ECtWrWYMWMGZ86c0fq/nfD6+PiYjbtv3z4sLCyoX7++WXlycjJBQUHExcWZJZV/BXLaqhBCCCGEEGXEzc2N+Ph4TCYTL7zwAg0aNGD06NE4OzubrVx16tSJihUrkpaWRv/+/c368PPzY/369fz00080b96c5557jlmzZlGrVq0Sx1GxYkW++eYbNm7cSIMGDVixYgUREREPfZz7UblyZXbv3k3Hjh0ZOnQoHh4etG/fHpPJRFJSktlW0pycHIKCgvD29qZz584kJiYSGxtL165di+x/3rx53Lhxg9deew1XV1ft5/b2UoCxY8fyzjvvMGTIEJo3b47RaGTz5s3adt0tW7Zw9OhR4uLieOaZZ8z6Ka3c3FzS0tLu2jb8V6BTxX37VAghhBBCiL+Qa9eukZGRgYeHx30faiKebF9//TXDhw9n5cqV2uqfuH8l/UzJyqMQQgghhBDiiTJ48GBiYmJITU0lLy+vrMP525DvPAohhBBCCCGeOL179y7rEP52ZOVRCCGEEEIIIUSxJHkUQgghhBBCCFEsSR6FEEIIIcQTSc59FOLhKOlnSZJHIYQQQgjxRClXrhxw65UGQogHd/uzdPuzVRQ5MEcIIYQQQjxRLC0tcXZ2
JicnBwBbW1t0Ol0ZRyXEk0cpRW5uLjk5OTg7O2NpaXnP9vKeRyGEEEII8cRRSnHq1CkuXrxY1qEI8cRzdnamWrVqxf5PGEkehRBCCCHEE8tkMpGfn1/WYQjxxCpXrlyxK463SfIohBBCCCGEEKJYcmCOEEIIIYQQQohiSfIohBBCCCGEEKJYkjwKIYQQQgghhCiWJI9CCCGEEEIIIYolyaMQQgghhBBCiGJJ8iiEEEIIIYQQoliSPAohhBBCCCGEKNb/A4dCSFU0IqY4AAAAAElFTkSuQmCC",
+ "text/plain": [
+ "