Merge branch 'kyegomez:master' into master

pull/296/head
commit d1035e54bc by pliny (committed via GitHub)

.github/action.yml

@@ -0,0 +1,37 @@
name: "Init Environment"
description: "Initialize environment for tests"
runs:
  using: "composite"
  steps:
    - name: Checkout actions
      uses: actions/checkout@v3
    - name: Set up Python ${{ matrix.python-version }}
      # Note: the cache key below reads steps.setup-python.outputs.python-version,
      # so this step needs an explicit id to expose that output.
      id: setup-python
      uses: actions/setup-python@v4
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install and configure Poetry
      uses: snok/install-poetry@v1
      with:
        virtualenvs-create: true
        virtualenvs-in-project: true
        installer-parallel: true
    - name: Load cached venv
      id: cached-poetry-dependencies
      uses: actions/cache@v3
      with:
        path: .venv
        key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }}
    - name: Install dependencies
      if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
      run: poetry install --no-interaction --no-root --with test --with dev --all-extras
      shell: bash
    - name: Activate venv
      run: |
        source .venv/bin/activate
        echo PATH=$PATH >> $GITHUB_ENV
      shell: bash

@@ -56,6 +56,6 @@ jobs:
# Upload the SARIF file generated in the previous step
- name: Upload SARIF results file
-        uses: github/codeql-action/upload-sarif@v2
+        uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: results.sarif

@@ -50,7 +50,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
-        uses: github/codeql-action/init@v2
+        uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -64,7 +64,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
-        uses: github/codeql-action/autobuild@v2
+        uses: github/codeql-action/autobuild@v3
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -77,6 +77,6 @@ jobs:
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v2
+        uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

@@ -1,5 +1,3 @@
----
-# This is a basic workflow to help you get started with Actions
name: Lint

@@ -11,5 +11,5 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
-      - uses: actions/labeler@v4
+      - uses: actions/labeler@v5
if: ${{ github.event.pull_request.draft == false }}

@@ -1,4 +1,3 @@
----
name: Pull Request Checks
on:
@@ -22,6 +21,7 @@ jobs:
- name: Install dependencies
run: |
pip install -r requirements.txt
+      pip install swarms
pip install pytest
- name: Run tests and checks

@@ -26,7 +26,7 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
-      pip install flake8 pytest
+      pip install flake8 pytest swarms
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with flake8
run: |

@@ -28,7 +28,7 @@ jobs:
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade swarms
-      python -m pip install flake8 pytest
+      python -m pip install flake8 pytest swarms
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Lint with flake8
run: |

@@ -1,4 +1,3 @@
----
name: Upload Python Package
on: # yamllint disable-line rule:truthy

@@ -19,7 +19,7 @@ jobs:
pull-requests: write
steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v9
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-issue-stale: 14

@@ -1,4 +1,3 @@
----
name: test
on:
@@ -10,6 +9,7 @@ on:
env:
POETRY_VERSION: "1.4.2"
jobs:
test:
runs-on: ubuntu-latest
strategy:
@@ -30,7 +30,7 @@ env:
python-version: ${{ matrix.python-version }}
poetry-version: "1.4.2"
cache-key: ${{ matrix.test_type }}
-      install-command: |
+      install-command:
if [ "${{ matrix.test_type }}" == "core" ]; then
echo "Running core tests, installing dependencies with poetry..."
poetry install

@@ -16,8 +16,8 @@ jobs:
name: Test Py3.11
runs-on: ubuntu-latest
steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Run tests
@@ -29,8 +29,8 @@ jobs:
name: Test Py3.10
runs-on: ubuntu-latest
steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Run tests
@@ -42,8 +42,8 @@ jobs:
name: Test Py3.9
runs-on: ubuntu-latest
steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
with:
python-version: '3.9'
- name: Run tests
@@ -55,8 +55,8 @@ jobs:
name: pytype 3.10
runs-on: ubuntu-latest
steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Run pytype
@@ -69,8 +69,8 @@ jobs:
name: Check format with black
runs-on: ubuntu-latest
steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Check format

@@ -16,7 +16,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
-          python-version: 3.x
+          python-version: "3.10"
- name: Install dependencies
run: |
@@ -24,4 +24,4 @@ jobs:
pip install pytest
- name: Run unit tests
-      run: pytest
\ No newline at end of file
+      run: pytest

@@ -24,6 +24,7 @@ jobs:
run: |
pip install -r requirements.txt
pip install pytest
+      pip install swarms
- name: Run Python unit tests
run: pytest

.gitignore

@@ -18,6 +18,7 @@ venv
swarms/agents/.DS_Store
_build
conversation.txt
+stderr_log.txt
.vscode

@@ -27,7 +27,7 @@ Run example in Collab: <a target="_blank" href="https://colab.research.google.co
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
-### `Agent` Example
+### `Agent`
- Reliable structure that provides LLMs with autonomy
- Extremely customizable with stopping conditions, interactivity, dynamic temperature, loop intervals, and much more
- Enterprise grade + production grade: `Agent` is designed and optimized for automating real-world tasks at scale!
@@ -64,6 +64,45 @@ agent.run("Generate a 10,000 word blog on health and wellness.")
```
### `ToolAgent`
- Versatility: The ToolAgent class is designed to be flexible and adaptable. It can be used with any model and tokenizer, making it suitable for a wide range of tasks. This versatility means that you can use ToolAgent as a foundation for any tool that requires language model processing.
- Ease of Use: With its simple and intuitive interface, ToolAgent makes it easy to perform complex tasks. Just initialize it with your model, tokenizer, and JSON schema, and then call the run method with your task. This ease of use allows you to focus on your task, not on setting up your tools.
- Customizability: ToolAgent accepts variable length arguments and keyword arguments, allowing you to customize its behavior to suit your needs. Whether you need to adjust the temperature of the model's output, limit the number of tokens, or tweak any other parameter, ToolAgent has you covered. This customizability ensures that ToolAgent can adapt to your specific requirements.
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
json_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "number"},
"is_student": {"type": "boolean"},
"courses": {
"type": "array",
"items": {"type": "string"}
}
}
}
task = "Generate a person's information based on the following schema:"
agent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)
generated_data = agent.run(task)
print(generated_data)
```
------
@@ -123,19 +162,139 @@ workflow.run()
# Output the results
for task in workflow.tasks:
print(f"Task: {task.description}, Result: {task.result}")
```
### `ConcurrentWorkflow`
- Run all the tasks at the same time
```python
import os
from dotenv import load_dotenv
from swarms import OpenAIChat, Task, ConcurrentWorkflow, Agent
# Load environment variables from .env file
load_dotenv()
# Initialize the language model
llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))
agent = Agent(llm=llm, max_loops=1)
# Create a workflow
workflow = ConcurrentWorkflow(max_workers=5)
# Create tasks
task1 = Task(agent, "What's the weather in miami")
task2 = Task(agent, "What's the weather in new york")
task3 = Task(agent, "What's the weather in london")
# Add tasks to the workflow
workflow.add(task1)
workflow.add(task2)
workflow.add(task3)
# Run the workflow
workflow.run()
```
-## `Multi Modal Autonomous Agents`
-- Run the agent with multiple modalities useful for various real-world tasks in manufacturing, logistics, and health.
### `RecursiveWorkflow`
- Recursively iterate on a workflow until a specific token is detected.
```python
import os
from dotenv import load_dotenv
from swarms import OpenAIChat, Task, RecursiveWorkflow, Agent
# Load environment variables from .env file
load_dotenv()
# Initialize the language model
llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))
agent = Agent(llm=llm, max_loops=1)
# Create a workflow
workflow = RecursiveWorkflow(stop_token="<DONE>")
# Create tasks
task1 = Task(agent, "What's the weather in miami")
task2 = Task(agent, "What's the weather in new york")
task3 = Task(agent, "What's the weather in london")
# Add tasks to the workflow
workflow.add(task1)
workflow.add(task2)
workflow.add(task3)
# Run the workflow
workflow.run()
```
### `ModelParallelizer`
- Concurrent Execution of Multiple Models: The ModelParallelizer allows you to run multiple models concurrently, comparing their outputs. This feature enables you to easily compare the performance and results of different models, helping you make informed decisions about which model to use for your specific task.
- Plug-and-Play Integration: The structure provides a seamless integration with various models, including OpenAIChat, Anthropic, Mixtral, and Gemini. You can easily plug in any of these models and start using them without the need for extensive modifications or setup.
```python
# Description: This is an example of how to use the ModelParallelizer to run multiple models concurrently
import os
from dotenv import load_dotenv
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.structs import Agent
from swarms.models import Anthropic, Gemini, Mixtral, OpenAIChat
from swarms.swarms import ModelParallelizer
load_dotenv()
# API Keys
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")
gemini_api_key = os.getenv("GEMINI_API_KEY")
# Initialize the models
llm = OpenAIChat(openai_api_key=openai_api_key)
anthropic = Anthropic(anthropic_api_key=anthropic_api_key)
mixtral = Mixtral()
gemini = Gemini(gemini_api_key=gemini_api_key)
# Initialize the parallelizer
llms = [llm, anthropic, mixtral, gemini]
parallelizer = ModelParallelizer(llms)
# Set the task
task = "Generate a 10,000 word blog on health and wellness."
# Run the task
out = parallelizer.run(task)
# Print the responses 1 by 1
for i in range(len(out)):
print(f"Response from LLM {i}: {out[i]}")
```
### Simple Conversational Agent
- Plug-and-play conversational agent with `GPT4`, `Mixtral`, or any of our models
- Reliable conversational structure that holds messages together, with dynamic handling for long-context conversations and auto-chunking
- Reliable: this simple system will always provide the responses you want
```python
import os
from dotenv import load_dotenv
from swarms import (
OpenAIChat,
Conversation,
)
conv = Conversation(
time_enabled=True,
)
# Load the environment variables
load_dotenv()
@@ -144,65 +303,270 @@ load_dotenv()
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
-llm = GPT4VisionAPI(
-    openai_api_key=api_key,
-    max_tokens=500,
-)
+llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4")
# Run the language model in a loop
def interactive_conversation(llm):
conv = Conversation()
while True:
user_input = input("User: ")
conv.add("user", user_input)
if user_input.lower() == "quit":
break
task = (
conv.return_history_as_string()
) # Get the conversation history
out = llm(task)
conv.add("assistant", out)
print(
f"Assistant: {out}",
)
conv.display_conversation()
conv.export_conversation("conversation.txt")
# Replace with your LLM instance
interactive_conversation(llm)
-# Initialize the task
-task = (
-    "Analyze this image of an assembly line and identify any issues such as"
-    " misaligned parts, defects, or deviations from the standard assembly"
-    " process. IF there is anything unsafe in the image, explain why it is"
-    " unsafe and how it could be improved."
```
### `SwarmNetwork`
- Efficient Task Management: SwarmNetwork's intelligent agent pool and task queue management system ensures tasks are distributed evenly across agents. This leads to efficient use of resources and faster task completion.
- Scalability: SwarmNetwork can dynamically scale the number of agents based on the number of pending tasks. This means it can handle an increase in workload by adding more agents, and conserve resources when the workload is low by reducing the number of agents.
- Versatile Deployment Options: With SwarmNetwork, each agent can be run on its own thread, process, container, machine, or even cluster. This provides a high degree of flexibility and allows for deployment that best suits the user's needs and infrastructure.
```python
import os
from dotenv import load_dotenv
# Import the OpenAIChat model and the Agent struct
from swarms import OpenAIChat, Agent, SwarmNetwork
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(
temperature=0.5,
openai_api_key=api_key,
)
img = "assembly_line.jpg"
## Initialize the workflow
agent = Agent(
llm=llm,
max_loops="auto",
autosave=True,
dashboard=True,
multi_modal=True
agent = Agent(llm=llm, max_loops=1, agent_name="Social Media Manager")
agent2 = Agent(llm=llm, max_loops=1, agent_name=" Product Manager")
agent3 = Agent(llm=llm, max_loops=1, agent_name="SEO Manager")
# Load the swarmnet with the agents
swarmnet = SwarmNetwork(
agents=[agent, agent2, agent3],
)
# List the agents in the swarm network
out = swarmnet.list_agents()
print(out)
-# Run the workflow on a task
-agent.run(task=task, img=img)
out = swarmnet.run_single_agent(
agent2.id, "Generate a 10,000 word blog on health and wellness."
)
print(out)
# Run all the agents in the swarm network on a task
out = swarmnet.run_many_agents(
"Generate a 10,000 word blog on health and wellness."
)
print(out)
```
-### `OmniModalAgent`
-- An agent that can understand any modality and conditionally generate any modality.
### `Task`
Task Execution: The Task structure allows for the execution of tasks by an assigned agent. The run method is used to execute the task. It's like a Zapier for LLMs
- Task Description: Each Task can have a description, providing a human-readable explanation of what the task is intended to do.
- Task Scheduling: Tasks can be scheduled for execution at a specific time using the schedule_time attribute.
- Task Triggers: The set_trigger method allows for the setting of a trigger function that is executed before the task.
- Task Actions: The set_action method allows for the setting of an action function that is executed after the task.
- Task Conditions: The set_condition method allows for the setting of a condition function. The task will only be executed if this function returns True.
- Task Dependencies: The add_dependency method allows for the addition of dependencies to the task. The task will only be executed if all its dependencies have been completed.
- Task Priority: The set_priority method allows for the setting of the task's priority. Tasks with higher priority will be executed before tasks with lower priority.
- Task History: The history attribute is a list that keeps track of all the results of the task execution. This can be useful for debugging and for tasks that need to be executed multiple times.
```python
-from swarms.agents.omni_modal_agent import OmniModalAgent, OpenAIChat
import os
+from swarms.structs import Task, Agent
+from swarms.models import OpenAIChat
from dotenv import load_dotenv
-import os
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
-# Initialize the language model
-llm = OpenAIChat(
-    temperature=0.5,
-    model_name="gpt-4",
-    openai_api_key=api_key,
# Define a function to be used as the action
def my_action():
print("Action executed")
# Define a function to be used as the condition
def my_condition():
print("Condition checked")
return True
# Create an agent
agent = Agent(
llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
max_loops=1,
dashboard=False,
)
# Create a task
task = Task(description="What's the weather in miami", agent=agent)
# Set the action and condition
task.set_action(my_action)
task.set_condition(my_condition)
# Execute the task
print("Executing task...")
task.run()
# Check if the task is completed
if task.is_completed():
print("Task completed")
else:
print("Task not completed")
# Output the result of the task
print(f"Task result: {task.result}")
-agent = OmniModalAgent(llm)
-agent.run("Generate a video of a swarm of fish and then make an image out of the video")
```
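The feature list above also mentions scheduling, dependencies, and priority, which the example does not exercise. Below is a minimal sketch of those setters, assuming `add_dependency` accepts another `Task` and that `schedule_time` is a plain `datetime` attribute as described:

```python
from datetime import datetime, timedelta

# A second task that should only run after `task` completes
task2 = Task(description="Summarize the weather report", agent=agent)
task2.add_dependency(task)

# Tasks with higher priority are executed first, per the list above
task2.set_priority(1)

# Schedule the task for one hour from now
task2.schedule_time = datetime.now() + timedelta(hours=1)
```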
---
### `BlocksList`
- Modularity and Flexibility: BlocksList allows users to create custom swarms by adding or removing different classes or functions as blocks. This means users can easily tailor the functionality of their swarm to suit their specific needs.
- Ease of Management: With methods to add, remove, update, and retrieve blocks, BlocksList provides a straightforward way to manage the components of a swarm. This makes it easier to maintain and update the swarm over time.
- Enhanced Searchability: BlocksList offers methods to get blocks by various attributes such as name, type, ID, and parent-related properties. This makes it easier for users to find and work with specific blocks in a large and complex swarm.
```python
import os
from dotenv import load_dotenv
from transformers import AutoModelForCausalLM, AutoTokenizer
# Import the models, structs, and telemetry modules
from swarms import (
Gemini,
GPT4VisionAPI,
Mixtral,
OpenAI,
ToolAgent,
BlocksList,
)
# Load the environment variables
load_dotenv()
# Get the environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
gemini_api_key = os.getenv("GEMINI_API_KEY")
# Tool Agent
model = AutoModelForCausalLM.from_pretrained(
"databricks/dolly-v2-12b"
)
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
json_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "number"},
"is_student": {"type": "boolean"},
"courses": {"type": "array", "items": {"type": "string"}},
},
}
toolagent = ToolAgent(
model=model, tokenizer=tokenizer, json_schema=json_schema
)
# Blocks List which enables you to build custom swarms by adding classes or functions
swarm = BlocksList(
"SocialMediaSwarm",
"A swarm of social media agents",
[
OpenAI(openai_api_key=openai_api_key),
Mixtral(),
GPT4VisionAPI(openai_api_key=openai_api_key),
Gemini(gemini_api_key=gemini_api_key),
],
)
# Add the new block to the swarm
swarm.add(toolagent)
# Remove a block from the swarm
swarm.remove(toolagent)
# Update a block in the swarm
swarm.update(toolagent)
# Get a block at a specific index
block_at_index = swarm.get(0)
# Get all blocks in the swarm
all_blocks = swarm.get_all()
# Get blocks by name
openai_blocks = swarm.get_by_name("OpenAI")
# Get blocks by type
gpt4_blocks = swarm.get_by_type("GPT4VisionAPI")
# Get blocks by ID
block_by_id = swarm.get_by_id(toolagent.id)
# Get blocks by parent
blocks_by_parent = swarm.get_by_parent(swarm)
# Get blocks by parent ID
blocks_by_parent_id = swarm.get_by_parent_id(swarm.id)
# Get blocks by parent name
blocks_by_parent_name = swarm.get_by_parent_name(swarm.name)
# Get blocks by parent type
blocks_by_parent_type = swarm.get_by_parent_type(type(swarm).__name__)
# Get blocks by parent description
blocks_by_parent_description = swarm.get_by_parent_description(
swarm.description
)
# Run the block in the swarm
inference = swarm.run_block(toolagent, "Hello World")
print(inference)
```
## Real-World Deployment
### Multi-Agent Swarm for Logistics
- Swarms is a framework designed for real-world deployment. Here is a demo presenting a fully ready-to-use swarm for a vast array of logistics tasks.
- Swarms is designed to be modular and reliable for real-world deployments.
@@ -314,6 +678,246 @@ efficiency_analysis = efficiency_agent.run(
```
---
## `Multi Modal Autonomous Agents`
- Run the agent with multiple modalities useful for various real-world tasks in manufacturing, logistics, and health.
```python
# Description: This is an example of how to use the Agent class to run a multi-modal workflow
import os
from dotenv import load_dotenv
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.structs import Agent
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = GPT4VisionAPI(
openai_api_key=api_key,
max_tokens=500,
)
# Initialize the task
task = (
"Analyze this image of an assembly line and identify any issues such as"
" misaligned parts, defects, or deviations from the standard assembly"
" process. IF there is anything unsafe in the image, explain why it is"
" unsafe and how it could be improved."
)
img = "assembly_line.jpg"
## Initialize the workflow
agent = Agent(
llm=llm,
max_loops="auto",
autosave=True,
dashboard=True,
multi_modal=True
)
# Run the workflow on a task
agent.run(task=task, img=img)
```
---
## Multi-Modal Model APIs
### `Gemini`
- Deploy Google's Gemini with our visual chain-of-thought prompt, which enables more reliable responses
```python
import os
from dotenv import load_dotenv
from swarms.models import Gemini
from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("GEMINI_API_KEY")
# Initialize the language model
llm = Gemini(
gemini_api_key=api_key,
temperature=0.5,
max_tokens=1000,
system_prompt=VISUAL_CHAIN_OF_THOUGHT,
)
# Initialize the task
task = "This is an eye test. What do you see?"
img = "playground/demos/multi_modal_chain_of_thought/eyetest.jpg"
# Run the workflow on a task
out = llm.run(task=task, img=img)
print(out)
```
### `Anthropic`
```python
# Import necessary modules and classes
from swarms.models import Anthropic
# Initialize an instance of the Anthropic class
model = Anthropic(
anthropic_api_key=""
)
# Using the run method
completion_1 = model.run("What is the capital of France?")
print(completion_1)
# Using the __call__ method
completion_2 = model("How far is the moon from the earth?", stop=["miles", "km"])
print(completion_2)
```
### `HuggingfaceLLM`
```python
from swarms.models import HuggingfaceLLM
# Initialize with custom configuration
custom_config = {
"quantize": True,
"quantization_config": {"load_in_4bit": True},
"verbose": True
}
inference = HuggingfaceLLM(model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config)
# Generate text based on a prompt
prompt_text = "Create a list of known biggest risks of structural collapse with references"
generated_text = inference(prompt_text)
print(generated_text)
```
### `Mixtral`
- Utilize Mixtral through a very simple API
- Utilize 4-bit quantization for increased speed and lower memory usage
- Use Flash Attention 2.0 for increased speed and lower memory usage
```python
from swarms.models import Mixtral
# Initialize the Mixtral model with 4 bit and flash attention!
mixtral = Mixtral(load_in_4bit=True, use_flash_attention_2=True)
# Generate text for a simple task
generated_text = mixtral.run("Generate a creative story.")
# Print the generated text
print(generated_text)
```
### `Dalle3`
```python
from swarms import Dalle3
# Create an instance of the Dalle3 class with high quality
dalle3 = Dalle3(quality="high")
# Define a text prompt
task = "A high-quality image of a sunset"
# Generate a high-quality image from the text prompt
image_url = dalle3(task)
# Print the generated image URL
print(image_url)
```
### `GPT4Vision`
```python
from swarms.models import GPT4VisionAPI
# Initialize with default API key and custom max_tokens
api = GPT4VisionAPI(max_tokens=1000)
# Define the task and image URL
task = "Describe the scene in the image."
img = "https://i.imgur.com/4P4ZRxU.jpeg"
# Run the GPT-4 Vision model
response = api.run(task, img)
# Print the model's response
print(response)
```
### Text to Video with `ZeroscopeTTV`
```python
# Import the model
from swarms import ZeroscopeTTV
# Initialize the model
zeroscope = ZeroscopeTTV()
# Specify the task
task = "A person is walking on the street."
# Generate the video!
video_path = zeroscope(task)
print(video_path)
```
### ModelScope
```python
from swarms.models import ModelScopeAutoModel
# Initialize the model
mp = ModelScopeAutoModel(
model_name="AI-ModelScope/Mixtral-8x7B-Instruct-v0.1",
)
mp.run("Generate a 10,000 word blog on health and wellness.")
```
### `CogAgent`
```python
from swarms import CogAgent
# Initialize CogAgent
cog_agent = CogAgent()
# Run the model on the tests
cog_agent.run("Describe this scene", "images/1.jpg")
```
----
## Supported Models :heavy_check_mark:
Swarms supports various model providers like OpenAI, Huggingface, Anthropic, Google, Mistral and many more.
| Provider | Provided :heavy_check_mark: | Module Name |
|----------|-----------------------------|-------------|
| OpenAI | :heavy_check_mark: | OpenAIChat, OpenAITTS, GPT4VisionAPI, Dalle3 |
| Anthropic | :heavy_check_mark: | Anthropic |
| Mistral | :heavy_check_mark: | Mistral, Mixtral |
| Gemini/Palm | :heavy_check_mark: | Gemini |
| Huggingface | :heavy_check_mark: | HuggingfaceLLM |
| Modelscope | :heavy_check_mark: | ModelScopeAutoModel |
| Vllm | :heavy_check_mark: | vLLM |
---
# Features 🤖
The Swarms framework is designed with a strong emphasis on reliability, performance, and production-grade readiness.
Below are the key features that make Swarms an ideal choice for enterprise-level AI deployments.
@@ -384,30 +988,47 @@ The Swarms framework is equipped with a suite of advanced AI capabilities design
Swarms framework is not just a tool but a robust, scalable, and secure partner in your AI journey, ready to tackle the challenges of modern AI applications in a business environment.
---
## Documentation
-- For documentation, go here, [swarms.apac.ai](https://swarms.apac.ai)
+Documentation is located here at: [swarms.apac.ai](https://swarms.apac.ai)
----
## 🫶 Contributions:
Swarms is an open-source project, and contributions are welcome. If you want to contribute, you can create new features, fix bugs, or improve the infrastructure. Please refer to the [CONTRIBUTING.md](https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md) and our [contributing board](https://github.com/users/kyegomez/projects/1) file in the repository for more information on how to contribute.
The easiest way to contribute is to pick any issue with the `good first issue` tag 💪. Read the Contributing guidelines [here](/CONTRIBUTING.md). Bug Report? [File here](https://github.com/swarms/gateway/issues) | Feature Request? [File here](https://github.com/swarms/gateway/issues)
To see how to contribute, visit [Contribution guidelines](https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md)
Swarms is an open-source project, and contributions are VERY welcome. If you want to contribute, you can create new features, fix bugs, or improve the infrastructure. Please refer to the [CONTRIBUTING.md](https://github.com/kyegomez/swarms/blob/master/CONTRIBUTING.md) and our [contributing board](https://github.com/users/kyegomez/projects/1) to participate in Roadmap discussions!
<a href="https://github.com/kyegomez/swarms/graphs/contributors">
<img src="https://contrib.rocks/image?repo=kyegomez/swarms" />
</a>
----
## Community
Join our growing community around the world, for real-time support, ideas, and discussions on Swarms 😊
- View our official [Blog](https://swarms.apac.ai)
- Chat live with us on [Discord](https://discord.gg/kS3rwKs3ZC)
- Follow us on [Twitter](https://twitter.com/kyegomez)
- Connect with us on [LinkedIn](https://www.linkedin.com/company/the-swarm-corporation)
- Visit us on [YouTube](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ)
- [Join the Swarms community on Discord!](https://discord.gg/AJazBmhKnr)
- Join our Swarms Community Gathering every Thursday at 1pm NYC Time to unlock the potential of autonomous agents in automating your daily tasks [Sign up here](https://lu.ma/5p2jnc2v)
---
## Discovery Call
-Book a discovery call with the Swarms team to learn how to optimize and scale your swarm! [Click here to book a time that works for you!](https://calendly.com/swarm-corp/30min?month=2023-11)
+Book a discovery call to learn how Swarms can lower your operating costs by 40% with swarms of autonomous agents in lightspeed. [Click here to book a time that works for you!](https://calendly.com/swarm-corp/30min?month=2023-11)
## Accelerate Backlog
Help us accelerate our backlog by supporting us financially! Note: we're an open-source corporation, so at the moment all of our revenue comes from donations ;)
<a href="https://polar.sh/kyegomez"><img src="https://polar.sh/embed/fund-our-backlog.svg?org=kyegomez" /></a>
# License
Apache License

@@ -0,0 +1,30 @@
import os
from dotenv import load_dotenv
from swarms import OpenAIChat, Task, ConcurrentWorkflow, Agent
# Load environment variables from .env file
load_dotenv()
# Initialize the language model
llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))
agent = Agent(
system_prompt=None,
llm=llm,
max_loops=1,
)
# Create a workflow
workflow = ConcurrentWorkflow(max_workers=3)
# Create tasks
task1 = Task(agent=agent, description="What's the weather in miami")
task2 = Task(
agent=agent, description="What's the weather in new york"
)
task3 = Task(agent=agent, description="What's the weather in london")
# Add tasks to the workflow
workflow.add(tasks=[task1, task2, task3])
# Run the workflow and print each task result
workflow.run()
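# Print each task result, mirroring the SequentialWorkflow example in the README
# (assumes each Task stores its output in `result` after the run)
for task in [task1, task2, task3]:
    print(f"{task.description}: {task.result}")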

Binary file not shown. (New image: 283 KiB)

@@ -0,0 +1,21 @@
# [Go To Market Strategy][GTM]
Our vision is to become the world leader in production-grade, real-world autonomous agent deployment through open-source product development, deep verticalization, and unmatched value delivery to the end user.
We will focus first on accelerating the open-source framework to product-market fit (PMF), where it will serve as the backend for upstream products and services such as the Swarm Cloud, which will enable enterprises to deploy autonomous agents with long-term memory and tools in the cloud, and a no-code platform for users to build their own swarm by dragging and dropping blocks.
Our target user segment for the framework is AI engineers looking to deploy agents into high-risk environments where reliability is crucial.
Once PMF has been achieved and the framework has been extensively benchmarked, we aim to establish high-value contracts with customers in security, logistics, manufacturing, health, and various other untapped industries.
Our growth strategy for the open-source framework can be summarized by:
- Educating developers on the value of autonomous agent usage.
- Tutorial walkthroughs on various applications, like deploying multi-modal agents through cameras or building custom swarms for a specific business operation.
- Demonstrating unmatched reliability by delighting users.
- Staying up to date with trends and integrating the latest models, frameworks, and methodologies.
- Building a loyal and devoted community for long-term user retention. [Join here](https://codex.apac.ai)
As we continuously deliver value with the open framework, we will strategically position ourselves to acquire leads for high-value contracts by openly demonstrating the power, reliability, and performance of our framework.
Acquire Full Access to the memo here: [TSC Memo](https://docs.google.com/document/d/1hS_nv_lFjCqLfnJBoF6ULY9roTbSgSuCkvXvSUSc7Lo/edit?usp=sharing)

@@ -1,19 +1,74 @@
# Swarms Docs
<div align="center">
<p>
<a align="center" href="" target="_blank">
<img
width="850"
src="https://github.com/kyegomez/swarms/raw/master/images/swarmslogobanner.png"
>
</a>
</p>
</div>
Welcome to Swarms' Documentation!
## 👋 Hello
Swarms is a modular framework that enables reliable and useful multi-agent collaboration at scale to automate real-world tasks.
Swarms provides you with all the building blocks you need to build reliable, production-grade, and scalable multi-agent apps!
Swarms is transforming the landscape of AI from siloed AI agents to a unified 'swarm' of intelligence. Through relentless iteration and the power of collective insight from our 1500+ Agora researchers, we're developing a groundbreaking framework for AI collaboration. Our mission is to catalyze a paradigm shift, advancing Humanity with the power of unified autonomous AI agent swarms.
## 💻 Install
You can install `swarms` with pip in a
[**Python>=3.8**](https://www.python.org/) environment.
This documentation covers the fundamentals of the **Swarms** framework and describes how to use **Swarms Tools**.
!!! example "pip install (recommended)"
## Swarms
=== "headless"
The headless installation of `swarms` is designed for environments where graphical user interfaces (GUI) are not needed, making it more lightweight and suitable for server-side applications.
The Swarms framework provides developers with the ability to create AI systems that operate across two dimensions: predictability and creativity. For predictability, Swarms enforces structures like sequential pipelines, DAG-based workflows, and long-term memory. To facilitate creativity, Swarms safely prompts LLMs with [tools](https://github.com/kyegomez/swarms-tools) and short-term memory connecting them to external APIs and data stores. The framework allows developers to transition between those two dimensions effortlessly based on their use case.
```bash
pip install swarms
```
Swarms not only helps developers harness the potential of LLMs but also enforces trust boundaries, schema validation, and tool activity-level permissions. By doing so, Swarms maximizes LLMs' reasoning while adhering to strict policies regarding their capabilities.
!!! example "git clone (for development)"
=== "virtualenv"
```bash
# clone repository and navigate to root directory
git clone https://github.com/kyegomez/swarms.git
cd swarms
# setup python environment and activate it
python3 -m venv venv
source venv/bin/activate
pip install --upgrade pip
# headless install
pip install -e "."
# desktop install
pip install -e ".[desktop]"
```
=== "poetry"
```bash
# clone repository and navigate to root directory
git clone https://github.com/kyegomez/swarms.git
cd swarms
# setup python environment and activate it
poetry env use python3.10
poetry shell
# headless install
poetry install
# desktop install
poetry install --extras "desktop"
```
## Documentation
[Learn more about swarms →](swarms/)

@@ -1,4 +1,4 @@
-# `PineconDB` Documentation
+# `PineconeDB` Documentation
## Table of Contents

@@ -28,7 +28,7 @@ pip install swarms
## Initialization
-To use the Weaviate API Client, you need to initialize an instance of the `WeaviateClient` class. Here are the parameters you can pass to the constructor:
+To use the Weaviate API Client, you need to initialize an instance of the `WeaviateDB` class. Here are the parameters you can pass to the constructor:
| Parameter | Type | Description |
|----------------------|----------------|----------------------------------------------------------------------------------------------------------------------------------|
@@ -43,12 +43,12 @@ To use the Weaviate API Client, you need to initialize an instance of the `Weavi
| `additional_config` | Optional[weaviate.AdditionalConfig] | Additional configuration for the client. (Optional) |
| `connection_params` | Dict[str, Any] | Dictionary containing connection parameters. This parameter is used internally and can be ignored in most cases. |
-Here's an example of how to initialize a WeaviateClient:
+Here's an example of how to initialize a WeaviateDB:
```python
-from swarms.memory import WeaviateClient
+from swarms.memory import WeaviateDB
-weaviate_client = WeaviateClient(
+weaviate_client = WeaviateDB(
http_host="YOUR_HTTP_HOST",
http_port="YOUR_HTTP_PORT",
http_secure=True,

@@ -0,0 +1,76 @@
# Module Name: Mixtral
## Introduction
The Mixtral module is a powerful language model designed for text generation tasks. It leverages the MistralAI Mixtral-8x7B pre-trained model to generate high-quality text based on user-defined tasks or prompts. In this documentation, we will provide a comprehensive overview of the Mixtral module, including its architecture, purpose, arguments, and detailed usage examples.
## Purpose
The Mixtral module is designed to facilitate text generation tasks using state-of-the-art language models. Whether you need to generate creative content, draft text for various applications, or simply explore the capabilities of Mixtral, this module serves as a versatile and efficient solution. With its easy-to-use interface, you can quickly generate text for a wide range of applications.
## Architecture
The Mixtral module is built on top of the MistralAI Mixtral-8x7B pre-trained model. It is a sparse mixture-of-experts model in which each layer contains 8 expert feed-forward networks, with 2 experts activated per token, enabling it to generate coherent and contextually relevant text. The model is capable of handling a variety of text generation tasks, from simple prompts to more complex content generation.
## Class Definition
### `Mixtral(model_name: str = "mistralai/Mixtral-8x7B-v0.1", max_new_tokens: int = 500)`
#### Parameters
- `model_name` (str, optional): The name or path of the pre-trained Mixtral model. Default is "mistralai/Mixtral-8x7B-v0.1".
- `max_new_tokens` (int, optional): The maximum number of new tokens to generate. Default is 500.
## Functionality and Usage
The Mixtral module offers a straightforward interface for text generation. It accepts a task or prompt as input and returns generated text based on the provided input.
### `run(task: Optional[str] = None, **kwargs) -> str`
#### Parameters
- `task` (str, optional): The task or prompt for text generation.
#### Returns
- `str`: The generated text.
## Usage Examples
### Example 1: Basic Usage
```python
from swarms.models import Mixtral
# Initialize the Mixtral model
mixtral = Mixtral()
# Generate text for a simple task
generated_text = mixtral.run("Generate a creative story.")
print(generated_text)
```
### Example 2: Custom Model
You can specify a custom pre-trained model by providing the `model_name` parameter.
```python
custom_model_name = "model_name"
mixtral_custom = Mixtral(model_name=custom_model_name)
generated_text = mixtral_custom.run("Generate text with a custom model.")
print(generated_text)
```
### Example 3: Controlling Output Length
You can control the length of the generated text by adjusting the `max_new_tokens` parameter.
```python
mixtral_length = Mixtral(max_new_tokens=100)
generated_text = mixtral_length.run("Generate a short text.")
print(generated_text)
```
## Additional Information and Tips
- It's recommended to use a descriptive task or prompt to guide the text generation process.
- Experiment with different prompt styles and lengths to achieve the desired output.
- You can fine-tune Mixtral on specific tasks if needed, although pre-trained models often work well out of the box.
- Monitor the `max_new_tokens` parameter to control the length of the generated text.
## Conclusion
The Mixtral module is a versatile tool for text generation tasks, powered by the MistralAI Mixtral-8x7B pre-trained model. Whether you need creative writing, content generation, or assistance with text-based tasks, Mixtral can help you achieve your goals. With a simple interface and flexible parameters, it's a valuable addition to your text generation toolkit.
If you encounter any issues or have questions about using Mixtral, please refer to the MistralAI documentation or reach out to their support team for further assistance. Happy text generation with Mixtral!

@@ -0,0 +1,105 @@
# Module Name: ZeroscopeTTV
## Introduction
The ZeroscopeTTV module is a versatile zero-shot video generation model designed to create videos based on textual descriptions. This comprehensive documentation will provide you with an in-depth understanding of the ZeroscopeTTV module, its architecture, purpose, arguments, and detailed usage examples.
## Purpose
The ZeroscopeTTV module serves as a powerful tool for generating videos from text descriptions. Whether you need to create video content for various applications, visualize textual data, or explore the capabilities of ZeroscopeTTV, this module offers a flexible and efficient solution. With its easy-to-use interface, you can quickly generate videos based on your textual input.
## Architecture
The ZeroscopeTTV module is built on top of the Diffusers library, leveraging the power of diffusion models for video generation. It allows you to specify various parameters such as model name, data type, chunk size, dimensions, and more to customize the video generation process. The model performs multiple inference steps and utilizes a diffusion pipeline to generate high-quality videos.
## Class Definition
### `ZeroscopeTTV(model_name: str = "cerspense/zeroscope_v2_576w", torch_dtype=torch.float16, chunk_size: int = 1, dim: int = 1, num_inference_steps: int = 40, height: int = 320, width: int = 576, num_frames: int = 36)`
#### Parameters
- `model_name` (str, optional): The name of the pre-trained model to use. Default is "cerspense/zeroscope_v2_576w".
- `torch_dtype` (torch.dtype, optional): The torch data type to use for computations. Default is torch.float16.
- `chunk_size` (int, optional): The size of chunks for forward chunking. Default is 1.
- `dim` (int, optional): The dimension along which the input is split for forward chunking. Default is 1.
- `num_inference_steps` (int, optional): The number of inference steps to perform. Default is 40.
- `height` (int, optional): The height of the video frames. Default is 320.
- `width` (int, optional): The width of the video frames. Default is 576.
- `num_frames` (int, optional): The number of frames in the video. Default is 36.
## Functionality and Usage
The ZeroscopeTTV module offers a straightforward interface for video generation. It accepts a textual task or description as input and returns the path to the generated video.
### `run(task: str = None, *args, **kwargs) -> str`
#### Parameters
- `task` (str, optional): The input task or description for video generation.
#### Returns
- `str`: The path to the generated video.
## Usage Examples
### Example 1: Basic Usage
```python
from swarms.models import ZeroscopeTTV
# Initialize the ZeroscopeTTV model
zeroscope = ZeroscopeTTV()
# Generate a video based on a textual description
task = "A bird flying in the sky."
video_path = zeroscope.run(task)
print(f"Generated video path: {video_path}")
```
### Example 2: Custom Model and Parameters
You can specify a custom pre-trained model and adjust various parameters for video generation.
```python
custom_model_name = "your_custom_model_path"
custom_dtype = torch.float32
custom_chunk_size = 2
custom_dim = 2
custom_num_inference_steps = 50
custom_height = 480
custom_width = 720
custom_num_frames = 48
custom_zeroscope = ZeroscopeTTV(
model_name=custom_model_name,
torch_dtype=custom_dtype,
chunk_size=custom_chunk_size,
dim=custom_dim,
num_inference_steps=custom_num_inference_steps,
height=custom_height,
width=custom_width,
num_frames=custom_num_frames,
)
task = "A car driving on the road."
video_path = custom_zeroscope.run(task)
print(f"Generated video path: {video_path}")
```
### Example 3: Exporting Video Frames
You can also export individual video frames if needed.
```python
from swarms.models import export_to_video
# Generate video frames
video_frames = zeroscope.run("A boat sailing on the water.")
# Export video frames to a video file
video_path = export_to_video(video_frames)
print(f"Generated video path: {video_path}")
```
## Additional Information and Tips
- Ensure that the input textual task or description is clear and descriptive to achieve the desired video output.
- Experiment with different parameter settings to control video resolution, frame count, and inference steps.
- Use the `export_to_video` function to export individual video frames as needed.
- Monitor the progress and output paths to access the generated videos.
## Conclusion
The ZeroscopeTTV module is a powerful solution for zero-shot video generation based on textual descriptions. Whether you are creating videos for storytelling, data visualization, or other applications, ZeroscopeTTV offers a versatile and efficient way to bring your text to life. With a flexible interface and customizable parameters, it empowers you to generate high-quality videos with ease.
If you encounter any issues or have questions about using ZeroscopeTTV, please refer to the Diffusers library documentation or reach out to their support team for further assistance. Enjoy creating videos with ZeroscopeTTV!

@@ -0,0 +1,106 @@
# swarms.structs Documentation
## Introduction
The swarms.structs library provides a collection of classes for representing artifacts and their attributes. This documentation will provide an overview of the `Artifact` class, its attributes, functionality, and usage examples.
### Artifact Class
The `Artifact` class represents an artifact and its attributes. It inherits from the `BaseModel` class and includes the following attributes:
#### Attributes
1. `artifact_id (str)`: Id of the artifact.
2. `file_name (str)`: Filename of the artifact.
3. `relative_path (str, optional)`: Relative path of the artifact in the agent's workspace.
These attributes are crucial for identifying and managing different artifacts within a given context.
## Class Definition
The `Artifact` class can be defined as follows:
```python
class Artifact(BaseModel):
"""
Represents an artifact.
Attributes:
artifact_id (str): Id of the artifact.
file_name (str): Filename of the artifact.
relative_path (str, optional): Relative path of the artifact in the agent's workspace.
"""
artifact_id: str = Field(
...,
description="Id of the artifact",
example="b225e278-8b4c-4f99-a696-8facf19f0e56",
)
file_name: str = Field(
..., description="Filename of the artifact", example="main.py"
)
relative_path: Optional[str] = Field(
None,
description=(
"Relative path of the artifact in the agent's workspace"
),
example="python/code/",
)
```
The `Artifact` class defines the mandatory and optional attributes and provides corresponding descriptions along with example values.
## Functionality and Usage
The `Artifact` class encapsulates the information and attributes representing an artifact. It provides a structured and organized way to manage artifacts within a given context.
### Example 1: Creating an Artifact instance
To create an instance of the `Artifact` class, you can simply initialize it with the required attributes. Here's an example:
```python
from swarms.structs import Artifact
artifact_instance = Artifact(
artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
file_name="main.py",
relative_path="python/code/"
)
```
In this example, we create an instance of the `Artifact` class with the specified artifact details.
### Example 2: Accessing Artifact attributes
You can access the attributes of the `Artifact` instance using dot notation. Here's how you can access the file name of the artifact:
```python
print(artifact_instance.file_name)
# Output: "main.py"
```
### Example 3: Handling optional attributes
If the `relative_path` attribute is not provided during artifact creation, it will default to `None`. Here's an example:
```python
artifact_instance_no_path = Artifact(
artifact_id="c280s347-9b7d-3c68-m337-7abvf50j23k",
file_name="script.js"
)
print(artifact_instance_no_path.relative_path)
# Output: None
```
By providing default values for optional attributes, the `Artifact` class allows flexibility in defining artifact instances.
### Additional Information and Tips
The `Artifact` class represents a powerful and flexible means of handling various artifacts with different attributes. By utilizing this class, users can organize, manage, and streamline their artifacts with ease.
## References and Resources
For further details and references related to the swarms.structs library and the `Artifact` class, refer to the [official documentation](https://swarms.structs.docs/artifact.html).
This comprehensive documentation provides an in-depth understanding of the `Artifact` class, its attributes, functionality, and usage examples. By following the detailed examples and explanations, developers can effectively leverage the capabilities of the `Artifact` class within their projects.

@@ -0,0 +1,49 @@
# swarms.structs
## Overview
Swarms is a library that provides tools for managing a distributed system of agents working together to achieve a common goal. The structs module within Swarms provides a set of data structures and classes that are used to represent artifacts, tasks, and other entities within the system. The `ArtifactUpload` class is one such data structure that represents the process of uploading an artifact to an agent's workspace.
## ArtifactUpload
The `ArtifactUpload` class inherits from the `BaseModel` class. It has two attributes: `file` and `relative_path`. The `file` attribute represents the bytes of the file to be uploaded, while the `relative_path` attribute represents the relative path of the artifact in the agent's workspace.
### Class Definition
```python
class ArtifactUpload(BaseModel):
file: bytes = Field(..., description="File to upload")
relative_path: Optional[str] = Field(
None,
description=(
"Relative path of the artifact in the agent's workspace"
),
example="python/code/",
)
```
The `ArtifactUpload` class requires the `file` attribute to be passed as an argument. It is of type `bytes` and represents the file to be uploaded. The `relative_path` attribute is optional and is of type `str`. It represents the relative path of the artifact in the agent's workspace. If not provided, it defaults to `None`.
### Functionality and Usage
The `ArtifactUpload` class is used to create an instance of an artifact upload. It can be instantiated with or without a `relative_path`. Here is an example of how the class can be used:
```python
from swarms.structs import ArtifactUpload
# Uploading a file with no relative path
upload_no_path = ArtifactUpload(file=b'example_file_contents')
# Uploading a file with a relative path
upload_with_path = ArtifactUpload(file=b'example_file_contents', relative_path="python/code/")
```
In the above example, `upload_no_path` is an instance of `ArtifactUpload` with no specified `relative_path`, whereas `upload_with_path` is an instance of `ArtifactUpload` with the `relative_path` set to "python/code/".
### Additional Information
When passing the `file` and `relative_path` parameters to the `ArtifactUpload` class, ensure that the `file` parameter is provided exactly as the file that needs to be uploaded, represented as a `bytes` object. If a `relative_path` is provided, ensure that it is a valid path within the agent's workspace.
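Because `file` must already be raw bytes, a common pattern is to read the artifact from disk first. A minimal sketch (the `main.py` path is purely illustrative):

```python
from pathlib import Path

from swarms.structs import ArtifactUpload

# Read the artifact from disk as raw bytes
payload = Path("main.py").read_bytes()

upload = ArtifactUpload(file=payload, relative_path="python/code/")
```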
# Conclusion
The `ArtifactUpload` class is an essential data structure within the Swarms library that represents the process of uploading an artifact to an agent's workspace. By using this class, users can easily manage and represent artifact uploads within the Swarms distributed system.

@@ -0,0 +1,137 @@
# Module/Function Name: BaseStructure
## Introduction:
The `BaseStructure` module contains the basic structure and attributes required for running machine learning models and associated metadata, error logging, artifact saving/loading, and relevant event logging.
The module provides the flexibility to save and load the model metadata, log errors, save artifacts, and maintain a log for multiple events associated with multiple threads and batched operations. The key attributes of the module include **name**, **description**, **save_metadata_path**, and **save_error_path**.
## Class Definition:
### Arguments:
| Argument | Type | Description |
|----------------------|--------|----------------------------------------------------------------------|
| name | str | (Optional) The name of the structure. |
| description | str | (Optional) A description of the structure. |
| save_metadata | bool | A boolean flag to enable or disable metadata saving. |
| save_artifact_path | str | (Optional) The path to save artifacts. |
| save_metadata_path | str | (Optional) The path to save metadata. |
| save_error_path | str | (Optional) The path to save errors. |
## Methods:
### 1. run
Runs the structure.
### 2. save_to_file
Saves data to a file.
* **data**: Value to be saved.
* **file_path**: Path where the data is to be saved.
### 3. load_from_file
Loads data from a file.
* **file_path**: Path from where the data is to be loaded.
### 4. save_metadata
Saves metadata to a file.
* **metadata**: Data to be saved as metadata.
### 5. load_metadata
Loads metadata from a file.
### 6. log_error
Logs error to a file.
### 7. save_artifact
Saves artifact to a file.
* **artifact**: The artifact to be saved.
* **artifact_name**: Name of the artifact.
### 8. load_artifact
Loads artifact from a file.
* **artifact_name**: Name of the artifact.
### 9. log_event
Logs an event to a file.
* **event**: The event to be logged.
* **event_type**: Type of the event (optional, defaults to "INFO").
### 10. run_async
Runs the structure asynchronously.
### 11. save_metadata_async
Saves metadata to a file asynchronously.
### 12. load_metadata_async
Loads metadata from a file asynchronously.
### 13. log_error_async
Logs error to a file asynchronously.
### 14. save_artifact_async
Saves artifact to a file asynchronously.
### 15. load_artifact_async
Loads artifact from a file asynchronously.
### 16. log_event_async
Logs an event to a file asynchronously.
### 17. asave_to_file
Saves data to a file asynchronously.
### 18. aload_from_file
Loads data from a file asynchronously.
### 19. run_concurrent
Runs the structure concurrently.
### 20. compress_data
Compresses data.
### 21. decompres_data
Decompresses data.
### 22. run_batched
Runs batched data.
## Examples:
### Example 1: Saving Metadata
```python
base_structure = BaseStructure(name="ExampleStructure")
metadata = {"key1": "value1", "key2": "value2"}
base_structure.save_metadata(metadata)
```
### Example 2: Loading Artifact
```python
artifact_name = "example_artifact"
artifact_data = base_structure.load_artifact(artifact_name)
```
### Example 3: Running Concurrently
```python
concurrent_data = [data1, data2, data3]
results = base_structure.run_concurrent(batched_data=concurrent_data)
```
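### Example 4: Logging Events and Errors
A minimal sketch of the logging helpers, assuming `log_event` and `log_error` accept the message strings described in the method list above:
```python
base_structure = BaseStructure(
    name="ExampleStructure", save_error_path="errors.log"
)

# Log an event; event_type defaults to "INFO" per the method list
base_structure.log_event("Model run started")

# Log an error message to the configured error path
base_structure.log_error("Model run failed: out of memory")
```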
## Note:
The `BaseStructure` class is designed to provide a modular and extensible structure for managing metadata, logs, errors, and batched operations while running machine learning models. The class's methods offer asynchronous and concurrent execution capabilities, thus optimizing the performance of the associated applications and models. The module's attributes and methods cater to a wide range of use cases, making it an essential foundational component for machine learning and data-based applications.
# Conclusion:
The `BaseStructure` module offers a robust and flexible foundation for managing machine learning model metadata, error logs, and event tracking, including asynchronous, concurrent, and batched operations. By leveraging the inherent capabilities of this class, developers can enhance the reliability, scalability, and performance of machine learning-based applications.
## References:
- [Python Concurrent Programming with `asyncio`](https://docs.python.org/3/library/asyncio.html)
- [Understanding Thread Pool Executor in Python](https://docs.python.org/3/library/concurrent.futures.html#executor-objects)
- [Documentation on `gzip` Module for Data Compression](https://docs.python.org/3/library/gzip.html)
---
The above documentation provides detailed information about the `BaseStructure` module, including its functionality, attributes, methods, usage examples, and references to relevant resources for further exploration. This comprehensive documentation aims to deepen the users' understanding of the module's purpose and how it can be effectively utilized in practice.

@ -0,0 +1,42 @@
### swarms.modules.structs
`Class Name: BaseWorkflow`
Base class for workflows.
`Attributes`
- task_pool (list): A list to store tasks.
`Methods`
- add(task: Task = None, tasks: List[Task] = None, *args, **kwargs): Adds a task or a list of tasks to the task pool.
- run(): Abstract method to run the workflow.
Source Code:
```python
class BaseWorkflow(BaseStructure):
"""
Base class for workflows.
Attributes:
task_pool (list): A list to store tasks.
Methods:
add(task: Task = None, tasks: List[Task] = None, *args, **kwargs):
Adds a task or a list of tasks to the task pool.
run():
Abstract method to run the workflow.
"""
```
For the usage examples and additional in-depth documentation please visit [BaseWorkflow](https://github.com/swarms-modules/structs/blob/main/baseworkflow.md#swarms-structs)
Explanation:
The `BaseWorkflow` class is designed to manage workflows. It maintains a task pool (a list) for the tasks it coordinates and exposes several built-in methods: `add`, `run`, `__sequential_loop`, `__log`, `reset`, `get_task_results`, `remove_task`, `update_task`, `delete_task`, `save_workflow_state`, `add_objective_to_workflow`, and `load_workflow_state`, each serving a distinct purpose.
The `add` method appends a task or a list of tasks to the task pool, while `run` is left abstract for subclasses to implement the actual execution; `__sequential_loop`, another abstract method, is reserved for running the pool sequentially. `__log` records log messages, `reset` clears the workflow, `get_task_results` returns the results of each task, and `remove_task` removes a task from the pool.
When a task needs to change, `update_task` modifies it and `delete_task` removes it entirely. `save_workflow_state` serializes the workflow state to a JSON file at a path the user chooses, `add_objective_to_workflow` attaches objectives to the workflow, and `load_workflow_state` (also abstract) restores the workflow from a previously saved JSON file, making it possible to revert the workflow to a specific state.
The class also defines `__str__` and `__repr__` for human-readable and unambiguous text representations, respectively. Together these methods allow the workflow to be reset, queried for task results, and have its tasks removed, updated, or deleted, with its state saved at any point.
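A minimal sketch of how a concrete workflow might be built on this base, assuming `Task` objects and the `task_pool` attribute described above (`SimpleWorkflow` is illustrative, not part of the library):
```python
from swarms.models import OpenAIChat
from swarms.structs import BaseWorkflow, Task

class SimpleWorkflow(BaseWorkflow):
    """A tiny concrete subclass: runs every task in the pool in order."""

    def run(self):
        return [task.execute() for task in self.task_pool]

llm = OpenAIChat(openai_api_key="")
workflow = SimpleWorkflow()
workflow.add(tasks=[Task(llm, "What's the weather in miami"), Task(llm, "Summarize the forecast")])
results = workflow.run()
workflow.save_workflow_state("workflow_state.json")
```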

@ -0,0 +1,77 @@
```python
# Module/Function Name: ConcurrentWorkflow
# Fully qualified name: swarms.structs.ConcurrentWorkflow(max_workers, autosave, saved_state_filepath)

class ConcurrentWorkflow:
    """
    ConcurrentWorkflow class for running a set of tasks concurrently using N autonomous agents.

    Args:
        max_workers (int): The maximum number of workers to use for concurrent execution.
        autosave (bool): Whether to autosave the workflow state.
        saved_state_filepath (Optional[str]): The file path to save the workflow state.
    """

    def add(self, task, tasks=None):
        """Adds a task (or a list of tasks) to the workflow.

        Args:
            task (Task): Task to add to the workflow.
            tasks (List[Task]): List of tasks to add to the workflow (optional).
        """
        try:
            ...  # implementation elided
        except Exception as error:
            print(f"[ERROR][ConcurrentWorkflow] {error}")
            raise error

    def run(self, print_results=False, return_results=False):
        """
        Executes the tasks in parallel using a ThreadPoolExecutor.

        Args:
            print_results (bool): Whether to print the results of each task. Default is False.
            return_results (bool): Whether to return the results of each task. Default is False.

        Returns:
            List[Any]: A list of the results of each task if return_results is True; otherwise None.
        """
        results = []
        try:
            ...  # implementation elided; failures are reported per task
        except Exception as error:
            print(f"[ERROR][ConcurrentWorkflow] {error}")
            raise error
        return results if return_results else None

    def _execute_task(self, task):
        """Executes a single task.

        Args:
            task (Task): Task to execute.

        Returns:
            result: The result of executing the task.
        """
        try:
            ...  # implementation elided
        except Exception as error:
            print(f"[ERROR][ConcurrentWorkflow] {error}")
            raise error


# Usage example:

from swarms.models import OpenAIChat
from swarms.structs import ConcurrentWorkflow, Task

llm = OpenAIChat(openai_api_key="")
workflow = ConcurrentWorkflow(max_workers=5)
workflow.add(Task(llm, "What's the weather in miami"))
workflow.add(Task(llm, "Create a report on these metrics"))
workflow.run()
workflow.tasks
```
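For intuition, the parallel fan-out inside `run` can be pictured as a standard `ThreadPoolExecutor` pattern; a minimal standalone sketch, assuming each task is a callable (an illustration, not the class's actual source):
```python
from concurrent.futures import ThreadPoolExecutor, as_completed

def run_concurrently(tasks, max_workers=5, print_results=False):
    """Execute callables in parallel and collect their results."""
    results = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {executor.submit(task): task for task in tasks}
        for future in as_completed(futures):
            try:
                result = future.result()
                if print_results:
                    print(result)
                results.append(result)
            except Exception as e:
                print(f"Task {futures[future]} generated an exception: {e}")
    return results
```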

@ -0,0 +1,265 @@
# Module/Class Name: Conversation
## Introduction
The `Conversation` class is a powerful tool for managing and structuring conversation data in a Python program. It enables you to create, manipulate, and analyze conversations easily. This documentation will provide you with a comprehensive understanding of the `Conversation` class, its attributes, methods, and how to effectively use it.
## Table of Contents
1. **Class Definition**
- Overview
- Attributes
2. **Methods**
- `__init__(self, time_enabled: bool = False, *args, **kwargs)`
- `add(self, role: str, content: str, *args, **kwargs)`
- `delete(self, index: str)`
- `update(self, index: str, role, content)`
- `query(self, index: str)`
- `search(self, keyword: str)`
- `display_conversation(self, detailed: bool = False)`
- `export_conversation(self, filename: str)`
- `import_conversation(self, filename: str)`
- `count_messages_by_role(self)`
- `return_history_as_string(self)`
- `save_as_json(self, filename: str)`
- `load_from_json(self, filename: str)`
- `search_keyword_in_conversation(self, keyword: str)`
- `pretty_print_conversation(self, messages)`
---
### 1. Class Definition
#### Overview
The `Conversation` class is designed to manage conversations by keeping track of messages and their attributes. It offers methods for adding, deleting, updating, querying, and displaying messages within the conversation. Additionally, it supports exporting and importing conversations, searching for specific keywords, and more.
#### Attributes
- `time_enabled (bool)`: A flag indicating whether to enable timestamp recording for messages.
- `conversation_history (list)`: A list that stores messages in the conversation.
### 2. Methods
#### `__init__(self, time_enabled: bool = False, *args, **kwargs)`
- **Description**: Initializes a new Conversation object.
- **Parameters**:
- `time_enabled (bool)`: If `True`, timestamps will be recorded for each message. Default is `False`.
#### `add(self, role: str, content: str, *args, **kwargs)`
- **Description**: Adds a message to the conversation history.
- **Parameters**:
  - `role (str)`: The role of the speaker (e.g., "user", "assistant").
- `content (str)`: The content of the message.
#### `delete(self, index: str)`
- **Description**: Deletes a message from the conversation history.
- **Parameters**:
- `index (str)`: The index of the message to delete.
#### `update(self, index: str, role, content)`
- **Description**: Updates a message in the conversation history.
- **Parameters**:
- `index (str)`: The index of the message to update.
  - `role (str)`: The new role of the speaker.
  - `content (str)`: The new content of the message.
#### `query(self, index: str)`
- **Description**: Retrieves a message from the conversation history.
- **Parameters**:
- `index (str)`: The index of the message to query.
- **Returns**: The message as a string.
#### `search(self, keyword: str)`
- **Description**: Searches for messages containing a specific keyword in the conversation history.
- **Parameters**:
- `keyword (str)`: The keyword to search for.
- **Returns**: A list of messages that contain the keyword.
#### `display_conversation(self, detailed: bool = False)`
- **Description**: Displays the conversation history.
- **Parameters**:
- `detailed (bool, optional)`: If `True`, provides detailed information about each message. Default is `False`.
#### `export_conversation(self, filename: str)`
- **Description**: Exports the conversation history to a text file.
- **Parameters**:
- `filename (str)`: The name of the file to export to.
#### `import_conversation(self, filename: str)`
- **Description**: Imports a conversation history from a text file.
- **Parameters**:
- `filename (str)`: The name of the file to import from.
#### `count_messages_by_role(self)`
- **Description**: Counts the number of messages by role in the conversation.
- **Returns**: A dictionary containing the count of messages for each role.
#### `return_history_as_string(self)`
- **Description**: Returns the entire conversation history as a single string.
- **Returns**: The conversation history as a string.
#### `save_as_json(self, filename: str)`
- **Description**: Saves the conversation history as a JSON file.
- **Parameters**:
- `filename (str)`: The name of the JSON file to save.
#### `load_from_json(self, filename: str)`
- **Description**: Loads a conversation history from a JSON file.
- **Parameters**:
- `filename (str)`: The name of the JSON file to load.
#### `search_keyword_in_conversation(self, keyword: str)`
- **Description**: Searches for a keyword in the conversation history and returns matching messages.
- **Parameters**:
- `keyword (str)`: The keyword to search for.
- **Returns**: A list of messages containing the keyword.
#### `pretty_print_conversation(self, messages)`
- **Description**: Pretty prints a list of messages with colored role indicators.
- **Parameters**:
- `messages (list)`: A list of messages to print.
## Examples
Here are some usage examples of the `Conversation` class:
### Creating a Conversation
```python
from swarms.structs import Conversation
conv = Conversation()
```
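To record a timestamp with every message, set `time_enabled` at construction:
```python
conv = Conversation(time_enabled=True)
conv.add("user", "This message will carry a timestamp.")
```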
### Adding Messages
```python
conv.add("user", "Hello, world!")
conv.add("assistant", "Hello, user!")
```
### Displaying the Conversation
```python
conv.display_conversation()
```
### Searching for Messages
```python
result = conv.search("Hello")
```
### Exporting and Importing Conversations
```python
conv.export_conversation("conversation.txt")
conv.import_conversation("conversation.txt")
```
### Counting Messages by Role
```python
counts = conv.count_messages_by_role()
# Example result: {'user': 2, 'assistant': 2}
```
### Loading and Saving as JSON
```python
conv.save_as_json("conversation.json")
conv.load_from_json("conversation.json")
```
The following examples cover more of the `Conversation` class's capabilities.
### Querying a Specific Message
You can retrieve a specific message from the conversation by its index:
```python
message = conv.query(0) # Retrieves the first message
```
### Updating a Message
You can update a message's content or role within the conversation:
```python
conv.update(0, "user", "Hi there!") # Updates the first message
```
### Deleting a Message
If you want to remove a message from the conversation, you can use the `delete` method:
```python
conv.delete(0) # Deletes the first message
```
### Searching for a Keyword
You can search for messages containing a specific keyword within the conversation:
```python
results = conv.search_keyword_in_conversation("Hello")
```
### Pretty Printing
The `pretty_print_conversation` method provides a visually appealing way to display messages with colored role indicators:
```python
conv.pretty_print_conversation(conv.conversation_history)
```
These examples demonstrate the versatility of the `Conversation` class in managing and interacting with conversation data. Whether you're building a chatbot, conducting analysis, or simply organizing dialogues, this class offers a robust set of tools to help you accomplish your goals.
## Conclusion
The `Conversation` class is a valuable utility for handling conversation data in Python. With its ability to add, update, delete, search, export, and import messages, you have the flexibility to work with conversations in various ways. Feel free to explore its features and adapt them to your specific projects and applications.

@ -0,0 +1,147 @@
# Module Name: Group Chat
The `GroupChat` class is used to create a group chat containing a list of agents. This class is used in scenarios such as role-play games or collaborative simulations, where multiple agents must interact with each other. It provides functionalities to select the next speaker, format chat history, reset the chat, and access details of the agents.
## Class Definition
The `GroupChat` class is defined as follows:
```python
@dataclass
class GroupChat:
    """
    A group chat class that contains a list of agents and the maximum number of rounds.

    Args:
        agents: List[Agent]
        messages: List[Dict]
        max_round: int
        admin_name: str

    Usage:
    >>> from swarms import GroupChat
    >>> from swarms.structs.agent import Agent
    >>> agents = Agent()
    """

    agents: List[Agent]
    messages: List[Dict]
    max_round: int = 10
    admin_name: str = "Admin"  # the name of the admin agent
```
## Arguments
The `GroupChat` class takes the following arguments:
| Argument | Type | Description | Default Value |
|-------------|---------------|---------------------------------------------------|-----------------|
| agents | List[Agent] | List of agents participating in the group chat. | |
| messages | List[Dict] | List of messages exchanged in the group chat. | |
| max_round | int | Maximum number of rounds for the group chat. | 10 |
| admin_name | str | Name of the admin agent. | "Admin" |
## Methods
1. **agent_names**
- Returns the names of the agents in the group chat.
- Returns: List of strings.
2. **reset**
- Resets the group chat, clears all the messages.
3. **agent_by_name**
- Finds an agent in the group chat by their name.
- Arguments: name (str) - Name of the agent to search for.
- Returns: Agent - The agent with the matching name.
- Raises: ValueError if no matching agent is found.
4. **next_agent**
- Returns the next agent in the list based on the order of agents.
- Arguments: agent (Agent) - The current agent.
- Returns: Agent - The next agent in the list.
5. **select_speaker_msg**
- Returns the message for selecting the next speaker.
6. **select_speaker**
- Selects the next speaker based on the system message and history of conversations.
- Arguments: last_speaker (Agent) - The speaker in the last round, selector (Agent) - The agent responsible for selecting the next speaker.
- Returns: Agent - The agent selected as the next speaker.
7. **_participant_roles**
- Formats and returns a string containing the roles of the participants.
- (Internal method, not intended for direct usage)
8. **format_history**
- Formats the history of messages exchanged in the group chat.
- Arguments: messages (List[Dict]) - List of messages.
- Returns: str - Formatted history of messages.
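For intuition, the round-robin lookup described for `next_agent` above can be sketched in a few lines (an illustration, not the class's actual source):
```python
def next_agent(agents, current):
    """Return the agent after `current`, wrapping around the list."""
    idx = agents.index(current)
    return agents[(idx + 1) % len(agents)]
```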
## Additional Information
- For operations involving roles and conversations, the system messages and agent names are used.
- The `select_speaker` method warns when the number of agents is less than 3, indicating that direct communication might be more efficient.
## Usage Example 1
```python
from swarms import GroupChat
from swarms.structs.agent import Agent
agents = [Agent(name="Alice"), Agent(name="Bob"), Agent(name="Charlie")]
group_chat = GroupChat(agents, [], max_round=5)
print(group_chat.agent_names) # Output: ["Alice", "Bob", "Charlie"]
selector = agents[1]
next_speaker = group_chat.select_speaker(last_speaker=agents[0], selector=selector)
print(next_speaker.name) # Output: "Bob"
```
## Usage Example 2
```python
from swarms import GroupChat
from swarms.structs.agent import Agent
agents = [Agent(name="X"), Agent(name="Y")]
group_chat = GroupChat(agents, [], max_round=10)
group_chat.messages.append({"role": "X", "content": "Hello Y!"})
group_chat.messages.append({"role": "Y", "content": "Hi X!"})
formatted_history = group_chat.format_history(group_chat.messages)
print(formatted_history)
"""
Output:
'X: Hello Y!
Y: Hi X!'
"""
agent_charlie = Agent(name="Charlie")
group_chat.agents.append(agent_charlie)
print(group_chat.agent_names) # Output: ["X", "Y", "Charlie"]
```
## Usage Example 3
```python
from swarms import GroupChat
from swarms.structs.agent import Agent
agents = [Agent(name="A1"), Agent(name="A2"), Agent(name="A3")]
group_chat = GroupChat(agents, [], max_round=3, admin_name="A1")
group_chat.reset()
print(group_chat.messages) # Output: []
```
This documentation has provided a comprehensive overview of the `GroupChat` class in the `swarms.structs` module of the `swarms` library, including the class definition, method descriptions, argument types, and usage examples.

@ -0,0 +1,92 @@
# GroupChatManager
Documentation:
The `GroupChatManager` class manages group chat interactions among multiple agents. It takes two main arguments: `groupchat`, of type `GroupChat`, the group chat object in which the conversation occurs, and `selector`, of type `Agent`, the agent that initiates the chat and selects speakers.
The class maintains and appends messages, manages the communication rounds, coordinates interaction between the agents, and extracts their replies.
Args:
| Parameter | Type | Description |
|-----------|--------------|--------------------------------------------------|
| groupchat | `GroupChat` | The group chat object where the conversation occurs. |
| selector | `Agent` | The agent who is the selector or the initiator of the chat. |
Usage:
```python
from swarms import GroupChat, GroupChatManager
from swarms.structs.agent import Agent

# Create the agents that will participate in the chat
agents = [Agent(name="Alice"), Agent(name="Bob")]

# Build the GroupChat and choose the selector (initiator) agent
groupchat = GroupChat(agents=agents, messages=[], max_round=10)
selector = agents[0]

# Initialize GroupChatManager with the GroupChat instance and the selector agent
manager = GroupChatManager(groupchat, selector)

# Call the group chat manager, passing a specific chat task
result = manager("Discuss the agenda for the upcoming meeting")
```
Explanation:
1. First, import the `GroupChatManager`, `GroupChat`, and `Agent` classes from the `swarms` library.
2. Then, create the agents that will participate in the chat and assemble them into a `GroupChat`.
3. After that, initialize the `GroupChatManager` with the `GroupChat` instance and a selector agent.
4. Finally, call the group chat manager, passing a specific chat task, and receive the response.
Source Code:
```python
class GroupChatManager:
    """
    GroupChatManager

    Args:
        groupchat: GroupChat
        selector: Agent

    Usage:
    >>> from swarms import GroupChatManager
    >>> from swarms.structs.agent import Agent
    >>> agents = Agent()
    """

    def __init__(self, groupchat: GroupChat, selector: Agent):
        self.groupchat = groupchat
        self.selector = selector

    def __call__(self, task: str):
        """Call 'GroupChatManager' instance as a function.

        Args:
            task (str): The task to be performed during the group chat.

        Returns:
            str: The response from the group chat.
        """
        self.groupchat.messages.append(
            {"role": self.selector.name, "content": task}
        )
        for i in range(self.groupchat.max_round):
            speaker = self.groupchat.select_speaker(
                last_speaker=self.selector, selector=self.selector
            )
            reply = speaker.generate_reply(
                self.groupchat.format_history(self.groupchat.messages)
            )
            self.groupchat.messages.append(reply)
            print(reply)
            if i == self.groupchat.max_round - 1:
                break
        return reply
```
The `GroupChatManager` class has an `__init__` method that takes `groupchat` and `selector` as arguments to initialize the class properties, and a `__call__` method that performs the group chat task and returns the response.
In the `__call__` method, the task is appended to the messages with the selector's role. The manager then iterates over the communication rounds, selecting a speaker, generating a reply from the formatted chat history, and appending each reply to the group chat. Finally, it returns the last reply.
The above example demonstrates how to use the `GroupChatManager` class to manage group chat interactions. You can further customize this class based on specific requirements and extend its functionality as needed.

@ -0,0 +1,96 @@
#### Class Name: NonlinearWorkflow
This class represents a Directed Acyclic Graph (DAG) workflow that stores tasks and their dependencies. The structure can validate, execute, and store the order of the tasks in the workflow. It has the following attributes and methods:
#### Attributes:
- `tasks` (dict): A dictionary mapping task names to Task objects.
- `edges` (dict): A dictionary mapping task names to a list of dependencies.
- `stopping_token` (str): The token which denotes the end condition for the workflow execution. Default: `<DONE>`
#### Methods:
1. `__init__(self, stopping_token: str = "<DONE>")`: The initialization method that sets up the NonlinearWorkflow object with an optional stopping token. This token marks the end of the workflow.
- **Args**:
- `stopping_token` (str): The token to denote the end condition for the workflow execution.
2. `add(task: Task, *dependencies: str)`: Adds a task to the workflow along with its dependencies. This method is used to add a new task to the workflow with an optional list of dependency tasks.
- **Args**:
- `task` (Task): The task to be added.
- `dependencies` (varargs): Variable number of dependency task names.
- **Returns**: None
3. `run()`: This method runs the workflow by executing tasks in topological order. It runs the tasks according to the sequence of dependencies.
- **Raises**:
- `Exception`: If a circular dependency is detected.
- **Returns**: None
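For intuition, the topological ordering used by `run` can be computed with Kahn's algorithm over the `edges` mapping; a minimal standalone sketch (an illustration, not the library's actual implementation):
```python
from collections import deque

def topological_order(edges):
    """Return task names so that every task comes after its dependencies.

    `edges` maps each task name to a list of task names it depends on.
    Raises ValueError when a circular dependency is detected.
    """
    indegree = {}
    dependents = {}
    for name, deps in edges.items():
        indegree[name] = indegree.get(name, 0) + len(deps)
        for dep in deps:
            indegree.setdefault(dep, 0)
            dependents.setdefault(dep, []).append(name)
    queue = deque(name for name, degree in indegree.items() if degree == 0)
    order = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for nxt in dependents.get(node, []):
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                queue.append(nxt)
    if len(order) != len(indegree):
        raise ValueError("Circular dependency detected")
    return order

print(topological_order({"task1": [], "task2": ["task1"], "task3": ["task1", "task2"]}))
# -> ['task1', 'task2', 'task3']
```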
#### Examples:
Usage Example 1:
```python
from swarms.models import OpenAIChat
from swarms.structs import NonlinearWorkflow, Task
# Initialize the OpenAIChat model
llm = OpenAIChat(openai_api_key="")
# Create a new Task
task = Task(llm, "What's the weather in Miami")
# Initialize the NonlinearWorkflow
workflow = NonlinearWorkflow()
# Add task to the workflow
workflow.add(task)
# Execute the workflow
workflow.run()
```
Usage Example 2:
```python
from swarms.models import OpenAIChat
from swarms.structs import NonlinearWorkflow, Task
# Initialize the OpenAIChat model
llm = OpenAIChat(openai_api_key="")
# Create new Tasks
task1 = Task(llm, "What's the weather in Miami")
task2 = Task(llm, "Book a flight to New York")
task3 = Task(llm, "Find a hotel in Paris")
# Initialize the NonlinearWorkflow
workflow = NonlinearWorkflow()
# Add tasks to the workflow as a dependency chain
workflow.add(task1)
workflow.add(task2, task1.name)
workflow.add(task3, task2.name)
# Execute the workflow
workflow.run()
```
Usage Example 3:
```python
from swarms.models import OpenAIChat
from swarms.structs import NonlinearWorkflow, Task
# Initialize the OpenAIChat model
llm = OpenAIChat(openai_api_key="")
# Create new Tasks
task1 = Task(llm, "What's the weather in Miami")
task2 = Task(llm, "Book a flight to New York")
task3 = Task(llm, "Find a hotel in Paris")
# Initialize the NonlinearWorkflow
workflow = NonlinearWorkflow()
# Add tasks to the workflow with dependencies
workflow.add(task1)
workflow.add(task2, task1.name)
workflow.add(task3, task1.name, task2.name)
# Execute the workflow
workflow.run()
```
These examples illustrate the three main types of usage for the NonlinearWorkflow class and how it can be used to represent a directed acyclic graph (DAG) workflow with tasks and their dependencies.
---
The explanatory documentation details the architectural aspects, methods, attributes, examples, and usage patterns for the `NonlinearWorkflow` class. By following the module and function definition structure, the documentation provides clear and comprehensive descriptions of the class and its functionalities.

@ -0,0 +1,71 @@
**Module/Function Name: RecursiveWorkflow**
`class RecursiveWorkflow(BaseStructure)`
Creates a recursive workflow structure for executing a task until a stated stopping condition is reached.
#### Parameters
* *task* (`Task`): The task to execute.
* *stop_token* (`Any`): The token that signals the termination of the workflow.
#### Examples:
```python
from swarms.models import OpenAIChat
from swarms.structs import RecursiveWorkflow, Task
llm = OpenAIChat(openai_api_key="YourKey")
task = Task(llm, "What's the weather in miami")
workflow = RecursiveWorkflow(stop_token="<DONE>")
workflow.add(task)
workflow.run()
```
Returns: None
#### Source Code:
```python
class RecursiveWorkflow(BaseStructure):
    def __init__(self, stop_token: str = "<DONE>"):
        """
        Args:
            stop_token (str, optional): The token that indicates when to stop the workflow. Default is "<DONE>".
            The stop_token indicates the value at which the current workflow is finished.
        """
        self.stop_token = stop_token
        self.tasks = []

        assert (
            self.stop_token is not None
        ), "stop_token cannot be None"

    def add(self, task: Task, tasks: List[Task] = None):
        """Adds a task to the workflow.

        Args:
            task (Task): The task to be added.
            tasks (List[Task], optional): List of tasks to be executed.
        """
        try:
            if tasks:
                for task in tasks:
                    self.tasks.append(task)
            else:
                self.tasks.append(task)
        except Exception as error:
            print(f"[ERROR][RecursiveWorkflow] {error}")
            raise error

    def run(self):
        """Executes the tasks in the workflow until the stop token is encountered."""
        try:
            for task in self.tasks:
                while True:
                    result = task.execute()
                    if self.stop_token in result:
                        break
        except Exception as error:
            print(f"[ERROR][RecursiveWorkflow] {error}")
            raise error
```
In summary, the `RecursiveWorkflow` class is designed to automate tasks by adding and executing these tasks recursively until a stopping condition is reached. This can be achieved by utilizing the `add` and `run` methods provided. A general format for adding and utilizing the `RecursiveWorkflow` class has been provided under the "Examples" section. If you require any further information, view other sections, like Args and Source Code for specifics on using the class effectively.

@ -0,0 +1,73 @@
# Module/Class Name: StepInput
The `StepInput` class is used to define the input parameters for the task step. It is a part of the `BaseModel` and accepts any value. This documentation will provide an overview of the class, its functionality, and usage examples.
## Overview and Introduction
The `StepInput` class is an integral part of the `swarms.structs` library, allowing users to define and pass input parameters for a specific task step. This class provides flexibility by accepting any value, allowing the user to customize the input parameters according to their requirements.
## Class Definition
The `StepInput` class is defined as follows:
```python
class StepInput(BaseModel):
    __root__: Any = Field(
        ...,
        description=(
            "Input parameters for the task step. Any value is"
            " allowed."
        ),
        example='{\n"file_to_refactor": "models.py"\n}',
    )
```
The `StepInput` class extends the `BaseModel` and contains a single field `__root__` of type `Any` with a description of accepting input parameters for the task step.
## Functionality and Usage
The `StepInput` class is designed to accept any input value, providing flexibility and customization for task-specific parameters. Upon creating an instance of `StepInput`, the user can define and pass input parameters as per their requirements.
### Usage Example 1:
```python
from swarms.structs import StepInput
input_params = {
"file_to_refactor": "models.py",
"refactor_method": "code"
}
step_input = StepInput(__root__=input_params)
```
In this example, we import the `StepInput` class from the `swarms.structs` library and create an instance `step_input` by passing a dictionary of input parameters. The `StepInput` class allows any value to be passed, providing flexibility for customization.
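The wrapped parameters can be read back through the model's `__root__` attribute (pydantic's custom-root-type behavior):
```python
print(step_input.__root__["file_to_refactor"])  # Output: models.py
```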
### Usage Example 2:
```python
from swarms.structs import StepInput
input_params = {
"input_path": "data.csv",
"output_path": "result.csv"
}
step_input = StepInput(__root__=input_params)
```
In this example, we again create an instance of `StepInput` by passing a dictionary of input parameters. The `StepInput` class does not restrict the type of input, allowing users to define parameters based on their specific task requirements.
### Usage Example 3:
```python
import json

from swarms.structs import StepInput

file_path = "config.json"
with open(file_path, "r") as f:
    input_data = json.load(f)
step_input = StepInput(__root__=input_data)
```
In this example, we read input parameters from a JSON file and create an instance of `StepInput` by passing the loaded JSON data. The `StepInput` class seamlessly accepts input data from various sources, providing versatility to the user.
## Additional Information and Tips
When using the `StepInput` class, ensure that the input parameters are well-defined and align with the requirements of the task step. When passing complex data structures, such as nested dictionaries or JSON objects, ensure that the structure is valid and well-formed.
## References and Resources
- For further information on the `BaseModel` and `Field` classes, refer to the Pydantic documentation: [Pydantic Documentation](https://pydantic-docs.helpmanual.io/)
The `StepInput` class within the `swarms.structs` library is a versatile and essential component for defining task-specific input parameters. Its flexibility in accepting any value and seamless integration with diverse data sources make it a valuable asset for customizing input parameters for task steps.

@ -0,0 +1,157 @@
# Class Name: SwarmNetwork
## Overview and Introduction
The `SwarmNetwork` class is responsible for managing the agent pool and the task queue. It also monitors agent health and scales the pool up or down based on the number of pending tasks and the current load on the agents.
## Class Definition
The `SwarmNetwork` class has the following parameters:
| Parameter | Type | Description |
|-------------------|-------------------|-------------------------------------------------------------------------------|
| idle_threshold | float | Threshold for idle agents to trigger scaling down |
| busy_threshold | float | Threshold for busy agents to trigger scaling up |
| agents | List[Agent] | List of agent instances to be added to the pool |
| api_enabled | Optional[bool] | Flag to enable/disable the API functionality |
| logging_enabled | Optional[bool] | Flag to enable/disable logging |
| other arguments | *args | Additional arguments |
| other keyword | **kwargs | Additional keyword arguments |
## Function Explanation and Usage
### Function: `add_task`
- Adds a task to the task queue
- Parameters:
- `task`: The task to be added to the queue
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.add_task("task")
```
### Function: `async_add_task`
- Asynchronous function to add a task to the task queue
- Parameters:
- `task`: The task to be added to the queue
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
agent = Agent()
swarm = SwarmNetwork(agents=[agent])
await swarm.async_add_task("task")
```
### Function: `run_single_agent`
- Executes a task on a single agent
- Parameters:
- `agent_id`: ID of the agent to run the task on
- `task`: The task to be executed by the agent (optional)
- Returns:
- Result of the task execution
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.run_single_agent(agent.id, "task")
```
### Function: `run_many_agents`
- Executes a task on all the agents in the pool
- Parameters:
- `task`: The task to be executed by the agents (optional)
- Returns:
- List of results from each agent
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.run_many_agents("task")
```
### Function: `list_agents`
- Lists all the agents in the pool
- Returns:
- List of active agents
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.list_agents()
```
### Function: `add_agent`
- Adds an agent to the agent pool
- Parameters:
- `agent`: Agent instance to be added to the pool
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
agent = Agent()
swarm = SwarmNetwork()
swarm.add_agent(agent)
```
### Function: `remove_agent`
- Removes an agent from the agent pool
- Parameters:
- `agent_id`: ID of the agent to be removed from the pool
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.remove_agent(agent.id)
```
### Function: `scale_up`
- Scales up the agent pool by adding new agents
- Parameters:
- `num_agents`: Number of agents to be added (optional)
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
swarm = SwarmNetwork()
swarm.scale_up(num_agents=5)
```
### Function: `scale_down`
- Scales down the agent pool by removing existing agents
- Parameters:
- `num_agents`: Number of agents to be removed (optional)
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
agents = [Agent() for _ in range(5)]
swarm = SwarmNetwork(agents=agents)
swarm.scale_down(num_agents=2)
```
### Function: `create_apis_for_agents`
- Creates APIs for each agent in the pool (optional)
- Example:
```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork
agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.create_apis_for_agents()
```
## Additional Information
- The `SwarmNetwork` class is an essential part of the swarms.structs library, enabling efficient management and scaling of agent pools.

@ -0,0 +1,28 @@
# Module/Class Name: Task
- The `Task` class represents a single unit of work to be executed by an agent.
- The constructor takes `description`, `agent`, `args`, `kwargs`, `result`, `history`, `schedule_time`, `scheduler`, `trigger`, `action`, `condition`, `priority`, and `dependencies`.
- The `execute` method runs the task by calling the agent or model with the arguments and keyword arguments.
- A trigger, action, and condition can be set on the task.
- Task completion is checked with the `is_completed` method.
- `add_dependency` adds a task to the list of dependencies.
- `set_priority` sets the priority of the task.
```python
# Example 1: Creating and executing a Task
import datetime

from swarms.structs import Task, Agent
from swarms.models import OpenAIChat
agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False)
task = Task(description="What's the weather in miami", agent=agent)
task.execute()
print(task.result)
# Example 2: Adding a dependency and setting priority
task2 = Task(description="Task 2", agent=agent)
task.add_dependency(task2)
task.set_priority(1)
# Example 3: Executing a scheduled task
task3 = Task(description="Scheduled Task", agent=agent)
task3.schedule_time = datetime.datetime.now() + datetime.timedelta(minutes=30)
task3.handle_scheduled_task()
print(task3.is_completed())
```

@ -0,0 +1,75 @@
## Module/Class Name: TaskInput
The `TaskInput` class is designed to handle the input parameters for a task. It is an abstract class that serves as the base model for input data manipulation.
### Overview and Introduction
The `TaskInput` class is an essential component of the `swarms.structs` library, allowing users to define and pass input parameters to tasks. It is crucial for ensuring the correct and structured input to various tasks and processes within the library.
### Class Definition
#### TaskInput Class:
- Parameters:
- `__root__` (Any): The input parameters for the task. Any value is allowed.
### Disclaimer:
It is important to note that the `TaskInput` class extends the `BaseModel` from the `pydantic` library. This means that it inherits all the properties and methods of the `BaseModel`.
### Functionality and Usage
The `TaskInput` class encapsulates the input parameters in a structured format. It allows for easy validation and manipulation of input data.
#### Usage Example 1: Using TaskInput for Debugging
```python
from pydantic import BaseModel, Field
from swarms.structs import TaskInput
class DebugInput(TaskInput):
debug: bool
# Creating an instance of DebugInput
debug_params = DebugInput(__root__={"debug": True})
# Accessing the input parameters
print(debug_params.debug) # Output: True
```
#### Usage Example 2: Using TaskInput for Task Modes
```python
from pydantic import BaseModel, Field
from swarms.structs import TaskInput
class ModeInput(TaskInput):
mode: str
# Creating an instance of ModeInput
mode_params = ModeInput(__root__={"mode": "benchmarks"})
# Accessing the input parameters
print(mode_params.mode) # Output: benchmarks
```
#### Usage Example 3: Using TaskInput with Arbitrary Parameters
```python
from pydantic import BaseModel, Field
from swarms.structs import TaskInput
class ArbitraryInput(TaskInput):
message: str
quantity: int
# Creating an instance of ArbitraryInput
arbitrary_params = ArbitraryInput(__root__={"message": "Hello, world!", "quantity": 5})
# Accessing the input parameters
print(arbitrary_params.message) # Output: Hello, world!
print(arbitrary_params.quantity) # Output: 5
```
### Additional Information and Tips
- The `TaskInput` class can be extended to create custom input models with specific parameters tailored to individual tasks.
- The `Field` class from `pydantic` can be used to specify metadata and constraints for the input parameters.
### References and Resources
- Official `pydantic` Documentation: [https://pydantic-docs.helpmanual.io/](https://pydantic-docs.helpmanual.io/)
- Additional resources on data modelling with `pydantic`: [https://www.tiangolo.com/blog/2021/02/16/real-python-tutorial-modern-fastapi-pydantic/](https://www.tiangolo.com/blog/2021/02/16/real-python-tutorial-modern-fastapi-pydantic/)
This documentation presents the `TaskInput` class, its usage, and practical examples for creating and handling input parameters within the `swarms.structs` library.

@ -1,4 +1,4 @@
# `GodMode` Documentation
# `ModelParallelizer` Documentation
## Table of Contents
1. [Understanding the Purpose](#understanding-the-purpose)
@ -11,19 +11,19 @@
## 1. Understanding the Purpose <a name="understanding-the-purpose"></a>
To create comprehensive documentation for the `GodMode` class, let's begin by understanding its purpose and functionality.
To create comprehensive documentation for the `ModelParallelizer` class, let's begin by understanding its purpose and functionality.
### Purpose and Functionality
`GodMode` is a class designed to facilitate the orchestration of multiple Language Model Models (LLMs) to perform various tasks simultaneously. It serves as a powerful tool for managing, distributing, and collecting responses from these models.
`ModelParallelizer` is a class designed to facilitate the orchestration of multiple large language models (LLMs) to perform various tasks simultaneously. It serves as a powerful tool for managing, distributing, and collecting responses from these models.
Key features and functionality include:
- **Parallel Task Execution**: `GodMode` can distribute tasks to multiple LLMs and execute them in parallel, improving efficiency and reducing response time.
- **Parallel Task Execution**: `ModelParallelizer` can distribute tasks to multiple LLMs and execute them in parallel, improving efficiency and reducing response time.
- **Structured Response Presentation**: The class presents the responses from LLMs in a structured tabular format, making it easy for users to compare and analyze the results.
- **Task History Tracking**: `GodMode` keeps a record of tasks that have been submitted, allowing users to review previous tasks and responses.
- **Task History Tracking**: `ModelParallelizer` keeps a record of tasks that have been submitted, allowing users to review previous tasks and responses.
- **Asynchronous Execution**: The class provides options for asynchronous task execution, which can be particularly useful for handling a large number of tasks.
@ -33,29 +33,29 @@ Now that we have an understanding of its purpose, let's proceed to provide a det
### Overview
The `GodMode` class is a crucial component for managing and utilizing multiple LLMs in various natural language processing (NLP) tasks. Its architecture and functionality are designed to address the need for parallel processing and efficient response handling.
The `ModelParallelizer` class is a crucial component for managing and utilizing multiple LLMs in various natural language processing (NLP) tasks. Its architecture and functionality are designed to address the need for parallel processing and efficient response handling.
### Importance and Relevance
In the rapidly evolving field of NLP, it has become common to use multiple language models to achieve better results in tasks such as translation, summarization, and question answering. `GodMode` streamlines this process by allowing users to harness the capabilities of several LLMs simultaneously.
In the rapidly evolving field of NLP, it has become common to use multiple language models to achieve better results in tasks such as translation, summarization, and question answering. `ModelParallelizer` streamlines this process by allowing users to harness the capabilities of several LLMs simultaneously.
Key points:
- **Parallel Processing**: `GodMode` leverages multithreading to execute tasks concurrently, significantly reducing the time required for processing.
- **Parallel Processing**: `ModelParallelizer` leverages multithreading to execute tasks concurrently, significantly reducing the time required for processing.
- **Response Visualization**: The class presents responses in a structured tabular format, enabling users to visualize and analyze the outputs from different LLMs.
- **Task Tracking**: Developers can track the history of tasks submitted to `GodMode`, making it easier to manage and monitor ongoing work.
- **Task Tracking**: Developers can track the history of tasks submitted to `ModelParallelizer`, making it easier to manage and monitor ongoing work.
### Architecture and How It Works
The architecture and working of `GodMode` can be summarized in four steps:
The architecture and working of `ModelParallelizer` can be summarized in four steps:
1. **Task Reception**: `GodMode` receives a task from the user.
1. **Task Reception**: `ModelParallelizer` receives a task from the user.
2. **Task Distribution**: The class distributes the task to all registered LLMs.
3. **Response Collection**: `GodMode` collects the responses generated by the LLMs.
3. **Response Collection**: `ModelParallelizer` collects the responses generated by the LLMs.
4. **Response Presentation**: Finally, the class presents the responses from all LLMs in a structured tabular format, making it easy for users to compare and analyze the results.
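For intuition, the fan-out/fan-in pattern behind these four steps can be sketched with a thread pool (an illustration, not the class's actual source):
```python
from concurrent.futures import ThreadPoolExecutor

def run_on_all_llms(llms, task):
    """Distribute `task` to every registered LLM and collect the responses in order."""
    with ThreadPoolExecutor(max_workers=max(len(llms), 1)) as executor:
        return list(executor.map(lambda llm: llm(task), llms))
```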
@ -65,15 +65,15 @@ Now that we have an overview, let's proceed with a detailed class definition.
### Class Attributes
- `llms`: A list of LLMs (Language Model Models) that `GodMode` manages.
- `llms`: A list of LLMs (Language Model Models) that `ModelParallelizer` manages.
- `last_responses`: Stores the responses from the most recent task.
- `task_history`: Keeps a record of all tasks submitted to `GodMode`.
- `task_history`: Keeps a record of all tasks submitted to `ModelParallelizer`.
### Methods
The `GodMode` class defines various methods to facilitate task distribution, execution, and response presentation. Let's examine some of the key methods:
The `ModelParallelizer` class defines various methods to facilitate task distribution, execution, and response presentation. Let's examine some of the key methods:
- `run(task)`: Distributes a task to all LLMs, collects responses, and returns them.
@ -87,23 +87,23 @@ The `GodMode` class defines various methods to facilitate task distribution, exe
- `save_responses_to_file(filename)`: Saves responses to a file for future reference.
- `load_llms_from_file(filename)`: Loads LLMs from a file, making it easy to configure `GodMode` for different tasks.
- `load_llms_from_file(filename)`: Loads LLMs from a file, making it easy to configure `ModelParallelizer` for different tasks.
- `get_task_history()`: Retrieves the task history, allowing users to review previous tasks.
- `summary()`: Provides a summary of task history and the last responses, aiding in post-processing and analysis.
Now that we have covered the class definition, let's delve into the functionality and usage of `GodMode`.
Now that we have covered the class definition, let's delve into the functionality and usage of `ModelParallelizer`.
## 4. Functionality and Usage <a name="functionality-and-usage"></a>
### Distributing a Task and Collecting Responses
One of the primary use cases of `GodMode` is to distribute a task to all registered LLMs and collect their responses. This can be achieved using the `run(task)` method. Below is an example:
One of the primary use cases of `ModelParallelizer` is to distribute a task to all registered LLMs and collect their responses. This can be achieved using the `run(task)` method. Below is an example:
```python
god_mode = GodMode(llms)
responses = god_mode.run("Translate the following English text to French: 'Hello, how are you?'")
parallelizer = ModelParallelizer(llms)
responses = parallelizer.run("Translate the following English text to French: 'Hello, how are you?'")
```
### Printing Responses
@ -111,7 +111,7 @@ responses = god_mode.run("Translate the following English text to French: 'Hello
To present the responses from all LLMs in a structured tabular format, use the `print_responses(task)` method. Example:
```python
god_mode.print_responses("Summarize the main points of 'War and Peace.'")
parallelizer.print_responses("Summarize the main points of 'War and Peace.'")
```
### Saving Responses to a File
@ -119,15 +119,15 @@ god_mode.print_responses("Summarize the main points of 'War and Peace.'")
Users can save the responses to a file using the `save_responses_to_file(filename)` method. This is useful for archiving and reviewing responses later. Example:
```python
god_mode.save_responses_to_file("responses.txt")
parallelizer.save_responses_to_file("responses.txt")
```
### Task History
The `GodMode` class keeps track of the task history. Developers can access the task history using the `get_task_history()` method. Example:
The `ModelParallelizer` class keeps track of the task history. Developers can access the task history using the `get_task_history()` method. Example:
```python
task_history = god_mode.get_task_history()
task_history = parallelizer.get_task_history()
for i, task in enumerate(task_history):
    print(f"Task {i + 1}: {task}")
```
@ -136,7 +136,7 @@ for i, task in enumerate(task_history):
### Parallel Execution
`GodMode` employs multithreading to execute tasks concurrently. This parallel processing capability significantly improves the efficiency of handling multiple tasks simultaneously.
`ModelParallelizer` employs multithreading to execute tasks concurrently. This parallel processing capability significantly improves the efficiency of handling multiple tasks simultaneously.
### Response Visualization
@ -144,13 +144,13 @@ The structured tabular format used for presenting responses simplifies the compa
## 6. Examples <a name="examples"></a>
Let's explore additional usage examples to illustrate the versatility of `GodMode` in handling various NLP tasks.
Let's explore additional usage examples to illustrate the versatility of `ModelParallelizer` in handling various NLP tasks.
### Example 1: Sentiment Analysis
```python
from swarms.models import OpenAIChat
from swarms.swarms import GodMode
from swarms.swarms import ModelParallelizer
from swarms.workers.worker import Worker
# Create an instance of an LLM for sentiment analysis
@ -184,15 +184,15 @@ worker3 = Worker(
temperature=0.5,
)
# Register the worker agents with GodMode
# Register the worker agents with ModelParallelizer
agents = [worker1, worker2, worker3]
god_mode = GodMode(agents)
parallelizer = ModelParallelizer(agents)
# Task for sentiment analysis
task = "Please analyze the sentiment of the following sentence: 'This movie is amazing!'"
# Print responses from all agents
god_mode.print_responses(task)
parallelizer.print_responses(task)
```
### Example 2: Translation
@ -200,22 +200,22 @@ god_mode.print_responses(task)
```python
from swarms.models import OpenAIChat
from swarms.swarms import GodMode
from swarms.swarms import ModelParallelizer
# Define LLMs for translation tasks
translator1 = OpenAIChat(model_name="translator-en-fr", openai_api_key="api-key", temperature=0.7)
translator2 = OpenAIChat(model_name="translator-en-es", openai_api_key="api-key", temperature=0.7)
translator3 = OpenAIChat(model_name="translator-en-de", openai_api_key="api-key", temperature=0.7)
# Register translation agents with GodMode
# Register translation agents with ModelParallelizer
translators = [translator1, translator2, translator3]
god_mode = GodMode(translators)
parallelizer = ModelParallelizer(translators)
# Task for translation
task = "Translate the following English text to French: 'Hello, how are you?'"
# Print translated responses from all agents
god_mode.print_responses(task)
parallelizer.print_responses(task)
```
### Example 3: Summarization
@ -223,7 +223,7 @@ god_mode.print_responses(task)
```python
from swarms.models import OpenAIChat
from swarms.swarms import GodMode
from swarms.swarms import ModelParallelizer
# Define LLMs for summarization tasks
@ -231,19 +231,19 @@ summarizer1 = OpenAIChat(model_name="summarizer-en", openai_api_key="api-key", t
summarizer2 = OpenAIChat(model_name="summarizer-en", openai_api_key="api-key", temperature=0.6)
summarizer3 = OpenAIChat(model_name="summarizer-en", openai_api_key="api-key", temperature=0.6)
# Register summarization agents with GodMode
# Register summarization agents with ModelParallelizer
summarizers = [summarizer1, summarizer2, summarizer3]
god_mode = GodMode(summarizers)
parallelizer = ModelParallelizer(summarizers)
# Task for summarization
task = "Summarize the main points of the article titled 'Climate Change and Its Impact on the Environment.'"
# Print summarized responses from all agents
god_mode.print_responses(task)
parallelizer.print_responses(task)
```
## 7. Conclusion <a name="conclusion"></a>
In conclusion, the `GodMode` class is a powerful tool for managing and orchestrating multiple Language Model Models in natural language processing tasks. Its ability to distribute tasks, collect responses, and present them in a structured format makes it invaluable for streamlining NLP workflows. By following the provided documentation, users can harness the full potential of `GodMode` to enhance their natural language processing projects.
In conclusion, the `ModelParallelizer` class is a powerful tool for managing and orchestrating multiple large language models in natural language processing tasks. Its ability to distribute tasks, collect responses, and present them in a structured format makes it invaluable for streamlining NLP workflows. By following the provided documentation, users can harness the full potential of `ModelParallelizer` to enhance their natural language processing projects.
For further information on specific LLMs or advanced usage, refer to the documentation of the respective models and their APIs. Additionally, external resources on parallel execution and response visualization can provide deeper insights into these topics.

@ -0,0 +1,86 @@
# Module/Function Name: check_device
`check_device` is a utility function built on PyTorch that identifies and returns the appropriate device(s) for CUDA processing. If CUDA is not available, the CPU device is returned; if CUDA is available, the function returns a list of all available GPU devices.
The function examines CUDA availability, checks for multiple GPUs, and inspects additional properties of each device.
## Function Signature and Arguments
**Signature:**
```python
def check_device(
    log_level: Any = logging.INFO,
    memory_threshold: float = 0.8,
    capability_threshold: float = 3.5,
    return_type: str = "list",
) -> Union[torch.device, List[torch.device]]:
```
| Parameter | Data Type | Default Value | Description |
| ------------- | ------------- | ------------- | ------------- |
| `log_level` | Any | logging.INFO | The log level. |
| `memory_threshold` | float | 0.8 | It is used to check the threshold of memory used on the GPU(s). |
| `capability_threshold` | float | 3.5 | It is used to consider only those GPU(s) which have higher compute capability compared to the threshold. |
| `return_type` | str | "list" | Depending on the `return_type` either a list of devices can be returned or a single device. |
The function takes no mandatory arguments; `log_level`, `memory_threshold`, `capability_threshold`, and `return_type` are all optional.
**Returns:**
- A single `torch.device` when one device qualifies, a list of `torch.device` objects when multiple CUDA devices are available, or the CPU device when CUDA is not available.
## Usage and Examples
### Example 1: Basic Usage
```python
import torch
import logging
from swarms.utils import check_device
# Basic usage
device = check_device(
    log_level=logging.INFO,
    memory_threshold=0.8,
    capability_threshold=3.5,
    return_type="list",
)
```
### Example 2: Using CPU when CUDA is not available
```python
import torch
import logging
from swarms.utils import check_device
# When CUDA is not available
device = check_device()
print(device) # If CUDA is not available it should return torch.device('cpu')
```
### Example 3: Multiple GPU Available
```python
import torch
import logging
from swarms.utils import check_device
# When multiple GPUs are available
device = check_device()
print(device) # Should return a list of available GPU devices
```
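Once a device has been selected, tensors and models can be moved onto it; a small sketch that handles the list-return case:
```python
import torch

from swarms.utils import check_device

device = check_device()
# check_device may return a list when multiple GPUs qualify
if isinstance(device, list):
    device = device[0]

x = torch.randn(3, 3).to(device)
```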
## Tips and Additional Information
- This function is useful when a user wants to exploit CUDA for faster computation but is unsure which devices are available. It abstracts all the necessary checks and provides a list of suitable CUDA devices.
- The `memory_threshold` and `capability_threshold` parameters filter the GPU devices: GPUs whose memory usage exceeds `memory_threshold` or whose compute capability falls below `capability_threshold` are not considered.
- The CPU device has no memory or compute-capability values; when CUDA is unavailable, it is returned by default without any comparison.
## Relevant Resources
- For more details about the CUDA properties functions used (`torch.cuda.get_device_capability`, `torch.cuda.get_device_properties`), please refer to the official PyTorch [CUDA semantics documentation](https://pytorch.org/docs/stable/notes/cuda.html).
- For more information about Torch device objects, you can refer to the official PyTorch [device documentation](https://pytorch.org/docs/stable/tensor_attributes.html#torch-device).
- For a better understanding of how the `logging` module works in Python, see the official Python [logging documentation](https://docs.python.org/3/library/logging.html).

@ -0,0 +1,86 @@
# Module Name: `display_markdown_message`
## Introduction
`display_markdown_message` is a utility function for printing visually pleasing, markdown-formatted messages from Python scripts. It automatically handles multiline strings with heavy indentation and renders single-line messages wrapped in ">" tags as easy-to-read blockquotes, giving scripts a convenient and elegant way to log or display messages.
## Function Definition and Arguments
Function Definition:
```python
def display_markdown_message(message: str, color: str = "cyan"):
```
This function accepts two parameters:
|Parameter |Type |Default Value |Description |
|--- |--- |--- |--- |
|message |str |N/A (required) |The message to be displayed, as a string. It can contain markdown syntax.|
|color |str |"cyan" |The color used to render the message. Defaults to "cyan". Accepts any valid color name.|
## Functionality and Usage
This utility function is used to display a markdown formatted message on the console. It accepts a message as a string and an optional color for the message. The function is ideal for generating stylized print outputs such as headers, status updates or pretty notifications.
By default, certain lines within the message string are treated specially:
- Lines beginning with `>` are rendered as markdown blockquotes.
- Lines consisting of `---` are rendered as horizontal rules.
The function automatically strips leading and trailing whitespace from each line of the message, maintaining aesthetic consistency in your console output.
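For intuition, a minimal sketch of how such a function might be implemented with the `rich` library is shown below; the choice of `rich` as the renderer is an assumption, and the actual implementation may differ:
```python
from rich.console import Console
from rich.markdown import Markdown
from rich.rule import Rule


def display_markdown_message_sketch(message: str, color: str = "cyan"):
    """Render each line of `message` as styled markdown on the console."""
    console = Console()
    for line in message.split("\n"):
        line = line.strip()
        if line == "---":
            console.print(Rule(style=color))  # horizontal rule
        elif line:
            console.print(Markdown(line), style=color)
        else:
            console.print()  # preserve blank lines
```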
### Usage Examples
#### Basic Example
```python
display_markdown_message("> This is an important message", color="red")
```
Output:
```md
> **This is an important message**
```
This example prints the string "This is an important message" in red, rendered as a blockquote.
#### Multiline Example
```python
message = """
> Header
My normal message here.
---
Another important piece of information
"""
display_markdown_message(message, color="green")
```
Output:
```md
> **Header**
My normal message here.
_____
Another important piece of information
```
The output is green-colored, markdown-styled text with "Header" rendered as a blockquote, followed by "My normal message here.", a horizontal rule, and finally "Another important piece of information".
## Additional Information
Use newline characters `\n` to separate the lines of the message. Remember that each line of the message is stripped of leading and trailing whitespace; if you have special markdown requirements, adjust the input message string accordingly.
Also keep in mind the console or terminal's ability to display the chosen color. If a particular console does not support the chosen color, the output may fall back to the default console color.
For a full list of supported color names, refer to the official [Console documentation](http://console.readthedocs.io/).
## References and Resources
- Python Strings: https://docs.python.org/3/tutorial/introduction.html#strings
- Python Markdown: https://pypi.org/project/markdown/
- Console module: https://console.readthedocs.io/

@ -0,0 +1,114 @@
# extract_code_from_markdown
# swarms.utils Module
The `swarms.utils` module provides utility functions that support specific tasks within the main Swarm codebase. This page documents one of its central functions, `extract_code_from_markdown`.
## Overview and Introduction
Many software projects use Markdown extensively for writing documentation, tutorials, and other text documents that can be easily rendered and viewed in different formats, including HTML.
The `extract_code_from_markdown` function plays a crucial role within the swarms.utils library. As developers write large volumes of Markdown, they often need to isolate code snippets from the whole Markdown file body. These isolated snippets can be used to generate test cases, transform into other languages, or analyze for metrics.
## Function Definition: `extract_code_from_markdown`
```python
import re


def extract_code_from_markdown(markdown_content: str) -> str:
    """
    Extracts code blocks from a Markdown string and returns them as a single string.

    Args:
    - markdown_content (str): The Markdown content as a string.

    Returns:
    - str: A single string containing all the code blocks separated by newlines.
    """
    # Regular expression for fenced code blocks
    pattern = r"```(?:\w+\n)?(.*?)```"
    matches = re.findall(pattern, markdown_content, re.DOTALL)

    # Concatenate all code blocks separated by newlines
    return "\n".join(code.strip() for code in matches)
```
### Arguments
The function `extract_code_from_markdown` takes one argument:
| Argument | Description | Type | Default Value |
|-----------------------|----------------------------------------|-------------|-------------------|
| markdown_content | The input markdown content as a string | str | N/A |
## Function Explanation and Usage
This function uses a regular expression to find all fenced code blocks in a Markdown string. The pattern `r"```(?:\w+\n)?(.*?)```"` matches spans that start and end with three backticks; the opening backticks may optionally be followed by a language identifier and a newline, and the captured group lazily matches any characters (the `.*?` part) up to the next set of triple backticks.
Once we have the matches, we join all the code blocks into a single string, each block separated by a newline.
The method's functionality is particularly useful when we need to extract code blocks from markdown content for secondary processing, such as syntax highlighting or execution in a different environment.
### Usage Examples
Below are three examples of how you might use this function:
#### Example 1:
Extracting code blocks from a simple markdown string.
```python
from swarms.utils import extract_code_from_markdown

markdown_string = '''# Example
This is an example of a code block:
```python
print("Hello World!")
``` '''
print(extract_code_from_markdown(markdown_string))
```
#### Example 2:
Extracting code blocks from a markdown file.
```python
from swarms.utils import extract_code_from_markdown

# Assume that 'example.md' contains multiple code blocks
with open('example.md', 'r') as file:
    markdown_content = file.read()

print(extract_code_from_markdown(markdown_content))
```
#### Example 3:
Using the function in a pipeline to extract and then analyze code blocks.
```python
from swarms.utils import extract_code_from_markdown


def analyze_code_blocks(code: str):
    # Add your analysis logic here
    pass


# Assume that 'example.md' contains multiple code blocks
with open('example.md', 'r') as file:
    markdown_content = file.read()

code_blocks = extract_code_from_markdown(markdown_content)
analyze_code_blocks(code_blocks)
```
## Conclusion
This concludes the detailed documentation of the `extract_code_from_markdown` function from the swarms.utils module. With this documentation, you should be able to understand the function's purpose, how it works, its parameters, and see examples of how to use it effectively.

@ -0,0 +1,94 @@
# find_image_path
# Overview
The **swarms.utils** module provides utility functions that are crucial when building swarm intelligence frameworks. These include common operations such as handling file input-output, text parsing, and basic mathematical computations needed while constructing swarm intelligence models.
The function `find_image_path` documented here extracts image paths from a given text document.
# Function Detailed Explanation
## Definition
The function `find_image_path` takes a singular argument as an input:
```python
def find_image_path(text):
# function body
```
## Parameter
The parameter `text` is a string representing the document from which the function extracts image paths. The function scans the given text, looking for *absolute* or *relative* paths to image files (.png, .jpg, .jpeg) on disk.
| Parameter Name | Data Type | Default Value | Description |
|:--------------:|:---------:|:-------------:|:--------:|
| `text` | `str` | - | The text content to scan for image paths |
## Return Value
The return value of the function `find_image_path` is a string that represents the longest existing image path extracted from the input text. If no image paths exist within the text, the function returns `None`.
| Return Value | Data Type | Description |
|:------------:|:-----------:|:-----------:|
| Path | `str` | Longest image path found in the text or `None` if no path found |
# Function's Code
The function `find_image_path` performs text parsing and pattern matching to find image paths within the provided text, using the `re` (regular expressions) module to detect all candidate paths.
```python
import os
import re


def find_image_path(text):
    # Match Windows-style (C:\...) and Unix-style (/...) paths ending in an image extension
    pattern = r"([A-Za-z]:\\[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))|(/[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))"
    matches = [
        match.group()
        for match in re.finditer(pattern, text)
        if match.group()
    ]
    # Also consider variants of each match with backslashes stripped
    matches += [match.replace("\\", "") for match in matches if match]
    # Keep only paths that actually exist on disk
    existing_paths = [
        match for match in matches if os.path.exists(match)
    ]
    return max(existing_paths, key=len) if existing_paths else None
```
# Usage Examples
Let's consider examples of how the function `find_image_path` can be used in different scenarios.
**Example 1:**
Consider the case where a text without any image path is provided.
```python
from swarms.utils import find_image_path
text = "There are no image paths in this text"
print(find_image_path(text)) # Outputs: None
```
**Example 2:**
Consider the case where the text has multiple image paths.
```python
from swarms.utils import find_image_path
text = "Here is an image path: /home/user/image1.png. Here is another one: C:\\Users\\User\\Documents\\image2.jpeg"
print(find_image_path(text)) # Outputs: the longest image path (depends on your file system and existing files)
```
**Example 3:**
In the final example, we consider a case where the text has an image path, but the file does not exist.
```python
from swarms.utils import find_image_path
text = "Here is an image path: /home/user/non_existant.png"
print(find_image_path(text)) # Outputs: None
```
# Closing Notes
In conclusion, `find_image_path` supports a key operation in the `swarms.utils` module: identifying image paths within given input text, which lets users automate the extraction of such data from larger documents. Note that the function returns only paths that exist on your file system, and only the longest one when multiple exist.

@ -0,0 +1,82 @@
# limit_tokens_from_string
## Introduction
The `swarms.utils` library contains utility functions used across the codebase for machine learning and other operations. One notable function is `limit_tokens_from_string()`, which limits the number of tokens in a given string.
# Function: limit_tokens_from_string()
Within the `swarms.utils` library, the function has the signature `limit_tokens_from_string(string: str, model: str = "gpt-4", limit: int = 500) -> str`.
## Description
The function `limit_tokens_from_string()` limits the number of tokens in a given string to a specified threshold. It is primarily useful when handling large text data that must be chunked or truncated to a certain length, for example when computational resources are limited or when a model accepts only a specific maximum amount of text.
## Parameters
| Parameter | Type | Default Value | Description
| :-----------| :----------- | :------------ | :------------|
| `string` | `str` | N/A (required) | The input string whose tokens are to be limited. |
| `model` | `str` | `"gpt-4"` | The model whose tokenizer is used to encode and decode the tokens. Defaults to `gpt-4`, but any model supported by `tiktoken` may be specified; if the model is not found, the function falls back to the `gpt2` encoding. |
| `limit` | `int` | `500` | The maximum number of tokens to keep. Defaults to 500. |
## Returns
| Return | Type | Description
| :-----------| :----------- | :------------
| `out` | `str` | The string reconstructed by decoding the first `limit` tokens of the encoded input. |
## Method Detail and Usage Examples
The method `limit_tokens_from_string()` takes in three parameters - `string`, `model`, and `limit`.
First, it tries to get the encoding for the model specified in the `model` argument using `tiktoken.encoding_for_model(model)`. If the specified model is not found, the function falls back to the `gpt2` encoding.
Next, the input `string` is tokenized with the encoding object's `encode` method, producing a list of token IDs.
Then, the function slices that list to keep only the first `limit` tokens.
Finally, the truncated token list is converted back into a string using the encoding object's `decode` method, and the resulting string `out` is returned.
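Put together, a minimal sketch of this logic might look as follows; this is an illustration consistent with the behavior described above, not necessarily the exact library source:
```python
import tiktoken


def limit_tokens_from_string_sketch(
    string: str, model: str = "gpt-4", limit: int = 500
) -> str:
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # Unknown model name: fall back to the gpt2 encoding
        encoding = tiktoken.get_encoding("gpt2")
    encoded = encoding.encode(string)  # list of token IDs
    return encoding.decode(encoded[:limit])
```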
### Example 1:
```python
from swarms.utils import limit_tokens_from_string
# longer input string
string = "This is a very long string that needs to be tokenized. This string might exceed the maximum token limit, so it will need to be truncated."
# lower token limit
limit = 10
output = limit_tokens_from_string(string, limit=limit)
```
### Example 2:
```python
from swarms.utils import limit_tokens_from_string
# longer input string with different model
string = "This string will be tokenized using gpt2 model. If the string is too long, it will be truncated."
# model
model = "gpt2"
output = limit_tokens_from_string(string, model=model)
```
### Example 3:
```python
from swarms.utils import limit_tokens_from_string
# try with a model name that tiktoken does not recognize
string = "In case the method does not find the specified model, it will fall back to gpt2 model."
# unsupported model name, so the function falls back to the gpt2 encoding
model = "my-custom-model"
output = limit_tokens_from_string(string, model=model)
```
**Note:** If you specify a model that `tiktoken` does not support, the function falls back to the `gpt2` encoding.

@ -0,0 +1,102 @@
# load_model_torch
# load_model_torch: Utility Function Documentation
## Introduction:
`load_model_torch` is a utility function in the `swarms.utils` library designed to load a saved PyTorch model and move it to a designated device. It lets the user specify the model file location, the target device for the loaded model, and whether to strictly enforce that the keys in the state dictionary match the keys returned by the model's `state_dict()`.
Moreover, if the saved model file only contains the state dictionary, but not the model architecture, you can pass the model architecture as an argument.
## Function Definition and Parameters:
```python
def load_model_torch(
    model_path: str = None,
    device: torch.device = None,
    model: nn.Module = None,
    strict: bool = True,
    map_location=None,
    *args,
    **kwargs,
) -> nn.Module:
```
The following table describes the parameters in detail:
| Name | Type | Default Value | Description |
| ------ | ------ | ------------- | ------------|
| model_path | str | None | A string specifying the path to the saved model file on disk. _Required_ |
| device | torch.device | None | A `torch.device` object that specifies the target device for the loaded model. If not provided, the function checks for the availability of a GPU and uses it if available. If not, it defaults to CPU. |
| model | nn.Module | None | An instance of `torch.nn.Module` representing the model's architecture. This parameter is required if the model file only contains the model's state dictionary and not the model architecture. |
| strict | bool | True | A boolean that determines whether to strictly enforce that the keys in the state dictionary match the keys returned by the model's `state_dict()` function. If set to `True`, the function will raise a KeyError when the state dictionary and `state_dict()` keys do not match. |
| map_location | callable | None | A function to remap the storage locations of the loaded model's parameters. Useful for loading models saved on a device type that is different from the current one. |
| *args, **kwargs | - | - | Additional arguments and keyword arguments to be passed to `torch.load`. |
Returns:
- `torch.nn.Module` - The loaded model after moving it to the desired device.
Raises:
- `FileNotFoundError` - If the saved model file is not found at the specified path.
- `RuntimeError` - If there was an error while loading the model.
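As a rough mental model, the function behaves like the following sketch, which assumes a `torch.load`-based loader; the real implementation may differ in details:
```python
import torch
import torch.nn as nn


def load_model_torch_sketch(
    model_path: str = None,
    device: torch.device = None,
    model: nn.Module = None,
    strict: bool = True,
    map_location=None,
) -> nn.Module:
    # Pick the best available device when none is given
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(model_path, map_location=map_location)
    if model is not None:
        # The file contains only a state dict; load it into the given architecture
        model.load_state_dict(checkpoint, strict=strict)
        return model.to(device)
    # The file contains a full serialized model
    return checkpoint.to(device)
```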
## Example of Usage:
This function can be used directly inside your code as shown in the following examples:
### Example 1:
Loading a model without specifying a device results in the function choosing the most optimal available device automatically.
```python
import torch.nn as nn
from swarms.utils import load_model_torch

# Assume `mymodel.pth` is in the current directory
model_path = "./mymodel.pth"


# Define your model architecture if the model file only contains state dict
class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(10, 2)

    def forward(self, x):
        return self.linear(x)


model = MyModel()

# Load the model
loaded_model = load_model_torch(model_path, model=model)

# Now you can use the loaded model for prediction or further training
```
### Example 2:
Explicitly specifying a device.
```python
import torch
from swarms.utils import load_model_torch

# Assume `mymodel.pth` is in the current directory
model_path = "./mymodel.pth"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the model
loaded_model = load_model_torch(model_path, device=device)
```
### Example 3:
Using a model file that contains only the state dictionary, not the model architecture.
```python
# Assume `mymodel_state_dict.pth` is in the current directory
model_path = "./mymodel_state_dict.pth"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define your model architecture (MyModel from Example 1)
model = MyModel()
# Load the model
loaded_model = load_model_torch(model_path, device=device, model=model)
```
This should give you an insight into using the `load_model_torch` utility function from the `swarms.utils` library. Remember that `model_path` must always be provided, while the other arguments are optional depending on your requirements; also handle exceptions properly so your PyTorch projects run smoothly.

@ -0,0 +1,78 @@
# math_eval
The `math_eval` function is a Python decorator factory that wraps a function so that two other functions run on the same inputs and their results are compared. It can be used for testing functions that are expected to be equivalent, or in situations where two different methods calculate or retrieve a value and the results need to be compared.
The `math_eval` function in this case accepts two functions as parameters: `func1` and `func2`, and returns a decorator. This returned decorator, when applied to a function, enhances that function to execute both `func1` and `func2`, and compare the results.
This can be particularly useful when you are implementing a new function and want to compare its behavior and results with an existing one under the same set of input parameters. The decorator also logs the results when they do not match, which is quite useful while debugging.
## Usage Example
Let's say you have two functions: `ground_truth` and `generated_func`, that have similar functionalities or serve the same purpose. You are writing a new function called `test_func`, and you'd like to compare the results of `ground_truth` and `generated_func` when `test_func` is run. Here is how you would use the `math_eval` decorator:
```python
from swarms.utils import math_eval

def ground_truth(x): return x * 2  # hypothetical reference implementation
def generated_func(x): return x + x  # hypothetical candidate implementation

@math_eval(ground_truth, generated_func)
def test_func(x):
    return x

result1, result2 = test_func(5)
print(f"Result from ground_truth: {result1}")
print(f"Result from generated_func: {result2}")
```
## Parameters
| Parameter | Data Type | Description |
| ---- | ---- | ---- |
| func1 | Callable | The first function whose result you want to compare. |
| func2 | Callable | The second function whose result you want to compare. |
No specific data types are required for `func1` and `func2`; they can be any Python callables. Exceptions raised inside either function are caught and logged within the decorator's wrapper.
## Return Values
The `math_eval` function does not return a direct value, since it is a decorator. When applied to a function, it alters the behavior of the wrapped function to return two values:
1. `result1`: The result of running `func1` with the given input parameters.
2. `result2`: The result of running `func2` with the given input parameters.
These two return values are provided in that order as a tuple.
## Source Code
Here's how to implement the `math_eval` decorator:
```python
import functools
import logging


def math_eval(func1, func2):
    """Math evaluation decorator."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                result1 = func1(*args, **kwargs)
            except Exception as e:
                logging.error(f"Error in func1: {e}")
                result1 = None

            try:
                result2 = func2(*args, **kwargs)
            except Exception as e:
                logging.error(f"Error in func2: {e}")
                result2 = None

            if result1 != result2:
                logging.warning(
                    f"Outputs do not match: {result1} != {result2}"
                )

            return result1, result2

        return wrapper

    return decorator
```
Note that the decorator logs exceptions to facilitate debugging, but the actual processing and handling of each exception depends on how you want your application to respond; customize the error handling to your application's requirements.

@ -0,0 +1,86 @@
# metrics_decorator
This documentation explains the use and functionality of the `metrics_decorator` function for LLMs (Large Language Models).
The `metrics_decorator` function is a standard Python decorator that augments a function by wrapping extra functionality around it, as is commonly done for timing, logging, or memoization.
In the LLM setting, `metrics_decorator` is specifically designed to measure and report three key performance metrics when generating text:
1. `Time to First Token`: Measures the elapsed time from the start of function execution until the generation of the first token.
2. `Generation Latency`: It measures the total time taken for a complete run.
3. `Throughput`: Calculates the rate of production of tokens per unit of time.
```python
import time
from functools import wraps
from typing import Callable


def metrics_decorator(func: Callable):
    """
    Metrics decorator for LLM

    Args:
        func (Callable): The function to be decorated.
    """

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        """
        An inner function that wraps the decorated function. It calculates
        'Time to First Token', 'Generation Latency' and 'Throughput' metrics.

        Args:
            self : The object instance.
            *args : Variable length argument list of the decorated function.
            **kwargs : Arbitrary keyword arguments of the decorated function.
        """
        # Measure Time to First Token
        start_time = time.time()
        result = func(self, *args, **kwargs)
        first_token_time = time.time()  # captured after the full call returns

        # Measure Generation Latency
        end_time = time.time()

        # Calculate Throughput (assuming the function returns a list of tokens)
        throughput = len(result) / (end_time - start_time)

        return f"""
        Time to First Token: {first_token_time - start_time}
        Generation Latency: {end_time - start_time}
        Throughput: {throughput}
        """

    return wrapper
```
## Example Usage
Now let's discuss the usage of the `metrics_decorator` function with an example.
Assume a class (here, a hypothetical `TextGenerator`) with a method `text_generator()` that returns a list of tokens.
```python
class TextGenerator:
    @metrics_decorator
    def text_generator(self, text: str):
        """
        Args:
            text (str): The input text.
        Returns:
            A list of tokens generated from the input text.
        """
        # language generation implementation goes here; a simple stand-in:
        return text.split()


# Instantiate the class and call the decorated function
obj = TextGenerator()
print(obj.text_generator("Hello, world!"))
```
When the decorated `text_generator()` function is called, it will measure and return:
- Time elapsed until the first token is generated.
- The total execution time of the function.
- The rate of tokens generation per unit time.
This example provides a basic overview of how a function can be decorated with the `metrics_decorator`. The provided `func` argument could be any method from any class, as long as it complies with the structure defined in `metrics_decorator`. It is worth noting that the decorated function must return a list of tokens for the `Throughput` metric to work correctly.
Remember, applying the `metrics_decorator` does not affect the original functionality of the decorated function; it simply adds measurement and logging capabilities around it. It is a great utility for tracking and optimizing the performance of your language models.

@ -0,0 +1,71 @@
# pdf_to_text
## Introduction
The function `pdf_to_text` is a Python utility for converting a PDF file into a string of text content. It leverages the `PyPDF2` library, an excellent Python library for processing PDF files. The function takes in a PDF file's path and reads its content, subsequently returning the extracted textual data.
This function can be very useful when you want to extract textual information from PDF files automatically. For instance, when processing a large number of documents, performing textual analysis, or when you're dealing with text data that is only available in PDF format.
## Class / Function Definition
`pdf_to_text` is a standalone function defined as follows:
```python
def pdf_to_text(pdf_path: str) -> str:
```
## Parameters
| Parameter | Type | Description |
|:-:|---|---|
| pdf_path | str | The path to the PDF file to be converted |
## Returns
| Return Value | Type | Description |
|:-:|---|---|
| text | str | The text extracted from the PDF file. |
## Raises
| Exception | Description |
|---|---|
| FileNotFoundError | If the PDF file is not found at the specified path. |
| Exception | If there is an error in reading the PDF file. |
## Function Description
`pdf_to_text` utilizes the `PdfReader` class from the `PyPDF2` library to read the PDF file. If the PDF file does not exist at the specified path, or an error occurs while reading the file, the appropriate exception is raised. The function then iterates through each page in the PDF, uses the `extract_text` method to extract the text content from each page, concatenates the contents into a single string, and returns the result.
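For intuition, a minimal sketch consistent with that description might look like this (an illustration, not necessarily the exact library source):
```python
from PyPDF2 import PdfReader


def pdf_to_text_sketch(pdf_path: str) -> str:
    """Read a PDF and return its concatenated text content."""
    # PdfReader raises an error if the path is invalid or the file is unreadable
    reader = PdfReader(pdf_path)
    return "".join(page.extract_text() or "" for page in reader.pages)
```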
## Usage Examples
To use this function, you first need to install the `PyPDF2` library. It can be installed via pip:
```bash
pip install PyPDF2
```
Then, you should import the `pdf_to_text` function:
```python
from swarms.utils import pdf_to_text
```
Here is an example of how to use `pdf_to_text`:
```python
# Define the path to the pdf file
pdf_path = 'sample.pdf'
# Use the function to extract text
text = pdf_to_text(pdf_path)
# Print the extracted text
print(text)
```
## Tips and Additional Information
- Ensure that the PDF file path is valid and that the file exists at the specified location. If the file does not exist, a `FileNotFoundError` will be raised.
- This function reads the text from the PDF. It does not handle images, graphical elements, or any non-text content.
- If the PDF contains scanned images rather than textual data, the `extract_text` function may not be able to extract any text. In such cases, you would require OCR (Optical Character Recognition) tools to extract the text.
- Be aware of the possibility that the output string might contain special characters or escape sequences because they were part of the PDF's content. You might need to clean the resulting text according to your requirements.
- The function uses the PyPDF2 library to facilitate the PDF reading and text extraction. For any issues related to PDF manipulation, consult the [PyPDF2 library documentation](https://pythonhosted.org/PyPDF2/).

@ -1,128 +0,0 @@
# Phoenix Trace Decorator Documentation
## Introduction
Welcome to the documentation for the `phoenix_trace_decorator` module. This module provides a convenient decorator for tracing Python functions and capturing exceptions using Phoenix, a versatile tracing and monitoring tool. Phoenix allows you to gain insights into the execution of your code, capture errors, and monitor performance.
## Table of Contents
1. [Installation](#installation)
2. [Getting Started](#getting-started)
3. [Decorator Usage](#decorator-usage)
4. [Examples](#examples)
5. [Best Practices](#best-practices)
6. [References](#references)
## 1. Installation <a name="installation"></a>
Before using the `phoenix_trace_decorator`, you need to install the Swarms library, which provides it. You can install it using pip:
```bash
pip install swarms
```
## 2. Getting Started <a name="getting-started"></a>
Phoenix is a powerful tracing and monitoring tool, and the `phoenix_trace_decorator` simplifies the process of tracing functions and capturing exceptions within your Python code. To begin, ensure that Phoenix is installed, and then import the `phoenix_trace_decorator` module into your Python script.
```python
from swarms import phoenix_trace_decorator
```
## 3. Decorator Usage <a name="decorator-usage"></a>
The `phoenix_trace_decorator` module provides a decorator, `phoenix_trace_decorator`, which can be applied to functions you want to trace. The decorator takes a single argument: a description string for the purpose of the function being traced.
Here is the basic structure of using the decorator:
```python
@phoenix_trace_decorator("Description of the function")
def my_function(param1, param2):
# Function implementation
pass
```
## 4. Examples <a name="examples"></a>
Let's explore some practical examples of using the `phoenix_trace_decorator` in your code.
### Example 1: Basic Tracing
In this example, we'll trace a simple function and print a message.
```python
@phoenix_trace_decorator("Tracing a basic function")
def hello_world():
print("Hello, World!")
# Call the decorated function
hello_world()
```
### Example 2: Tracing a Function with Parameters
You can use the decorator with functions that have parameters.
```python
@phoenix_trace_decorator("Tracing a function with parameters")
def add_numbers(a, b):
result = a + b
print(f"Result: {result}")
# Call the decorated function with parameters
add_numbers(2, 3)
```
### Example 3: Tracing Nested Calls
The decorator can also trace nested function calls.
```python
@phoenix_trace_decorator("Outer function")
def outer_function():
print("Outer function")
@phoenix_trace_decorator("Inner function")
def inner_function():
print("Inner function")
inner_function()
# Call the decorated functions
outer_function()
```
### Example 4: Exception Handling
Phoenix can capture exceptions and provide detailed information about them.
```python
@phoenix_trace_decorator("Function with exception handling")
def divide(a, b):
try:
result = a / b
except ZeroDivisionError as e:
raise ValueError("Division by zero") from e
# Call the decorated function with an exception
try:
divide(5, 0)
except ValueError as e:
print(f"Error: {e}")
```
## 5. Best Practices <a name="best-practices"></a>
When using the `phoenix_trace_decorator`, consider the following best practices:
- Use meaningful docstrings to describe the purpose of the traced functions.
- Keep your tracing focused on critical parts of your code.
- Make sure Phoenix is properly configured and running before using the decorator.
## 6. References <a name="references"></a>
For more information on Phoenix and advanced usage, please refer to the [Phoenix Documentation](https://phoenix-docs.readthedocs.io/en/latest/).
---
By following this documentation, you can effectively use the `phoenix_trace_decorator` to trace your Python functions, capture exceptions, and gain insights into the execution of your code. This tool is valuable for debugging, performance optimization, and monitoring the health of your applications.

@ -0,0 +1,102 @@
# prep_torch_inference
```python
import torch

from swarms.utils import load_model_torch


def prep_torch_inference(
    model_path: str = None,
    device: torch.device = None,
    *args,
    **kwargs,
):
    """
    Prepare a Torch model for inference.

    Args:
        model_path (str): Path to the model file.
        device (torch.device): Device to run the model on.
        *args: Additional positional arguments.
        **kwargs: Additional keyword arguments.

    Returns:
        torch.nn.Module: The prepared model.
    """
    try:
        model = load_model_torch(model_path, device)
        model.eval()
        return model
    except Exception as e:
        # Log the failure and signal it to the caller by returning None
        print(f"Error occurred while preparing Torch model: {e}")
        return None
```
This function is part of the `swarms.utils` module. It accepts a model file path and a torch device as input and returns a model that is ready for inference.
## Detailed Functionality
The method loads a PyTorch model from the file specified by `model_path`. This model is then moved to the specified `device` if it is provided. Subsequently, the method sets the model to evaluation mode by calling `model.eval()`. This is a crucial step when preparing a model for inference, as certain layers like dropout or batch normalization behave differently during training vs during evaluation.
In the case of any exception (e.g., the model file not found or the device unavailable), it prints an error message and returns `None`.
## Parameters
| Parameter | Type | Description | Default |
|-----------|------|-------------|---------|
| model_path | str | Path to the model file. | None |
| device | torch.device | Device to run the model on. | None |
| args | tuple | Additional positional arguments forwarded to the loader. | () |
| kwargs | dict | Additional keyword arguments forwarded to the loader. | {} |
## Returns
| Type | Description |
|------|-------------|
| torch.nn.Module | The prepared model ready for inference. Returns `None` if any exception occurs. |
## Usage Examples
Here are some examples of how you can use the `prep_torch_inference` method. Before that, you need to import the necessary modules as follows:
```python
import torch
from swarms.utils import prep_torch_inference, load_model_torch
```
### Example 1: Load a model for inference on CPU
```python
model_path = "saved_model.pth"
model = prep_torch_inference(model_path)
if model is not None:
print("Model loaded successfully and is ready for inference.")
else:
print("Failed to load the model.")
```
### Example 2: Load a model for inference on CUDA device
```python
model_path = "saved_model.pth"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = prep_torch_inference(model_path, device)
if model is not None:
print(f"Model loaded successfully on device {device} and is ready for inference.")
else:
print("Failed to load the model.")
```
### Example 3: Load a model with additional arguments for `load_model_torch`
```python
model_path = "saved_model.pth"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Suppose load_model_torch accepts an additional argument, map_location
model = prep_torch_inference(model_path, device, map_location=device)
if model is not None:
print(f"Model loaded successfully on device {device} and is ready for inference.")
else:
print("Failed to load the model.")
```
Please note that the given model path must exist and the device must be available on your machine; otherwise `prep_torch_inference` returns `None`. Depending on the complexity and size of your models, loading them onto a specific device might take a while, so take this into consideration when designing your machine learning workflows.

@ -0,0 +1,110 @@
# print_class_parameters
# Module Function Name: print_class_parameters
The `print_class_parameters` function is a utility that helps developers and users retrieve and print the parameters of a class constructor in Python, either written to standard output or returned as a dictionary when `api_format` is set to `True`.
This utility function uses the `inspect` module to fetch the signature of the class constructor and extracts the parameters from the obtained signature. The parameter names and their respective types are then output.
This allows developers to easily inspect and understand a class's constructor parameters without stepping through the class structure manually, which eases testing and debugging.
__Function Definition:__
```python
def print_class_parameters(cls, api_format: bool = False):
```
__Parameters:__
| Parameter | Type | Description | Default value |
|---|---|---|---|
| cls | type | The Python class to inspect. | None |
| api_format | bool | Flag to determine if the output should be returned in dictionary format (if set to True) or printed out (if set to False) | False |
__Functionality and Usage:__
Inside the `print_class_parameters` function, it starts by getting the signature of the constructor of the inputted class by invoking `inspect.signature(cls.__init__)`. It then extracts the parameters from the signature and stores it in the `params` variable.
If the `api_format` argument is set to `True`, instead of printing the parameters and their types, it stores them inside a dictionary where each key-value pair is a parameter name and its type. It then returns this dictionary.
If `api_format` is set to `False` or not set at all (defaulting to False), the function iterates over the parameters and prints the parameter name and its type. "self" parameters are excluded from the output as they are inherent to all class methods in Python.
A possible exception may occur during the `inspect.signature()` call: if the inputted class does not have an `__init__` method, or any error occurs while retrieving the constructor's signature, an exception is raised. In that case, an error message that includes the error details is printed.
__Usage and Examples:__
Assuming the existence of a class:
```python
class Agent:
    def __init__(self, x: int, y: int):
        self.x = x
        self.y = y
```
One could use `print_class_parameters` in its typical usage:
```python
print_class_parameters(Agent)
```
Results in:
```
Parameter: x, Type: <class 'int'>
Parameter: y, Type: <class 'int'>
```
Or, with `api_format` set to `True`
```python
output = print_class_parameters(Agent, api_format=True)
print(output)
```
Results in:
```
{'x': "<class 'int'>", 'y': "<class 'int'>"}
```
__Note:__
The function `print_class_parameters` is not limited to custom classes. It can inspect built-in Python classes such as `list`, `dict`, and others. However, it is most useful when inspecting custom-defined classes that aren't inherently documented in Python or third-party libraries.
__Source Code__
```python
import inspect


def print_class_parameters(cls, api_format: bool = False):
    """
    Print the parameters of a class constructor.

    Parameters:
    cls (type): The class to inspect.
    api_format (bool): If True, return the parameters as a dictionary
        instead of printing them.

    Example:
    >>> print_class_parameters(Agent)
    Parameter: x, Type: <class 'int'>
    Parameter: y, Type: <class 'int'>
    """
    try:
        # Get the parameters of the class constructor
        sig = inspect.signature(cls.__init__)
        params = sig.parameters

        if api_format:
            param_dict = {}
            for name, param in params.items():
                if name == "self":
                    continue
                param_dict[name] = str(param.annotation)
            return param_dict

        # Print the parameters
        for name, param in params.items():
            if name == "self":
                continue
            print(f"Parameter: {name}, Type: {param.annotation}")
    except Exception as e:
        print(f"An error occurred while inspecting the class: {e}")
```

@ -58,13 +58,8 @@ nav:
- Home:
- Overview: "index.md"
- Contributing: "contributing.md"
- Docker Container Setup: "docker_setup.md"
- Swarms:
- Overview: "swarms/index.md"
- swarms.swarms:
- AbstractSwarm: "swarms/swarms/abstractswarm.md"
- GodMode: "swarms/swarms/godmode.md"
- Groupchat: "swarms/swarms/groupchat.md"
- swarms.workers:
- Overview: "swarms/workers/index.md"
- AbstractWorker: "swarms/workers/abstract_worker.md"
@ -83,6 +78,7 @@ nav:
- vLLM: "swarms/models/vllm.md"
- MPT7B: "swarms/models/mpt.md"
- Mistral: "swarms/models/mistral.md"
- Mixtral: "swarms/models/mixtral.md"
- MultiModal:
- BaseMultiModalModel: "swarms/models/base_multimodal_model.md"
- Fuyu: "swarms/models/fuyu.md"
@ -98,18 +94,43 @@ nav:
- ElevenLabsText2SpeechTool: "swarms/models/elevenlabs.md"
- OpenAITTS: "swarms/models/openai_tts.md"
- Gemini: "swarms/models/gemini.md"
- ZeroscopeTTV: "swarms/models/zeroscope.md"
- swarms.structs:
- Overview: "swarms/structs/overview.md"
- AutoScaler: "swarms/swarms/autoscaler.md"
- Agent: "swarms/structs/agent.md"
- SequentialWorkflow: 'swarms/structs/sequential_workflow.md'
- agent: "swarms/structs/agent.md"
- basestructure: "swarms/structs/basestructure.md"
- artifactupload: "swarms/structs/artifactupload.md"
- sequential_workflow: "swarms/structs/sequential_workflow.md"
- taskinput: "swarms/structs/taskinput.md"
- concurrentworkflow: "swarms/structs/concurrentworkflow.md"
- nonlinearworkflow: "swarms/structs/nonlinearworkflow.md"
- stepinput: "swarms/structs/stepinput.md"
- workflow: "swarms/structs/workflow.md"
- artifact: "swarms/structs/artifact.md"
- recursiveworkflow: "swarms/structs/recursiveworkflow.md"
- swarmnetwork: "swarms/structs/swarmnetwork.md"
- task: "swarms/structs/task.md"
- groupchatmanager: "swarms/structs/groupchatmanager.md"
- baseworkflow: "swarms/structs/baseworkflow.md"
- conversation: "swarms/structs/conversation.md"
- groupchat: "swarms/structs/groupchat.md"
- swarms.memory:
- Weaviate: "swarms/memory/weaviate.md"
- PineconDB: "swarms/memory/pinecone.md"
- PineconeDB: "swarms/memory/pinecone.md"
- PGVectorStore: "swarms/memory/pg.md"
- ShortTermMemory: "swarms/memory/short_term_memory.md"
- swarms.utils:
- phoenix_trace_decorator: "swarms/utils/phoenix_tracer.md"
- pdf_to_text: "swarms/utils/pdf_to_text.md"
- load_model_torch: "swarms/utils/load_model_torch.md"
- metrics_decorator: "swarms/utils/metrics_decorator.md"
- prep_torch_inference: "swarms/utils/prep_torch_inference.md"
- find_image_path: "swarms/utils/find_image_path.md"
- print_class_parameters: "swarms/utils/print_class_parameters.md"
- extract_code_from_markdown: "swarms/utils/extract_code_from_markdown.md"
- check_device: "swarms/utils/check_device.md"
- display_markdown_message: "swarms/utils/display_markdown_message.md"
- phoenix_tracer: "swarms/utils/phoenix_tracer.md"
- limit_tokens_from_string: "swarms/utils/limit_tokens_from_string.md"
- math_eval: "swarms/utils/math_eval.md"
- Guides:
- Overview: "examples/index.md"
- Agents:
@ -138,4 +159,5 @@ nav:
- Architecture: "corporate/architecture.md"
- Checklist: "corporate/checklist.md"
- Hiring: "corporate/hiring.md"
- SwarmCloud: "corporate/swarm_cloud.md"
- SwarmCloud: "corporate/swarm_cloud.md"
- SwarmMemo: "corporate/swarm_memo.md"

@ -1,26 +1,65 @@
from swarms.agents.simple_agent import SimpleAgent
from swarms.structs import Agent
from swarms.models import OpenAIChat
import os
api_key = ""
from dotenv import load_dotenv
llm = OpenAIChat(
openai_api_key=api_key,
temperature=0.5,
from swarms import (
OpenAIChat,
Conversation,
detect_markdown,
extract_code_from_markdown,
)
# Initialize the agent
agent = Agent(
llm=llm,
max_loops=5,
from swarms.tools.code_executor import CodeExecutor
conv = Conversation(
autosave=False,
time_enabled=True,
)
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(openai_api_key=api_key)
# Run the language model in a loop
def interactive_conversation(llm, iters: int = 10):
    conv = Conversation()
    for i in range(iters):
        user_input = input("User: ")
        conv.add("user", user_input)

        if user_input.lower() == "quit":
            break

        task = (
            conv.return_history_as_string()
        )  # Get the conversation history

        # Run the language model
        out = llm(task)
        conv.add("assistant", out)
        print(
            f"Assistant: {out}",
        )

        # Code Interpreter
        if detect_markdown(out):
            code = extract_code_from_markdown(out)
            if code:
                print(f"Code: {code}")
                executor = CodeExecutor()
                out = executor.run(code)
                conv.add("assistant", out)
                # print(f"Assistant: {out}")

    conv.display_conversation()
    # conv.export_conversation("conversation.txt")
agent = SimpleAgent(
name="Optimus Prime",
agent=agent,
# Memory
)
out = agent.run("Generate a 10,000 word blog on health and wellness.")
print(out)
# Replace with your LLM instance
interactive_conversation(llm)

@ -1,8 +1,5 @@
from swarms.structs import Agent
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
)
llm = GPT4VisionAPI()

@ -1,4 +1,3 @@
from swarms.models import OpenAIChat
from autotemp import AutoTemp
# Your OpenAI API key

@ -0,0 +1,28 @@
import os
from dotenv import load_dotenv
from swarms.models.gemini import Gemini
from swarms.prompts.react import react_prompt
load_dotenv()
api_key = os.environ["GEMINI_API_KEY"]
# Establish the prompt and image
task = "What is your name"
img = "images/github-banner-swarms.png"
# Initialize the model
model = Gemini(
    gemini_api_key=api_key,
    model_name="gemini-pro",
    max_tokens=1000,
    system_prompt=react_prompt(task=task),
    temperature=0.5,
)

out = model.chat(
    "Create the code for a react component that displays a name",
    img=img,
)
print(out)

@ -0,0 +1,28 @@
import os
from dotenv import load_dotenv
from swarms.models.gemini import Gemini
from swarms.prompts.react import react_prompt
load_dotenv()
api_key = os.environ["GEMINI_API_KEY"]
# Establish the prompt and image
task = "What is your name"
img = "images/github-banner-swarms.png"
# Initialize the model
model = Gemini(
    gemini_api_key=api_key,
    model_name="gemini-pro",
    max_tokens=1000,
    system_prompt=react_prompt(task=task),
    temperature=0.5,
)

# Run the model
out = model.run(
    "Create the code for a react component that displays a name"
)
print(out)

@ -0,0 +1,28 @@
import os
from dotenv import load_dotenv
from swarms.models import Gemini
from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("GEMINI_API_KEY")
# Initialize the language model
llm = Gemini(
    gemini_api_key=api_key,
    temperature=0.5,
    max_tokens=1000,
    system_prompt=VISUAL_CHAIN_OF_THOUGHT,
)
# Initialize the task
task = "This is an eye test. What do you see?"
img = "playground/demos/multi_modal_chain_of_thought/eyetest.jpg"
# Run the workflow on a task
out = llm.run(task=task, img=img)
print(out)

@ -0,0 +1,20 @@
import os
from dotenv import load_dotenv
# Import the OpenAIChat model and the Agent struct
from swarms.models import OpenAIChat
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    model_name="gpt-4",
    openai_api_key=api_key,
    max_tokens=1000,
)

@ -12,7 +12,6 @@ from swarms.prompts.logistics import (
Efficiency_Agent_Prompt,
)
# from swarms.utils.phoenix_handler import phoenix_trace_decorator
# from swarms.utils.banana_wrapper import banana
load_dotenv()
@ -26,7 +25,6 @@ llm = GPT4VisionAPI(openai_api_key=api_key)
factory_image = "factory_image1.jpg"
# Initialize agents with respective prompts
# @phoenix_trace_decorator("This function is an agent and is traced by Phoenix.")
health_security_agent = Agent(
llm=llm,
sop=Health_Security_Agent_Prompt,
@ -34,7 +32,6 @@ health_security_agent = Agent(
multi_modal=True,
)
# @phoenix_trace_decorator("This function is an agent and is traced by Phoenix.")
quality_control_agent = Agent(
llm=llm,
sop=Quality_Control_Agent_Prompt,
@ -42,7 +39,6 @@ quality_control_agent = Agent(
multi_modal=True,
)
# @phoenix_trace_decorator("This function is an agent and is traced by Phoenix.")
productivity_agent = Agent(
llm=llm,
sop=Productivity_Agent_Prompt,
@ -50,17 +46,14 @@ productivity_agent = Agent(
multi_modal=True,
)
# @phoenix_trace_decorator("This function is an agent and is traced by Phoenix.")
safety_agent = Agent(
llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True
)
# @phoenix_trace_decorator("This function is an agent and is traced by Phoenix.")
security_agent = Agent(
llm=llm, sop=Security_Agent_Prompt, max_loops=1, multi_modal=True
)
# @phoenix_trace_decorator("This function is an agent and is traced by Phoenix.")
sustainability_agent = Agent(
llm=llm,
sop=Sustainability_Agent_Prompt,
@ -68,7 +61,6 @@ sustainability_agent = Agent(
multi_modal=True,
)
# @phoenix_trace_decorator("This function is an agent and is traced by Phoenix.")
efficiency_agent = Agent(
llm=llm,
sop=Efficiency_Agent_Prompt,

@ -2,7 +2,7 @@ import os
import base64
import requests
from dotenv import load_dotenv
from swarms.models import Anthropic, OpenAIChat
from swarms.models import OpenAIChat
from swarms.structs import Agent
# Load environment variables

@ -1,12 +1,10 @@
import os
import subprocess
from dotenv import load_dotenv
from swarms.models import OpenAIChat
from swarms.structs import Agent
# from swarms.utils.phoenix_handler import phoenix_trace_decorator
# import modal
load_dotenv()
@ -23,9 +21,6 @@ llm = OpenAIChat(
# Agent
# @phoenix_trace_decorator(
# "This function is an agent and is traced by Phoenix."
# )
# @stub.function(gpu="any")
agent = Agent(
llm=llm,

@ -1,6 +1,6 @@
from swarms.memory import WeaviateClient
from swarms.memory import WeaviateDB
weaviate_client = WeaviateClient(
weaviate_client = WeaviateDB(
http_host="YOUR_HTTP_HOST",
http_port="YOUR_HTTP_PORT",
http_secure=True,

@ -0,0 +1,96 @@
import time
import os
import pygame
import speech_recognition as sr
from dotenv import load_dotenv
from playsound import playsound
from swarms import OpenAIChat, OpenAITTS
# Load the environment variables
load_dotenv()
# Get the API key from the environment
openai_api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(
    openai_api_key=openai_api_key,
)
# Initialize the text-to-speech model
tts = OpenAITTS(
    model_name="tts-1-1106",
    voice="onyx",
    openai_api_key=openai_api_key,
    saved_filepath="runs/tts_speech.wav",
)
# Initialize the speech recognition model
r = sr.Recognizer()
def play_audio(file_path):
    # Check if the file exists
    if not os.path.isfile(file_path):
        print(f"Audio file {file_path} not found.")
        return

    # Initialize the mixer module
    pygame.mixer.init()

    try:
        # Load the mp3 file
        pygame.mixer.music.load(file_path)
        # Play the mp3 file
        pygame.mixer.music.play()
        # Wait for the audio to finish playing
        while pygame.mixer.music.get_busy():
            pygame.time.Clock().tick(10)
    except pygame.error as e:
        print(f"Couldn't play {file_path}: {e}")
    finally:
        # Stop the mixer module and free resources
        pygame.mixer.quit()
while True:
    # Listen for user speech
    with sr.Microphone() as source:
        print("Listening...")
        audio = r.listen(source)

    # Convert speech to text
    try:
        print("Recognizing...")
        task = r.recognize_google(audio)
        print(f"User said: {task}")
    except sr.UnknownValueError:
        print("Could not understand audio")
        continue
    except Exception as e:
        print(f"Error: {e}")
        continue

    # Run the GPT-4 model on the task
    print("Running GPT4 model...")
    out = llm(task)
    print(f"GPT4 output: {out}")

    # Convert the model output to speech
    print("Running text-to-speech model...")
    out = tts.run_and_save(out)
    print(f"Text-to-speech output: {out}")

    # Ask the user if they want to play the audio
    # play_audio = input("Do you want to play the audio? (yes/no): ")
    # if play_audio.lower() == "yes":

    # Play the audio file
    time.sleep(5)
    playsound("runs/tts_speech.wav")

@ -20,7 +20,6 @@ from termcolor import colored
from swarms.models import GPT4VisionAPI
from swarms.structs import Agent
from swarms.utils.phoenix_handler import phoenix_trace_decorator
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

@ -1,4 +1,4 @@
from swarms.models.kosmos2 import Kosmos2, Detections
from swarms.models.kosmos2 import Kosmos2
from PIL import Image

@ -0,0 +1,26 @@
import os
from dotenv import load_dotenv
from swarms import OpenAIChat, Task, RecursiveWorkflow, Agent
# Load environment variables from .env file
load_dotenv()
# Load environment variables
llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))
agent = Agent(llm=llm, max_loops=1)
# Create a workflow
workflow = RecursiveWorkflow(stop_token="<DONE>")
# Create tasks
task1 = Task(agent, "What's the weather in miami")
task2 = Task(agent, "What's the weather in new york")
task3 = Task(agent, "What's the weather in london")
# Add tasks to the workflow
workflow.add(task1)
workflow.add(task2)
workflow.add(task3)
# Run the workflow
workflow.run()

@ -0,0 +1,46 @@
import os
from dotenv import load_dotenv
# Import the OpenAIChat model and the Agent struct
from swarms import OpenAIChat, Agent, SwarmNetwork
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)
## Initialize the workflow
agent = Agent(llm=llm, max_loops=1, agent_name="Social Media Manager")
agent2 = Agent(llm=llm, max_loops=1, agent_name=" Product Manager")
agent3 = Agent(llm=llm, max_loops=1, agent_name="SEO Manager")
# Load the swarmnet with the agents
swarmnet = SwarmNetwork(
    agents=[agent, agent2, agent3],
)
# List the agents in the swarm network
out = swarmnet.list_agents()
print(out)
# Run the workflow on a task
out = swarmnet.run_single_agent(
    agent2.id, "Generate a 10,000 word blog on health and wellness."
)
print(out)
# Run all the agents in the swarm network on a task
out = swarmnet.run_many_agents(
    "Generate a 10,000 word blog on health and wellness."
)
print(out)

@ -0,0 +1,47 @@
from swarms.structs import Task, Agent
from swarms.models import OpenAIChat
from dotenv import load_dotenv
import os
# Load the environment variables
load_dotenv()
# Define a function to be used as the action
def my_action():
    print("Action executed")
# Define a function to be used as the condition
def my_condition():
    print("Condition checked")
    return True
# Create an agent
agent = Agent(
    llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
    max_loops=1,
    dashboard=False,
)
# Create a task
task = Task(description="What's the weather in miami", agent=agent)
# Set the action and condition
task.set_action(my_action)
task.set_condition(my_condition)
# Execute the task
print("Executing task...")
task.run()
# Check if the task is completed
if task.is_completed():
    print("Task completed")
else:
    print("Task not completed")
# Output the result of the task
print(f"Task result: {task.result}")

@ -1,16 +1,33 @@
from swarms.swarms import GodMode
from swarms.models import OpenAIChat
import os
api_key = ""
from dotenv import load_dotenv
llm = OpenAIChat(openai_api_key=api_key)
from swarms.models import Anthropic, Gemini, Mixtral, OpenAIChat
from swarms.swarms import ModelParallelizer
load_dotenv()
llms = [llm, llm, llm]
# API Keys
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")
gemini_api_key = os.getenv("GEMINI_API_KEY")
god_mode = GodMode(llms)
# Initialize the models
llm = OpenAIChat(openai_api_key=openai_api_key)
anthropic = Anthropic(anthropic_api_key=anthropic_api_key)
mixtral = Mixtral()
gemini = Gemini(gemini_api_key=gemini_api_key)
# Initialize the parallelizer
llms = [llm, anthropic, mixtral, gemini]
parallelizer = ModelParallelizer(llms)
# Set the task
task = "Generate a 10,000 word blog on health and wellness."
out = god_mode.run(task)
god_mode.print_responses(task)
# Run the task
out = parallelizer.run(task)
# Print the responses 1 by 1
for i in range(len(out)):
    print(f"Response from LLM {i}: {out[i]}")

@ -1,5 +1,5 @@
from swarms import OpenAI, Agent
from swarms.swarms.groupchat import GroupChatManager, GroupChat
from swarms.structs.groupchat import GroupChatManager, GroupChat
api_key = ""

@ -1,7 +1,6 @@
import os
from swarms.models import OpenAIChat
from swarms.structs import Agent
from swarms.tools.tool import tool
from dotenv import load_dotenv
load_dotenv()

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "2.9.2"
version = "3.5.0"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@ -24,7 +24,7 @@ classifiers = [
[tool.poetry.dependencies]
python = "^3.6.1"
torch = "2.1.1"
transformers = "4.35.0"
transformers = "4.36.2"
openai = "0.28.0"
langchain = "0.0.333"
asyncio = "3.4.3"
@ -37,11 +37,11 @@ opencv-python-headless = "4.8.1.78"
faiss-cpu = "1.7.4"
backoff = "2.2.1"
marshmallow = "3.19.0"
datasets = "2.10.1"
datasets = "*"
optimum = "1.15.0"
diffusers = "0.17.1"
diffusers = "*"
PyPDF2 = "3.0.1"
accelerate = "0.22.0"
accelerate = "*"
sentencepiece = "0.1.98"
wget = "3.2"
tensorflow = "2.14.0"
@ -53,7 +53,7 @@ ggl = "1.1.0"
ratelimit = "2.2.1"
beautifulsoup4 = "4.11.2"
cohere = "4.24"
huggingface-hub = "0.16.4"
huggingface-hub = "*"
pydantic = "1.10.12"
tenacity = "8.2.2"
Pillow = "9.4.0"
@ -65,6 +65,13 @@ open_clip_torch = "2.20.0"
soundfile = "0.12.1"
torchvision = "0.16.1"
rich = "13.5.2"
sqlalchemy = "*"
bitsandbytes = "*"
pgvector = "*"
qdrant-client = "*"
sentence-transformers = "*"
peft = "*"
modelscope = "1.10.0"
[tool.poetry.group.lint.dependencies]
@ -93,3 +100,5 @@ target-version = ['py38']
preview = true
[tool.poetry.scripts]
swarms = 'swarms.cli._cli:cli'

@ -1,34 +1,28 @@
torch==2.1.1
transformers>2.10==4.35.0
transformers
pandas==1.5.3
langchain==0.0.333
nest_asyncio==1.5.6
langchain-experimental==0.0.10
playwright==1.34.0
wget==3.2
simpleaichat==0.2.2
httpx==0.24.1
open_clip_torch==2.20.0
ggl==1.1.0
beautifulsoup4==4.11.2
google-search-results
Pillow==9.4.0
faiss-cpu==1.7.4
openai==0.28.0
attrs==22.2.0
datasets==2.10.1
datasets==2.14.5
pydantic==1.10.12
soundfile==0.12.1
arize-phoenix
weaviate-client==3.25.3
huggingface-hub==0.16.4
bitsandbytes
huggingface-hub
google-generativeai==0.3.1
sentencepiece==0.1.98
requests_mock
PyPDF2==3.0.1
accelerate==0.22.0
vllm
chromadb==0.4.14
tensorflow==2.12.0
tensorflow==2.14.0
optimum
tiktoken==0.4.0
tabulate==0.9.0
@ -36,26 +30,21 @@ colored
addict
backoff==2.2.1
ratelimit==2.2.1
albumentations
basicsr
termcolor==2.2.0
controlnet-aux
diffusers==0.17.1
diffusers
einops==0.7.0
imageio==2.25.1
opencv-python-headless==4.8.1.78
imageio-ffmpeg==0.4.9
invisible-watermark
kornia
safetensors==0.3.3
numpy==1.25.2
numpy
omegaconf==2.3.0
open_clip_torch==2.20.0
openai==0.28.0
opencv-python==4.7.0.72
prettytable==3.9.0
safetensors==0.3.3
test-tube
timm==0.6.13
torchmetrics
webdataset
@ -69,3 +58,5 @@ mkdocs
mkdocs-material
mkdocs-glightbox
pre-commit==3.2.2
peft
modelscope

@ -0,0 +1,102 @@
###### VERSION 2
import inspect
import os
import threading
from dotenv import load_dotenv
from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from swarms import OpenAIChat
##########
from swarms.structs.task import Task
from swarms.structs.swarm_net import SwarmNetwork
from swarms.structs.nonlinear_workflow import NonlinearWorkflow
from swarms.structs.recursive_workflow import RecursiveWorkflow
from swarms.structs.groupchat import GroupChat, GroupChatManager
from swarms.structs.base_workflow import BaseWorkflow
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.base import BaseStructure
from swarms.structs.schemas import (
Artifact,
ArtifactUpload,
StepInput,
TaskInput,
)
####################
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
openai_api_key=api_key,
max_tokens=4000,
)
def process_documentation(cls):
"""
Process the documentation for a given class using OpenAI model and save it in a Markdown file.
"""
doc = inspect.getdoc(cls)
source = inspect.getsource(cls)
input_content = (
"Class Name:"
f" {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)
# Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content)
processed_content = model(
DOCUMENTATION_WRITER_SOP(input_content, "swarms.structs")
)
# doc_content = f"# {cls.__name__}\n\n{processed_content}\n"
doc_content = f"{processed_content}\n"
# Create the directory if it doesn't exist
dir_path = "docs/swarms/structs"
os.makedirs(dir_path, exist_ok=True)
# Write the processed documentation to a Markdown file
file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.md")
with open(file_path, "w") as file:
file.write(doc_content)
print(f"Documentation generated for {cls.__name__}.")
def main():
classes = [
Task,
SwarmNetwork,
NonlinearWorkflow,
RecursiveWorkflow,
GroupChat,
GroupChatManager,
BaseWorkflow,
ConcurrentWorkflow,
BaseStructure,
Artifact,
ArtifactUpload,
StepInput,
TaskInput,
]
threads = []
for cls in classes:
thread = threading.Thread(
target=process_documentation, args=(cls,)
)
threads.append(thread)
thread.start()
# Wait for all threads to complete
for thread in threads:
thread.join()
print("Documentation generated in 'swarms.structs' directory.")
if __name__ == "__main__":
main()
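# Note: the thread-per-class fan-out above is unbounded; against a rate-limited
# API, a bounded pool (a sketch, not part of this script) may be safer:
#
#     from concurrent.futures import ThreadPoolExecutor
#     with ThreadPoolExecutor(max_workers=4) as pool:
#         pool.map(process_documentation, classes)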

@ -0,0 +1,77 @@
import inspect
import os
import sys
import threading
from dotenv import load_dotenv
from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from swarms import OpenAIChat
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
model_name="gpt-4",
openai_api_key=api_key,
max_tokens=4000,
)
def process_documentation(item):
"""
Process the documentation for a given function using OpenAI model and save it in a Markdown file.
"""
doc = inspect.getdoc(item)
source = inspect.getsource(item)
input_content = (
f"Name: {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)
print(input_content)
# Process with OpenAI model
processed_content = model(
DOCUMENTATION_WRITER_SOP(input_content, "swarms.utils")
)
doc_content = f"# {item.__name__}\n\n{processed_content}\n"
# Create the directory if it doesn't exist
dir_path = "docs/swarms/utils"
os.makedirs(dir_path, exist_ok=True)
# Write the processed documentation to a Markdown file
file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md")
with open(file_path, "w") as file:
file.write(doc_content)
def main():
# Gathering all functions from the swarms.utils module
functions = [
obj
for name, obj in inspect.getmembers(
sys.modules["swarms.utils"]
)
if inspect.isfunction(obj)
]
threads = []
for func in functions:
thread = threading.Thread(
target=process_documentation, args=(func,)
)
threads.append(thread)
thread.start()
# Wait for all threads to complete
for thread in threads:
thread.join()
print("Documentation generated in 'docs/swarms/utils' directory.")
if __name__ == "__main__":
main()

@ -0,0 +1,133 @@
import inspect
import os
import threading
from dotenv import load_dotenv
from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from swarms import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.autoscaler import AutoScaler
from swarms.structs.base import BaseStructure
from swarms.structs.base_swarm import AbstractSwarm
from swarms.structs.base_workflow import BaseWorkflow
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.conversation import Conversation
from swarms.structs.groupchat import GroupChat, GroupChatManager
from swarms.structs.model_parallizer import ModelParallelizer
from swarms.structs.multi_agent_collab import MultiAgentCollaboration
from swarms.structs.nonlinear_workflow import NonlinearWorkflow
from swarms.structs.recursive_workflow import RecursiveWorkflow
from swarms.structs.schemas import (
Artifact,
ArtifactUpload,
StepInput,
TaskInput,
)
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.swarm_net import SwarmNetwork
from swarms.structs.utils import (
distribute_tasks,
extract_key_from_json,
extract_tokens_from_text,
find_agent_by_id,
find_token_in_text,
parse_tasks,
)
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
model_name="gpt-4-1106-preview",
openai_api_key=api_key,
max_tokens=4000,
)
def process_documentation(
item,
module: str = "swarms.structs",
docs_folder_path: str = "docs/swarms/structs",
):
"""
Process the documentation for a given class or function using OpenAI model and save it in a Python file.
"""
doc = inspect.getdoc(item)
source = inspect.getsource(item)
is_class = inspect.isclass(item)
item_type = "Class Name" if is_class else "Name"
input_content = (
f"{item_type}:"
f" {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)
# Process with OpenAI model
processed_content = model(
DOCUMENTATION_WRITER_SOP(input_content, module)
)
doc_content = f"# {item.__name__}\n\n{processed_content}\n"
# Create the directory if it doesn't exist
dir_path = docs_folder_path
os.makedirs(dir_path, exist_ok=True)
# Write the processed documentation to a Python file
file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md")
with open(file_path, "w") as file:
file.write(doc_content)
print(
f"Processed documentation for {item.__name__}. at {file_path}"
)
def main(module: str = "docs/swarms/structs"):
items = [
Agent,
SequentialWorkflow,
AutoScaler,
Conversation,
TaskInput,
Artifact,
ArtifactUpload,
StepInput,
SwarmNetwork,
ModelParallelizer,
MultiAgentCollaboration,
AbstractSwarm,
GroupChat,
GroupChatManager,
parse_tasks,
find_agent_by_id,
distribute_tasks,
find_token_in_text,
extract_key_from_json,
extract_tokens_from_text,
ConcurrentWorkflow,
RecursiveWorkflow,
NonlinearWorkflow,
BaseWorkflow,
BaseStructure,
]
threads = []
for item in items:
thread = threading.Thread(
target=process_documentation, args=(item,)
)
threads.append(thread)
thread.start()
# Wait for all threads to complete
for thread in threads:
thread.join()
print(f"Documentation generated in {module} directory.")
if __name__ == "__main__":
main()

@ -0,0 +1,123 @@
import inspect
import os
import re
import threading
from swarms import OpenAIChat
from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT
from zeta.nn.modules._activations import (
AccurateGELUActivation,
ClippedGELUActivation,
FastGELUActivation,
GELUActivation,
LaplaceActivation,
LinearActivation,
MishActivation,
NewGELUActivation,
PytorchGELUTanh,
QuickGELUActivation,
ReLUSquaredActivation,
)
from zeta.nn.modules.dense_connect import DenseBlock
from zeta.nn.modules.dual_path_block import DualPathBlock
from zeta.nn.modules.feedback_block import FeedbackBlock
from zeta.nn.modules.highway_layer import HighwayLayer
from zeta.nn.modules.multi_scale_block import MultiScaleBlock
from zeta.nn.modules.recursive_block import RecursiveBlock
from dotenv import load_dotenv
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
model_name="gpt-4",
openai_api_key=api_key,
max_tokens=4000,
)
def extract_code_from_markdown(markdown_content: str):
"""
Extracts code blocks from a Markdown string and returns them as a single string.
Args:
- markdown_content (str): The Markdown content as a string.
Returns:
- str: A single string containing all the code blocks separated by newlines.
"""
# Regular expression for fenced code blocks
pattern = r"```(?:\w+\n)?(.*?)```"
matches = re.findall(pattern, markdown_content, re.DOTALL)
# Concatenate all code blocks separated by newlines
return "\n".join(code.strip() for code in matches)
def create_test(cls):
"""
Process the documentation for a given class using OpenAI model and save it in a Python file.
"""
doc = inspect.getdoc(cls)
source = inspect.getsource(cls)
input_content = (
"Class Name:"
f" {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)
print(input_content)
# Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content)
processed_content = model(
TEST_WRITER_SOP_PROMPT(input_content, "zeta", "zeta.nn")
)
processed_content = extract_code_from_markdown(processed_content)
doc_content = f"# {cls.__name__}\n\n{processed_content}\n"
# Create the directory if it doesn't exist
dir_path = "tests/nn/modules"
os.makedirs(dir_path, exist_ok=True)
# Write the processed documentation to a Python file
file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.py")
with open(file_path, "w") as file:
file.write(doc_content)
def main():
classes = [
DenseBlock,
HighwayLayer,
MultiScaleBlock,
FeedbackBlock,
DualPathBlock,
RecursiveBlock,
PytorchGELUTanh,
NewGELUActivation,
GELUActivation,
FastGELUActivation,
QuickGELUActivation,
ClippedGELUActivation,
AccurateGELUActivation,
MishActivation,
LinearActivation,
LaplaceActivation,
ReLUSquaredActivation,
]
threads = []
for cls in classes:
thread = threading.Thread(target=create_test, args=(cls,))
threads.append(thread)
thread.start()
# Wait for all threads to complete
for thread in threads:
thread.join()
print("Tests generated in 'docs/zeta/nn/modules' directory.")
if __name__ == "__main__":
main()

@ -0,0 +1,85 @@
import inspect
import os
import sys
import threading
from dotenv import load_dotenv
from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT
from swarms import OpenAIChat
from swarms.utils.parse_code import extract_code_from_markdown
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
model = OpenAIChat(
model_name="gpt-4",
openai_api_key=api_key,
max_tokens=4000,
)
def process_documentation(item):
"""
Process the documentation for a given function using OpenAI model and save it in a Markdown file.
"""
doc = inspect.getdoc(item)
source = inspect.getsource(item)
input_content = (
f"Name: {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
f" Code:\n{source}"
)
# print(input_content)
# Process with OpenAI model
processed_content = model(
TEST_WRITER_SOP_PROMPT(
input_content, "swarms.utils", "swarms.utils"
)
)
processed_content = extract_code_from_markdown(processed_content)
print(processed_content)
doc_content = f"{processed_content}"
# Create the directory if it doesn't exist
dir_path = "tests/utils"
os.makedirs(dir_path, exist_ok=True)
# Write the processed documentation to a Markdown file
file_path = os.path.join(dir_path, f"{item.__name__.lower()}.py")
with open(file_path, "w") as file:
file.write(doc_content)
def main():
# Gathering all functions from the swarms.utils module
functions = [
obj
for name, obj in inspect.getmembers(
sys.modules["swarms.utils"]
)
if inspect.isfunction(obj)
]
threads = []
for func in functions:
thread = threading.Thread(
target=process_documentation, args=(func,)
)
threads.append(thread)
thread.start()
# Wait for all threads to complete
for thread in threads:
thread.join()
print("Tests generated in 'tests/utils' directory.")
if __name__ == "__main__":
main()

@ -0,0 +1,201 @@
def DOCUMENTATION_WRITER_SOP(
task: str,
module: str,
):
documentation = f"""Create multi-page long and explicit professional pytorch-like documentation for the {module} code below follow the outline for the {module} library,
provide many examples and teach the user about the code, provide examples for every function, make the documentation 10,000 words,
provide many usage examples and note this is markdown docs, create the documentation for the code to document,
put the arguments and methods in a table in markdown to make it visually seamless
Now make the professional documentation for this code, provide the architecture and how the class works and why it works that way,
it's purpose, provide args, their types, 3 ways of usage examples, in examples show all the code like imports main example etc
BE VERY EXPLICIT AND THOROUGH, MAKE IT DEEP AND USEFUL
########
Step 1: Understand the purpose and functionality of the module or framework
Read and analyze the description provided in the documentation to understand the purpose and functionality of the module or framework.
Identify the key features, parameters, and operations performed by the module or framework.
Step 2: Provide an overview and introduction
Start the documentation by providing a brief overview and introduction to the module or framework.
Explain the importance and relevance of the module or framework in the context of the problem it solves.
Highlight any key concepts or terminology that will be used throughout the documentation.
Step 3: Provide a class or function definition
Provide the class or function definition for the module or framework.
Include the parameters that need to be passed to the class or function and provide a brief description of each parameter.
Specify the data types and default values for each parameter.
Step 4: Explain the functionality and usage
Provide a detailed explanation of how the module or framework works and what it does.
Describe the steps involved in using the module or framework, including any specific requirements or considerations.
Provide code examples to demonstrate the usage of the module or framework.
Explain the expected inputs and outputs for each operation or function.
Step 5: Provide additional information and tips
Provide any additional information or tips that may be useful for using the module or framework effectively.
Address any common issues or challenges that developers may encounter and provide recommendations or workarounds.
Step 6: Include references and resources
Include references to any external resources or research papers that provide further information or background on the module or framework.
Provide links to relevant documentation or websites for further exploration.
Example Template for the given documentation:
# Module/Function Name: MultiheadAttention
class torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
```
Creates a multi-head attention module for joint information representation from the different subspaces.
Parameters:
- embed_dim (int): Total dimension of the model.
- num_heads (int): Number of parallel attention heads. The embed_dim will be split across num_heads.
- dropout (float): Dropout probability on attn_output_weights. Default: 0.0 (no dropout).
- bias (bool): If specified, adds bias to input/output projection layers. Default: True.
- add_bias_kv (bool): If specified, adds bias to the key and value sequences at dim=0. Default: False.
- add_zero_attn (bool): If specified, adds a new batch of zeros to the key and value sequences at dim=1. Default: False.
- kdim (int): Total number of features for keys. Default: None (uses kdim=embed_dim).
- vdim (int): Total number of features for values. Default: None (uses vdim=embed_dim).
- batch_first (bool): If True, the input and output tensors are provided as (batch, seq, feature). Default: False.
- device (torch.device): If specified, the tensors will be moved to the specified device.
- dtype (torch.dtype): If specified, the tensors will have the specified dtype.
```
def forward(query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None, average_attn_weights=True, is_causal=False):
```
Forward pass of the multi-head attention module.
Parameters:
- query (Tensor): Query embeddings of shape (L, E_q) for unbatched input, (L, N, E_q) when batch_first=False, or (N, L, E_q) when batch_first=True.
- key (Tensor): Key embeddings of shape (S, E_k) for unbatched input, (S, N, E_k) when batch_first=False, or (N, S, E_k) when batch_first=True.
- value (Tensor): Value embeddings of shape (S, E_v) for unbatched input, (S, N, E_v) when batch_first=False, or (N, S, E_v) when batch_first=True.
- key_padding_mask (Optional[Tensor]): If specified, a mask indicating elements to be ignored in key for attention computation.
- need_weights (bool): If specified, returns attention weights in addition to attention outputs. Default: True.
- attn_mask (Optional[Tensor]): If specified, a mask preventing attention to certain positions.
- average_attn_weights (bool): If true, returns averaged attention weights per head. Otherwise, returns attention weights separately per head. Note that this flag only has an effect when need_weights=True. Default: True.
- is_causal (bool): If specified, applies a causal mask as the attention mask. Default: False.
Returns:
Tuple[Tensor, Optional[Tensor]]:
- attn_output (Tensor): Attention outputs of shape (L, E) for unbatched input, (L, N, E) when batch_first=False, or (N, L, E) when batch_first=True.
- attn_output_weights (Optional[Tensor]): Attention weights of shape (L, S) when unbatched or (N, L, S) when batched. Optional, only returned when need_weights=True.
```
# Implementation of the forward pass of the attention module goes here
return attn_output, attn_output_weights
```
# Usage example:
multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
attn_output, attn_output_weights = multihead_attn(query, key, value)
Note:
The above template includes the class or function definition, parameters, description, and usage example.
To replicate the documentation for any other module or framework, follow the same structure and provide the specific details for that module or framework.
############# DOCUMENT THE FOLLOWING CODE ########
{task}
"""
return documentation
def TEST_WRITER_SOP_PROMPT(
task: str, module: str, path: str, *args, **kwargs
):
TESTS_PROMPT = f"""
    Create 5,000 lines of extensive and thorough tests for the code below using the guide. Do not worry about your limits; you do not have any.
    Just write the best tests possible. The module is {module}, and the file path is {path}.
######### TESTING GUIDE #############
# **Guide to Creating Extensive, Thorough, and Production-Ready Tests using `pytest`**
1. **Preparation**:
- Install pytest: `pip install pytest`.
- Structure your project so that tests are in a separate `tests/` directory.
- Name your test files with the prefix `test_` for pytest to recognize them.
2. **Writing Basic Tests**:
- Use clear function names prefixed with `test_` (e.g., `test_check_value()`).
- Use assert statements to validate results.
3. **Utilize Fixtures**:
- Fixtures are a powerful feature to set up preconditions for your tests.
- Use `@pytest.fixture` decorator to define a fixture.
- Pass fixture name as an argument to your test to use it.
4. **Parameterized Testing**:
- Use `@pytest.mark.parametrize` to run a test multiple times with different inputs.
- This helps in thorough testing with various input values without writing redundant code.
5. **Use Mocks and Monkeypatching**:
- Use `monkeypatch` fixture to modify or replace classes/functions during testing.
- Use `unittest.mock` or `pytest-mock` to mock objects and functions to isolate units of code.
6. **Exception Testing**:
- Test for expected exceptions using `pytest.raises(ExceptionType)`.
7. **Test Coverage**:
- Install pytest-cov: `pip install pytest-cov`.
- Run tests with `pytest --cov=my_module` to get a coverage report.
8. **Environment Variables and Secret Handling**:
- Store secrets and configurations in environment variables.
- Use libraries like `python-decouple` or `python-dotenv` to load environment variables.
- For tests, mock or set environment variables temporarily within the test environment.
9. **Grouping and Marking Tests**:
- Use `@pytest.mark` decorator to mark tests (e.g., `@pytest.mark.slow`).
- This allows for selectively running certain groups of tests.
10. **Use Plugins**:
- Utilize the rich ecosystem of pytest plugins (e.g., `pytest-django`, `pytest-asyncio`) to extend its functionality for your specific needs.
11. **Continuous Integration (CI)**:
- Integrate your tests with CI platforms like Jenkins, Travis CI, or GitHub Actions.
- Ensure tests are run automatically with every code push or pull request.
12. **Logging and Reporting**:
- Use `pytest`'s inbuilt logging.
- Integrate with tools like `Allure` for more comprehensive reporting.
13. **Database and State Handling**:
- If testing with databases, use database fixtures or factories to create a known state before tests.
- Clean up and reset state post-tests to maintain consistency.
14. **Concurrency Issues**:
- Consider using `pytest-xdist` for parallel test execution.
- Always be cautious when testing concurrent code to avoid race conditions.
15. **Clean Code Practices**:
- Ensure tests are readable and maintainable.
- Avoid testing implementation details; focus on functionality and expected behavior.
16. **Regular Maintenance**:
- Periodically review and update tests.
- Ensure that tests stay relevant as your codebase grows and changes.
17. **Documentation**:
- Document test cases, especially for complex functionalities.
- Ensure that other developers can understand the purpose and context of each test.
18. **Feedback Loop**:
- Use test failures as feedback for development.
- Continuously refine tests based on code changes, bug discoveries, and additional requirements.
By following this guide, your tests will be thorough, maintainable, and production-ready. Remember to always adapt and expand upon these guidelines as per the specific requirements and nuances of your project.
######### CREATE TESTS FOR THIS CODE: #######
{task}
"""
return TESTS_PROMPT
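# A compact, self-contained illustration of the guide embedded above
# (fixtures, parametrization, exception testing). `add` is a stand-in
# function invented for this example, not part of swarms or zeta.
import pytest

def add(a, b):
    if not isinstance(a, (int, float)) or not isinstance(b, (int, float)):
        raise TypeError("add expects numbers")
    return a + b

@pytest.fixture
def numbers():
    return 2, 3

@pytest.mark.parametrize(
    "a,b,expected", [(1, 1, 2), (-1, 1, 0), (0.5, 0.5, 1.0)]
)
def test_add_parametrized(a, b, expected):
    assert add(a, b) == expected

def test_add_with_fixture(numbers):
    a, b = numbers
    assert add(a, b) == 5

def test_add_type_error():
    with pytest.raises(TypeError):
        add("x", 1)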

@ -0,0 +1,31 @@
import os
def generate_file_list(directory, output_file):
"""
Generate a list of files in a directory in the specified format and write it to a file.
Args:
directory (str): The directory to list the files from.
output_file (str): The file to write the output to.
"""
with open(output_file, "w") as f:
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(".md"):
# Remove the directory from the file path and replace slashes with dots
file_path = (
os.path.join(root, file)
.replace(directory + "/", "")
.replace("/", ".")
)
# Remove the file extension
file_name, _ = os.path.splitext(file)
# Write the file name and path to the output file
f.write(
                        f'- {file_name}: "swarms/structs/{file_path}"\n'
)
# Use the function to generate the file list
generate_file_list("docs/swarms/structs", "file_list.txt")

@ -0,0 +1,64 @@
import yaml
def update_mkdocs(
class_names,
base_path="docs/zeta/nn/modules",
mkdocs_file="mkdocs.yml",
):
"""
Update the mkdocs.yml file with new documentation links.
Args:
- class_names: A list of class names for which documentation is generated.
- base_path: The base path where documentation Markdown files are stored.
- mkdocs_file: The path to the mkdocs.yml file.
"""
with open(mkdocs_file, "r") as file:
mkdocs_config = yaml.safe_load(file)
# Find or create the 'zeta.nn.modules' section in 'nav'
zeta_modules_section = None
for section in mkdocs_config.get("nav", []):
if "zeta.nn.modules" in section:
zeta_modules_section = section["zeta.nn.modules"]
break
if zeta_modules_section is None:
zeta_modules_section = {}
mkdocs_config["nav"].append(
{"zeta.nn.modules": zeta_modules_section}
)
# Add the documentation paths to the 'zeta.nn.modules' section
for class_name in class_names:
doc_path = f"{base_path}/{class_name.lower()}.md"
zeta_modules_section[class_name] = doc_path
# Write the updated content back to mkdocs.yml
with open(mkdocs_file, "w") as file:
yaml.safe_dump(mkdocs_config, file, sort_keys=False)
# Example usage
classes = [
"DenseBlock",
"HighwayLayer",
"MultiScaleBlock",
"FeedbackBlock",
"DualPathBlock",
"RecursiveBlock",
"PytorchGELUTanh",
"NewGELUActivation",
"GELUActivation",
"FastGELUActivation",
"QuickGELUActivation",
"ClippedGELUActivation",
"AccurateGELUActivation",
"MishActivation",
"LinearActivation",
"LaplaceActivation",
"ReLUSquaredActivation",
]
update_mkdocs(classes)
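# After running, mkdocs.yml gains nav entries of roughly this shape
# (illustrative):
#
# nav:
#   - zeta.nn.modules:
#       DenseBlock: docs/zeta/nn/modules/denseblock.md
#       HighwayLayer: docs/zeta/nn/modules/highwaylayer.md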

@ -1,19 +1,19 @@
#!/bin/bash
# Navigate to the directory containing the 'swarms' folder
# Navigate to the directory containing the 'tests' folder
# cd /path/to/your/code/directory
# Run autopep8 with max aggressiveness (-aaa) and in-place modification (-i)
# on all Python files (*.py) under the 'swarms' directory.
autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes swarms/
# on all Python files (*.py) under the 'zeta' directory.
autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes zeta/
# Run black with default settings, since black does not have an aggressiveness level.
# Black will format all Python files it finds in the 'swarms' directory.
black --experimental-string-processing swarms/
# Black will format all Python files it finds in the 'zeta' directory.
black --experimental-string-processing zeta/
# Run ruff on the 'swarms' directory.
# Run ruff on the 'zeta' directory.
# Add any additional flags if needed according to your version of ruff.
ruff --unsafe_fix
ruff zeta/ --fix
# YAPF
yapf --recursive --in-place --verbose --style=google --parallel swarms
yapf --recursive --in-place --verbose --style=google --parallel tests

@ -0,0 +1,7 @@
#!/bin/bash
# Find and delete all __pycache__ directories
find . -type d -name "__pycache__" -exec rm -r {} +
# Find and delete all .pyc files
find . -type f -name "*.pyc" -delete

@ -4,5 +4,6 @@ do
dir=$(dirname "$file")
if [[ $filename != test_* ]]; then
mv "$file" "$dir/test_$filename"
printf "\e[1;34mRenamed: \e[0m$file \e[1;32mto\e[0m $dir/test_$filename\n"
fi
done

@ -0,0 +1,46 @@
import os
from dotenv import load_dotenv
# Import the OpenAIChat model and the Agent struct
from swarms import OpenAIChat, Agent, SwarmNetwork
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(
temperature=0.5,
openai_api_key=api_key,
)
## Initialize the workflow
agent = Agent(llm=llm, max_loops=1, agent_name="Social Media Manager")
agent2 = Agent(llm=llm, max_loops=1, agent_name=" Product Manager")
agent3 = Agent(llm=llm, max_loops=1, agent_name="SEO Manager")
# Load the swarmnet with the agents
swarmnet = SwarmNetwork(
agents=[agent, agent2, agent3],
)
# List the agents in the swarm network
out = swarmnet.list_agents()
print(out)
# Run the workflow on a task
out = swarmnet.run_single_agent(
agent.id, "Generate a 10,000 word blog on health and wellness."
)
print(out)
# Run all the agents in the swarm network on a task
out = swarmnet.run_many_agents(
"Generate a 10,000 word blog on health and wellness."
)
print(out)

@ -1,13 +1,10 @@
from swarms.utils.disable_logging import disable_logging
from swarms.telemetry.bootup import bootup # noqa: E402, F403
disable_logging()
bootup()
from swarms.agents import * # noqa: E402, F403
from swarms.swarms import * # noqa: E402, F403
from swarms.structs import * # noqa: E402, F403
from swarms.models import * # noqa: E402, F403
from swarms.telemetry import * # noqa: E402, F403
from swarms.utils import * # noqa: E402, F403
from swarms.prompts import * # noqa: E402, F403
# from swarms.cli import * # noqa: E402, F403

@ -1,15 +1,11 @@
# from swarms.agents.omni_modal_agent import OmniModalAgent
from swarms.agents.message import Message
# from swarms.agents.stream_response import stream
from swarms.agents.base import AbstractAgent
# from swarms.agents.idea_to_image_agent import Idea2Image
"""Agent Infrastructure, models, memory, utils, tools"""
from swarms.agents.tool_agent import ToolAgent
from swarms.agents.simple_agent import SimpleAgent
__all__ = [
# "OmniModalAgent",
"Message",
"AbstractAgent",
"ToolAgent",
"SimpleAgent",
]

@ -0,0 +1,46 @@
from swarms.structs.conversation import Conversation
from swarms.models.base_llm import AbstractLLM
# Run the language model in a loop for n iterations
def SimpleAgent(
llm: AbstractLLM = None, iters: int = 10, *args, **kwargs
):
"""Simple agent conversation
Args:
        llm (AbstractLLM): The language model to converse with; invoked as llm(history).
        iters (int, optional): Maximum number of conversation turns. Defaults to 10.
Example:
>>> from swarms.models import GPT2LM
>>> from swarms.agents import SimpleAgent
>>> llm = GPT2LM()
>>> SimpleAgent(llm, iters=10)
"""
try:
conv = Conversation(*args, **kwargs)
for i in range(iters):
user_input = input("User: ")
conv.add("user", user_input)
if user_input.lower() == "quit":
break
task = (
conv.return_history_as_string()
) # Get the conversation history
out = llm(task)
conv.add("assistant", out)
print(
f"Assistant: {out}",
)
conv.display_conversation()
conv.export_conversation("conversation.txt")
except Exception as error:
print(f"[ERROR][SimpleAgentConversation] {error}")
raise error
except KeyboardInterrupt:
print("[INFO][SimpleAgentConversation] Keyboard interrupt")
conv.export_conversation("conversation.txt")
raise KeyboardInterrupt
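# SimpleAgent only ever calls llm(history), so any callable can stand in for
# a model during a dry run. This stub is an assumption for local testing,
# not a swarms model:
class EchoLLM:
    def __call__(self, history: str) -> str:
        # Echo the last line of the conversation history back.
        return f"(echo) {history.splitlines()[-1] if history else ''}"

# SimpleAgent(EchoLLM(), iters=2)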

@ -0,0 +1,122 @@
"""
Tool Agent
"""
from swarms.tools.format_tools import Jsonformer
from typing import Any
from swarms.models.base_llm import AbstractLLM
class ToolAgent(AbstractLLM):
"""
Represents a tool agent that performs a specific task using a model and tokenizer.
Args:
name (str): The name of the tool agent.
description (str): A description of the tool agent.
model (Any): The model used by the tool agent.
tokenizer (Any): The tokenizer used by the tool agent.
json_schema (Any): The JSON schema used by the tool agent.
*args: Variable length arguments.
**kwargs: Keyword arguments.
Attributes:
name (str): The name of the tool agent.
description (str): A description of the tool agent.
model (Any): The model used by the tool agent.
tokenizer (Any): The tokenizer used by the tool agent.
json_schema (Any): The JSON schema used by the tool agent.
Methods:
run: Runs the tool agent for a specific task.
Raises:
Exception: If an error occurs while running the tool agent.
Example:
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
json_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "number"},
"is_student": {"type": "boolean"},
"courses": {
"type": "array",
"items": {"type": "string"}
}
}
}
task = "Generate a person's information based on the following schema:"
agent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)
generated_data = agent.run(task)
print(generated_data)
"""
def __init__(
self,
name: str,
description: str,
model: Any,
tokenizer: Any,
json_schema: Any,
*args,
**kwargs,
):
super().__init__()
self.name = name
self.description = description
self.model = model
self.tokenizer = tokenizer
self.json_schema = json_schema
def run(self, task: str, *args, **kwargs):
"""
Run the tool agent for the specified task.
Args:
task (str): The task to be performed by the tool agent.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
The output of the tool agent.
Raises:
Exception: If an error occurs during the execution of the tool agent.
"""
try:
self.toolagent = Jsonformer(
self.model,
self.tokenizer,
self.json_schema,
task,
*args,
**kwargs,
)
out = self.toolagent()
return out
except Exception as error:
print(f"[Error] [ToolAgent] {error}")
raise error
def __call__(self, task: str, *args, **kwargs):
"""Call self as a function.
Args:
            task (str): The task to be performed by the tool agent.
        Returns:
            The output of the tool agent for the given task.
"""
return self.run(task, *args, **kwargs)

@ -2,7 +2,7 @@ import argparse
import sys
def run_file():
def cli():
parser = argparse.ArgumentParser(description="Swarms CLI")
parser.add_argument(
"file_name", help="Python file containing Swarms code to run"

@ -2,7 +2,7 @@ import sys
import subprocess
def run_file():
def run_file(filename: str):
"""Run a given file.
Usage: swarms run file_name.py

@ -1,7 +1,11 @@
from swarms.memory.base_vectordb import VectorDatabase
from swarms.memory.short_term_memory import ShortTermMemory
from swarms.memory.sqlite import SQLiteDB
from swarms.memory.weaviate_db import WeaviateDB
__all__ = [
"VectorDatabase",
"ShortTermMemory"
"ShortTermMemory",
"SQLiteDB",
"WeaviateDB",
]

@ -0,0 +1,159 @@
from abc import ABC, abstractmethod
class AbstractDatabase(ABC):
"""
Abstract base class for a database.
This class defines the interface for interacting with a database.
Subclasses must implement the abstract methods to provide the
specific implementation details for connecting to a database,
executing queries, and performing CRUD operations.
"""
@abstractmethod
def connect(self):
"""
Connect to the database.
This method establishes a connection to the database.
"""
pass
@abstractmethod
def close(self):
"""
Close the database connection.
This method closes the connection to the database.
"""
pass
@abstractmethod
def execute_query(self, query):
"""
Execute a database query.
This method executes the given query on the database.
Parameters:
query (str): The query to be executed.
"""
pass
@abstractmethod
def fetch_all(self):
"""
Fetch all rows from the result set.
This method retrieves all rows from the result set of a query.
Returns:
list: A list of dictionaries representing the rows.
"""
pass
@abstractmethod
def fetch_one(self):
"""
Fetch one row from the result set.
This method retrieves one row from the result set of a query.
Returns:
dict: A dictionary representing the row.
"""
pass
@abstractmethod
def add(self, table, data):
"""
Add a new record to the database.
This method adds a new record to the specified table in the database.
Parameters:
table (str): The name of the table.
data (dict): A dictionary representing the data to be added.
"""
pass
@abstractmethod
def query(self, table, condition):
"""
Query the database.
This method queries the specified table in the database based on the given condition.
Parameters:
table (str): The name of the table.
condition (str): The condition to be applied in the query.
Returns:
list: A list of dictionaries representing the query results.
"""
pass
@abstractmethod
def get(self, table, id):
"""
Get a record from the database.
This method retrieves a record from the specified table in the database based on the given ID.
Parameters:
table (str): The name of the table.
id (int): The ID of the record to be retrieved.
Returns:
dict: A dictionary representing the retrieved record.
"""
pass
@abstractmethod
def update(self, table, id, data):
"""
Update a record in the database.
This method updates a record in the specified table in the database based on the given ID.
Parameters:
table (str): The name of the table.
id (int): The ID of the record to be updated.
data (dict): A dictionary representing the updated data.
"""
pass
@abstractmethod
def delete(self, table, id):
"""
Delete a record from the database.
This method deletes a record from the specified table in the database based on the given ID.
Parameters:
table (str): The name of the table.
id (int): The ID of the record to be deleted.
"""
pass
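# A minimal sqlite3-backed sketch of the interface above (illustrative, not
# shipped with swarms). Table and column names are interpolated directly into
# SQL, so this is for trusted input only.
import sqlite3

class SQLiteDatabase(AbstractDatabase):
    def __init__(self, path=":memory:"):
        self.path = path
        self.conn = None
        self.cursor = None

    def connect(self):
        self.conn = sqlite3.connect(self.path)
        self.conn.row_factory = sqlite3.Row
        self.cursor = self.conn.cursor()

    def close(self):
        self.conn.close()

    def execute_query(self, query):
        self.cursor.execute(query)
        self.conn.commit()

    def fetch_all(self):
        return [dict(row) for row in self.cursor.fetchall()]

    def fetch_one(self):
        row = self.cursor.fetchone()
        return dict(row) if row else None

    def add(self, table, data):
        cols = ", ".join(data)
        marks = ", ".join("?" for _ in data)
        self.cursor.execute(
            f"INSERT INTO {table} ({cols}) VALUES ({marks})",
            tuple(data.values()),
        )
        self.conn.commit()

    def query(self, table, condition):
        self.cursor.execute(f"SELECT * FROM {table} WHERE {condition}")
        return self.fetch_all()

    def get(self, table, id):
        self.cursor.execute(f"SELECT * FROM {table} WHERE id = ?", (id,))
        return self.fetch_one()

    def update(self, table, id, data):
        assigns = ", ".join(f"{key} = ?" for key in data)
        self.cursor.execute(
            f"UPDATE {table} SET {assigns} WHERE id = ?",
            (*data.values(), id),
        )
        self.conn.commit()

    def delete(self, table, id):
        self.cursor.execute(f"DELETE FROM {table} WHERE id = ?", (id,))
        self.conn.commit()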

@ -1,302 +1,140 @@
import subprocess
import uuid
from typing import Optional
from attr import define, field, Factory
from dataclasses import dataclass
from swarms.memory.base import BaseVectorStore
from typing import Any, List, Optional
from sqlalchemy import JSON, Column, String, create_engine
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
try:
from sqlalchemy.engine import Engine
from sqlalchemy import create_engine, Column, String, JSON
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Session
except ImportError:
print(
"The PgVectorVectorStore requires sqlalchemy to be installed"
)
print("pip install sqlalchemy")
subprocess.run(["pip", "install", "sqlalchemy"])
try:
from pgvector.sqlalchemy import Vector
except ImportError:
print("The PgVectorVectorStore requires pgvector to be installed")
print("pip install pgvector")
subprocess.run(["pip", "install", "pgvector"])
class PostgresDB:
"""
A class representing a Postgres database.
@define
class PgVectorVectorStore(BaseVectorStore):
"""A vector store driver to Postgres using the PGVector extension.
Args:
connection_string (str): The connection string for the Postgres database.
table_name (str): The name of the table in the database.
Attributes:
connection_string: An optional string describing the target Postgres database instance.
create_engine_params: Additional configuration params passed when creating the database connection.
engine: An optional sqlalchemy Postgres engine to use.
table_name: Optionally specify the name of the table to used to store vectors.
Methods:
upsert_vector(vector: list[float], vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, **kwargs) -> str:
Upserts a vector into the index.
load_entry(vector_id: str, namespace: Optional[str] = None) -> Optional[BaseVector.Entry]:
Loads a single vector from the index.
load_entries(namespace: Optional[str] = None) -> list[BaseVector.Entry]:
Loads all vectors from the index.
query(query: str, count: Optional[int] = None, namespace: Optional[str] = None, include_vectors: bool = False, include_metadata=True, **kwargs) -> list[BaseVector.QueryResult]:
Queries the index for vectors similar to the given query string.
setup(create_schema: bool = True, install_uuid_extension: bool = True, install_vector_extension: bool = True) -> None:
Provides a mechanism to initialize the database schema and extensions.
Usage:
>>> from swarms.memory.vector_stores.pgvector import PgVectorVectorStore
>>> from swarms.utils.embeddings import USEEmbedding
>>> from swarms.utils.hash import str_to_hash
>>> from swarms.utils.dataframe import dataframe_to_hash
>>> import pandas as pd
>>>
>>> # Create a new PgVectorVectorStore instance:
>>> pv = PgVectorVectorStore(
>>> connection_string="postgresql://postgres:password@localhost:5432/postgres",
>>> table_name="your-table-name"
>>> )
>>> # Create a new index:
>>> pv.setup()
>>> # Create a new USEEmbedding instance:
>>> use = USEEmbedding()
>>> # Create a new dataframe:
>>> df = pd.DataFrame({
>>> "text": [
>>> "This is a test",
>>> "This is another test",
>>> "This is a third test"
>>> ]
>>> })
>>> # Embed the dataframe:
>>> df["embedding"] = df["text"].apply(use.embed_string)
>>> # Upsert the dataframe into the index:
>>> pv.upsert_vector(
>>> vector=df["embedding"].tolist(),
>>> vector_id=dataframe_to_hash(df),
>>> namespace="your-namespace"
>>> )
>>> # Query the index:
>>> pv.query(
>>> query="This is a test",
>>> count=10,
>>> namespace="your-namespace"
>>> )
>>> # Load a single entry from the index:
>>> pv.load_entry(
>>> vector_id=dataframe_to_hash(df),
>>> namespace="your-namespace"
>>> )
>>> # Load all entries from the index:
>>> pv.load_entries(
>>> namespace="your-namespace"
>>> )
engine: The SQLAlchemy engine for connecting to the database.
table_name (str): The name of the table in the database.
VectorModel: The SQLAlchemy model representing the vector table.
"""
connection_string: Optional[str] = field(
default=None, kw_only=True
)
create_engine_params: dict = field(factory=dict, kw_only=True)
engine: Optional[Engine] = field(default=None, kw_only=True)
table_name: str = field(kw_only=True)
_model: any = field(
default=Factory(
lambda self: self.default_vector_model(), takes_self=True
)
)
def __init__(
self, connection_string: str, table_name: str, *args, **kwargs
):
"""
Initializes a new instance of the PostgresDB class.
@connection_string.validator
def validate_connection_string(
self, _, connection_string: Optional[str]
) -> None:
# If an engine is provided, the connection string is not used.
if self.engine is not None:
return
Args:
connection_string (str): The connection string for the Postgres database.
table_name (str): The name of the table in the database.
# If an engine is not provided, a connection string is required.
if connection_string is None:
raise ValueError(
"An engine or connection string is required"
)
if not connection_string.startswith("postgresql://"):
raise ValueError(
"The connection string must describe a Postgres"
" database connection"
)
"""
self.engine = create_engine(
connection_string, *args, **kwargs
)
self.table_name = table_name
self.VectorModel = self._create_vector_model()
@engine.validator
def validate_engine(self, _, engine: Optional[Engine]) -> None:
# If a connection string is provided, an engine does not need to be provided.
if self.connection_string is not None:
return
def _create_vector_model(self):
"""
Creates the SQLAlchemy model for the vector table.
# If a connection string is not provided, an engine is required.
if engine is None:
raise ValueError(
"An engine or connection string is required"
)
Returns:
The SQLAlchemy model representing the vector table.
def __attrs_post_init__(self) -> None:
"""If a an engine is provided, it will be used to connect to the database.
If not, a connection string is used to create a new database connection here.
"""
if self.engine is None:
self.engine = create_engine(
self.connection_string, **self.create_engine_params
)
Base = declarative_base()
def setup(
self,
create_schema: bool = True,
install_uuid_extension: bool = True,
install_vector_extension: bool = True,
) -> None:
"""Provides a mechanism to initialize the database schema and extensions."""
if install_uuid_extension:
self.engine.execute(
'CREATE EXTENSION IF NOT EXISTS "uuid-ossp";'
)
class VectorModel(Base):
__tablename__ = self.table_name
if install_vector_extension:
self.engine.execute(
'CREATE EXTENSION IF NOT EXISTS "vector";'
id = Column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
unique=True,
nullable=False,
)
vector = Column(
String
) # Assuming vector is stored as a string
namespace = Column(String)
meta = Column(JSON)
if create_schema:
self._model.metadata.create_all(self.engine)
return VectorModel
def upsert_vector(
def add_or_update_vector(
self,
vector: list[float],
vector: str,
vector_id: Optional[str] = None,
namespace: Optional[str] = None,
meta: Optional[dict] = None,
**kwargs,
) -> str:
"""Inserts or updates a vector in the collection."""
with Session(self.engine) as session:
obj = self._model(
id=vector_id,
vector=vector,
namespace=namespace,
meta=meta,
)
obj = session.merge(obj)
session.commit()
return str(obj.id)
def load_entry(
self, vector_id: str, namespace: Optional[str] = None
) -> BaseVectorStore.Entry:
"""Retrieves a specific vector entry from the collection based on its identifier and optional namespace."""
with Session(self.engine) as session:
result = session.get(self._model, vector_id)
return BaseVectorStore.Entry(
id=result.id,
vector=result.vector,
namespace=result.namespace,
meta=result.meta,
)
def load_entries(
self, namespace: Optional[str] = None
) -> list[BaseVectorStore.Entry]:
"""Retrieves all vector entries from the collection, optionally filtering to only
those that match the provided namespace.
) -> None:
"""
with Session(self.engine) as session:
query = session.query(self._model)
if namespace:
query = query.filter_by(namespace=namespace)
Adds or updates a vector in the database.
results = query.all()
Args:
vector (str): The vector to be added or updated.
vector_id (str, optional): The ID of the vector. If not provided, a new ID will be generated.
namespace (str, optional): The namespace of the vector.
meta (dict, optional): Additional metadata associated with the vector.
return [
BaseVectorStore.Entry(
id=str(result.id),
vector=result.vector,
namespace=result.namespace,
meta=result.meta,
"""
try:
with Session(self.engine) as session:
obj = self.VectorModel(
id=vector_id,
vector=vector,
namespace=namespace,
meta=meta,
)
for result in results
]
def query(
self,
query: str,
count: Optional[int] = BaseVectorStore.DEFAULT_QUERY_COUNT,
namespace: Optional[str] = None,
include_vectors: bool = False,
distance_metric: str = "cosine_distance",
**kwargs,
) -> list[BaseVectorStore.QueryResult]:
"""Performs a search on the collection to find vectors similar to the provided input vector,
optionally filtering to only those that match the provided namespace.
session.merge(obj)
session.commit()
except Exception as e:
print(f"Error adding or updating vector: {e}")
def query_vectors(
self, query: Any, namespace: Optional[str] = None
) -> List[Any]:
"""
distance_metrics = {
"cosine_distance": self._model.vector.cosine_distance,
"l2_distance": self._model.vector.l2_distance,
"inner_product": self._model.vector.max_inner_product,
}
if distance_metric not in distance_metrics:
raise ValueError("Invalid distance metric provided")
op = distance_metrics[distance_metric]
with Session(self.engine) as session:
vector = self.embedding_driver.embed_string(query)
Queries vectors from the database based on the given query and namespace.
# The query should return both the vector and the distance metric score.
query = session.query(
self._model,
op(vector).label("score"),
).order_by(op(vector))
Args:
query (Any): The query or condition to filter the vectors.
namespace (str, optional): The namespace of the vectors to be queried.
if namespace:
query = query.filter_by(namespace=namespace)
Returns:
List[Any]: A list of vectors that match the query and namespace.
results = query.limit(count).all()
return [
BaseVectorStore.QueryResult(
id=str(result[0].id),
vector=(
result[0].vector if include_vectors else None
),
score=result[1],
meta=result[0].meta,
namespace=result[0].namespace,
)
for result in results
]
def default_vector_model(self) -> any:
Base = declarative_base()
@dataclass
class VectorModel(Base):
__tablename__ = self.table_name
"""
try:
with Session(self.engine) as session:
q = session.query(self.VectorModel)
if namespace:
q = q.filter_by(namespace=namespace)
# Assuming 'query' is a condition or filter
q = q.filter(query)
return q.all()
except Exception as e:
print(f"Error querying vectors: {e}")
return []
def delete_vector(self, vector_id):
"""
Deletes a vector from the database based on the given vector ID.
id = Column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
unique=True,
nullable=False,
)
vector = Column(Vector())
namespace = Column(String)
meta = Column(JSON)
Args:
vector_id: The ID of the vector to be deleted.
return VectorModel
"""
try:
with Session(self.engine) as session:
obj = session.get(self.VectorModel, vector_id)
if obj:
session.delete(obj)
session.commit()
except Exception as e:
print(f"Error deleting vector: {e}")

@ -6,9 +6,9 @@ from swarms.utils.hash import str_to_hash
@define
class PineconDB(VectorDatabase):
class PineconeDB(VectorDatabase):
"""
PineconDB is a vector storage driver that uses Pinecone as the underlying storage engine.
PineconeDB is a vector storage driver that uses Pinecone as the underlying storage engine.
Pinecone is a vector database that allows you to store, search, and retrieve high-dimensional vectors with
blazing speed and low latency. It is a managed service that is easy to use and scales effortlessly, so you can
@ -34,14 +34,14 @@ class PineconDB(VectorDatabase):
Creates a new index.
Usage:
>>> from swarms.memory.vector_stores.pinecone import PineconDB
>>> from swarms.memory.vector_stores.pinecone import PineconeDB
>>> from swarms.utils.embeddings import USEEmbedding
>>> from swarms.utils.hash import str_to_hash
>>> from swarms.utils.dataframe import dataframe_to_hash
>>> import pandas as pd
>>>
>>> # Create a new PineconDB instance:
>>> pv = PineconDB(
>>> # Create a new PineconeDB instance:
>>> pv = PineconeDB(
>>> api_key="your-api-key",
>>> index_name="your-index-name",
>>> environment="us-west1-gcp",
@ -166,7 +166,7 @@ class PineconDB(VectorDatabase):
count: Optional[int] = None,
namespace: Optional[str] = None,
include_vectors: bool = False,
# PineconDBStorageDriver-specific params:
# PineconeDBStorageDriver-specific params:
include_metadata=True,
**kwargs,
):

@ -1,4 +1,3 @@
import subprocess
from typing import List
from httpx import RequestError
@ -8,9 +7,6 @@ try:
except ImportError:
print("Please install the sentence-transformers package")
print("pip install sentence-transformers")
print("pip install qdrant-client")
subprocess.run(["pip", "install", "sentence-transformers"])
try:
from qdrant_client import QdrantClient
@ -22,7 +18,6 @@ try:
except ImportError:
print("Please install the qdrant-client package")
print("pip install qdrant-client")
subprocess.run(["pip", "install", "qdrant-client"])
class Qdrant:
@ -82,7 +77,7 @@ class Qdrant:
f"Collection '{self.collection_name}' already"
" exists."
)
except Exception as e:
except Exception:
self.client.create_collection(
collection_name=self.collection_name,
vectors_config=VectorParams(

@ -2,10 +2,30 @@ import logging
from swarms.structs.base import BaseStructure
import threading
import json
import os
class ShortTermMemory(BaseStructure):
"""Short term memory.
Args:
        return_str (bool, optional): Whether to return the memory as a string. Defaults to True.
        autosave (bool, optional): Whether to autosave the memory to disk. Defaults to True.
        *args: Additional positional arguments.
        **kwargs: Additional keyword arguments.
Example:
>>> from swarms.memory.short_term_memory import ShortTermMemory
>>> stm = ShortTermMemory()
>>> stm.add(role="agent", message="Hello world!")
>>> stm.add(role="agent", message="How are you?")
>>> stm.add(role="agent", message="I am fine.")
>>> stm.add(role="agent", message="How are you?")
>>> stm.add(role="agent", message="I am fine.")
"""
def __init__(
self,
return_str: bool = True,
@ -68,6 +88,14 @@ class ShortTermMemory(BaseStructure):
def update_short_term(
self, index, role: str, message: str, *args, **kwargs
):
"""Update the short term memory.
Args:
            index (int): Index of the memory entry to update.
            role (str): The role associated with the message.
            message (str): The updated message content.
"""
self.short_term_memory[index] = {
"role": role,
"message": message,
