[CLEANUP OPERATION]

pull/389/head
Kye 11 months ago
parent c17b1cf54b
commit 3dc8a4f444

@@ -44,7 +44,7 @@ import os
from dotenv import load_dotenv
# Import the OpenAIChat model and the Agent struct
-from swarms import OpenAIChat, Agent
+from swarms import Agent, OpenAIChat
# Load the environment variables
load_dotenv()
@@ -54,10 +54,7 @@ api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(
-    temperature=0.5,
-    model_name="gpt-4",
-    openai_api_key=api_key,
-    max_tokens=4000
+    temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000
)
@@ -66,9 +63,6 @@ agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
# Run the workflow on a task
agent.run("Generate a 10,000 word blog on health and wellness.")
```
@@ -79,6 +73,7 @@ ToolAgent is an agent that outputs JSON using any model from huggingface. It tak
```python
# Import necessary libraries
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
# Load the pre-trained model and tokenizer
@@ -107,8 +102,6 @@ generated_data = agent.run(task)
# Print the generated data
print(generated_data)
```
@@ -124,8 +117,10 @@ The `Worker` is a simple all-in-one agent equipped with an LLM, tools, and RAG f
```python
# Importing necessary modules
import os
from dotenv import load_dotenv
-from swarms import Worker, OpenAIChat, tool
+from swarms import OpenAIChat, Worker, tool
# Loading environment variables from .env file
load_dotenv()
@@ -151,14 +146,10 @@ worker = Worker(
)
# Running the worker with a prompt
-out = worker.run(
-    "Hello, how are you? Create an image of how your are doing!"
-)
+out = worker.run("Hello, how are you? Create an image of how your are doing!")
# Printing the output
print(out)
```
------
@@ -174,10 +165,12 @@ Sequential Workflow enables you to sequentially execute tasks with `Agent` and t
✅ Utilizes Agent class
```python
import os
-from swarms import OpenAIChat, Agent, SequentialWorkflow
from dotenv import load_dotenv
+from swarms import Agent, OpenAIChat, SequentialWorkflow
load_dotenv()
# Load the environment variables
@@ -186,10 +179,7 @@ api_key = os.getenv("OPENAI_API_KEY")
# Initialize the language agent
llm = OpenAIChat(
-    temperature=0.5,
-    model_name="gpt-4",
-    openai_api_key=api_key,
-    max_tokens=4000
+    temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000
)
@@ -207,12 +197,14 @@ workflow = SequentialWorkflow(max_loops=1)
# Add tasks to the workflow
workflow.add(
-    agent1, "Generate a 10,000 word blog on health and wellness.",
+    agent1,
+    "Generate a 10,000 word blog on health and wellness.",
)
# Suppose the next task takes the output of the first task as input
workflow.add(
-    agent2, "Summarize the generated blog",
+    agent2,
+    "Summarize the generated blog",
)
# Run the workflow
@@ -231,8 +223,10 @@ for task in workflow.tasks:
```python
import os
from dotenv import load_dotenv
-from swarms import OpenAIChat, Task, ConcurrentWorkflow, Agent
+from swarms import Agent, ConcurrentWorkflow, OpenAIChat, Task
# Load environment variables from .env file
load_dotenv()
@@ -254,16 +248,17 @@ workflow.add(tasks=[task1, task2, task3])
# Run the workflow
workflow.run()
```
### `RecursiveWorkflow`
`RecursiveWorkflow` will keep executing the tasks until a specific token like <DONE> is located inside the text!
```python
import os
from dotenv import load_dotenv
-from swarms import OpenAIChat, Task, RecursiveWorkflow, Agent
+from swarms import Agent, OpenAIChat, RecursiveWorkflow, Task
# Load environment variables from .env file
load_dotenv()
@@ -287,8 +282,6 @@ workflow.add(task3)
# Run the workflow
workflow.run()
```
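Since these hunks only touch the import ordering, here is a minimal end-to-end sketch of a `RecursiveWorkflow` run for context. It is illustrative only: the `Task` keyword arguments and the `stop_token` parameter follow the surrounding docs' naming but are assumptions, not part of this commit.

```python
# Minimal sketch: re-run a task until the agent's output contains the stop token
agent = Agent(llm=llm, max_loops=1)
task = Task(
    agent=agent,
    description="Draft a wellness report and finish with <DONE> when complete.",
)

workflow = RecursiveWorkflow(stop_token="<DONE>")
workflow.add(task)
workflow.run()
```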
@@ -304,7 +297,7 @@ import os
from dotenv import load_dotenv
-from swarms import Anthropic, Gemini, Mixtral, OpenAIChat, ModelParallelizer
+from swarms import Anthropic, Gemini, Mixtral, ModelParallelizer, OpenAIChat
load_dotenv()
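Only the import line changes in this hunk. For context, a minimal sketch of how `ModelParallelizer` is typically driven; the list-of-models constructor and the `run` call are assumptions inferred from the class name and the surrounding docs, not part of this commit.

```python
# Minimal sketch: fan a single task out to several models in parallel
llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))
claude = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))

parallelizer = ModelParallelizer([llm, claude])
out = parallelizer.run("Summarize the key risks of structural collapse.")
print(out)
```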
@@ -346,10 +339,7 @@ import os
from dotenv import load_dotenv
-from swarms import (
-    OpenAIChat,
-    Conversation,
-)
+from swarms import Conversation, OpenAIChat
conv = Conversation(
    time_enabled=True,
@@ -364,6 +354,7 @@ api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4")
# Run the language model in a loop
def interactive_conversation(llm):
    conv = Conversation()
@@ -372,9 +363,7 @@ def interactive_conversation(llm):
        conv.add("user", user_input)
        if user_input.lower() == "quit":
            break
-        task = (
-            conv.return_history_as_string()
-        )  # Get the conversation history
+        task = conv.return_history_as_string()  # Get the conversation history
        out = llm(task)
        conv.add("assistant", out)
        print(
@@ -386,7 +375,6 @@ def interactive_conversation(llm):
# Replace with your LLM instance
interactive_conversation(llm)
```
@@ -405,7 +393,7 @@ import os
from dotenv import load_dotenv
# Import the OpenAIChat model and the Agent struct
-from swarms import OpenAIChat, Agent, SwarmNetwork
+from swarms import Agent, OpenAIChat, SwarmNetwork
# Load the environment variables
load_dotenv()
@@ -442,11 +430,8 @@ print(out)
# Run all the agents in the swarm network on a task
-out = swarmnet.run_many_agents(
-    "Generate a 10,000 word blog on health and wellness."
-)
+out = swarmnet.run_many_agents("Generate a 10,000 word blog on health and wellness.")
print(out)
```
@@ -513,8 +498,6 @@ else:
# Output the result of the task
print(f"Task result: {task.result}")
```
---
@@ -535,14 +518,7 @@ from dotenv import load_dotenv
from transformers import AutoModelForCausalLM, AutoTokenizer
# Import the models, structs, and telemetry modules
-from swarms import (
-    Gemini,
-    GPT4VisionAPI,
-    Mixtral,
-    OpenAI,
-    ToolAgent,
-    BlocksList,
-)
+from swarms import BlocksList, Gemini, GPT4VisionAPI, Mixtral, OpenAI, ToolAgent
# Load the environment variables
load_dotenv()
@@ -552,9 +528,7 @@ openai_api_key = os.getenv("OPENAI_API_KEY")
gemini_api_key = os.getenv("GEMINI_API_KEY")
# Tool Agent
-model = AutoModelForCausalLM.from_pretrained(
-    "databricks/dolly-v2-12b"
-)
+model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
json_schema = {
    "type": "object",
@@ -565,9 +539,7 @@ json_schema = {
        "courses": {"type": "array", "items": {"type": "string"}},
    },
}
-toolagent = ToolAgent(
-    model=model, tokenizer=tokenizer, json_schema=json_schema
-)
+toolagent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)
# Blocks List which enables you to build custom swarms by adding classes or functions
swarm = BlocksList(
@@ -619,9 +591,7 @@ blocks_by_parent_name = swarm.get_by_parent_name(swarm.name)
blocks_by_parent_type = swarm.get_by_parent_type(type(swarm).__name__)
# Get blocks by parent description
-blocks_by_parent_description = swarm.get_by_parent_description(
-    swarm.description
-)
+blocks_by_parent_description = swarm.get_by_parent_description(swarm.description)
# Run the block in the swarm
inference = swarm.run_block(toolagent, "Hello World")
@@ -636,25 +606,27 @@ Here's a production grade swarm ready for real-world deployment in a factory and
```python
-from swarms.structs import Agent
import os
from dotenv import load_dotenv
from swarms.models import GPT4VisionAPI
from swarms.prompts.logistics import (
+    Efficiency_Agent_Prompt,
    Health_Security_Agent_Prompt,
-    Quality_Control_Agent_Prompt,
    Productivity_Agent_Prompt,
+    Quality_Control_Agent_Prompt,
    Safety_Agent_Prompt,
    Security_Agent_Prompt,
    Sustainability_Agent_Prompt,
-    Efficiency_Agent_Prompt,
)
+from swarms.structs import Agent
# Load ENV
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
# GPT4VisionAPI
llm = GPT4VisionAPI(openai_api_key=api_key)
# Image for analysis
@@ -686,9 +658,7 @@ productivity_agent = Agent(
)
# Initiailize safety agent
-safety_agent = Agent(
-    llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True
-)
+safety_agent = Agent(llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True)
# Init the security agent
security_agent = Agent(
@@ -748,7 +718,9 @@ Run the agent with multiple modalities useful for various real-world tasks in ma
```python
# Description: This is an example of how to use the Agent class to run a multi-modal workflow
import os
from dotenv import load_dotenv
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.structs import Agent
@@ -775,17 +747,11 @@ img = "assembly_line.jpg"
## Initialize the workflow
agent = Agent(
-    llm=llm,
-    max_loops="auto",
-    autosave=True,
-    dashboard=True,
-    multi_modal=True
+    llm=llm, max_loops="auto", autosave=True, dashboard=True, multi_modal=True
)
# Run the workflow on a task
agent.run(task=task, img=img)
```
---
@@ -857,14 +823,10 @@ model = QwenVLMultiModal(
)
# Run the model
-response = model(
-    "Hello, how are you?", "https://example.com/image.jpg"
-)
+response = model("Hello, how are you?", "https://example.com/image.jpg")
# Print the response
print(response)
```
@@ -882,7 +844,6 @@ out = model.run("Analyze the reciepts in this image", "docs.jpg")
# Print the output
print(out)
```
@@ -923,8 +884,6 @@ model.set_max_length(200)
# Clear the chat history of the model
model.clear_chat_history()
```
## Radically Simple AI Model APIs
@@ -941,9 +900,7 @@ We provide a vast array of language and multi-modal model APIs for you to genera
from swarms.models import Anthropic
# Initialize an instance of the Anthropic class
-model = Anthropic(
-    anthropic_api_key=""
-)
+model = Anthropic(anthropic_api_key="")
# Using the run method
completion_1 = model.run("What is the capital of France?")
@@ -952,7 +909,6 @@ print(completion_1)
# Using the __call__ method
completion_2 = model("How far is the moon from the earth?", stop=["miles", "km"])
print(completion_2)
```
@@ -964,12 +920,16 @@ from swarms.models import HuggingfaceLLM
custom_config = {
    "quantize": True,
    "quantization_config": {"load_in_4bit": True},
-    "verbose": True
+    "verbose": True,
}
-inference = HuggingfaceLLM(model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config)
+inference = HuggingfaceLLM(
+    model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config
+)
# Generate text based on a prompt
-prompt_text = "Create a list of known biggest risks of structural collapse with references"
+prompt_text = (
+    "Create a list of known biggest risks of structural collapse with references"
+)
generated_text = inference(prompt_text)
print(generated_text)
```
@@ -1027,7 +987,6 @@ task = "A person is walking on the street."
# Generate the video!
video_path = zeroscope(task)
print(video_path)
```

@@ -64,6 +64,7 @@ Initialize the `llm` (Language Learning Model) with your OpenAI API key:
```python
from swarms.models import OpenAIChat
llm = OpenAIChat(
    openai_api_key="Your_OpenAI_API_Key",
    temperature=0.5,
@@ -74,6 +75,7 @@ Initialize the bot with the `llm`:
```python
from apps.discord import Bot
bot = Bot(llm=llm)
```

@@ -46,6 +46,7 @@ You can also specify the conversation style:
```python
from bing_chat import ConversationStyle
response = chat("Tell me a joke", style=ConversationStyle.creative)
print(response)
```

@@ -107,16 +107,17 @@ Now, let's create your first Agent. A Agent represents a chain-like structure th
# Import necessary modules
```python
from swarms.models import OpenAIChat  # Zephr, Mistral
from swarms.structs import Agent
-api_key = ""# Initialize the language model (LLM)
-llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)# Initialize the Agent object
-agent = Agent(llm=llm, max_loops=5)# Run the agent
+api_key = ""  # Initialize the language model (LLM)
+llm = OpenAIChat(
+    openai_api_key=api_key, temperature=0.5, max_tokens=3000
+)  # Initialize the Agent object
+agent = Agent(llm=llm, max_loops=5)  # Run the agent
out = agent.run("Create an financial analysis on the following metrics")
print(out)
```
### [3. Initializing the Agent Object](https://github.com/kyegomez/swarms)

@@ -55,7 +55,7 @@ from swarms.models import OpenAIChat
llm = OpenAIChat(openai_api_key="sk-")
agent = OmniModalAgent(llm)
response = agent.run("Create an video of a swarm of fish concept art, game art")
print(response)
```

@@ -35,7 +35,6 @@ The abstraction provided in `revgpt.py` is designed to simplify your interaction
1. **Import the Necessary Modules:**
```python
-import os
from dotenv import load_dotenv
from revgpt import AbstractChatGPT
```

@@ -28,8 +28,8 @@ The provided code showcases a system built around a worker node that utilizes va
The code begins with import statements, bringing in necessary modules and classes. Key imports include the `OpenAIChat` class, which represents a language model, and several custom agents and tools from the `swarms` package.
```python
-import os
import interpreter  # Assuming this is a custom module
from swarms.agents.hf_agents import HFAgent
from swarms.agents.omni_modal_agent import OmniModalAgent
from swarms.models import OpenAIChat
@@ -59,11 +59,7 @@ All defined tools are appended to a list called `tools`. This list is later used
```python
# Append tools to a list
-tools = [
-    hf_agent,
-    omni_agent,
-    compile
-]
+tools = [hf_agent, omni_agent, compile]
```
### Initializing a Worker Node
@@ -263,8 +259,6 @@ response = node.run(task)
# Print the response
print(response)
```

@@ -53,11 +53,11 @@ Voila! You're now ready to summon your Worker.
Here's a simple way to invoke the Worker and give it a task:
```python
-from swarms.models import OpenAIChat
from swarms import Worker
+from swarms.models import OpenAIChat
llm = OpenAIChat(
-    #enter your api key
+    # enter your api key
    openai_api_key="",
    temperature=0.5,
)
@@ -75,8 +75,6 @@ node = Worker(
task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
response = node.run(task)
print(response)
```

@@ -37,7 +37,6 @@ class AbstractAgent:
    def memory(self, memory_store):
        """init memory"""
-        pass
    def reset(self):
        """(Abstract method) Reset the agent."""
@@ -82,7 +81,7 @@ agent.reset()
The `run` method allows the agent to perform a specific task.
```python
-agent.run('some_task')
+agent.run("some_task")
```
#### 3. `chat`
@@ -90,7 +89,7 @@ agent.run('some_task')
The `chat` method enables communication with the agent through a series of messages.
```python
-messages = [{'id': 1, 'text': 'Hello, agent!'}, {'id': 2, 'text': 'How are you?'}]
+messages = [{"id": 1, "text": "Hello, agent!"}, {"id": 2, "text": "How are you?"}]
agent.chat(messages)
```
@@ -99,7 +98,7 @@ agent.chat(messages)
The `step` method allows the agent to process a single message.
```python
-agent.step('Hello, agent!')
+agent.step("Hello, agent!")
```
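For context, here is a minimal sketch of a concrete agent built on this interface. The import path, constructor, and method bodies are placeholders and assumptions for illustration, not part of the documented API.

```python
from swarms.agents.base import AbstractAgent  # import path assumed


class EchoAgent(AbstractAgent):
    """Toy agent that simply echoes whatever it receives."""

    def reset(self):
        # Drop any accumulated state
        self.history = []

    def run(self, task: str) -> str:
        return f"echo: {task}"

    def chat(self, messages: list) -> list:
        # Process each message dict of the shape used in the chat example above
        return [self.run(m["text"]) for m in messages]

    def step(self, message: str) -> str:
        return self.run(message)
```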
### Asynchronous Methods

@@ -44,7 +44,7 @@ class Message:
    def __repr__(self):
        """
        __repr__ represents the string representation of the Message object.
        Returns:
        (str) A string containing the timestamp, sender, and content of the message.
        """
@@ -60,10 +60,7 @@ The `Message` class represents a message in the agent system. Upon initializatio
Creating a `Message` object and displaying its string representation.
```python
-mes = Message(
-    sender = "Kye",
-    content = "Hello! How are you?"
-)
+mes = Message(sender="Kye", content="Hello! How are you?")
print(mes)
```
@@ -80,9 +77,7 @@ Creating a `Message` object with metadata.
```python
metadata = {"priority": "high", "category": "urgent"}
mes_with_metadata = Message(
-    sender = "Alice",
-    content = "Important update",
-    metadata = metadata
+    sender="Alice", content="Important update", metadata=metadata
)
print(mes_with_metadata)
@@ -98,10 +93,7 @@ Output:
Creating a `Message` object without providing metadata.
```python
-mes_no_metadata = Message(
-    sender = "Bob",
-    content = "Reminder: Meeting at 2PM"
-)
+mes_no_metadata = Message(sender="Bob", content="Reminder: Meeting at 2PM")
print(mes_no_metadata)
```

@@ -39,10 +39,12 @@ For streaming mode, this function yields the response token by token, ensuring a
## Examples & Use Cases
Initialize the `OmniModalAgent` and communicate with it:
```python
+import os
+from dotenv import load_dotenv
from swarms.agents.omni_modal_agent import OmniModalAgent, OpenAIChat
from swarms.models import OpenAIChat
-from dotenv import load_dotenv
-import os
# Load the environment variables
load_dotenv()
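The hunk stops mid-example because only the import ordering changed. For context, the snippet presumably continues along the lines of the `OmniModalAgent` example shown earlier in this commit (the prompt text is illustrative):

```python
api_key = os.getenv("OPENAI_API_KEY")
llm = OpenAIChat(openai_api_key=api_key)
agent = OmniModalAgent(llm)
response = agent.run("Create a video of a swarm of fish concept art, game art")
print(response)
```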

@@ -69,6 +69,7 @@ The `ToolAgent` class takes the following arguments:
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
# Creating a model and tokenizer
@@ -82,11 +83,8 @@ json_schema = {
        "name": {"type": "string"},
        "age": {"type": "number"},
        "is_student": {"type": "boolean"},
-        "courses": {
-            "type": "array",
-            "items": {"type": "string"}
-        }
-    }
+        "courses": {"type": "array", "items": {"type": "string"}},
+    },
}
# Defining a task
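The hunk ends at the task definition because the rest of the example is unchanged. For reference, a minimal sketch of how it presumably finishes, mirroring the ToolAgent usage shown elsewhere in this commit (the task string is illustrative):

```python
# Instantiate the agent against the schema and run it (task text is illustrative)
agent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)
task = "Generate a person's information based on the following schema:"
generated_data = agent.run(task)
print(generated_data)
```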

@@ -38,7 +38,7 @@ worker = Worker(
    human_in_the_loop=False,
    temperature=0.5,
    llm=some_language_model,
-    openai_api_key="my_key"
+    openai_api_key="my_key",
)
worker.run("What's the weather in Miami?")
```
@@ -56,11 +56,11 @@ worker.send()
```python
external_tools = [MyTool1(), MyTool2()]
worker = Worker(
    name="My Worker",
    role="Worker",
    external_tools=external_tools,
    human_in_the_loop=False,
    temperature=0.5,
)
```

@@ -69,7 +69,9 @@ from basechunker import BaseChunker, ChunkSeparator
chunker = BaseChunker()
# Text to be chunked
-input_text = "This is a long text that needs to be split into smaller chunks for processing."
+input_text = (
+    "This is a long text that needs to be split into smaller chunks for processing."
+)
# Chunk the text
chunks = chunker.chunk(input_text)

@@ -62,8 +62,8 @@ Let's explore how to use the `PdfChunker` class with different scenarios and app
#### Example 1: Basic Chunking
```python
-from swarms.chunkers.pdf_chunker import PdfChunker
from swarms.chunkers.chunk_seperator import ChunkSeparator
+from swarms.chunkers.pdf_chunker import PdfChunker
# Initialize the PdfChunker
pdf_chunker = PdfChunker()
@@ -82,8 +82,8 @@ for idx, chunk in enumerate(chunks, start=1):
#### Example 2: Custom Separators
```python
-from swarms.chunkers.pdf_chunker import PdfChunker
from swarms.chunkers.chunk_seperator import ChunkSeparator
+from swarms.chunkers.pdf_chunker import PdfChunker
# Define custom separators for PDF chunking
custom_separators = [ChunkSeparator("\n\n"), ChunkSeparator(". ")]
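The hunk ends before the custom separators are used. Presumably they are passed to the chunker when it is constructed, roughly as below; the `separators` parameter name and the `pdf_text` variable are assumptions for illustration.

```python
# Construct the chunker with the custom separators (parameter name assumed)
pdf_chunker = PdfChunker(separators=custom_separators)
chunks = pdf_chunker.chunk(pdf_text)  # pdf_text: text extracted from the PDF
```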

@@ -28,7 +28,6 @@ We have a small gallery of examples to run here, [for more check out the docs to
- Enterprise Grade + Production Grade: `Agent` is designed and optimized for automating real-world tasks at scale!
```python
from swarms.models import OpenAIChat
from swarms.structs import Agent
@@ -64,9 +63,6 @@ out = agent.run("Generate a 10,000 word blog on health and wellness.")
# out = agent.print_history_and_memory()
# # out = agent.save_state("flow_state.json")
# print(out)
```
------
@@ -82,9 +78,7 @@ from swarms.structs import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow
# Example usage
-api_key = (
-    ""  # Your actual API key here
-)
+api_key = ""  # Your actual API key here
# Initialize the language agent
llm = OpenAIChat(
@@ -118,7 +112,6 @@ workflow.run()
# Output the results
for task in workflow.tasks:
    print(f"Task: {task.description}, Result: {task.result}")
```
---

@@ -110,7 +110,9 @@ def setup(
```python
# Initialize the PgVectorVectorStore instance
-vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name")
+vector_store = PgVectorVectorStore(
+    connection_string="your-db-connection-string", table_name="your-table-name"
+)
# Set up the database with default settings
vector_store.setup()
@@ -120,10 +122,14 @@ vector_store.setup()
```python
# Initialize the PgVectorVectorStore instance
-vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name")
+vector_store = PgVectorVectorStore(
+    connection_string="your-db-connection-string", table_name="your-table-name"
+)
# Set up the database with customized settings
-vector_store.setup(create_schema=False, install_uuid_extension=True, install_vector_extension=True)
+vector_store.setup(
+    create_schema=False, install_uuid_extension=True, install_vector_extension=True
+)
```
### 4.2 Upserting Vectors <a name="upserting-vectors"></a>
@@ -137,7 +143,7 @@ def upsert_vector(
    vector_id: Optional[str] = None,
    namespace: Optional[str] = None,
    meta: Optional[dict] = None,
-    **kwargs
+    **kwargs,
) -> str:
    """
    Inserts or updates a vector in the collection.
@@ -158,7 +164,9 @@ def upsert_vector(
```python
# Initialize the PgVectorVectorStore instance
-vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name")
+vector_store = PgVectorVectorStore(
+    connection_string="your-db-connection-string", table_name="your-table-name"
+)
# Define a vector and upsert it
vector = [0.1, 0.2, 0.3, 0.4]
@@ -167,10 +175,7 @@ namespace = "your-namespace"
meta = {"key1": "value1", "key2": "value2"}
vector_store.upsert_vector(
-    vector=vector,
-    vector_id=vector_id,
-    namespace=namespace,
-    meta=meta
+    vector=vector, vector_id=vector_id, namespace=namespace, meta=meta
)
```
@@ -222,9 +227,7 @@ else:
The `load_entries` method allows you to load all vector entries from the collection, optionally filtering by namespace.
```python
-def load_entries(
-    self, namespace: Optional[str] = None
-) -> list[BaseVectorStore.Entry]:
+def load_entries(self, namespace: Optional[str] = None) -> list[BaseVectorStore.Entry]:
    """
    Retrieves all vector entries from the collection, optionally filtering to only those that match the provided namespace.
@@ -240,7 +243,9 @@ def load_entries(
```python
# Initialize the PgVectorVectorStore instance
-vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name")
+vector_store = PgVectorVectorStore(
+    connection_string="your-db-connection-string", table_name="your-table-name"
+)
# Load all vector entries in the specified namespace
entries = vector_store.load_entries(namespace="your-namespace")
@@ -266,7 +271,7 @@ def query(
    namespace: Optional[str] = None,
    include_vectors: bool = False,
    distance_metric: str = "cosine_distance",
-    **kwargs
+    **kwargs,
) -> list[BaseVectorStore.QueryResult]:
    """
    Performs a search on the collection to find vectors similar to the provided input vector,
@@ -290,7 +295,9 @@ def query(
```python
# Initialize the PgVectorVectorStore instance
-vector_store = PgVectorVectorStore(connection_string="your-db-connection-string", table_name="your-table-name")
+vector_store = PgVectorVectorStore(
+    connection_string="your-db-connection-string", table_name="your-table-name"
+)
# Perform a vector query
query_string = "your-query-string"
@@ -304,7 +311,7 @@ results = vector_store.query(
    count=count,
    namespace=namespace,
    include_vectors=include_vectors,
-    distance_metric=distance_metric
+    distance_metric=distance_metric,
)
# Process the query results

@@ -174,7 +174,7 @@ pv = PineconeVector(
    api_key="your-api-key",
    index_name="your-index-name",
    environment="us-west1-gcp",
-    project_name="your-project-name"
+    project_name="your-project-name",
)
```
@@ -198,12 +198,7 @@ vector_id = "unique-vector-id"
namespace = "your-namespace"
meta = {"key1": "value1", "key2": "value2"}
-pv.upsert_vector(
-    vector=vector,
-    vector_id=vector_id,
-    namespace=namespace,
-    meta=meta
-)
+pv.upsert_vector(vector=vector, vector_id=vector_id, namespace=namespace, meta=meta)
```
### 4.4 Querying the Index <a name="querying-the-index"></a>
@@ -222,7 +217,7 @@ results = pv.query(
    count=count,
    namespace=namespace,
    include_vectors=include_vectors,
-    include_metadata=include_metadata
+    include_metadata=include_metadata,
)
# Process the query results

@@ -14,8 +14,15 @@ pip install qdrant-client sentence-transformers httpx
```python
class Qdrant:
-    def __init__(self, api_key: str, host: str, port: int = 6333, collection_name: str = "qdrant", model_name: str = "BAAI/bge-small-en-v1.5", https: bool = True):
-        ...
+    def __init__(
+        self,
+        api_key: str,
+        host: str,
+        port: int = 6333,
+        collection_name: str = "qdrant",
+        model_name: str = "BAAI/bge-small-en-v1.5",
+        https: bool = True,
+    ): ...
```
### Constructor Parameters
@@ -60,10 +67,7 @@ qdrant_client = Qdrant(api_key="your_api_key", host="localhost", port=6333)
### Example 2: Adding Vectors to a Collection
```python
-documents = [
-    {"page_content": "Sample text 1"},
-    {"page_content": "Sample text 2"}
-]
+documents = [{"page_content": "Sample text 1"}, {"page_content": "Sample text 2"}]
operation_info = qdrant_client.add_vectors(documents)
print(operation_info)

@@ -125,7 +125,9 @@ def update_short_term(self, index, role: str, message: str, *args, **kwargs):
##### Example: Updating a Message in Short-Term Memory
```python
-memory.update_short_term(index=0, role="Updated Role", message="Updated message content.")
+memory.update_short_term(
+    index=0, role="Updated Role", message="Updated message content."
+)
```
#### 7. `clear`

@@ -82,7 +82,7 @@ weaviate_client.create_collection(
        {"name": "property1", "dataType": ["string"]},
        {"name": "property2", "dataType": ["int"]},
    ],
-    vectorizer_config=None  # Optional vectorizer configuration
+    vectorizer_config=None,  # Optional vectorizer configuration
)
```
@@ -99,8 +99,7 @@ The `add` method allows you to add an object to a specified collection in Weavia
```python
weaviate_client.add(
-    collection_name="my_collection",
-    properties={"property1": "value1", "property2": 42}
+    collection_name="my_collection", properties={"property1": "value1", "property2": 42}
)
```
@@ -142,7 +141,7 @@ The `update` method allows you to update an object in a specified collection in
weaviate_client.update(
    collection_name="my_collection",
    object_id="object123",
-    properties={"property1": "new_value", "property2": 99}
+    properties={"property1": "new_value", "property2": 99},
)
```
@@ -158,10 +157,7 @@ The `delete` method allows you to delete an object from a specified collection i
#### Usage
```python
-weaviate_client.delete(
-    collection_name="my_collection",
-    object_id="object123"
-)
+weaviate_client.delete(collection_name="my_collection", object_id="object123")
```
## Examples
@@ -175,28 +171,21 @@ weaviate_client.create_collection(
    name="people",
    properties=[
        {"name": "name", "dataType": ["string"]},
-        {"name": "age", "dataType": ["int"]}
-    ]
+        {"name": "age", "dataType": ["int"]},
+    ],
)
```
### Example 2: Adding an Object
```python
-weaviate_client.add(
-    collection_name="people",
-    properties={"name": "John", "age": 30}
-)
+weaviate_client.add(collection_name="people", properties={"name": "John", "age": 30})
```
### Example 3: Querying Objects
```python
-results = weaviate_client.query(
-    collection_name="people",
-    query="name:John",
-    limit=5
-)
+results = weaviate_client.query(collection_name="people", query="name:John", limit=5)
```
These examples cover the basic operations of creating collections, adding objects, and querying objects using the Weaviate API Client.

@@ -72,9 +72,7 @@ class Anthropic:
from swarms.models import Anthropic
# Initialize an instance of the Anthropic class
-model = Anthropic(
-    anthropic_api_key=""
-)
+model = Anthropic(anthropic_api_key="")
# Using the run method
completion_1 = model.run("What is the capital of France?")

@@ -149,7 +149,9 @@ model = BaseMultiModalModel(
)
# Run the model with a text task and an image URL
-response = model.run("Generate a summary of this text", "https://www.example.com/image.jpg")
+response = model.run(
+    "Generate a summary of this text", "https://www.example.com/image.jpg"
+)
print(response)
```
@@ -209,6 +211,7 @@ for response in responses:
```python
from swarms.models import BaseMultiModalModel
class CustomMultiModalModel(BaseMultiModalModel):
    def __init__(self, model_name, custom_parameter, *args, **kwargs):
        # Call the parent class constructor
@@ -226,6 +229,7 @@ class CustomMultiModalModel(BaseMultiModalModel):
        # You can use self.custom_parameter and other inherited attributes
        pass
# Create an instance of your custom multimodal model
custom_model = CustomMultiModalModel(
    model_name="your_custom_model_name",
@@ -236,7 +240,9 @@ custom_model = CustomMultiModalModel(
)
# Run your custom model
-response = custom_model.run("Generate a summary of this text", "https://www.example.com/image.jpg")
+response = custom_model.run(
+    "Generate a summary of this text", "https://www.example.com/image.jpg"
+)
print(response)
# Generate a summary using your custom model

@@ -39,7 +39,6 @@ print(response)
```python
from swarms.models.bing_chat import BingChat
edgegpt = BingChat(cookies_path="./path/to/cookies.json")
response = edgegpt("Hello, my name is ChatGPT")
print(response)
@@ -48,7 +47,9 @@ print(response)
3. Generate an image based on a text prompt:
```python
-image_path = edgegpt.create_img("Sunset over mountains", output_dir="./output", auth_cookie="your_auth_cookie")
+image_path = edgegpt.create_img(
+    "Sunset over mountains", output_dir="./output", auth_cookie="your_auth_cookie"
+)
print(f"Generated image saved at {image_path}")
```
@@ -59,7 +60,9 @@ from swarms.models.bing_chat import BingChat
edgegpt = BingChat(cookies_path="./path/to/cookies.json")
-image_path = edgegpt.create_img("Sunset over mountains", output_dir="./output", auth_cookie="your_auth_cookie")
+image_path = edgegpt.create_img(
+    "Sunset over mountains", output_dir="./output", auth_cookie="your_auth_cookie"
+)
print(f"Generated image saved at {image_path}")
```

@@ -83,7 +83,6 @@ print(generated_text)
```python
from swarms.models import BioGPT
# Initialize the BioGPT model
biogpt = BioGPT()
@@ -99,7 +98,6 @@ print(features)
```python
from swarms.models import BioGPT
# Initialize the BioGPT model
biogpt = BioGPT()

@@ -29,7 +29,7 @@ from swarms.models import DistilWhisperModel
model_wrapper = DistilWhisperModel()
# Initialize with a specific model ID
-model_wrapper = DistilWhisperModel(model_id='distil-whisper/distil-large-v2')
+model_wrapper = DistilWhisperModel(model_id="distil-whisper/distil-large-v2")
```
## Attributes
@@ -62,7 +62,7 @@ Transcribes audio input synchronously.
```python
# Synchronous transcription
-transcription = model_wrapper.transcribe('path/to/audio.mp3')
+transcription = model_wrapper.transcribe("path/to/audio.mp3")
print(transcription)
```
@@ -84,7 +84,7 @@ Transcribes audio input asynchronously.
import asyncio
# Asynchronous transcription
-transcription = asyncio.run(model_wrapper.async_transcribe('path/to/audio.mp3'))
+transcription = asyncio.run(model_wrapper.async_transcribe("path/to/audio.mp3"))
print(transcription)
```
@@ -103,7 +103,7 @@ Simulates real-time transcription of an audio file.
```python
# Real-time transcription simulation
-model_wrapper.real_time_transcribe('path/to/audio.mp3', chunk_duration=5)
+model_wrapper.real_time_transcribe("path/to/audio.mp3", chunk_duration=5)
```
## Error Handling

@@ -107,16 +107,16 @@ The `__call__` method is a convenient way to run the GPT-4 Vision model. It has
```python
def __call__(task: str, img: str) -> str:
    """
    Run the GPT-4 Vision model (callable).
    Parameters:
    - task (str): The task or question related to the image.
    - img
    (str): URL of the image to analyze.
    Returns:
    str: The model's response.
    """
```
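For context, a minimal usage sketch of this callable, mirroring the `GPT4VisionAPI` initialization used elsewhere in this commit (the image URL and question are illustrative):

```python
import os

from swarms.models import GPT4VisionAPI

llm = GPT4VisionAPI(openai_api_key=os.getenv("OPENAI_API_KEY"))
# task and img follow the signature documented above
out = llm("What is in this image?", "https://example.com/factory_floor.jpg")
print(out)
```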

@@ -114,9 +114,11 @@ from swarms.models import HuggingfaceLLM
custom_config = {
    "quantize": True,
    "quantization_config": {"load_in_4bit": True},
-    "verbose": True
+    "verbose": True,
}
-inference = HuggingfaceLLM(model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config)
+inference = HuggingfaceLLM(
+    model_id="NousResearch/Nous-Hermes-2-Vision-Alpha", **custom_config
+)
# Generate text based on a prompt
prompt_text = "Tell me a joke"
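The hunk ends at the prompt; as in the matching `HuggingfaceLLM` example earlier in this commit, generation presumably follows:

```python
generated_text = inference(prompt_text)
print(generated_text)
```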

@@ -36,7 +36,9 @@ model = Idefics()
2. Generate text based on prompts:
```python
-prompts = ["User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"]
+prompts = [
+    "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
+]
response = model(prompts)
print(response)
```
@@ -47,7 +49,9 @@ print(response)
from swarms.models import Idefics
model = Idefics()
-prompts = ["User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"]
+prompts = [
+    "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
+]
response = model(prompts)
print(response)
```

@@ -42,9 +42,10 @@ OpenAI(api_key: str, system: str = None, console: bool = True, model: str = None
**Usage Example:**
```python
-from swarms import OpenAI
import asyncio
+from swarms import OpenAI
chat = OpenAI(api_key="YOUR_OPENAI_API_KEY")
response = chat.generate("Hello, how can I assist you?")
@@ -126,7 +127,10 @@ GooglePalm(model_name: str = "models/chat-bison-001", google_api_key: str = None
from swarms import GooglePalm
google_palm = GooglePalm()
-messages = [{"role": "system", "content": "You are a helpful assistant"}, {"role": "user", "content": "Tell me a joke"}]
+messages = [
+    {"role": "system", "content": "You are a helpful assistant"},
+    {"role": "user", "content": "Tell me a joke"},
+]
response = google_palm.generate(messages)
print(response["choices"][0]["text"])

@@ -30,7 +30,9 @@ kosmos = Kosmos()
2. Perform Multimodal Grounding:
```python
-kosmos.multimodal_grounding("Find the red apple in the image.", "https://example.com/apple.jpg")
+kosmos.multimodal_grounding(
+    "Find the red apple in the image.", "https://example.com/apple.jpg"
+)
```
### Example 1 - Multimodal Grounding
@@ -40,13 +42,17 @@ from swarms.models.kosmos_two import Kosmos
kosmos = Kosmos()
-kosmos.multimodal_grounding("Find the red apple in the image.", "https://example.com/apple.jpg")
+kosmos.multimodal_grounding(
+    "Find the red apple in the image.", "https://example.com/apple.jpg"
+)
```
3. Perform Referring Expression Comprehension:
```python
-kosmos.referring_expression_comprehension("Show me the green bottle.", "https://example.com/bottle.jpg")
+kosmos.referring_expression_comprehension(
+    "Show me the green bottle.", "https://example.com/bottle.jpg"
+)
```
### Example 2 - Referring Expression Comprehension
@@ -56,13 +62,17 @@ from swarms.models.kosmos_two import Kosmos
kosmos = Kosmos()
-kosmos.referring_expression_comprehension("Show me the green bottle.", "https://example.com/bottle.jpg")
+kosmos.referring_expression_comprehension(
+    "Show me the green bottle.", "https://example.com/bottle.jpg"
+)
```
4. Generate Referring Expressions:
```python
-kosmos.referring_expression_generation("It is on the table.", "https://example.com/table.jpg")
+kosmos.referring_expression_generation(
+    "It is on the table.", "https://example.com/table.jpg"
+)
```
### Example 3 - Referring Expression Generation
@@ -72,7 +82,9 @@ from swarms.models.kosmos_two import Kosmos
kosmos = Kosmos()
-kosmos.referring_expression_generation("It is on the table.", "https://example.com/table.jpg")
+kosmos.referring_expression_generation(
+    "It is on the table.", "https://example.com/table.jpg"
+)
```
5. Perform Grounded Visual Question Answering (VQA):
@@ -127,7 +139,10 @@ kosmos.grounded_image_captioning_detailed("https://example.com/beach.jpg")
```python
image = kosmos.get_image("https://example.com/image.jpg")
-entities = [("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)])]
+entities = [
+    ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]),
+    ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]),
+]
kosmos.draw_entity_boxes_on_image(image, entities, show=True)
```
@@ -139,24 +154,38 @@ from swarms.models.kosmos_two import Kosmos
kosmos = Kosmos()
image = kosmos.get_image("https://example.com/image.jpg")
-entities = [("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)])]
+entities = [
+    ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]),
+    ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]),
+]
kosmos.draw_entity_boxes_on_image(image, entities, show=True)
```
9. Generate Boxes for Entities:
```python
-entities = [("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)])]
-image = kosmos.generate_boxes("Find the apple and the banana in the image.", "https://example.com/image.jpg")
+entities = [
+    ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]),
+    ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]),
+]
+image = kosmos.generate_boxes(
+    "Find the apple and the banana in the image.", "https://example.com/image.jpg"
+)
```
### Example 8 - Generating Boxes for Entities
```python
from swarms.models.kosmos_two import Kosmos
kosmos = Kosmos()
-entities = [("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]), ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)])]
-image = kosmos.generate_boxes("Find the apple and the banana in the image.", "https://example.com/image.jpg")
+entities = [
+    ("apple", (0, 3), [(0.2, 0.3, 0.4, 0.5)]),
+    ("banana", (4, 9), [(0.6, 0.2, 0.8, 0.4)]),
+]
+image = kosmos.generate_boxes(
+    "Find the apple and the banana in the image.", "https://example.com/image.jpg"
+)
```
## How Kosmos Works

@@ -150,7 +150,6 @@ Example:
```python
from swarms.models import Mistral
model = Mistral()
task = "Translate the following English text to French: 'Hello, how are you?'"
result = model.run(task)

@@ -52,10 +52,10 @@ class MPT7B:
from swarms.models import MPT7B
# Initialize the MPT7B class
-mpt = MPT7B('mosaicml/mpt-7b-storywriter', 'EleutherAI/gpt-neox-20b', max_tokens=150)
+mpt = MPT7B("mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150)
# Generate text
-output = mpt.run('generate', 'Once upon a time in a land far, far away...')
+output = mpt.run("generate", "Once upon a time in a land far, far away...")
print(output)
```
@@ -77,13 +77,16 @@ print(outputs)
```python
import asyncio
from swarms.models import MPT7B
# Initialize the MPT7B class
-mpt = MPT7B('mosaicml/mpt-7b-storywriter', 'EleutherAI/gpt-neox-20b', max_tokens=150)
+mpt = MPT7B("mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150)
# Generate text asynchronously
-output = asyncio.run(mpt.run_async('generate', 'Once upon a time in a land far, far away...'))
+output = asyncio.run(
+    mpt.run_async("generate", "Once upon a time in a land far, far away...")
+)
print(output)
```

@@ -168,7 +168,11 @@ prompt = "Translate the following English text to French: 'Hello, how are you?'"
generated_text = openai.generate(prompt, max_tokens=50)
# Generate text from multiple prompts
-prompts = ["Translate this: 'Good morning' to Spanish.", "Summarize the following article:", article_text]
+prompts = [
+    "Translate this: 'Good morning' to Spanish.",
+    "Summarize the following article:",
+    article_text,
+]
generated_texts = openai.generate(prompts, max_tokens=100)
# Generate text asynchronously
@@ -188,7 +192,7 @@ custom_options = {
    "max_tokens": 100,
    "top_p": 0.9,
    "frequency_penalty": 0.2,
-    "presence_penalty": 0.4
+    "presence_penalty": 0.4,
}
generated_text = openai.generate(prompt, **custom_options)
```

@@ -150,7 +150,9 @@ user_message = "User: Tell me another joke."
response = openai_chat.generate([user_message])
# Print the generated response
-print(response[0][0].text)  # Output: "Assistant: Why don't scientists trust atoms? Because they make up everything!"
+print(
+    response[0][0].text
+)  # Output: "Assistant: Why don't scientists trust atoms? Because they make up everything!"
```
### Example 3: Asynchronous Generation
@@ -158,12 +160,14 @@ print(response[0][0].text) # Output: "Assistant: Why don't scientists trust ato
```python
import asyncio
# Define an asynchronous function for generating responses
async def generate_responses():
    user_message = "User: Tell me a fun fact."
    async for chunk in openai_chat.stream([user_message]):
        print(chunk.text)
# Run the asynchronous generation function
asyncio.run(generate_responses())
```

@@ -26,20 +26,26 @@ To use the Vilt model, follow these steps:
```python
from swarms.models import Vilt
model = Vilt()
```
2. Call the model with a text question and an image URL:
```python
-output = model("What is this image?", "http://images.cocodataset.org/val2017/000000039769.jpg")
+output = model(
+    "What is this image?", "http://images.cocodataset.org/val2017/000000039769.jpg"
+)
```
### Example 1 - Image Questioning
```python
model = Vilt()
-output = model("What are the objects in this image?", "http://images.cocodataset.org/val2017/000000039769.jpg")
+output = model(
+    "What are the objects in this image?",
+    "http://images.cocodataset.org/val2017/000000039769.jpg",
+)
print(output)
```
@@ -47,7 +53,10 @@ print(output)
```python
model = Vilt()
-output = model("Describe the scene in this image.", "http://images.cocodataset.org/val2017/000000039769.jpg")
+output = model(
+    "Describe the scene in this image.",
+    "http://images.cocodataset.org/val2017/000000039769.jpg",
+)
print(output)
```
@@ -55,7 +64,10 @@ print(output)
```python
model = Vilt()
-output = model("Tell me more about the landmark in this image.", "http://images.cocodataset.org/val2017/000000039769.jpg")
+output = model(
+    "Tell me more about the landmark in this image.",
+    "http://images.cocodataset.org/val2017/000000039769.jpg",
+)
print(output)
```

@@ -63,7 +63,7 @@ custom_vllm = vLLM(
    trust_remote_code=True,
    revision="abc123",
    temperature=0.7,
    top_p=0.8,
)
```
@@ -108,7 +108,7 @@ custom_vllm = vLLM(
    trust_remote_code=True,
    revision="abc123",
    temperature=0.7,
    top_p=0.8,
)

# Generate text with custom configuration
@@ -128,7 +128,7 @@ vllm = vLLM()
tasks = [
    "Translate the following sentence to French: 'Hello, world!'",
    "Write a short story set in a futuristic world.",
    "Summarize the main points of a news article about climate change.",
]

for task in tasks:

@@ -45,6 +45,7 @@ To use the Zephyr model, follow these steps:

```python
from swarms.models import Zephyr

model = Zephyr(max_new_tokens=300, temperature=0.7, top_k=50, top_p=0.95)
```

@@ -47,9 +47,11 @@ The `AbstractSwarm` class is an abstract base class that serves as the foundatio

```python
from abc import ABC, abstractmethod
from typing import List

from swarms.swarms.base import AbstractWorker


class AbstractSwarm(ABC):
    """
    Abstract class for swarm simulation architectures
@@ -58,12 +60,12 @@ class AbstractSwarm(ABC):
    ---------
    ...
    """

    # The class definition and constructor are provided here.
    @abstractmethod
    def __init__(self, workers: List["AbstractWorker"]):
        """Initialize the swarm with workers"""

    # Other abstract methods are listed here.
```
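To make the pattern concrete, a rough sketch (not taken from the library docs) of what a concrete subclass could look like; it assumes only the abstract constructor shown above, and any further abstract methods of `AbstractSwarm` would also need implementations before the class could be instantiated:

```python
from typing import List

from swarms.swarms.base import AbstractWorker


class SimpleSwarm(AbstractSwarm):
    """Hypothetical concrete swarm that simply stores its workers."""

    def __init__(self, workers: List["AbstractWorker"]):
        self.workers = workers

    # ...the remaining abstract methods of AbstractSwarm would be implemented here.
```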

@@ -68,7 +68,9 @@ final_response = agent.run(initial_task)

You can collect feedback during the conversation using the `provide_feedback` method:

```python
agent.provide_feedback(
    "Generate an SOP for new sales employees on the best cold sales practices"
)
```
### Stopping Condition

@@ -78,9 +80,11 @@ You can define a custom stopping condition using a function. For example, you ca

```python
from swarms.structs import Agent


def stop_when_repeats(response: str) -> bool:
    return "Stop" in response.lower()


agent = Agent(llm=my_language_model, max_loops=5, stopping_condition=stop_when_repeats)
```
@@ -107,9 +111,9 @@ Here are three usage examples:

### Example 1: Simple Conversation

```python
# Select any Language model from the models folder
from swarms.models import Mistral, OpenAIChat
from swarms.structs import Agent

llm = Mistral()
# llm = OpenAIChat()
@@ -128,9 +132,11 @@ final_response = agent.run(initial_task)

```python
from swarms.structs import Agent


def stop_when_repeats(response: str) -> bool:
    return "Stop" in response.lower()


agent = Agent(llm=llm, max_loops=5, stopping_condition=stop_when_repeats)
```

@@ -41,9 +41,7 @@ class Artifact(BaseModel):
    )
    relative_path: Optional[str] = Field(
        None,
        description=("Relative path of the artifact in the agent's workspace"),
        example="python/code/",
    )
```
@@ -64,7 +62,7 @@ from swarms.structs import Artifact

artifact_instance = Artifact(
    artifact_id="b225e278-8b4c-4f99-a696-8facf19f0e56",
    file_name="main.py",
    relative_path="python/code/",
)
```
@@ -85,8 +83,7 @@ If the `relative_path` attribute is not provided during artifact creation, it wi

```python
artifact_instance_no_path = Artifact(
    artifact_id="c280s347-9b7d-3c68-m337-7abvf50j23k", file_name="script.js"
)
print(artifact_instance_no_path.relative_path)
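# Expected to print None, the Field default shown above (an assumption; a model validator could set something else)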

@@ -15,9 +15,7 @@ class ArtifactUpload(BaseModel):
    file: bytes = Field(..., description="File to upload")
    relative_path: Optional[str] = Field(
        None,
        description=("Relative path of the artifact in the agent's workspace"),
        example="python/code/",
    )
```
@@ -32,10 +30,12 @@ The `ArtifactUpload` class is used to create an instance of an artifact upload.
from swarms.structs import ArtifactUpload

# Uploading a file with no relative path
upload_no_path = ArtifactUpload(file=b"example_file_contents")

# Uploading a file with a relative path
upload_with_path = ArtifactUpload(
    file=b"example_file_contents", relative_path="python/code/"
)
```

In the above example, `upload_no_path` is an instance of `ArtifactUpload` with no specified `relative_path`, whereas `upload_with_path` is an instance of `ArtifactUpload` with the `relative_path` set to "python/code/".

@@ -36,7 +36,9 @@ Initializes the `AutoScaler` with a predefined number of agents and sets up conf

```python
from swarms import AutoScaler

scaler = AutoScaler(
    initial_agents=5, scale_up_factor=3, idle_threshold=0.1, busy_threshold=0.8
)
```

---
@@ -140,7 +142,9 @@ scaler.start()
from swarms import AutoScaler

# Initialize the scaler
auto_scaler = AutoScaler(
    initial_agents=15, scale_up_factor=2, idle_threshold=0.2, busy_threshold=0.7
)

# Start the monitoring and task processing
auto_scaler.start()
@@ -161,7 +165,6 @@ auto_scaler.start()
for i in range(100): # Adding tasks
    auto_scaler.add_task(f"Task {i}")
```

@@ -13,19 +13,19 @@ Base class for workflows.
Source Code:

```python
class BaseWorkflow(BaseStructure):
    """
    Base class for workflows.

    Attributes:
        task_pool (list): A list to store tasks.

    Methods:
        add(task: Task = None, tasks: List[Task] = None, *args, **kwargs):
            Adds a task or a list of tasks to the task pool.
        run():
            Abstract method to run the workflow.
    """
```

For the usage examples and additional in-depth documentation please visit [BaseWorkflow](https://github.com/swarms-modules/structs/blob/main/baseworkflow.md#swarms-structs)
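A minimal usage sketch based only on the docstring above; the `swarms.structs` import path and the direct subclassing shown here are assumptions rather than something this diff confirms:

```python
from swarms.structs import BaseWorkflow  # assumed export path


class CountingWorkflow(BaseWorkflow):
    """Toy workflow whose run() just reports how many tasks are queued."""

    def run(self):
        return f"{len(self.task_pool)} task(s) queued"
```

Tasks would then be queued with `add(task=...)` or `add(tasks=[...])` exactly as the docstring describes, and executed by calling `run()`.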

@@ -24,7 +24,7 @@ agents = Agent()
manager = GroupChatManager(groupchat, selector)

# Call the group chat manager passing a specific chat task
result = manager("Discuss the agenda for the upcoming meeting")
```
Explanation:

@@ -67,9 +67,7 @@ class GroupChatManager:
        Returns:
            str: The response from the group chat.
        """
        self.groupchat.messages.append({"role": self.selector.name, "content": task})
        for i in range(self.groupchat.max_round):
            speaker = self.groupchat.select_speaker(
                last_speaker=self.selector, selector=self.selector

@@ -78,16 +78,17 @@ Suppose we have a JSON Schema in `config_schema.json` for application configurat

Now we'll create a subclass `AppConfig` that uses this schema.

```python
from swarms.structs import JSON


class AppConfig(JSON):
    def __init__(self, schema_path):
        super().__init__(schema_path)

    def validate(self, config_data):
        # Here we'll use a JSON Schema validation library like jsonschema
        from jsonschema import ValidationError, validate

        try:
            validate(instance=config_data, schema=self.schema)
        except ValidationError as e:
@@ -95,15 +96,13 @@ class AppConfig(JSON):
            return False
        return True


# Main Example Usage
if __name__ == "__main__":
    config = {"debug": True, "window_size": [800, 600]}

    app_config = AppConfig("config_schema.json")
    if app_config.validate(config):
        print("Config is valid!")

@@ -82,9 +82,11 @@ Executes the given task by all participating agents and aggregates the results t
from swarms.structs.agent import Agent
from swarms.structs.majority_voting import MajorityVoting


def create_agent(name):
    return Agent(name)


agents = [create_agent(name) for name in ["GPT-3", "Codex", "Tabnine"]]
majority_voting = MajorityVoting(agents)
result = majority_voting.run("What is the capital of France?")

@@ -60,7 +60,7 @@ task3 = Task(llm, "Find a hotel in Paris")
# Initialize the NonlinearWorkflow
workflow = NonlinearWorkflow()

# Add tasks to the workflow with dependencies
workflow.add(task1, task2.name)
workflow.add(task2, task3.name)
workflow.add(task3, "OpenAIChat Initialization")

# Execute the workflow
@@ -82,7 +82,7 @@ task3 = Task(llm, "Find a hotel in Paris")
# Initialize the NonlinearWorkflow
workflow = NonlinearWorkflow()

# Add tasks to the workflow with dependencies
workflow.add(task1)
workflow.add(task2, task1.name)
workflow.add(task3, task1.name, task2.name)

# Execute the workflow

@@ -310,9 +310,7 @@ from swarms.structs import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow

# Example usage
api_key = "" # Your actual API key here

# Initialize the language agent
llm = OpenAIChat(
@@ -350,9 +348,7 @@ from swarms.structs import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow

# Example usage
api_key = "" # Your actual API key here

# Initialize the language agent
llm = OpenAIChat(
@@ -393,9 +389,7 @@ from swarms.structs import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow

# Example usage
api_key = "" # Your actual API key here

# Initialize the language agent
llm = OpenAIChat(
@@ -436,9 +430,7 @@ from swarms.structs import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow

# Example usage
api_key = "" # Your actual API key here

# Initialize the language agent
llm = OpenAIChat(

@@ -58,11 +58,13 @@ The design of the `StackOverflowSwarm` class is intended to allow easy tracking
from swarms.structs.agent import Agent
from swarms.structs.stack_overflow_swarm import StackOverflowSwarm


# Define custom Agents with some logic (placeholder for actual Agent implementation)
class CustomAgent(Agent):
    def run(self, conversation, *args, **kwargs):
        return "This is a response from CustomAgent."


# Initialize agents
agent1 = CustomAgent(ai_name="Agent1")
agent2 = CustomAgent(ai_name="Agent2")
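# Hypothetical continuation (sketch): hand the agents to the swarm.
# The exact StackOverflowSwarm constructor arguments are an assumption,
# not something shown in this diff.
swarm = StackOverflowSwarm(agents=[agent1, agent2])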

@@ -12,10 +12,7 @@ The `StepInput` class is defined as follows:
class StepInput(BaseModel):
    __root__: Any = Field(
        ...,
        description=("Input parameters for the task step. Any value is" " allowed."),
        example='{\n"file_to_refactor": "models.py"\n}',
    )
```
@@ -29,10 +26,7 @@ The `StepInput` class is designed to accept any input value, providing flexibili

```python
from swarms.structs import StepInput

input_params = {"file_to_refactor": "models.py", "refactor_method": "code"}
step_input = StepInput(__root__=input_params)
```
@@ -42,10 +36,7 @@ In this example, we import the `StepInput` class from the `swarms.structs` libra

```python
from swarms.structs import StepInput

input_params = {"input_path": "data.csv", "output_path": "result.csv"}
step_input = StepInput(__root__=input_params)
```
@@ -56,7 +47,7 @@ In this example, we again create an instance of `StepInput` by passing a diction
import json

from swarms.structs import StepInput

file_path = "config.json"
with open(file_path) as f:
    input_data = json.load(f)

step_input = StepInput(__root__=input_data)

@@ -28,6 +28,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.add_task("task")
@@ -41,6 +42,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

agent = Agent()
swarm = SwarmNetwork(agents=[agent])
await swarm.async_add_task("task")
@@ -57,6 +59,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.run_single_agent(agent_id, "task")
@@ -72,6 +75,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.run_many_agents("task")
@@ -85,6 +89,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.list_agents()
@@ -98,6 +103,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

agent = Agent()
swarm = SwarmNetwork()
swarm.add_agent(agent)
@@ -111,6 +117,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.remove_agent(agent_id)
@@ -124,6 +131,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

swarm = SwarmNetwork()
swarm.scale_up(num_agents=5)
```
@@ -136,6 +144,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

swarm = SwarmNetwork(agents=[agent1, agent2, agent3, agent4, agent5])
swarm.scale_down(num_agents=2)
```
@@ -146,6 +155,7 @@ The `SwarmNetwork` class has the following parameters:

```python
from swarms.structs.agent import Agent
from swarms.structs.swarm_net import SwarmNetwork

agent = Agent()
swarm = SwarmNetwork(agents=[agent])
swarm.create_apis_for_agents()

@@ -8,8 +8,9 @@

```python
# Example 1: Creating and executing a Task
from swarms.models import OpenAIChat
from swarms.structs import Agent, Task

agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False)
task = Task(agent=agent)
task.execute("What's the weather in miami")

@@ -20,11 +20,14 @@ The `TaskInput` class encapsulates the input parameters in a structured format.

#### Usage Example 1: Using TaskInput for Debugging

```python
from pydantic import BaseModel, Field

from swarms.structs import TaskInput


class DebugInput(TaskInput):
    debug: bool


# Creating an instance of DebugInput
debug_params = DebugInput(__root__={"debug": True})
@@ -35,11 +38,14 @@ print(debug_params.debug) # Output: True

#### Usage Example 2: Using TaskInput for Task Modes

```python
from pydantic import BaseModel, Field

from swarms.structs import TaskInput


class ModeInput(TaskInput):
    mode: str


# Creating an instance of ModeInput
mode_params = ModeInput(__root__={"mode": "benchmarks"})
@@ -50,12 +56,15 @@ print(mode_params.mode) # Output: benchmarks

#### Usage Example 3: Using TaskInput with Arbitrary Parameters

```python
from pydantic import BaseModel, Field

from swarms.structs import TaskInput


class ArbitraryInput(TaskInput):
    message: str
    quantity: int


# Creating an instance of ArbitraryInput
arbitrary_params = ArbitraryInput(__root__={"message": "Hello, world!", "quantity": 5})

@@ -7,13 +7,14 @@ The `swarms.structs` library is a key component of a multi-agent system's task m

## TaskQueueBase Class

```python
import threading
from abc import ABC, abstractmethod

# Include any additional imports that are relevant to decorators and other classes such as Task and Agent if needed

# Definition of the synchronized_queue decorator (if necessary)


class TaskQueueBase(ABC):
    def __init__(self):
        self.lock = threading.Lock()
@@ -27,12 +28,12 @@ class TaskQueueBase(ABC):
    @abstractmethod
    def get_task(self, agent: Agent) -> Task:
        pass

    @synchronized_queue
    @abstractmethod
    def complete_task(self, task_id: str):
        pass

    @synchronized_queue
    @abstractmethod
    def reset_task(self, task_id: str):
@@ -65,11 +66,12 @@ Below are three examples of how the `TaskQueueBase` class can be implemented and

```python
# file: basic_queue.py
# Assume synchronized_queue decorator is defined elsewhere
from decorators import synchronized_queue
from swarms.structs import Agent, Task, TaskQueueBase


class BasicTaskQueue(TaskQueueBase):
    def __init__(self):
@@ -95,6 +97,7 @@ class BasicTaskQueue(TaskQueueBase):
        # Logic to reset the task
        pass


# Usage
queue = BasicTaskQueue()
# Add task, assuming Task object is created

@@ -25,6 +25,7 @@ The `BaseTokenizer` class provides the structure for creating tokenizers. It inc

```python
from swarms.tokenizers import BaseTokenizer


class SimpleTokenizer(BaseTokenizer):
    def count_tokens(self, text: Union[str, List[dict]]) -> int:
@@ -33,10 +34,11 @@ class SimpleTokenizer(BaseTokenizer):
            return len(text.split())
        elif isinstance(text, list):
            # Assume list of dictionaries with 'token' key
            return sum(len(item["token"].split()) for item in text)
        else:
            raise TypeError("Unsupported type for text")


# Usage example
tokenizer = SimpleTokenizer(max_tokens=100)
text = "This is an example sentence to tokenize."

@@ -57,7 +57,7 @@ def count_tokens(self, text: str | list) -> int:
    Args:
        text (str | list): The input text to tokenize.

    Returns:
        int: The number of tokens in the text.
@@ -82,13 +82,14 @@ First, the Cohere client must be initialized and passed in to create an instance

```python
from cohere import Client

from swarms.tokenizers import CohereTokenizer

# Initialize Cohere client with your API key
cohere_client = Client("your-api-key")

# Instantiate the tokenizer
tokenizer = CohereTokenizer(model="your-model-name", client=cohere_client)
```

### Count Tokens Example 1
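The body of this example is not included in the diff; a minimal sketch of what it presumably looks like, reusing the `tokenizer` created above and the `count_tokens(text)` signature documented earlier:

```python
text = "Hello, Cohere tokenizer!"
num_tokens = tokenizer.count_tokens(text)
print(num_tokens)  # number of tokens reported by the Cohere client
```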

@@ -76,7 +76,7 @@ Tokenizes given text when the object is called like a function.
from swarms.tokenizers import HuggingFaceTokenizer

# Initialize the tokenizer with the path to your tokenizer model.
tokenizer = HuggingFaceTokenizer("/path/to/your/model_dir")
```

### 2. Encoding Text

@@ -50,7 +50,7 @@ Given the extensive nature of this class, several examples are provided for each

```python
from swarms.tokenizers import OpenAITokenizer

tokenizer = OpenAITokenizer(model="gpt-4")
```

This example creates a new instance of `OpenAITokenizer` set to work with the GPT-4 model.
@@ -61,7 +61,7 @@ This example creates a new instance of `OpenAITokenizer` set to work with the GP
text = "Hello, this is an example text to tokenize."

# Initialize the tokenizer
tokenizer = OpenAITokenizer(model="gpt-4")

# Count tokens
num_tokens = tokenizer.count_tokens(text)
@@ -78,7 +78,7 @@ messages = [
    {"name": "Bob", "message": "I'm good! Just working on some code."},
]

tokenizer = OpenAITokenizer(model="gpt-3.5-turbo")

# Count tokens for a list of messages
num_tokens = tokenizer.len(messages, model="gpt-3.5-turbo-0613")

@@ -14,7 +14,7 @@ In `SentencePieceTokenizer`, the tokenization process is language-agnostic and e
class SentencePieceTokenizer:
    """
    Tokenizer of sentencepiece.

    Args:
        model_file (str): the path of the tokenizer model
    """
@@ -45,7 +45,7 @@ Parameter | Type | Description

```python
from swarms.tokenizers import SentencePieceTokenizer

tokenizer = SentencePieceTokenizer(model_file="your_model.model")
```

### Properties: Vocabulary Information

@@ -36,16 +36,18 @@ This function does not take any mandatory argument. However, it supports optiona

### Example 1: Basic Usage

```python
import logging

import torch

from swarms.utils import check_device

# Basic usage
device = check_device(
    log_level=logging.INFO,
    memory_threshold=0.8,
    capability_threshold=3.5,
    return_type="list",
)
```
@@ -53,24 +55,24 @@ device = check_device(

```python
import torch

from swarms.utils import check_device

# When CUDA is not available
device = check_device()
print(device) # If CUDA is not available it should return torch.device('cpu')
```

### Example 3: Multiple GPU Available

```python
import torch

from swarms.utils import check_device

# When multiple GPUs are available
device = check_device()
print(device) # Should return a list of available GPU devices
```

## Tips and Additional Information

@@ -57,14 +57,13 @@ Below are three examples of how you might use this function:

Extracting code blocks from a simple markdown string.

```python
from swarms.utils import extract_code_from_markdown

markdown_string = """# Example
This is an example of a code block:
```python
print("Hello World!")
``` """
print(extract_code_from_markdown(markdown_string))
```
@@ -75,13 +74,15 @@ Extracting code blocks from a markdown file.

```python
import re


def extract_code_from_markdown(markdown_content: str) -> str:
    pattern = r"```(?:\w+\n)?(.*?)```"
    matches = re.findall(pattern, markdown_content, re.DOTALL)
    return "\n".join(code.strip() for code in matches)


# Assume that 'example.md' contains multiple code blocks
with open("example.md") as file:
    markdown_content = file.read()

print(extract_code_from_markdown(markdown_content))
```
@@ -93,17 +94,20 @@ Using the function in a pipeline to extract and then analyze code blocks.

```python
import re


def extract_code_from_markdown(markdown_content: str) -> str:
    pattern = r"```(?:\w+\n)?(.*?)```"
    matches = re.findall(pattern, markdown_content, re.DOTALL)
    return "\n".join(code.strip() for code in matches)


def analyze_code_blocks(code: str):
    # Add your analysis logic here
    pass


# Assume that 'example.md' contains multiple code blocks
with open("example.md") as file:
    markdown_content = file.read()

code_blocks = extract_code_from_markdown(markdown_content)
analyze_code_blocks(code_blocks)

@@ -40,15 +40,9 @@ The function `find_image_path` performs text parsing and pattern recognition to

```python
def find_image_path(text):
    pattern = r"([A-Za-z]:\\[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))|(/[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))"
    matches = [match.group() for match in re.finditer(pattern, text) if match.group()]
    matches += [match.replace("\\", "") for match in matches if match]
    existing_paths = [match for match in matches if os.path.exists(match)]
    return max(existing_paths, key=len) if existing_paths else None
```
@@ -75,7 +69,9 @@ Consider the case where the text has multiple image paths.
from swarms.utils import find_image_path

text = "Here is an image path: /home/user/image1.png. Here is another one: C:\\Users\\User\\Documents\\image2.jpeg"
print(
    find_image_path(text)
)  # Outputs: the longest image path (depends on your file system and existing files)
```

**Example 3:**

@@ -73,7 +73,7 @@ from swarms.utils import limit_tokens_from_string
string = "In case the method does not find the specified model, it will fall back to gpt2 model."

# model
model = "gpt-4"

output = limit_tokens_from_string(string, model=model)
```

@@ -50,12 +50,14 @@ This function can be used directly inside your code as shown in the following ex

Loading a model without specifying a device results in the function choosing the most optimal available device automatically.

```python
import torch.nn as nn

from swarms.utils import load_model_torch

# Assume `mymodel.pth` is in the current directory
model_path = "./mymodel.pth"


# Define your model architecture if the model file only contains state dict
class MyModel(nn.Module):
    def __init__(self):
@@ -65,6 +67,7 @@ class MyModel(nn.Module):
    def forward(self, x):
        return self.linear(x)


model = MyModel()

# Load the model

@@ -15,6 +15,8 @@ Let's say you have two functions: `ground_truth` and `generated_func`, that have
@math_eval(ground_truth, generated_func)
def test_func(x):
    return x


result1, result2 = test_func(5)
print(f"Result from ground_truth: {result1}")
print(f"Result from generated_func: {result2}")
@@ -46,6 +48,7 @@ Here's how to implement the `math_eval` decorator:
import functools
import logging


def math_eval(func1, func2):
    """Math evaluation decorator."""
@@ -65,9 +68,7 @@ def math_eval(func1, func2):
            result2 = None
            if result1 != result2:
                logging.warning(f"Outputs do not match: {result1} != {result2}")
            return result1, result2

@@ -70,6 +70,7 @@ def text_generator(self, text: str):
    # language generation implementation goes here
    return tokens


# Instantiate the class and call the decorated function
obj = ClassName()
obj.text_generator("Hello, world!")

@@ -54,7 +54,7 @@ Here is an example of how to use `pdf_to_text`:

```python
# Define the path to the pdf file
pdf_path = "sample.pdf"

# Use the function to extract text
text = pdf_to_text(pdf_path)

@@ -56,7 +56,8 @@ Here are some examples of how you can use the `prep_torch_inference` method. Bef

```python
import torch

from swarms.utils import load_model_torch, prep_torch_inference
```

### Example 1: Load a model for inference on CPU

@@ -94,16 +94,16 @@ def print_class_parameters(cls, api_format: bool = False):
        if api_format:
            param_dict = {}
            for name, param in params.items():
                if name == "self":
                    continue
                param_dict[name] = str(param.annotation)
            return param_dict

        # Print the parameters
        for name, param in params.items():
            if name == "self":
                continue
            print(f"Parameter: {name}, Type: {param.annotation}")

    except Exception as e:
        print(f"An error occurred while inspecting the class: {e}")

@@ -37,43 +37,46 @@ class AbstractWorker:
        Args:
            name (str): Name of the worker.
        """

    @property
    def name(self):
        """Get the name of the worker."""

    def run(self, task: str):
        """Run the worker agent once."""

    def send(
        self, message: Union[Dict, str], recipient, request_reply: Optional[bool] = None
    ):
        """Send a message to another worker."""

    async def a_send(
        self, message: Union[Dict, str], recipient, request_reply: Optional[bool] = None
    ):
        """Send a message to another worker asynchronously."""

    def receive(
        self, message: Union[Dict, str], sender, request_reply: Optional[bool] = None
    ):
        """Receive a message from another worker."""

    async def a_receive(
        self, message: Union[Dict, str], sender, request_reply: Optional[bool] = None
    ):
        """Receive a message from another worker asynchronously."""

    def reset(self):
        """Reset the worker."""

    def generate_reply(
        self, messages: Optional[List[Dict]] = None, sender=None, **kwargs
    ) -> Union[str, Dict, None]:
        """Generate a reply based on received messages."""

    async def a_generate_reply(
        self, messages: Optional[List[Dict]] = None, sender=None, **kwargs
    ) -> Union[str, Dict, None]:
        """Generate a reply based on received messages asynchronously."""
```

### 2.2 Attributes <a name="attributes"></a>
@@ -121,6 +124,7 @@ class MyWorker(AbstractWorker):
    def run(self, task: str):
        print(f"{self.name} is performing task: {task}")


worker = MyWorker("Worker1")
worker.run("Collect data")
```
@@ -155,6 +159,7 @@ The `a_send()` method is an asynchronous version of the `send()` method, allowin

```python
import asyncio


async def main():
    worker1 = AbstractWorker("Worker1")
    worker2 = AbstractWorker("Worker2")
@@ -162,6 +167,7 @@ async def main():
    message = "Hello, Worker2!"
    await worker1.a_send(message, worker2)


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
@@ -208,6 +214,7 @@ The `a_receive()` method is an asynchronous version of the `receive()` method, a

```python
import asyncio


async def main():
    worker1 = AbstractWorker("Worker1")
    worker2 = AbstractWorker("Worker2")
@@ -218,6 +225,7 @@ async def main():
    await worker1.a_receive(message1, worker2)
    await worker1.a_receive(message2, worker2)


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
@@ -233,6 +241,7 @@ class MyWorker(AbstractWorker):
    def reset(self):
        print(f"{self.name} has been reset.")


worker = MyWorker("Worker1")
worker.reset()
```
@@ -253,13 +262,16 @@ The `generate_reply()` method is a placeholder for generating a reply based on r

```python
class MyWorker(AbstractWorker):
    def generate_reply(
        self, messages: Optional[List[Dict]] = None, sender=None, **kwargs
    ) -> Union[str, Dict, None]:
        if messages:
            # Generate a reply based on received messages
            return f"Received {len(messages)} messages from {sender.name}."
        else:
            return None


worker1 = MyWorker("Worker1")
worker2 = MyWorker("Worker2")
@@ -284,6 +296,7 @@ The `a_generate_reply()` method is an asynchronous version of the `generate_repl

```python
import asyncio


async def main():
    worker1 = AbstractWorker("Worker1")
    worker2 = AbstractWorker("Worker2")
@@ -294,6 +307,7 @@ async def main():
    if reply:
        print(f"{worker2.name} generated a reply: {reply}")


loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
@@ -312,12 +326,16 @@ Start by creating a custom worker class that inherits from `AbstractWorker`. Def
class CustomWorker(AbstractWorker):
    def run(self, task: str):
        print(f"{self.name} is performing task: {task}")

    def receive(
        self, message: Union[Dict, str], sender, request_reply: Optional[bool] = None
    ):
        if isinstance(message, str):
            print(f"{self.name} received a text message from {sender.name}: {message}")
        elif isinstance(message, dict):
            print(
                f"{self.name} received a dictionary message from {sender.name}: {message}"
            )
```
### Step 2: Create Custom Worker Instances

@@ -355,7 +373,9 @@ Customize the `generate_reply()` method to allow your workers to generate replie

```python
class CustomWorker(AbstractWorker):
    def generate_reply(
        self, messages: Optional[List[Dict]] = None, sender=None, **kwargs
    ) -> Union[str, Dict, None]:
        if messages:
            # Generate a reply based on received messages
            return f"Received {len(messages)} messages from {sender.name}."

@@ -49,11 +49,11 @@ Makes the Worker class callable. When an instance of the class is called, it wil

### **Example 1**: Basic usage with default parameters:

```python
from swarms import Worker
from swarms.models import OpenAIChat

llm = OpenAIChat(
    # enter your api key
    openai_api_key="",
    temperature=0.5,
)
@@ -195,17 +195,16 @@ response = node.run(task)

# Print the response
print(response)
```

### **Example 3**: Usage with human in the loop:

```python
from swarms import Worker
from swarms.models import OpenAIChat

llm = OpenAIChat(
    # enter your api key
    openai_api_key="",
    temperature=0.5,
)
@@ -223,7 +222,6 @@ node = Worker(
task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
response = node.run(task)
print(response)
```

## **Mathematical Description**:

@@ -1,6 +1,8 @@
# Description: This is an example of how to use the Agent class to run a multi-modal workflow
import os

from dotenv import load_dotenv

from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.structs import Agent

@@ -1,7 +1,8 @@
import multion

from swarms.models.base_llm import AbstractLLM
from swarms.structs.agent import Agent
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.task import Task

@@ -1,8 +1,10 @@
import os

from dotenv import load_dotenv

import swarms.prompts.security_team as stsp
from swarms.models import GPT4VisionAPI
from swarms.structs import Agent

# Load environment variables and initialize the Vision API
load_dotenv()

@@ -3,12 +3,11 @@ import os
from dotenv import load_dotenv

from swarms import (
    Conversation,
    OpenAIChat,
    detect_markdown,
    extract_code_from_markdown,
)
from swarms.tools.code_executor import CodeExecutor

conv = Conversation(

@@ -1,5 +1,6 @@
# Import necessary libraries
from transformers import AutoModelForCausalLM, AutoTokenizer

from swarms import ToolAgent

# Load the pre-trained model and tokenizer

@@ -1,8 +1,10 @@
# Importing necessary modules
import os

from dotenv import load_dotenv

from swarms import OpenAIChat
from swarms.agents.worker_agent import Worker

# Loading environment variables from .env file
load_dotenv()

@@ -1,5 +1,7 @@
import os

from dotenv import load_dotenv

from swarms.models import Anthropic, OpenAIChat
from swarms.prompts.accountant_swarm_prompts import (
    DECISION_MAKING_PROMPT,

@@ -1,9 +1,11 @@
import os
import random

from dotenv import load_dotenv

from swarms.models import OpenAIChat
from swarms.models.stable_diffusion import StableDiffusion
from swarms.structs import Agent

load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")

@@ -1,5 +1,5 @@
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.structs import Agent

llm = GPT4VisionAPI()

@@ -1,4 +1,5 @@
import re

from swarms.models.openai_models import OpenAIChat

@@ -1,7 +1,9 @@
import os

from autotemp import AutoTemp
from termcolor import colored

from swarms.models import OpenAIChat
from swarms.structs import SequentialWorkflow

@@ -6,7 +6,7 @@ This is a simple example of how to use the swarms library to create a swarm of d
The swarm is composed of two agents:
- Documentation agent: writes documentation for a given code snippet.
- Tests agent: writes tests for a given code snippet.

The swarm is initialized with a language model that is used by the agents to generate text. In this example, we use the OpenAI GPT-3 language model.

Agent:
@@ -14,6 +14,7 @@ Documentation agent -> Tests agent
"""

import os

from dotenv import load_dotenv

@@ -1,8 +1,10 @@
import os

from dotenv import load_dotenv

import swarms.prompts.education as edu_prompts
from swarms import Agent, SequentialWorkflow
from swarms.models import OpenAIChat

# Load environment variables
load_dotenv()

@@ -1,5 +1,7 @@
import os

from dotenv import load_dotenv

from swarms.models.gemini import Gemini
from swarms.prompts.react import react_prompt

@@ -1,5 +1,7 @@
import os

from dotenv import load_dotenv

from swarms.models.gemini import Gemini
from swarms.prompts.react import react_prompt

@@ -1,12 +1,12 @@
import os

from dotenv import load_dotenv
from termcolor import colored

from swarms.models import OpenAIChat
from swarms.prompts.code_interpreter import CODE_INTERPRETER
from swarms.prompts.programming import DOCUMENTATION_SOP, TEST_SOP
from swarms.structs import Agent

load_dotenv()

@@ -1,9 +1,8 @@
from swarms.structs import Agent
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
    MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
)
from swarms.structs import Agent

llm = GPT4VisionAPI()

@@ -1,8 +1,10 @@
import os
from dotenv import load_dotenv
from swarms import Agent
from langchain.llms import OpenAIChat
from swarms import Agent

# Loading environment variables from .env file
load_dotenv()

@@ -1,16 +1,18 @@
from swarms.structs import Agent
import os
from dotenv import load_dotenv
from swarms.models import GPT4VisionAPI
from swarms.prompts.logistics import (
    Efficiency_Agent_Prompt,
    Health_Security_Agent_Prompt,
    Quality_Control_Agent_Prompt,
    Productivity_Agent_Prompt,
    Quality_Control_Agent_Prompt,
    Safety_Agent_Prompt,
    Security_Agent_Prompt,
    Sustainability_Agent_Prompt,
    Efficiency_Agent_Prompt,
)
from swarms.structs import Agent

# from swarms.utils.banana_wrapper import banana

@@ -1,6 +1,5 @@
from swarms.structs import Agent
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.structs import Agent

llm = GPT4VisionAPI()

@@ -2,8 +2,8 @@
Multi Modal tree of thoughts that leverages the GPT-4 language model and the
Stable Diffusion model to generate a multimodal output and evaluate the
output based on a metric from 0.0 to 1.0 and then run a search algorithm using DFS and BFS and return the best output.

task: Generate an image of a swarm of bees -> Image generator -> GPT4V evaluates the img from 0.0 to 1.0 -> DFS/BFS -> return the best output
@@ -16,10 +16,12 @@ task: Generate an image of a swarm of bees -> Image generator -> GPT4V evaluates
""" """
import os import os
from dotenv import load_dotenv from dotenv import load_dotenv
from termcolor import colored
from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.models.stable_diffusion import StableDiffusion from swarms.models.stable_diffusion import StableDiffusion
from termcolor import colored
# Load the environment variables # Load the environment variables
load_dotenv() load_dotenv()
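For orientation, the docstring above describes a generate -> evaluate -> search pipeline. A rough sketch of the core loop follows; the `StableDiffusion` constructor argument, the `.run(...)` call shapes, and the score parsing are assumptions rather than the file's actual code, and the real example expands a tree of candidates and searches it with DFS/BFS instead of this flat loop:

```python
import os
import re

from dotenv import load_dotenv
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.models.stable_diffusion import StableDiffusion

load_dotenv()

task = "Generate an image of a swarm of bees"
generator = StableDiffusion(api_key=os.getenv("STABILITY_API_KEY"))  # assumed constructor
evaluator = GPT4VisionAPI(openai_api_key=os.getenv("OPENAI_API_KEY"))

best_score, best_img = -1.0, None
for _ in range(3):  # breadth of the (flattened) search
    img = generator.run(task)  # assumed to return an image path
    review = evaluator.run(f"Rate this image for the task '{task}' from 0.0 to 1.0", img)
    match = re.search(r"\d*\.\d+", str(review))
    score = float(match.group()) if match else 0.0
    if score > best_score:
        best_score, best_img = score, img

print(best_img, best_score)
```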

@@ -1,7 +1,9 @@
import os
import base64
import os
import requests
from dotenv import load_dotenv
from swarms.models import OpenAIChat
from swarms.structs import Agent

@@ -1,5 +1,5 @@
import time
import os
import time
import pygame
import speech_recognition as sr

@@ -1,14 +1,16 @@
from swarms.structs import Agent
import os
from dotenv import load_dotenv
from swarms.models import GPT4VisionAPI
from swarms.prompts.personal_stylist import (
    HAIRCUT_STYLIST_AGENT_PROMPT,
    ACCESSORIES_STYLIST_AGENT_PROMPT,
    MAKEUP_STYLIST_AGENT_PROMPT,
    BEARD_STYLIST_AGENT_PROMPT,
    CLOTHING_STYLIST_AGENT_PROMPT,
    ACCESSORIES_STYLIST_AGENT_PROMPT,
    HAIRCUT_STYLIST_AGENT_PROMPT,
    MAKEUP_STYLIST_AGENT_PROMPT,
)
from swarms.structs import Agent

# Load environment variables
load_dotenv()

@@ -20,6 +20,7 @@ Distribution Agent:
- Optimize writer prompt to create longer and more enjoyable blogs
- Use Local Models like Storywriter
"""
import os
from termcolor import colored

@@ -1,8 +1,11 @@
import os
from dotenv import load_dotenv
from termcolor import colored
import swarms.prompts.security_team as stsp
from swarms.models import GPT4VisionAPI
from swarms.structs import Agent
import swarms.prompts.security_team as stsp

# Load environment variables and initialize the Vision API
load_dotenv()
@@ -11,25 +14,21 @@ api_key = os.getenv("OPENAI_API_KEY")
llm = GPT4VisionAPI(openai_api_key=api_key)

# Image for analysis
img = "bank_robbery.jpg"
# img = "IMG_1617.jpeg"
img = "ubase1.jpeg"
img2 = "ubase2.jpeg"

# Initialize agents with respective prompts for security tasks
crowd_analysis_agent = Agent(
    agent_name="Crowd Analysis Agent",
    llm=llm,
    sop=stsp.CROWD_ANALYSIS_AGENT_PROMPT,
    max_loops=1,
    multi_modal=True,
)
# Facial Recognition Agent is currently not operational
# facial_recognition_agent = Agent(
# llm=llm,
# sop=stsp.FACIAL_RECOGNITION_AGENT_PROMPT,
# max_loops=1,
# multi_modal=True,
# )
weapon_detection_agent = Agent(
    agent_name="Weapon Detection Agent",
    llm=llm,
    sop=stsp.WEAPON_DETECTION_AGENT_PROMPT,
    max_loops=1,
@@ -37,6 +36,7 @@ weapon_detection_agent = Agent(
)

surveillance_monitoring_agent = Agent(
    agent_name="Surveillance Monitoring Agent",
    llm=llm,
    sop=stsp.SURVEILLANCE_MONITORING_AGENT_PROMPT,
    max_loops=1,
@@ -44,37 +44,27 @@ surveillance_monitoring_agent = Agent(
)

emergency_response_coordinator = Agent(
    agent_name="Emergency Response Coordinator",  # "Emergency Response Coordinator
    llm=llm,
    sop=stsp.EMERGENCY_RESPONSE_COORDINATOR_PROMPT,
    max_loops=1,
    multi_modal=True,
)
# Run agents with respective tasks on the same image
crowd_analysis = crowd_analysis_agent.run(
    "Analyze the crowd dynamics in the scene", img
)
colored("Security Team Analysis", "green")
colored("Inspect the scene for any potential threats", "green")
colored("Weapon Detection Analysis", "green")
# Facial Recognition Agent is currently not operational
# facial_recognition_analysis = facial_recognition_agent.run(
# "Identify any known individuals in the scene", img
# )
weapon_detection_analysis = weapon_detection_agent.run(
    "Inspect the scene for any potential threats", img
)
colored("Surveillance Monitoring Analysis", "cyan")
surveillance_monitoring_analysis = surveillance_monitoring_agent.run(
    "Monitor the overall scene for unusual activities", img
)
colored("Emergency Response Analysis", "red")
emergency_response_analysis = emergency_response_coordinator.run(
    "Develop a response plan based on the scene analysis", img
)
# Process and output results for each task
# Example output (uncomment to use):
# print(f"Crowd Analysis: {crowd_analysis}")
# print(f"Weapon Detection Analysis: {weapon_detection_analysis}")
# print(f"Surveillance Monitoring Analysis: {surveillance_monitoring_analysis}")
# print(f"Emergency Response Analysis: {emergency_response_analysis}")

@@ -1,4 +1,4 @@
from swarms import Agent, OpenAIChat, ChromaDB
from swarms import Agent, ChromaDB, OpenAIChat

# Making an instance of the ChromaDB class
memory = ChromaDB(
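The hunk above is cut off mid-statement. For context, a ChromaDB memory is typically finished and attached to an agent roughly as follows; the keyword arguments (`metric`, `output_dir`) and the `long_term_memory` parameter name are assumptions for illustration, not taken from this diff:

```python
from swarms import Agent, ChromaDB, OpenAIChat

# Making an instance of the ChromaDB class (vector store used as agent memory)
memory = ChromaDB(
    metric="cosine",       # assumed: distance metric for retrieval
    output_dir="results",  # assumed: where embeddings/artifacts are stored
)

# Attach the memory to an agent so retrieved context is available at run time
agent = Agent(
    llm=OpenAIChat(),
    max_loops=1,
    long_term_memory=memory,  # assumed parameter name for RAG memory
)

agent.run("Summarize what you remember about previous tasks.")
```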

Some files were not shown because too many files have changed in this diff.