Merge branch 'master' into fix-ci-2

pull/388/head
Wyatt Stanke 11 months ago committed by GitHub
commit 880a0ba67b

Cargo.lock

@ -1,323 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crossbeam-deque"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
[[package]]
name = "either"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"
[[package]]
name = "engine"
version = "0.1.0"
dependencies = [
"log",
"pyo3",
"rayon",
]
[[package]]
name = "indoc"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47741a8bc60fb26eb8d6e0238bbb26d8575ff623fdc97b1a2c00c050b9684ed8"
dependencies = [
"indoc-impl",
"proc-macro-hack",
]
[[package]]
name = "indoc-impl"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce046d161f000fffde5f432a0d034d0341dc152643b2598ed5bfce44c4f3a8f0"
dependencies = [
"proc-macro-hack",
"proc-macro2",
"quote",
"syn",
"unindent",
]
[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
"cfg-if",
]
[[package]]
name = "libc"
version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "lock_api"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "parking_lot"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
"instant",
"lock_api",
"parking_lot_core",
]
[[package]]
name = "parking_lot_core"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
dependencies = [
"cfg-if",
"instant",
"libc",
"redox_syscall",
"smallvec",
"winapi",
]
[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
"paste-impl",
"proc-macro-hack",
]
[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
"proc-macro-hack",
]
[[package]]
name = "proc-macro-hack"
version = "0.5.20+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
[[package]]
name = "proc-macro2"
version = "1.0.78"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae"
dependencies = [
"unicode-ident",
]
[[package]]
name = "pyo3"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d41d50a7271e08c7c8a54cd24af5d62f73ee3a6f6a314215281ebdec421d5752"
dependencies = [
"cfg-if",
"indoc",
"libc",
"parking_lot",
"paste",
"pyo3-build-config",
"pyo3-macros",
"unindent",
]
[[package]]
name = "pyo3-build-config"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "779239fc40b8e18bc8416d3a37d280ca9b9fb04bda54b98037bb6748595c2410"
dependencies = [
"once_cell",
]
[[package]]
name = "pyo3-macros"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b247e8c664be87998d8628e86f282c25066165f1f8dda66100c48202fdb93a"
dependencies = [
"pyo3-macros-backend",
"quote",
"syn",
]
[[package]]
name = "pyo3-macros-backend"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a8c2812c412e00e641d99eeb79dd478317d981d938aa60325dfa7157b607095"
dependencies = [
"proc-macro2",
"pyo3-build-config",
"quote",
"syn",
]
[[package]]
name = "quote"
version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rayon"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
"bitflags",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "smallvec"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "unindent"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

@ -52,7 +52,6 @@
## **Introduction**
Swarms provides automation-as-a-service through swarms of autonomous agents that work together as a team. We enable our customers to build, deploy, and scale production-grade multi-agent applications to automate real-world tasks.
### **Vision**
Our vision for 2024 is to provide the most reliable infrastructure for deploying autonomous agents into the real world through the Swarm Cloud, our premier cloud platform for the scalable deployment of Multi-Modal Autonomous Agents. The platform focuses on delivering maximum value to users by only taking a small fee when utilizing the agents for the hosted compute power needed to host the agents.
@ -69,16 +68,26 @@ The team has thousands of hours building and optimizing autonomous agents. Leade
Key milestones: get 80K framework users in January 2024, start contracts in target verticals, introduce commercial products in 2025 with various pricing models.
## Resources
### **Pre-Seed Pitch Deck**
-- [Here is our pitch deck for our preseed round](https://www.figma.com/file/LlEMXZ48HTIG3S9VzdibaB/Swarm-Pitch-Deck?type=design&node-id=0%3A1&mode=design&t=D3023hPOz27M9RGD-1)
+- [Here is our pitch deck for our preseed round](https://drive.google.com/file/d/1c76gK5UIdrfN4JOSpSlvVBEOpzR9emWc/view?usp=sharing)
### **The Swarm Corporation Memo**
To learn more about our mission, vision, plans for GTM, and much more please refer to the [Swarm Memo here](https://docs.google.com/document/d/1hS_nv_lFjCqLfnJBoF6ULY9roTbSgSuCkvXvSUSc7Lo/edit?usp=sharing)
## **Financial Documents**
This section is dedicated entirely for corporate documents.
- [Cap Table](https://docs.google.com/spreadsheets/d/1wuTWbfhYaY5Xp6nSQ9R0wDtSpwSS9coHxsjKd0UbIDc/edit?usp=sharing)
- [Cashflow Prediction Sheet](https://docs.google.com/spreadsheets/d/1HQEHCIXXMHajXMl5sj8MEfcQtWfOnD7GjHtNiocpD60/edit?usp=sharing)
------
## **Product**
Swarms is an open source framework for developers in python to enable seamless, reliable, and scalable multi-agent orchestration through modularity, customization, and precision.
@ -86,7 +95,7 @@ Swarms is an open source framework for developers in python to enable seamless,
### Product Growth Metrics
| Name | Description | Link |
|----------------------------------|----------------------------------------------------------------------|-------------------------------------------|
| Total Downloads of all time | Total number of downloads for the product over its entire lifespan. | [![Downloads](https://static.pepy.tech/badge/swarms)](https://pepy.tech/project/swarms) |
| Downloads this month | Number of downloads for the product in the current month. | [![Downloads](https://static.pepy.tech/badge/swarms/month)](https://pepy.tech/project/swarms) |
| Total Downloads this week | Total number of downloads for the product in the current week. | [![Downloads](https://static.pepy.tech/badge/swarms/week)](https://pepy.tech/project/swarms) |
@ -98,10 +107,4 @@ Swarms is an open source framework for developers in python to enable seamless,
| Github Traffic Metrics | Metrics related to traffic, such as views and clones on Github. | [Github Traffic Metrics](https://github.com/kyegomez/swarms/graphs/traffic) |
| Issues with the framework | Current open issues for the product on Github. | [![GitHub issues](https://img.shields.io/github/issues/kyegomez/swarms)](https://github.com/kyegomez/swarms/issues) |
-------
## **Corporate Documents**
This section is dedicated entirely for corporate documents.
- [Cap Table](https://docs.google.com/spreadsheets/d/1wuTWbfhYaY5Xp6nSQ9R0wDtSpwSS9coHxsjKd0UbIDc/edit?usp=sharing)

@ -3,7 +3,7 @@ from swarms import Agent, OpenAIChat
## Initialize the workflow
agent = Agent(
llm=OpenAIChat(),
-max_loops="auto",
+max_loops=1,
autosave=True,
dashboard=False,
streaming_on=True,
@ -11,6 +11,4 @@ agent = Agent(
)
# Run the workflow on a task
-agent(
-"Find a chick fil a equivalent in san francisco in hayes valley"
-)
+agent("Find a chick fil a equivalent in hayes valley")

@ -3,8 +3,7 @@ import os
from dotenv import load_dotenv
-from swarms.models.gpt4_vision_api import GPT4VisionAPI
-from swarms.structs import Agent
+from swarms import GPT4VisionAPI, Agent
# Load the environment variables
load_dotenv()

@ -1,71 +1,9 @@
-import multion
+from swarms.agents.multion_agent import MultiOnAgent
import timeit
-from swarms.models.base_llm import AbstractLLM
+from swarms import Agent, ConcurrentWorkflow, Task
-from swarms.structs.agent import Agent
-from swarms.structs.concurrent_workflow import ConcurrentWorkflow
-from swarms.structs.task import Task
class MultiOnAgent(AbstractLLM):
"""
Represents a multi-on agent that performs browsing tasks.
Args:
max_steps (int): The maximum number of steps to perform during browsing.
starting_url (str): The starting URL for browsing.
Attributes:
max_steps (int): The maximum number of steps to perform during browsing.
starting_url (str): The starting URL for browsing.
"""
def __init__(
self,
multion_api_key: str,
max_steps: int = 4,
starting_url: str = "https://www.google.com",
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.multion_api_key = multion_api_key
self.max_steps = max_steps
self.starting_url = starting_url
multion.login(
use_api=True,
# multion_api_key=self.multion_api_key
*args,
**kwargs,
)
def run(self, task: str, *args, **kwargs):
"""
Runs a browsing task.
Args:
task (str): The task to perform during browsing.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Returns:
dict: The response from the browsing task.
"""
response = multion.browse(
{
"cmd": task,
"url": self.starting_url,
"maxSteps": self.max_steps,
},
*args,
**kwargs,
)
return response.result, response.status, response.lastUrl
# model
-model = MultiOnAgent(multion_api_key="")
+model = MultiOnAgent(multion_api_key="api-key")
# out = model.run("search for a recipe")
agent = Agent(
@ -76,19 +14,24 @@ agent = Agent(
system_prompt=None,
)
# logger.info("[Agent][ID][MultiOnAgent][Initialized][Successfully")
# Task
task = Task(
agent=agent,
-description=(
-"send an email to vyom on superhuman for a partnership with"
-" multion"
-),
+description="Download https://www.coachcamel.com/",
)
# Swarm
# logger.info(
# f"Running concurrent workflow with task: {task.description}"
# )
# Measure execution time
start_time = timeit.default_timer()
workflow = ConcurrentWorkflow(
-max_workers=1000,
+max_workers=20,
autosave=True,
print_results=True,
return_results=True,
@ -96,6 +39,9 @@ workflow = ConcurrentWorkflow(
# Add task to workflow
workflow.add(task)
# Run workflow
workflow.run()
# Calculate execution time
execution_time = timeit.default_timer() - start_time
# logger.info(f"Execution time: {execution_time} seconds")
print(f"Execution time: {execution_time} seconds")

@ -1,28 +0,0 @@
from swarms import HierarchicalSwarm
swarm = HierarchicalSwarm(
openai_api_key="key",
model_type="openai",
model_id="gpt-4",
use_vectorstore=False,
use_async=False,
human_in_the_loop=False,
logging_enabled=False,
)
# run the swarm with an objective
result = swarm.run("Design a new car")
# or huggingface
swarm = HierarchicalSwarm(
model_type="huggingface",
model_id="tiaueu/falcon",
use_vectorstore=True,
embedding_size=768,
use_async=False,
human_in_the_loop=True,
logging_enabled=False,
)
# Run the swarm with a particular objective
result = swarm.run("Write a sci-fi short story")

@ -1,11 +1,16 @@
-from swarms.memory import chroma
-chromadbcl = chroma.ChromaClient()
-chromadbcl.add_vectors(
-["This is a document", "BONSAIIIIIII", "the walking dead"]
-)
-results = chromadbcl.search_vectors("zombie", limit=1)
-print(results)
+from swarms.memory import ChromaDB
+# Initialize the memory
+chroma = ChromaDB(
+metric="cosine",
+limit_tokens=1000,
+verbose=True,
+)
+# Add text
+text = "This is a test"
+chroma.add(text)
+# Search for similar text
+similar_text = chroma.query(text)

@ -1,11 +0,0 @@
from swarms import Orchestrator, Worker
# Instantiate the Orchestrator with 10 agents
orchestrator = Orchestrator(
Worker, agent_list=[Worker] * 10, task_queue=[]
)
# Agent 1 sends a message to Agent 2
orchestrator.chat(
sender_id=1, receiver_id=2, message="Hello, Agent 2!"
)

@ -1,5 +1,3 @@
# Example
import os
from dotenv import load_dotenv

@ -1,6 +1,5 @@
from swarms.models import OpenAIChat
-from swarms.swarms import DialogueSimulator
-from swarms.workers.worker import Worker
+from swarms import DialogueSimulator, Worker
llm = OpenAIChat(
model_name="gpt-4", openai_api_key="api-key", temperature=0.5

@ -1,7 +1,14 @@
-from swarms import swarm
-# Use the function
-api_key = "APIKEY"
-objective = "What is the capital of the UK?"
-result = swarm(api_key, objective)
-print(result) # Prints: "The capital of the UK is London."
+from swarms import Agent, OpenAIChat
+## Initialize the workflow
+agent = Agent(
+llm=OpenAIChat(),
+max_loops=1,
+autosave=True,
+dashboard=False,
+streaming_on=True,
+verbose=True,
+)
+# Run the workflow on a task
+agent("Find a chick fil a equivalent in hayes valley")

@ -3,7 +3,7 @@ import os
from dotenv import load_dotenv
from swarms.models import Anthropic, Gemini, Mixtral, OpenAIChat
-from swarms.swarms import ModelParallelizer
+from swarms import ModelParallelizer
load_dotenv()

@ -0,0 +1,86 @@
import os
from dotenv import load_dotenv
from swarms import Agent, OpenAIChat
from swarms.agents.multion_agent import MultiOnAgent
from swarms.memory.chroma_db import ChromaDB
from swarms.tools.tool import tool
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
# Load the environment variables
load_dotenv()
# Memory
chroma_db = ChromaDB()
# MultiOntool
@tool
def multion_tool(
task: str,
api_key: str = os.environ.get("MULTION_API_KEY"),
):
"""
Executes a task using the MultiOnAgent.
Args:
task (str): The task to be executed.
api_key (str, optional): The API key for the MultiOnAgent. Defaults to the value of the MULTION_API_KEY environment variable.
Returns:
The result of the task execution.
"""
multion = MultiOnAgent(multion_api_key=api_key)
return multion(task)
# Execute the interpreter tool
@tool
def execute_interpreter_tool(
code: str,
):
"""
Executes a single command using the interpreter.
Args:
task (str): The command to be executed.
Returns:
None
"""
out = SubprocessCodeInterpreter(debug_mode=True)
out = out.run(code)
return code
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = OpenAIChat(
temperature=0.5,
openai_api_key=api_key,
)
# Initialize the workflow
agent = Agent(
agent_name="Research Agent",
agent_description="An agent that performs research tasks.",
system_prompt="Perform a research task.",
llm=llm,
max_loops=1,
dashboard=True,
# tools=[multion_tool, execute_interpreter_tool],
verbose=True,
long_term_memory=chroma_db,
stopping_token="done",
)
# Run the workflow on a task
out = agent.run(
"Generate a 10,000 word blog on health and wellness, and say done"
" when you are done"
)
print(out)

@ -4,9 +4,9 @@ from swarms import Agent, MajorityVoting, OpenAIChat
llm = OpenAIChat()
# Initialize the agents
-agent1 = Agent(llm=llm, max_loops=1)
-agent2 = Agent(llm=llm, max_loops=1)
-agent3 = Agent(llm=llm, max_loops=1)
+agent1 = Agent(agent_name="worker-1", llm=llm, max_loops=1)
+agent2 = Agent(agent_name="worker-2", llm=llm, max_loops=1)
+agent3 = Agent(agent_name="worker3", llm=llm, max_loops=1)
# Initialize the majority voting

@ -0,0 +1,19 @@
from swarms.structs.agent import Agent
from swarms.structs.message_pool import MessagePool
from swarms import OpenAIChat
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
agent2 = Agent(llm=OpenAIChat(), agent_name="agent2")
agent3 = Agent(llm=OpenAIChat(), agent_name="agent3")
moderator = Agent(agent_name="moderator")
agents = [agent1, agent2, agent3]
message_pool = MessagePool(
agents=agents, moderator=moderator, turns=5
)
message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
message_pool.get_all_messages()
message_pool.get_visible_messages(agent=agent1, turn=1)
message_pool.get_visible_messages(agent=agent2, turn=1)

@ -1,19 +0,0 @@
from swarms import Orchestrator, Worker
node = Worker(
openai_api_key="",
ai_name="Optimus Prime",
)
# Instantiate the Orchestrator with 10 agents
orchestrator = Orchestrator(
node, agent_list=[node] * 10, task_queue=[]
)
# Agent 7 sends a message to Agent 9
orchestrator.chat(
sender_id=7,
receiver_id=9,
message="Can you help me with this task?",
)

@ -1,19 +0,0 @@
from ..swarms import HierarchicalSwarm
# Retrieve your API key from the environment or replace with your actual key
api_key = "sksdsds"
# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(openai_api_key=api_key)
# Define an objective
objective = """
Please develop and serve a simple community web service.
People can signup, login, post, comment.
Post and comment should be visible at once.
I want it to have neumorphism-style.
The ports you can use are 4500 and 6500.
"""
# Run HierarchicalSwarm
swarm.run(objective)

@ -1,16 +0,0 @@
from swarms import HierarchicalSwarm
# Retrieve your API key from the environment or replace with your actual key
api_key = ""
# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(api_key)
# Define an objective
objective = (
"Find 20 potential customers for a HierarchicalSwarm based AI"
" Agent automation infrastructure"
)
# Run HierarchicalSwarm
swarm.run(objective)

@ -1,19 +0,0 @@
from swarms import HierarchicalSwarm
# Retrieve your API key from the environment or replace with your actual key
api_key = "sksdsds"
# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(openai_api_key=api_key)
# Define an objective
objective = """
Please develop and serve a simple web TODO app.
The user can list all TODO items and add or delete each TODO item.
I want it to have neumorphism-style.
The ports you can use are 4500 and 6500.
"""
# Run HierarchicalSwarm
swarm.run(objective)

@ -1,19 +0,0 @@
from swarms.tools.tool import tool
from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs
@tool
def search_api(query: str) -> str:
"""Search API
Args:
query (str): _description_
Returns:
str: _description_
"""
print(f"Searching API for {query}")
tool_docs = scrape_tool_func_docs(search_api)
print(tool_docs)

@ -1,7 +0,0 @@
from swarms.models import OpenAIChat
from swarms.structs.workflow import Workflow
llm = OpenAIChat()
workflow = Workflow(llm)

@ -2,8 +2,8 @@ import os
from dotenv import load_dotenv
-from swarms.models import OpenAIChat
-from swarms.structs import Agent
+from swarms import OpenAIChat, Agent
+from swarms.tools.tool import tool
load_dotenv()
@ -12,24 +12,25 @@ api_key = os.environ.get("OPENAI_API_KEY")
llm = OpenAIChat(api_key=api_key)
-# @tool
-# def search_api(query: str) -> str:
-# """Search API
-# Args:
-# query (str): _description_
-# Returns:
-# str: _description_
-# """
-# print(f"Searching API for {query}")
+@tool
+def search_api(query: str) -> str:
+"""Search API
+Args:
+query (str): _description_
+Returns:
+str: _description_
+"""
+print(f"Searching API for {query}")
## Initialize the workflow
agent = Agent(
llm=llm,
max_loops=5,
-# tools=[search_api],
+tools=[search_api],
dashboard=True,
)

@ -1,22 +0,0 @@
from swarms.tools.tool import tool
from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs
# Define a tool by decorating a function with the tool decorator and providing a docstring
@tool(return_direct=True)
def search_api(query: str):
"""Search the web for the query
Args:
query (str): _description_
Returns:
_type_: _description_
"""
return f"Search results for {query}"
# Scrape the tool func docs to prepare for injection into the agent prompt
out = scrape_tool_func_docs(search_api)
print(out)

@ -1,10 +0,0 @@
from swarms import Workflow
from swarms.models import ChatOpenAI
workflow = Workflow(ChatOpenAI)
workflow.add("What's the weather in miami")
workflow.add("Provide details for {{ parent_output }}")
workflow.add("Summarize the above information: {{ parent_output}}")
workflow.run()

@ -6,7 +6,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
-version = "4.1.6"
+version = "4.1.9"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@ -51,6 +51,7 @@ httpx = "0.24.1"
tiktoken = "0.4.0"
attrs = "22.2.0"
ratelimit = "2.2.1"
loguru = "*"
cohere = "4.24"
huggingface-hub = "*"
pydantic = "1.10.12"
@ -75,6 +76,7 @@ supervision = "*"
scikit-image = "*"
pinecone-client = "*"
roboflow = "*"
multion = "*"

@ -16,6 +16,8 @@ sentencepiece==0.1.98
requests_mock
pypdf==4.0.1
accelerate==0.22.0
loguru
multion
chromadb
tensorflow
optimum

@ -29,48 +29,51 @@ fn my_module(py: Python, m: &PyModule) -> PyResult<()> {
Ok(())
}
/// The function returns `Ok(())` if all modules are processed successfully.
/// Note: This code assumes that the necessary dependencies (`pyo3`, `rayon`, `log`) are already imported and initialized.
///
/// # Arguments
///
/// * `modules` - A vector of `PythonModule` structs representing the Python modules and functions to execute.
/// * `num_threads` - The number of threads to use for parallel processing.
///
/// # Examples
///
/// ```
/// use pyo3::types::PyModule;
/// use pyo3::types::PyResult;
/// use pyo3::prelude::*;
///
/// struct PythonModule<'a> {
/// name: &'a str,
/// function: &'a str,
/// args: Vec<&'a str>,
/// }
///
/// #[pymodule]
/// fn multithreading_processor(modules: Vec<PythonModule>, num_threads: usize) -> Result<(), PythonError> {
/// // Function implementation
/// Ok(())
/// }
/// ```
///
/// # Errors
///
/// Returns a `PythonError` if an import error or a function call error occurs.
///
/// # Panics
///
/// This function does not panic.
///
/// # Safety
///
/// This function is safe to call, but it assumes that the necessary dependencies (`pyo3`, `rayon`, `log`) are already imported and initialized.
// Initialize Python interpreter
#[pyfunction]
fn process_python_modules(modules: Vec<PythonModule>, num_threads: usize) -> Result<(), PythonError> {
/// The function returns `Ok(())` if all modules are processed successfully.
/// Note: This code assumes that the necessary dependencies (`pyo3`, `rayon`, `log`) are already imported and initialized.
///
/// # Arguments
///
/// * `modules` - A vector of `PythonModule` structs representing the Python modules and functions to execute.
/// * `num_threads` - The number of threads to use for parallel processing.
///
/// # Examples
///
/// ```
/// use pyo3::types::PyModule;
/// use pyo3::types::PyResult;
/// use pyo3::prelude::*;
///
/// struct PythonModule<'a> {
/// name: &'a str,
/// function: &'a str,
/// args: Vec<&'a str>,
/// }
///
/// #[pymodule]
/// fn multithreading_processor(modules: Vec<PythonModule>, num_threads: usize) -> Result<(), PythonError> {
/// // Function implementation
/// Ok(())
/// }
/// ```
///
/// # Errors
///
/// Returns a `PythonError` if an import error or a function call error occurs.
///
/// # Panics
///
/// This function does not panic.
///
/// # Safety
///
/// This function is safe to call, but it assumes that the necessary dependencies (`pyo3`, `rayon`, `log`) are already imported and initialized.
// Initialize Python interpreter
let gil = Python::acquire_gil();
let py = gil.python();

@ -16,6 +16,7 @@ from swarms.agents.stopping_conditions import (
)
from swarms.agents.tool_agent import ToolAgent
from swarms.agents.worker_agent import Worker
from swarms.agents.multion_agent import MultiOnAgent
__all__ = [
"AbstractAgent",
@ -34,4 +35,5 @@ __all__ = [
"check_end", "check_end",
"Worker", "Worker",
"agent_wrapper", "agent_wrapper",
"MultiOnAgent",
] ]

@ -0,0 +1,69 @@
import os
import multion
from swarms.models.base_llm import AbstractLLM
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Multion key
MULTION_API_KEY = os.getenv("MULTION_API_KEY")
class MultiOnAgent(AbstractLLM):
"""
Represents a multi-on agent that performs browsing tasks.
Args:
max_steps (int): The maximum number of steps to perform during browsing.
starting_url (str): The starting URL for browsing.
Attributes:
max_steps (int): The maximum number of steps to perform during browsing.
starting_url (str): The starting URL for browsing.
"""
def __init__(
self,
multion_api_key: str = MULTION_API_KEY,
max_steps: int = 4,
starting_url: str = "https://www.google.com",
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.multion_api_key = multion_api_key
self.max_steps = max_steps
self.starting_url = starting_url
def run(self, task: str, *args, **kwargs):
"""
Runs a browsing task.
Args:
task (str): The task to perform during browsing.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Returns:
dict: The response from the browsing task.
"""
multion.login(
use_api=True,
multion_api_key=str(self.multion_api_key),
*args,
**kwargs,
)
response = multion.browse(
{
"cmd": task,
"url": self.starting_url,
"maxSteps": self.max_steps,
},
*args,
**kwargs,
)
return response.result, response.status, response.lastUrl

@ -1,12 +1,12 @@
import os
-from typing import Any, List
+from typing import List
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_experimental.autonomous_agents import AutoGPT
from swarms.tools.tool import BaseTool
from swarms.utils.decorators import error_decorator, timing_decorator
@ -48,7 +48,7 @@ class Worker:
temperature: float = 0.5,
llm=None,
openai_api_key: str = None,
-tools: List[Any] = None,
+tools: List[BaseTool] = None,
embedding_size: int = 1536,
search_kwargs: dict = {"k": 8},
verbose: bool = False,
@ -165,7 +165,7 @@ class Worker:
# @log_decorator
@error_decorator
@timing_decorator
-def run(self, task: str = None, img=None, *args, **kwargs):
+def run(self, task: str = None, *args, **kwargs):
"""
Run the autonomous agent on a given task.
@ -195,7 +195,7 @@ class Worker:
- `results`: The results of the agent's processing.
"""
try:
-results = self.run(task, *args, **kwargs)
-return results
+result = self.agent.run([task], *args, **kwargs)
+return result
except Exception as error:
raise RuntimeError(f"Error while running agent: {error}")

@ -1,77 +0,0 @@
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import IO
from pypdf import PdfReader
from swarms.utils.hash import str_to_hash
@dataclass
class TextArtifact:
text: str
@dataclass
class PDFLoader:
"""
A class for loading PDF files and extracting text artifacts.
Args:
tokenizer (str): The tokenizer to use for chunking the text.
max_tokens (int): The maximum number of tokens per chunk.
Methods:
load(source, password=None, *args, **kwargs):
Load a single PDF file and extract text artifacts.
load_collection(sources, password=None, *args, **kwargs):
Load a collection of PDF files and extract text artifacts.
Private Methods:
_load_pdf(stream, password=None):
Load a PDF file and extract text artifacts.
Attributes:
tokenizer (str): The tokenizer used for chunking the text.
max_tokens (int): The maximum number of tokens per chunk.
"""
tokenizer: str
max_tokens: int
def __post_init__(self):
self.chunker = PdfChunker(
tokenizer=self.tokenizer, max_tokens=self.max_tokens
)
def load(
self,
source: str | IO | Path,
password: str | None = None,
*args,
**kwargs,
) -> list[TextArtifact]:
return self._load_pdf(source, password)
def load_collection(
self,
sources: list[str | IO | Path],
password: str | None = None,
*args,
**kwargs,
) -> dict[str, list[TextArtifact]]:
return {
str_to_hash(str(s)): self._load_pdf(s, password)
for s in sources
}
def _load_pdf(
self, stream: str | IO | Path, password: str | None
) -> list[TextArtifact]:
reader = PdfReader(stream, strict=True, password=password)
return [
TextArtifact(text=p.extract_text()) for p in reader.pages
]

@ -1,16 +1,23 @@
############################################ LLMs
from swarms.models.anthropic import Anthropic # noqa: E402
# 3############ Embedding models
from swarms.models.base_embedding_model import BaseEmbeddingModel
from swarms.models.base_llm import AbstractLLM # noqa: E402
################# MultiModal Models
from swarms.models.base_multimodal_model import (
BaseMultiModalModel,
-) # noqa: E402
+)
# noqa: E402
from swarms.models.biogpt import BioGPT # noqa: E402
from swarms.models.clipq import CLIPQ # noqa: E402
# from swarms.models.dalle3 import Dalle3
# from swarms.models.distilled_whisperx import DistilWhisperModel # noqa: E402
# from swarms.models.whisperx_model import WhisperX # noqa: E402
# from swarms.models.kosmos_two import Kosmos # noqa: E402
# from swarms.models.cog_agent import CogAgent # noqa: E402
## Function calling models
from swarms.models.fire_function import (
FireFunctionCaller,
)
from swarms.models.fuyu import Fuyu # noqa: E402
from swarms.models.gemini import Gemini # noqa: E402
from swarms.models.gigabind import Gigabind # noqa: E402
@ -20,9 +27,9 @@ from swarms.models.idefics import Idefics # noqa: E402
from swarms.models.kosmos_two import Kosmos # noqa: E402
from swarms.models.layoutlm_document_qa import (
LayoutLMDocumentQA,
-) # noqa: E402
+)
-# from swarms.models.vip_llava import VipLlavaMultiModal # noqa: E402
+# noqa: E402
from swarms.models.llava import LavaMultiModal # noqa: E402
from swarms.models.mistral import Mistral # noqa: E402
from swarms.models.mixtral import Mixtral # noqa: E402
@ -32,18 +39,18 @@ from swarms.models.openai_models import (
AzureOpenAI,
OpenAI,
OpenAIChat,
-) # noqa: E402
+)
# noqa: E402
from swarms.models.openai_tts import OpenAITTS # noqa: E402
from swarms.models.petals import Petals # noqa: E402
from swarms.models.qwen import QwenVLMultiModal # noqa: E402
from swarms.models.roboflow_model import RoboflowMultiModal
from swarms.models.sam_supervision import SegmentAnythingMarkGenerator
##### Utils
from swarms.models.sampling_params import (
SamplingParams,
SamplingType,
-) # noqa: E402
+)
from swarms.models.timm import TimmModel # noqa: E402
# from swarms.models.modelscope_pipeline import ModelScopePipeline
@ -62,26 +69,19 @@ from swarms.models.types import ( # noqa: E402
)
from swarms.models.ultralytics_model import (
UltralyticsModel,
-) # noqa: E402
+)
# noqa: E402
from swarms.models.vilt import Vilt # noqa: E402
from swarms.models.wizard_storytelling import (
WizardLLMStoryTeller,
-) # noqa: E402
+)
# noqa: E402
# from swarms.models.vllm import vLLM # noqa: E402
from swarms.models.zephyr import Zephyr # noqa: E402
from swarms.models.zeroscope import ZeroscopeTTV # noqa: E402
# from swarms.models.dalle3 import Dalle3
# from swarms.models.distilled_whisperx import DistilWhisperModel # noqa: E402
# from swarms.models.whisperx_model import WhisperX # noqa: E402
# from swarms.models.kosmos_two import Kosmos # noqa: E402
# from swarms.models.cog_agent import CogAgent # noqa: E402
################# Tokenizers
__all__ = [
"AbstractLLM",
"Anthropic",
@ -100,7 +100,6 @@ __all__ = [
"HuggingfaceLLM", "HuggingfaceLLM",
"MPT7B", "MPT7B",
"WizardLLMStoryTeller", "WizardLLMStoryTeller",
# "GPT4Vision",
# "Dalle3", # "Dalle3",
# "DistilWhisperModel", # "DistilWhisperModel",
"GPT4VisionAPI", "GPT4VisionAPI",
@ -118,7 +117,6 @@ __all__ = [
"TogetherLLM", "TogetherLLM",
"TimmModel", "TimmModel",
"UltralyticsModel", "UltralyticsModel",
# "VipLlavaMultiModal",
"LavaMultiModal", "LavaMultiModal",
"QwenVLMultiModal", "QwenVLMultiModal",
"CLIPQ", "CLIPQ",
@ -129,4 +127,5 @@ __all__ = [
"SegmentAnythingMarkGenerator", "SegmentAnythingMarkGenerator",
"SamplingType", "SamplingType",
"SamplingParams", "SamplingParams",
"FireFunctionCaller",
] ]

@ -0,0 +1,528 @@
import base64
import os
import time
from io import BytesIO
from typing import List, Literal, Optional, Tuple, Union
import torch
from PIL import Image
from pydantic import BaseModel, Field
from transformers import (
AutoModelForCausalLM,
LlamaTokenizer,
TextIteratorStreamer,
)
from swarms.models.base_multimodal_model import BaseMultiModalModel
from swarms.utils.logger import logger
MODEL_PATH = "THUDM/cogvlm-chat-hf"
TOKENIZER_PATH = "lmsys/vicuna-7b-v1.5"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
QUANT_ENABLED = False
class ImageUrl(BaseModel):
url: str
class TextContent(BaseModel):
type: Literal["text"]
text: str
class ImageUrlContent(BaseModel):
type: Literal["image_url"]
image_url: ImageUrl
ContentItem = Union[TextContent, ImageUrlContent]
class ChatMessageInput(BaseModel):
role: Literal["user", "assistant", "system"]
content: Union[str, List[ContentItem]]
name: Optional[str] = None
class ChatMessageResponse(BaseModel):
role: Literal["assistant"]
content: str = None
name: Optional[str] = None
class DeltaMessage(BaseModel):
role: Optional[Literal["user", "assistant", "system"]] = None
content: Optional[str] = None
class ChatCompletionRequest(BaseModel):
model: str
messages: List[ChatMessageInput]
temperature: Optional[float] = 0.8
top_p: Optional[float] = 0.8
max_tokens: Optional[int] = None
stream: Optional[bool] = False
# Additional parameters
repetition_penalty: Optional[float] = 1.0
class ChatCompletionResponseChoice(BaseModel):
index: int
message: ChatMessageResponse
class ChatCompletionResponseStreamChoice(BaseModel):
index: int
delta: DeltaMessage
class UsageInfo(BaseModel):
prompt_tokens: int = 0
total_tokens: int = 0
completion_tokens: Optional[int] = 0
class ChatCompletionResponse(BaseModel):
model: str
object: Literal["chat.completion", "chat.completion.chunk"]
choices: List[
Union[
ChatCompletionResponseChoice,
ChatCompletionResponseStreamChoice,
]
]
created: Optional[int] = Field(
default_factory=lambda: int(time.time())
)
usage: Optional[UsageInfo] = None
# async def create_chat_completion(request: ChatCompletionRequest):
# global model, tokenizer
# gen_params = dict(
# messages=request.messages,
# temperature=request.temperature,
# top_p=request.top_p,
# max_tokens=request.max_tokens or 1024,
# echo=False,
# stream=request.stream,
# )
# # if request.stream:
# # predict(request.model, gen_params)
# # response = generate_cogvlm(model, tokenizer, gen_params)
# usage = UsageInfo()
# message = ChatMessageResponse(
# role="assistant",
# content=response["text"],
# )
# logger.debug(f"==== message ====\n{message}")
# choice_data = ChatCompletionResponseChoice(
# index=0,
# message=message,
# )
# task_usage = UsageInfo.model_validate(response["usage"])
# for usage_key, usage_value in task_usage.model_dump().items():
# setattr(
# usage, usage_key, getattr(usage, usage_key) + usage_value
# )
# return ChatCompletionResponse(
# model=request.model,
# choices=[choice_data],
# object="chat.completion",
# usage=usage,
# )
class CogVLMMultiModal(BaseMultiModalModel):
"""
Initializes the CogVLM model.
Args:
model_name (str): The path or name of the pre-trained model.
tokenizer (str): The path or name of the tokenizer.
device (str): The device to run the model on.
quantize (bool): Whether to enable quantization.
torch_type (str): The torch data type to use.
temperature (float): The temperature for sampling.
top_p (float): The top-p value for sampling.
max_tokens (int): The maximum number of tokens to generate.
echo (bool): Whether to echo the input text.
stream (bool): Whether to stream the output.
repetition_penalty (float): The repetition penalty for sampling.
do_sample (bool): Whether to use sampling during generation.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
Methods:
run: Generates a response using the CogVLM model.
generate_stream_cogvlm: Generates a stream of responses using the CogVLM model in inference mode.
process_history_and_images: Processes history messages to extract text, identify the last user query, and convert base64 encoded image URLs to PIL images.
Example:
>>> model = CogVLMMultiModal()
>>> response = model("Describe this image with meticulous details.", "https://example.com/image.jpg")
>>> print(response)
"""
def __init__(
self,
model_name: str = MODEL_PATH,
tokenizer: str = TOKENIZER_PATH,
device: str = DEVICE,
quantize: bool = QUANT_ENABLED,
torch_type: str = "float16",
temperature: float = 0.5,
top_p: float = 0.9,
max_tokens: int = 3500,
echo: bool = False,
stream: bool = False,
repetition_penalty: float = 1.0,
do_sample: bool = True,
*args,
**kwargs,
):
super().__init__()
self.model_name = model_name
self.device = device
self.tokenizer = tokenizer
self.device = device
self.quantize = quantize
self.torch_type = torch_type
self.temperature = temperature
self.top_p = top_p
self.max_tokens = max_tokens
self.echo = echo
self.stream = stream
self.repetition_penalty = repetition_penalty
self.do_sample = do_sample
if os.environ.get("QUANT_ENABLED"):
pass
else:
with torch.cuda.device(device):
__, total_bytes = torch.cuda.mem_get_info()
total_gb = total_bytes / (1 << 30)
if total_gb < 40:
pass
else:
pass
torch.cuda.empty_cache()
self.tokenizer = LlamaTokenizer.from_pretrained(
tokenizer, trust_remote_code=True
)
if (
torch.cuda.is_available()
and torch.cuda.get_device_capability()[0] >= 8
):
torch_type = torch.bfloat16
else:
torch_type = torch.float16
print(
f"========Use torch type as:{torch_type} with"
f" device:{device}========\n\n"
)
if "cuda" in device:
if QUANT_ENABLED:
self.model = AutoModelForCausalLM.from_pretrained(
model_name,
load_in_4bit=True,
trust_remote_code=True,
torch_dtype=torch_type,
low_cpu_mem_usage=True,
*args,
**kwargs,
).eval()
else:
self.model = (
AutoModelForCausalLM.from_pretrained(
model_name,
load_in_4bit=False,
trust_remote_code=True,
torch_dtype=torch_type,
low_cpu_mem_usage=True,
*args,
**kwargs,
)
.to(device)
.eval()
)
else:
self.model = (
AutoModelForCausalLM.from_pretrained(
model_name,
trust_remote_code=True,
*args,
**kwargs,
)
.float()
.to(device)
.eval()
)
def run(self, task: str, img: str, *args, **kwargs):
"""
Generates a response using the CogVLM model. It processes the chat history and image data, if any,
and then invokes the model to generate a response.
"""
messages = [task]
params = dict(
messages=messages,
temperature=self.temperature,
repetition_penalty=self.repetition_penalty,
top_p=self.top_p,
max_new_tokens=self.max_tokens,
)
for response in self.generate_stream_cogvlm(params):
pass
return response
@torch.inference_mode()
def generate_stream_cogvlm(
self,
params: dict,
):
"""
Generates a stream of responses using the CogVLM model in inference mode.
It's optimized to handle continuous input-output interactions with the model in a streaming manner.
"""
messages = params["messages"]
temperature = float(params.get("temperature", 1.0))
repetition_penalty = float(
params.get("repetition_penalty", 1.0)
)
top_p = float(params.get("top_p", 1.0))
max_new_tokens = int(params.get("max_tokens", 256))
query, history, image_list = self.process_history_and_images(
messages
)
logger.debug(f"==== request ====\n{query}")
input_by_model = self.model.build_conversation_input_ids(
self.tokenizer,
query=query,
history=history,
images=[image_list[-1]],
)
inputs = {
"input_ids": (
input_by_model["input_ids"]
.unsqueeze(0)
.to(self.device)
),
"token_type_ids": (
input_by_model["token_type_ids"]
.unsqueeze(0)
.to(self.device)
),
"attention_mask": (
input_by_model["attention_mask"]
.unsqueeze(0)
.to(self.device)
),
"images": [
[
input_by_model["images"][0]
.to(self.device)
.to(self.torch_type)
]
],
}
if (
"cross_images" in input_by_model
and input_by_model["cross_images"]
):
inputs["cross_images"] = [
[
input_by_model["cross_images"][0]
.to(self.device)
.to(self.torch_type)
]
]
input_echo_len = len(inputs["input_ids"][0])
streamer = TextIteratorStreamer(
tokenizer=self.tokenizer,
timeout=60.0,
skip_prompt=True,
skip_special_tokens=True,
)
gen_kwargs = {
"repetition_penalty": repetition_penalty,
"max_new_tokens": max_new_tokens,
"do_sample": True if temperature > 1e-5 else False,
"top_p": top_p if temperature > 1e-5 else 0,
"streamer": streamer,
}
if temperature > 1e-5:
gen_kwargs["temperature"] = temperature
total_len = 0
generated_text = ""
with torch.no_grad():
self.model.generate(**inputs, **gen_kwargs)
for next_text in streamer:
generated_text += next_text
yield {
"text": generated_text,
"usage": {
"prompt_tokens": input_echo_len,
"completion_tokens": (
total_len - input_echo_len
),
"total_tokens": total_len,
},
}
ret = {
"text": generated_text,
"usage": {
"prompt_tokens": input_echo_len,
"completion_tokens": total_len - input_echo_len,
"total_tokens": total_len,
},
}
yield ret
def process_history_and_images(
self,
messages: List[ChatMessageInput],
) -> Tuple[
Optional[str],
Optional[List[Tuple[str, str]]],
Optional[List[Image.Image]],
]:
"""
Process history messages to extract text, identify the last user query,
and convert base64 encoded image URLs to PIL images.
Args:
messages(List[ChatMessageInput]): List of ChatMessageInput objects.
return: A tuple of three elements:
- The last user query as a string.
- Text history formatted as a list of tuples for the model.
- List of PIL Image objects extracted from the messages.
"""
formatted_history = []
image_list = []
last_user_query = ""
for i, message in enumerate(messages):
role = message.role
content = message.content
# Extract text content
if isinstance(content, list): # text
text_content = " ".join(
item.text
for item in content
if isinstance(item, TextContent)
)
else:
text_content = content
# Extract image data
if isinstance(content, list): # image
for item in content:
if isinstance(item, ImageUrlContent):
image_url = item.image_url.url
if image_url.startswith(
"data:image/jpeg;base64,"
):
base64_encoded_image = image_url.split(
"data:image/jpeg;base64,"
)[1]
image_data = base64.b64decode(
base64_encoded_image
)
image = Image.open(
BytesIO(image_data)
).convert("RGB")
image_list.append(image)
# Format history
if role == "user":
if i == len(messages) - 1:
last_user_query = text_content
else:
formatted_history.append((text_content, ""))
elif role == "assistant":
if formatted_history:
if formatted_history[-1][1] != "":
assert False, (
"the last query is answered. answer"
f" again. {formatted_history[-1][0]},"
f" {formatted_history[-1][1]},"
f" {text_content}"
)
formatted_history[-1] = (
formatted_history[-1][0],
text_content,
)
else:
assert False, "assistant reply before user"
else:
assert False, f"unrecognized role: {role}"
return last_user_query, formatted_history, image_list
async def predict(self, params: dict):
"""
Handle streaming predictions. It continuously generates responses for a given input stream.
This is particularly useful for real-time, continuous interactions with the model.
"""
choice_data = ChatCompletionResponseStreamChoice(
index=0,
delta=DeltaMessage(role="assistant"),
finish_reason=None,
)
chunk = ChatCompletionResponse(
model=self.model_name,
choices=[choice_data],
object="chat.completion.chunk",
)
yield f"{chunk.model_dump_json(exclude_unset=True)}"
previous_text = ""
for new_response in self.generate_stream_cogvlm(params):
decoded_unicode = new_response["text"]
delta_text = decoded_unicode[len(previous_text) :]
previous_text = decoded_unicode
delta = DeltaMessage(
content=delta_text,
role="assistant",
)
choice_data = ChatCompletionResponseStreamChoice(
index=0,
delta=delta,
)
chunk = ChatCompletionResponse(
model=self.model_name,
choices=[choice_data],
object="chat.completion.chunk",
)
yield f"{chunk.model_dump_json(exclude_unset=True)}"
choice_data = ChatCompletionResponseStreamChoice(
index=0,
delta=DeltaMessage(),
)
chunk = ChatCompletionResponse(
model=self.model_name,
choices=[choice_data],
object="chat.completion.chunk",
)
yield f"{chunk.model_dump_json(exclude_unset=True)}"

@ -0,0 +1,87 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
import json
from swarms.models.base_llm import AbstractLLM
from typing import Any
class FireFunctionCaller(AbstractLLM):
"""
A class that represents a caller for the FireFunction model.
Args:
model_name (str): The name of the model to be used.
device (str): The device to be used.
function_spec (Any): The specification of the function.
max_tokens (int): The maximum number of tokens.
system_prompt (str): The system prompt.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Methods:
run(self, task: str, *args, **kwargs) -> None: Run the function with the given task and arguments.
Examples:
>>> fire_function_caller = FireFunctionCaller()
>>> fire_function_caller.run("Add 2 and 3")
"""
def __init__(
self,
model_name: str = "fireworks-ai/firefunction-v1",
device: str = "cuda",
function_spec: Any = None,
max_tokens: int = 3000,
system_prompt: str = "You are a helpful assistant with access to functions. Use them if required.",
*args,
**kwargs,
):
super().__init__(model_name, device)
self.model_name = model_name
self.device = device
self.function_spec = function_spec
self.max_tokens = max_tokens
self.system_prompt = system_prompt
self.model = AutoModelForCausalLM.from_pretrained(
model_name, device_map="auto", *args, **kwargs
)
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.functions = json.dumps(function_spec, indent=4)
def run(self, task: str, *args, **kwargs):
"""
Run the function with the given task and arguments.
Args:
task (str): The task to be performed.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
None
"""
messages = [
{"role": "functions", "content": self.functions},
{
"role": "system",
"content": self.system_prompt,
},
{
"role": "user",
"content": task,
},
]
model_inputs = self.tokenizer.apply_chat_template(
messages, return_tensors="pt"
).to(self.model.device)
generated_ids = self.model.generate(
model_inputs,
max_new_tokens=self.max_tokens,
*args,
**kwargs,
)
decoded = self.tokenizer.batch_decode(generated_ids)
print(decoded[0])
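As a usage sketch for FireFunctionCaller, assuming an OpenAI-style JSON schema for the function specification (the schema below is illustrative and not part of this diff):

function_spec = {
    "name": "add_numbers",
    "description": "Add two integers.",
    "parameters": {
        "type": "object",
        "properties": {
            "a": {"type": "integer"},
            "b": {"type": "integer"},
        },
        "required": ["a", "b"],
    },
}

caller = FireFunctionCaller(function_spec=function_spec, max_tokens=256)
caller.run("Add 2 and 3")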

@ -96,9 +96,7 @@ class Nougat:
# Convert the matches to a readable format
cleaned_data = [
- "Date: {}, Amount: {}".format(
-     date, amount.replace(",", "")
- )
+ f"Date: {date}, Amount: {amount.replace(',', '')}"
for date, amount in matches
]

@ -0,0 +1,43 @@
from unittest.mock import MagicMock
from swarms.models.fire_function import FireFunctionCaller
def test_fire_function_caller_run(mocker, capsys):
# Create mock model and tokenizer
model = MagicMock()
tokenizer = MagicMock()
# Patch the pretrained loaders so no real weights are downloaded
mocker.patch(
"swarms.models.fire_function.AutoModelForCausalLM.from_pretrained",
return_value=model,
)
mocker.patch(
"swarms.models.fire_function.AutoTokenizer.from_pretrained",
return_value=tokenizer,
)
# Create mock task and arguments
task = "Add 2 and 3"
args = (2, 3)
kwargs = {}
# Create mock generated_ids and decoded output
generated_ids = [1, 2, 3]
decoded_output = "5"
model.generate.return_value = generated_ids
tokenizer.batch_decode.return_value = [decoded_output]
# Create FireFunctionCaller instance
fire_function_caller = FireFunctionCaller()
# Run the function
fire_function_caller.run(task, *args, **kwargs)
# Assert model.generate was called with the correct inputs
model.generate.assert_called_once_with(
tokenizer.apply_chat_template.return_value.to.return_value,
max_new_tokens=fire_function_caller.max_tokens,
*args,
**kwargs,
)
# Assert tokenizer.batch_decode was called with the correct inputs
tokenizer.batch_decode.assert_called_once_with(generated_ids)
# Assert the decoded output is printed
captured = capsys.readouterr()
assert decoded_output in captured.out

@ -3,12 +3,12 @@ import datetime
time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def worker_tools_sop_promp(name: str, memory: str): def worker_tools_sop_promp(name: str, memory: str, time=time):
out = """ out = """
You are {name}, You are {name},
Your decisions must always be made independently without seeking user assistance. Your decisions must always be made independently without seeking user assistance.
Play to your strengths as an LLM and pursue simple strategies with no legal complications. Play to your strengths as an LLM and pursue simple strategies with no legal complications.
If you have completed all your tasks, make sure to use the "finish" command. If you have completed all your tasks, make sure to use the 'finish' command.
GOALS: GOALS:
@ -19,11 +19,11 @@ def worker_tools_sop_promp(name: str, memory: str):
1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files. 1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.
2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember. 2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.
3. No user assistance 3. No user assistance
4. Exclusively use the commands listed in double quotes e.g. "command name" 4. Exclusively use the commands listed in double quotes e.g. 'command name'
Commands: Commands:
1. finish: use this to signal that you have finished all your objectives, args: "response": "final response to let people know you have finished your objectives" 1. finish: use this to signal that you have finished all your objectives, args: 'response': 'final response to let people know you have finished your objectives'
Resources: Resources:
@ -42,17 +42,17 @@ def worker_tools_sop_promp(name: str, memory: str):
You should only respond in JSON format as described below You should only respond in JSON format as described below
Response Format: Response Format:
{ {
"thoughts": { 'thoughts': {
"text": "thought", 'text': 'thoughts',
"reasoning": "reasoning", 'reasoning': 'reasoning',
"plan": "- short bulleted - list that conveys - long-term plan", 'plan': '- short bulleted - list that conveys - long-term plan',
"criticism": "constructive self-criticism", 'criticism': 'constructive self-criticism',
"speak": "thoughts summary to say to user" 'speak': 'thoughts summary to say to user'
}, },
"command": { 'command': {
"name": "command name", 'name': 'command name',
"args": { 'args': {
"arg name": "value" 'arg name': 'value'
} }
} }
} }
@ -62,6 +62,6 @@ def worker_tools_sop_promp(name: str, memory: str):
[{memory}] [{memory}]
Human: Determine which next command to use, and respond using the format specified above: Human: Determine which next command to use, and respond using the format specified above:
""".format(name=name, memory=memory, time=time) """.format(name=name, time=time, memory=memory)
return str(out) return str(out)
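A quick illustration of calling the updated prompt builder; the worker name and memory string below are placeholder values:

prompt = worker_tools_sop_promp(
    name="Worker-1",
    memory="[no prior memory]",
)
print(prompt)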

@ -3,10 +3,12 @@ import json
import logging import logging
import os import os
import random import random
import sys
import time import time
import uuid import uuid
from typing import Any, Callable, Dict, List, Optional, Tuple from typing import Any, Callable, Dict, List, Optional, Tuple
from loguru import logger
from termcolor import colored from termcolor import colored
from swarms.memory.base_vectordb import AbstractVectorDatabase from swarms.memory.base_vectordb import AbstractVectorDatabase
@ -22,7 +24,6 @@ from swarms.tools.exec_tool import execute_tool_by_name
from swarms.tools.tool import BaseTool from swarms.tools.tool import BaseTool
from swarms.utils.code_interpreter import SubprocessCodeInterpreter from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.data_to_text import data_to_text from swarms.utils.data_to_text import data_to_text
from swarms.utils.logger import logger
from swarms.utils.parse_code import extract_code_from_markdown from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text from swarms.utils.pdf_to_text import pdf_to_text
from swarms.utils.token_count_tiktoken import limit_tokens_from_string from swarms.utils.token_count_tiktoken import limit_tokens_from_string
@ -30,6 +31,7 @@ from swarms.utils.video_to_frames import (
save_frames_as_images, save_frames_as_images,
video_to_frames, video_to_frames,
) )
import yaml
# Utils # Utils
@ -51,10 +53,18 @@ def agent_id():
return str(uuid.uuid4()) return str(uuid.uuid4())
# Task ID generator
def task_id(): def task_id():
"""
Generate a unique task ID.
Returns:
str: A string representation of a UUID.
"""
return str(uuid.uuid4()) return str(uuid.uuid4())
# Step ID generator
def step_id(): def step_id():
return str(uuid.uuid1()) return str(uuid.uuid1())
@ -194,6 +204,12 @@ class Agent:
callback: Optional[Callable] = None, callback: Optional[Callable] = None,
metadata: Optional[Dict[str, Any]] = None, metadata: Optional[Dict[str, Any]] = None,
callbacks: Optional[List[Callable]] = None, callbacks: Optional[List[Callable]] = None,
logger_handler: Any = sys.stderr,
search_algorithm: Optional[Callable] = None,
logs_to_filename: Optional[str] = None,
evaluator: Optional[Callable] = None,
output_json: bool = False,
stopping_func: Optional[Callable] = None,
*args, *args,
**kwargs, **kwargs,
): ):
@ -243,9 +259,16 @@ class Agent:
self.callback = callback self.callback = callback
self.metadata = metadata self.metadata = metadata
self.callbacks = callbacks self.callbacks = callbacks
self.logger_handler = logger_handler
self.search_algorithm = search_algorithm
self.logs_to_filename = logs_to_filename
self.evaluator = evaluator
self.output_json = output_json
self.stopping_func = stopping_func
# The max_loops will be set dynamically if the dynamic_loop # The max_loops will be set dynamically if the dynamic_loop
if self.dynamic_loops: if self.dynamic_loops:
logger.info("Dynamic loops enabled")
self.max_loops = "auto" self.max_loops = "auto"
# If multimodal = yes then set the sop to the multimodal sop # If multimodal = yes then set the sop to the multimodal sop
@ -260,7 +283,9 @@ class Agent:
self.feedback = [] self.feedback = []
# Initialize the code executor # Initialize the code executor
self.code_executor = SubprocessCodeInterpreter() self.code_executor = SubprocessCodeInterpreter(
debug_mode=True,
)
# If the preset stopping token is enabled then set the stopping token to the preset stopping token # If the preset stopping token is enabled then set the stopping token to the preset stopping token
if preset_stopping_token: if preset_stopping_token:
@ -279,11 +304,12 @@ class Agent:
self.get_docs_from_doc_folders() self.get_docs_from_doc_folders()
# If tokenizer and context length exists then: # If tokenizer and context length exists then:
if self.tokenizer and self.context_length: # if self.tokenizer and self.context_length:
self.truncate_history() # self.truncate_history()
if verbose: # If verbose is enabled then set the logger level to info
logger.setLevel(logging.DEBUG) # if verbose:
# logger.setLevel(logging.INFO)
# If tools are provided then set the tool prompt by adding to sop # If tools are provided then set the tool prompt by adding to sop
if self.tools: if self.tools:
@ -308,6 +334,21 @@ class Agent:
# Step cache # Step cache
self.step_cache = [] self.step_cache = []
# Set the logger handler
if logger_handler:
logger.add(
f"{self.agent_name}.log",
level="INFO",
colorize=True,
format=(
"<green>{time}</green> <level>{message}</level>"
),
backtrace=True,
diagnose=True,
)
# logger.info("Creating Agent {}".format(self.agent_name))
def set_system_prompt(self, system_prompt: str): def set_system_prompt(self, system_prompt: str):
"""Set the system prompt""" """Set the system prompt"""
self.system_prompt = system_prompt self.system_prompt = system_prompt
@ -342,6 +383,7 @@ class Agent:
if hasattr(self.llm, "temperature"): if hasattr(self.llm, "temperature"):
# Randomly change the temperature attribute of self.llm object # Randomly change the temperature attribute of self.llm object
self.llm.temperature = random.uniform(0.0, 1.0) self.llm.temperature = random.uniform(0.0, 1.0)
logger.info(f"Temperature: {self.llm.temperature}")
else: else:
# Use a default temperature # Use a default temperature
self.llm.temperature = 0.7 self.llm.temperature = 0.7
@ -359,6 +401,7 @@ class Agent:
def add_task_to_memory(self, task: str): def add_task_to_memory(self, task: str):
"""Add the task to the memory""" """Add the task to the memory"""
try: try:
logger.info(f"Adding task to memory: {task}")
self.short_memory.add(f"{self.user_name}: {task}") self.short_memory.add(f"{self.user_name}: {task}")
except Exception as error: except Exception as error:
print( print(
@ -370,6 +413,7 @@ class Agent:
def add_message_to_memory(self, message: str): def add_message_to_memory(self, message: str):
"""Add the message to the memory""" """Add the message to the memory"""
try: try:
logger.info(f"Adding message to memory: {message}")
self.short_memory.add( self.short_memory.add(
role=self.agent_name, content=message role=self.agent_name, content=message
) )
@ -585,24 +629,57 @@ class Agent:
) )
print(response) print(response)
if self.output_json:
response = extract_code_from_markdown(
response
)
# Add the response to the history # Add the response to the history
history.append(response) history.append(response)
# Log each step # Log each step
step = Step( step = Step(
input=task, input=str(task),
task_id=task_id, task_id=str(task_id),
step_id=step_id, step_id=str(step_id),
output=response, output=str(response),
status="running",
) )
# Check to see if stopping token is in the output to stop the loop if self.evaluator:
evaluated_response = self.evaluator(
response
)
out = (
f"Response: {response}\nEvaluated"
f" Response: {evaluated_response}"
)
out = self.short_memory.add(
"Evaluator", out
)
# Stopping logic for agents
if self.stopping_token: if self.stopping_token:
# Check if the stopping token is in the response
if self.stopping_token in response:
break
if self.stopping_condition:
if self._check_stopping_condition( if self._check_stopping_condition(
response response
) or parse_done_token(response): ):
break
if self.parse_done_token:
if parse_done_token(response):
break break
if self.stopping_func is not None:
if self.stopping_func(response) is True:
break
# If the stopping condition is met then break
self.step_cache.append(step) self.step_cache.append(step)
logging.info(f"Step: {step}") logging.info(f"Step: {step}")
@ -679,20 +756,6 @@ class Agent:
""" """
self.run(task, img, *args, **kwargs) self.run(task, img, *args, **kwargs)
def _run(self, **kwargs: Any) -> str:
"""Run the agent on a task
Returns:
str: _description_
"""
try:
task = self.format_prompt(**kwargs)
response, history = self._generate(task, task)
logging.info(f"Message history: {history}")
return response
except Exception as error:
print(colored(f"Error running agent: {error}", "red"))
def agent_history_prompt( def agent_history_prompt(
self, self,
history: str = None, history: str = None,
@ -710,13 +773,12 @@ class Agent:
if self.sop: if self.sop:
system_prompt = self.system_prompt system_prompt = self.system_prompt
agent_history_prompt = f""" agent_history_prompt = f"""
SYSTEM_PROMPT: {system_prompt} role: system
{system_prompt}
Follow this standard operating procedure (SOP) to complete tasks: Follow this standard operating procedure (SOP) to complete tasks:
{self.sop} {self.sop}
-----------------
################ CHAT HISTORY ####################
{history} {history}
""" """
return agent_history_prompt return agent_history_prompt
@ -758,6 +820,7 @@ class Agent:
Returns: Returns:
_type_: _description_ _type_: _description_
""" """
logger.info(f"Adding memory: {message}")
return self.short_memory.add( return self.short_memory.add(
role=self.agent_name, content=message role=self.agent_name, content=message
) )
@ -770,10 +833,12 @@ class Agent:
tasks (List[str]): A list of tasks to run. tasks (List[str]): A list of tasks to run.
""" """
try: try:
logger.info(f"Running concurrent tasks: {tasks}")
task_coroutines = [ task_coroutines = [
self.run_async(task, **kwargs) for task in tasks self.run_async(task, **kwargs) for task in tasks
] ]
completed_tasks = await asyncio.gather(*task_coroutines) completed_tasks = await asyncio.gather(*task_coroutines)
logger.info(f"Completed tasks: {completed_tasks}")
return completed_tasks return completed_tasks
except Exception as error: except Exception as error:
print( print(
@ -789,6 +854,7 @@ class Agent:
def bulk_run(self, inputs: List[Dict[str, Any]]) -> List[str]: def bulk_run(self, inputs: List[Dict[str, Any]]) -> List[str]:
try: try:
"""Generate responses for multiple input sets.""" """Generate responses for multiple input sets."""
logger.info(f"Running bulk tasks: {inputs}")
return [self.run(**input_data) for input_data in inputs] return [self.run(**input_data) for input_data in inputs]
except Exception as error: except Exception as error:
print(colored(f"Error running bulk run: {error}", "red")) print(colored(f"Error running bulk run: {error}", "red"))
@ -881,6 +947,7 @@ class Agent:
""" """
try: try:
logger.info(f"Running a single step: {task}")
# Generate the response using lm # Generate the response using lm
response = self.llm(task, **kwargs) response = self.llm(task, **kwargs)
@ -960,6 +1027,7 @@ class Agent:
""" """
logger.info(f"Adding response filter: {filter_word}")
self.reponse_filters.append(filter_word) self.reponse_filters.append(filter_word)
def apply_reponse_filters(self, response: str) -> str: def apply_reponse_filters(self, response: str) -> str:
@ -967,6 +1035,9 @@ class Agent:
Apply the response filters to the response Apply the response filters to the response
""" """
logger.info(
f"Applying response filters to response: {response}"
)
for word in self.response_filters: for word in self.response_filters:
response = response.replace(word, "[FILTERED]") response = response.replace(word, "[FILTERED]")
return response return response
@ -978,11 +1049,13 @@ class Agent:
response = agent.filtered_run("Generate a report on finance") response = agent.filtered_run("Generate a report on finance")
print(response) print(response)
""" """
logger.info(f"Running filtered task: {task}")
raw_response = self.run(task) raw_response = self.run(task)
return self.apply_response_filters(raw_response) return self.apply_response_filters(raw_response)
def interactive_run(self, max_loops: int = 5) -> None: def interactive_run(self, max_loops: int = 5) -> None:
"""Interactive run mode""" """Interactive run mode"""
logger.info("Running in interactive mode")
response = input("Start the cnversation") response = input("Start the cnversation")
for i in range(max_loops): for i in range(max_loops):
@ -992,27 +1065,21 @@ class Agent:
# Get user input # Get user input
response = input("You: ") response = input("You: ")
def streamed_generation(self, prompt: str) -> str: def save_to_yaml(self, file_path: str) -> None:
""" """
Stream the generation of the response Save the agent to a YAML file
Args: Args:
prompt (str): The prompt to use file_path (str): The path to the YAML file
Example:
# Feature 4: Streamed generation
response = agent.streamed_generation("Generate a report on finance")
print(response)
""" """
tokens = list(prompt) try:
response = "" logger.info(f"Saving agent to YAML file: {file_path}")
for token in tokens: with open(file_path, "w") as f:
time.sleep(0.1) yaml.dump(self.__dict__, f)
response += token except Exception as error:
print(token, end="", flush=True) print(
print() colored(f"Error saving agent to YAML: {error}", "red")
return response )
def save_state(self, file_path: str) -> None: def save_state(self, file_path: str) -> None:
""" """
@ -1025,6 +1092,7 @@ class Agent:
>>> agent.save_state('saved_flow.json') >>> agent.save_state('saved_flow.json')
""" """
try: try:
logger.info(f"Saving agent state to: {file_path}")
state = { state = {
"agent_id": str(self.id), "agent_id": str(self.id),
"agent_name": self.agent_name, "agent_name": self.agent_name,
@ -1104,54 +1172,55 @@ class Agent:
>>> agent.run("Continue with the task") >>> agent.run("Continue with the task")
""" """
with open(file_path) as f: try:
state = json.load(f) with open(file_path) as f:
state = json.load(f)
# Restore other saved attributes
self.id = state.get("agent_id", self.id) # Restore other saved attributes
self.agent_name = state.get("agent_name", self.agent_name) self.id = state.get("agent_id", self.id)
self.agent_description = state.get( self.agent_name = state.get("agent_name", self.agent_name)
"agent_description", self.agent_description self.agent_description = state.get(
) "agent_description", self.agent_description
self.system_prompt = state.get( )
"system_prompt", self.system_prompt self.system_prompt = state.get(
) "system_prompt", self.system_prompt
self.sop = state.get("sop", self.sop) )
self.short_memory = state.get("short_memory", []) self.sop = state.get("sop", self.sop)
self.max_loops = state.get("max_loops", 5) self.short_memory = state.get("short_memory", [])
self.loop_interval = state.get("loop_interval", 1) self.max_loops = state.get("max_loops", 5)
self.retry_attempts = state.get("retry_attempts", 3) self.loop_interval = state.get("loop_interval", 1)
self.retry_interval = state.get("retry_interval", 1) self.retry_attempts = state.get("retry_attempts", 3)
self.interactive = state.get("interactive", False) self.retry_interval = state.get("retry_interval", 1)
self.interactive = state.get("interactive", False)
print(f"Agent state loaded from {file_path}")
print(f"Agent state loaded from {file_path}")
except Exception as error:
print(
colored(f"Error loading agent state: {error}", "red")
)
def retry_on_failure( def retry_on_failure(
self, function, retries: int = 3, retry_delay: int = 1 self,
function: callable,
retries: int = 3,
retry_delay: int = 1,
): ):
"""Retry wrapper for LLM calls.""" """Retry wrapper for LLM calls."""
attempt = 0 try:
while attempt < retries: logger.info(f"Retrying function: {function}")
try: attempt = 0
return function() while attempt < retries:
except Exception as error: try:
logging.error(f"Error generating response: {error}") return function()
attempt += 1 except Exception as error:
time.sleep(retry_delay) logging.error(
raise Exception("All retry attempts failed") f"Error generating response: {error}"
)
def generate_reply(self, history: str, **kwargs) -> str: attempt += 1
""" time.sleep(retry_delay)
Generate a response based on initial or task raise Exception("All retry attempts failed")
""" except Exception as error:
prompt = f""" print(colored(f"Error retrying function: {error}", "red"))
SYSTEM_PROMPT: {self.system_prompt}
History: {history}
"""
response = self.llm(prompt, **kwargs)
return {"role": self.agent_name, "content": response}
def update_system_prompt(self, system_prompt: str): def update_system_prompt(self, system_prompt: str):
"""Upddate the system message""" """Upddate the system message"""
@ -1181,9 +1250,13 @@ class Agent:
""" """
text -> parse_code by looking for code inside 6 backticks `````-> run_code text -> parse_code by looking for code inside 6 backticks `````-> run_code
""" """
parsed_code = extract_code_from_markdown(code) try:
run_code = self.code_executor.run(parsed_code) logger.info(f"Running code: {code}")
return run_code parsed_code = extract_code_from_markdown(code)
run_code = self.code_executor.run(parsed_code)
return run_code
except Exception as error:
logger.debug(f"Error running code: {error}")
def pdf_connector(self, pdf: str = None): def pdf_connector(self, pdf: str = None):
"""Transforms the pdf into text """Transforms the pdf into text
@ -1194,9 +1267,13 @@ class Agent:
Returns: Returns:
_type_: _description_ _type_: _description_
""" """
pdf = pdf or self.pdf_path try:
text = pdf_to_text(pdf) pdf = pdf or self.pdf_path
return text text = pdf_to_text(pdf)
return text
except Exception as error:
print(f"Error connecting to the pdf: {error}")
raise error
def pdf_chunker(self, text: str = None, num_limits: int = 1000): def pdf_chunker(self, text: str = None, num_limits: int = 1000):
"""Chunk the pdf into sentences """Chunk the pdf into sentences
@ -1220,12 +1297,15 @@ class Agent:
Returns: Returns:
_type_: _description_ _type_: _description_
""" """
for doc in docs: try:
data = data_to_text(doc) for doc in docs:
data = data_to_text(doc)
return self.short_memory.add( return self.short_memory.add(
role=self.user_name, content=data role=self.user_name, content=data
) )
except Exception as error:
print(colored(f"Error ingesting docs: {error}", "red"))
def ingest_pdf(self, pdf: str): def ingest_pdf(self, pdf: str):
"""Ingest the pdf into the memory """Ingest the pdf into the memory
@ -1236,22 +1316,37 @@ class Agent:
Returns: Returns:
_type_: _description_ _type_: _description_
""" """
text = pdf_to_text(pdf) try:
return self.short_memory.add( logger.info(f"Ingesting pdf: {pdf}")
role=self.user_name, content=text text = pdf_to_text(pdf)
) return self.short_memory.add(
role=self.user_name, content=text
)
except Exception as error:
print(colored(f"Error ingesting pdf: {error}", "red"))
def receieve_mesage(self, name: str, message: str): def receieve_mesage(self, name: str, message: str):
"""Receieve a message""" """Receieve a message"""
message = f"{name}: {message}" try:
return self.short_memory.add(role=name, content=message) message = f"{name}: {message}"
return self.short_memory.add(role=name, content=message)
except Exception as error:
print(colored(f"Error receiving message: {error}", "red"))
def send_agent_message( def send_agent_message(
self, agent_name: str, message: str, *args, **kwargs self, agent_name: str, message: str, *args, **kwargs
): ):
"""Send a message to the agent""" """Send a message to the agent"""
message = f"{agent_name}: {message}" try:
return self.run(message, *args, **kwargs) logger.info(f"Sending agent message: {message}")
message = f"{agent_name}: {message}"
return self.run(message, *args, **kwargs)
except Exception as error:
print(
colored(
f"Error sending agent message: {error}", "red"
)
)
def truncate_history(self): def truncate_history(self):
""" """
@ -1278,13 +1373,22 @@ class Agent:
def get_docs_from_doc_folders(self): def get_docs_from_doc_folders(self):
"""Get the docs from the files""" """Get the docs from the files"""
# Get the list of files then extract them and add them to the memory try:
files = os.listdir(self.docs_folder) logger.info("Getting docs from doc folders")
# Get the list of files then extract them and add them to the memory
files = os.listdir(self.docs_folder)
# Extract the text from the files # Extract the text from the files
for file in files: for file in files:
text = data_to_text(file) text = data_to_text(file)
return self.short_memory.add( return self.short_memory.add(
role=self.user_name, content=text role=self.user_name, content=text
) )
except Exception as error:
print(
colored(
f"Error getting docs from doc folders: {error}",
"red",
)
)
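To make the new Agent keyword arguments concrete, a minimal construction sketch; the evaluator and stopping-function bodies are illustrative assumptions, not part of this diff:

from swarms import OpenAIChat
from swarms.structs.agent import Agent


def stop_on_done(response: str) -> bool:
    # Hypothetical stopping function: end the loop once the model emits <DONE>
    return "<DONE>" in response


agent = Agent(
    llm=OpenAIChat(),
    max_loops=3,
    evaluator=lambda response: f"Response length: {len(response)}",  # hypothetical evaluator
    stopping_func=stop_on_done,
    output_json=False,
    logs_to_filename="agent_run.log",
)
agent.run("Summarize the current CI status")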

@ -43,7 +43,7 @@ class AsyncWorkflow:
loop: Optional[asyncio.AbstractEventLoop] = None
stopping_condition: Optional[Callable] = None
- async def add(self, task: Any, tasks: List[Any]):
+ async def add(self, task: Any = None, tasks: List[Any] = None):
"""Add tasks to the workflow"""
try:
if tasks:

@ -13,7 +13,7 @@ class LongContextSwarmLeader:
- agents (List[Agent]): The agents in the swarm. - agents (List[Agent]): The agents in the swarm.
- prompt_template_json (str): The SOP template in JSON format. - prompt_template_json (str): The SOP template in JSON format.
- return_parsed (bool): Whether to return the parsed output. - return_parsed (bool): Whether to return the parsed output.
""" """
def __init__( def __init__(
@ -30,17 +30,16 @@ class LongContextSwarmLeader:
self.agents = agents self.agents = agents
self.prompt_template_json = prompt_template_json self.prompt_template_json = prompt_template_json
self.return_parsed = return_parsed self.return_parsed = return_parsed
# Create an instance of the Agent class # Create an instance of the Agent class
self.agent = Agent( self.agent = Agent(
llm=llm, llm=llm,
system_prompt=None, system_prompt=None,
sop=self.prompt_template_json, sop=self.prompt_template_json,
*args, *args,
**kwargs **kwargs,
) )
def prep_schema(self, task: str, *args, **kwargs): def prep_schema(self, task: str, *args, **kwargs):
""" """
Returns a formatted string containing the metadata of all agents in the swarm. Returns a formatted string containing the metadata of all agents in the swarm.
@ -71,16 +70,15 @@ class LongContextSwarmLeader:
""" """
for agent in self.agents: for agent in self.agents:
prompt += f"Member Name: {agent.ai_name}\nMember ID: {agent.id}\nMember Description: {agent.description}\n\n" prompt += (
f"Member Name: {agent.ai_name}\nMember ID:"
f" {agent.id}\nMember Description:"
f" {agent.description}\n\n"
)
return prompt return prompt
def prep_schema_second(self, task_description: str, task: str):
def prep_schema_second(
self,
task_description: str,
task: str
):
prompt = f""" prompt = f"""
You are the leader of a team of {len(self.agents)} You are the leader of a team of {len(self.agents)}
members. Your team will need to collaborate to members. Your team will need to collaborate to
@ -115,7 +113,6 @@ class LongContextSwarmLeader:
""" """
return prompt return prompt
def run(self, task: str, *args, **kwargs): def run(self, task: str, *args, **kwargs):
""" """
@ -131,12 +128,13 @@ class LongContextSwarmLeader:
""" """
task = self.prep_schema(task) task = self.prep_schema(task)
out = self.agent.run(task, *args, **kwargs) out = self.agent.run(task, *args, **kwargs)
if self.return_parsed: if self.return_parsed:
out = extract_code_from_markdown(out) out = extract_code_from_markdown(out)
return out return out
# class LongContextSwarm(BaseSwarm): # class LongContextSwarm(BaseSwarm):
# def __init__( # def __init__(
# self, # self,

@ -7,7 +7,21 @@ from typing import Any, List
from swarms.structs.agent import Agent from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation from swarms.structs.conversation import Conversation
from swarms.utils.logger import logger from loguru import logger
import sys
# Configure loguru logger with advanced settings
logger.remove()
logger.add(
sys.stderr,
colorize=True,
format="<green>{time}</green> <level>{message}</level>",
backtrace=True,
diagnose=True,
enqueue=True,
catch=True,
)
def extract_last_python_code_block(text): def extract_last_python_code_block(text):
@ -157,11 +171,18 @@ class MajorityVoting:
time_enabled=True, *args, **kwargs time_enabled=True, *args, **kwargs
) )
# # Configure logging # If autosave is enabled, save the conversation to a file
# self.logging.basicConfig( if self.autosave:
# level=logging.INFO, self.conversation.save()
# format="%(asctime)s - %(levelname)s - %(message)s",
# ) # Log the agents
logger.info("Initializing majority voting system")
# Length of agents
logger.info(f"Number of agents: {len(self.agents)}")
logger.info(
"Agents:"
f" {', '.join(agent.agent_name for agent in self.agents)}"
)
def run(self, task: str, *args, **kwargs) -> List[Any]: def run(self, task: str, *args, **kwargs) -> List[Any]:
""" """
@ -176,10 +197,11 @@ class MajorityVoting:
List[Any]: The majority vote. List[Any]: The majority vote.
""" """
# Route to each agent # Route to each agent
if self.concurrent: if self.concurrent:
with concurrent.futures.ThreadPoolExecutor() as executor: with concurrent.futures.ThreadPoolExecutor() as executor:
# Log the agents
logger.info("Running agents concurrently")
futures = [ futures = [
executor.submit(agent.run, task, *args) executor.submit(agent.run, task, *args)
for agent in self.agents for agent in self.agents
@ -191,6 +213,7 @@ class MajorityVoting:
) )
] ]
elif self.multithreaded: elif self.multithreaded:
logger.info("Running agents using multithreading")
with concurrent.futures.ThreadPoolExecutor() as executor: with concurrent.futures.ThreadPoolExecutor() as executor:
results = [ results = [
executor.submit(agent.run, task, *args) executor.submit(agent.run, task, *args)
@ -198,6 +221,7 @@ class MajorityVoting:
] ]
results = [future.result() for future in results] results = [future.result() for future in results]
elif self.multiprocess: elif self.multiprocess:
logger.info("Running agents using multiprocessing")
with Pool() as pool: with Pool() as pool:
results = pool.starmap( results = pool.starmap(
Agent.run, Agent.run,
@ -218,6 +242,8 @@ class MajorityVoting:
# Add responses to conversation and log them # Add responses to conversation and log them
for agent, response in zip(self.agents, results): for agent, response in zip(self.agents, results):
logger.info(f"[{agent.agent_id}][{response}]")
response = ( response = (
response if isinstance(response, list) else [response] response if isinstance(response, list) else [response]
) )
@ -227,6 +253,9 @@ class MajorityVoting:
# Perform majority voting on the conversation # Perform majority voting on the conversation
majority_vote = majority_voting(self.conversation.responses) majority_vote = majority_voting(self.conversation.responses)
# Log the majority vote
logger.info(f"Majority vote: {majority_vote}")
# If an output parser is provided, parse the output # If an output parser is provided, parse the output
if self.output_parser: if self.output_parser:
majority_vote = self.output_parser( majority_vote = self.output_parser(

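For reference, a minimal sketch of driving the majority-voting flow above; the constructor arguments mirror the attributes referenced in this diff (agents, concurrent, autosave), while the import path and agent setup are assumptions:

from swarms import OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.majority_voting import MajorityVoting  # import path assumed

agents = [
    Agent(llm=OpenAIChat(), agent_name=f"agent{i}", max_loops=1)
    for i in range(3)
]

mv = MajorityVoting(agents=agents, concurrent=True, autosave=False)
answer = mv.run("What is the capital of France?")
print(answer)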
@ -0,0 +1,214 @@
import hashlib
from time import time_ns
from typing import Callable, List, Optional, Sequence, Union
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.loguru_logger import logger
def _hash(input: str):
"""
Hashes the input string using SHA256 algorithm.
Args:
input (str): The string to be hashed.
Returns:
str: The hexadecimal representation of the hash value.
"""
hex_dig = hashlib.sha256(input.encode("utf-8")).hexdigest()
return hex_dig
def msg_hash(
agent: Agent, content: str, turn: int, msg_type: str = "text"
):
"""
Generate a hash value for a message.
Args:
agent (Agent): The agent sending the message.
content (str): The content of the message.
turn (int): The turn number of the message.
msg_type (str, optional): The type of the message. Defaults to "text".
Returns:
int: The hash value of the message.
"""
time = time_ns()
return _hash(
f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:"
f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}"
)
class MessagePool(BaseSwarm):
"""
A class representing a message pool for agents in a swarm.
Attributes:
agents (Optional[Sequence[Agent]]): The list of agents in the swarm.
moderator (Optional[Agent]): The moderator agent.
turns (Optional[int]): The number of turns.
routing_function (Optional[Callable]): The routing function for message distribution.
show_names (Optional[bool]): Flag indicating whether to show agent names.
messages (List[Dict]): The list of messages in the pool.
Examples:
>>> from swarms.structs.agent import Agent
>>> from swarms.structs.message_pool import MessagePool
>>> agent1 = Agent(agent_name="agent1")
>>> agent2 = Agent(agent_name="agent2")
>>> agent3 = Agent(agent_name="agent3")
>>> moderator = Agent(agent_name="moderator")
>>> agents = [agent1, agent2, agent3]
>>> message_pool = MessagePool(agents=agents, moderator=moderator, turns=5)
>>> message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
>>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
>>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
>>> message_pool.get_all_messages()
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent1, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
>>> message_pool.get_visible_messages(agent=agent2, turn=1)
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
"""
def __init__(
self,
agents: Optional[Sequence[Agent]] = None,
moderator: Optional[Agent] = None,
turns: Optional[int] = 5,
routing_function: Optional[Callable] = None,
show_names: Optional[bool] = False,
autosave: Optional[bool] = False,
*args,
**kwargs,
):
super().__init__()
self.agent = agents
self.moderator = moderator
self.turns = turns
self.routing_function = routing_function
self.show_names = show_names
self.autosave = autosave
self.messages = []
logger.info("MessagePool initialized")
logger.info(f"Number of agents: {len(agents)}")
logger.info(
f"Agents: {[agent.agent_name for agent in agents]}"
)
logger.info(f"moderator: {moderator.agent_name} is available")
logger.info(f"Number of turns: {turns}")
def add(
self,
agent: Agent,
content: str,
turn: int,
visible_to: Union[str, List[str]] = "all",
logged: bool = True,
):
"""
Add a message to the pool.
Args:
agent (Agent): The agent sending the message.
content (str): The content of the message.
turn (int): The turn number.
visible_to (Union[str, List[str]], optional): The agents who can see the message. Defaults to "all".
logged (bool, optional): Flag indicating whether the message should be logged. Defaults to True.
"""
self.messages.append(
{
"agent": agent,
"content": content,
"turn": turn,
"visible_to": visible_to,
"logged": logged,
}
)
logger.info(f"Message added: {content}")
def reset(self):
"""
Reset the message pool.
"""
self.messages = []
logger.info("MessagePool reset")
def last_turn(self):
"""
Get the last turn number.
Returns:
int: The last turn number.
"""
if len(self.messages) == 0:
return 0
else:
return self.messages[-1]["turn"]
@property
def last_message(self):
"""
Get the last message in the pool.
Returns:
dict: The last message.
"""
if len(self.messages) == 0:
return None
else:
return self.messages[-1]
def get_all_messages(self):
"""
Get all messages in the pool.
Returns:
List[Dict]: The list of all messages.
"""
return self.messages
def get_visible_messages(self, agent: Agent, turn: int):
"""
Get the visible messages for a given agent and turn.
Args:
agent (Agent): The agent.
turn (int): The turn number.
Returns:
List[Dict]: The list of visible messages.
"""
# Get the messages before the current turn
prev_messages = [
message
for message in self.messages
if message["turn"] < turn
]
visible_messages = []
for message in prev_messages:
if (
message["visible_to"] == "all"
or agent.agent_name in message["visible_to"]
):
visible_messages.append(message)
return visible_messages
def query(self, query: str):
"""
Query a message from the messages list and then pass it to the moderator
"""
return [
(mod, content)
for mod, content in self.messages
if mod == self.moderator
]

@ -20,10 +20,12 @@ class SubprocessCodeInterpreter:
Example:
"""
- def __init__(self):
-     self.start_cmd = ""
+ def __init__(
+     self,
+     start_cmd: str = "",
+     debug_mode: bool = False,
+ ):
self.process = None
- self.debug_mode = False
self.output_queue = queue.Queue()
self.done = threading.Event()

@ -1,13 +1,9 @@
import logging
import os
- import sys
import warnings
def disable_logging():
- log_file = open("errors.txt", "w")
- sys.stderr = log_file
warnings.filterwarnings("ignore", category=UserWarning)
# disable tensorflow warnings
@ -29,6 +25,11 @@ def disable_logging():
"numexpr", "numexpr",
"git", "git",
"wandb.docker.auth", "wandb.docker.auth",
"langchain",
"distutils",
"urllib3",
"elasticsearch",
"packaging",
]: ]:
logger = logging.getLogger(logger_name) logger = logging.getLogger(logger_name)
logger.setLevel(logging.ERROR) logger.setLevel(logging.ERROR)

@ -0,0 +1,10 @@
from loguru import logger
# logger.add() returns a handler id, so don't rebind the module-level logger;
# keep `logger` importable via `from swarms.utils.loguru_logger import logger`.
logger.add(
"MessagePool.log",
level="INFO",
colorize=True,
format="<green>{time}</green> <level>{message}</level>",
backtrace=True,
diagnose=True,
)
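Downstream modules can then share this pre-configured logger, as the message_pool module in this diff already does:

from swarms.utils.loguru_logger import logger

logger.info("This event is written to MessagePool.log")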

@ -108,7 +108,7 @@ class Code:
self.value = value
def __str__(self):
- return "%d" % self.value
+ return f"{int(self.value)}"
class Color(Code):

@ -0,0 +1,57 @@
import pytest
from unittest.mock import patch, MagicMock
from swarms.agents.multion_agent import MultiOnAgent
@patch("swarms.agents.multion_agent.multion")
def test_multion_agent_run(mock_multion):
mock_response = MagicMock()
mock_response.result = "result"
mock_response.status = "status"
mock_response.lastUrl = "lastUrl"
mock_multion.browse.return_value = mock_response
agent = MultiOnAgent(
multion_api_key="test_key",
max_steps=5,
starting_url="https://www.example.com",
)
result, status, last_url = agent.run("task")
assert result == "result"
assert status == "status"
assert last_url == "lastUrl"
mock_multion.browse.assert_called_once_with(
{
"cmd": "task",
"url": "https://www.example.com",
"maxSteps": 5,
}
)
# Additional tests for different tasks
@pytest.mark.parametrize(
"task", ["task1", "task2", "task3", "task4", "task5"]
)
@patch("swarms.agents.multion_agent.multion")
def test_multion_agent_run_different_tasks(mock_multion, task):
mock_response = MagicMock()
mock_response.result = "result"
mock_response.status = "status"
mock_response.lastUrl = "lastUrl"
mock_multion.browse.return_value = mock_response
agent = MultiOnAgent(
multion_api_key="test_key",
max_steps=5,
starting_url="https://www.example.com",
)
result, status, last_url = agent.run(task)
assert result == "result"
assert status == "status"
assert last_url == "lastUrl"
mock_multion.browse.assert_called_once_with(
{"cmd": task, "url": "https://www.example.com", "maxSteps": 5}
)

@ -0,0 +1,45 @@
from unittest.mock import MagicMock
from swarms.models.fire_function import FireFunctionCaller
def test_fire_function_caller_run(mocker, capsys):
# Create mock model and tokenizer
model = MagicMock()
tokenizer = MagicMock()
# Patch the pretrained loaders so no real weights are downloaded
mocker.patch(
"swarms.models.fire_function.AutoModelForCausalLM.from_pretrained",
return_value=model,
)
mocker.patch(
"swarms.models.fire_function.AutoTokenizer.from_pretrained",
return_value=tokenizer,
)
# Create mock task and arguments
task = "Add 2 and 3"
args = (2, 3)
kwargs = {}
# Create mock generated_ids and decoded output
generated_ids = [1, 2, 3]
decoded_output = "5"
model.generate.return_value = generated_ids
tokenizer.batch_decode.return_value = [decoded_output]
# Create FireFunctionCaller instance
fire_function_caller = FireFunctionCaller()
# Run the function
fire_function_caller.run(task, *args, **kwargs)
# Assert model.generate was called with the correct inputs
model.generate.assert_called_once_with(
tokenizer.apply_chat_template.return_value.to.return_value,
max_new_tokens=fire_function_caller.max_tokens,
*args,
**kwargs,
)
# Assert tokenizer.batch_decode was called with the correct inputs
tokenizer.batch_decode.assert_called_once_with(generated_ids)
# Assert the decoded output is printed
captured = capsys.readouterr()
assert decoded_output in captured.out

@ -0,0 +1,117 @@
from swarms.structs.agent import Agent
from swarms.structs.message_pool import MessagePool
from swarms import OpenAIChat
def test_message_pool_initialization():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
agent2 = Agent(llm=OpenAIChat(), agent_name="agent2")
moderator = Agent(llm=OpenAIChat(), agent_name="moderator")
agents = [agent1, agent2]
message_pool = MessagePool(
agents=agents, moderator=moderator, turns=5
)
assert message_pool.agent == agents
assert message_pool.moderator == moderator
assert message_pool.turns == 5
assert message_pool.messages == []
def test_message_pool_add():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
assert message_pool.messages == [
{
"agent": agent1,
"content": "Hello, world!",
"turn": 1,
"visible_to": "all",
"logged": True,
}
]
def test_message_pool_reset():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
message_pool.reset()
assert message_pool.messages == []
def test_message_pool_last_turn():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
assert message_pool.last_turn() == 1
def test_message_pool_last_message():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
assert message_pool.last_message == {
"agent": agent1,
"content": "Hello, world!",
"turn": 1,
"visible_to": "all",
"logged": True,
}
def test_message_pool_get_all_messages():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
message_pool = MessagePool(
agents=[agent1], moderator=agent1, turns=5
)
message_pool.add(agent=agent1, content="Hello, world!", turn=1)
assert message_pool.get_all_messages() == [
{
"agent": agent1,
"content": "Hello, world!",
"turn": 1,
"visible_to": "all",
"logged": True,
}
]
def test_message_pool_get_visible_messages():
agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
agent2 = Agent(agent_name="agent2")
message_pool = MessagePool(
agents=[agent1, agent2], moderator=agent1, turns=5
)
message_pool.add(
agent=agent1,
content="Hello, agent2!",
turn=1,
visible_to=[agent2.agent_name],
)
assert message_pool.get_visible_messages(
agent=agent2, turn=2
) == [
{
"agent": agent1,
"content": "Hello, agent2!",
"turn": 1,
"visible_to": [agent2.agent_name],
"logged": True,
}
]