commit
880a0ba67b
@@ -1,323 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"

[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "crossbeam-deque"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
 "crossbeam-epoch",
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
 "crossbeam-utils",
]

[[package]]
name = "crossbeam-utils"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"

[[package]]
name = "either"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"

[[package]]
name = "engine"
version = "0.1.0"
dependencies = [
 "log",
 "pyo3",
 "rayon",
]

[[package]]
name = "indoc"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47741a8bc60fb26eb8d6e0238bbb26d8575ff623fdc97b1a2c00c050b9684ed8"
dependencies = [
 "indoc-impl",
 "proc-macro-hack",
]

[[package]]
name = "indoc-impl"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce046d161f000fffde5f432a0d034d0341dc152643b2598ed5bfce44c4f3a8f0"
dependencies = [
 "proc-macro-hack",
 "proc-macro2",
 "quote",
 "syn",
 "unindent",
]

[[package]]
name = "instant"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
dependencies = [
 "cfg-if",
]

[[package]]
name = "libc"
version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"

[[package]]
name = "lock_api"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
dependencies = [
 "autocfg",
 "scopeguard",
]

[[package]]
name = "log"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"

[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"

[[package]]
name = "parking_lot"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
dependencies = [
 "instant",
 "lock_api",
 "parking_lot_core",
]

[[package]]
name = "parking_lot_core"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc"
dependencies = [
 "cfg-if",
 "instant",
 "libc",
 "redox_syscall",
 "smallvec",
 "winapi",
]

[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
 "paste-impl",
 "proc-macro-hack",
]

[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
 "proc-macro-hack",
]

[[package]]
name = "proc-macro-hack"
version = "0.5.20+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"

[[package]]
name = "proc-macro2"
version = "1.0.78"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae"
dependencies = [
 "unicode-ident",
]

[[package]]
name = "pyo3"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d41d50a7271e08c7c8a54cd24af5d62f73ee3a6f6a314215281ebdec421d5752"
dependencies = [
 "cfg-if",
 "indoc",
 "libc",
 "parking_lot",
 "paste",
 "pyo3-build-config",
 "pyo3-macros",
 "unindent",
]

[[package]]
name = "pyo3-build-config"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "779239fc40b8e18bc8416d3a37d280ca9b9fb04bda54b98037bb6748595c2410"
dependencies = [
 "once_cell",
]

[[package]]
name = "pyo3-macros"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b247e8c664be87998d8628e86f282c25066165f1f8dda66100c48202fdb93a"
dependencies = [
 "pyo3-macros-backend",
 "quote",
 "syn",
]

[[package]]
name = "pyo3-macros-backend"
version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a8c2812c412e00e641d99eeb79dd478317d981d938aa60325dfa7157b607095"
dependencies = [
 "proc-macro2",
 "pyo3-build-config",
 "quote",
 "syn",
]

[[package]]
name = "quote"
version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
 "proc-macro2",
]

[[package]]
name = "rayon"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051"
dependencies = [
 "either",
 "rayon-core",
]

[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
 "crossbeam-deque",
 "crossbeam-utils",
]

[[package]]
name = "redox_syscall"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
dependencies = [
 "bitflags",
]

[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

[[package]]
name = "smallvec"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7"

[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
 "proc-macro2",
 "quote",
 "unicode-ident",
]

[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"

[[package]]
name = "unindent"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c"

[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
 "winapi-i686-pc-windows-gnu",
 "winapi-x86_64-pc-windows-gnu",
]

[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"

[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
@@ -1,28 +0,0 @@
from swarms import HierarchicalSwarm

swarm = HierarchicalSwarm(
    openai_api_key="key",
    model_type="openai",
    model_id="gpt-4",
    use_vectorstore=False,
    use_async=False,
    human_in_the_loop=False,
    logging_enabled=False,
)

# run the swarm with an objective
result = swarm.run("Design a new car")

# or huggingface
swarm = HierarchicalSwarm(
    model_type="huggingface",
    model_id="tiaueu/falcon",
    use_vectorstore=True,
    embedding_size=768,
    use_async=False,
    human_in_the_loop=True,
    logging_enabled=False,
)

# Run the swarm with a particular objective
result = swarm.run("Write a sci-fi short story")
@@ -1,11 +1,16 @@
-from swarms.memory import chroma
+from swarms.memory import ChromaDB

-chromadbcl = chroma.ChromaClient()
-
-chromadbcl.add_vectors(
-    ["This is a document", "BONSAIIIIIII", "the walking dead"]
-)
-
-results = chromadbcl.search_vectors("zombie", limit=1)
-
-print(results)
+# Initialize the memory
+chroma = ChromaDB(
+    metric="cosine",
+    limit_tokens=1000,
+    verbose=True,
+)
+
+# Add text
+text = "This is a test"
+chroma.add(text)
+
+# Search for similar text
+similar_text = chroma.query(text)
@@ -1,11 +0,0 @@
from swarms import Orchestrator, Worker

# Instantiate the Orchestrator with 10 agents
orchestrator = Orchestrator(
    Worker, agent_list=[Worker] * 10, task_queue=[]
)

# Agent 1 sends a message to Agent 2
orchestrator.chat(
    sender_id=1, receiver_id=2, message="Hello, Agent 2!"
)
@@ -1,7 +1,14 @@
-from swarms import swarm
+from swarms import Agent, OpenAIChat

-# Use the function
-api_key = "APIKEY"
-objective = "What is the capital of the UK?"
-result = swarm(api_key, objective)
-print(result)  # Prints: "The capital of the UK is London."
+## Initialize the workflow
+agent = Agent(
+    llm=OpenAIChat(),
+    max_loops=1,
+    autosave=True,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+)
+
+# Run the workflow on a task
+agent("Find a chick fil a equivalent in hayes valley")
@@ -0,0 +1,86 @@
import os

from dotenv import load_dotenv
from swarms import Agent, OpenAIChat
from swarms.agents.multion_agent import MultiOnAgent
from swarms.memory.chroma_db import ChromaDB
from swarms.tools.tool import tool
from swarms.utils.code_interpreter import SubprocessCodeInterpreter

# Load the environment variables
load_dotenv()


# Memory
chroma_db = ChromaDB()


# MultiOn tool
@tool
def multion_tool(
    task: str,
    api_key: str = os.environ.get("MULTION_API_KEY"),
):
    """
    Executes a task using the MultiOnAgent.

    Args:
        task (str): The task to be executed.
        api_key (str, optional): The API key for the MultiOnAgent. Defaults to the value of the MULTION_API_KEY environment variable.

    Returns:
        The result of the task execution.
    """
    multion = MultiOnAgent(multion_api_key=api_key)
    return multion(task)


# Execute the interpreter tool
@tool
def execute_interpreter_tool(
    code: str,
):
    """
    Executes a single command using the interpreter.

    Args:
        code (str): The command to be executed.

    Returns:
        None
    """
    out = SubprocessCodeInterpreter(debug_mode=True)
    out = out.run(code)
    return code


# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)


# Initialize the workflow
agent = Agent(
    agent_name="Research Agent",
    agent_description="An agent that performs research tasks.",
    system_prompt="Perform a research task.",
    llm=llm,
    max_loops=1,
    dashboard=True,
    # tools=[multion_tool, execute_interpreter_tool],
    verbose=True,
    long_term_memory=chroma_db,
    stopping_token="done",
)

# Run the workflow on a task
out = agent.run(
    "Generate a 10,000 word blog on health and wellness, and say done"
    " when you are done"
)
print(out)
@@ -0,0 +1,19 @@
from swarms.structs.agent import Agent
from swarms.structs.message_pool import MessagePool
from swarms import OpenAIChat

agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
agent2 = Agent(llm=OpenAIChat(), agent_name="agent2")
agent3 = Agent(llm=OpenAIChat(), agent_name="agent3")

moderator = Agent(agent_name="moderator")
agents = [agent1, agent2, agent3]
message_pool = MessagePool(
    agents=agents, moderator=moderator, turns=5
)
message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
message_pool.get_all_messages()
message_pool.get_visible_messages(agent=agent1, turn=1)
message_pool.get_visible_messages(agent=agent2, turn=1)
@@ -1,19 +0,0 @@
from swarms import Orchestrator, Worker

node = Worker(
    openai_api_key="",
    ai_name="Optimus Prime",
)


# Instantiate the Orchestrator with 10 agents
orchestrator = Orchestrator(
    node, agent_list=[node] * 10, task_queue=[]
)

# Agent 7 sends a message to Agent 9
orchestrator.chat(
    sender_id=7,
    receiver_id=9,
    message="Can you help me with this task?",
)
@@ -1,19 +0,0 @@
from ..swarms import HierarchicalSwarm

# Retrieve your API key from the environment or replace with your actual key
api_key = "sksdsds"

# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(openai_api_key=api_key)

# Define an objective
objective = """
Please develop and serve a simple community web service.
People can signup, login, post, comment.
Post and comment should be visible at once.
I want it to have neumorphism-style.
The ports you can use are 4500 and 6500.
"""

# Run HierarchicalSwarm
swarm.run(objective)
@@ -1,16 +0,0 @@
from swarms import HierarchicalSwarm

# Retrieve your API key from the environment or replace with your actual key
api_key = ""

# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(api_key)

# Define an objective
objective = (
    "Find 20 potential customers for a HierarchicalSwarm based AI"
    " Agent automation infrastructure"
)

# Run HierarchicalSwarm
swarm.run(objective)
@@ -1,19 +0,0 @@
from swarms import HierarchicalSwarm

# Retrieve your API key from the environment or replace with your actual key
api_key = "sksdsds"

# Initialize HierarchicalSwarm with your API key
swarm = HierarchicalSwarm(openai_api_key=api_key)

# Define an objective
objective = """
Please develop and serve a simple web TODO app.
The user can list all TODO items and add or delete each TODO item.
I want it to have neumorphism-style.
The ports you can use are 4500 and 6500.

"""

# Run HierarchicalSwarm
swarm.run(objective)
@@ -1,19 +0,0 @@
from swarms.tools.tool import tool
from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs


@tool
def search_api(query: str) -> str:
    """Search API

    Args:
        query (str): _description_

    Returns:
        str: _description_
    """
    print(f"Searching API for {query}")


tool_docs = scrape_tool_func_docs(search_api)
print(tool_docs)
@@ -1,7 +0,0 @@
from swarms.models import OpenAIChat
from swarms.structs.workflow import Workflow

llm = OpenAIChat()


workflow = Workflow(llm)
@@ -1,22 +0,0 @@
from swarms.tools.tool import tool
from swarms.tools.tool_func_doc_scraper import scrape_tool_func_docs

# Define a tool by decorating a function with the tool decorator and providing a docstring


@tool(return_direct=True)
def search_api(query: str):
    """Search the web for the query

    Args:
        query (str): _description_

    Returns:
        _type_: _description_
    """
    return f"Search results for {query}"


# Scrape the tool func docs to prepare for injection into the agent prompt
out = scrape_tool_func_docs(search_api)
print(out)
@@ -1,10 +0,0 @@
from swarms import Workflow
from swarms.models import ChatOpenAI

workflow = Workflow(ChatOpenAI)

workflow.add("What's the weather in miami")
workflow.add("Provide details for {{ parent_output }}")
workflow.add("Summarize the above information: {{ parent_output }}")

workflow.run()
@@ -0,0 +1,69 @@
import os
import multion

from swarms.models.base_llm import AbstractLLM
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# MultiOn key
MULTION_API_KEY = os.getenv("MULTION_API_KEY")


class MultiOnAgent(AbstractLLM):
    """
    Represents a multi-on agent that performs browsing tasks.

    Args:
        max_steps (int): The maximum number of steps to perform during browsing.
        starting_url (str): The starting URL for browsing.

    Attributes:
        max_steps (int): The maximum number of steps to perform during browsing.
        starting_url (str): The starting URL for browsing.
    """

    def __init__(
        self,
        multion_api_key: str = MULTION_API_KEY,
        max_steps: int = 4,
        starting_url: str = "https://www.google.com",
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.multion_api_key = multion_api_key
        self.max_steps = max_steps
        self.starting_url = starting_url

    def run(self, task: str, *args, **kwargs):
        """
        Runs a browsing task.

        Args:
            task (str): The task to perform during browsing.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            The result, status, and last URL of the browsing task.
        """
        multion.login(
            use_api=True,
            multion_api_key=str(self.multion_api_key),
            *args,
            **kwargs,
        )

        response = multion.browse(
            {
                "cmd": task,
                "url": self.starting_url,
                "maxSteps": self.max_steps,
            },
            *args,
            **kwargs,
        )

        return response.result, response.status, response.lastUrl
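A minimal usage sketch for the new class, assuming a valid MULTION_API_KEY is exported and that the task string is purely illustrative:

    from swarms.agents.multion_agent import MultiOnAgent

    # multion.login() and multion.browse() are called under the hood;
    # run() returns a (result, status, lastUrl) tuple.
    agent = MultiOnAgent(max_steps=4, starting_url="https://www.google.com")
    result, status, last_url = agent.run("Summarize the top story on Hacker News")
    print(result, status, last_url)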
@@ -1,77 +0,0 @@
from __future__ import annotations

from dataclasses import dataclass
from pathlib import Path
from typing import IO

from pypdf import PdfReader

from swarms.utils.hash import str_to_hash


@dataclass
class TextArtifact:
    text: str


@dataclass
class PDFLoader:
    """
    A class for loading PDF files and extracting text artifacts.

    Args:
        tokenizer (str): The tokenizer to use for chunking the text.
        max_tokens (int): The maximum number of tokens per chunk.

    Methods:
        load(source, password=None, *args, **kwargs):
            Load a single PDF file and extract text artifacts.

        load_collection(sources, password=None, *args, **kwargs):
            Load a collection of PDF files and extract text artifacts.

    Private Methods:
        _load_pdf(stream, password=None):
            Load a PDF file and extract text artifacts.

    Attributes:
        tokenizer (str): The tokenizer used for chunking the text.
        max_tokens (int): The maximum number of tokens per chunk.
    """

    tokenizer: str
    max_tokens: int

    def __post_init__(self):
        self.chunker = PdfChunker(
            tokenizer=self.tokenizer, max_tokens=self.max_tokens
        )

    def load(
        self,
        source: str | IO | Path,
        password: str | None = None,
        *args,
        **kwargs,
    ) -> list[TextArtifact]:
        return self._load_pdf(source, password)

    def load_collection(
        self,
        sources: list[str | IO | Path],
        password: str | None = None,
        *args,
        **kwargs,
    ) -> dict[str, list[TextArtifact]]:
        return {
            str_to_hash(str(s)): self._load_pdf(s, password)
            for s in sources
        }

    def _load_pdf(
        self, stream: str | IO | Path, password: str | None
    ) -> list[TextArtifact]:
        reader = PdfReader(stream, strict=True, password=password)
        return [
            TextArtifact(text=p.extract_text()) for p in reader.pages
        ]
@@ -0,0 +1,528 @@
import base64
import os
import time
from io import BytesIO
from typing import List, Literal, Optional, Tuple, Union

import torch
from PIL import Image
from pydantic import BaseModel, Field
from transformers import (
    AutoModelForCausalLM,
    LlamaTokenizer,
    TextIteratorStreamer,
)

from swarms.models.base_multimodal_model import BaseMultiModalModel
from swarms.utils.logger import logger

MODEL_PATH = "THUDM/cogvlm-chat-hf"
TOKENIZER_PATH = "lmsys/vicuna-7b-v1.5"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
QUANT_ENABLED = False


class ImageUrl(BaseModel):
    url: str


class TextContent(BaseModel):
    type: Literal["text"]
    text: str


class ImageUrlContent(BaseModel):
    type: Literal["image_url"]
    image_url: ImageUrl


ContentItem = Union[TextContent, ImageUrlContent]


class ChatMessageInput(BaseModel):
    role: Literal["user", "assistant", "system"]
    content: Union[str, List[ContentItem]]
    name: Optional[str] = None


class ChatMessageResponse(BaseModel):
    role: Literal["assistant"]
    content: str = None
    name: Optional[str] = None


class DeltaMessage(BaseModel):
    role: Optional[Literal["user", "assistant", "system"]] = None
    content: Optional[str] = None


class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessageInput]
    temperature: Optional[float] = 0.8
    top_p: Optional[float] = 0.8
    max_tokens: Optional[int] = None
    stream: Optional[bool] = False
    # Additional parameters
    repetition_penalty: Optional[float] = 1.0


class ChatCompletionResponseChoice(BaseModel):
    index: int
    message: ChatMessageResponse


class ChatCompletionResponseStreamChoice(BaseModel):
    index: int
    delta: DeltaMessage


class UsageInfo(BaseModel):
    prompt_tokens: int = 0
    total_tokens: int = 0
    completion_tokens: Optional[int] = 0


class ChatCompletionResponse(BaseModel):
    model: str
    object: Literal["chat.completion", "chat.completion.chunk"]
    choices: List[
        Union[
            ChatCompletionResponseChoice,
            ChatCompletionResponseStreamChoice,
        ]
    ]
    created: Optional[int] = Field(
        default_factory=lambda: int(time.time())
    )
    usage: Optional[UsageInfo] = None


# async def create_chat_completion(request: ChatCompletionRequest):
#     global model, tokenizer

#     gen_params = dict(
#         messages=request.messages,
#         temperature=request.temperature,
#         top_p=request.top_p,
#         max_tokens=request.max_tokens or 1024,
#         echo=False,
#         stream=request.stream,
#     )

#     # if request.stream:
#     #     predict(request.model, gen_params)
#     # response = generate_cogvlm(model, tokenizer, gen_params)

#     usage = UsageInfo()

#     message = ChatMessageResponse(
#         role="assistant",
#         content=response["text"],
#     )
#     logger.debug(f"==== message ====\n{message}")
#     choice_data = ChatCompletionResponseChoice(
#         index=0,
#         message=message,
#     )
#     task_usage = UsageInfo.model_validate(response["usage"])
#     for usage_key, usage_value in task_usage.model_dump().items():
#         setattr(
#             usage, usage_key, getattr(usage, usage_key) + usage_value
#         )
#     return ChatCompletionResponse(
#         model=request.model,
#         choices=[choice_data],
#         object="chat.completion",
#         usage=usage,
#     )


class CogVLMMultiModal(BaseMultiModalModel):
    """
    Initializes the CogVLM model.

    Args:
        model_name (str): The path or name of the pre-trained model.
        tokenizer (str): The path or name of the tokenizer.
        device (str): The device to run the model on.
        quantize (bool): Whether to enable quantization.
        torch_type (str): The torch data type to use.
        temperature (float): The temperature for sampling.
        top_p (float): The top-p value for sampling.
        max_tokens (int): The maximum number of tokens to generate.
        echo (bool): Whether to echo the input text.
        stream (bool): Whether to stream the output.
        repetition_penalty (float): The repetition penalty for sampling.
        do_sample (bool): Whether to use sampling during generation.
        *args: Additional positional arguments.
        **kwargs: Additional keyword arguments.

    Methods:
        run: Generates a response using the CogVLM model.
        generate_stream_cogvlm: Generates a stream of responses using the CogVLM model in inference mode.
        process_history_and_images: Processes history messages to extract text, identify the last user query, and convert base64 encoded image URLs to PIL images.

    Example:
        >>> model = CogVLMMultiModal()
        >>> response = model("Describe this image with meticulous details.", "https://example.com/image.jpg")
        >>> print(response)
    """

    def __init__(
        self,
        model_name: str = MODEL_PATH,
        tokenizer: str = TOKENIZER_PATH,
        device: str = DEVICE,
        quantize: bool = QUANT_ENABLED,
        torch_type: str = "float16",
        temperature: float = 0.5,
        top_p: float = 0.9,
        max_tokens: int = 3500,
        echo: bool = False,
        stream: bool = False,
        repetition_penalty: float = 1.0,
        do_sample: bool = True,
        *args,
        **kwargs,
    ):
        super().__init__()
        self.model_name = model_name
        self.device = device
        self.tokenizer = tokenizer
        self.quantize = quantize
        self.torch_type = torch_type
        self.temperature = temperature
        self.top_p = top_p
        self.max_tokens = max_tokens
        self.echo = echo
        self.stream = stream
        self.repetition_penalty = repetition_penalty
        self.do_sample = do_sample

        if os.environ.get("QUANT_ENABLED"):
            pass
        else:
            with torch.cuda.device(device):
                __, total_bytes = torch.cuda.mem_get_info()
                total_gb = total_bytes / (1 << 30)
                if total_gb < 40:
                    pass
                else:
                    pass

        torch.cuda.empty_cache()

        self.tokenizer = LlamaTokenizer.from_pretrained(
            tokenizer, trust_remote_code=True
        )

        if (
            torch.cuda.is_available()
            and torch.cuda.get_device_capability()[0] >= 8
        ):
            torch_type = torch.bfloat16
        else:
            torch_type = torch.float16

        print(
            f"========Use torch type as:{torch_type} with"
            f" device:{device}========\n\n"
        )

        if "cuda" in device:
            if QUANT_ENABLED:
                self.model = AutoModelForCausalLM.from_pretrained(
                    model_name,
                    load_in_4bit=True,
                    trust_remote_code=True,
                    torch_dtype=torch_type,
                    low_cpu_mem_usage=True,
                    *args,
                    **kwargs,
                ).eval()
            else:
                self.model = (
                    AutoModelForCausalLM.from_pretrained(
                        model_name,
                        load_in_4bit=False,
                        trust_remote_code=True,
                        torch_dtype=torch_type,
                        low_cpu_mem_usage=True,
                        *args,
                        **kwargs,
                    )
                    .to(device)
                    .eval()
                )

        else:
            self.model = (
                AutoModelForCausalLM.from_pretrained(
                    model_name,
                    trust_remote_code=True,
                    *args,
                    **kwargs,
                )
                .float()
                .to(device)
                .eval()
            )

    def run(self, task: str, img: str, *args, **kwargs):
        """
        Generates a response using the CogVLM model. It processes the chat history and image data, if any,
        and then invokes the model to generate a response.
        """
        messages = [task]

        params = dict(
            messages=messages,
            temperature=self.temperature,
            repetition_penalty=self.repetition_penalty,
            top_p=self.top_p,
            max_tokens=self.max_tokens,
        )

        for response in self.generate_stream_cogvlm(params):
            pass

        return response

    @torch.inference_mode()
    def generate_stream_cogvlm(
        self,
        params: dict,
    ):
        """
        Generates a stream of responses using the CogVLM model in inference mode.
        It's optimized to handle continuous input-output interactions with the model in a streaming manner.
        """
        messages = params["messages"]
        temperature = float(params.get("temperature", 1.0))
        repetition_penalty = float(
            params.get("repetition_penalty", 1.0)
        )
        top_p = float(params.get("top_p", 1.0))
        max_new_tokens = int(params.get("max_tokens", 256))
        query, history, image_list = self.process_history_and_images(
            messages
        )

        logger.debug(f"==== request ====\n{query}")

        input_by_model = self.model.build_conversation_input_ids(
            self.tokenizer,
            query=query,
            history=history,
            images=[image_list[-1]],
        )
        inputs = {
            "input_ids": (
                input_by_model["input_ids"]
                .unsqueeze(0)
                .to(self.device)
            ),
            "token_type_ids": (
                input_by_model["token_type_ids"]
                .unsqueeze(0)
                .to(self.device)
            ),
            "attention_mask": (
                input_by_model["attention_mask"]
                .unsqueeze(0)
                .to(self.device)
            ),
            "images": [
                [
                    input_by_model["images"][0]
                    .to(self.device)
                    .to(self.torch_type)
                ]
            ],
        }
        if (
            "cross_images" in input_by_model
            and input_by_model["cross_images"]
        ):
            inputs["cross_images"] = [
                [
                    input_by_model["cross_images"][0]
                    .to(self.device)
                    .to(self.torch_type)
                ]
            ]

        input_echo_len = len(inputs["input_ids"][0])
        streamer = TextIteratorStreamer(
            tokenizer=self.tokenizer,
            timeout=60.0,
            skip_prompt=True,
            skip_special_tokens=True,
        )
        gen_kwargs = {
            "repetition_penalty": repetition_penalty,
            "max_new_tokens": max_new_tokens,
            "do_sample": True if temperature > 1e-5 else False,
            "top_p": top_p if temperature > 1e-5 else 0,
            "streamer": streamer,
        }
        if temperature > 1e-5:
            gen_kwargs["temperature"] = temperature

        total_len = 0
        generated_text = ""
        with torch.no_grad():
            self.model.generate(**inputs, **gen_kwargs)
            for next_text in streamer:
                generated_text += next_text
                yield {
                    "text": generated_text,
                    "usage": {
                        "prompt_tokens": input_echo_len,
                        "completion_tokens": (
                            total_len - input_echo_len
                        ),
                        "total_tokens": total_len,
                    },
                }
        ret = {
            "text": generated_text,
            "usage": {
                "prompt_tokens": input_echo_len,
                "completion_tokens": total_len - input_echo_len,
                "total_tokens": total_len,
            },
        }
        yield ret

    def process_history_and_images(
        self,
        messages: List[ChatMessageInput],
    ) -> Tuple[
        Optional[str],
        Optional[List[Tuple[str, str]]],
        Optional[List[Image.Image]],
    ]:
        """
        Process history messages to extract text, identify the last user query,
        and convert base64 encoded image URLs to PIL images.

        Args:
            messages(List[ChatMessageInput]): List of ChatMessageInput objects.
        return: A tuple of three elements:
            - The last user query as a string.
            - Text history formatted as a list of tuples for the model.
            - List of PIL Image objects extracted from the messages.
        """
        formatted_history = []
        image_list = []
        last_user_query = ""

        for i, message in enumerate(messages):
            role = message.role
            content = message.content

            # Extract text content
            if isinstance(content, list):  # text
                text_content = " ".join(
                    item.text
                    for item in content
                    if isinstance(item, TextContent)
                )
            else:
                text_content = content

            # Extract image data
            if isinstance(content, list):  # image
                for item in content:
                    if isinstance(item, ImageUrlContent):
                        image_url = item.image_url.url
                        if image_url.startswith(
                            "data:image/jpeg;base64,"
                        ):
                            base64_encoded_image = image_url.split(
                                "data:image/jpeg;base64,"
                            )[1]
                            image_data = base64.b64decode(
                                base64_encoded_image
                            )
                            image = Image.open(
                                BytesIO(image_data)
                            ).convert("RGB")
                            image_list.append(image)

            # Format history
            if role == "user":
                if i == len(messages) - 1:
                    last_user_query = text_content
                else:
                    formatted_history.append((text_content, ""))
            elif role == "assistant":
                if formatted_history:
                    if formatted_history[-1][1] != "":
                        assert False, (
                            "the last query is answered. answer"
                            f" again. {formatted_history[-1][0]},"
                            f" {formatted_history[-1][1]},"
                            f" {text_content}"
                        )
                    formatted_history[-1] = (
                        formatted_history[-1][0],
                        text_content,
                    )
                else:
                    assert False, "assistant reply before user"
            else:
                assert False, f"unrecognized role: {role}"

        return last_user_query, formatted_history, image_list

    async def predict(self, params: dict):
        """
        Handle streaming predictions. It continuously generates responses for a given input stream.
        This is particularly useful for real-time, continuous interactions with the model.
        """

        choice_data = ChatCompletionResponseStreamChoice(
            index=0,
            delta=DeltaMessage(role="assistant"),
            finish_reason=None,
        )
        chunk = ChatCompletionResponse(
            model=self.model_name,
            choices=[choice_data],
            object="chat.completion.chunk",
        )
        yield f"{chunk.model_dump_json(exclude_unset=True)}"

        previous_text = ""
        for new_response in self.generate_stream_cogvlm(params):
            decoded_unicode = new_response["text"]
            delta_text = decoded_unicode[len(previous_text) :]
            previous_text = decoded_unicode
            delta = DeltaMessage(
                content=delta_text,
                role="assistant",
            )
            choice_data = ChatCompletionResponseStreamChoice(
                index=0,
                delta=delta,
            )
            chunk = ChatCompletionResponse(
                model=self.model_name,
                choices=[choice_data],
                object="chat.completion.chunk",
            )
            yield f"{chunk.model_dump_json(exclude_unset=True)}"
        choice_data = ChatCompletionResponseStreamChoice(
            index=0,
            delta=DeltaMessage(),
        )
        chunk = ChatCompletionResponse(
            model=self.model_name,
            choices=[choice_data],
            object="chat.completion.chunk",
        )
        yield f"{chunk.model_dump_json(exclude_unset=True)}"
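Note that process_history_and_images() only decodes images supplied as data URLs with the exact prefix data:image/jpeg;base64,. A sketch of a conforming ChatMessageInput, using the pydantic models defined above, with a placeholder standing in for real image bytes:

    # Placeholder base64 payload; a real call needs actual JPEG bytes.
    msg = ChatMessageInput(
        role="user",
        content=[
            TextContent(type="text", text="Describe this image."),
            ImageUrlContent(
                type="image_url",
                image_url=ImageUrl(url="data:image/jpeg;base64,<payload>"),
            ),
        ],
    )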
@@ -0,0 +1,87 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
import json
from swarms.models.base_llm import AbstractLLM
from typing import Any


class FireFunctionCaller(AbstractLLM):
    """
    A class that represents a caller for the FireFunction model.

    Args:
        model_name (str): The name of the model to be used.
        device (str): The device to be used.
        function_spec (Any): The specification of the function.
        max_tokens (int): The maximum number of tokens.
        system_prompt (str): The system prompt.
        *args: Variable length argument list.
        **kwargs: Arbitrary keyword arguments.

    Methods:
        run(self, task: str, *args, **kwargs) -> None: Run the function with the given task and arguments.

    Examples:
        >>> fire_function_caller = FireFunctionCaller()
        >>> fire_function_caller.run("Add 2 and 3")
    """

    def __init__(
        self,
        model_name: str = "fireworks-ai/firefunction-v1",
        device: str = "cuda",
        function_spec: Any = None,
        max_tokens: int = 3000,
        system_prompt: str = "You are a helpful assistant with access to functions. Use them if required.",
        *args,
        **kwargs,
    ):
        super().__init__(model_name, device)
        self.model_name = model_name
        self.device = device
        self.function_spec = function_spec
        self.max_tokens = max_tokens
        self.system_prompt = system_prompt

        self.model = AutoModelForCausalLM.from_pretrained(
            model_name, device_map="auto", *args, **kwargs
        )
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)

        self.functions = json.dumps(function_spec, indent=4)

    def run(self, task: str, *args, **kwargs):
        """
        Run the function with the given task and arguments.

        Args:
            task (str): The task to be performed.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            None
        """
        messages = [
            {"role": "functions", "content": self.functions},
            {
                "role": "system",
                "content": self.system_prompt,
            },
            {
                "role": "user",
                "content": task,
            },
        ]

        model_inputs = self.tokenizer.apply_chat_template(
            messages, return_tensors="pt"
        ).to(self.model.device)

        generated_ids = self.model.generate(
            model_inputs,
            max_new_tokens=self.max_tokens,
            *args,
            **kwargs,
        )
        decoded = self.tokenizer.batch_decode(generated_ids)
        print(decoded[0])
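This diff does not pin down the schema of function_spec; it is only serialized with json.dumps and injected as a "functions" message. A purely illustrative spec in the OpenAI function-calling style, which is an assumption rather than something the diff confirms:

    # Hypothetical spec; adjust to whatever schema firefunction-v1 expects.
    spec = [
        {
            "name": "add",
            "description": "Add two integers.",
            "parameters": {
                "type": "object",
                "properties": {
                    "a": {"type": "integer"},
                    "b": {"type": "integer"},
                },
                "required": ["a", "b"],
            },
        }
    ]
    caller = FireFunctionCaller(function_spec=spec)
    caller.run("Add 2 and 3")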
@@ -0,0 +1,43 @@
from unittest.mock import MagicMock
from swarms.models.fire_function import FireFunctionCaller


def test_fire_function_caller_run(mocker):
    # Create mock model and tokenizer
    model = MagicMock()
    tokenizer = MagicMock()
    mocker.patch.object(FireFunctionCaller, "model", model)
    mocker.patch.object(FireFunctionCaller, "tokenizer", tokenizer)

    # Create mock task and arguments
    task = "Add 2 and 3"
    args = (2, 3)
    kwargs = {}

    # Create mock generated_ids and decoded output
    generated_ids = [1, 2, 3]
    decoded_output = "5"
    model.generate.return_value = generated_ids
    tokenizer.batch_decode.return_value = [decoded_output]

    # Create FireFunctionCaller instance
    fire_function_caller = FireFunctionCaller()

    # Run the function
    fire_function_caller.run(task, *args, **kwargs)

    # Assert model.generate was called with the correct inputs
    model.generate.assert_called_once_with(
        tokenizer.apply_chat_template.return_value,
        max_new_tokens=fire_function_caller.max_tokens,
        *args,
        **kwargs,
    )

    # Assert tokenizer.batch_decode was called with the correct inputs
    tokenizer.batch_decode.assert_called_once_with(generated_ids)

    # Assert the decoded output is printed
    assert decoded_output in mocker.patch.object(
        print, "call_args_list"
    )
@ -0,0 +1,214 @@
|
|||||||
|
import hashlib
|
||||||
|
from time import time_ns
|
||||||
|
from typing import Callable, List, Optional, Sequence, Union
|
||||||
|
|
||||||
|
from swarms.structs.agent import Agent
|
||||||
|
from swarms.structs.base_swarm import BaseSwarm
|
||||||
|
from swarms.utils.loguru_logger import logger
|
||||||
|
|
||||||
|
|
||||||
|
def _hash(input: str):
|
||||||
|
"""
|
||||||
|
Hashes the input string using SHA256 algorithm.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
input (str): The string to be hashed.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
str: The hexadecimal representation of the hash value.
|
||||||
|
"""
|
||||||
|
hex_dig = hashlib.sha256(input.encode("utf-8")).hexdigest()
|
||||||
|
return hex_dig
|
||||||
|
|
||||||
|
|
||||||
|
def msg_hash(
|
||||||
|
agent: Agent, content: str, turn: int, msg_type: str = "text"
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Generate a hash value for a message.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent (Agent): The agent sending the message.
|
||||||
|
content (str): The content of the message.
|
||||||
|
turn (int): The turn number of the message.
|
||||||
|
msg_type (str, optional): The type of the message. Defaults to "text".
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
int: The hash value of the message.
|
||||||
|
"""
|
||||||
|
time = time_ns()
|
||||||
|
return _hash(
|
||||||
|
f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:"
|
||||||
|
f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class MessagePool(BaseSwarm):
|
||||||
|
"""
|
||||||
|
A class representing a message pool for agents in a swarm.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
agents (Optional[Sequence[Agent]]): The list of agents in the swarm.
|
||||||
|
moderator (Optional[Agent]): The moderator agent.
|
||||||
|
turns (Optional[int]): The number of turns.
|
||||||
|
routing_function (Optional[Callable]): The routing function for message distribution.
|
||||||
|
show_names (Optional[bool]): Flag indicating whether to show agent names.
|
||||||
|
messages (List[Dict]): The list of messages in the pool.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
>>> from swarms.structs.agent import Agent
|
||||||
|
>>> from swarms.structs.message_pool import MessagePool
|
||||||
|
>>> agent1 = Agent(agent_name="agent1")
|
||||||
|
>>> agent2 = Agent(agent_name="agent2")
|
||||||
|
>>> agent3 = Agent(agent_name="agent3")
|
||||||
|
>>> moderator = Agent(agent_name="moderator")
|
||||||
|
>>> agents = [agent1, agent2, agent3]
|
||||||
|
>>> message_pool = MessagePool(agents=agents, moderator=moderator, turns=5)
|
||||||
|
>>> message_pool.add(agent=agent1, content="Hello, agent2!", turn=1)
|
||||||
|
>>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1)
|
||||||
|
>>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1)
|
||||||
|
>>> message_pool.get_all_messages()
|
||||||
|
[{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
|
    >>> message_pool.get_visible_messages(agent=agent1, turn=2)
    [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]

    >>> message_pool.get_visible_messages(agent=agent2, turn=2)
    [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}]
    """

    def __init__(
        self,
        agents: Optional[Sequence[Agent]] = None,
        moderator: Optional[Agent] = None,
        turns: Optional[int] = 5,
        routing_function: Optional[Callable] = None,
        show_names: Optional[bool] = False,
        autosave: Optional[bool] = False,
        *args,
        **kwargs,
    ):
        super().__init__()

        # Both agents and moderator default to None, so guard before using them.
        self.agents = agents if agents is not None else []
        self.moderator = moderator
        self.turns = turns
        self.routing_function = routing_function
        self.show_names = show_names
        self.autosave = autosave

        self.messages = []

        logger.info("MessagePool initialized")
        logger.info(f"Number of agents: {len(self.agents)}")
        logger.info(
            f"Agents: {[agent.agent_name for agent in self.agents]}"
        )
        if moderator is not None:
            logger.info(f"Moderator: {moderator.agent_name} is available")
        logger.info(f"Number of turns: {turns}")

    def add(
        self,
        agent: Agent,
        content: str,
        turn: int,
        visible_to: Union[str, List[str]] = "all",
        logged: bool = True,
    ):
        """
        Add a message to the pool.

        Args:
            agent (Agent): The agent sending the message.
            content (str): The content of the message.
            turn (int): The turn number.
            visible_to (Union[str, List[str]], optional): The agents who can see the message. Defaults to "all".
            logged (bool, optional): Whether the message should be logged. Defaults to True.
        """
        self.messages.append(
            {
                "agent": agent,
                "content": content,
                "turn": turn,
                "visible_to": visible_to,
                "logged": logged,
            }
        )
        logger.info(f"Message added: {content}")

    def reset(self):
        """
        Reset the message pool.
        """
        self.messages = []
        logger.info("MessagePool reset")

    def last_turn(self):
        """
        Get the last turn number.

        Returns:
            int: The last turn number, or 0 if the pool is empty.
        """
        if len(self.messages) == 0:
            return 0
        return self.messages[-1]["turn"]

    @property
    def last_message(self):
        """
        Get the last message in the pool.

        Returns:
            dict: The last message, or None if the pool is empty.
        """
        if len(self.messages) == 0:
            return None
        return self.messages[-1]

    def get_all_messages(self):
        """
        Get all messages in the pool.

        Returns:
            List[Dict]: The list of all messages.
        """
        return self.messages

    def get_visible_messages(self, agent: Agent, turn: int):
        """
        Get the visible messages for a given agent and turn.

        Args:
            agent (Agent): The agent.
            turn (int): The turn number.

        Returns:
            List[Dict]: The list of visible messages.
        """
        # Only messages from turns strictly before the current turn are visible.
        prev_messages = [
            message for message in self.messages if message["turn"] < turn
        ]

        visible_messages = []
        for message in prev_messages:
            if (
                message["visible_to"] == "all"
                or agent.agent_name in message["visible_to"]
            ):
                visible_messages.append(message)
        return visible_messages

    def query(self, query: str):
        """
        Return the moderator's messages from the pool as (agent, content) pairs.
        """
        # Messages are stored as dicts, so filter on the "agent" key; the
        # original tuple-unpacking over dicts would raise at runtime.
        return [
            (message["agent"], message["content"])
            for message in self.messages
            if message["agent"] == self.moderator
        ]
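# A minimal usage sketch of the visibility rules above (hypothetical agent
# objects; assumes only the MessagePool API shown in this class):
#
#   pool = MessagePool(agents=[agent1, agent2], moderator=agent1, turns=5)
#   pool.add(agent=agent1, content="Hello, agent2!", turn=1)
#   pool.add(agent=agent1, content="Just for agent2", turn=1, visible_to=["agent2"])
#   pool.get_visible_messages(agent=agent2, turn=2)  # -> both messages
#   pool.get_visible_messages(agent=agent1, turn=2)  # -> only the "all" message
#   pool.last_turn()                                 # -> 1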
@ -0,0 +1,10 @@
from loguru import logger

# Note: `logger.add` returns an integer handler id, so its result must not be
# re-bound to the name `logger` (doing so would break later `logger.info` calls).
logger.add(
    "MessagePool.log",
    level="INFO",
    colorize=True,
    format="<green>{time}</green> <level>{message}</level>",
    backtrace=True,
    diagnose=True,
)
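# With this sink, a call such as `logger.info("MessagePool initialized")` lands
# in MessagePool.log roughly as follows (illustrative output only; the exact
# timestamp is loguru's default rendering of {time}):
#
#   2024-03-01T12:00:00.000000+0000 MessagePool initialized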
@ -0,0 +1,57 @@
import pytest
from unittest.mock import patch, MagicMock

from swarms.agents.multion_agent import MultiOnAgent


@patch("swarms.agents.multion_agent.multion")
def test_multion_agent_run(mock_multion):
    mock_response = MagicMock()
    mock_response.result = "result"
    mock_response.status = "status"
    mock_response.lastUrl = "lastUrl"
    mock_multion.browse.return_value = mock_response

    agent = MultiOnAgent(
        multion_api_key="test_key",
        max_steps=5,
        starting_url="https://www.example.com",
    )
    result, status, last_url = agent.run("task")

    assert result == "result"
    assert status == "status"
    assert last_url == "lastUrl"
    mock_multion.browse.assert_called_once_with(
        {
            "cmd": "task",
            "url": "https://www.example.com",
            "maxSteps": 5,
        }
    )


# Additional tests for different tasks
@pytest.mark.parametrize(
    "task", ["task1", "task2", "task3", "task4", "task5"]
)
@patch("swarms.agents.multion_agent.multion")
def test_multion_agent_run_different_tasks(mock_multion, task):
    mock_response = MagicMock()
    mock_response.result = "result"
    mock_response.status = "status"
    mock_response.lastUrl = "lastUrl"
    mock_multion.browse.return_value = mock_response

    agent = MultiOnAgent(
        multion_api_key="test_key",
        max_steps=5,
        starting_url="https://www.example.com",
    )
    result, status, last_url = agent.run(task)

    assert result == "result"
    assert status == "status"
    assert last_url == "lastUrl"
    mock_multion.browse.assert_called_once_with(
        {"cmd": task, "url": "https://www.example.com", "maxSteps": 5}
    )
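# Why the patch target matters: `@patch("swarms.agents.multion_agent.multion")`
# replaces the `multion` name where the agent module looks it up, so `agent.run`
# exercises the real control flow against the mock without opening a browser
# session. A minimal sketch of the same idea (hypothetical module `mymod` that
# does `import requests` at top level and calls `requests.get` inside `fetch`):
#
#   from unittest.mock import patch
#   import mymod
#
#   @patch("mymod.requests")
#   def test_fetch(mock_requests):
#       mock_requests.get.return_value.text = "ok"
#       assert mymod.fetch("https://example.com") == "ok"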
@ -0,0 +1,45 @@
from unittest.mock import MagicMock

from swarms.models.fire_function import FireFunctionCaller


def test_fire_function_caller_run(mocker):
    # Create mock model and tokenizer
    model = MagicMock()
    tokenizer = MagicMock()
    mocker.patch.object(FireFunctionCaller, "model", model)
    mocker.patch.object(FireFunctionCaller, "tokenizer", tokenizer)

    # Capture printed output; `print` must be patched *before* run() is called.
    mock_print = mocker.patch("builtins.print")

    # Create mock task and arguments
    task = "Add 2 and 3"
    args = (2, 3)
    kwargs = {}

    # Create mock generated_ids and decoded output
    generated_ids = [1, 2, 3]
    decoded_output = "5"
    model.generate.return_value = generated_ids
    tokenizer.batch_decode.return_value = [decoded_output]

    # Create FireFunctionCaller instance
    fire_function_caller = FireFunctionCaller()

    # Run the function
    fire_function_caller.run(task, *args, **kwargs)

    # Assert model.generate was called with the correct inputs
    model.generate.assert_called_once_with(
        tokenizer.apply_chat_template.return_value,
        max_new_tokens=fire_function_caller.max_tokens,
        *args,
        **kwargs,
    )

    # Assert tokenizer.batch_decode was called with the correct inputs
    tokenizer.batch_decode.assert_called_once_with(generated_ids)

    # Assert the decoded output was printed (the original in-place
    # `mocker.patch.object(print, "call_args_list")` check was not a valid
    # assertion).
    assert any(
        decoded_output in str(call) for call in mock_print.call_args_list
    )
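# The print-capture pattern used above, in isolation (a minimal sketch with
# pytest-mock's `mocker` fixture; `greet` is a hypothetical function):
#
#   def greet():
#       print("hello")
#
#   def test_greet_prints(mocker):
#       mock_print = mocker.patch("builtins.print")
#       greet()
#       mock_print.assert_called_once_with("hello")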
@ -0,0 +1,117 @@
from swarms.structs.agent import Agent
from swarms.structs.message_pool import MessagePool
from swarms import OpenAIChat


def test_message_pool_initialization():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    agent2 = Agent(llm=OpenAIChat(), agent_name="agent2")
    moderator = Agent(llm=OpenAIChat(), agent_name="moderator")
    agents = [agent1, agent2]
    message_pool = MessagePool(
        agents=agents, moderator=moderator, turns=5
    )

    assert message_pool.agents == agents
    assert message_pool.moderator == moderator
    assert message_pool.turns == 5
    assert message_pool.messages == []


def test_message_pool_add():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)

    assert message_pool.messages == [
        {
            "agent": agent1,
            "content": "Hello, world!",
            "turn": 1,
            "visible_to": "all",
            "logged": True,
        }
    ]


def test_message_pool_reset():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)
    message_pool.reset()

    assert message_pool.messages == []


def test_message_pool_last_turn():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)

    assert message_pool.last_turn() == 1


def test_message_pool_last_message():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)

    assert message_pool.last_message == {
        "agent": agent1,
        "content": "Hello, world!",
        "turn": 1,
        "visible_to": "all",
        "logged": True,
    }


def test_message_pool_get_all_messages():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    message_pool = MessagePool(
        agents=[agent1], moderator=agent1, turns=5
    )
    message_pool.add(agent=agent1, content="Hello, world!", turn=1)

    assert message_pool.get_all_messages() == [
        {
            "agent": agent1,
            "content": "Hello, world!",
            "turn": 1,
            "visible_to": "all",
            "logged": True,
        }
    ]


def test_message_pool_get_visible_messages():
    agent1 = Agent(llm=OpenAIChat(), agent_name="agent1")
    agent2 = Agent(llm=OpenAIChat(), agent_name="agent2")
    message_pool = MessagePool(
        agents=[agent1, agent2], moderator=agent1, turns=5
    )
    message_pool.add(
        agent=agent1,
        content="Hello, agent2!",
        turn=1,
        visible_to=[agent2.agent_name],
    )

    # Visibility is evaluated against earlier turns, so query at turn=2.
    assert message_pool.get_visible_messages(
        agent=agent2, turn=2
    ) == [
        {
            "agent": agent1,
            "content": "Hello, agent2!",
            "turn": 1,
            "visible_to": [agent2.agent_name],
            "logged": True,
        }
    ]
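# To run these suites locally (hypothetical paths, assuming the repository's
# usual tests/ layout and any API keys that Agent/OpenAIChat construction needs
# in the environment):
#
#   pytest tests/agents/test_multion_agent.py \
#          tests/models/test_fire_function_caller.py \
#          tests/structs/test_message_pool.py -v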