[CLEANUP][No rust]

pull/413/head^2
Kye 10 months ago
parent 57eb57e7bd
commit 0ddd37514c

@@ -60,9 +60,9 @@ nav:
    - Contributing: "contributing.md"
    - Limitations of Individual Agents: "limits_of_individual_agents.md"
    - Why Swarms: "why_swarms.md"
-   - DIY Build Your Own Agent: "diy_your_own_agent.md"
    - Swarms Framework:
      - Overview: "swarms/index.md"
+     - DIY Build Your Own Agent: "diy_your_own_agent.md"
    - swarms.agents:
      - Agents:
        - WorkerAgent: "swarms/agents/workeragent.md"

@@ -0,0 +1,45 @@
# Import necessary libraries
from transformers import AutoModelForCausalLM, AutoTokenizer
from pydantic import BaseModel

# from swarms import ToolAgent
from swarms.utils.json_utils import base_model_schema_to_json

# Load the pre-trained model and tokenizer in 4-bit precision
model = AutoModelForCausalLM.from_pretrained(
    "databricks/dolly-v2-12b",
    load_in_4bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")


# Define the expected output structure as a Pydantic model
class Schema(BaseModel):
    name: str
    agent: int
    is_student: bool
    courses: list[str]


# Convert the model's JSON schema to a string and print it
json_schema = str(base_model_schema_to_json(Schema))
print(json_schema)

# # Define the task to generate a person's information
# task = (
#     "Generate a person's information based on the following schema:"
# )

# # Create an instance of the ToolAgent class
# agent = ToolAgent(
#     name="dolly-function-agent",
#     description="An agent to create child data",
#     model=model,
#     tokenizer=tokenizer,
#     json_schema=json_schema,
# )

# # Run the agent to generate the person's information
# generated_data = agent.run(task)

# # Print the generated data
# print(f"Generated data: {generated_data}")
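For reference, the active part of this example prints the Pydantic-generated JSON schema of `Schema`. A sketch of the expected shape, assuming Pydantic v2's `model_json_schema()` under the hood (exact keys and field metadata vary by Pydantic version):

```python
# Sketch of the printed schema for the Schema model above (a sketch,
# assuming Pydantic v2; output differs slightly on other versions):
#
# {
#   "title": "Schema",
#   "type": "object",
#   "properties": {
#     "name": {"title": "Name", "type": "string"},
#     "agent": {"title": "Agent", "type": "integer"},
#     "is_student": {"title": "Is Student", "type": "boolean"},
#     "courses": {"title": "Courses", "type": "array", "items": {"type": "string"}}
#   },
#   "required": ["name", "agent", "is_student", "courses"]
# }
```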

@@ -5,13 +5,13 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "swarms"
-version = "4.2.6"
+version = "4.2.7"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
 homepage = "https://github.com/kyegomez/swarms"
 documentation = "https://swarms.apac.ai"
-readme = "README.md"  # Assuming you have a README.md
+readme = "README.md"
 repository = "https://github.com/kyegomez/swarms"
 keywords = ["artificial intelligence", "deep learning", "optimizers", "Prompt Engineering", "swarms", "agents"]
 classifiers = [
@@ -24,7 +24,7 @@ classifiers = [

 [tool.poetry.dependencies]
-python = "^3.6.1"
+python = "^3.7.1"
 torch = "2.1.1"
 transformers = "4.37.1"
 openai = "0.28.0"
@@ -33,8 +33,6 @@ asyncio = "3.4.3"
 einops = "0.7.0"
 google-generativeai = "0.3.1"
 langchain-experimental = "0.0.10"
-tensorflow = "*"
-weaviate-client = "3.25.3"
 opencv-python-headless = "4.8.1.78"
 faiss-cpu = "1.7.4"
 backoff = "2.2.1"
@@ -48,24 +46,19 @@ anthropic = "*"
 sentencepiece = "0.1.98"
 httpx = "0.24.1"
 tiktoken = "0.4.0"
-attrs = "22.2.0"
 ratelimit = "2.2.1"
 loguru = "0.7.2"
-cohere = "4.24"
 huggingface-hub = "*"
 pydantic = "1.10.12"
 tenacity = "8.2.2"
 Pillow = "9.4.0"
 chromadb = "*"
 termcolor = "2.2.0"
-black = "23.3.0"
-soundfile = "0.12.1"
 torchvision = "0.16.1"
 rich = "13.5.2"
 sqlalchemy = "*"
 bitsandbytes = "*"
 pgvector = "*"
-qdrant-client = "*"
 sentence-transformers = "*"
 peft = "*"
 psutil = "*"
@@ -76,6 +69,9 @@ scikit-image = "*"
 pinecone-client = "*"
 roboflow = "*"

+[tool.poetry.dev-dependencies]
+black = "23.3.0"
+
 [tool.poetry.group.lint.dependencies]
 ruff = ">=0.0.249,<0.1.7"
@@ -86,15 +82,16 @@ types-chardet = "^5.0.4.6"
 mypy-protobuf = "^3.0.0"

-[tool.autopep8]
-max_line_length = 70
-ignore = "E501,W6"  # or ["E501", "W6"]
-in-place = true
-recursive = true
-aggressive = 3
-
 [tool.ruff]
 line-length = 70
+# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
+select = ["E4", "E7", "E9", "F"]
+ignore = []
+# Allow fixes for all enabled rules (when `--fix` is provided).
+fixable = ["ALL"]
+unfixable = []

 [tool.black]
 line-length = 70

@@ -18,7 +18,6 @@ pypdf==4.0.1
 accelerate==0.22.0
 loguru==0.7.2
 chromadb
-tensorflow
 optimum
 toml
 tiktoken==0.4.0
@@ -34,8 +33,6 @@ numpy
 openai==0.28.0
 opencv-python==4.9.0.80
 timm
-yapf
-autopep8
 cohere==4.24
 torchvision==0.16.1
 rich==13.5.2
@@ -47,7 +44,7 @@ peft
 psutil
 ultralytics
 supervision
-scikit-image
 anthropic
 pinecone-client
 roboflow
+black

@@ -1,93 +0,0 @@
use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
use pyo3::types::IntoPyDict;
use rayon::prelude::*;
use rayon::ThreadPoolBuilder;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
#[pymodule]
fn rust_module(py: Python, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(concurrent_exec, m)?)?;
Ok(())
}
/// This function wraps Python code in Rust concurrency for ultra high performance.
///
/// # Arguments
///
/// * `py_codes` - A vector of string slices that holds the Python codes to be executed.
/// * `timeout` - An optional duration to specify a timeout for the Python code execution.
/// * `num_threads` - The number of threads to use for executing the Python code.
/// * `error_handler` - A function to handle errors during Python code execution.
/// * `log_function` - A function to log the execution of the Python code.
/// * `result_handler` - A function to handle the results of the Python code execution.
///
/// # Example
///
/// ```
/// let py_codes = vec!["print('Hello, World!')", "print('Hello, Rust!')"];
/// let timeout = Some(Duration::from_secs(5));
/// let num_threads = 4;
/// let error_handler = |e| eprintln!("Error: {}", e);
/// let log_function = |s| println!("Log: {}", s);
/// let result_handler = |r| println!("Result: {:?}", r);
/// concurrent_exec(py_codes, timeout, num_threads, error_handler, log_function, result_handler);
/// ```
#[pyfunction]
pub fn concurrent_exec<F, G, H>(
py_codes: Vec<&str>,
timeout: Option<Duration>,
num_threads: usize,
error_handler: F,
log_function: G,
result_handler: H,
) -> PyResult<Vec<PyResult<()>>>
where
F: Fn(&str),
G: Fn(&str),
H: Fn(&PyResult<()>),
{
    let gil = Python::acquire_gil();
    let py = gil.python();
    let results = Arc::new(Mutex::new(Vec::new()));
    // Build a dedicated rayon pool with the requested number of threads.
    let pool = ThreadPoolBuilder::new()
        .num_threads(num_threads)
        .build()
        .expect("failed to build thread pool");
    pool.install(|| {
        py_codes.par_iter().for_each(|code| {
            let locals = [("__name__", "__main__")].into_py_dict(py);
            let globals = [("__name__", "__main__")].into_py_dict(py);
            log_function(&format!("Executing Python code: {}", code));
            let start = Instant::now();
            let result = py.run(code, Some(globals), Some(locals));
            // `py.run` is synchronous, so execution has finished by this point;
            // report a timeout if the run exceeded the allotted duration.
            if let Some(t) = timeout {
                if start.elapsed() >= t {
                    error_handler(&format!("Python code execution timed out: {}", code));
                }
            }
            result_handler(&result);
            results.lock().unwrap().push(result);
        });
    });
    // `install` blocks until all tasks complete, so no explicit join is needed.
    Ok(Arc::try_unwrap(results)
        .expect("no outstanding references to results")
        .into_inner()
        .unwrap())
}

@@ -1,71 +0,0 @@
use pyo3::prelude::*;
use rustacuda::prelude::*;
use rustacuda::memory::DeviceBox;
use std::error::Error;
use std::ffi::CString;
#[pymodule]
fn rust_cuda(_py: Python, m: &PyModule) -> PyResult<()> {
#[pyfn(m, "execute_on_device")]
fn execute_on_device(py: Python, device_id: u32, a: f32, b: f32) -> PyResult<f32> {
// The result of executing the CUDA operation.
let result = py.allow_threads(|| {
execute_cuda(device_id, a, b)
});
match result {
Ok(res) => Ok(res),
Err(err) => Err(PyErr::new::<pyo3::exceptions::PyException, _>(format!("{}", err))),
}
}
Ok(())
}
fn execute_cuda(device_id: u32, a: f32, b: f32) -> Result<f32, Box<dyn Error>> {
rustacuda::init(CudaFlags::empty())?;
let device = Device::get_device(device_id)?;
    // Create a new CUDA context for the selected device and push it onto the
    // current thread's stack; this fails if context creation fails.
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
let module_data = CString::new(include_str!("../resources/add.ptx"))?;
let module = Module::load_from_string(&module_data)?;
let stream = Stream::new(StreamFlags::NON_BLOCKING, None)?;
let mut x = DeviceBox::new(&a)?;
let mut y = DeviceBox::new(&b)?;
let mut result = DeviceBox::new(&0.0f32)?;
unsafe {
launch!(module.sum<<<1, 1, 0, stream>>>(
x.as_device_ptr(),
y.as_device_ptr(),
result.as_device_ptr(),
1
))?;
}
stream.synchronize()?;
let mut result_host = 0.0f32;
result.copy_to(&mut result_host)?;
Ok(result_host)
}

@@ -1,37 +0,0 @@
use std::fs::File;
use std::io::{self, BufRead, BufReader};
/// Reads the contents of a file located at the specified path.
///
/// # Arguments
///
/// * `path` - The path to the file.
///
/// # Returns
///
/// A vector of strings representing the lines of the file.
///
/// # Panics
///
/// Panics if the file cannot be opened or if a line cannot be read.
fn read_file(path: &str) -> Vec<String> {
    let contents: io::Result<Vec<String>> =
        BufReader::new(File::open(path).expect("Could not open file"))
            .lines()
            .collect();
    contents.expect("Could not read file")
}

@@ -1,113 +0,0 @@
/// This module provides a multi-threading processor for executing Python modules and functions in parallel.
/// It utilizes the `rayon` crate for parallel processing and the `pyo3` crate for interacting with the Python interpreter.
/// The `multithreading_processor` function takes a vector of `PythonModule` structs and the number of threads to use.
/// Each `PythonModule` struct contains the name of the Python module, the name of the function to call, and any arguments to pass to the function.
/// The function imports the Python module, calls the specified function, and sends any errors encountered back to the main thread.
/// If an import error occurs, a `PythonError::ImportError` is returned.
/// If a function call error occurs, a `PythonError::FunctionError` is returned.
use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
use rayon::prelude::*;
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
use log::error;
struct PythonModule<'a> {
name: &'a str,
function: &'a str,
}
enum PythonError {
ImportError(String),
FunctionError(String),
}
#[pymodule]
fn my_module(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(process_python_modules, m)?)?;
Ok(())
}
/// The function returns `Ok(())` if all modules are processed successfully.
/// Note: This code assumes that the necessary dependencies (`pyo3`, `rayon`, `log`) are already imported and initialized.
///
/// # Arguments
///
/// * `modules` - A vector of `PythonModule` structs representing the Python modules and functions to execute.
/// * `num_threads` - The number of threads to use for parallel processing.
///
/// # Examples
///
/// ```
/// use pyo3::types::PyModule;
/// use pyo3::types::PyResult;
/// use pyo3::prelude::*;
///
/// struct PythonModule<'a> {
/// name: &'a str,
/// function: &'a str,
/// args: Vec<&'a str>,
/// }
///
/// #[pymodule]
/// fn multithreading_processor(modules: Vec<PythonModule>, num_threads: usize) -> Result<(), PythonError> {
/// // Function implementation
/// Ok(())
/// }
/// ```
///
/// # Errors
///
/// Returns a `PythonError` if an import error or a function call error occurs.
///
/// # Panics
///
/// This function does not panic.
///
/// # Safety
///
/// This function is safe to call, but it assumes that the necessary dependencies (`pyo3`, `rayon`, `log`) are already imported and initialized.
#[pyfunction]
fn process_python_modules(modules: Vec<PythonModule>, num_threads: usize) -> Result<(), PythonError> {
    // Initialize the Python interpreter and acquire the GIL
    let gil = Python::acquire_gil();
let py = gil.python();
// Set the global thread pool's configuration
rayon::ThreadPoolBuilder::new()
.num_threads(num_threads)
.build_global()
.unwrap();
// Create a channel to send errors from threads to the main thread
let (tx, rx) = channel();
let tx = Arc::new(Mutex::new(tx));
// Process each Python module in parallel
modules.par_iter().for_each(|module| {
let result = PyModule::import(py, module.name)
.map_err(|_| PythonError::ImportError(module.name.to_string()))
.and_then(|m| m.call0(module.function)
.map_err(|_| PythonError::FunctionError(module.function.to_string())));
if let Err(e) = result {
let tx = tx.lock().unwrap();
tx.send(e).unwrap();
}
});
// Check for errors
drop(tx); // Close the sender
for error in rx {
match error {
PythonError::ImportError(module) => error!("Failed to import module {}", module),
PythonError::FunctionError(function) => error!("Failed to call function {}", function),
}
}
Ok(())
}

@@ -1,59 +0,0 @@
use pyo3::prelude::*;
use pyo3::types::PyList;
use rayon::prelude::*;
use std::fs;
use std::time::Instant;
// Define the new execute function
fn exec_concurrently(script_path: &str, threads: usize) -> PyResult<()> {
(0..threads).into_par_iter().for_each(|_| {
Python::with_gil(|py| {
let sys = py.import("sys").unwrap();
let path: &PyList = match sys.getattr("path") {
Ok(path) => match path.downcast() {
Ok(path) => path,
Err(e) => {
eprintln!("Failed to downcast path: {:?}", e);
return;
}
},
Err(e) => {
eprintln!("Failed to get path attribute: {:?}", e);
return;
}
};
if let Err(e) = path.append("lib/python3.11/site-packages") {
eprintln!("Failed to append path: {:?}", e);
}
let script = fs::read_to_string(script_path).unwrap();
py.run(&script, None, None).unwrap();
});
});
Ok(())
}
fn main() -> PyResult<()> {
let args: Vec<String> = std::env::args().collect();
let threads = 20;
if args.len() < 2 {
eprintln!("Usage: {} <path_to_python_script>", args[0]);
std::process::exit(1);
}
let script_path = &args[1];
let start = Instant::now();
// Call the execute function
exec_concurrently(script_path, threads)?;
let duration = start.elapsed();
match fs::write("/tmp/elapsed.time", format!("booting time: {:?}", duration)) {
Ok(_) => println!("Successfully wrote elapsed time to /tmp/elapsed.time"),
Err(e) => eprintln!("Failed to write elapsed time: {:?}", e),
}
Ok(())
}

@@ -1,80 +0,0 @@
Introduction to Agents in Swarms
================================
Welcome to the revolutionary world of Agents in Swarms. I'm a big believer in simplicity, modularity, and the power of open collaboration. The same principles apply here.
Agents are the individual building blocks in a swarm. They are the worker bees, each with a specific task, but all working together towards a common goal. In our case, an agent is a combination of a Language Model (LLM), Long Term Memory, and Tools.
In other words, an agent is:
`LLM => Long Term Memory => Tools`
That's it. That's as simple as it can get.
Why does this work? Because each component has a specific, well-defined role. The Language Model is the driving force, generating text based on a given prompt. The Long Term Memory stores information that the agent can draw upon to make its output more coherent and contextually relevant. The Tools provide additional capabilities, such as the ability to parse text, search the web, or interact with APIs.
But the real beauty of this system is not just in the individual components, but in how they work together. The output of one becomes the input of another, creating a feedback loop of continuous learning and improvement.
And the best part? Our Agent classes are designed to be as simple as humanly possible. They are plug-and-play with any of our language model classes, vector stores, and tools. This means you can easily swap out one component for another, allowing for endless customization and flexibility.
The file structure is equally straightforward:
```
* memory
* models
* tools
* utils
```
Each directory contains a different component of the swarm: the `models` directory contains the language models, the `memory` directory contains the long-term memory, the `tools` directory contains the tools, and the `utils` directory contains various utility functions.
Let's see how simple it is to use these components with some examples:
```python
# Import the necessary classes
from swarms.agents import Anthropic, HuggingFaceLLM
# Create an instance of the Anthropic class
anthropic = Anthropic(model="claude-2", max_tokens_to_sample=100, temperature=0.8)
# Use the Anthropic instance to generate text
prompt = "Once upon a time"
stop = ["The end"]
print("Anthropic output:")
print(anthropic.generate(prompt, stop))
# Create an instance of the HuggingFaceLLM class
huggingface = HuggingFaceLLM(model_id="gpt2", device="cpu", max_length=50)
# Use the HuggingFaceLLM instance to generate text
prompt = "Once upon a time"
print("\nHuggingFaceLLM output:")
print(huggingface.generate(prompt))
```
And to build an agent:
```python
from swarms.agents import vectorstore, tool, Agent
# Create an instance of the Agent class
agent = Agent(
llm=huggingface,
memory=vectorstore,
tools=tool,
)
agent.run("Make me an instagram clone")
```
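Because the components are plug-and-play, swapping the underlying LLM is a one-line change. A minimal sketch, reusing the `anthropic` instance from the first example (assuming `Agent` accepts any model with the same `generate` interface):

```python
# Swap the HuggingFace model for the Anthropic model -- the Agent
# interface stays the same.
agent = Agent(
    llm=anthropic,  # was: huggingface
    memory=vectorstore,
    tools=tool,
)

agent.run("Make me an instagram clone")
```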
In conclusion, the Agents in Swarms represent a new way of thinking about AI. They are simple, modular, and highly customizable, allowing you to create powerful AI systems that are more than the sum of their parts. And as always, we're just getting started. There's always room for improvement, for simplification, for making things even better. That's the spirit of open collaboration. That's the spirit of Swarms.
Thanks for becoming an alpha build user; email kye@apac.ai with any complaints.

@@ -14,13 +14,7 @@ class AbstractAgent:
     """

-    def __init__(
-        self,
-        name: str,
-        *args,
-        **kwargs
-    ):
+    def __init__(self, name: str, *args, **kwargs):
         """
         Args:
             name (str): name of the agent.

@@ -1,5 +1,6 @@
 from pydantic import BaseModel

+
 class ActionSubtaskEntry(BaseModel):
     """Used to store ActionSubtask data to preserve TaskMemory pointers and context in the form of thought and action.

@@ -8,6 +8,7 @@ from swarms.utils.loguru_logger import logger
 from swarms.structs.agent import Agent
 from swarms.structs.conversation import Conversation

+
 class AbstractSwarm(ABC):
     """
     Abstract Swarm Class for multi-agent systems
@@ -55,10 +56,9 @@ class AbstractSwarm(ABC):
     """

-    # @abstractmethod
     def __init__(
         self,
         agents: List[Agent],
         max_loops: int = 200,
         callbacks: Optional[Sequence[callable]] = None,
         autosave: bool = False,
@@ -79,6 +79,9 @@ class AbstractSwarm(ABC):
         self.logging = logging
         self.return_metadata = return_metadata
         self.metadata_filename = metadata_filename
+        self.stopping_function = stopping_function
+        self.stopping_condition = stopping_condition
+        self.stopping_condition_args = stopping_condition_args
         self.conversation = Conversation(
             time_enabled=True, *args, **kwargs
         )
@@ -140,8 +143,6 @@ class AbstractSwarm(ABC):
     def step(self):
         """Step the swarm"""

-
-
     # @abstractmethod
     def add_agent(self, agent: "Agent"):
@@ -474,7 +475,6 @@ class AbstractSwarm(ABC):
             None
         """

-    @abstractmethod
     def add_agent_entry(self, agent: Agent):
         """
         Add the information of an Agent to the registry.
@@ -486,7 +486,6 @@ class AbstractSwarm(ABC):
             None
         """

-    @abstractmethod
     def retrieve_swarm_information(self, swarm_id: str):
         """
         Retrieve the information of a specific Swarm from the registry.
@@ -498,7 +497,6 @@ class AbstractSwarm(ABC):
             SwarmManagerBase: Instance of SwarmManagerBase representing the retrieved Swarm, or None if not found.
         """

-    @abstractmethod
     def retrieve_joined_agents(self, agent_id: str) -> List[Agent]:
         """
         Retrieve the information of the Agents which have joined the registry.
@@ -507,7 +505,6 @@ class AbstractSwarm(ABC):
             Agent: Instance of Agent representing the retrieved Agent, or None if not found.
         """

-    @abstractmethod
     def join_swarm(
         self, from_entity: Agent | Agent, to_entity: Agent
     ):

@@ -10,9 +10,9 @@ class SermonSwarm:
         priest (Agent): The priest agent responsible for generating sermons.
         agents (Sequence[Agent]): The list of agents in the swarm.
         max_loops (int, optional): The maximum number of loops to run the agents. Defaults to 5.
         stop_condition (Union[str, List[str]], optional): The condition(s) that can stop the agents.
             Defaults to "stop".
         stop_function (Union[None, Callable], optional): The function to apply to the sermons before
             checking the stop condition. Defaults to None.
     """
@@ -24,7 +24,7 @@ class SermonSwarm:
         stop_condition: Union[str, List[str]] = "stop",
         stop_function: Union[None, Callable] = None,
         *args,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(*args, **kwargs)
         self.priest = priest
@@ -32,7 +32,7 @@ class SermonSwarm:
         self.max_loops = max_loops
         self.stop_condition = stop_condition
         self.stop_function = stop_function

     def run(self, task: str, *args, **kwargs):
         """
         Runs the swarm by generating sermons from the priest and executing the task on each agent.
@@ -43,23 +43,22 @@ class SermonSwarm:
             **kwargs: Additional keyword arguments for the task.
         """
         sermon = self.priest(task, *args, **kwargs)

         # Add the sermon to the memory of all agents
         for agent in self.agents:
             agent.add_message_to_memory(sermon)

         # Then run the agents
         for _ in range(self.max_loops):
             for agent in self.agents:
                 preach = agent.run(task, *args, **kwargs)

                 if self.stop_function:
                     preach = self.stop_function(preach)

                 if self.stop_condition in preach:
                     if self.stop_condition is True:
                         break

                     elif self.stop_condition in preach:
                         break
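For orientation, a minimal usage sketch of the `SermonSwarm` changed above. The agent setup and the import path are assumptions for illustration, not taken from the diff:

```python
from swarms.structs.agent import Agent  # assumed import path

# Hypothetical setup: one priest agent and two listener agents
priest = Agent(agent_name="priest")
flock = [Agent(agent_name=f"listener_{i}") for i in range(2)]

swarm = SermonSwarm(
    priest=priest,
    agents=flock,
    max_loops=3,
    stop_condition="stop",  # stop early when an agent's output contains "stop"
)
swarm.run("Deliver a sermon on patience")
```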

@@ -31,7 +31,6 @@ def sanitize_file_path(file_path: str):
     return sanitized_path

-
 def load_json(json_string: str):
     """
     Loads a JSON string and returns the corresponding Python object.

@@ -0,0 +1,42 @@
import asyncio
import concurrent.futures
from inspect import iscoroutinefunction
from typing import Any, Callable, Dict, List


# Helper function to run an asynchronous function in a synchronous way
def run_async_function_in_sync(func: Callable, *args, **kwargs) -> Any:
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        coroutine = func(*args, **kwargs)
        return loop.run_until_complete(coroutine)
    finally:
        loop.close()


# Main omni function for parallel execution
def omni_parallel_function_caller(
    function_calls: List[Dict[str, Any]]
) -> List[Any]:
    results = []
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future_to_call = {}

        for call in function_calls:
            func = call["function"]
            args = call.get("args", ())
            kwargs = call.get("kwargs", {})

            if iscoroutinefunction(func):
                # Run the coroutine on a fresh event loop in a worker thread
                future = executor.submit(
                    run_async_function_in_sync, func, *args, **kwargs
                )
            else:
                # Directly execute the synchronous function in a thread
                future = executor.submit(func, *args, **kwargs)

            future_to_call[future] = call

        # Collect results as the futures complete
        for future in concurrent.futures.as_completed(future_to_call):
            results.append(future.result())
    return results
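A quick usage sketch of the call-spec format `omni_parallel_function_caller` expects; `fetch` and `add` are hypothetical stand-ins:

```python
import asyncio


async def fetch(url: str) -> str:
    await asyncio.sleep(0.1)  # stand-in for real async I/O
    return f"fetched {url}"


def add(a: int, b: int) -> int:
    return a + b


results = omni_parallel_function_caller([
    {"function": fetch, "args": ("https://example.com",)},
    {"function": add, "kwargs": {"a": 2, "b": 3}},
])
# Results are appended in completion order, e.g. [5, "fetched https://example.com"]
print(results)
```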

@@ -0,0 +1,17 @@
import json

from pydantic import BaseModel


def base_model_schema_to_json(model: type[BaseModel]):
    """
    Converts the JSON schema of a Pydantic base model to a formatted JSON string.

    Args:
        model (type[BaseModel]): The base model class for which to generate the JSON schema.

    Returns:
        str: The JSON schema of the base model as a formatted JSON string.
    """
    return json.dumps(model.model_json_schema(), indent=2)
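A minimal usage sketch; `User` is a hypothetical model. Note that `model_json_schema()` is the Pydantic v2 API, while the pyproject above pins `pydantic = "1.10.12"`, where the equivalent call would be `model.schema()`:

```python
from pydantic import BaseModel


class User(BaseModel):  # hypothetical example model
    id: int
    email: str


print(base_model_schema_to_json(User))
# Prints a pretty-printed JSON Schema with "title": "User"
# and the "id"/"email" properties.
```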