commit
616b3b2723
@ -0,0 +1,15 @@
|
||||
[package]
name = "swarms-runtime"
version = "0.1.0"
edition = "2021"
authors = ["Your Name <your.email@example.com>"]
license = "MIT"
# Shown on crates.io if published; replace the placeholder before release.
description = "A brief description of my project"

[dependencies]
# NOTE(review): `cpython` and `pyo3` are competing Python-binding crates and
# both link against libpython — depending on both in one crate typically causes
# duplicate-symbol/link conflicts. Confirm which one the code actually uses
# (pyo3 appears current) and drop the other.
cpython = "0.5"
rayon = "1.5"

[dependencies.pyo3]
version = "0.20.3"
# NOTE(review): `extension-module` (build an importable Python module; do not
# link libpython) and `auto-initialize` (embed and start the interpreter from
# Rust; requires linking libpython) serve opposite use cases and PyO3 rejects
# the combination in recent releases. Keep only the one matching this crate's
# role — TODO confirm against the build.
features = ["extension-module", "auto-initialize"]
|
@ -0,0 +1,23 @@
|
||||
# Use an official CUDA runtime as a parent image
FROM nvidia/cuda:11.4.2-runtime-ubuntu20.04

# Build-time only: prevent apt packages (e.g. tzdata) from blocking the build
# with interactive prompts. ARG (not ENV) so it does not leak into the runtime image.
ARG DEBIAN_FRONTEND=noninteractive

# Set the working directory in the container to /app
WORKDIR /app

# Install Python tooling; clean the apt cache in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    python3-pip \
    && rm -rf /var/lib/apt/lists/*

# Copy only requirements.txt before installing dependencies so this expensive
# layer is cached unless the requirements themselves change.
COPY requirements.txt /app/
RUN pip3 install --no-cache-dir -r requirements.txt

# Copy the rest of the application source into the container at /app
COPY . /app

# Make port 80 available to the world outside this container
EXPOSE 80

# Define environment variable
# ENV NAME World

# Run example.py when the container launches
CMD ["python3", "example.py"]
|
After Width: | Height: | Size: 40 KiB |
@ -1,7 +1,110 @@
|
||||
This page summarizes questions we were asked on [Discord](https://discord.gg/gnWRz88eym), Hacker News, and Reddit. Feel free to post a question to [Discord](https://discord.gg/gnWRz88eym) or open a discussion on our [Github Page](https://github.com/kyegomez) or hit us up directly: [kye@apac.ai](mailto:kye@apac.ai).
|
||||
### FAQ on Swarm Intelligence and Multi-Agent Systems
|
||||
|
||||
## 1. How is Swarms different from LangChain?
|
||||
#### What is an agent in the context of AI and swarm intelligence?
|
||||
|
||||
Swarms is an open source alternative to LangChain and differs in its approach to creating LLM pipelines and DAGs. In addition to agents, it uses more general-purpose DAGs and pipelines. A close proxy might be *Airflow for LLMs*. Swarms still implements chain of thought logic for prompt tasks that use "tools" but it also supports any type of input / output (images, audio, etc.).
|
||||
In artificial intelligence (AI), an agent refers to an LLM with some objective to accomplish.
|
||||
|
||||
In swarm intelligence, each agent interacts with other agents and possibly the environment to achieve complex collective behaviors or solve problems more efficiently than individual agents could on their own.
|
||||
|
||||
|
||||
#### Why do you need Swarms at all?
|
||||
Individual agents are limited by a vast array of issues such as context window loss, single task execution, hallucination, and no collaboration.
|
||||
|
||||
|
||||
#### How does a swarm work?
|
||||
|
||||
A swarm works through the principles of decentralized control, local interactions, and simple rules followed by each agent. Unlike centralized systems, where a single entity dictates the behavior of all components, in a swarm, each agent makes its own decisions based on local information and interactions with nearby agents. These local interactions lead to the emergence of complex, organized behaviors or solutions at the collective level, enabling the swarm to tackle tasks efficiently.
|
||||
|
||||
#### Why do you need more agents in a swarm?
|
||||
|
||||
More agents in a swarm can enhance its problem-solving capabilities, resilience, and efficiency. With more agents:
|
||||
|
||||
- **Diversity and Specialization**: The swarm can leverage a wider range of skills, knowledge, and perspectives, allowing for more creative and effective solutions to complex problems.
|
||||
- **Scalability**: Adding more agents can increase the swarm's capacity to handle larger tasks or multiple tasks simultaneously.
|
||||
- **Robustness**: A larger number of agents enhances the system's redundancy and fault tolerance, as the failure of a few agents has a minimal impact on the overall performance of the swarm.
|
||||
|
||||
#### Isn't it more expensive to use more agents?
|
||||
|
||||
While deploying more agents can initially increase costs, especially in terms of computational resources, hosting, and potentially API usage, there are several factors and strategies that can mitigate these expenses:
|
||||
|
||||
- **Efficiency at Scale**: Larger swarms can often solve problems more quickly or effectively, reducing the overall computational time and resources required.
|
||||
- **Optimization and Caching**: Implementing optimizations and caching strategies can reduce redundant computations, lowering the workload on individual agents and the overall system.
|
||||
- **Dynamic Scaling**: Utilizing cloud services that offer dynamic scaling can ensure you only pay for the resources you need when you need them, optimizing cost-efficiency.
|
||||
|
||||
#### Can swarms make decisions better than individual agents?
|
||||
|
||||
Yes, swarms can make better decisions than individual agents for several reasons:
|
||||
|
||||
- **Collective Intelligence**: Swarms combine the knowledge and insights of multiple agents, leading to more informed and well-rounded decision-making processes.
|
||||
- **Error Correction**: The collaborative nature of swarms allows for error checking and correction among agents, reducing the likelihood of mistakes.
|
||||
- **Adaptability**: Swarms are highly adaptable to changing environments or requirements, as the collective can quickly reorganize or shift strategies based on new information.
|
||||
|
||||
#### How do agents in a swarm communicate?
|
||||
|
||||
Communication in a swarm can vary based on the design and purpose of the system but generally involves either direct or indirect interactions:
|
||||
|
||||
- **Direct Communication**: Agents exchange information directly through messaging, signals, or other communication protocols designed for the system.
|
||||
- **Indirect Communication**: Agents influence each other through the environment, a method known as stigmergy. Actions by one agent alter the environment, which in turn influences the behavior of other agents.
|
||||
|
||||
#### Are swarms only useful in computational tasks?
|
||||
|
||||
While swarms are often associated with computational tasks, their applications extend far beyond. Swarms can be utilized in:
|
||||
|
||||
- **Robotics**: Coordinating multiple robots for tasks like search and rescue, exploration, or surveillance.
|
||||
- **Environmental Monitoring**: Using sensor networks to monitor pollution, wildlife, or climate conditions.
|
||||
- **Social Sciences**: Modeling social behaviors or economic systems to understand complex societal dynamics.
|
||||
- **Healthcare**: Coordinating care strategies in hospital settings or managing pandemic responses through distributed data analysis.
|
||||
|
||||
#### How do you ensure the security of a swarm system?
|
||||
|
||||
Security in swarm systems involves:
|
||||
|
||||
- **Encryption**: Ensuring all communications between agents are encrypted to prevent unauthorized access or manipulation.
|
||||
- **Authentication**: Implementing strict authentication mechanisms to verify the identity of each agent in the swarm.
|
||||
- **Resilience to Attacks**: Designing the swarm to continue functioning effectively even if some agents are compromised or attacked, utilizing redundancy and fault tolerance strategies.
|
||||
|
||||
#### How do individual agents within a swarm share insights without direct learning mechanisms like reinforcement learning?
|
||||
|
||||
In the context of pre-trained Large Language Models (LLMs) that operate within a swarm, sharing insights typically involves explicit communication and data exchange protocols rather than direct learning mechanisms like reinforcement learning. Here's how it can work:
|
||||
|
||||
- **Shared Databases and Knowledge Bases**: Agents can write to and read from a shared database or knowledge base where insights, generated content, and relevant data are stored. This allows agents to benefit from the collective experience of the swarm by accessing information that other agents have contributed.
|
||||
|
||||
- **APIs for Information Exchange**: Custom APIs can facilitate the exchange of information between agents. Through these APIs, agents can request specific information or insights from others within the swarm, effectively sharing knowledge without direct learning.
|
||||
|
||||
#### How do you balance the autonomy of individual LLMs with the need for coherent collective behavior in a swarm?
|
||||
|
||||
Balancing autonomy with collective coherence in a swarm of LLMs involves:
|
||||
|
||||
- **Central Coordination Mechanism**: Implementing a lightweight central coordination mechanism that can assign tasks, distribute information, and collect outputs from individual LLMs. This ensures that while each LLM operates autonomously, their actions are aligned with the swarm's overall objectives.
|
||||
|
||||
- **Standardized Communication Protocols**: Developing standardized protocols for how LLMs communicate and share information ensures that even though each agent works autonomously, the information exchange remains coherent and aligned with the collective goals.
|
||||
|
||||
#### How do LLM swarms adapt to changing environments or tasks without machine learning techniques?
|
||||
|
||||
Adaptation in LLM swarms, without relying on machine learning techniques for dynamic learning, can be achieved through:
|
||||
|
||||
- **Dynamic Task Allocation**: A central system or distributed algorithm can dynamically allocate tasks to different LLMs based on the changing environment or requirements. This ensures that the most suitable LLMs are addressing tasks for which they are best suited as conditions change.
|
||||
|
||||
- **Pre-trained Versatility**: Utilizing a diverse set of pre-trained LLMs with different specialties or training data allows the swarm to select the most appropriate agent for a task as the requirements evolve.
|
||||
|
||||
- **In-Context Learning**: In-context learning is another mechanism that can be employed within LLM swarms to adapt to changing environments or tasks. By including the collective knowledge and prior outputs of the swarm in an agent's prompt, agents can adjust their behavior and improve performance without any retraining.
|
||||
|
||||
|
||||
#### Can LLM swarms operate in physical environments, or are they limited to digital spaces?
|
||||
|
||||
LLM swarms primarily operate in digital spaces, given their nature as software entities. However, they can interact with physical environments indirectly through interfaces with sensors, actuators, or other devices connected to the Internet of Things (IoT). For example, LLMs can process data from physical sensors and control devices based on their outputs, enabling applications like smart home management or autonomous vehicle navigation.
|
||||
|
||||
#### Without direct learning from each other, how do agents in a swarm improve over time?
|
||||
|
||||
Improvement over time in a swarm of pre-trained LLMs, without direct learning from each other, can be achieved through:
|
||||
|
||||
- **Human Feedback**: Incorporating feedback from human operators or users can guide adjustments to the usage patterns or selection criteria of LLMs within the swarm, optimizing performance based on observed outcomes.
|
||||
|
||||
- **Periodic Re-training and Updating**: The individual LLMs can be periodically re-trained or updated by their developers based on collective insights and feedback from their deployment within swarms. While this does not involve direct learning from each encounter, it allows the LLMs to improve over time based on aggregated experiences.
|
||||
|
||||
These adjustments to the FAQ reflect the specific context of pre-trained LLMs operating within a swarm, focusing on communication, coordination, and adaptation mechanisms that align with their capabilities and constraints.
|
||||
|
||||
|
||||
#### Conclusion
|
||||
|
||||
Swarms represent a powerful paradigm in AI, offering innovative solutions to complex, dynamic problems through collective intelligence and decentralized control. While challenges exist, particularly regarding cost and security, strategic design and management can leverage the strengths of swarm intelligence to achieve remarkable efficiency, adaptability, and robustness in a wide range of applications.
|
@ -0,0 +1,55 @@
|
||||
# The Limits of Individual Agents
|
||||
|
||||

|
||||
|
||||
|
||||
Individual agents have pushed the boundaries of what machines can learn and accomplish. However, despite their impressive capabilities, these agents face inherent limitations that can hinder their effectiveness in complex, real-world applications. This blog explores the critical constraints of individual agents, such as context window limits, hallucination, single-task threading, and lack of collaboration, and illustrates how multi-agent collaboration can address these limitations. In short,
|
||||
|
||||
- Context Window Limits
|
||||
- Single Task Execution
|
||||
- Hallucination
|
||||
- No collaboration
|
||||
|
||||
|
||||
|
||||
#### Context Window Limits
|
||||
|
||||
One of the most significant constraints of individual agents, particularly in the domain of language models, is the context window limit. This limitation refers to the maximum amount of information an agent can consider at any given time. For instance, many language models can only process a fixed number of tokens (words or characters) in a single inference, restricting their ability to understand and generate responses based on longer texts. This limitation can lead to a lack of coherence in longer compositions and an inability to maintain context in extended conversations or documents.
|
||||
|
||||
#### Hallucination
|
||||
|
||||
Hallucination in AI refers to the phenomenon where an agent generates information that is not grounded in the input data or real-world facts. This can manifest as making up facts, entities, or events that do not exist or are incorrect. Hallucinations pose a significant challenge in ensuring the reliability and trustworthiness of AI-generated content, particularly in critical applications such as news generation, academic research, and legal advice.
|
||||
|
||||
#### Single Task Threading
|
||||
|
||||
Individual agents are often designed to excel at specific tasks, leveraging their architecture and training data to optimize performance in a narrowly defined domain. However, this specialization can also be a drawback, as it limits the agent's ability to multitask or adapt to tasks that fall outside its primary domain. Single-task threading means an agent may excel in language translation but struggle with image recognition or vice versa, necessitating the deployment of multiple specialized agents for comprehensive AI solutions.
|
||||
|
||||
#### Lack of Collaboration
|
||||
|
||||
Traditional AI agents operate in isolation, processing inputs and generating outputs independently. This isolation limits their ability to leverage diverse perspectives, share knowledge, or build upon the insights of other agents. In complex problem-solving scenarios, where multiple facets of a problem need to be addressed simultaneously, this lack of collaboration can lead to suboptimal solutions or an inability to tackle multifaceted challenges effectively.
|
||||
|
||||
# The Elegant yet Simple Solution
|
||||
|
||||
- ## Multi-Agent Collaboration
|
||||
|
||||
Recognizing the limitations of individual agents, researchers and practitioners have explored the potential of multi-agent collaboration as a means to transcend these constraints. Multi-agent systems comprise several agents that can interact, communicate, and collaborate to achieve common goals or solve complex problems. This collaborative approach offers several advantages:
|
||||
|
||||
#### Overcoming Context Window Limits
|
||||
|
||||
By dividing a large task among multiple agents, each focusing on different segments of the problem, multi-agent systems can effectively overcome the context window limits of individual agents. For instance, in processing a long document, different agents could be responsible for understanding and analyzing different sections, pooling their insights to generate a coherent understanding of the entire text.
|
||||
|
||||
#### Mitigating Hallucination
|
||||
|
||||
Through collaboration, agents can cross-verify facts and information, reducing the likelihood of hallucinations. If one agent generates a piece of information, other agents can provide checks and balances, verifying the accuracy against known data or through consensus mechanisms.
|
||||
|
||||
#### Enhancing Multitasking Capabilities
|
||||
|
||||
Multi-agent systems can tackle tasks that require a diverse set of skills by leveraging the specialization of individual agents. For example, in a complex project that involves both natural language processing and image analysis, one agent specialized in text can collaborate with another specialized in visual data, enabling a comprehensive approach to the task.
|
||||
|
||||
#### Facilitating Collaboration and Knowledge Sharing
|
||||
|
||||
Multi-agent collaboration inherently encourages the sharing of knowledge and insights, allowing agents to learn from each other and improve their collective performance. This can be particularly powerful in scenarios where iterative learning and adaptation are crucial, such as dynamic environments or tasks that evolve over time.
|
||||
|
||||
### Conclusion
|
||||
|
||||
While individual AI agents have made remarkable strides in various domains, their inherent limitations necessitate innovative approaches to unlock the full potential of artificial intelligence. Multi-agent collaboration emerges as a compelling solution, offering a pathway to transcend individual constraints through collective intelligence. By harnessing the power of collaborative AI, we can address more complex, multifaceted problems, paving the way for more versatile, efficient, and effective AI systems in the future.
|
@ -0,0 +1,134 @@
|
||||
# **Documentation for `swarms.structs.JSON` Class**
|
||||
|
||||
The `swarms.structs.JSON` class is a helper class that provides a templated framework for creating new classes that deal with JSON objects and need to validate these objects against a JSON Schema. Being an abstract base class (ABC), the `JSON` class allows for the creation of subclasses that implement specific behavior while ensuring that they all adhere to a common interface, particularly the `validate` method.
|
||||
|
||||
The documentation below outlines the `JSON` class. It is an overview intended as a starting point, and each section should be expanded with additional detail and examples for complete, professional documentation.
|
||||
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
||||
JSON (JavaScript Object Notation) is a lightweight data interchange format that is easy for humans to read and write and easy for machines to parse and generate. `swarms.structs.JSON` class aims to provide a basic structure for utilizing JSON and validating it against a pre-defined schema. This is essential for applications where data integrity and structure are crucial, such as configurations for applications, communications over networks, and data storage.
|
||||
|
||||
## Class Definition
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|---------------|------------|------------------------------------|
|
||||
| `schema_path` | `str` | The path to the JSON schema file. |
|
||||
|
||||
### `JSON.__init__(self, schema_path)`
|
||||
Class constructor that initializes a `JSON` object with the specified JSON schema path.
|
||||
```python
|
||||
def __init__(self, schema_path):
|
||||
self.schema_path = schema_path
|
||||
self.schema = self.load_schema()
|
||||
```
|
||||
|
||||
### `JSON.load_schema(self)`
|
||||
Private method that loads and returns the JSON schema from the file specified at the `schema_path`.
|
||||
|
||||
### `JSON.validate(self, data)`
|
||||
Abstract method that needs to be implemented by subclasses to validate input `data` against the JSON schema.
|
||||
|
||||
---
|
||||
|
||||
## Functionality and Usage
|
||||
|
||||
### Why use `JSON` Class
|
||||
|
||||
The `JSON` class abstracts away the details of loading and validating JSON data, allowing for easy implementation in any subclass that needs to handle JSON input. It sets up a standard for all subclasses to follow, ensuring consistency across different parts of code or different projects.
|
||||
|
||||
By enforcing a JSON schema, the `JSON` class helps maintain the integrity of the data, catching errors early in the process of reading and writing JSON.
|
||||
|
||||
### Step-by-step Guide
|
||||
|
||||
1. Subclass the `JSON` class.
|
||||
2. Provide an implementation for the `validate` method.
|
||||
3. Use the provided schema to enforce required fields and types within your JSON data.
|
||||
|
||||
---
|
||||
|
||||
## Example Usage
|
||||
|
||||
### Implementing a Subclass
|
||||
|
||||
Suppose we have a JSON Schema in `config_schema.json` for application configuration.
|
||||
|
||||
```json
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"debug": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"window_size": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "number"
|
||||
},
|
||||
"minItems": 2,
|
||||
"maxItems": 2
|
||||
}
|
||||
},
|
||||
"required": ["debug", "window_size"]
|
||||
}
|
||||
```
|
||||
|
||||
Now we'll create a subclass `AppConfig` that uses this schema.
|
||||
|
||||
```python
|
||||
from swarms.structs import JSON
|
||||
|
||||
|
||||
class AppConfig(JSON):
|
||||
def __init__(self, schema_path):
|
||||
super().__init__(schema_path)
|
||||
|
||||
def validate(self, config_data):
|
||||
# Here we'll use a JSON Schema validation library like jsonschema
|
||||
from jsonschema import ValidationError, validate
|
||||
|
||||
try:
|
||||
validate(instance=config_data, schema=self.schema)
|
||||
except ValidationError as e:
|
||||
print(f"Invalid configuration: {e}")
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
# Main Example Usage
|
||||
|
||||
if __name__ == "__main__":
|
||||
config = {"debug": True, "window_size": [800, 600]}
|
||||
|
||||
app_config = AppConfig("config_schema.json")
|
||||
|
||||
if app_config.validate(config):
|
||||
print("Config is valid!")
|
||||
else:
|
||||
print("Config is invalid.")
|
||||
```
|
||||
|
||||
In this example, an `AppConfig` class that inherits from `JSON` is created. The `validate` method is implemented to check whether a configuration dictionary is valid against the provided schema.
|
||||
|
||||
### Note
|
||||
|
||||
- Validate real JSON data using this class in a production environment.
|
||||
- Catch and handle any exceptions as necessary to avoid application crashes.
|
||||
- Extend functionality within subclasses as required for your application.
|
||||
|
||||
---
|
||||
|
||||
## Additional Information and Tips
|
||||
|
||||
- Use detailed JSON Schemas for complex data validation.
|
||||
- Use the jsonschema library for advanced validation features.
|
||||
|
||||
## References and Resources
|
||||
|
||||
- Official Python Documentation for ABCs: https://docs.python.org/3/library/abc.html
|
||||
- JSON Schema: https://json-schema.org/
|
||||
- jsonschema Python package: https://pypi.org/project/jsonschema/
|
||||
|
||||
This generated documentation serves as a template and starting point intended for creating in-depth, practical documentation. Expanding upon each section, in practice, would involve deeper code examples, common patterns and pitfalls, and more thorough explanations of the `JSON` class internals and how to best utilize them in various real-world scenarios.
|
@ -0,0 +1,111 @@
|
||||
# `MajorityVoting` Documentation
|
||||
|
||||
## Overview
|
||||
|
||||
The `swarms.structs` library provides a flexible architecture for creating and managing swarms of agents capable of performing tasks and making decisions based on majority voting. This documentation will guide you through the `MajorityVoting` class, explaining its purpose, architecture, and usage with examples.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Installation](#installation)
|
||||
- [The `MajorityVoting` Class](#the-majorityvoting-class)
|
||||
- [Class Definition](#class-definition)
|
||||
- [Parameters](#parameters)
|
||||
- [Methods](#methods)
|
||||
- [`__init__`](#__init__)
|
||||
- [`run`](#run)
|
||||
- [Usage Examples](#usage-examples)
|
||||
- [Basic Usage](#basic-usage)
|
||||
- [Concurrent Execution](#concurrent-execution)
|
||||
- [Asynchronous Execution](#asynchronous-execution)
|
||||
- [Advanced Features](#advanced-features)
|
||||
- [Troubleshooting and FAQ](#troubleshooting-and-faq)
|
||||
- [Conclusion](#conclusion)
|
||||
- [References](#references)
|
||||
|
||||
## Introduction
|
||||
|
||||
The `swarms.structs` library introduces a mode of distributed computation through "agents" that collaborate to determine the outcome of tasks using a majority voting system. It becomes crucial in scenarios where collective decision-making is preferred over individual agent accuracy.
|
||||
|
||||
## Installation
|
||||
|
||||
To install the `swarms.structs` library, run the following command:
|
||||
|
||||
```bash
|
||||
pip install swarms-structs
|
||||
```
|
||||
|
||||
## The `MajorityVoting` Class
|
||||
|
||||
The `MajorityVoting` class is a high-level abstraction used to coordinate a group of agents that perform tasks and return results. These results are then aggregated to form a majority vote, determining the final output.
|
||||
|
||||
### Class Definition
|
||||
|
||||
### Parameters
|
||||
|
||||
| Parameter | Type | Default | Description |
|
||||
|-----------------|------------|----------|----------------------------------------------------------------------|
|
||||
| agents | List[Agent]| Required | A list of agent instances to participate in the voting process. |
|
||||
| concurrent | bool | False | Enables concurrent execution using threading if set to `True`. |
|
||||
| multithreaded | bool | False | Enables execution using multiple threads if set to `True`. |
|
||||
| multiprocess | bool | False | Enables execution using multiple processes if set to `True`. |
|
||||
| asynchronous | bool | False | Enables asynchronous execution if set to `True`. |
|
||||
| output_parser | callable | None | A function to parse the output from the majority voting function. |
|
||||
| autosave | bool | False | Enables automatic saving of the process state if set to `True`. (currently not used in source code) |
|
||||
| verbose | bool | False | Enables verbose logging if set to `True`. |
|
||||
|
||||
### Methods
|
||||
|
||||
#### `__init__`
|
||||
|
||||
The constructor for the `MajorityVoting` class. Initializes a new majority voting system with the given configuration.
|
||||
|
||||
*This method doesn't return any value.*
|
||||
|
||||
#### `run`
|
||||
|
||||
Executes the given task by all participating agents and aggregates the results through majority voting.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|-----------|----------------------------------|
|
||||
| task | str | The task to be performed. |
|
||||
| *args | list | Additional positional arguments. |
|
||||
| **kwargs | dict | Additional keyword arguments. |
|
||||
|
||||
*Returns:* List[Any] - The result based on the majority vote.
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```python
|
||||
from swarms.structs.agent import Agent
|
||||
from swarms.structs.majority_voting import MajorityVoting
|
||||
|
||||
|
||||
def create_agent(name):
|
||||
return Agent(name)
|
||||
|
||||
|
||||
agents = [create_agent(name) for name in ["GPT-3", "Codex", "Tabnine"]]
|
||||
majority_voting = MajorityVoting(agents)
|
||||
result = majority_voting.run("What is the capital of France?")
|
||||
print(result) # Output: Paris
|
||||
```
|
||||
|
||||
### Concurrent Execution
|
||||
|
||||
```python
|
||||
majority_voting = MajorityVoting(agents, concurrent=True)
|
||||
result = majority_voting.run("What is the largest continent?")
|
||||
print(result) # Example Output: Asia
|
||||
```
|
||||
|
||||
### Asynchronous Execution
|
||||
|
||||
```python
|
||||
majority_voting = MajorityVoting(agents, asynchronous=True)
|
||||
result = majority_voting.run("What is the square root of 16?")
|
||||
print(result) # Output: 4
|
||||
```
|
||||
|
@ -0,0 +1,130 @@
|
||||
|
||||
# `TaskQueueBase`
|
||||
|
||||
## Introduction
|
||||
The `swarms.structs` library is a key component of a multi-agent system's task management infrastructure. It provides the necessary classes and methods to create and manage queues of tasks that can be distributed among a swarm of agents. The purpose of this documentation is to guide users through the proper use of the `TaskQueueBase` class, which serves as an abstract base class for implementing task queues.
|
||||
|
||||
## TaskQueueBase Class
|
||||
|
||||
```python
|
||||
import threading
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
# Include any additional imports that are relevant to decorators and other classes such as Task and Agent if needed
|
||||
|
||||
# Definition of the synchronized_queue decorator (if necessary)
|
||||
|
||||
|
||||
class TaskQueueBase(ABC):
|
||||
def __init__(self):
|
||||
self.lock = threading.Lock()
|
||||
|
||||
@synchronized_queue
|
||||
@abstractmethod
|
||||
def add_task(self, task: Task) -> bool:
|
||||
pass
|
||||
|
||||
@synchronized_queue
|
||||
@abstractmethod
|
||||
def get_task(self, agent: Agent) -> Task:
|
||||
pass
|
||||
|
||||
@synchronized_queue
|
||||
@abstractmethod
|
||||
def complete_task(self, task_id: str):
|
||||
pass
|
||||
|
||||
@synchronized_queue
|
||||
@abstractmethod
|
||||
def reset_task(self, task_id: str):
|
||||
pass
|
||||
```
|
||||
|
||||
### Architecture and Purpose
|
||||
The `TaskQueueBase` class provides an abstract interface for task queue implementations. This class uses the `threading.Lock` to ensure mutual exclusion, making it suitable for concurrent environments. The `@synchronized_queue` decorator implies that each method should be synchronized to prevent race conditions.
|
||||
|
||||
Tasks are generally represented by the `Task` class, and agents by the `Agent` class. Implementations of the `TaskQueueBase` will provide the logic to store tasks, distribute them to agents, and manage their lifecycles.
|
||||
|
||||
#### Methods and Their Arguments
|
||||
|
||||
Here's an overview of each method and its arguments:
|
||||
|
||||
| Method | Arguments | Return Type | Description |
|
||||
|----------------|----------------|-------------|-----------------------------------------------------------------------------------------------|
|
||||
| add_task | task (Task) | bool | Adds a task to the queue and returns True if successfully added, False otherwise. |
|
||||
| get_task | agent (Agent) | Task | Retrieves the next task for the given agent. |
|
||||
| complete_task | task_id (str) | None | Marks the task identified by task_id as completed. |
|
||||
| reset_task | task_id (str) | None | Resets the task identified by task_id, typically done if an agent fails to complete the task. |
|
||||
|
||||
### Example Usage
|
||||
|
||||
Below are three examples of how the `TaskQueueBase` class can be implemented and used.
|
||||
|
||||
**Note:** The actual code for decorators, Task, Agent, and concrete implementations of `TaskQueueBase` is not provided and should be created as per specific requirements.
|
||||
|
||||
#### Example 1: Basic Implementation
|
||||
|
||||
```python
|
||||
# file: basic_queue.py
|
||||
|
||||
# Assume synchronized_queue decorator is defined elsewhere
|
||||
from decorators import synchronized_queue
|
||||
|
||||
from swarms.structs import Agent, Task, TaskQueueBase
|
||||
|
||||
|
||||
class BasicTaskQueue(TaskQueueBase):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.tasks = []
|
||||
|
||||
@synchronized_queue
|
||||
def add_task(self, task: Task) -> bool:
|
||||
self.tasks.append(task)
|
||||
return True
|
||||
|
||||
@synchronized_queue
|
||||
def get_task(self, agent: Agent) -> Task:
|
||||
return self.tasks.pop(0)
|
||||
|
||||
@synchronized_queue
|
||||
def complete_task(self, task_id: str):
|
||||
# Logic to mark task as completed
|
||||
pass
|
||||
|
||||
@synchronized_queue
|
||||
def reset_task(self, task_id: str):
|
||||
# Logic to reset the task
|
||||
pass
|
||||
|
||||
|
||||
# Usage
|
||||
queue = BasicTaskQueue()
|
||||
# Add task, assuming Task object is created
|
||||
queue.add_task(someTask)
|
||||
# Get task for an agent, assuming Agent object is created
|
||||
task = queue.get_task(someAgent)
|
||||
```
|
||||
|
||||
#### Example 2: Priority Queue Implementation
|
||||
|
||||
```python
|
||||
# file: priority_queue.py
|
||||
# Similar to example 1, but tasks are managed based on priority within add_task and get_task methods
|
||||
```
|
||||
|
||||
#### Example 3: Persistent Queue Implementation
|
||||
|
||||
```python
|
||||
# file: persistent_queue.py
|
||||
# An example demonstrating tasks being saved to a database or filesystem. Methods would include logic for persistence.
|
||||
```
|
||||
|
||||
### Additional Information and Common Issues
|
||||
|
||||
This section would provide insights on thread safety, error handling, and best practices in working with task queues in a multi-agent system.
|
||||
|
||||
### References
|
||||
|
||||
Links to further resources and any academic papers or external documentation related to task queues and multi-agent systems would be included here.
|
||||
|
@ -1,249 +0,0 @@
|
||||
# `ModelParallelizer` Documentation
|
||||
|
||||
## Table of Contents
|
||||
1. [Understanding the Purpose](#understanding-the-purpose)
|
||||
2. [Overview and Introduction](#overview-and-introduction)
|
||||
3. [Class Definition](#class-definition)
|
||||
4. [Functionality and Usage](#functionality-and-usage)
|
||||
5. [Additional Information](#additional-information)
|
||||
6. [Examples](#examples)
|
||||
7. [Conclusion](#conclusion)
|
||||
|
||||
## 1. Understanding the Purpose <a name="understanding-the-purpose"></a>
|
||||
|
||||
To create comprehensive documentation for the `ModelParallelizer` class, let's begin by understanding its purpose and functionality.
|
||||
|
||||
### Purpose and Functionality
|
||||
|
||||
`ModelParallelizer` is a class designed to facilitate the orchestration of multiple Language Model Models (LLMs) to perform various tasks simultaneously. It serves as a powerful tool for managing, distributing, and collecting responses from these models.
|
||||
|
||||
Key features and functionality include:
|
||||
|
||||
- **Parallel Task Execution**: `ModelParallelizer` can distribute tasks to multiple LLMs and execute them in parallel, improving efficiency and reducing response time.
|
||||
|
||||
- **Structured Response Presentation**: The class presents the responses from LLMs in a structured tabular format, making it easy for users to compare and analyze the results.
|
||||
|
||||
- **Task History Tracking**: `ModelParallelizer` keeps a record of tasks that have been submitted, allowing users to review previous tasks and responses.
|
||||
|
||||
- **Asynchronous Execution**: The class provides options for asynchronous task execution, which can be particularly useful for handling a large number of tasks.
|
||||
|
||||
Now that we have an understanding of its purpose, let's proceed to provide a detailed overview and introduction.
|
||||
|
||||
## 2. Overview and Introduction <a name="overview-and-introduction"></a>
|
||||
|
||||
### Overview
|
||||
|
||||
The `ModelParallelizer` class is a crucial component for managing and utilizing multiple LLMs in various natural language processing (NLP) tasks. Its architecture and functionality are designed to address the need for parallel processing and efficient response handling.
|
||||
|
||||
### Importance and Relevance
|
||||
|
||||
In the rapidly evolving field of NLP, it has become common to use multiple language models to achieve better results in tasks such as translation, summarization, and question answering. `ModelParallelizer` streamlines this process by allowing users to harness the capabilities of several LLMs simultaneously.
|
||||
|
||||
Key points:
|
||||
|
||||
- **Parallel Processing**: `ModelParallelizer` leverages multithreading to execute tasks concurrently, significantly reducing the time required for processing.
|
||||
|
||||
- **Response Visualization**: The class presents responses in a structured tabular format, enabling users to visualize and analyze the outputs from different LLMs.
|
||||
|
||||
- **Task Tracking**: Developers can track the history of tasks submitted to `ModelParallelizer`, making it easier to manage and monitor ongoing work.
|
||||
|
||||
### Architecture and How It Works
|
||||
|
||||
The architecture and working of `ModelParallelizer` can be summarized in four steps:
|
||||
|
||||
1. **Task Reception**: `ModelParallelizer` receives a task from the user.
|
||||
|
||||
2. **Task Distribution**: The class distributes the task to all registered LLMs.
|
||||
|
||||
3. **Response Collection**: `ModelParallelizer` collects the responses generated by the LLMs.
|
||||
|
||||
4. **Response Presentation**: Finally, the class presents the responses from all LLMs in a structured tabular format, making it easy for users to compare and analyze the results.
|
||||
|
||||
Now that we have an overview, let's proceed with a detailed class definition.
|
||||
|
||||
## 3. Class Definition <a name="class-definition"></a>
|
||||
|
||||
### Class Attributes
|
||||
|
||||
- `llms`: A list of LLMs (Language Model Models) that `ModelParallelizer` manages.
|
||||
|
||||
- `last_responses`: Stores the responses from the most recent task.
|
||||
|
||||
- `task_history`: Keeps a record of all tasks submitted to `ModelParallelizer`.
|
||||
|
||||
### Methods
|
||||
|
||||
The `ModelParallelizer` class defines various methods to facilitate task distribution, execution, and response presentation. Let's examine some of the key methods:
|
||||
|
||||
- `run(task)`: Distributes a task to all LLMs, collects responses, and returns them.
|
||||
|
||||
- `print_responses(task)`: Prints responses from all LLMs in a structured tabular format.
|
||||
|
||||
- `run_all(task)`: Runs the task on all LLMs sequentially and returns responses.
|
||||
|
||||
- `arun_all(task)`: Asynchronously runs the task on all LLMs and returns responses.
|
||||
|
||||
- `print_arun_all(task)`: Prints responses from all LLMs after asynchronous execution.
|
||||
|
||||
- `save_responses_to_file(filename)`: Saves responses to a file for future reference.
|
||||
|
||||
- `load_llms_from_file(filename)`: Loads LLMs from a file, making it easy to configure `ModelParallelizer` for different tasks.
|
||||
|
||||
- `get_task_history()`: Retrieves the task history, allowing users to review previous tasks.
|
||||
|
||||
- `summary()`: Provides a summary of task history and the last responses, aiding in post-processing and analysis.
|
||||
|
||||
Now that we have covered the class definition, let's delve into the functionality and usage of `ModelParallelizer`.
|
||||
|
||||
## 4. Functionality and Usage <a name="functionality-and-usage"></a>
|
||||
|
||||
### Distributing a Task and Collecting Responses
|
||||
|
||||
One of the primary use cases of `ModelParallelizer` is to distribute a task to all registered LLMs and collect their responses. This can be achieved using the `run(task)` method. Below is an example:
|
||||
|
||||
```python
|
||||
parallelizer = ModelParallelizer(llms)
|
||||
responses = parallelizer.run("Translate the following English text to French: 'Hello, how are you?'")
|
||||
```
|
||||
|
||||
### Printing Responses
|
||||
|
||||
To present the responses from all LLMs in a structured tabular format, use the `print_responses(task)` method. Example:
|
||||
|
||||
```python
|
||||
parallelizer.print_responses("Summarize the main points of 'War and Peace.'")
|
||||
```
|
||||
|
||||
### Saving Responses to a File
|
||||
|
||||
Users can save the responses to a file using the `save_responses_to_file(filename)` method. This is useful for archiving and reviewing responses later. Example:
|
||||
|
||||
```python
|
||||
parallelizer.save_responses_to_file("responses.txt")
|
||||
```
|
||||
|
||||
### Task History
|
||||
|
||||
The `ModelParallelizer` class keeps track of the task history. Developers can access the task history using the `get_task_history()` method. Example:
|
||||
|
||||
```python
|
||||
task_history = parallelizer.get_task_history()
|
||||
for i, task in enumerate(task_history):
|
||||
print(f"Task {i + 1}: {task}")
|
||||
```
|
||||
|
||||
## 5. Additional Information <a name="additional-information"></a>
|
||||
|
||||
### Parallel Execution
|
||||
|
||||
`ModelParallelizer` employs multithreading to execute tasks concurrently. This parallel processing capability significantly improves the efficiency of handling multiple tasks simultaneously.
|
||||
|
||||
### Response Visualization
|
||||
|
||||
The structured tabular format used for presenting responses simplifies the comparison and analysis of outputs from different LLMs.
|
||||
|
||||
## 6. Examples <a name="examples"></a>
|
||||
|
||||
Let's explore additional usage examples to illustrate the versatility of `ModelParallelizer` in handling various NLP tasks.
|
||||
|
||||
### Example 1: Sentiment Analysis
|
||||
|
||||
```python
|
||||
from swarms.models import OpenAIChat
|
||||
from swarms.swarms import ModelParallelizer
|
||||
from swarms.workers.worker import Worker
|
||||
|
||||
# Create an instance of an LLM for sentiment analysis
|
||||
llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5)
|
||||
|
||||
# Create worker agents
|
||||
worker1 = Worker(
|
||||
llm=llm,
|
||||
ai_name="Bumble Bee",
|
||||
ai_role="Worker in a swarm",
|
||||
external_tools=None,
|
||||
human_in_the_loop=False,
|
||||
temperature=0.5,
|
||||
)
|
||||
worker2 = Worker
|
||||
|
||||
(
|
||||
llm=llm,
|
||||
ai_name="Optimus Prime",
|
||||
ai_role="Worker in a swarm",
|
||||
external_tools=None,
|
||||
human_in_the_loop=False,
|
||||
temperature=0.5,
|
||||
)
|
||||
worker3 = Worker(
|
||||
llm=llm,
|
||||
ai_name="Megatron",
|
||||
ai_role="Worker in a swarm",
|
||||
external_tools=None,
|
||||
human_in_the_loop=False,
|
||||
temperature=0.5,
|
||||
)
|
||||
|
||||
# Register the worker agents with ModelParallelizer
|
||||
agents = [worker1, worker2, worker3]
|
||||
parallelizer = ModelParallelizer(agents)
|
||||
|
||||
# Task for sentiment analysis
|
||||
task = "Please analyze the sentiment of the following sentence: 'This movie is amazing!'"
|
||||
|
||||
# Print responses from all agents
|
||||
parallelizer.print_responses(task)
|
||||
```
|
||||
|
||||
### Example 2: Translation
|
||||
|
||||
```python
|
||||
from swarms.models import OpenAIChat
|
||||
|
||||
from swarms.swarms import ModelParallelizer
|
||||
|
||||
# Define LLMs for translation tasks
|
||||
translator1 = OpenAIChat(model_name="translator-en-fr", openai_api_key="api-key", temperature=0.7)
|
||||
translator2 = OpenAIChat(model_name="translator-en-es", openai_api_key="api-key", temperature=0.7)
|
||||
translator3 = OpenAIChat(model_name="translator-en-de", openai_api_key="api-key", temperature=0.7)
|
||||
|
||||
# Register translation agents with ModelParallelizer
|
||||
translators = [translator1, translator2, translator3]
|
||||
parallelizer = ModelParallelizer(translators)
|
||||
|
||||
# Task for translation
|
||||
task = "Translate the following English text to French: 'Hello, how are you?'"
|
||||
|
||||
# Print translated responses from all agents
|
||||
parallelizer.print_responses(task)
|
||||
```
|
||||
|
||||
### Example 3: Summarization
|
||||
|
||||
```python
|
||||
from swarms.models import OpenAIChat
|
||||
|
||||
from swarms.swarms import ModelParallelizer
|
||||
|
||||
|
||||
# Define LLMs for summarization tasks
|
||||
summarizer1 = OpenAIChat(model_name="summarizer-en", openai_api_key="api-key", temperature=0.6)
|
||||
summarizer2 = OpenAIChat(model_name="summarizer-en", openai_api_key="api-key", temperature=0.6)
|
||||
summarizer3 = OpenAIChat(model_name="summarizer-en", openai_api_key="api-key", temperature=0.6)
|
||||
|
||||
# Register summarization agents with ModelParallelizer
|
||||
summarizers = [summarizer1, summarizer2, summarizer3]
|
||||
parallelizer = ModelParallelizer(summarizers)
|
||||
|
||||
# Task for summarization
|
||||
task = "Summarize the main points of the article titled 'Climate Change and Its Impact on the Environment.'"
|
||||
|
||||
# Print summarized responses from all agents
|
||||
parallelizer.print_responses(task)
|
||||
```
|
||||
|
||||
## 7. Conclusion <a name="conclusion"></a>
|
||||
|
||||
In conclusion, the `ModelParallelizer` class is a powerful tool for managing and orchestrating multiple Language Model Models in natural language processing tasks. Its ability to distribute tasks, collect responses, and present them in a structured format makes it invaluable for streamlining NLP workflows. By following the provided documentation, users can harness the full potential of `ModelParallelizer` to enhance their natural language processing projects.
|
||||
|
||||
For further information on specific LLMs or advanced usage, refer to the documentation of the respective models and their APIs. Additionally, external resources on parallel execution and response visualization can provide deeper insights into these topics.
|
@ -0,0 +1,53 @@
|
||||
# Why Swarms?
|
||||
|
||||
The need for multiple agents to work together in artificial intelligence (AI) and particularly in the context of Large Language Models (LLMs) stems from several inherent limitations and challenges in handling complex, dynamic, and multifaceted tasks with single-agent systems. Collaborating with multiple agents offers a pathway to enhance computational efficiency, cognitive diversity, and problem-solving capabilities. This section delves into the rationale behind employing multi-agent systems and strategizes on overcoming the associated expenses, such as API bills and hosting costs.
|
||||
|
||||
### Why Multiple Agents Are Necessary
|
||||
|
||||
#### 1. **Cognitive Diversity**
|
||||
|
||||
Different agents can bring varied perspectives, knowledge bases, and problem-solving approaches to a task. This diversity is crucial in complex problem-solving scenarios where a single approach might not be sufficient. Cognitive diversity enhances creativity, leading to innovative solutions and the ability to tackle a broader range of problems.
|
||||
|
||||
#### 2. **Specialization and Expertise**
|
||||
|
||||
In many cases, tasks are too complex for a single agent to handle efficiently. By dividing the task among multiple specialized agents, each can focus on a segment where it excels, thereby increasing the overall efficiency and effectiveness of the solution. This approach leverages the expertise of individual agents to achieve superior performance in tasks that require multifaceted knowledge and skills.
|
||||
|
||||
#### 3. **Scalability and Flexibility**
|
||||
|
||||
Multi-agent systems can more easily scale to handle large-scale or evolving tasks. Adding more agents to the system can increase its capacity or capabilities, allowing it to adapt to larger workloads or new types of tasks. This scalability is essential in dynamic environments where the demand and nature of tasks can change rapidly.
|
||||
|
||||
#### 4. **Robustness and Redundancy**
|
||||
|
||||
Collaboration among multiple agents enhances the system's robustness by introducing redundancy. If one agent fails or encounters an error, others can compensate, ensuring the system remains operational. This redundancy is critical in mission-critical applications where failure is not an option.
|
||||
|
||||
### Overcoming Expenses with API Bills and Hosting
|
||||
|
||||
Deploying multiple agents, especially when relying on cloud-based services or APIs, can incur significant costs. Here are strategies to manage and reduce these expenses:
|
||||
|
||||
#### 1. **Optimize Agent Efficiency**
|
||||
|
||||
Before scaling up the number of agents, ensure each agent operates as efficiently as possible. This can involve refining algorithms, reducing unnecessary API calls, and optimizing data processing to minimize computational requirements and, consequently, the associated costs.
|
||||
|
||||
#### 2. **Use Open Source and Self-Hosted Solutions**
|
||||
|
||||
Where possible, leverage open-source models and technologies that can be self-hosted. While there is an initial investment in setting up the infrastructure, over time, self-hosting can significantly reduce costs related to API calls and reliance on third-party services.
|
||||
|
||||
#### 3. **Implement Intelligent Caching**
|
||||
|
||||
Caching results for frequently asked questions or common tasks can drastically reduce the need for repeated computations or API calls. Intelligent caching systems can determine what information to store and for how long, optimizing the balance between fresh data and computational savings.
|
||||
|
||||
#### 4. **Dynamic Scaling and Load Balancing**
|
||||
|
||||
Use cloud services that offer dynamic scaling and load balancing to adjust the resources allocated based on the current demand. This ensures you're not paying for idle resources during low-usage periods while still being able to handle high demand when necessary.
|
||||
|
||||
#### 5. **Collaborative Cost-Sharing Models**
|
||||
|
||||
In scenarios where multiple stakeholders benefit from the multi-agent system, consider implementing a cost-sharing model. This approach distributes the financial burden among the users or beneficiaries, making it more sustainable.
|
||||
|
||||
#### 6. **Monitor and Analyze Costs**
|
||||
|
||||
Regularly monitor and analyze your usage and associated costs to identify potential savings. Many cloud providers offer tools to track and forecast expenses, helping you to adjust your usage patterns and configurations to minimize costs without sacrificing performance.
|
||||
|
||||
### Conclusion
|
||||
|
||||
The collaboration of multiple agents in AI systems presents a robust solution to the complexity, specialization, scalability, and robustness challenges inherent in single-agent approaches. While the associated costs can be significant, strategic optimization, leveraging open-source technologies, intelligent caching, dynamic resource management, collaborative cost-sharing, and diligent monitoring can mitigate these expenses. By adopting these strategies, organizations can harness the power of multi-agent systems to tackle complex problems more effectively and efficiently, ensuring the sustainable deployment of these advanced technologies.
|
@ -0,0 +1,49 @@
|
||||
import timeit
|
||||
|
||||
from swarms import Agent, ConcurrentWorkflow, Task
|
||||
from swarms.agents.multion_agent import MultiOnAgent
|
||||
|
||||
# model
|
||||
model = MultiOnAgent(multion_api_key="api-key")
|
||||
|
||||
|
||||
# out = model.run("search for a recipe")
|
||||
agent = Agent(
|
||||
agent_name="MultiOnAgent",
|
||||
description="A multi-on agent that performs browsing tasks.",
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
system_prompt=None,
|
||||
)
|
||||
|
||||
# logger.info("[Agent][ID][MultiOnAgent][Initialized][Successfully")
|
||||
|
||||
# Task
|
||||
task = Task(
|
||||
agent=agent,
|
||||
description="Download https://www.coachcamel.com/",
|
||||
)
|
||||
|
||||
# Swarm
|
||||
# logger.info(
|
||||
# f"Running concurrent workflow with task: {task.description}"
|
||||
# )
|
||||
|
||||
# Measure execution time
|
||||
start_time = timeit.default_timer()
|
||||
|
||||
workflow = ConcurrentWorkflow(
|
||||
max_workers=20,
|
||||
autosave=True,
|
||||
print_results=True,
|
||||
return_results=True,
|
||||
)
|
||||
|
||||
# Add task to workflow
|
||||
workflow.add(task)
|
||||
workflow.run()
|
||||
|
||||
# Calculate execution time
|
||||
execution_time = timeit.default_timer() - start_time
|
||||
# logger.info(f"Execution time: {execution_time} seconds")
|
||||
print(f"Execution time: {execution_time} seconds")
|
@ -0,0 +1,72 @@
|
||||
import os
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
import swarms.prompts.security_team as stsp
|
||||
from swarms.models import GPT4VisionAPI
|
||||
from swarms.structs import Agent
|
||||
|
||||
# Load environment variables and initialize the Vision API
|
||||
load_dotenv()
|
||||
api_key = os.getenv("OPENAI_API_KEY")
|
||||
|
||||
llm = GPT4VisionAPI(openai_api_key=api_key)
|
||||
|
||||
# Image for analysis
|
||||
img = "bank_robbery.jpg"
|
||||
|
||||
# Initialize agents with respective prompts for security tasks
|
||||
crowd_analysis_agent = Agent(
|
||||
llm=llm,
|
||||
sop=stsp.CROWD_ANALYSIS_AGENT_PROMPT,
|
||||
max_loops=1,
|
||||
multi_modal=True,
|
||||
)
|
||||
|
||||
weapon_detection_agent = Agent(
|
||||
llm=llm,
|
||||
sop=stsp.WEAPON_DETECTION_AGENT_PROMPT,
|
||||
max_loops=1,
|
||||
multi_modal=True,
|
||||
)
|
||||
|
||||
surveillance_monitoring_agent = Agent(
|
||||
llm=llm,
|
||||
sop=stsp.SURVEILLANCE_MONITORING_AGENT_PROMPT,
|
||||
max_loops=1,
|
||||
multi_modal=True,
|
||||
)
|
||||
|
||||
emergency_response_coordinator = Agent(
|
||||
llm=llm,
|
||||
sop=stsp.EMERGENCY_RESPONSE_COORDINATOR_PROMPT,
|
||||
max_loops=1,
|
||||
multi_modal=True,
|
||||
)
|
||||
|
||||
# Run agents with respective tasks on the same image
|
||||
crowd_analysis = crowd_analysis_agent.run(
|
||||
"Analyze the crowd dynamics in the scene", img
|
||||
)
|
||||
|
||||
weapon_detection_analysis = weapon_detection_agent.run(
|
||||
"Inspect the scene for any potential threats", img
|
||||
)
|
||||
|
||||
surveillance_monitoring_analysis = surveillance_monitoring_agent.run(
|
||||
"Monitor the overall scene for unusual activities", img
|
||||
)
|
||||
|
||||
emergency_response_analysis = emergency_response_coordinator.run(
|
||||
"Develop a response plan based on the scene analysis", img
|
||||
)
|
||||
|
||||
# Process and output results for each task
|
||||
# Example output (uncomment to use):
|
||||
print(f"Crowd Analysis: {crowd_analysis}")
|
||||
print(f"Weapon Detection Analysis: {weapon_detection_analysis}")
|
||||
print(
|
||||
"Surveillance Monitoring Analysis:"
|
||||
f" {surveillance_monitoring_analysis}"
|
||||
)
|
||||
print(f"Emergency Response Analysis: {emergency_response_analysis}")
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue