pull/612/head^2
parent
ad24283164
commit
8b78ab2bba
@ -0,0 +1,48 @@
|
||||
import os

from swarms import Agent
from swarm_models import OpenAIChat
from dotenv import load_dotenv

# Pull environment variables (e.g. GROQ_API_KEY) from a local .env file.
load_dotenv()

# NOTE: despite the OpenAIChat wrapper below, this is a Groq key used
# against Groq's OpenAI-compatible endpoint.
groq_api_key = os.getenv("GROQ_API_KEY")

# Chat model pointed at Groq's OpenAI-compatible API.
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=groq_api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

# Build the agent with automated prompt engineering enabled: because both
# system_prompt and agent_description are None and auto_generate_prompt is
# True, the system prompt is derived from the task at run time.
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=None,  # dynamically generated from the task
    agent_description=None,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=False,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="Human:",
    return_step_meta=False,
    output_type="string",
    streaming_on=False,
    auto_generate_prompt=True,  # enable automated prompt engineering
)

# Run the task on CPU; the framework designs a system prompt from the
# task text since none was provided above.
agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria",
    device="cpu",
)

# Show the system prompt that was generated for this task.
print(agent.system_prompt)
|
@ -0,0 +1,162 @@
|
||||
import os

from dotenv import load_dotenv
from swarms import Agent
from swarm_models import OpenAIChat
from swarms.structs.swarm_router import SwarmRouter

# Pull environment variables (e.g. GROQ_API_KEY) from a local .env file.
load_dotenv()

# Get the Groq API key from the environment variable.
api_key = os.getenv("GROQ_API_KEY")

# Chat model pointed at Groq's OpenAI-compatible API.
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

# Define specialized system prompts for each agent
DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. Your expertise includes:
1. Extracting key financial metrics (revenue, EBITDA, growth rates, etc.) from financial statements and reports
2. Identifying and extracting important contract terms from legal documents
3. Pulling out relevant market data from industry reports and analyses
4. Extracting operational KPIs from management presentations and internal reports
5. Identifying and extracting key personnel information from organizational charts and bios
Provide accurate, structured data extracted from various document types to support investment analysis."""

SUMMARIZER_PROMPT = """You are an expert private equity agent specializing in summarizing complex documents. Your core competencies include:
1. Distilling lengthy financial reports into concise executive summaries
2. Summarizing legal documents, highlighting key terms and potential risks
3. Condensing industry reports to capture essential market trends and competitive dynamics
4. Summarizing management presentations to highlight key strategic initiatives and projections
5. Creating brief overviews of technical documents, emphasizing critical points for non-technical stakeholders
Deliver clear, concise summaries that capture the essence of various documents while highlighting information crucial for investment decisions."""

FINANCIAL_ANALYST_PROMPT = """You are a specialized private equity agent focused on financial analysis. Your key responsibilities include:
1. Analyzing historical financial statements to identify trends and potential issues
2. Evaluating the quality of earnings and potential adjustments to EBITDA
3. Assessing working capital requirements and cash flow dynamics
4. Analyzing capital structure and debt capacity
5. Evaluating financial projections and underlying assumptions
Provide thorough, insightful financial analysis to inform investment decisions and valuation."""

MARKET_ANALYST_PROMPT = """You are a highly skilled private equity agent specializing in market analysis. Your expertise covers:
1. Analyzing industry trends, growth drivers, and potential disruptors
2. Evaluating competitive landscape and market positioning
3. Assessing market size, segmentation, and growth potential
4. Analyzing customer dynamics, including concentration and loyalty
5. Identifying potential regulatory or macroeconomic impacts on the market
Deliver comprehensive market analysis to assess the attractiveness and risks of potential investments."""

OPERATIONAL_ANALYST_PROMPT = """You are an expert private equity agent focused on operational analysis. Your core competencies include:
1. Evaluating operational efficiency and identifying improvement opportunities
2. Analyzing supply chain and procurement processes
3. Assessing sales and marketing effectiveness
4. Evaluating IT systems and digital capabilities
5. Identifying potential synergies in merger or add-on acquisition scenarios
Provide detailed operational analysis to uncover value creation opportunities and potential risks."""


def _specialist_agent(agent_name: str, system_prompt: str, saved_state_path: str) -> Agent:
    """Build one document-analysis specialist.

    All five specialists share an identical configuration except for their
    name, system prompt, and saved-state file; this factory removes the
    fivefold duplication of the Agent(...) call.
    """
    return Agent(
        agent_name=agent_name,
        system_prompt=system_prompt,
        llm=model,
        max_loops=1,
        autosave=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        saved_state_path=saved_state_path,
        user_name="pe_firm",
        retry_attempts=1,
        context_length=200000,
        output_type="string",
    )


# Initialize specialized agents
data_extractor_agent = _specialist_agent(
    "Data-Extractor", DATA_EXTRACTOR_PROMPT, "data_extractor_agent.json"
)
summarizer_agent = _specialist_agent(
    "Document-Summarizer", SUMMARIZER_PROMPT, "summarizer_agent.json"
)
financial_analyst_agent = _specialist_agent(
    "Financial-Analyst", FINANCIAL_ANALYST_PROMPT, "financial_analyst_agent.json"
)
market_analyst_agent = _specialist_agent(
    "Market-Analyst", MARKET_ANALYST_PROMPT, "market_analyst_agent.json"
)
operational_analyst_agent = _specialist_agent(
    "Operational-Analyst", OPERATIONAL_ANALYST_PROMPT, "operational_analyst_agent.json"
)

# Initialize the SwarmRouter; only two specialists are currently active,
# the rest are kept here disabled for easy re-enabling.
router = SwarmRouter(
    name="pe-document-analysis-swarm",
    description="Analyze documents for private equity due diligence and investment decision-making",
    max_loops=1,
    agents=[
        data_extractor_agent,
        summarizer_agent,
        # financial_analyst_agent,
        # market_analyst_agent,
        # operational_analyst_agent,
    ],
    swarm_type="auto",  # or "SequentialWorkflow" or "ConcurrentWorkflow"
    # auto_generate_prompts=True,
)

# Example usage
if __name__ == "__main__":
    # Run a comprehensive private equity document analysis task
    result = router.run(
        "Where is the best place to find template term sheets for series A startups. Provide links and references"
    )
    print(result)

    # Retrieve and print logs
    for log in router.get_logs():
        print(f"{log.timestamp} - {log.level}: {log.message}")
|
@ -0,0 +1,100 @@
|
||||
import os

from swarms import Agent, ConcurrentWorkflow
from swarm_models import OpenAIChat
from loguru import logger

from dotenv import load_dotenv

# Pull environment variables (e.g. GROQ_API_KEY) from a local .env file.
load_dotenv()

# Groq API key, used via Groq's OpenAI-compatible endpoint.
api_key = os.getenv("GROQ_API_KEY")

# Chat model pointed at Groq's OpenAI-compatible API.
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

# Log to file, rotating once the file reaches 10 MB.
logger.add("swarms_example.log", rotation="10 MB")

AGENT_COUNT = 3  # adjust number of agents as needed


def _analysis_agent(idx):
    """Create one term-sheet analysis agent; idx keeps names and state files unique."""
    return Agent(
        agent_name=f"Term-Sheet-Analysis-Agent-{idx}",
        system_prompt="Analyze the term sheet for investment opportunities.",
        llm=model,
        max_loops=1,
        autosave=True,
        dashboard=False,
        verbose=True,
        dynamic_temperature_enabled=True,
        saved_state_path=f"term_sheet_analysis_agent_{idx}.json",
        user_name="swarms_corp",
        retry_attempts=1,
        context_length=200000,
        return_step_meta=False,
    )


# Pool of identical agents that will work on the task concurrently.
agents = [_analysis_agent(idx) for idx in range(AGENT_COUNT)]

# Workflow that fans the task out to every agent in the pool.
workflow = ConcurrentWorkflow(
    agents=agents,
    metadata_output_path="term_sheet_analysis_metadata.json",
    return_str_on=True,
    auto_generate_prompts=True,
    auto_save=True,
)

# Single task broadcast to all agents.
task = "Analyze the term sheet for investment opportunities and identify key terms and conditions."

# Run the workflow; metadata is also persisted via auto_save.
metadata = workflow.run(task)
logger.info(metadata)


# # Example usage of the run_batched method
# tasks = [
#     "What are the benefits of a ROTH IRA?",
#     "How do I open a ROTH IRA account?",
# ]
# results = workflow.run_batched(tasks)
# print("\nRun Batched Method Output:")
# print(results)

# # Example usage of the run_async method
# async def run_async_example():
#     future = workflow.run_async(task)
#     result = await future
#     print("\nRun Async Method Output:")
#     print(result)

# # Example usage of the run_batched_async method
# async def run_batched_async_example():
#     futures = workflow.run_batched_async(tasks)
#     results = await asyncio.gather(*futures)
#     print("\nRun Batched Async Method Output:")
#     print(results)

# # Example usage of the run_parallel method
# parallel_results = workflow.run_parallel(tasks)
# print("\nRun Parallel Method Output:")
# print(parallel_results)

# # Example usage of the run_parallel_async method
# async def run_parallel_async_example():
#     parallel_futures = workflow.run_parallel_async(tasks)
#     parallel_results = await asyncio.gather(*parallel_futures)
#     print("\nRun Parallel Async Method Output:")
#     print(parallel_results)

# # To run the async examples, you would typically use an event loop
# if __name__ == "__main__":
#     asyncio.run(run_async_example())
#     asyncio.run(run_batched_async_example())
#     asyncio.run(run_parallel_async_example())
|
@ -0,0 +1,334 @@
|
||||
# ClusterOps API Reference
|
||||
|
||||
ClusterOps is a Python library for managing and executing tasks across CPU and GPU resources in a distributed computing environment. It provides functions for resource discovery, task execution, and performance monitoring.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
|
||||
$ pip3 install clusterops
|
||||
|
||||
```
|
||||
|
||||
## Table of Contents
|
||||
1. [CPU Operations](#cpu-operations)
|
||||
2. [GPU Operations](#gpu-operations)
|
||||
3. [Utility Functions](#utility-functions)
|
||||
4. [Resource Monitoring](#resource-monitoring)
|
||||
|
||||
## CPU Operations
|
||||
|
||||
### `list_available_cpus() -> List[int]`
|
||||
|
||||
Lists all available CPU cores.
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `List[int]` | A list of available CPU core indices. |
|
||||
|
||||
#### Raises
|
||||
| Exception | Description |
|
||||
|-----------|-------------|
|
||||
| `RuntimeError` | If no CPUs are found. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import list_available_cpus
|
||||
|
||||
available_cpus = list_available_cpus()
|
||||
print(f"Available CPU cores: {available_cpus}")
|
||||
```
|
||||
|
||||
### `execute_on_cpu(cpu_id: int, func: Callable, *args: Any, **kwargs: Any) -> Any`
|
||||
|
||||
Executes a callable on a specific CPU.
|
||||
|
||||
#### Parameters
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `cpu_id` | `int` | The CPU core to run the function on. |
|
||||
| `func` | `Callable` | The function to be executed. |
|
||||
| `*args` | `Any` | Arguments for the callable. |
|
||||
| `**kwargs` | `Any` | Keyword arguments for the callable. |
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Any` | The result of the function execution. |
|
||||
|
||||
#### Raises
|
||||
| Exception | Description |
|
||||
|-----------|-------------|
|
||||
| `ValueError` | If the CPU core specified is invalid. |
|
||||
| `RuntimeError` | If there is an error executing the function on the CPU. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import execute_on_cpu
|
||||
|
||||
def sample_task(n: int) -> int:
|
||||
return n * n
|
||||
|
||||
result = execute_on_cpu(0, sample_task, 10)
|
||||
print(f"Result of sample task on CPU 0: {result}")
|
||||
```
|
||||
|
||||
### `execute_with_cpu_cores(core_count: int, func: Callable, *args: Any, **kwargs: Any) -> Any`
|
||||
|
||||
Executes a callable using a specified number of CPU cores.
|
||||
|
||||
#### Parameters
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `core_count` | `int` | The number of CPU cores to run the function on. |
|
||||
| `func` | `Callable` | The function to be executed. |
|
||||
| `*args` | `Any` | Arguments for the callable. |
|
||||
| `**kwargs` | `Any` | Keyword arguments for the callable. |
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Any` | The result of the function execution. |
|
||||
|
||||
#### Raises
|
||||
| Exception | Description |
|
||||
|-----------|-------------|
|
||||
| `ValueError` | If the number of CPU cores specified is invalid or exceeds available cores. |
|
||||
| `RuntimeError` | If there is an error executing the function on the specified CPU cores. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import execute_with_cpu_cores
|
||||
|
||||
def parallel_task(n: int) -> int:
|
||||
return sum(range(n))
|
||||
|
||||
result = execute_with_cpu_cores(4, parallel_task, 1000000)
|
||||
print(f"Result of parallel task using 4 CPU cores: {result}")
|
||||
```
|
||||
|
||||
## GPU Operations
|
||||
|
||||
### `list_available_gpus() -> List[str]`
|
||||
|
||||
Lists all available GPUs.
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `List[str]` | A list of available GPU names. |
|
||||
|
||||
#### Raises
|
||||
| Exception | Description |
|
||||
|-----------|-------------|
|
||||
| `RuntimeError` | If no GPUs are found. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import list_available_gpus
|
||||
|
||||
available_gpus = list_available_gpus()
|
||||
print(f"Available GPUs: {available_gpus}")
|
||||
```
|
||||
|
||||
### `select_best_gpu() -> Optional[int]`
|
||||
|
||||
Selects the GPU with the most free memory.
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Optional[int]` | The GPU ID of the best available GPU, or None if no GPUs are available. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import select_best_gpu
|
||||
|
||||
best_gpu = select_best_gpu()
|
||||
if best_gpu is not None:
|
||||
print(f"Best GPU for execution: GPU {best_gpu}")
|
||||
else:
|
||||
print("No GPUs available")
|
||||
```
|
||||
|
||||
### `execute_on_gpu(gpu_id: int, func: Callable, *args: Any, **kwargs: Any) -> Any`
|
||||
|
||||
Executes a callable on a specific GPU using Ray.
|
||||
|
||||
#### Parameters
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `gpu_id` | `int` | The GPU to run the function on. |
|
||||
| `func` | `Callable` | The function to be executed. |
|
||||
| `*args` | `Any` | Arguments for the callable. |
|
||||
| `**kwargs` | `Any` | Keyword arguments for the callable. |
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Any` | The result of the function execution. |
|
||||
|
||||
#### Raises
|
||||
| Exception | Description |
|
||||
|-----------|-------------|
|
||||
| `ValueError` | If the GPU index is invalid. |
|
||||
| `RuntimeError` | If there is an error executing the function on the GPU. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import execute_on_gpu
|
||||
|
||||
def gpu_task(n: int) -> int:
|
||||
return n ** 2
|
||||
|
||||
result = execute_on_gpu(0, gpu_task, 10)
|
||||
print(f"Result of GPU task on GPU 0: {result}")
|
||||
```
|
||||
|
||||
### `execute_on_multiple_gpus(gpu_ids: List[int], func: Callable, all_gpus: bool = False, timeout: float = None, *args: Any, **kwargs: Any) -> List[Any]`
|
||||
|
||||
Executes a callable across multiple GPUs using Ray.
|
||||
|
||||
#### Parameters
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `gpu_ids` | `List[int]` | The list of GPU IDs to run the function on. |
|
||||
| `func` | `Callable` | The function to be executed. |
|
||||
| `all_gpus` | `bool` | Whether to use all available GPUs (default: False). |
|
||||
| `timeout` | `float` | Timeout for the execution in seconds (default: None). |
|
||||
| `*args` | `Any` | Arguments for the callable. |
|
||||
| `**kwargs` | `Any` | Keyword arguments for the callable. |
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `List[Any]` | A list of results from the execution on each GPU. |
|
||||
|
||||
#### Raises
|
||||
| Exception | Description |
|
||||
|-----------|-------------|
|
||||
| `ValueError` | If any GPU index is invalid. |
|
||||
| `RuntimeError` | If there is an error executing the function on the GPUs. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import execute_on_multiple_gpus
|
||||
|
||||
def multi_gpu_task(n: int) -> int:
|
||||
return n ** 3
|
||||
|
||||
results = execute_on_multiple_gpus([0, 1], multi_gpu_task, 5)
|
||||
print(f"Results of multi-GPU task: {results}")
|
||||
```
|
||||
|
||||
### `distributed_execute_on_gpus(gpu_ids: List[int], func: Callable, *args: Any, **kwargs: Any) -> List[Any]`
|
||||
|
||||
Executes a callable across multiple GPUs and nodes using Ray's distributed task scheduling.
|
||||
|
||||
#### Parameters
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `gpu_ids` | `List[int]` | The list of GPU IDs across nodes to run the function on. |
|
||||
| `func` | `Callable` | The function to be executed. |
|
||||
| `*args` | `Any` | Arguments for the callable. |
|
||||
| `**kwargs` | `Any` | Keyword arguments for the callable. |
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `List[Any]` | A list of results from the execution on each GPU. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import distributed_execute_on_gpus
|
||||
|
||||
def distributed_task(n: int) -> int:
|
||||
return n ** 4
|
||||
|
||||
results = distributed_execute_on_gpus([0, 1, 2, 3], distributed_task, 3)
|
||||
print(f"Results of distributed GPU task: {results}")
|
||||
```
|
||||
|
||||
## Utility Functions
|
||||
|
||||
### `retry_with_backoff(func: Callable, retries: int = RETRY_COUNT, delay: float = RETRY_DELAY, *args: Any, **kwargs: Any) -> Any`
|
||||
|
||||
Retries a callable function with exponential backoff in case of failure.
|
||||
|
||||
#### Parameters
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `func` | `Callable` | The function to execute with retries. |
|
||||
| `retries` | `int` | Number of retries (default: RETRY_COUNT from env). |
|
||||
| `delay` | `float` | Delay between retries in seconds (default: RETRY_DELAY from env). |
|
||||
| `*args` | `Any` | Arguments for the callable. |
|
||||
| `**kwargs` | `Any` | Keyword arguments for the callable. |
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Any` | The result of the function execution. |
|
||||
|
||||
#### Raises
|
||||
| Exception | Description |
|
||||
|-----------|-------------|
|
||||
| `Exception` | After all retries fail. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import retry_with_backoff
|
||||
|
||||
def unstable_task():
|
||||
# Simulating an unstable task that might fail
|
||||
import random
|
||||
if random.random() < 0.5:
|
||||
raise Exception("Task failed")
|
||||
return "Task succeeded"
|
||||
|
||||
result = retry_with_backoff(unstable_task, retries=5, delay=1)
|
||||
print(f"Result of unstable task: {result}")
|
||||
```
|
||||
|
||||
## Resource Monitoring
|
||||
|
||||
### `monitor_resources()`
|
||||
|
||||
Continuously monitors CPU and GPU resources and logs alerts when thresholds are crossed.
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import monitor_resources
|
||||
|
||||
# Start monitoring resources
|
||||
monitor_resources()
|
||||
```
|
||||
|
||||
### `profile_execution(func: Callable, *args: Any, **kwargs: Any) -> Any`
|
||||
|
||||
Profiles the execution of a task, collecting metrics like execution time and CPU/GPU usage.
|
||||
|
||||
#### Parameters
|
||||
| Name | Type | Description |
|
||||
|------|------|-------------|
|
||||
| `func` | `Callable` | The function to profile. |
|
||||
| `*args` | `Any` | Arguments for the callable. |
|
||||
| `**kwargs` | `Any` | Keyword arguments for the callable. |
|
||||
|
||||
#### Returns
|
||||
| Type | Description |
|
||||
|------|-------------|
|
||||
| `Any` | The result of the function execution along with the collected metrics. |
|
||||
|
||||
#### Example
|
||||
```python
|
||||
from clusterops import profile_execution
|
||||
|
||||
def cpu_intensive_task():
|
||||
return sum(i*i for i in range(10000000))
|
||||
|
||||
result = profile_execution(cpu_intensive_task)
|
||||
print(f"Result of profiled task: {result}")
|
||||
```
|
||||
|
||||
This API reference provides a comprehensive overview of the ClusterOps library's main functions, their parameters, return values, and usage examples. It should help users understand and utilize the library effectively for managing and executing tasks across CPU and GPU resources in a distributed computing environment.
|
@ -0,0 +1,537 @@
|
||||
# `Agent` Documentation
|
||||
|
||||
Swarm Agent is a powerful autonomous agent framework designed to connect Language Models (LLMs) with various tools and long-term memory. This class provides the ability to ingest and process various types of documents such as PDFs, text files, Markdown files, JSON files, and more. The Agent structure offers a wide range of features to enhance the capabilities of LLMs and facilitate efficient task execution.
|
||||
|
||||
1. **Conversational Loop**: It establishes a conversational loop with a language model. This means it allows you to interact with the model in a back-and-forth manner, taking turns in the conversation.
|
||||
|
||||
2. **Feedback Collection**: The class allows users to provide feedback on the responses generated by the model. This feedback can be valuable for training and improving the model's responses over time.
|
||||
|
||||
3. **Stoppable Conversation**: You can define custom stopping conditions for the conversation, allowing you to stop the interaction based on specific criteria. For example, you can stop the conversation if a certain keyword is detected in the responses.
|
||||
|
||||
4. **Retry Mechanism**: The class includes a retry mechanism that can be helpful if there are issues generating responses from the model. It attempts to generate a response multiple times before raising an error.
|
||||
|
||||
## Architecture
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[Task Initiation] -->|Receives Task| B[Initial LLM Processing]
|
||||
B -->|Interprets Task| C[Tool Usage]
|
||||
C -->|Calls Tools| D[Function 1]
|
||||
C -->|Calls Tools| E[Function 2]
|
||||
D -->|Returns Data| C
|
||||
E -->|Returns Data| C
|
||||
C -->|Provides Data| F[Memory Interaction]
|
||||
F -->|Stores and Retrieves Data| G[RAG System]
|
||||
G -->|ChromaDB/Pinecone| H[Enhanced Data]
|
||||
F -->|Provides Enhanced Data| I[Final LLM Processing]
|
||||
I -->|Generates Final Response| J[Output]
|
||||
C -->|No Tools Available| K[Skip Tool Usage]
|
||||
K -->|Proceeds to Memory Interaction| F
|
||||
F -->|No Memory Available| L[Skip Memory Interaction]
|
||||
L -->|Proceeds to Final LLM Processing| I
|
||||
```
|
||||
|
||||
### `Agent` Attributes
|
||||
|
||||
| Attribute | Description |
|
||||
|------------|-------------|
|
||||
| `id` | A unique identifier for the agent instance. |
|
||||
| `llm` | The language model instance used by the agent. |
|
||||
| `template` | The template used for formatting responses. |
|
||||
| `max_loops` | The maximum number of loops the agent can run. |
|
||||
| `stopping_condition` | A callable function that determines when the agent should stop looping. |
|
||||
| `loop_interval` | The interval (in seconds) between loops. |
|
||||
| `retry_attempts` | The number of retry attempts for failed LLM calls. |
|
||||
| `retry_interval` | The interval (in seconds) between retry attempts. |
|
||||
| `return_history` | A boolean indicating whether the agent should return the conversation history. |
|
||||
| `stopping_token` | A token that, when present in the response, stops the agent from looping. |
|
||||
| `dynamic_loops` | A boolean indicating whether the agent should dynamically determine the number of loops. |
|
||||
| `interactive` | A boolean indicating whether the agent should run in interactive mode. |
|
||||
| `dashboard` | A boolean indicating whether the agent should display a dashboard. |
|
||||
| `agent_name` | The name of the agent instance. |
|
||||
| `agent_description` | A description of the agent instance. |
|
||||
| `system_prompt` | The system prompt used to initialize the conversation. |
|
||||
| `tools` | A list of callable functions representing tools the agent can use. |
|
||||
| `dynamic_temperature_enabled` | A boolean indicating whether the agent should dynamically adjust the temperature of the LLM. |
|
||||
| `sop` | The standard operating procedure for the agent. |
|
||||
| `sop_list` | A list of strings representing the standard operating procedure. |
|
||||
| `saved_state_path` | The file path for saving and loading the agent's state. |
|
||||
| `autosave` | A boolean indicating whether the agent should automatically save its state. |
|
||||
| `context_length` | The maximum length of the context window (in tokens) for the LLM. |
|
||||
| `user_name` | The name used to represent the user in the conversation. |
|
||||
| `self_healing_enabled` | A boolean indicating whether the agent should attempt to self-heal in case of errors. |
|
||||
| `code_interpreter` | A boolean indicating whether the agent should interpret and execute code snippets. |
|
||||
| `multi_modal` | A boolean indicating whether the agent should support multimodal inputs (e.g., text and images). |
|
||||
| `pdf_path` | The file path of a PDF document to be ingested. |
|
||||
| `list_of_pdf` | A list of file paths for PDF documents to be ingested. |
|
||||
| `tokenizer` | An instance of a tokenizer used for token counting and management. |
|
||||
| `long_term_memory` | An instance of a `BaseVectorDatabase` implementation for long-term memory management. |
|
||||
| `preset_stopping_token` | A boolean indicating whether the agent should use a preset stopping token. |
|
||||
| `traceback` | An object used for traceback handling. |
|
||||
| `traceback_handlers` | A list of traceback handlers. |
|
||||
| `streaming_on` | A boolean indicating whether the agent should stream its responses. |
|
||||
| `docs` | A list of document paths or contents to be ingested. |
|
||||
| `docs_folder` | The path to a folder containing documents to be ingested. |
|
||||
| `verbose` | A boolean indicating whether the agent should print verbose output. |
|
||||
| `parser` | A callable function used for parsing input data. |
|
||||
| `best_of_n` | An integer indicating the number of best responses to generate (for sampling). |
|
||||
| `callback` | A callable function to be called after each agent loop. |
|
||||
| `metadata` | A dictionary containing metadata for the agent. |
|
||||
| `callbacks` | A list of callable functions to be called during the agent's execution. |
|
||||
| `logger_handler` | A handler for logging messages. |
|
||||
| `search_algorithm` | A callable function representing the search algorithm for long-term memory retrieval. |
|
||||
| `logs_to_filename` | The file path for logging agent activities. |
|
||||
| `evaluator` | A callable function used for evaluating the agent's responses. |
|
||||
| `output_json` | A boolean indicating whether the agent's output should be in JSON format. |
|
||||
| `stopping_func` | A callable function used as a stopping condition for the agent. |
|
||||
| `custom_loop_condition` | A callable function used as a custom loop condition for the agent. |
|
||||
| `sentiment_threshold` | A float value representing the sentiment threshold for evaluating responses. |
|
||||
| `custom_exit_command` | A string representing a custom command for exiting the agent's loop. |
|
||||
| `sentiment_analyzer` | A callable function used for sentiment analysis on the agent's outputs. |
|
||||
| `limit_tokens_from_string` | A callable function used for limiting the number of tokens in a string. |
|
||||
| `custom_tools_prompt` | A callable function used for generating a custom prompt for tool usage. |
|
||||
| `tool_schema` | A data structure representing the schema for the agent's tools. |
|
||||
| `output_type` | A type representing the expected output type of the agent's responses. |
|
||||
| `function_calling_type` | A string representing the type of function calling (e.g., "json"). |
|
||||
| `output_cleaner` | A callable function used for cleaning the agent's output. |
|
||||
| `function_calling_format_type` | A string representing the format type for function calling (e.g., "OpenAI"). |
|
||||
| `list_base_models` | A list of base models used for generating tool schemas. |
|
||||
| `metadata_output_type` | A string representing the output type for metadata. |
|
||||
| `state_save_file_type` | A string representing the file type for saving the agent's state (e.g., "json", "yaml"). |
|
||||
| `chain_of_thoughts` | A boolean indicating whether the agent should use the chain of thoughts technique. |
|
||||
| `algorithm_of_thoughts` | A boolean indicating whether the agent should use the algorithm of thoughts technique. |
|
||||
| `tree_of_thoughts` | A boolean indicating whether the agent should use the tree of thoughts technique. |
|
||||
| `tool_choice` | A string representing the method for tool selection (e.g., "auto"). |
|
||||
| `execute_tool` | A boolean indicating whether the agent should execute tools. |
|
||||
| `rules` | A string representing the rules for the agent's behavior. |
|
||||
| `planning` | A boolean indicating whether the agent should perform planning. |
|
||||
| `planning_prompt` | A string representing the prompt for planning. |
|
||||
| `device` | A string representing the device on which the agent should run. |
|
||||
| `custom_planning_prompt` | A string representing a custom prompt for planning. |
|
||||
| `memory_chunk_size` | An integer representing the maximum size of memory chunks for long-term memory retrieval. |
|
||||
| `agent_ops_on` | A boolean indicating whether agent operations should be enabled. |
|
||||
| `return_step_meta` | A boolean indicating whether or not to return JSON of all the steps and additional metadata |
|
||||
| `output_type` | A Literal type indicating whether to output "string", "str", "list", "json", "dict", "yaml" |
|
||||
|
||||
|
||||
|
||||
### `Agent` Methods
|
||||
|
||||
| Method | Description | Inputs | Usage Example |
|
||||
|--------|-------------|--------|----------------|
|
||||
| `run(task, img=None, *args, **kwargs)` | Runs the autonomous agent loop to complete the given task. | `task` (str): The task to be performed.<br>`img` (str, optional): Path to an image file, if the task involves image processing.<br>`*args`, `**kwargs`: Additional arguments to pass to the language model. | `response = agent.run("Generate a report on financial performance.")` |
|
||||
| `__call__(task, img=None, *args, **kwargs)` | An alternative way to call the `run` method. | Same as `run`. | `response = agent("Generate a report on financial performance.")` |
|
||||
| `parse_and_execute_tools(response, *args, **kwargs)` | Parses the agent's response and executes any tools mentioned in it. | `response` (str): The agent's response to be parsed.<br>`*args`, `**kwargs`: Additional arguments to pass to the tool execution. | `agent.parse_and_execute_tools(response)` |
|
||||
| `long_term_memory_prompt(query, *args, **kwargs)` | Generates a prompt for querying the agent's long-term memory. | `query` (str): The query to search for in long-term memory.<br>`*args`, `**kwargs`: Additional arguments to pass to the long-term memory retrieval. | `memory_retrieval = agent.long_term_memory_prompt("financial performance")` |
|
||||
| `add_memory(message)` | Adds a message to the agent's memory. | `message` (str): The message to add to memory. | `agent.add_memory("Remember this fact")` |
|
||||
|
||||
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
- **Language Model Integration**: The Swarm Agent allows seamless integration with different language models, enabling users to leverage the power of state-of-the-art models.
|
||||
- **Tool Integration**: The framework supports the integration of various tools, enabling the agent to perform a wide range of tasks, from code execution to data analysis and beyond.
|
||||
- **Long-term Memory Management**: The Swarm Agent incorporates long-term memory management capabilities, allowing it to store and retrieve relevant information for effective decision-making and task execution.
|
||||
- **Document Ingestion**: The agent can ingest and process various types of documents, including PDFs, text files, Markdown files, JSON files, and more, enabling it to extract relevant information for task completion.
|
||||
- **Interactive Mode**: Users can interact with the agent in an interactive mode, enabling real-time communication and task execution.
|
||||
- **Dashboard**: The framework provides a visual dashboard for monitoring the agent's performance and activities.
|
||||
- **Dynamic Temperature Control**: The Swarm Agent supports dynamic temperature control, allowing for adjustments to the model's output diversity during task execution.
|
||||
- **Autosave and State Management**: The agent can save its state automatically, enabling seamless resumption of tasks after interruptions or system restarts.
|
||||
- **Self-Healing and Error Handling**: The framework incorporates self-healing and error-handling mechanisms to ensure robust and reliable operation.
|
||||
- **Code Interpretation**: The agent can interpret and execute code snippets, expanding its capabilities for tasks involving programming or scripting.
|
||||
- **Multimodal Support**: The framework supports multimodal inputs, enabling the agent to process and reason about various data types, such as text, images, and audio.
|
||||
- **Tokenization and Token Management**: The Swarm Agent provides tokenization capabilities, enabling efficient management of token usage and context window truncation.
|
||||
- **Sentiment Analysis**: The agent can perform sentiment analysis on its generated outputs, allowing for evaluation and adjustment of responses based on sentiment thresholds.
|
||||
- **Output Filtering and Cleaning**: The framework supports output filtering and cleaning, ensuring that generated responses adhere to specific criteria or guidelines.
|
||||
- **Asynchronous and Concurrent Execution**: The Swarm Agent supports asynchronous and concurrent task execution, enabling efficient parallelization and scaling of operations.
|
||||
- **Planning and Reasoning**: The agent can engage in planning and reasoning processes, leveraging techniques such as algorithm of thoughts and chain of thoughts to enhance decision-making and task execution.
|
||||
- **Agent Operations and Monitoring**: The framework provides integration with agent operations and monitoring tools, enabling real-time monitoring and management of the agent's activities.
|
||||
|
||||
## Getting Started
|
||||
|
||||
First run the following:
|
||||
|
||||
```bash
|
||||
pip3 install -U swarms
|
||||
```
|
||||
|
||||
And, then now you can get started with the following:
|
||||
|
||||
```python
|
||||
import os
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
from swarms.prompts.finance_agent_sys_prompt import (
|
||||
FINANCIAL_AGENT_SYS_PROMPT,
|
||||
)
|
||||
|
||||
# Get the OpenAI API key from the environment variable
|
||||
api_key = os.getenv("OPENAI_API_KEY")
|
||||
|
||||
# Create an instance of the OpenAIChat class
|
||||
model = OpenAIChat(
|
||||
api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
|
||||
)
|
||||
|
||||
# Initialize the agent
|
||||
agent = Agent(
|
||||
agent_name="Financial-Analysis-Agent_sas_chicken_eej",
|
||||
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="finance_agent.json",
|
||||
user_name="swarms_corp",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
return_step_meta=False,
|
||||
output_type="str",
|
||||
)
|
||||
|
||||
|
||||
out = agent.run(
|
||||
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
|
||||
)
|
||||
print(out)
|
||||
|
||||
```
|
||||
|
||||
This example initializes an instance of the `Agent` class with an OpenAI language model and a single loop (`max_loops=1`). The `run()` method is then called with a financial question, and the agent's response is printed.
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
The Swarm Agent provides numerous advanced features and customization options. Here are a few examples of how to leverage these features:
|
||||
|
||||
### Tool Integration
|
||||
|
||||
To integrate tools with the Swarm Agent, you can pass a list of callable functions with types and doc strings to the `tools` parameter when initializing the `Agent` instance. The agent will automatically convert these functions into an OpenAI function calling schema and make them available for use during task execution.
|
||||
|
||||
## Requirements for a tool
|
||||
- Function
|
||||
- With types
|
||||
- with doc strings
|
||||
|
||||
```python
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
from swarms_memory import ChromaDB
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
# Making an instance of the ChromaDB class
|
||||
memory = ChromaDB(
|
||||
metric="cosine",
|
||||
n_results=3,
|
||||
output_dir="results",
|
||||
docs_folder="docs",
|
||||
)
|
||||
|
||||
# Model
|
||||
model = OpenAIChat(
|
||||
api_key=os.getenv("OPENAI_API_KEY"),
|
||||
model_name="gpt-4o-mini",
|
||||
temperature=0.1,
|
||||
)
|
||||
|
||||
|
||||
# Tools in swarms are simple python functions and docstrings
|
||||
def terminal(
|
||||
code: str,
|
||||
):
|
||||
"""
|
||||
Run code in the terminal.
|
||||
|
||||
Args:
|
||||
code (str): The code to run in the terminal.
|
||||
|
||||
Returns:
|
||||
str: The output of the code.
|
||||
"""
|
||||
out = subprocess.run(
|
||||
code, shell=True, capture_output=True, text=True
|
||||
).stdout
|
||||
return str(out)
|
||||
|
||||
|
||||
def browser(query: str):
|
||||
"""
|
||||
Search the query in the browser with the `browser` tool.
|
||||
|
||||
Args:
|
||||
query (str): The query to search in the browser.
|
||||
|
||||
Returns:
|
||||
str: The search results.
|
||||
"""
|
||||
import webbrowser
|
||||
|
||||
url = f"https://www.google.com/search?q={query}"
|
||||
webbrowser.open(url)
|
||||
return f"Searching for {query} in the browser."
|
||||
|
||||
|
||||
def create_file(file_path: str, content: str):
|
||||
"""
|
||||
Create a file using the file editor tool.
|
||||
|
||||
Args:
|
||||
file_path (str): The path to the file.
|
||||
content (str): The content to write to the file.
|
||||
|
||||
Returns:
|
||||
str: The result of the file creation operation.
|
||||
"""
|
||||
with open(file_path, "w") as file:
|
||||
file.write(content)
|
||||
return f"File {file_path} created successfully."
|
||||
|
||||
|
||||
def file_editor(file_path: str, mode: str, content: str):
|
||||
"""
|
||||
Edit a file using the file editor tool.
|
||||
|
||||
Args:
|
||||
file_path (str): The path to the file.
|
||||
mode (str): The mode to open the file in.
|
||||
content (str): The content to write to the file.
|
||||
|
||||
Returns:
|
||||
str: The result of the file editing operation.
|
||||
"""
|
||||
with open(file_path, mode) as file:
|
||||
file.write(content)
|
||||
return f"File {file_path} edited successfully."
|
||||
|
||||
|
||||
# Agent
|
||||
agent = Agent(
|
||||
agent_name="Devin",
|
||||
system_prompt=(
|
||||
"Autonomous agent that can interact with humans and other"
|
||||
" agents. Be Helpful and Kind. Use the tools provided to"
|
||||
" assist the user. Return all code in markdown format."
|
||||
),
|
||||
llm=model,
|
||||
max_loops="auto",
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
streaming_on=True,
|
||||
verbose=True,
|
||||
stopping_token="<DONE>",
|
||||
interactive=True,
|
||||
tools=[terminal, browser, file_editor, create_file],
|
||||
streaming=True,
|
||||
long_term_memory=memory,
|
||||
)
|
||||
|
||||
# Run the agent
|
||||
out = agent(
|
||||
"Create a CSV file with the latest tax rates for C corporations in the following ten states and the District of Columbia: Alabama, California, Florida, Georgia, Illinois, New York, North Carolina, Ohio, Texas, and Washington."
|
||||
)
|
||||
print(out)
|
||||
|
||||
```
|
||||
|
||||
### Long-term Memory Management
|
||||
|
||||
The Swarm Agent supports integration with various vector databases for long-term memory management. You can pass an instance of a `BaseVectorDatabase` implementation to the `long_term_memory` parameter when initializing the `Agent`.
|
||||
|
||||
```python
|
||||
import os
|
||||
|
||||
from swarms_memory import ChromaDB
|
||||
|
||||
from swarms import Agent
|
||||
from swarm_models import Anthropic
|
||||
from swarms.prompts.finance_agent_sys_prompt import (
|
||||
FINANCIAL_AGENT_SYS_PROMPT,
|
||||
)
|
||||
|
||||
# Initialize the chromadb client
|
||||
chromadb = ChromaDB(
|
||||
metric="cosine",
|
||||
output_dir="fiance_agent_rag",
|
||||
# docs_folder="artifacts", # Folder of your documents
|
||||
)
|
||||
|
||||
# Model
|
||||
model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
|
||||
|
||||
|
||||
# Initialize the agent
|
||||
agent = Agent(
|
||||
agent_name="Financial-Analysis-Agent",
|
||||
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
|
||||
agent_description="Agent creates ",
|
||||
llm=model,
|
||||
max_loops="auto",
|
||||
autosave=True,
|
||||
dashboard=False,
|
||||
verbose=True,
|
||||
streaming_on=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="finance_agent.json",
|
||||
user_name="swarms_corp",
|
||||
retry_attempts=3,
|
||||
context_length=200000,
|
||||
long_term_memory=chromadb,
|
||||
)
|
||||
|
||||
|
||||
agent.run(
|
||||
"What are the components of a startups stock incentive equity plan"
|
||||
)
|
||||
|
||||
```
|
||||
|
||||
### Document Ingestion
|
||||
|
||||
The Swarm Agent can ingest various types of documents, such as PDFs, text files, Markdown files, and JSON files. You can pass a list of document paths or contents to the `docs` parameter when initializing the `Agent`.
|
||||
|
||||
```python
|
||||
from swarms.structs import Agent
|
||||
|
||||
# Initialize the agent with documents
|
||||
agent = Agent(llm=llm, max_loops=3, docs=["path/to/doc1.pdf", "path/to/doc2.txt"])
|
||||
```
|
||||
|
||||
### Interactive Mode
|
||||
|
||||
The Swarm Agent supports an interactive mode, where users can engage in real-time communication with the agent. To enable interactive mode, set the `interactive` parameter to `True` when initializing the `Agent`.
|
||||
|
||||
```python
|
||||
from swarms.structs import Agent
|
||||
|
||||
# Initialize the agent in interactive mode
|
||||
agent = Agent(llm=llm, max_loops=3, interactive=True)
|
||||
|
||||
# Run the agent in interactive mode
|
||||
agent.interactive_run()
|
||||
```
|
||||
|
||||
### Sentiment Analysis
|
||||
|
||||
The Swarm Agent can perform sentiment analysis on its generated outputs using a sentiment analyzer function. You can pass a callable function to the `sentiment_analyzer` parameter when initializing the `Agent`.
|
||||
|
||||
```python
|
||||
from swarms.structs import Agent
|
||||
from my_sentiment_analyzer import sentiment_analyzer_function
|
||||
|
||||
# Initialize the agent with a sentiment analyzer
|
||||
agent = Agent(
|
||||
agent_name="sentiment-analyzer-agent-01", system_prompt="...",
llm=llm, max_loops=3, sentiment_analyzer=sentiment_analyzer_function)
|
||||
```
|
||||
|
||||
|
||||
### Undo Functionality
|
||||
|
||||
```python
|
||||
# Feature 2: Undo functionality
|
||||
response = agent.run("Another task")
|
||||
print(f"Response: {response}")
|
||||
previous_state, message = agent.undo_last()
|
||||
print(message)
|
||||
```
|
||||
|
||||
### Response Filtering
|
||||
|
||||
```python
|
||||
# Feature 3: Response filtering
|
||||
agent.add_response_filter("report")
|
||||
response = agent.filtered_run("Generate a report on finance")
|
||||
print(response)
|
||||
```
|
||||
|
||||
### Saving and Loading State
|
||||
|
||||
```python
|
||||
# Save the agent state
|
||||
agent.save_state('saved_flow.json')
|
||||
|
||||
# Load the agent state
|
||||
agent = Agent(llm=llm_instance, max_loops=5)
|
||||
agent.load_state('saved_flow.json')
|
||||
agent.run("Continue with the task")
|
||||
```
|
||||
|
||||
### Async and Concurrent Execution
|
||||
|
||||
```python
|
||||
# Run a task concurrently
|
||||
response = await agent.run_concurrent("Concurrent task")
|
||||
print(response)
|
||||
|
||||
# Run multiple tasks concurrently
|
||||
tasks = [
|
||||
{"task": "Task 1"},
|
||||
{"task": "Task 2", "img": "path/to/image.jpg"},
|
||||
{"task": "Task 3", "custom_param": 42}
|
||||
]
|
||||
responses = agent.bulk_run(tasks)
|
||||
print(responses)
|
||||
```
|
||||
|
||||
|
||||
### Various other settings
|
||||
|
||||
```python
|
||||
# # Convert the agent object to a dictionary
|
||||
print(agent.to_dict())
|
||||
print(agent.to_toml())
|
||||
print(agent.model_dump_json())
|
||||
print(agent.model_dump_yaml())
|
||||
|
||||
# Ingest documents into the agent's knowledge base
|
||||
agent.ingest_docs("your_pdf_path.pdf")
|
||||
|
||||
# Receive a message from a user and process it
|
||||
agent.receive_message(name="agent_name", message="message")
|
||||
|
||||
# Send a message from the agent to a user
|
||||
agent.send_agent_message(agent_name="agent_name", message="message")
|
||||
|
||||
# Ingest multiple documents into the agent's knowledge base
|
||||
agent.ingest_docs("your_pdf_path.pdf", "your_csv_path.csv")
|
||||
|
||||
# Run the agent with a filtered system prompt
|
||||
agent.filtered_run(
|
||||
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
|
||||
)
|
||||
|
||||
# Run the agent with multiple system prompts
|
||||
agent.bulk_run(
|
||||
[
|
||||
"How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?",
|
||||
"Another system prompt",
|
||||
]
|
||||
)
|
||||
|
||||
# Add a memory to the agent
|
||||
agent.add_memory("Add a memory to the agent")
|
||||
|
||||
# Check the number of available tokens for the agent
|
||||
agent.check_available_tokens()
|
||||
|
||||
# Perform token checks for the agent
|
||||
agent.tokens_checks()
|
||||
|
||||
# Print the dashboard of the agent
|
||||
agent.print_dashboard()
|
||||
|
||||
|
||||
# Fetch all the documents from the doc folders
|
||||
agent.get_docs_from_doc_folders()
|
||||
|
||||
# Activate agent ops
|
||||
agent.activate_agentops()
|
||||
agent.check_end_session_agentops()
|
||||
|
||||
# Dump the model to a JSON file
|
||||
agent.model_dump_json()
|
||||
print(agent.to_toml())
|
||||
```
|
@ -0,0 +1,82 @@
|
||||
from swarms.prompts.prompt import Prompt
|
||||
import subprocess
|
||||
|
||||
|
||||
# Tools
|
||||
def terminal(
    code: str,
):
    """
    Run code in the terminal.

    Args:
        code (str): The code to run in the terminal.

    Returns:
        str: The output of the code.
    """
    # Capture only stdout; stderr is discarded, matching the tool's
    # "return what the command printed" contract.
    result = subprocess.run(
        code, shell=True, capture_output=True, text=True
    )
    return str(result.stdout)
|
||||
|
||||
|
||||
def browser(query: str):
    """
    Search the query in the browser with the `browser` tool.

    Args:
        query (str): The query to search in the browser.

    Returns:
        str: The search results.
    """
    # Imported lazily so the module can load even where no browser exists.
    import webbrowser

    search_url = f"https://www.google.com/search?q={query}"
    webbrowser.open(search_url)
    return f"Searching for {query} in the browser."
|
||||
|
||||
|
||||
def create_file(file_path: str, content: str):
    """
    Create a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        content (str): The content to write to the file.

    Returns:
        str: The result of the file creation operation.
    """
    # Mode "w" truncates an existing file, so this also overwrites.
    with open(file_path, "w") as handle:
        handle.write(content)
    return f"File {file_path} created successfully."
|
||||
|
||||
|
||||
def file_editor(file_path: str, mode: str, content: str):
    """
    Edit a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        mode (str): The mode to open the file in (e.g. "w" to overwrite,
            "a" to append).
        content (str): The content to write to the file.

    Returns:
        str: The result of the file editing operation.
    """
    with open(file_path, mode) as handle:
        handle.write(content)
    return f"File {file_path} edited successfully."
|
||||
|
||||
|
||||
# Build a demo prompt, then register the terminal/file tools on it and
# show the resulting content.
prompt = Prompt(
    content="This is my first prompt!",
    name="My First Prompt",
    description="A simple example prompt.",
)

prompt.add_tools(tools=[file_editor, create_file, terminal])
print(prompt.content)
|
@ -0,0 +1,271 @@
|
||||
from typing import List, Tuple, Optional
|
||||
import numpy as np
|
||||
import torch
|
||||
from transformers import AutoTokenizer, AutoModel
|
||||
from pydantic import BaseModel, Field
|
||||
from loguru import logger
|
||||
import json
|
||||
from tenacity import retry, stop_after_attempt, wait_exponential
|
||||
|
||||
# Ensure you have the necessary libraries installed:
|
||||
# pip install torch transformers pydantic loguru tenacity
|
||||
|
||||
|
||||
class SwarmType(BaseModel):
    """A named swarm type with a description and an optional cached embedding."""

    name: str
    description: str
    # Derived from `description` by SwarmMatcher; excluded from serialization.
    embedding: Optional[List[float]] = Field(default=None, exclude=True)
|
||||
|
||||
|
||||
class SwarmMatcherConfig(BaseModel):
    """Configuration for SwarmMatcher: which encoder to load and its size."""

    # Hugging Face model id used for both the tokenizer and the encoder.
    model_name: str = "sentence-transformers/all-MiniLM-L6-v2"
    # Dimension of the sentence-transformers model.
    # NOTE(review): all-MiniLM-L6-v2 produces 384-dim embeddings, not 512;
    # this field is unused in this module — confirm before relying on it.
    embedding_dim: int = 512
|
||||
|
||||
|
||||
class SwarmMatcher:
    """
    A class for matching tasks to swarm types based on their descriptions.

    It utilizes a transformer model to generate embeddings for task and
    swarm type descriptions, and then calculates the dot product to find
    the best match.
    """

    def __init__(self):
        """
        Initializes the SwarmMatcher with a default configuration.

        Loads the tokenizer and encoder named in ``SwarmMatcherConfig``
        and starts with an empty swarm-type registry. Any load failure is
        logged and re-raised.
        """
        logger.add("swarm_matcher_debug.log", level="DEBUG")
        logger.debug("Initializing SwarmMatcher")
        try:
            config = SwarmMatcherConfig()
            self.config = config
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.model_name
            )
            self.model = AutoModel.from_pretrained(config.model_name)
            self.swarm_types: List[SwarmType] = []
            logger.debug("SwarmMatcher initialized successfully")
        except Exception as e:
            logger.error(f"Error initializing SwarmMatcher: {str(e)}")
            raise

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
    )
    def get_embedding(self, text: str) -> np.ndarray:
        """
        Generates an embedding for a given text using the configured model.

        Args:
            text (str): The text for which to generate an embedding.

        Returns:
            np.ndarray: The mean-pooled embedding vector for the text.
        """
        logger.debug(f"Getting embedding for text: {text[:50]}...")
        try:
            inputs = self.tokenizer(
                text,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=512,
            )
            with torch.no_grad():
                outputs = self.model(**inputs)
            # Mean-pool the token embeddings of the last hidden state to
            # get a single fixed-size vector per input text.
            embedding = (
                outputs.last_hidden_state.mean(dim=1)
                .squeeze()
                .numpy()
            )
            logger.debug("Embedding generated successfully")
            return embedding
        except Exception as e:
            logger.error(f"Error generating embedding: {str(e)}")
            raise

    def add_swarm_type(self, swarm_type: SwarmType):
        """
        Adds a swarm type to the list of swarm types, generating an
        embedding for its description.

        Args:
            swarm_type (SwarmType): The swarm type to add.
        """
        logger.debug(f"Adding swarm type: {swarm_type.name}")
        try:
            embedding = self.get_embedding(swarm_type.description)
            # Stored as a plain list so the model stays JSON-serializable.
            swarm_type.embedding = embedding.tolist()
            self.swarm_types.append(swarm_type)
            logger.info(f"Added swarm type: {swarm_type.name}")
        except Exception as e:
            logger.error(
                f"Error adding swarm type {swarm_type.name}: {str(e)}"
            )
            raise

    def find_best_match(self, task: str) -> Tuple[str, float]:
        """
        Finds the best match for a given task among the registered swarm types.

        Args:
            task (str): The task for which to find the best match.

        Returns:
            Tuple[str, float]: The name of the best matching swarm type and
            its score, or ``("No match", 0.0)`` when no swarm types are
            registered.
        """
        logger.debug(f"Finding best match for task: {task[:50]}...")
        try:
            task_embedding = self.get_embedding(task)
            best_match = None
            best_score = -float("inf")
            for swarm_type in self.swarm_types:
                score = np.dot(
                    task_embedding, np.array(swarm_type.embedding)
                )
                if score > best_score:
                    best_score = score
                    best_match = swarm_type
            # With an empty registry, best_match stays None; return the
            # "No match" sentinel that auto_select_swarm() checks for
            # instead of raising AttributeError on best_match.name.
            if best_match is None:
                logger.warning(
                    "No swarm types registered; cannot match task"
                )
                return "No match", 0.0
            logger.info(
                f"Best match for task: {best_match.name} (score: {best_score})"
            )
            return best_match.name, float(best_score)
        except Exception as e:
            logger.error(
                f"Error finding best match for task: {str(e)}"
            )
            raise

    def auto_select_swarm(self, task: str) -> str:
        """
        Automatically selects the best swarm type for a given task based on
        their descriptions.

        Args:
            task (str): The task for which to select a swarm type.

        Returns:
            str: The name of the selected swarm type, or a message when no
            suitable swarm type was found.
        """
        logger.debug(f"Auto-selecting swarm for task: {task[:50]}...")
        best_match, score = self.find_best_match(task)

        if (
            best_match == "No match"
        ):  # Handle the case where no match was found
            logger.info(f"Task: {task}")
            logger.info("No suitable swarm type found.")
            return "No suitable swarm type found"  # Return a message indicating no match

        logger.info(f"Task: {task}")
        logger.info(f"Selected Swarm Type: {best_match}")
        logger.info(f"Confidence Score: {score:.2f}")
        return best_match

    def run_multiple(
        self, tasks: List[str], *args, **kwargs
    ) -> List[str]:
        """
        Auto-selects a swarm type for each task in order.

        Args:
            tasks (List[str]): The tasks to match.

        Returns:
            List[str]: The selected swarm type name (or "no match" message)
            for each task, in input order.
        """
        return [self.auto_select_swarm(task) for task in tasks]

    def save_swarm_types(self, filename: str):
        """
        Saves the registered swarm types to a JSON file.

        Args:
            filename (str): The name of the file to which to save the swarm types.
        """
        try:
            with open(filename, "w") as f:
                json.dump([st.dict() for st in self.swarm_types], f)
            logger.info(f"Saved swarm types to {filename}")
        except Exception as e:
            logger.error(f"Error saving swarm types: {str(e)}")
            raise

    def load_swarm_types(self, filename: str):
        """
        Loads swarm types from a JSON file.

        Note: embeddings are excluded on save (``Field(exclude=True)``),
        so loaded swarm types need re-embedding via ``add_swarm_type``
        before matching will work — TODO confirm intended round-trip.

        Args:
            filename (str): The name of the file from which to load the swarm types.
        """
        try:
            with open(filename, "r") as f:
                swarm_types_data = json.load(f)
            self.swarm_types = [
                SwarmType(**st) for st in swarm_types_data
            ]
            logger.info(f"Loaded swarm types from {filename}")
        except Exception as e:
            logger.error(f"Error loading swarm types: {str(e)}")
            raise

    def initialize_swarm_types(self):
        """Registers the built-in swarm types (embedding each description)."""
        logger.debug("Initializing swarm types")
        swarm_types = [
            SwarmType(
                name="AgentRearrange",
                description="Optimize agent order and rearrange flow for multi-step tasks, ensuring efficient task allocation and minimizing bottlenecks",
            ),
            SwarmType(
                name="MixtureOfAgents",
                description="Combine diverse expert agents for comprehensive analysis, fostering a collaborative approach to problem-solving and leveraging individual strengths",
            ),
            SwarmType(
                name="SpreadSheetSwarm",
                description="Collaborative data processing and analysis in a spreadsheet-like environment, facilitating real-time data sharing and visualization",
            ),
            SwarmType(
                name="SequentialWorkflow",
                description="Execute tasks in a step-by-step, sequential process workflow, ensuring a logical and methodical approach to task execution",
            ),
            SwarmType(
                name="ConcurrentWorkflow",
                description="Process multiple tasks or data sources concurrently in parallel, maximizing productivity and reducing processing time",
            ),
        ]

        for swarm_type in swarm_types:
            self.add_swarm_type(swarm_type)
|
||||
|
||||
|
||||
# Example usage
|
||||
if __name__ == "__main__":
    matcher = SwarmMatcher()

    # Register the built-in swarm types; without this the registry is
    # empty and auto_select_swarm() cannot find any match.
    matcher.initialize_swarm_types()

    # Save swarm types for future use
    # matcher.save_swarm_types("swarm_types.json")

    tasks = [
        # "Optimize the order of agents for a complex financial analysis task",
        # "Combine insights from various expert agents to evaluate a potential acquisition",
        # "Process and analyze customer churn data collaboratively",
        # "Generate a comprehensive due diligence report step by step",
        "Analyze multiple data sources concurrently for market research"
    ]

    for task in tasks:
        selected_swarm = matcher.auto_select_swarm(task)
        print(selected_swarm)
|
||||
|
||||
# # Load swarm types in a new session
|
||||
# new_matcher = SwarmMatcher(config)
|
||||
# new_matcher.load_swarm_types("swarm_types.json")
|
||||
# print("Loaded swarm types:", [st.name for st in new_matcher.swarm_types])
|
||||
|
||||
# logger.info("SwarmMatcher example completed successfully")
|
@ -0,0 +1,51 @@
|
||||
from typing import Any
|
||||
|
||||
from loguru import logger
|
||||
from tenacity import retry, stop_after_attempt, wait_exponential
|
||||
|
||||
from swarms.prompts.prompt_generator import (
|
||||
prompt_generator_sys_prompt as second_sys_prompt,
|
||||
)
|
||||
from swarms.prompts.prompt_generator_optimizer import (
|
||||
prompt_generator_sys_prompt,
|
||||
)
|
||||
|
||||
|
||||
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
)
def auto_generate_prompt(
    task: str = None,
    model: Any = None,
    max_tokens: int = 4000,
    use_second_sys_prompt: bool = True,
    *args,
    **kwargs,
):
    """
    Generates a prompt for a given task using the provided model.

    Args:
        task (str, optional): The task for which to generate a prompt.
        model (Any, optional): The model used for prompt generation; must
            expose a ``run(prompt, max_tokens=...)`` method.
        max_tokens (int, optional): The maximum number of tokens in the generated prompt. Defaults to 4000.
        use_second_sys_prompt (bool, optional): Whether to use the second system prompt. Defaults to True.

    Returns:
        str: The generated prompt.

    Raises:
        ValueError: If ``task`` or ``model`` is not provided.
    """
    # Fail fast with a clear message: with the original defaults,
    # task=None crashed with TypeError on string concatenation and
    # model=None with AttributeError — both then pointlessly retried.
    if task is None:
        raise ValueError("A task must be provided to generate a prompt")
    if model is None:
        raise ValueError("A model must be provided to generate a prompt")

    try:
        system_prompt = (
            second_sys_prompt.get_prompt()
            if use_second_sys_prompt
            else prompt_generator_sys_prompt.get_prompt()
        )
        output = model.run(
            system_prompt + task, max_tokens=max_tokens
        )
        print(output)
        return output
    except Exception as e:
        logger.error(f"Error generating prompt: {str(e)}")
        raise
|
@ -0,0 +1,70 @@
|
||||
from swarms.prompts.prompt import Prompt
|
||||
|
||||
# Aggregator system prompt: meta-prompt instructing an LLM how to write
# production-grade system prompts for other agents.
prompt_generator_sys_prompt = Prompt(
    name="prompt-generator-sys-prompt-o1",
    description="Generate the most reliable prompt for a specific problem",
    content="""
Your purpose is to craft extremely reliable and production-grade system prompts for other agents.

# Instructions
- Understand the prompt required for the agent.
- Utilize a combination of the most effective prompting strategies available, including chain of thought, many shot, few shot, and instructions-examples-constraints.
- Craft the prompt by blending the most suitable prompting strategies.
- Ensure the prompt is production-grade ready and educates the agent on how to reason and why to reason in that manner.
- Provide constraints if necessary and as needed.
- The system prompt should be extensive and cover a vast array of potential scenarios to specialize the agent.
""",
)


# print(prompt_generator_sys_prompt.get_prompt())
# Replace the initial content with the longer, guideline-driven version.
# Fix: the REACT example's closing observation tag was unterminated
# (`</observation`), which would teach agents to emit malformed tags.
prompt_generator_sys_prompt.edit_prompt(
    """

Your primary objective is to design and develop highly reliable and production-grade system prompts tailored to the specific needs of other agents. This requires a deep understanding of the agent's capabilities, limitations, and the tasks they are intended to perform.

####### Guidelines #################
1. **Thoroughly understand the agent's requirements**: Before crafting the prompt, it is essential to comprehend the agent's capabilities, its intended use cases, and the specific tasks it needs to accomplish. This understanding will enable you to create a prompt that effectively leverages the agent's strengths and addresses its weaknesses.
2. **Employ a diverse range of prompting strategies**: To ensure the prompt is effective and versatile, incorporate a variety of prompting strategies, including but not limited to:
   - **Chain of thought**: Encourage the agent to think step-by-step, breaking down complex problems into manageable parts.
   - **Many shot**: Provide multiple examples or scenarios to help the agent generalize and adapt to different situations.
   - **Few shot**: Offer a limited number of examples or scenarios to prompt the agent to learn from sparse data.
   - **Instructions-examples-constraints**: Combine clear instructions with relevant examples and constraints to guide the agent's behavior and ensure it operates within defined boundaries.
3. **Blend prompting strategies effectively**: Select the most suitable prompting strategies for the specific task or scenario and combine them in a way that enhances the agent's understanding and performance.
4. **Ensure production-grade quality and educational value**: The prompt should not only be effective in guiding the agent's actions but also provide educational value by teaching the agent how to reason, why to reason in a particular way, and how to apply its knowledge in various contexts.
5. **Provide constraints as necessary**: Include constraints in the prompt to ensure the agent operates within predetermined parameters, adheres to specific guidelines, or follows established protocols.
6. **Design for extensibility and scenario coverage**: Craft the prompt to be extensive and cover a wide range of potential scenarios, enabling the agent to specialize and adapt to diverse situations.
7. **Continuously evaluate and refine**: Regularly assess the effectiveness of the prompt and refine it as needed to ensure it remains relevant, efficient, and aligned with the agent's evolving capabilities and requirements.

By following these guidelines and incorporating a deep understanding of the agent's needs, you can create system prompts that are not only reliable and production-grade but also foster the agent's growth and specialization.



######### Example Output Formats ########


# Instruction-Examples-Constraints

The agent's role and responsibilities

# Instructions

# Examples

# Constraints

################### REACT ############


<observation> your observation </observation>

<plan> your plan </plan>


<action> your action </action>

"""
)

print(prompt_generator_sys_prompt.get_prompt())
print(prompt_generator_sys_prompt.model_dump_json(indent=4))
|
@ -0,0 +1,320 @@
|
||||
import os
|
||||
from typing import List, Optional
|
||||
|
||||
import chromadb
|
||||
from loguru import logger
|
||||
from tenacity import retry, stop_after_attempt, wait_exponential
|
||||
from typing import Union, Callable, Any
|
||||
from swarms import Agent
|
||||
|
||||
|
||||
class AgentRAG:
    """A vector database for storing and retrieving agents based on their characteristics."""

    def __init__(
        self,
        collection_name: str = "agents",
        persist_directory: str = "./vector_db",
        n_agents: int = 1,
        *args,
        **kwargs,
    ):
        """
        Initialize the AgentRAG.

        Args:
            collection_name (str): Name of the ChromaDB collection to use.
            persist_directory (str): The directory to persist the ChromaDB data.
            n_agents (int): Number of results requested per query.
        """
        self.collection_name = collection_name
        self.n_agents = n_agents
        # NOTE(review): persist_directory is stored but never passed to the
        # chromadb client, so data is not actually persisted -- confirm intent.
        self.persist_directory = persist_directory
        self.client = chromadb.Client(*args, **kwargs)
        # get_or_create_collection instead of create_collection so a second
        # AgentRAG instance (or a re-run in the same process) does not crash
        # on a duplicate collection name.
        self.collection = self.client.get_or_create_collection(
            collection_name
        )
        self.agents: List[Agent] = []

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
    )
    def add_agent(self, agent: Agent) -> None:
        """
        Add an agent to the vector database.

        The agent's name, description and system prompt are concatenated into
        the document used for similarity search; the name doubles as the ID.

        Args:
            agent (Agent): The agent to add.
        """
        try:
            # assumes Agent exposes `.name` / `.description` / `.system_prompt`
            # attributes -- TODO confirm (callers construct with `agent_name=`)
            agent_text = f"{agent.name} {agent.description} {agent.system_prompt}"
            self.collection.add(
                documents=[agent_text],
                metadatas=[{"name": agent.name}],
                ids=[agent.name],
            )
            self.agents.append(agent)
            logger.info(
                f"Added agent {agent.name} to the vector database."
            )
        except Exception as e:
            logger.error(
                f"Error adding agent {agent.name} to the vector database: {str(e)}"
            )
            raise

    def add_agents(
        self, agents: List[Union[Agent, Callable, Any]]
    ) -> None:
        """Add multiple agents to the vector database."""
        for agent in agents:
            self.add_agent(agent)

    def update_agent_history(self, agent_name: str) -> None:
        """
        Update the agent's entry in the vector database with its interaction history.

        Args:
            agent_name (str): The name of the agent to update.
        """
        agent = next(
            (a for a in self.agents if a.name == agent_name), None
        )
        if agent:
            # return_history_as_string() already yields a single string.
            # Fix: the previous `" ".join(history)` iterated the *characters*
            # of that string, interleaving a space between every character.
            history_text = (
                agent.short_memory.return_history_as_string()
            )
            updated_text = f"{agent.name} {agent.description} {agent.system_prompt} {history_text}"

            self.collection.update(
                ids=[agent_name],
                documents=[updated_text],
                metadatas=[{"name": agent_name}],
            )
            logger.info(
                f"Updated agent {agent_name} with interaction history."
            )
        else:
            logger.warning(
                f"Agent {agent_name} not found in the database."
            )

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
    )
    def find_best_agent(
        self, task: str, *args, **kwargs
    ) -> Optional[Agent]:
        """
        Find the best agent for a given task.

        Args:
            task (str): The task description.

        Returns:
            Optional[Agent]: The best matching agent, or None if no match.
        """
        try:
            results = self.collection.query(
                query_texts=[task],
                n_results=self.n_agents,
                *args,
                **kwargs,
            )

            # Guard the inner list too: an empty collection returns [[]],
            # which previously raised IndexError on [0][0].
            if results["ids"] and results["ids"][0]:
                best_match_name = results["ids"][0][0]
                best_agent = next(
                    (
                        a
                        for a in self.agents
                        if a.name == best_match_name
                    ),
                    None,
                )
                if best_agent:
                    logger.info(
                        f"Found best matching agent: {best_match_name}"
                    )
                    return best_agent
                else:
                    # Index and in-memory list can diverge if agents were
                    # added to the collection by another instance.
                    logger.warning(
                        f"Agent {best_match_name} found in index but not in agents list."
                    )
            else:
                logger.warning(
                    "No matching agent found for the given task."
                )

            return None
        except Exception as e:
            logger.error(f"Error finding best agent: {str(e)}")
            raise
|
||||
|
||||
|
||||
# Example usage: build a small "agent RAG" of private-equity specialist
# agents, then route a task to the most semantically similar agent.
if __name__ == "__main__":
    from dotenv import load_dotenv
    from swarm_models import OpenAIChat

    load_dotenv()

    # Get the OpenAI API key from the environment variable
    # NOTE(review): the variable is GROQ_API_KEY -- the key is sent to the
    # Groq OpenAI-compatible endpoint below, not to OpenAI itself.
    api_key = os.getenv("GROQ_API_KEY")

    # Model: OpenAI-compatible client pointed at Groq's API.
    model = OpenAIChat(
        openai_api_base="https://api.groq.com/openai/v1",
        openai_api_key=api_key,
        model_name="llama-3.1-70b-versatile",
        temperature=0.1,
    )
    # Initialize the vector database
    vector_db = AgentRAG()

    # Define specialized system prompts for each agent
    DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. Your expertise includes:
    1. Extracting key financial metrics (revenue, EBITDA, growth rates, etc.) from financial statements and reports
    2. Identifying and extracting important contract terms from legal documents
    3. Pulling out relevant market data from industry reports and analyses
    4. Extracting operational KPIs from management presentations and internal reports
    5. Identifying and extracting key personnel information from organizational charts and bios
    Provide accurate, structured data extracted from various document types to support investment analysis."""

    SUMMARIZER_PROMPT = """You are an expert private equity agent specializing in summarizing complex documents. Your core competencies include:
    1. Distilling lengthy financial reports into concise executive summaries
    2. Summarizing legal documents, highlighting key terms and potential risks
    3. Condensing industry reports to capture essential market trends and competitive dynamics
    4. Summarizing management presentations to highlight key strategic initiatives and projections
    5. Creating brief overviews of technical documents, emphasizing critical points for non-technical stakeholders
    Deliver clear, concise summaries that capture the essence of various documents while highlighting information crucial for investment decisions."""

    FINANCIAL_ANALYST_PROMPT = """You are a specialized private equity agent focused on financial analysis. Your key responsibilities include:
    1. Analyzing historical financial statements to identify trends and potential issues
    2. Evaluating the quality of earnings and potential adjustments to EBITDA
    3. Assessing working capital requirements and cash flow dynamics
    4. Analyzing capital structure and debt capacity
    5. Evaluating financial projections and underlying assumptions
    Provide thorough, insightful financial analysis to inform investment decisions and valuation."""

    MARKET_ANALYST_PROMPT = """You are a highly skilled private equity agent specializing in market analysis. Your expertise covers:
    1. Analyzing industry trends, growth drivers, and potential disruptors
    2. Evaluating competitive landscape and market positioning
    3. Assessing market size, segmentation, and growth potential
    4. Analyzing customer dynamics, including concentration and loyalty
    5. Identifying potential regulatory or macroeconomic impacts on the market
    Deliver comprehensive market analysis to assess the attractiveness and risks of potential investments."""

    OPERATIONAL_ANALYST_PROMPT = """You are an expert private equity agent focused on operational analysis. Your core competencies include:
    1. Evaluating operational efficiency and identifying improvement opportunities
    2. Analyzing supply chain and procurement processes
    3. Assessing sales and marketing effectiveness
    4. Evaluating IT systems and digital capabilities
    5. Identifying potential synergies in merger or add-on acquisition scenarios
    Provide detailed operational analysis to uncover value creation opportunities and potential risks."""

    # Initialize specialized agents -- all share the same model and config,
    # differing only in name, system prompt, and saved-state path.
    data_extractor_agent = Agent(
        agent_name="Data-Extractor",
        system_prompt=DATA_EXTRACTOR_PROMPT,
        llm=model,
        max_loops=1,
        autosave=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        saved_state_path="data_extractor_agent.json",
        user_name="pe_firm",
        retry_attempts=1,
        context_length=200000,
        output_type="string",
    )

    summarizer_agent = Agent(
        agent_name="Document-Summarizer",
        system_prompt=SUMMARIZER_PROMPT,
        llm=model,
        max_loops=1,
        autosave=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        saved_state_path="summarizer_agent.json",
        user_name="pe_firm",
        retry_attempts=1,
        context_length=200000,
        output_type="string",
    )

    financial_analyst_agent = Agent(
        agent_name="Financial-Analyst",
        system_prompt=FINANCIAL_ANALYST_PROMPT,
        llm=model,
        max_loops=1,
        autosave=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        saved_state_path="financial_analyst_agent.json",
        user_name="pe_firm",
        retry_attempts=1,
        context_length=200000,
        output_type="string",
    )

    market_analyst_agent = Agent(
        agent_name="Market-Analyst",
        system_prompt=MARKET_ANALYST_PROMPT,
        llm=model,
        max_loops=1,
        autosave=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        saved_state_path="market_analyst_agent.json",
        user_name="pe_firm",
        retry_attempts=1,
        context_length=200000,
        output_type="string",
    )

    operational_analyst_agent = Agent(
        agent_name="Operational-Analyst",
        system_prompt=OPERATIONAL_ANALYST_PROMPT,
        llm=model,
        max_loops=1,
        autosave=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        saved_state_path="operational_analyst_agent.json",
        user_name="pe_firm",
        retry_attempts=1,
        context_length=200000,
        output_type="string",
    )

    # Create agents (using the agents from the original code)
    agents_to_add = [
        data_extractor_agent,
        summarizer_agent,
        financial_analyst_agent,
        market_analyst_agent,
        operational_analyst_agent,
    ]

    # Add agents to the vector database
    for agent in agents_to_add:
        vector_db.add_agent(agent)

    # Example task
    task = "Analyze the financial statements of a potential acquisition target and identify key growth drivers."

    # Find the best agent for the task
    best_agent = vector_db.find_best_agent(task)

    if best_agent:
        logger.info(f"Best agent for the task: {best_agent.name}")
        # Use the best agent to perform the task
        result = best_agent.run(task)
        print(f"Task result: {result}")

        # Update the agent's history in the database
        vector_db.update_agent_history(best_agent.name)
    else:
        print("No suitable agent found for the task.")

    # Save the vector database
    # NOTE(review): no save call follows this comment -- the script appears
    # truncated here; confirm whether persistence was intended.
|
@ -0,0 +1,3 @@
|
||||
"""
Given a swarm type as input, this module will auto-generate a list of `Agent` structures, each with a name, description, system prompt, and other attributes.
"""
|
@ -0,0 +1,250 @@
|
||||
from typing import List, Tuple, Optional
|
||||
import numpy as np
|
||||
import torch
|
||||
from transformers import AutoTokenizer, AutoModel
|
||||
from pydantic import BaseModel, Field
|
||||
from loguru import logger
|
||||
import json
|
||||
from tenacity import retry, stop_after_attempt, wait_exponential
|
||||
|
||||
# Ensure you have the necessary libraries installed:
|
||||
# pip install torch transformers pydantic loguru tenacity
|
||||
|
||||
|
||||
class SwarmType(BaseModel):
    """A named swarm architecture whose description is embedded for task matching."""

    name: str
    description: str
    # Embedding of `description`; populated by SwarmMatcher.add_swarm_type.
    # `exclude=True` means it is omitted from dict()/JSON serialization, so
    # files written by save_swarm_types do not contain embeddings.
    embedding: Optional[List[float]] = Field(
        default=None, exclude=True
    )
|
||||
|
||||
|
||||
class SwarmMatcherConfig(BaseModel):
    """Configuration for SwarmMatcher: embedding model name and its output size."""

    # Hugging Face model used to embed task and swarm-type descriptions.
    model_name: str = "sentence-transformers/all-MiniLM-L6-v2"
    embedding_dim: int = (
        # all-MiniLM-L6-v2 produces 384-dimensional embeddings; the previous
        # value (512) is that model's max sequence length, not its hidden size.
        384
    )
|
||||
|
||||
|
||||
class SwarmMatcher:
    """
    A class for matching tasks to swarm types based on their descriptions.
    It utilizes a transformer model to generate embeddings for task and swarm type descriptions,
    and then calculates the dot product to find the best match.
    """

    def __init__(self, config: SwarmMatcherConfig):
        """
        Initializes the SwarmMatcher with a configuration.

        Args:
            config (SwarmMatcherConfig): The configuration for the SwarmMatcher.
        """
        # NOTE(review): this registers a new file sink on every instantiation;
        # multiple instances will write duplicate lines to the log file.
        logger.add("swarm_matcher_debug.log", level="DEBUG")
        logger.debug("Initializing SwarmMatcher")
        try:
            self.config = config
            self.tokenizer = AutoTokenizer.from_pretrained(
                config.model_name
            )
            self.model = AutoModel.from_pretrained(config.model_name)
            self.swarm_types: List[SwarmType] = []
            logger.debug("SwarmMatcher initialized successfully")
        except Exception as e:
            logger.error(f"Error initializing SwarmMatcher: {str(e)}")
            raise

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
    )
    def get_embedding(self, text: str) -> np.ndarray:
        """
        Generates an embedding for a given text using the configured model.

        Args:
            text (str): The text for which to generate an embedding.

        Returns:
            np.ndarray: The embedding vector for the text (mean-pooled
            last hidden state).
        """
        logger.debug(f"Getting embedding for text: {text[:50]}...")
        try:
            inputs = self.tokenizer(
                text,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=512,
            )
            with torch.no_grad():
                outputs = self.model(**inputs)
            # Mean-pool over the sequence dimension to get one vector.
            embedding = (
                outputs.last_hidden_state.mean(dim=1)
                .squeeze()
                .numpy()
            )
            logger.debug("Embedding generated successfully")
            return embedding
        except Exception as e:
            logger.error(f"Error generating embedding: {str(e)}")
            raise

    def add_swarm_type(self, swarm_type: SwarmType):
        """
        Adds a swarm type to the list of swarm types, generating an embedding for its description.

        Args:
            swarm_type (SwarmType): The swarm type to add.
        """
        logger.debug(f"Adding swarm type: {swarm_type.name}")
        try:
            embedding = self.get_embedding(swarm_type.description)
            swarm_type.embedding = embedding.tolist()
            self.swarm_types.append(swarm_type)
            logger.info(f"Added swarm type: {swarm_type.name}")
        except Exception as e:
            logger.error(
                f"Error adding swarm type {swarm_type.name}: {str(e)}"
            )
            raise

    def find_best_match(self, task: str) -> Tuple[str, float]:
        """
        Finds the best match for a given task among the registered swarm types.

        Args:
            task (str): The task for which to find the best match.

        Returns:
            Tuple[str, float]: A tuple containing the name of the best matching swarm type and the score.

        Raises:
            ValueError: If no swarm types have been registered.
        """
        logger.debug(f"Finding best match for task: {task[:50]}...")
        try:
            # Previously an empty registry fell through to
            # `best_match.name` with best_match=None (AttributeError).
            if not self.swarm_types:
                raise ValueError(
                    "No swarm types registered; call add_swarm_type() first."
                )
            task_embedding = self.get_embedding(task)
            best_match = None
            best_score = -float("inf")
            # Raw dot-product similarity (embeddings are not normalized).
            for swarm_type in self.swarm_types:
                score = np.dot(
                    task_embedding, np.array(swarm_type.embedding)
                )
                if score > best_score:
                    best_score = score
                    best_match = swarm_type
            logger.info(
                f"Best match for task: {best_match.name} (score: {best_score})"
            )
            return best_match.name, float(best_score)
        except Exception as e:
            logger.error(
                f"Error finding best match for task: {str(e)}"
            )
            raise

    def auto_select_swarm(self, task: str) -> str:
        """
        Automatically selects the best swarm type for a given task based on their descriptions.

        Args:
            task (str): The task for which to select a swarm type.

        Returns:
            str: The name of the selected swarm type.
        """
        logger.debug(f"Auto-selecting swarm for task: {task[:50]}...")
        best_match, score = self.find_best_match(task)
        logger.info(f"Task: {task}")
        logger.info(f"Selected Swarm Type: {best_match}")
        logger.info(f"Confidence Score: {score:.2f}")
        return best_match

    def run_multiple(
        self, tasks: List[str], *args, **kwargs
    ) -> List[str]:
        """
        Selects the best swarm type for each task in ``tasks``.

        Returns:
            List[str]: One selected swarm-type name per task.
            (Annotation fixed: the method has always returned a list,
            not a str.)
        """
        swarms = []

        for task in tasks:
            output = self.auto_select_swarm(task)

            # Append
            swarms.append(output)

        return swarms

    def save_swarm_types(self, filename: str):
        """
        Saves the registered swarm types to a JSON file.

        Note: embeddings are excluded from serialization (see SwarmType),
        so the saved file contains names and descriptions only.

        Args:
            filename (str): The name of the file to which to save the swarm types.
        """
        try:
            with open(filename, "w") as f:
                json.dump([st.dict() for st in self.swarm_types], f)
            # Fixed: log message previously printed a literal placeholder
            # instead of the target filename.
            logger.info(f"Saved swarm types to {filename}")
        except Exception as e:
            logger.error(f"Error saving swarm types: {str(e)}")
            raise

    def load_swarm_types(self, filename: str):
        """
        Loads swarm types from a JSON file.

        Embeddings are not stored in the file (excluded at save time), so
        any missing embedding is regenerated here; otherwise a subsequent
        find_best_match would crash on ``np.array(None)``.

        Args:
            filename (str): The name of the file from which to load the swarm types.
        """
        try:
            with open(filename, "r") as f:
                swarm_types_data = json.load(f)
            loaded: List[SwarmType] = []
            for st in swarm_types_data:
                swarm_type = SwarmType(**st)
                if swarm_type.embedding is None:
                    swarm_type.embedding = self.get_embedding(
                        swarm_type.description
                    ).tolist()
                loaded.append(swarm_type)
            self.swarm_types = loaded
            logger.info(f"Loaded swarm types from {filename}")
        except Exception as e:
            logger.error(f"Error loading swarm types: {str(e)}")
            raise
|
||||
|
||||
|
||||
def initialize_swarm_types(matcher: SwarmMatcher):
    """Register the built-in swarm types with the given matcher."""
    logger.debug("Initializing swarm types")
    # (name, description) pairs for every built-in swarm architecture.
    catalog = [
        (
            "AgentRearrange",
            "Optimize agent order and rearrange flow for multi-step tasks, ensuring efficient task allocation and minimizing bottlenecks",
        ),
        (
            "MixtureOfAgents",
            "Combine diverse expert agents for comprehensive analysis, fostering a collaborative approach to problem-solving and leveraging individual strengths",
        ),
        (
            "SpreadSheetSwarm",
            "Collaborative data processing and analysis in a spreadsheet-like environment, facilitating real-time data sharing and visualization",
        ),
        (
            "SequentialWorkflow",
            "Execute tasks in a step-by-step, sequential process workflow, ensuring a logical and methodical approach to task execution",
        ),
        (
            "ConcurrentWorkflow",
            "Process multiple tasks or data sources concurrently in parallel, maximizing productivity and reducing processing time",
        ),
    ]

    for type_name, type_description in catalog:
        matcher.add_swarm_type(
            SwarmType(name=type_name, description=type_description)
        )
    logger.debug("Swarm types initialized")
|
||||
|
||||
|
||||
def swarm_matcher(task: str, *args, **kwargs):
    """
    Build a SwarmMatcher with the default swarm types, persist them to
    ``swarm_types.json``, and return the best-matching swarm type name
    for ``task``.
    """
    # Fresh matcher with the built-in swarm-type catalog.
    matcher = SwarmMatcher(SwarmMatcherConfig())
    initialize_swarm_types(matcher)

    # Persist the registered types alongside the selection.
    matcher.save_swarm_types("swarm_types.json")

    selected = matcher.auto_select_swarm(task)
    logger.info(f"{selected}")
    return selected
|
Loading…
Reference in new issue