parent
2a5d1befea
commit
3cd03c13eb
@ -0,0 +1,13 @@
|
||||
1. make agent api - fastapi (see the sketch after this list)
|
||||
2. make agent cron job
|
||||
3. agents that could listen to events
|
||||
4. run on startup, every time the machine starts
|
||||
5. docker
|
||||
6. kubernetes
|
||||
7. aws, google cloud, etc.
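A minimal sketch of item 1 (exposing an agent behind a FastAPI endpoint). The route, request model, and agent arguments are illustrative assumptions that mirror the examples elsewhere in this PR, not a fixed design.

```python
from fastapi import FastAPI
from pydantic import BaseModel
from swarms import Agent

app = FastAPI()
agent = Agent(agent_name="DeployedAgent", model_name="gpt-4.1", max_loops=1)

class TaskRequest(BaseModel):
    task: str

@app.post("/run")
def run_agent(request: TaskRequest):
    # Delegate the incoming task to the agent and return its output
    return {"result": agent.run(request.task)}

# Launch with: uvicorn main:app --reload
```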
|
||||
|
||||
|
||||
|
||||
user -> builds agent -> user now needs to deploy the agent
|
||||
|
||||
FAST
|
@ -0,0 +1,139 @@
|
||||
# CronJob
|
||||
|
||||
A wrapper class that turns any callable (including Swarms agents) into a scheduled cron job. The class schedules and runs tasks at specified intervals using the `schedule` library with cron-style interval strings.
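For orientation, the snippet below shows the underlying mechanism CronJob builds on, using the third-party `schedule` package directly; the task function and interval are illustrative, and this is not the class's internal implementation.

```python
import time
import schedule  # third-party package used for interval-based scheduling

def my_task():
    print("tick")

# Roughly what CronJob(..., interval="10seconds") arranges internally
schedule.every(10).seconds.do(my_task)

while True:
    schedule.run_pending()  # execute any jobs that are due
    time.sleep(1)           # avoid busy-waiting between checks
```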
|
||||
|
||||
## Overview
|
||||
|
||||
The CronJob class allows you to:
|
||||
|
||||
- Schedule any callable or Swarms Agent to run at specified intervals
|
||||
|
||||
- Specify intervals in seconds, minutes, or hours
|
||||
|
||||
- Run tasks in a separate thread
|
||||
|
||||
- Handle graceful start/stop of scheduled jobs
|
||||
|
||||
- Manage multiple concurrent scheduled jobs
|
||||
|
||||
## Architecture
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[CronJob] --> B[Initialize]
|
||||
B --> C[Parse Interval]
|
||||
C --> D[Schedule Task]
|
||||
D --> E[Run Job]
|
||||
E --> F[Execute Task]
|
||||
F --> G{Is Agent?}
|
||||
G -->|Yes| H[Run Agent]
|
||||
G -->|No| I[Run Callable]
|
||||
H --> J[Handle Result]
|
||||
I --> J
|
||||
J --> K[Sleep]
|
||||
K --> E
|
||||
```
|
||||
|
||||
## Class Reference
|
||||
|
||||
### Constructor
|
||||
|
||||
```python
|
||||
def __init__(
|
||||
agent: Optional[Union[Agent, Callable]] = None,
|
||||
interval: Optional[str] = None,
|
||||
job_id: Optional[str] = None
|
||||
)
|
||||
```
|
||||
|
||||
| Parameter | Type | Description | Required |
|
||||
|-----------|------|-------------|-----------|
|
||||
| agent | Agent or Callable | The Swarms Agent instance or callable to be scheduled | No |
|
||||
| interval | str | The interval string (e.g., "5seconds", "10minutes", "1hour") | No |
|
||||
| job_id | str | Unique identifier for the job. If not provided, one will be generated | No |
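The `interval` string combines a count and a unit. A rough sketch of how such strings could be normalized to seconds is shown below; `parse_interval` is a hypothetical helper for illustration, not the library's actual parser.

```python
import re

def parse_interval(interval: str) -> int:
    """Convert strings like '5seconds', '10minutes', or '1hour' to seconds (illustrative)."""
    match = re.fullmatch(r"(\d+)\s*(second|minute|hour)s?", interval.strip().lower())
    if match is None:
        raise ValueError(f"Unrecognized interval: {interval!r}")
    value, unit = int(match.group(1)), match.group(2)
    return value * {"second": 1, "minute": 60, "hour": 3600}[unit]

assert parse_interval("10seconds") == 10
assert parse_interval("5minutes") == 300
assert parse_interval("1hour") == 3600
```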
|
||||
|
||||
### Methods
|
||||
|
||||
#### run
|
||||
|
||||
```python
|
||||
def run(task: str, **kwargs)
|
||||
```
|
||||
|
||||
| Parameter | Type | Description | Required |
|
||||
|-----------|------|-------------|-----------|
|
||||
| task | str | The task string to be executed by the agent | Yes |
|
||||
| **kwargs | dict | Additional parameters to pass to the agent's run method | No |
|
||||
|
||||
#### __call__
|
||||
|
||||
```python
|
||||
def __call__(task: str, **kwargs)
|
||||
```
|
||||
|
||||
| Parameter | Type | Description | Required |
|
||||
|-----------|------|-------------|-----------|
|
||||
| task | str | The task string to be executed | Yes |
|
||||
| **kwargs | dict | Additional parameters to pass to the agent's run method | No |
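Because the instance is callable, invoking it is presumably equivalent to calling `run` directly. A minimal sketch (the `heartbeat` function and job id are illustrative):

```python
from swarms import CronJob

def heartbeat(task: str):
    print(f"heartbeat: {task}")
    return "ok"

cron_job = CronJob(agent=heartbeat, interval="5seconds", job_id="heartbeat_job")

# Calling the instance delegates to run()
cron_job("ping")
```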
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Usage with Swarms Agent
|
||||
|
||||
```python
|
||||
from swarms import Agent, CronJob
|
||||
from loguru import logger
|
||||
|
||||
# Initialize the agent
|
||||
agent = Agent(
|
||||
agent_name="Quantitative-Trading-Agent",
|
||||
agent_description="Advanced quantitative trading and algorithmic analysis agent",
|
||||
system_prompt="""You are an expert quantitative trading agent...""",
|
||||
max_loops=1,
|
||||
model_name="gpt-4.1",
|
||||
dynamic_temperature_enabled=True,
|
||||
output_type="str-all-except-first",
|
||||
streaming_on=True,
|
||||
print_on=True,
|
||||
telemetry_enable=False,
|
||||
)
|
||||
|
||||
# Create and run a cron job every 10 seconds
|
||||
logger.info("Starting example cron job")
|
||||
cron_job = CronJob(agent=agent, interval="10seconds")
|
||||
cron_job.run(
|
||||
task="What are the best top 3 etfs for gold coverage?"
|
||||
)
|
||||
```
|
||||
|
||||
### Using with a Custom Function
|
||||
|
||||
```python
|
||||
def custom_task(task: str):
|
||||
print(f"Executing task: {task}")
|
||||
return "Task completed"
|
||||
|
||||
# Create a cron job with a custom function
|
||||
cron_job = CronJob(
|
||||
agent=custom_task,
|
||||
interval="5minutes",
|
||||
job_id="custom_task_job"
|
||||
)
|
||||
cron_job.run("Perform analysis")
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The CronJob class provides a powerful way to schedule and automate tasks using Swarms Agents or custom functions. Key benefits include:
|
||||
|
||||
- Easy integration with Swarms Agents
|
||||
|
||||
- Flexible interval scheduling
|
||||
|
||||
- Thread-safe execution
|
||||
|
||||
- Graceful error handling
|
||||
|
||||
- Simple API for task scheduling
|
||||
|
||||
- Support for both agent and callable-based tasks
|
@ -1,192 +1,802 @@
|
||||
# GraphWorkflow Documentation
|
||||
# GraphWorkflow
|
||||
|
||||
A powerful workflow orchestration system that creates directed graphs of agents for complex multi-agent collaboration and task execution.
|
||||
|
||||
## Overview
|
||||
|
||||
The `GraphWorkflow` class is a sophisticated workflow management system that enables the creation and execution of complex multi-agent workflows. It represents workflows as directed graphs where nodes are agents and edges represent data flow and dependencies between agents. The system supports parallel execution, automatic compilation optimization, and comprehensive visualization capabilities.
|
||||
|
||||
Key features:
|
||||
|
||||
| Feature | Description |
|
||||
|------------------------|-----------------------------------------------------------------------------------------------|
|
||||
| **Agent-based nodes** | Each node represents an agent that can process tasks |
|
||||
| **Directed graph structure** | Edges define the flow of data between agents |
|
||||
| **Parallel execution** | Multiple agents can run simultaneously within layers |
|
||||
| **Automatic compilation** | Optimizes workflow structure for efficient execution |
|
||||
| **Rich visualization** | Generate visual representations using Graphviz |
|
||||
| **Serialization** | Save and load workflows as JSON |
|
||||
| **Pattern detection** | Automatically identifies parallel processing patterns |
|
||||
|
||||
## Architecture
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph "GraphWorkflow Architecture"
|
||||
A[GraphWorkflow] --> B[Node Collection]
|
||||
A --> C[Edge Collection]
|
||||
A --> D[NetworkX Graph]
|
||||
A --> E[Execution Engine]
|
||||
|
||||
B --> F[Agent Nodes]
|
||||
C --> G[Directed Edges]
|
||||
D --> H[Topological Sort]
|
||||
E --> I[Parallel Execution]
|
||||
E --> J[Layer Processing]
|
||||
|
||||
subgraph "Node Types"
|
||||
F --> K[Agent Node]
|
||||
K --> L[Agent Instance]
|
||||
K --> M[Node Metadata]
|
||||
end
|
||||
|
||||
subgraph "Edge Types"
|
||||
G --> N[Simple Edge]
|
||||
G --> O[Fan-out Edge]
|
||||
G --> P[Fan-in Edge]
|
||||
G --> Q[Parallel Chain]
|
||||
end
|
||||
|
||||
subgraph "Execution Patterns"
|
||||
I --> R[Thread Pool]
|
||||
I --> S[Concurrent Futures]
|
||||
J --> T[Layer-by-layer]
|
||||
J --> U[Dependency Resolution]
|
||||
end
|
||||
end
|
||||
```
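Conceptually, the layer-by-layer execution shown above can be derived from the topological generations of the directed graph. The sketch below uses NetworkX directly to illustrate the idea; it is not GraphWorkflow's internal code, and the node names are just examples.

```python
import networkx as nx

g = nx.DiGraph()
g.add_edges_from([
    ("DataCollector", "TechnicalAnalyst"),
    ("DataCollector", "FundamentalAnalyst"),
    ("TechnicalAnalyst", "SynthesisAgent"),
    ("FundamentalAnalyst", "SynthesisAgent"),
])

# Each generation contains nodes whose dependencies are already satisfied,
# so nodes within a generation can run in parallel.
for layer_index, layer in enumerate(nx.topological_generations(g)):
    print(layer_index, sorted(layer))
# 0 ['DataCollector']
# 1 ['FundamentalAnalyst', 'TechnicalAnalyst']
# 2 ['SynthesisAgent']
```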
|
||||
|
||||
## Class Reference
|
||||
|
||||
### Attributes
|
||||
| Parameter | Type | Description | Default |
|
||||
|-----------|------|-------------|---------|
|
||||
| `id` | `Optional[str]` | Unique identifier for the workflow | Auto-generated UUID |
|
||||
| `name` | `Optional[str]` | Human-readable name for the workflow | "Graph-Workflow-01" |
|
||||
| `description` | `Optional[str]` | Detailed description of the workflow | Generic description |
|
||||
| `nodes` | `Optional[Dict[str, Node]]` | Initial collection of nodes | `{}` |
|
||||
| `edges` | `Optional[List[Edge]]` | Initial collection of edges | `[]` |
|
||||
| `entry_points` | `Optional[List[str]]` | Node IDs that serve as starting points | `[]` |
|
||||
| `end_points` | `Optional[List[str]]` | Node IDs that serve as ending points | `[]` |
|
||||
| `max_loops` | `int` | Maximum number of execution loops | `1` |
|
||||
| `task` | `Optional[str]` | The task to be executed by the workflow | `None` |
|
||||
| `auto_compile` | `bool` | Whether to automatically compile the workflow | `True` |
|
||||
| `verbose` | `bool` | Whether to enable detailed logging | `False` |
|
||||
|
||||
### Core Methods
|
||||
|
||||
#### `add_node(agent: Agent, **kwargs)`
|
||||
|
||||
Adds an agent node to the workflow graph.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| `agent` | `Agent` | The agent to add as a node |
|
||||
| `**kwargs` | `Any` | Additional keyword arguments for the node |
|
||||
|
||||
**Raises:**
|
||||
|
||||
- `ValueError`: If a node with the same ID already exists
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow = GraphWorkflow()
|
||||
agent = Agent(agent_name="ResearchAgent", model_name="gpt-4")
|
||||
workflow.add_node(agent, metadata={"priority": "high"})
|
||||
```
|
||||
|
||||
#### `add_edge(edge_or_source, target=None, **kwargs)`
|
||||
|
||||
Adds an edge to connect nodes in the workflow.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| `edge_or_source` | `Edge` or `str` | Either an Edge object or source node ID |
|
||||
| `target` | `str` | Target node ID (required if edge_or_source is not an Edge) |
|
||||
| `**kwargs` | `Any` | Additional keyword arguments for the edge |
|
||||
|
||||
**Raises:**
|
||||
|
||||
- `ValueError`: If source or target nodes don't exist
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
# Using Edge object
|
||||
edge = Edge(source="agent1", target="agent2")
|
||||
workflow.add_edge(edge)
|
||||
|
||||
# Using node IDs
|
||||
workflow.add_edge("agent1", "agent2", metadata={"priority": "high"})
|
||||
```
|
||||
|
||||
#### `add_edges_from_source(source, targets, **kwargs)`
|
||||
|
||||
Creates a fan-out pattern where one source connects to multiple targets.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| `source` | `str` | Source node ID |
|
||||
| `targets` | `List[str]` | List of target node IDs |
|
||||
| `**kwargs` | `Any` | Additional keyword arguments for all edges |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `List[Edge]`: List of created Edge objects
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow.add_edges_from_source(
|
||||
"DataCollector",
|
||||
["TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst"]
|
||||
)
|
||||
```
|
||||
|
||||
#### `add_edges_to_target(sources, target, **kwargs)`
|
||||
|
||||
Creates a fan-in pattern where multiple sources connect to one target.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| `sources` | `List[str]` | List of source node IDs |
|
||||
| `target` | `str` | Target node ID |
|
||||
| `**kwargs` | `Any` | Additional keyword arguments for all edges |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `List[Edge]`: List of created Edge objects
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow.add_edges_to_target(
|
||||
["TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst"],
|
||||
"SynthesisAgent"
|
||||
)
|
||||
```
|
||||
|
||||
#### `add_parallel_chain(sources, targets, **kwargs)`
|
||||
|
||||
Creates a full mesh connection between multiple sources and targets.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| `sources` | `List[str]` | List of source node IDs |
|
||||
| `targets` | `List[str]` | List of target node IDs |
|
||||
| `**kwargs` | `Any` | Additional keyword arguments for all edges |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `List[Edge]`: List of created Edge objects
|
||||
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow.add_parallel_chain(
|
||||
["DataCollector1", "DataCollector2"],
|
||||
["Analyst1", "Analyst2", "Analyst3"]
|
||||
)
|
||||
```
|
||||
|
||||
### Execution Methods
|
||||
|
||||
#### `run(task: str = None, img: Optional[str] = None, *args, **kwargs) -> Dict[str, Any]`
|
||||
|
||||
Executes the workflow with optimized parallel agent execution.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| `task` | `str` | Task to execute (uses self.task if not provided) |
|
||||
| `img` | `Optional[str]` | Image path for vision-enabled agents |
|
||||
| `*args` | `Any` | Additional positional arguments |
|
||||
| `**kwargs` | `Any` | Additional keyword arguments |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `Dict[str, Any]`: Execution results from all nodes
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
results = workflow.run(
|
||||
task="Analyze market trends for cryptocurrency",
|
||||
max_loops=2
|
||||
)
|
||||
```
|
||||
|
||||
#### `arun(task: str = None, *args, **kwargs) -> Dict[str, Any]`
|
||||
|
||||
Asynchronous version of `run` for better performance with I/O-bound operations.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| `task` | `str` | Task to execute |
|
||||
| `*args` | `Any` | Additional positional arguments |
|
||||
| `**kwargs` | `Any` | Additional keyword arguments |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `Dict[str, Any]`: Execution results from all nodes
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
results = await workflow.arun("Process large dataset")
|
||||
```
|
||||
|
||||
### Compilation and Optimization
|
||||
|
||||
#### `compile()`
|
||||
|
||||
Pre-computes expensive operations for faster execution.
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow.compile()
|
||||
status = workflow.get_compilation_status()
|
||||
print(f"Compiled: {status['is_compiled']}")
|
||||
```
|
||||
|
||||
#### `get_compilation_status() -> Dict[str, Any]`
|
||||
|
||||
Returns detailed compilation status information.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `Dict[str, Any]`: Compilation status including cache state and performance metrics
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
status = workflow.get_compilation_status()
|
||||
print(f"Layers: {status['cached_layers_count']}")
|
||||
print(f"Max workers: {status['max_workers']}")
|
||||
```
|
||||
|
||||
### Visualization Methods
|
||||
|
||||
#### `visualize(format: str = "png", view: bool = True, engine: str = "dot", show_summary: bool = False) -> str`
|
||||
|
||||
Generates a visual representation of the workflow using Graphviz.
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
|-----------|------|-------------|---------|
|
||||
| `format` | `str` | Output format ('png', 'svg', 'pdf', 'dot') | `"png"` |
|
||||
| `view` | `bool` | Whether to open the visualization | `True` |
|
||||
| `engine` | `str` | Graphviz layout engine | `"dot"` |
|
||||
| `show_summary` | `bool` | Whether to print parallel processing summary | `False` |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `str`: Path to the generated visualization file
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
output_file = workflow.visualize(
|
||||
format="svg",
|
||||
show_summary=True
|
||||
)
|
||||
print(f"Visualization saved to: {output_file}")
|
||||
```
|
||||
|
||||
#### `visualize_simple() -> str`
|
||||
|
||||
Generates a simple text-based visualization.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `str`: Text representation of the workflow
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
text_viz = workflow.visualize_simple()
|
||||
print(text_viz)
|
||||
```
|
||||
|
||||
### Serialization Methods
|
||||
|
||||
#### `to_json(fast: bool = True, include_conversation: bool = False, include_runtime_state: bool = False) -> str`
|
||||
|
||||
Serializes the workflow to JSON format.
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
|-----------|------|-------------|---------|
|
||||
| `fast` | `bool` | Whether to use fast JSON serialization | `True` |
|
||||
| `include_conversation` | `bool` | Whether to include conversation history | `False` |
|
||||
| `include_runtime_state` | `bool` | Whether to include runtime state | `False` |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `str`: JSON representation of the workflow
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
json_data = workflow.to_json(
|
||||
include_conversation=True,
|
||||
include_runtime_state=True
|
||||
)
|
||||
```
|
||||
|
||||
#### `from_json(json_str: str, restore_runtime_state: bool = False) -> GraphWorkflow`
|
||||
|
||||
Deserializes a workflow from JSON format.
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
|-----------|------|-------------|---------|
|
||||
| `json_str` | `str` | JSON string representation | Required |
|
||||
| `restore_runtime_state` | `bool` | Whether to restore runtime state | `False` |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `GraphWorkflow`: A new GraphWorkflow instance
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow = GraphWorkflow.from_json(json_data, restore_runtime_state=True)
|
||||
```
|
||||
|
||||
#### `save_to_file(filepath: str, include_conversation: bool = False, include_runtime_state: bool = False, overwrite: bool = False) -> str`
|
||||
|
||||
Saves the workflow to a JSON file.
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
|-----------|------|-------------|---------|
|
||||
| `filepath` | `str` | Path to save the JSON file | Required |
|
||||
| `include_conversation` | `bool` | Whether to include conversation history | `False` |
|
||||
| `include_runtime_state` | `bool` | Whether to include runtime state | `False` |
|
||||
| `overwrite` | `bool` | Whether to overwrite existing files | `False` |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `str`: Path to the saved file
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
filepath = workflow.save_to_file(
|
||||
"my_workflow.json",
|
||||
include_conversation=True
|
||||
)
|
||||
```
|
||||
|
||||
#### `load_from_file(filepath: str, restore_runtime_state: bool = False) -> GraphWorkflow`
|
||||
|
||||
Loads a workflow from a JSON file.
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
|-----------|------|-------------|---------|
|
||||
| `filepath` | `str` | Path to the JSON file | Required |
|
||||
| `restore_runtime_state` | `bool` | Whether to restore runtime state | `False` |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `GraphWorkflow`: Loaded workflow instance
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow = GraphWorkflow.load_from_file("my_workflow.json")
|
||||
```
|
||||
|
||||
### Utility Methods
|
||||
|
||||
#### `export_summary() -> Dict[str, Any]`
|
||||
|
||||
Generates a human-readable summary of the workflow.
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `Dict[str, Any]`: Comprehensive workflow summary
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
summary = workflow.export_summary()
|
||||
print(f"Workflow has {summary['structure']['nodes']} nodes")
|
||||
print(f"Compilation status: {summary['compilation_status']['is_compiled']}")
|
||||
```
|
||||
|
||||
#### `set_entry_points(entry_points: List[str])`
|
||||
|
||||
Sets the entry points for the workflow.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| `entry_points` | `List[str]` | List of node IDs to serve as entry points |
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow.set_entry_points(["DataCollector", "ResearchAgent"])
|
||||
```
|
||||
|
||||
#### `set_end_points(end_points: List[str])`
|
||||
|
||||
Sets the end points for the workflow.
|
||||
|
||||
| Parameter | Type | Description |
|
||||
|-----------|------|-------------|
|
||||
| `end_points` | `List[str]` | List of node IDs to serve as end points |
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow.set_end_points(["SynthesisAgent", "ReportGenerator"])
|
||||
```
|
||||
|
||||
### Class Methods
|
||||
|
||||
#### `from_spec(agents, edges, entry_points=None, end_points=None, task=None, **kwargs) -> GraphWorkflow`
|
||||
|
||||
Constructs a workflow from a list of agents and connections.
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
|-----------|------|-------------|---------|
|
||||
| `agents` | `List` | List of agents or Node objects | Required |
|
||||
| `edges` | `List` | List of edges or edge tuples | Required |
|
||||
| `entry_points` | `List[str]` | List of entry point node IDs | `None` |
|
||||
| `end_points` | `List[str]` | List of end point node IDs | `None` |
|
||||
| `task` | `str` | Task to be executed by the workflow | `None` |
|
||||
| `**kwargs` | `Any` | Additional keyword arguments | `{}` |
|
||||
|
||||
**Returns:**
|
||||
|
||||
- `GraphWorkflow`: A new GraphWorkflow instance
|
||||
|
||||
**Example:**
|
||||
|
||||
```python
|
||||
workflow = GraphWorkflow.from_spec(
|
||||
agents=[agent1, agent2, agent3],
|
||||
edges=[
|
||||
("agent1", "agent2"),
|
||||
("agent2", "agent3"),
|
||||
("agent1", ["agent2", "agent3"]) # Fan-out
|
||||
],
|
||||
task="Analyze market data"
|
||||
)
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Sequential Workflow
|
||||
|
||||
```python
|
||||
from swarms import Agent, GraphWorkflow
|
||||
from swarms.prompts.multi_agent_collab_prompt import MULTI_AGENT_COLLAB_PROMPT_TWO
|
||||
|
||||
# Create agents
|
||||
research_agent = Agent(
|
||||
agent_name="ResearchAgent",
|
||||
model_name="gpt-4",
|
||||
system_prompt=MULTI_AGENT_COLLAB_PROMPT_TWO,
|
||||
max_loops=1
|
||||
)
|
||||
|
||||
analysis_agent = Agent(
|
||||
agent_name="AnalysisAgent",
|
||||
model_name="gpt-4",
|
||||
system_prompt=MULTI_AGENT_COLLAB_PROMPT_TWO,
|
||||
max_loops=1
|
||||
)
|
||||
|
||||
# Build workflow
|
||||
workflow = GraphWorkflow(name="Research-Analysis-Workflow")
|
||||
workflow.add_node(research_agent)
|
||||
workflow.add_node(analysis_agent)
|
||||
workflow.add_edge("ResearchAgent", "AnalysisAgent")
|
||||
|
||||
# Execute
|
||||
results = workflow.run("What are the latest trends in AI?")
|
||||
print(results)
|
||||
```
|
||||
|
||||
### Parallel Processing Workflow
|
||||
|
||||
```python
|
||||
from swarms import Agent, GraphWorkflow
|
||||
|
||||
# Create specialized agents
|
||||
data_collector = Agent(agent_name="DataCollector", model_name="gpt-4")
|
||||
technical_analyst = Agent(agent_name="TechnicalAnalyst", model_name="gpt-4")
|
||||
fundamental_analyst = Agent(agent_name="FundamentalAnalyst", model_name="gpt-4")
|
||||
sentiment_analyst = Agent(agent_name="SentimentAnalyst", model_name="gpt-4")
|
||||
synthesis_agent = Agent(agent_name="SynthesisAgent", model_name="gpt-4")
|
||||
|
||||
# Build parallel workflow
|
||||
workflow = GraphWorkflow(name="Market-Analysis-Workflow")
|
||||
|
||||
# Add all agents
|
||||
for agent in [data_collector, technical_analyst, fundamental_analyst,
|
||||
sentiment_analyst, synthesis_agent]:
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Create fan-out pattern: data collector feeds all analysts
|
||||
workflow.add_edges_from_source(
|
||||
"DataCollector",
|
||||
["TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst"]
|
||||
)
|
||||
|
||||
# Create fan-in pattern: all analysts feed synthesis agent
|
||||
workflow.add_edges_to_target(
|
||||
["TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst"],
|
||||
"SynthesisAgent"
|
||||
)
|
||||
|
||||
# Execute
|
||||
results = workflow.run("Analyze Bitcoin market trends")
|
||||
print(results)
|
||||
```
|
||||
|
||||
### Complex Multi-Layer Workflow
|
||||
|
||||
```python
|
||||
from swarms import Agent, GraphWorkflow
|
||||
|
||||
# Create agents for different stages
|
||||
data_collectors = [
|
||||
Agent(agent_name=f"DataCollector{i}", model_name="gpt-4")
|
||||
for i in range(1, 4)
|
||||
]
|
||||
|
||||
analysts = [
|
||||
Agent(agent_name=f"Analyst{i}", model_name="gpt-4")
|
||||
for i in range(1, 4)
|
||||
]
|
||||
|
||||
validators = [
|
||||
Agent(agent_name=f"Validator{i}", model_name="gpt-4")
|
||||
for i in range(1, 3)
|
||||
]
|
||||
|
||||
synthesis_agent = Agent(agent_name="SynthesisAgent", model_name="gpt-4")
|
||||
|
||||
# Build complex workflow
|
||||
workflow = GraphWorkflow(name="Complex-Research-Workflow")
|
||||
|
||||
# Add all agents
|
||||
all_agents = data_collectors + analysts + validators + [synthesis_agent]
|
||||
for agent in all_agents:
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Layer 1: Data collectors feed all analysts in parallel
|
||||
workflow.add_parallel_chain(
|
||||
[agent.agent_name for agent in data_collectors],
|
||||
[agent.agent_name for agent in analysts]
|
||||
)
|
||||
|
||||
# Layer 2: Analysts feed validators
|
||||
workflow.add_parallel_chain(
|
||||
[agent.agent_name for agent in analysts],
|
||||
[agent.agent_name for agent in validators]
|
||||
)
|
||||
|
||||
# Layer 3: Validators feed synthesis agent
|
||||
workflow.add_edges_to_target(
|
||||
[agent.agent_name for agent in validators],
|
||||
"SynthesisAgent"
|
||||
)
|
||||
|
||||
# Visualize and execute
|
||||
workflow.visualize(show_summary=True)
|
||||
results = workflow.run("Comprehensive analysis of renewable energy markets")
|
||||
```
|
||||
|
||||
### Workflow with Custom Metadata
|
||||
|
||||
```python
|
||||
from swarms import Agent, GraphWorkflow, Edge
|
||||
|
||||
# Create agents with specific roles
|
||||
research_agent = Agent(agent_name="ResearchAgent", model_name="gpt-4")
|
||||
analysis_agent = Agent(agent_name="AnalysisAgent", model_name="gpt-4")
|
||||
|
||||
# Build workflow with metadata
|
||||
workflow = GraphWorkflow(
|
||||
name="Metadata-Workflow",
|
||||
description="Workflow demonstrating metadata usage"
|
||||
)
|
||||
|
||||
workflow.add_node(research_agent, metadata={"priority": "high", "timeout": 300})
|
||||
workflow.add_node(analysis_agent, metadata={"priority": "medium", "timeout": 600})
|
||||
|
||||
# Add edge with metadata
|
||||
edge = Edge(
|
||||
source="ResearchAgent",
|
||||
target="AnalysisAgent",
|
||||
metadata={"data_type": "research_findings", "priority": "high"}
|
||||
)
|
||||
workflow.add_edge(edge)
|
||||
|
||||
# Execute with custom parameters
|
||||
results = workflow.run(
|
||||
"Analyze the impact of climate change on agriculture",
|
||||
max_loops=2
|
||||
)
|
||||
```
|
||||
|
||||
### Workflow Serialization and Persistence
|
||||
|
||||
```python
|
||||
from swarms import Agent, GraphWorkflow
|
||||
|
||||
# Create workflow
|
||||
research_agent = Agent(agent_name="ResearchAgent", model_name="gpt-4")
|
||||
analysis_agent = Agent(agent_name="AnalysisAgent", model_name="gpt-4")
|
||||
|
||||
workflow = GraphWorkflow(name="Persistent-Workflow")
|
||||
workflow.add_node(research_agent)
|
||||
workflow.add_node(analysis_agent)
|
||||
workflow.add_edge("ResearchAgent", "AnalysisAgent")
|
||||
|
||||
# Execute and get conversation
|
||||
results = workflow.run("Research quantum computing applications")
|
||||
|
||||
# Save workflow with conversation history
|
||||
filepath = workflow.save_to_file(
|
||||
"quantum_research_workflow.json",
|
||||
include_conversation=True,
|
||||
include_runtime_state=True
|
||||
)
|
||||
|
||||
# Load workflow later
|
||||
loaded_workflow = GraphWorkflow.load_from_file(
|
||||
filepath,
|
||||
restore_runtime_state=True
|
||||
)
|
||||
|
||||
# Continue execution
|
||||
new_results = loaded_workflow.run("Continue with quantum cryptography analysis")
|
||||
```
|
||||
|
||||
### Advanced Pattern Detection
|
||||
|
||||
```python
|
||||
from swarms import Agent, GraphWorkflow
|
||||
|
||||
# Create a complex workflow with multiple patterns
|
||||
workflow = GraphWorkflow(name="Pattern-Detection-Workflow", verbose=True)
|
||||
|
||||
# Create agents
|
||||
agents = {
|
||||
"collector": Agent(agent_name="DataCollector", model_name="gpt-4"),
|
||||
"tech_analyst": Agent(agent_name="TechnicalAnalyst", model_name="gpt-4"),
|
||||
"fund_analyst": Agent(agent_name="FundamentalAnalyst", model_name="gpt-4"),
|
||||
"sentiment_analyst": Agent(agent_name="SentimentAnalyst", model_name="gpt-4"),
|
||||
"risk_analyst": Agent(agent_name="RiskAnalyst", model_name="gpt-4"),
|
||||
"synthesis": Agent(agent_name="SynthesisAgent", model_name="gpt-4"),
|
||||
"validator": Agent(agent_name="Validator", model_name="gpt-4")
|
||||
}
|
||||
|
||||
# Add all agents
|
||||
for agent in agents.values():
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Create complex patterns
|
||||
# Fan-out from collector
|
||||
workflow.add_edges_from_source(
|
||||
"DataCollector",
|
||||
["TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst", "RiskAnalyst"]
|
||||
)
|
||||
|
||||
# Fan-in to synthesis
|
||||
workflow.add_edges_to_target(
|
||||
["TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst", "RiskAnalyst"],
|
||||
"SynthesisAgent"
|
||||
)
|
||||
|
||||
# Final validation step
|
||||
workflow.add_edge("SynthesisAgent", "Validator")
|
||||
|
||||
# Compile and get status
|
||||
workflow.compile()
|
||||
status = workflow.get_compilation_status()
|
||||
|
||||
print(f"Compilation status: {status}")
|
||||
print(f"Layers: {status['cached_layers_count']}")
|
||||
print(f"Max workers: {status['max_workers']}")
|
||||
|
||||
# Visualize with pattern detection
|
||||
workflow.visualize(show_summary=True, format="png")
|
||||
```
|
||||
|
||||
### Error Handling and Recovery
|
||||
|
||||
```python
|
||||
from swarms import Agent, GraphWorkflow
|
||||
import logging
|
||||
|
||||
# Set up logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
# Create workflow with error handling
|
||||
workflow = GraphWorkflow(
|
||||
name="Error-Handling-Workflow",
|
||||
verbose=True,
|
||||
max_loops=1
|
||||
)
|
||||
|
||||
# Create agents
|
||||
try:
|
||||
research_agent = Agent(agent_name="ResearchAgent", model_name="gpt-4")
|
||||
analysis_agent = Agent(agent_name="AnalysisAgent", model_name="gpt-4")
|
||||
|
||||
workflow.add_node(research_agent)
|
||||
workflow.add_node(analysis_agent)
|
||||
workflow.add_edge("ResearchAgent", "AnalysisAgent")
|
||||
|
||||
# Execute with error handling
|
||||
try:
|
||||
results = workflow.run("Analyze market trends")
|
||||
print("Workflow completed successfully")
|
||||
print(results)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Workflow execution failed: {e}")
|
||||
|
||||
# Get workflow summary for debugging
|
||||
summary = workflow.export_summary()
|
||||
print(f"Workflow state: {summary['structure']}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Workflow setup failed: {e}")
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `GraphWorkflow` class provides a powerful and flexible framework for orchestrating complex multi-agent workflows. Its key benefits include:
|
||||
|
||||
### Benefits
|
||||
|
||||
| Benefit | Description |
|
||||
|-----------------|--------------------------------------------------------------------------------------------------|
|
||||
| **Scalability** | Supports workflows with hundreds of agents through efficient parallel execution |
|
||||
| **Flexibility** | Multiple connection patterns (sequential, fan-out, fan-in, parallel chains) |
|
||||
| **Performance** | Automatic compilation and optimization for faster execution |
|
||||
| **Visualization** | Rich visual representations for workflow understanding and debugging |
|
||||
| **Persistence** | Complete serialization and deserialization capabilities |
|
||||
| **Error Handling** | Comprehensive error handling and recovery mechanisms |
|
||||
| **Monitoring** | Detailed logging and status reporting |
|
||||
|
||||
### Use Cases
|
||||
|
||||
| Use Case | Description |
|
||||
|-------------------------|--------------------------------------------------------------------|
|
||||
| **Research Workflows** | Multi-stage research with data collection, analysis, and synthesis |
|
||||
| **Content Generation** | Parallel content creation with validation and refinement |
|
||||
| **Data Processing** | Complex ETL pipelines with multiple processing stages |
|
||||
| **Decision Making** | Multi-agent decision systems with voting and consensus |
|
||||
| **Quality Assurance** | Multi-stage validation and verification processes |
|
||||
| **Automated Testing** | Complex test orchestration with parallel execution |
|
||||
|
||||
### Best Practices
|
||||
|
||||
| Best Practice | Description |
|
||||
|---------------------------------------|------------------------------------------------------------------|
|
||||
| **Use meaningful agent names** | Helps with debugging and visualization |
|
||||
| **Leverage parallel patterns** | Use fan-out and fan-in for better performance |
|
||||
| **Compile workflows** | Always compile before execution for optimal performance |
|
||||
| **Monitor execution** | Use verbose mode and status reporting for debugging |
|
||||
| **Save important workflows** | Use serialization for workflow persistence |
|
||||
| **Handle errors gracefully** | Implement proper error handling and recovery |
|
||||
| **Visualize complex workflows** | Use visualization to understand and debug workflows |
|
||||
|
||||
The GraphWorkflow system represents a significant advancement in multi-agent orchestration, providing the tools needed to build complex, scalable, and maintainable AI workflows.
|
@ -0,0 +1,54 @@
|
||||
from swarms import Agent, CronJob
|
||||
from loguru import logger
|
||||
|
||||
|
||||
# Example usage
|
||||
if __name__ == "__main__":
|
||||
# Initialize the agent
|
||||
agent = Agent(
|
||||
agent_name="Quantitative-Trading-Agent",
|
||||
agent_description="Advanced quantitative trading and algorithmic analysis agent",
|
||||
system_prompt="""You are an expert quantitative trading agent with deep expertise in:
|
||||
- Algorithmic trading strategies and implementation
|
||||
- Statistical arbitrage and market making
|
||||
- Risk management and portfolio optimization
|
||||
- High-frequency trading systems
|
||||
- Market microstructure analysis
|
||||
- Quantitative research methodologies
|
||||
- Financial mathematics and stochastic processes
|
||||
- Machine learning applications in trading
|
||||
|
||||
Your core responsibilities include:
|
||||
1. Developing and backtesting trading strategies
|
||||
2. Analyzing market data and identifying alpha opportunities
|
||||
3. Implementing risk management frameworks
|
||||
4. Optimizing portfolio allocations
|
||||
5. Conducting quantitative research
|
||||
6. Monitoring market microstructure
|
||||
7. Evaluating trading system performance
|
||||
|
||||
You maintain strict adherence to:
|
||||
- Mathematical rigor in all analyses
|
||||
- Statistical significance in strategy development
|
||||
- Risk-adjusted return optimization
|
||||
- Market impact minimization
|
||||
- Regulatory compliance
|
||||
- Transaction cost analysis
|
||||
- Performance attribution
|
||||
|
||||
You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
|
||||
max_loops=1,
|
||||
model_name="gpt-4.1",
|
||||
dynamic_temperature_enabled=True,
|
||||
output_type="str-all-except-first",
|
||||
streaming_on=True,
|
||||
print_on=True,
|
||||
telemetry_enable=False,
|
||||
)
|
||||
|
||||
# Example 1: Basic usage with just a task
|
||||
logger.info("Starting example cron job")
|
||||
cron_job = CronJob(agent=agent, interval="10seconds")
|
||||
cron_job.run(
|
||||
task="What are the best top 3 etfs for gold coverage?"
|
||||
)
|
Binary image file added (11 KiB)
@ -0,0 +1,351 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
|
||||
def create_complex_investment_analysis_workflow():
|
||||
"""
|
||||
Creates a sophisticated investment analysis workflow with multiple specialized agents
|
||||
working in parallel and series to provide comprehensive market analysis.
|
||||
|
||||
Workflow Structure:
|
||||
1. Data Gathering Agent (Entry Point)
|
||||
2. Three Parallel Research Agents:
|
||||
- Fundamental Analysis Agent
|
||||
- Technical Analysis Agent
|
||||
- Sentiment Analysis Agent
|
||||
3. Risk Assessment Agent (runs in parallel with research agents)
|
||||
4. Market Context Agent (analyzes broader market conditions)
|
||||
5. Synthesis Agent (combines all research outputs)
|
||||
6. Final Recommendation Agent (End Point)
|
||||
|
||||
Returns:
|
||||
GraphWorkflow: Configured workflow ready for execution
|
||||
"""
|
||||
|
||||
# Create specialized agents with detailed system prompts
|
||||
data_gathering_agent = Agent(
|
||||
agent_name="DataGatheringAgent",
|
||||
model_name="gpt-4.1",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a financial data gathering specialist. Your role is to:
|
||||
1. Identify and collect relevant financial data for the given investment target
|
||||
2. Gather recent news, earnings reports, and market data
|
||||
3. Compile key financial metrics and ratios
|
||||
4. Provide a comprehensive data foundation for other analysts
|
||||
5. Structure your output clearly for downstream analysis
|
||||
|
||||
Focus on accuracy, recency, and relevance of data. Always cite sources when possible.""",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
fundamental_analysis_agent = Agent(
|
||||
agent_name="FundamentalAnalysisAgent",
|
||||
model_name="gpt-4.1",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a fundamental analysis expert. Your role is to:
|
||||
1. Analyze company financials, business model, and competitive position
|
||||
2. Evaluate management quality and corporate governance
|
||||
3. Assess industry trends and market position
|
||||
4. Calculate intrinsic value using various valuation methods
|
||||
5. Identify fundamental strengths and weaknesses
|
||||
|
||||
Base your analysis on solid financial principles and provide quantitative backing for your conclusions.""",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
technical_analysis_agent = Agent(
|
||||
agent_name="TechnicalAnalysisAgent",
|
||||
model_name="gpt-4.1",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a technical analysis specialist. Your role is to:
|
||||
1. Analyze price charts, trends, and trading patterns
|
||||
2. Identify support and resistance levels
|
||||
3. Evaluate momentum indicators and trading signals
|
||||
4. Assess volume patterns and market sentiment
|
||||
5. Provide entry/exit timing recommendations
|
||||
|
||||
Use established technical analysis principles and explain your reasoning clearly.""",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
sentiment_analysis_agent = Agent(
|
||||
agent_name="SentimentAnalysisAgent",
|
||||
model_name="gpt-4.1",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a market sentiment analysis expert. Your role is to:
|
||||
1. Analyze social media sentiment and retail investor behavior
|
||||
2. Evaluate institutional investor positioning and flows
|
||||
3. Assess news sentiment and media coverage
|
||||
4. Monitor options flow and derivatives positioning
|
||||
5. Gauge overall market psychology and positioning
|
||||
|
||||
Provide insights into market sentiment trends and their potential impact.""",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
risk_assessment_agent = Agent(
|
||||
agent_name="RiskAssessmentAgent",
|
||||
model_name="gpt-4.1",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a risk management specialist. Your role is to:
|
||||
1. Identify and quantify various risk factors (market, credit, liquidity, operational)
|
||||
2. Analyze historical volatility and correlation patterns
|
||||
3. Assess downside scenarios and tail risks
|
||||
4. Evaluate portfolio impact and position sizing considerations
|
||||
5. Recommend risk mitigation strategies
|
||||
|
||||
Provide comprehensive risk analysis with quantitative metrics where possible.""",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
market_context_agent = Agent(
|
||||
agent_name="MarketContextAgent",
|
||||
model_name="gpt-4.1",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a macro market analysis expert. Your role is to:
|
||||
1. Analyze broader market conditions and economic environment
|
||||
2. Evaluate sector rotation and style preferences
|
||||
3. Assess correlation with market indices and sector peers
|
||||
4. Consider geopolitical and regulatory factors
|
||||
5. Provide market timing and allocation context
|
||||
|
||||
Focus on how broader market conditions might impact the specific investment.""",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
synthesis_agent = Agent(
|
||||
agent_name="SynthesisAgent",
|
||||
model_name="gpt-4.1",
|
||||
max_loops=1,
|
||||
system_prompt="""You are an investment analysis synthesizer. Your role is to:
|
||||
1. Integrate findings from fundamental, technical, and sentiment analysis
|
||||
2. Reconcile conflicting viewpoints and identify consensus areas
|
||||
3. Weight different analysis components based on current market conditions
|
||||
4. Identify the most compelling investment thesis
|
||||
5. Highlight key risks and opportunities
|
||||
|
||||
Provide a balanced synthesis that considers all analytical perspectives.""",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
recommendation_agent = Agent(
|
||||
agent_name="FinalRecommendationAgent",
|
||||
model_name="gpt-4.1",
|
||||
max_loops=1,
|
||||
system_prompt="""You are the final investment decision maker. Your role is to:
|
||||
1. Review all analysis and synthesis from the team
|
||||
2. Make a clear investment recommendation (BUY/HOLD/SELL)
|
||||
3. Provide specific entry/exit criteria and price targets
|
||||
4. Recommend position sizing and risk management approach
|
||||
5. Outline monitoring criteria and review timeline
|
||||
|
||||
Provide actionable investment guidance with clear rationale and risk considerations.""",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create the workflow
|
||||
workflow = GraphWorkflow(
|
||||
name="ComplexInvestmentAnalysisWorkflow",
|
||||
description="A comprehensive multi-agent investment analysis system with parallel processing and sophisticated agent collaboration",
|
||||
verbose=True,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
# Add all agents as nodes
|
||||
agents = [
|
||||
data_gathering_agent,
|
||||
fundamental_analysis_agent,
|
||||
technical_analysis_agent,
|
||||
sentiment_analysis_agent,
|
||||
risk_assessment_agent,
|
||||
market_context_agent,
|
||||
synthesis_agent,
|
||||
recommendation_agent,
|
||||
]
|
||||
|
||||
for agent in agents:
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Define complex edge relationships
|
||||
# Stage 1: Data gathering feeds into all analysis agents
|
||||
workflow.add_edge(
|
||||
"DataGatheringAgent", "FundamentalAnalysisAgent"
|
||||
)
|
||||
workflow.add_edge("DataGatheringAgent", "TechnicalAnalysisAgent")
|
||||
workflow.add_edge("DataGatheringAgent", "SentimentAnalysisAgent")
|
||||
workflow.add_edge("DataGatheringAgent", "RiskAssessmentAgent")
|
||||
workflow.add_edge("DataGatheringAgent", "MarketContextAgent")
|
||||
|
||||
# Stage 2: All analysis agents feed into synthesis
|
||||
workflow.add_edge("FundamentalAnalysisAgent", "SynthesisAgent")
|
||||
workflow.add_edge("TechnicalAnalysisAgent", "SynthesisAgent")
|
||||
workflow.add_edge("SentimentAnalysisAgent", "SynthesisAgent")
|
||||
|
||||
# Stage 3: Synthesis and risk/context feed into final recommendation
|
||||
workflow.add_edge("SynthesisAgent", "FinalRecommendationAgent")
|
||||
workflow.add_edge(
|
||||
"RiskAssessmentAgent", "FinalRecommendationAgent"
|
||||
)
|
||||
workflow.add_edge(
|
||||
"MarketContextAgent", "FinalRecommendationAgent"
|
||||
)
|
||||
|
||||
# Set explicit entry and end points
|
||||
workflow.set_entry_points(["DataGatheringAgent"])
|
||||
workflow.set_end_points(["FinalRecommendationAgent"])
|
||||
|
||||
return workflow
|
||||
|
||||
|
||||
# def create_parallel_research_workflow():
|
||||
# """
|
||||
# Creates a parallel research workflow demonstrating multiple entry points
|
||||
# and complex convergence patterns.
|
||||
|
||||
# Returns:
|
||||
# GraphWorkflow: Configured parallel research workflow
|
||||
# """
|
||||
|
||||
# # Create research agents for different domains
|
||||
# academic_researcher = Agent(
|
||||
# agent_name="AcademicResearcher",
|
||||
# model_name="gpt-4.1",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are an academic researcher specializing in peer-reviewed literature analysis. Focus on scientific papers, studies, and academic sources.",
|
||||
# verbose=True,
|
||||
# )
|
||||
|
||||
# industry_analyst = Agent(
|
||||
# agent_name="IndustryAnalyst",
|
||||
# model_name="gpt-4.1",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are an industry analyst focusing on market reports, industry trends, and commercial applications.",
|
||||
# verbose=True,
|
||||
# )
|
||||
|
||||
# news_researcher = Agent(
|
||||
# agent_name="NewsResearcher",
|
||||
# model_name="gpt-4.1",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are a news researcher specializing in current events, breaking news, and recent developments.",
|
||||
# verbose=True,
|
||||
# )
|
||||
|
||||
# data_scientist = Agent(
|
||||
# agent_name="DataScientist",
|
||||
# model_name="gpt-4.1",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are a data scientist focusing on quantitative analysis, statistical patterns, and data-driven insights.",
|
||||
# verbose=True,
|
||||
# )
|
||||
|
||||
# synthesizer = Agent(
|
||||
# agent_name="ResearchSynthesizer",
|
||||
# model_name="gpt-4.1",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are a research synthesizer who combines insights from multiple research domains into coherent conclusions.",
|
||||
# verbose=True,
|
||||
# )
|
||||
|
||||
# quality_checker = Agent(
|
||||
# agent_name="QualityChecker",
|
||||
# model_name="gpt-4.1",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are a quality assurance specialist who validates research findings and identifies potential gaps or biases.",
|
||||
# verbose=True,
|
||||
# )
|
||||
|
||||
# # Create workflow with multiple entry points
|
||||
# workflow = GraphWorkflow(
|
||||
# name="ParallelResearchWorkflow",
|
||||
# description="A parallel research workflow with multiple independent entry points converging to synthesis",
|
||||
# verbose=True,
|
||||
# )
|
||||
|
||||
# # Add all agents
|
||||
# for agent in [
|
||||
# academic_researcher,
|
||||
# industry_analyst,
|
||||
# news_researcher,
|
||||
# data_scientist,
|
||||
# synthesizer,
|
||||
# quality_checker,
|
||||
# ]:
|
||||
# workflow.add_node(agent)
|
||||
|
||||
# # Create convergence pattern - all researchers feed into synthesizer
|
||||
# workflow.add_edge("AcademicResearcher", "ResearchSynthesizer")
|
||||
# workflow.add_edge("IndustryAnalyst", "ResearchSynthesizer")
|
||||
# workflow.add_edge("NewsResearcher", "ResearchSynthesizer")
|
||||
# workflow.add_edge("DataScientist", "ResearchSynthesizer")
|
||||
|
||||
# # Synthesizer feeds into quality checker
|
||||
# workflow.add_edge("ResearchSynthesizer", "QualityChecker")
|
||||
|
||||
# # Set multiple entry points (parallel execution)
|
||||
# workflow.set_entry_points(
|
||||
# [
|
||||
# "AcademicResearcher",
|
||||
# "IndustryAnalyst",
|
||||
# "NewsResearcher",
|
||||
# "DataScientist",
|
||||
# ]
|
||||
# )
|
||||
# workflow.set_end_points(["QualityChecker"])
|
||||
|
||||
# return workflow
|
||||
|
||||
|
||||
# def demonstrate_complex_workflows():
|
||||
# """
|
||||
# Demonstrates both complex workflow examples with different tasks.
|
||||
# """
|
||||
# investment_workflow = (
|
||||
# create_complex_investment_analysis_workflow()
|
||||
# )
|
||||
|
||||
# # Visualize the workflow structure
|
||||
# investment_workflow.visualize()
|
||||
|
||||
# # Run the investment analysis
|
||||
# investment_task = """
|
||||
# Analyze Tesla (TSLA) stock as a potential investment opportunity.
|
||||
# Consider the company's fundamentals, technical chart patterns, market sentiment,
|
||||
# risk factors, and broader market context. Provide a comprehensive investment
|
||||
# recommendation with specific entry/exit criteria.
|
||||
# """
|
||||
|
||||
# investment_results = investment_workflow.run(task=investment_task)
|
||||
|
||||
# for agent_name, result in investment_results.items():
|
||||
# print(f"\n🤖 {agent_name}:")
|
||||
# print(f"{result[:300]}{'...' if len(result) > 300 else ''}")
|
||||
|
||||
# research_workflow = create_parallel_research_workflow()
|
||||
|
||||
# # Run the research analysis
|
||||
# research_task = """
|
||||
# Research the current state and future prospects of quantum computing.
|
||||
# Examine academic progress, industry developments, recent news, and
|
||||
# quantitative trends. Provide a comprehensive analysis of the field's
|
||||
# current status and trajectory.
|
||||
# """
|
||||
|
||||
# research_results = research_workflow.run(task=research_task)
|
||||
|
||||
# for agent_name, result in research_results.items():
|
||||
# print(f"\n🤖 {agent_name}:")
|
||||
# print(f"{result[:300]}{'...' if len(result) > 300 else ''}")
|
||||
|
||||
|
||||
# if __name__ == "__main__":
|
||||
# # Run the comprehensive demonstration
|
||||
# demonstrate_complex_workflows()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
workflow = create_complex_investment_analysis_workflow()
|
||||
workflow.visualize()
|
||||
# workflow.run(
|
||||
# task="Analyze Tesla (TSLA) stock as a potential investment opportunity. Consider the company's fundamentals, technical chart patterns, market sentiment, risk factors, and broader market context. Provide a comprehensive investment recommendation with specific entry/exit criteria."
|
||||
# )
|
After Width: | Height: | Size: 11 KiB |
@ -0,0 +1,57 @@
from swarms import Agent
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.prompts.multi_agent_collab_prompt import (
    MULTI_AGENT_COLLAB_PROMPT_TWO,
)

# Define two real agents with the multi-agent collaboration prompt
agent1 = Agent(
    agent_name="ResearchAgent1",
    model_name="gpt-4.1",
    max_loops=1,
    system_prompt=MULTI_AGENT_COLLAB_PROMPT_TWO,  # Set collaboration prompt
)
agent2 = Agent(
    agent_name="ResearchAgent2",
    model_name="gpt-4.1",
    max_loops=1,
    system_prompt=MULTI_AGENT_COLLAB_PROMPT_TWO,  # Set collaboration prompt
)

# Build the workflow with only agents as nodes
workflow = GraphWorkflow()
workflow.add_node(agent1)
workflow.add_node(agent2)

# Define a relationship: agent1 feeds into agent2
workflow.add_edge(agent1.agent_name, agent2.agent_name)

# Visualize the workflow using Graphviz
print("\n📊 Creating workflow visualization...")
try:
    viz_output = workflow.visualize(
        output_path="simple_workflow_graph",
        format="png",
        view=True,  # Auto-open the generated image
        show_parallel_patterns=True,
    )
    print(f"✅ Workflow visualization saved to: {viz_output}")
except Exception as e:
    print(f"⚠️ Graphviz not available, using text visualization: {e}")
    workflow.visualize()

# Export workflow to JSON
workflow_json = workflow.to_json()
print(
    f"\n💾 Workflow exported to JSON ({len(workflow_json)} characters)"
)

# Run the workflow and print results
print("\n🚀 Executing workflow...")
results = workflow.run(
    task="What are the best arbitrage trading strategies for altcoins? Give me research papers and articles on the topic."
)
print("\n📋 Execution results:")
for agent_name, result in results.items():
    print(f"\n🤖 {agent_name}:")
    print(f"  {result[:200]}{'...' if len(result) > 200 else ''}")
@ -0,0 +1,328 @@
"""
Test script to demonstrate enhanced JSON export/import capabilities for GraphWorkflow.
This showcases the new comprehensive serialization with metadata, versioning, and various options.
"""

import json
import os
from swarms import Agent
from swarms.structs.graph_workflow import GraphWorkflow


def create_sample_workflow():
    """Create a sample workflow for testing JSON export/import capabilities."""

    # Create sample agents
    analyzer = Agent(
        agent_name="DataAnalyzer",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a data analysis expert. Analyze the given data and provide insights.",
        verbose=False,
    )

    processor = Agent(
        agent_name="DataProcessor",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a data processor. Process and transform the analyzed data.",
        verbose=False,
    )

    reporter = Agent(
        agent_name="ReportGenerator",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a report generator. Create comprehensive reports from processed data.",
        verbose=False,
    )

    # Create workflow with comprehensive metadata
    workflow = GraphWorkflow(
        name="Enhanced-Data-Analysis-Workflow",
        description="A comprehensive data analysis workflow demonstrating enhanced JSON export capabilities with rich metadata and configuration options.",
        max_loops=3,
        auto_compile=True,
        verbose=True,
        task="Analyze quarterly sales data and generate executive summary reports with actionable insights.",
    )

    # Add agents
    workflow.add_node(analyzer)
    workflow.add_node(processor)
    workflow.add_node(reporter)

    # Create workflow connections
    workflow.add_edge("DataAnalyzer", "DataProcessor")
    workflow.add_edge("DataProcessor", "ReportGenerator")

    # Force compilation to create runtime state
    workflow.compile()

    return workflow


def test_basic_json_export():
    """Test basic JSON export functionality."""
    print("=" * 60)
    print("TEST 1: Basic JSON Export")
    print("=" * 60)

    workflow = create_sample_workflow()

    print("\n📄 Exporting workflow to JSON (basic)...")
    json_data = workflow.to_json()

    # Parse and display structure
    data = json.loads(json_data)

    print("\n📊 Basic Export Results:")
    print(f"  Schema Version: {data.get('schema_version', 'N/A')}")
    print(f"  Export Date: {data.get('export_date', 'N/A')}")
    print(f"  Workflow Name: {data.get('name', 'N/A')}")
    print(f"  Description: {data.get('description', 'N/A')}")
    print(f"  Nodes: {data['metrics']['node_count']}")
    print(f"  Edges: {data['metrics']['edge_count']}")
    print(f"  Max Loops: {data.get('max_loops', 'N/A')}")
    print(f"  Auto Compile: {data.get('auto_compile', 'N/A')}")
    print(f"  JSON Size: {len(json_data):,} characters")

    return json_data


def test_comprehensive_json_export():
    """Test comprehensive JSON export with all options."""
    print("\n\n" + "=" * 60)
    print("TEST 2: Comprehensive JSON Export")
    print("=" * 60)

    workflow = create_sample_workflow()

    # Run workflow to generate conversation history
    print("\n🚀 Running workflow to generate conversation data...")
    try:
        results = workflow.run(
            task="Sample analysis task for testing JSON export"
        )
        print(
            f"✅ Workflow executed: {len(results)} agents completed"
        )
    except Exception as e:
        print(
            f"⚠️ Workflow execution failed (continuing with test): {e}"
        )

    print("\n📄 Exporting workflow to JSON (comprehensive)...")
    json_data = workflow.to_json(
        include_conversation=True, include_runtime_state=True
    )

    # Parse and display comprehensive structure
    data = json.loads(json_data)

    print("\n📊 Comprehensive Export Results:")
    print(f"  Schema Version: {data.get('schema_version', 'N/A')}")
    print(
        f"  Export Timestamp: {data.get('export_timestamp', 'N/A')}"
    )
    print(f"  Runtime State Included: {'runtime_state' in data}")
    print(f"  Conversation Included: {'conversation' in data}")
    print(f"  Compilation Status: {data['metrics']['is_compiled']}")
    print(f"  Layer Count: {data['metrics']['layer_count']}")
    print(f"  JSON Size: {len(json_data):,} characters")

    # Show runtime state details
    if "runtime_state" in data:
        runtime = data["runtime_state"]
        print("\n🔧 Runtime State Details:")
        print(
            f"  Compilation Timestamp: {runtime.get('compilation_timestamp', 'N/A')}"
        )
        print(
            f"  Time Since Compilation: {runtime.get('time_since_compilation', 'N/A'):.3f}s"
        )
        print(
            f"  Sorted Layers: {len(runtime.get('sorted_layers', []))} layers"
        )

    # Show conversation details
    if "conversation" in data:
        conv = data["conversation"]
        print("\n💬 Conversation Details:")
        if "history" in conv:
            print(f"  Message Count: {len(conv['history'])}")
            print(f"  Conversation Type: {conv.get('type', 'N/A')}")
        else:
            print(f"  Status: {conv}")

    return json_data


def test_file_save_load():
    """Test file-based save and load functionality."""
    print("\n\n" + "=" * 60)
    print("TEST 3: File Save/Load Operations")
    print("=" * 60)

    workflow = create_sample_workflow()

    # Test saving to file
    print("\n💾 Saving workflow to file...")
    try:
        filepath = workflow.save_to_file(
            "test_workflow.json",
            include_conversation=False,
            include_runtime_state=True,
            overwrite=True,
        )
        print(f"✅ Workflow saved to: {filepath}")

        # Check file size
        file_size = os.path.getsize(filepath)
        print(f"📁 File size: {file_size:,} bytes")

    except Exception as e:
        print(f"❌ Save failed: {e}")
        return

    # Test loading from file
    print("\n📂 Loading workflow from file...")
    try:
        loaded_workflow = GraphWorkflow.load_from_file(
            "test_workflow.json", restore_runtime_state=True
        )
        print("✅ Workflow loaded successfully")

        # Verify loaded data
        print("\n🔍 Verification:")
        print(f"  Name: {loaded_workflow.name}")
        print(f"  Description: {loaded_workflow.description}")
        print(f"  Nodes: {len(loaded_workflow.nodes)}")
        print(f"  Edges: {len(loaded_workflow.edges)}")
        print(f"  Max Loops: {loaded_workflow.max_loops}")
        print(f"  Compiled: {loaded_workflow._compiled}")

        # Test compilation status
        status = loaded_workflow.get_compilation_status()
        print(f"  Cache Efficient: {status['cache_efficient']}")

    except Exception as e:
        print(f"❌ Load failed: {e}")

    # Cleanup
    try:
        os.remove("test_workflow.json")
        print("\n🧹 Cleaned up test file")
    except OSError:
        pass


def test_workflow_summary():
    """Test workflow summary export functionality."""
    print("\n\n" + "=" * 60)
    print("TEST 4: Workflow Summary Export")
    print("=" * 60)

    workflow = create_sample_workflow()

    print("\n📋 Generating workflow summary...")
    try:
        summary = workflow.export_summary()

        print("\n📊 Workflow Summary:")
        print(f"  ID: {summary['workflow_info']['id']}")
        print(f"  Name: {summary['workflow_info']['name']}")
        print(
            f"  Structure: {summary['structure']['nodes']} nodes, {summary['structure']['edges']} edges"
        )
        print(
            f"  Configuration: {summary['configuration']['max_loops']} loops, {summary['configuration']['max_workers']} workers"
        )
        print(f"  Task Defined: {summary['task']['defined']}")
        print(
            f"  Conversation Available: {summary['conversation']['available']}"
        )

        # Show agents
        print("\n🤖 Agents:")
        for agent in summary["agents"]:
            print(f"  - {agent['id']} ({agent['agent_name']})")

        # Show connections
        print("\n🔗 Connections:")
        for conn in summary["connections"]:
            print(f"  - {conn['from']} → {conn['to']}")

    except Exception as e:
        print(f"❌ Summary generation failed: {e}")


def test_backward_compatibility():
    """Test backward compatibility with legacy JSON format."""
    print("\n\n" + "=" * 60)
    print("TEST 5: Backward Compatibility")
    print("=" * 60)

    # Create a legacy-style JSON (simulated)
    legacy_json = {
        "id": "test-legacy-workflow",
        "name": "Legacy Workflow",
        "nodes": [
            {
                "id": "agent1",
                "type": "agent",
                "agent": {"agent_name": "LegacyAgent"},
                "metadata": {},
            }
        ],
        "edges": [],
        "entry_points": ["agent1"],
        "end_points": ["agent1"],
        "max_loops": 1,
        "task": "Legacy task",
    }

    legacy_json_str = json.dumps(legacy_json, indent=2)

    print("\n📜 Testing legacy JSON format compatibility...")
    try:
        workflow = GraphWorkflow.from_json(legacy_json_str)
        print("✅ Legacy format loaded successfully")
        print(f"  Name: {workflow.name}")
        print(f"  Nodes: {len(workflow.nodes)}")
        print(f"  Max Loops: {workflow.max_loops}")

    except Exception as e:
        print(f"❌ Legacy compatibility failed: {e}")


def run_enhanced_json_tests():
    """Run all enhanced JSON export/import tests."""
    print("🧪 ENHANCED JSON EXPORT/IMPORT TESTS")
    print(
        "Testing comprehensive serialization capabilities with metadata and versioning"
    )

    # Run all tests
    test_basic_json_export()
    test_comprehensive_json_export()
    test_file_save_load()
    test_workflow_summary()
    test_backward_compatibility()

    print("\n\n" + "=" * 60)
    print("🎯 ENHANCED JSON CAPABILITIES SUMMARY")
    print("=" * 60)
    print("✅ Schema versioning and metadata")
    print("✅ Comprehensive configuration export")
    print("✅ Optional conversation history inclusion")
    print("✅ Runtime state preservation")
    print("✅ Enhanced error handling")
    print("✅ File-based save/load operations")
    print("✅ Workflow summary generation")
    print("✅ Backward compatibility")
    print("✅ Rich serialization metadata")


if __name__ == "__main__":
    run_enhanced_json_tests()
@ -0,0 +1,222 @@
"""
Test script to demonstrate GraphWorkflow compilation caching for multi-loop scenarios.
This shows how the compilation is cached and reused across multiple loops to save compute.
"""

import time
from swarms import Agent
from swarms.structs.graph_workflow import GraphWorkflow


def create_test_workflow(max_loops=3, verbose=True):
    """
    Create a test workflow with multiple agents to demonstrate caching.

    Args:
        max_loops (int): Number of loops to run (demonstrates caching when > 1)
        verbose (bool): Enable verbose logging to see caching behavior

    Returns:
        GraphWorkflow: Configured test workflow
    """

    # Create test agents
    analyzer = Agent(
        agent_name="Analyzer",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a data analyzer. Analyze the given topic and provide insights.",
        verbose=False,  # Keep agent verbosity low to focus on workflow caching logs
    )

    reviewer = Agent(
        agent_name="Reviewer",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a reviewer. Review and validate the analysis provided.",
        verbose=False,
    )

    summarizer = Agent(
        agent_name="Summarizer",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a summarizer. Create a concise summary of all previous work.",
        verbose=False,
    )

    # Create workflow with caching parameters
    workflow = GraphWorkflow(
        name="CachingTestWorkflow",
        description="Test workflow for demonstrating compilation caching",
        max_loops=max_loops,
        verbose=verbose,
        auto_compile=True,  # Enable auto-compilation for testing
    )

    # Add agents as nodes
    workflow.add_node(analyzer)
    workflow.add_node(reviewer)
    workflow.add_node(summarizer)

    # Create sequential flow: Analyzer -> Reviewer -> Summarizer
    workflow.add_edge("Analyzer", "Reviewer")
    workflow.add_edge("Reviewer", "Summarizer")

    return workflow


def test_single_loop_compilation():
    """Test compilation behavior with a single loop (no caching benefit)."""
    print("=" * 60)
    print("TEST 1: Single Loop (No Caching Benefit)")
    print("=" * 60)

    workflow = create_test_workflow(max_loops=1, verbose=True)

    print("\n📊 Compilation Status Before Execution:")
    status = workflow.get_compilation_status()
    for key, value in status.items():
        print(f"  {key}: {value}")

    print("\n🚀 Running single loop workflow...")
    start_time = time.time()

    results = workflow.run(
        task="Analyze the benefits of renewable energy sources and provide a comprehensive summary."
    )

    execution_time = time.time() - start_time

    print(f"\n✅ Single loop completed in {execution_time:.3f}s")
    print(f"📋 Results: {len(results)} agents executed")

    print("\n📊 Compilation Status After Execution:")
    status = workflow.get_compilation_status()
    for key, value in status.items():
        if key != "layers":  # Skip layers for brevity
            print(f"  {key}: {value}")


def test_multi_loop_compilation():
    """Test compilation caching behavior with multiple loops."""
    print("\n\n" + "=" * 60)
    print("TEST 2: Multi-Loop (Caching Benefit)")
    print("=" * 60)

    workflow = create_test_workflow(max_loops=3, verbose=True)

    print("\n📊 Compilation Status Before Execution:")
    status = workflow.get_compilation_status()
    for key, value in status.items():
        print(f"  {key}: {value}")

    print("\n🚀 Running multi-loop workflow...")
    start_time = time.time()

    results = workflow.run(
        task="Research the impact of artificial intelligence on job markets. Provide detailed analysis, review, and summary."
    )

    execution_time = time.time() - start_time

    print(
        f"\n✅ Multi-loop execution completed in {execution_time:.3f}s"
    )
    print(f"📋 Results: {len(results)} agents executed")

    print("\n📊 Compilation Status After Execution:")
    status = workflow.get_compilation_status()
    for key, value in status.items():
        if key != "layers":  # Skip layers for brevity
            print(f"  {key}: {value}")


def test_cache_invalidation():
    """Test that the cache is properly invalidated when the graph structure changes."""
    print("\n\n" + "=" * 60)
    print("TEST 3: Cache Invalidation on Structure Change")
    print("=" * 60)

    workflow = create_test_workflow(max_loops=2, verbose=True)

    print("\n📊 Initial Compilation Status:")
    status = workflow.get_compilation_status()
    print(f"  Compiled: {status['is_compiled']}")
    print(f"  Cache Efficient: {status['cache_efficient']}")

    # Force compilation by running once
    print("\n🔄 Initial compilation run...")
    workflow.run(task="Initial test task")

    print("\n📊 Status After First Run:")
    status = workflow.get_compilation_status()
    print(f"  Compiled: {status['is_compiled']}")
    print(f"  Cache Efficient: {status['cache_efficient']}")
    print(
        f"  Compilation Timestamp: {status['compilation_timestamp']}"
    )

    # Add a new agent to trigger cache invalidation
    print("\n🔧 Adding new agent (should invalidate cache)...")
    new_agent = Agent(
        agent_name="Validator",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a validator. Validate all previous work.",
        verbose=False,
    )

    workflow.add_node(new_agent)
    workflow.add_edge("Summarizer", "Validator")

    print(
        "\n📊 Status After Adding Node (Cache Should Be Invalidated):"
    )
    status = workflow.get_compilation_status()
    print(f"  Compiled: {status['is_compiled']}")
    print(f"  Cache Efficient: {status['cache_efficient']}")
    print(
        f"  Compilation Timestamp: {status['compilation_timestamp']}"
    )

    # Run again to show recompilation
    print("\n🔄 Running with new structure (should recompile)...")
    workflow.run(task="Test task with new structure")

    print("\n📊 Status After Recompilation:")
    status = workflow.get_compilation_status()
    print(f"  Compiled: {status['is_compiled']}")
    print(f"  Cache Efficient: {status['cache_efficient']}")
    print(f"  Cached Layers: {status['cached_layers_count']}")


def run_caching_tests():
    """Run all caching demonstration tests."""
    print("🧪 GRAPHWORKFLOW COMPILATION CACHING TESTS")
    print(
        "Testing compilation caching behavior for multi-loop scenarios"
    )

    # Test 1: Single loop (baseline)
    test_single_loop_compilation()

    # Test 2: Multi-loop (demonstrates caching)
    test_multi_loop_compilation()

    # Test 3: Cache invalidation
    test_cache_invalidation()

    print("\n\n" + "=" * 60)
    print("🎯 CACHING SUMMARY")
    print("=" * 60)
    print("✅ Single loop: No caching needed")
    print("✅ Multi-loop: Compilation cached and reused")
    print("✅ Structure changes: Cache properly invalidated")
    print(
        "✅ Performance: Avoided redundant computation in multi-loop scenarios"
    )


if __name__ == "__main__":
    run_caching_tests()
After Width: | Height: | Size: 36 KiB |
@ -0,0 +1,490 @@
|
||||
"""
|
||||
Comprehensive test of Graphviz visualization capabilities for GraphWorkflow.
|
||||
This demonstrates various layouts, formats, and parallel pattern visualization features.
|
||||
"""
|
||||
|
||||
import os
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
|
||||
def create_simple_workflow():
|
||||
"""Create a simple sequential workflow."""
|
||||
agent1 = Agent(
|
||||
agent_name="DataCollector",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You collect and prepare data for analysis.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
agent2 = Agent(
|
||||
agent_name="DataAnalyzer",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You analyze the collected data and extract insights.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
agent3 = Agent(
|
||||
agent_name="ReportGenerator",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You generate comprehensive reports from the analysis.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
workflow = GraphWorkflow(
|
||||
name="Simple-Sequential-Workflow",
|
||||
description="A basic sequential workflow for testing visualization",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
workflow.add_node(agent1)
|
||||
workflow.add_node(agent2)
|
||||
workflow.add_node(agent3)
|
||||
|
||||
workflow.add_edge("DataCollector", "DataAnalyzer")
|
||||
workflow.add_edge("DataAnalyzer", "ReportGenerator")
|
||||
|
||||
return workflow
|
||||
|
||||
|
||||
def create_complex_parallel_workflow():
|
||||
"""Create a complex workflow with multiple parallel patterns."""
|
||||
# Data sources
|
||||
web_scraper = Agent(
|
||||
agent_name="WebScraper",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Web data scraping",
|
||||
verbose=False,
|
||||
)
|
||||
api_collector = Agent(
|
||||
agent_name="APICollector",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="API data collection",
|
||||
verbose=False,
|
||||
)
|
||||
db_extractor = Agent(
|
||||
agent_name="DatabaseExtractor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Database extraction",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Processors
|
||||
text_processor = Agent(
|
||||
agent_name="TextProcessor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Text processing",
|
||||
verbose=False,
|
||||
)
|
||||
numeric_processor = Agent(
|
||||
agent_name="NumericProcessor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Numeric processing",
|
||||
verbose=False,
|
||||
)
|
||||
image_processor = Agent(
|
||||
agent_name="ImageProcessor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Image processing",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Analyzers
|
||||
sentiment_analyzer = Agent(
|
||||
agent_name="SentimentAnalyzer",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Sentiment analysis",
|
||||
verbose=False,
|
||||
)
|
||||
trend_analyzer = Agent(
|
||||
agent_name="TrendAnalyzer",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Trend analysis",
|
||||
verbose=False,
|
||||
)
|
||||
anomaly_detector = Agent(
|
||||
agent_name="AnomalyDetector",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Anomaly detection",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Synthesis
|
||||
data_synthesizer = Agent(
|
||||
agent_name="DataSynthesizer",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Data synthesis",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Final output
|
||||
dashboard_generator = Agent(
|
||||
agent_name="DashboardGenerator",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Dashboard generation",
|
||||
verbose=False,
|
||||
)
|
||||
alert_system = Agent(
|
||||
agent_name="AlertSystem",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="Alert generation",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
workflow = GraphWorkflow(
|
||||
name="Complex-Parallel-Analytics-Workflow",
|
||||
description="A sophisticated analytics workflow demonstrating multiple parallel processing patterns including fan-out, fan-in, and parallel chains for comprehensive data processing and analysis.",
|
||||
verbose=True,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
# Add all agents
|
||||
agents = [
|
||||
web_scraper,
|
||||
api_collector,
|
||||
db_extractor,
|
||||
text_processor,
|
||||
numeric_processor,
|
||||
image_processor,
|
||||
sentiment_analyzer,
|
||||
trend_analyzer,
|
||||
anomaly_detector,
|
||||
data_synthesizer,
|
||||
dashboard_generator,
|
||||
alert_system,
|
||||
]
|
||||
|
||||
for agent in agents:
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Create complex parallel patterns
|
||||
# Stage 1: Multiple data sources (parallel entry points)
|
||||
# Stage 2: Fan-out to different processors
|
||||
workflow.add_edge("WebScraper", "TextProcessor")
|
||||
workflow.add_edge("WebScraper", "ImageProcessor")
|
||||
workflow.add_edge("APICollector", "NumericProcessor")
|
||||
workflow.add_edge("APICollector", "TextProcessor")
|
||||
workflow.add_edge("DatabaseExtractor", "NumericProcessor")
|
||||
|
||||
# Stage 3: Processors feed multiple analyzers (parallel chain)
|
||||
workflow.add_parallel_chain(
|
||||
["TextProcessor", "NumericProcessor", "ImageProcessor"],
|
||||
["SentimentAnalyzer", "TrendAnalyzer", "AnomalyDetector"],
|
||||
)
|
||||
|
||||
# Stage 4: Major fan-in to synthesizer
|
||||
workflow.add_edges_to_target(
|
||||
["SentimentAnalyzer", "TrendAnalyzer", "AnomalyDetector"],
|
||||
"DataSynthesizer",
|
||||
)
|
||||
|
||||
# Stage 5: Fan-out to final outputs
|
||||
workflow.add_edges_from_source(
|
||||
"DataSynthesizer", ["DashboardGenerator", "AlertSystem"]
|
||||
)
|
||||
|
||||
# Set entry points (multiple sources)
|
||||
workflow.set_entry_points(
|
||||
["WebScraper", "APICollector", "DatabaseExtractor"]
|
||||
)
|
||||
workflow.set_end_points(["DashboardGenerator", "AlertSystem"])
|
||||
|
||||
return workflow
|
||||
|
||||
|
||||
def test_different_layouts():
|
||||
"""Test different Graphviz layout engines."""
|
||||
print("🎨 TESTING DIFFERENT GRAPHVIZ LAYOUTS")
|
||||
print("=" * 60)
|
||||
|
||||
workflow = create_complex_parallel_workflow()
|
||||
|
||||
layouts = [
|
||||
(
|
||||
"dot",
|
||||
"Hierarchical top-to-bottom layout (best for workflows)",
|
||||
),
|
||||
("neato", "Spring model layout (good for small graphs)"),
|
||||
("fdp", "Force-directed layout (good for large graphs)"),
|
||||
(
|
||||
"sfdp",
|
||||
"Multiscale force-directed layout (for very large graphs)",
|
||||
),
|
||||
("circo", "Circular layout (good for small cyclic graphs)"),
|
||||
]
|
||||
|
||||
for engine, description in layouts:
|
||||
print(f"\n🔧 Testing {engine} layout: {description}")
|
||||
try:
|
||||
output = workflow.visualize(
|
||||
output_path=f"complex_workflow_{engine}",
|
||||
format="png",
|
||||
view=False,
|
||||
engine=engine,
|
||||
show_parallel_patterns=True,
|
||||
)
|
||||
print(f"✅ {engine} layout saved: {output}")
|
||||
except Exception as e:
|
||||
print(f"❌ {engine} layout failed: {e}")
|
||||
|
||||
|
||||
def test_different_formats():
|
||||
"""Test different output formats."""
|
||||
print("\n\n📄 TESTING DIFFERENT OUTPUT FORMATS")
|
||||
print("=" * 60)
|
||||
|
||||
workflow = create_simple_workflow()
|
||||
|
||||
formats = [
|
||||
("png", "PNG image (best for presentations)"),
|
||||
("svg", "SVG vector graphics (best for web)"),
|
||||
("pdf", "PDF document (best for documents)"),
|
||||
("dot", "Graphviz DOT source (for editing)"),
|
||||
]
|
||||
|
||||
for fmt, description in formats:
|
||||
print(f"\n📋 Testing {fmt} format: {description}")
|
||||
try:
|
||||
output = workflow.visualize(
|
||||
output_path="simple_workflow_test",
|
||||
format=fmt,
|
||||
view=False,
|
||||
engine="dot",
|
||||
show_parallel_patterns=True,
|
||||
)
|
||||
print(f"✅ {fmt} format saved: {output}")
|
||||
except Exception as e:
|
||||
print(f"❌ {fmt} format failed: {e}")
|
||||
|
||||
|
||||
def test_parallel_pattern_highlighting():
|
||||
"""Test parallel pattern highlighting features."""
|
||||
print("\n\n🔀 TESTING PARALLEL PATTERN HIGHLIGHTING")
|
||||
print("=" * 60)
|
||||
|
||||
workflow = create_complex_parallel_workflow()
|
||||
|
||||
print("\n📊 With parallel patterns highlighted:")
|
||||
try:
|
||||
output_with = workflow.visualize(
|
||||
output_path="patterns_highlighted",
|
||||
format="png",
|
||||
view=False,
|
||||
show_parallel_patterns=True,
|
||||
)
|
||||
print(f"✅ Highlighted version saved: {output_with}")
|
||||
except Exception as e:
|
||||
print(f"❌ Highlighted version failed: {e}")
|
||||
|
||||
print("\n📊 Without parallel patterns highlighted:")
|
||||
try:
|
||||
output_without = workflow.visualize(
|
||||
output_path="patterns_plain",
|
||||
format="png",
|
||||
view=False,
|
||||
show_parallel_patterns=False,
|
||||
)
|
||||
print(f"✅ Plain version saved: {output_without}")
|
||||
except Exception as e:
|
||||
print(f"❌ Plain version failed: {e}")
|
||||
|
||||
|
||||
def test_large_workflow_visualization():
|
||||
"""Test visualization of a larger workflow."""
|
||||
print("\n\n🏢 TESTING LARGE WORKFLOW VISUALIZATION")
|
||||
print("=" * 60)
|
||||
|
||||
# Create a larger workflow with many agents
|
||||
workflow = GraphWorkflow(
|
||||
name="Large-Enterprise-Workflow",
|
||||
description="Large enterprise workflow with many agents and complex dependencies",
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create 20 agents in different categories
|
||||
categories = {
|
||||
"DataIngestion": 4,
|
||||
"Processing": 6,
|
||||
"Analysis": 5,
|
||||
"Reporting": 3,
|
||||
"Monitoring": 2,
|
||||
}
|
||||
|
||||
agents_by_category = {}
|
||||
|
||||
for category, count in categories.items():
|
||||
agents_by_category[category] = []
|
||||
for i in range(count):
|
||||
agent = Agent(
|
||||
agent_name=f"{category}Agent{i+1}",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt=f"You are {category} specialist #{i+1}",
|
||||
verbose=False,
|
||||
)
|
||||
workflow.add_node(agent)
|
||||
agents_by_category[category].append(agent.agent_name)
|
||||
|
||||
# Create complex interconnections
|
||||
# Data ingestion fans out to processing
|
||||
workflow.add_parallel_chain(
|
||||
agents_by_category["DataIngestion"],
|
||||
agents_by_category["Processing"],
|
||||
)
|
||||
|
||||
# Processing feeds analysis
|
||||
workflow.add_parallel_chain(
|
||||
agents_by_category["Processing"],
|
||||
agents_by_category["Analysis"],
|
||||
)
|
||||
|
||||
# Analysis converges to reporting
|
||||
workflow.add_edges_to_target(
|
||||
agents_by_category["Analysis"],
|
||||
agents_by_category["Reporting"][0], # Primary reporter
|
||||
)
|
||||
|
||||
# Other reporting agents get subset
|
||||
workflow.add_edges_from_source(
|
||||
agents_by_category["Analysis"][0], # Primary analyzer
|
||||
agents_by_category["Reporting"][1:],
|
||||
)
|
||||
|
||||
# All reporting feeds monitoring
|
||||
workflow.add_edges_to_target(
|
||||
agents_by_category["Reporting"],
|
||||
agents_by_category["Monitoring"][0],
|
||||
)
|
||||
|
||||
print("\n📈 Large workflow statistics:")
|
||||
print(f" Agents: {len(workflow.nodes)}")
|
||||
print(f" Connections: {len(workflow.edges)}")
|
||||
|
||||
# Test with sfdp layout (good for large graphs)
|
||||
try:
|
||||
output = workflow.visualize(
|
||||
output_path="large_enterprise_workflow",
|
||||
format="svg", # SVG scales better for large graphs
|
||||
view=False,
|
||||
engine="sfdp", # Better for large graphs
|
||||
show_parallel_patterns=True,
|
||||
)
|
||||
print(f"✅ Large workflow visualization saved: {output}")
|
||||
except Exception as e:
|
||||
print(f"❌ Large workflow visualization failed: {e}")
|
||||
|
||||
|
||||
def test_fallback_visualization():
|
||||
"""Test fallback text visualization when Graphviz is not available."""
|
||||
print("\n\n🔧 TESTING FALLBACK TEXT VISUALIZATION")
|
||||
print("=" * 60)
|
||||
|
||||
workflow = create_complex_parallel_workflow()
|
||||
|
||||
print("\n📝 Testing fallback text visualization:")
|
||||
try:
|
||||
# Call the fallback method directly
|
||||
result = workflow._fallback_text_visualization()
|
||||
print(f"✅ Fallback visualization completed: {result}")
|
||||
except Exception as e:
|
||||
print(f"❌ Fallback visualization failed: {e}")
|
||||
|
||||
|
||||
def run_comprehensive_visualization_tests():
|
||||
"""Run all visualization tests."""
|
||||
print("🎨 COMPREHENSIVE GRAPHVIZ VISUALIZATION TESTS")
|
||||
print("=" * 70)
|
||||
|
||||
print(
|
||||
"Testing all aspects of the new Graphviz-based visualization system"
|
||||
)
|
||||
print(
|
||||
"including layouts, formats, parallel patterns, and large workflows"
|
||||
)
|
||||
|
||||
# Check if Graphviz is available
|
||||
try:
|
||||
import graphviz
|
||||
|
||||
print("✅ Graphviz Python package available")
|
||||
|
||||
# Test basic functionality
|
||||
graphviz.Digraph()
|
||||
print("✅ Graphviz functional")
|
||||
|
||||
graphviz_available = True
|
||||
except ImportError:
|
||||
print(
|
||||
"⚠️ Graphviz not available - some tests will use fallback"
|
||||
)
|
||||
graphviz_available = False
|
||||
|
||||
# Run tests
|
||||
if graphviz_available:
|
||||
test_different_layouts()
|
||||
test_different_formats()
|
||||
test_parallel_pattern_highlighting()
|
||||
test_large_workflow_visualization()
|
||||
|
||||
# Always test fallback
|
||||
test_fallback_visualization()
|
||||
|
||||
# Summary
|
||||
print("\n\n🎯 VISUALIZATION TESTING SUMMARY")
|
||||
print("=" * 70)
|
||||
|
||||
if graphviz_available:
|
||||
print("✅ Graphviz layouts: dot, neato, fdp, sfdp, circo")
|
||||
print("✅ Output formats: PNG, SVG, PDF, DOT")
|
||||
print("✅ Parallel pattern highlighting with color coding")
|
||||
print("✅ Legend generation for pattern types")
|
||||
print("✅ Large workflow handling with optimized layouts")
|
||||
print("✅ Professional graph styling and node shapes")
|
||||
|
||||
# List generated files
|
||||
print("\n📁 Generated visualization files:")
|
||||
current_dir = "."
|
||||
viz_files = [
|
||||
f
|
||||
for f in os.listdir(current_dir)
|
||||
if any(
|
||||
f.startswith(prefix)
|
||||
for prefix in [
|
||||
"complex_workflow_",
|
||||
"simple_workflow_",
|
||||
"patterns_",
|
||||
"large_enterprise_",
|
||||
]
|
||||
)
|
||||
]
|
||||
|
||||
for file in sorted(viz_files):
|
||||
if os.path.isfile(file):
|
||||
size = os.path.getsize(file)
|
||||
print(f" 📄 {file} ({size:,} bytes)")
|
||||
|
||||
print("✅ Text fallback visualization for compatibility")
|
||||
print("✅ Error handling and graceful degradation")
|
||||
print("✅ Comprehensive logging and status reporting")
|
||||
|
||||
print("\n🏆 GraphWorkflow now provides professional-grade")
|
||||
print(" visualization capabilities with Graphviz!")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
run_comprehensive_visualization_tests()
|
@ -0,0 +1,464 @@
|
||||
"""
|
||||
Comprehensive example demonstrating GraphWorkflow parallel processing capabilities.
|
||||
This showcases fan-out, fan-in, and parallel chain patterns for maximum efficiency.
|
||||
"""
|
||||
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
|
||||
def create_advanced_financial_analysis_workflow():
|
||||
"""
|
||||
Create a sophisticated financial analysis workflow demonstrating
|
||||
all parallel processing patterns for maximum efficiency.
|
||||
|
||||
Workflow Architecture:
|
||||
1. Data Collection (Entry Point)
|
||||
2. Fan-out to 3 Parallel Data Processors
|
||||
3. Fan-out to 4 Parallel Analysis Specialists
|
||||
4. Fan-in to Synthesis Agent
|
||||
5. Final Recommendation (End Point)
|
||||
"""
|
||||
|
||||
# === Data Collection Layer ===
|
||||
data_collector = Agent(
|
||||
agent_name="DataCollector",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a financial data collection specialist. Your role is to:
|
||||
1. Gather comprehensive market data for the target investment
|
||||
2. Collect recent news, earnings reports, and analyst ratings
|
||||
3. Compile key financial metrics and historical performance data
|
||||
4. Structure the data clearly for downstream parallel analysis
|
||||
|
||||
Provide comprehensive data that multiple specialists can analyze simultaneously.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# === Parallel Data Processing Layer ===
|
||||
market_data_processor = Agent(
|
||||
agent_name="MarketDataProcessor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a market data processing specialist. Focus on:
|
||||
1. Market price movements and trading volumes
|
||||
2. Technical indicators and chart patterns
|
||||
3. Market sentiment and momentum signals
|
||||
4. Sector and peer comparison data
|
||||
|
||||
Process raw market data into analysis-ready insights.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
fundamental_data_processor = Agent(
|
||||
agent_name="FundamentalDataProcessor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a fundamental data processing specialist. Focus on:
|
||||
1. Financial statements and accounting metrics
|
||||
2. Business model and competitive positioning
|
||||
3. Management quality and corporate governance
|
||||
4. Industry trends and regulatory environment
|
||||
|
||||
Process fundamental data into comprehensive business analysis.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
news_data_processor = Agent(
|
||||
agent_name="NewsDataProcessor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a news and sentiment data processor. Focus on:
|
||||
1. Recent news events and their market impact
|
||||
2. Analyst opinions and rating changes
|
||||
3. Social media sentiment and retail investor behavior
|
||||
4. Institutional investor positioning and flows
|
||||
|
||||
Process news and sentiment data into actionable insights.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# === Parallel Analysis Specialists Layer ===
|
||||
technical_analyst = Agent(
|
||||
agent_name="TechnicalAnalyst",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a technical analysis expert specializing in:
|
||||
1. Chart pattern analysis and trend identification
|
||||
2. Support and resistance level analysis
|
||||
3. Momentum and oscillator interpretation
|
||||
4. Entry and exit timing recommendations
|
||||
|
||||
Provide detailed technical analysis with specific price targets.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
fundamental_analyst = Agent(
|
||||
agent_name="FundamentalAnalyst",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a fundamental analysis expert specializing in:
|
||||
1. Intrinsic value calculation using multiple methods
|
||||
2. Financial ratio analysis and peer comparison
|
||||
3. Business model evaluation and competitive moats
|
||||
4. Growth prospects and risk assessment
|
||||
|
||||
Provide comprehensive fundamental analysis with valuation estimates.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
risk_analyst = Agent(
|
||||
agent_name="RiskAnalyst",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a risk management specialist focusing on:
|
||||
1. Quantitative risk metrics (VaR, volatility, correlations)
|
||||
2. Scenario analysis and stress testing
|
||||
3. Downside protection and tail risk assessment
|
||||
4. Portfolio impact and position sizing recommendations
|
||||
|
||||
Provide comprehensive risk analysis with mitigation strategies.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
esg_analyst = Agent(
|
||||
agent_name="ESGAnalyst",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are an ESG (Environmental, Social, Governance) specialist focusing on:
|
||||
1. Environmental impact and sustainability practices
|
||||
2. Social responsibility and stakeholder relations
|
||||
3. Corporate governance and ethical leadership
|
||||
4. Regulatory compliance and reputational risks
|
||||
|
||||
Provide comprehensive ESG analysis and scoring.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# === Synthesis and Final Decision Layer ===
|
||||
synthesis_agent = Agent(
|
||||
agent_name="SynthesisAgent",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are an investment synthesis specialist. Your role is to:
|
||||
1. Integrate all analysis from technical, fundamental, risk, and ESG specialists
|
||||
2. Reconcile conflicting viewpoints and identify consensus areas
|
||||
3. Weight different analysis components based on market conditions
|
||||
4. Identify the most compelling investment thesis and key risks
|
||||
|
||||
Provide a balanced synthesis that considers all analytical perspectives.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
portfolio_manager = Agent(
|
||||
agent_name="PortfolioManager",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are the final investment decision maker. Your role is to:
|
||||
1. Review all synthesis and specialist analysis
|
||||
2. Make clear investment recommendations (BUY/HOLD/SELL)
|
||||
3. Provide specific entry/exit criteria and price targets
|
||||
4. Recommend position sizing and risk management approach
|
||||
5. Outline monitoring criteria and review timeline
|
||||
|
||||
Provide actionable investment guidance with clear rationale.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# === Create Workflow ===
|
||||
workflow = GraphWorkflow(
|
||||
name="Advanced-Parallel-Financial-Analysis",
|
||||
description="Sophisticated multi-agent financial analysis workflow demonstrating fan-out, fan-in, and parallel processing patterns for maximum efficiency and comprehensive analysis coverage.",
|
||||
max_loops=1,
|
||||
verbose=True,
|
||||
auto_compile=True,
|
||||
task="Analyze Apple Inc. (AAPL) as a potential investment opportunity with comprehensive parallel analysis covering technical, fundamental, risk, and ESG factors.",
|
||||
)
|
||||
|
||||
# Add all agents
|
||||
agents = [
|
||||
data_collector,
|
||||
market_data_processor,
|
||||
fundamental_data_processor,
|
||||
news_data_processor,
|
||||
technical_analyst,
|
||||
fundamental_analyst,
|
||||
risk_analyst,
|
||||
esg_analyst,
|
||||
synthesis_agent,
|
||||
portfolio_manager,
|
||||
]
|
||||
|
||||
for agent in agents:
|
||||
workflow.add_node(agent)
|
||||
|
||||
# === Create Parallel Processing Architecture ===
|
||||
|
||||
# Stage 1: Data Collection feeds into parallel processors (Fan-out)
|
||||
workflow.add_edges_from_source(
|
||||
"DataCollector",
|
||||
[
|
||||
"MarketDataProcessor",
|
||||
"FundamentalDataProcessor",
|
||||
"NewsDataProcessor",
|
||||
],
|
||||
)
|
||||
|
||||
# Stage 2: Each processor feeds specific analysts (Targeted Fan-out)
|
||||
workflow.add_edge("MarketDataProcessor", "TechnicalAnalyst")
|
||||
workflow.add_edge(
|
||||
"FundamentalDataProcessor", "FundamentalAnalyst"
|
||||
)
|
||||
workflow.add_edge("NewsDataProcessor", "ESGAnalyst")
|
||||
|
||||
# Stage 3: All processors also feed risk analyst (Additional Fan-in)
|
||||
workflow.add_edges_to_target(
|
||||
[
|
||||
"MarketDataProcessor",
|
||||
"FundamentalDataProcessor",
|
||||
"NewsDataProcessor",
|
||||
],
|
||||
"RiskAnalyst",
|
||||
)
|
||||
|
||||
# Stage 4: All specialists feed synthesis (Major Fan-in)
|
||||
workflow.add_edges_to_target(
|
||||
[
|
||||
"TechnicalAnalyst",
|
||||
"FundamentalAnalyst",
|
||||
"RiskAnalyst",
|
||||
"ESGAnalyst",
|
||||
],
|
||||
"SynthesisAgent",
|
||||
)
|
||||
|
||||
# Stage 5: Synthesis feeds portfolio manager (Final Decision)
|
||||
workflow.add_edge("SynthesisAgent", "PortfolioManager")
|
||||
|
||||
return workflow
|
||||
|
||||
|
||||
# def create_parallel_research_workflow():
|
||||
# """
|
||||
# Create a parallel research workflow using the new from_spec syntax
|
||||
# that supports parallel patterns.
|
||||
# """
|
||||
|
||||
# # Create research agents
|
||||
# web_researcher = Agent(
|
||||
# agent_name="WebResearcher",
|
||||
# model_name="gpt-4o-mini",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are a web research specialist. Focus on online sources, news, and current information.",
|
||||
# verbose=False,
|
||||
# )
|
||||
|
||||
# academic_researcher = Agent(
|
||||
# agent_name="AcademicResearcher",
|
||||
# model_name="gpt-4o-mini",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are an academic research specialist. Focus on peer-reviewed papers and scholarly sources.",
|
||||
# verbose=False,
|
||||
# )
|
||||
|
||||
# market_researcher = Agent(
|
||||
# agent_name="MarketResearcher",
|
||||
# model_name="gpt-4o-mini",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are a market research specialist. Focus on industry reports and market analysis.",
|
||||
# verbose=False,
|
||||
# )
|
||||
|
||||
# analyst1 = Agent(
|
||||
# agent_name="Analyst1",
|
||||
# model_name="gpt-4o-mini",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are Analysis Specialist 1. Provide quantitative analysis.",
|
||||
# verbose=False,
|
||||
# )
|
||||
|
||||
# analyst2 = Agent(
|
||||
# agent_name="Analyst2",
|
||||
# model_name="gpt-4o-mini",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are Analysis Specialist 2. Provide qualitative analysis.",
|
||||
# verbose=False,
|
||||
# )
|
||||
|
||||
# synthesizer = Agent(
|
||||
# agent_name="ResearchSynthesizer",
|
||||
# model_name="gpt-4o-mini",
|
||||
# max_loops=1,
|
||||
# system_prompt="You are a research synthesizer. Combine all research into comprehensive conclusions.",
|
||||
# verbose=False,
|
||||
# )
|
||||
|
||||
# # Use from_spec with new parallel edge syntax
|
||||
# workflow = GraphWorkflow.from_spec(
|
||||
# agents=[web_researcher, academic_researcher, market_researcher, analyst1, analyst2, synthesizer],
|
||||
# edges=[
|
||||
# # Fan-out: Each researcher feeds both analysts (parallel chain)
|
||||
# (["WebResearcher", "AcademicResearcher", "MarketResearcher"], ["Analyst1", "Analyst2"]),
|
||||
# # Fan-in: Both analysts feed synthesizer
|
||||
# (["Analyst1", "Analyst2"], "ResearchSynthesizer")
|
||||
# ],
|
||||
# name="Parallel-Research-Workflow",
|
||||
# description="Parallel research workflow using advanced edge syntax",
|
||||
# max_loops=1,
|
||||
# verbose=True,
|
||||
# task="Research the future of renewable energy technology and market opportunities"
|
||||
# )
|
||||
|
||||
# return workflow
|
||||
|
||||
|
||||
# def demonstrate_parallel_patterns():
|
||||
# """
|
||||
# Demonstrate all parallel processing patterns and their benefits.
|
||||
# """
|
||||
# print("🚀 ADVANCED PARALLEL PROCESSING DEMONSTRATION")
|
||||
# print("=" * 70)
|
||||
|
||||
# # === Advanced Financial Analysis ===
|
||||
# print("\n💰 ADVANCED FINANCIAL ANALYSIS WORKFLOW")
|
||||
# print("-" * 50)
|
||||
|
||||
# financial_workflow = create_advanced_financial_analysis_workflow()
|
||||
|
||||
# print("\n📊 Creating Graphviz Visualization...")
|
||||
# try:
|
||||
# # Create PNG visualization
|
||||
# png_output = financial_workflow.visualize(
|
||||
# output_path="financial_workflow_graph",
|
||||
# format="png",
|
||||
# view=False, # Don't auto-open for demo
|
||||
# show_parallel_patterns=True
|
||||
# )
|
||||
# print(f"✅ Financial workflow visualization saved: {png_output}")
|
||||
|
||||
# # Create SVG for web use
|
||||
# svg_output = financial_workflow.visualize(
|
||||
# output_path="financial_workflow_web",
|
||||
# format="svg",
|
||||
# view=False,
|
||||
# show_parallel_patterns=True
|
||||
# )
|
||||
# print(f"✅ Web-ready SVG visualization saved: {svg_output}")
|
||||
|
||||
# except Exception as e:
|
||||
# print(f"⚠️ Graphviz visualization failed, using fallback: {e}")
|
||||
# financial_workflow.visualize()
|
||||
|
||||
# print(f"\n📈 Workflow Architecture:")
|
||||
# print(f" Total Agents: {len(financial_workflow.nodes)}")
|
||||
# print(f" Total Connections: {len(financial_workflow.edges)}")
|
||||
# print(f" Parallel Layers: {len(financial_workflow._sorted_layers) if financial_workflow._compiled else 'Not compiled'}")
|
||||
|
||||
# # Show compilation benefits
|
||||
# status = financial_workflow.get_compilation_status()
|
||||
# print(f" Compilation Status: {status['is_compiled']}")
|
||||
# print(f" Cache Efficient: {status['cache_efficient']}")
|
||||
|
||||
# # === Parallel Research Workflow ===
|
||||
# print("\n\n📚 PARALLEL RESEARCH WORKFLOW (from_spec)")
|
||||
# print("-" * 50)
|
||||
|
||||
# research_workflow = create_parallel_research_workflow()
|
||||
|
||||
# print("\n📊 Creating Research Workflow Visualization...")
|
||||
# try:
|
||||
# # Create circular layout for research workflow
|
||||
# research_output = research_workflow.visualize(
|
||||
# output_path="research_workflow_graph",
|
||||
# format="png",
|
||||
# view=False,
|
||||
# engine="circo", # Circular layout for smaller graphs
|
||||
# show_parallel_patterns=True
|
||||
# )
|
||||
# print(f"✅ Research workflow visualization saved: {research_output}")
|
||||
# except Exception as e:
|
||||
# print(f"⚠️ Graphviz visualization failed, using fallback: {e}")
|
||||
# research_workflow.visualize()
|
||||
|
||||
# print(f"\n📈 Research Workflow Architecture:")
|
||||
# print(f" Total Agents: {len(research_workflow.nodes)}")
|
||||
# print(f" Total Connections: {len(research_workflow.edges)}")
|
||||
# print(f" Entry Points: {research_workflow.entry_points}")
|
||||
# print(f" End Points: {research_workflow.end_points}")
|
||||
|
||||
# # === Performance Analysis ===
|
||||
# print("\n\n⚡ PARALLEL PROCESSING BENEFITS")
|
||||
# print("-" * 50)
|
||||
|
||||
# print("🔀 Pattern Analysis:")
|
||||
|
||||
# # Analyze financial workflow patterns
|
||||
# fin_fan_out = {}
|
||||
# fin_fan_in = {}
|
||||
|
||||
# for edge in financial_workflow.edges:
|
||||
# # Track fan-out
|
||||
# if edge.source not in fin_fan_out:
|
||||
# fin_fan_out[edge.source] = []
|
||||
# fin_fan_out[edge.source].append(edge.target)
|
||||
|
||||
# # Track fan-in
|
||||
# if edge.target not in fin_fan_in:
|
||||
# fin_fan_in[edge.target] = []
|
||||
# fin_fan_in[edge.target].append(edge.source)
|
||||
|
||||
# fan_out_count = sum(1 for targets in fin_fan_out.values() if len(targets) > 1)
|
||||
# fan_in_count = sum(1 for sources in fin_fan_in.values() if len(sources) > 1)
|
||||
# parallel_nodes = sum(len(targets) for targets in fin_fan_out.values() if len(targets) > 1)
|
||||
|
||||
# print(f" Financial Workflow:")
|
||||
# print(f" 🔀 Fan-out Patterns: {fan_out_count}")
|
||||
# print(f" 🔀 Fan-in Patterns: {fan_in_count}")
|
||||
# print(f" ⚡ Parallel Execution Nodes: {parallel_nodes}")
|
||||
# print(f" 🎯 Efficiency Gain: ~{(parallel_nodes / len(financial_workflow.nodes)) * 100:.1f}% parallel processing")
|
||||
|
||||
# # === Export Examples ===
|
||||
# print("\n\n💾 WORKFLOW EXPORT EXAMPLE")
|
||||
# print("-" * 50)
|
||||
|
||||
# try:
|
||||
# # Save financial workflow
|
||||
# saved_path = financial_workflow.save_to_file(
|
||||
# "advanced_financial_workflow.json",
|
||||
# include_runtime_state=True,
|
||||
# overwrite=True
|
||||
# )
|
||||
# print(f"✅ Financial workflow saved to: {saved_path}")
|
||||
|
||||
# # Export summary
|
||||
# summary = financial_workflow.export_summary()
|
||||
# print(f"\n📋 Workflow Summary:")
|
||||
# print(f" Agents: {len(summary['agents'])}")
|
||||
# print(f" Connections: {len(summary['connections'])}")
|
||||
# print(f" Parallel Patterns Detected: {fan_out_count + fan_in_count}")
|
||||
|
||||
# except Exception as e:
|
||||
# print(f"⚠️ Export failed: {e}")
|
||||
|
||||
# print("\n\n🎯 PARALLEL PROCESSING SUMMARY")
|
||||
# print("=" * 70)
|
||||
# print("✅ Fan-out patterns: One agent output distributed to multiple agents")
|
||||
# print("✅ Fan-in patterns: Multiple agent outputs converged to one agent")
|
||||
# print("✅ Parallel chains: Multiple sources connected to multiple targets")
|
||||
# print("✅ Enhanced visualization: Shows parallel patterns clearly")
|
||||
# print("✅ Compilation caching: Optimized execution for complex graphs")
|
||||
# print("✅ Flexible from_spec syntax: Easy parallel workflow creation")
|
||||
# print("✅ Maximum efficiency: Parallel processing instead of sequential chains")
|
||||
|
||||
|
||||
# if __name__ == "__main__":
|
||||
# demonstrate_parallel_patterns()
|
||||
|
||||
if __name__ == "__main__":
|
||||
workflow = create_advanced_financial_analysis_workflow()
|
||||
workflow.visualize(
|
||||
output_path="advanced_financial_analysis_workflow",
|
||||
format="png",
|
||||
view=True,
|
||||
show_parallel_patterns=True,
|
||||
)
|
@ -0,0 +1,95 @@
from swarms import Agent
from swarms.structs.multi_agent_debates import OneOnOneDebate

# Initialize the debate participants
ai_ethicist = Agent(
    agent_name="AI-Ethicist",
    agent_description="AI ethics researcher and philosopher",
    system_prompt="""You are an AI ethics researcher and philosopher specializing in:
    - AI safety and alignment
    - Machine learning fairness
    - Algorithmic bias
    - AI governance
    - Ethical frameworks
    - Responsible AI development

    Present thoughtful arguments about AI ethics while considering multiple perspectives.""",
    model_name="claude-3-sonnet-20240229",
)

tech_advocate = Agent(
    agent_name="Tech-Advocate",
    agent_description="AI technology and innovation advocate",
    system_prompt="""You are an AI technology advocate focused on:
    - AI innovation benefits
    - Technological progress
    - Economic opportunities
    - Scientific advancement
    - AI capabilities
    - Development acceleration

    Present balanced arguments for AI advancement while acknowledging ethical considerations.""",
    model_name="claude-3-sonnet-20240229",
)

# Initialize the debate
debate = OneOnOneDebate(
    max_loops=3,
    agents=[ai_ethicist, tech_advocate],
    output_type="str-all-except-first",
)

# Debate topic
debate_topic = """
Debate Topic: Autonomous AI Systems in Critical Decision-Making

Context:
The increasing deployment of autonomous AI systems in critical decision-making
roles across healthcare, criminal justice, financial services, and military
applications raises important ethical questions.

Key Considerations:

1. Algorithmic Decision-Making
   - Transparency vs. complexity
   - Accountability mechanisms
   - Human oversight requirements
   - Appeal processes
   - Bias mitigation

2. Safety and Reliability
   - Testing standards
   - Failure modes
   - Redundancy requirements
   - Update mechanisms
   - Emergency protocols

3. Social Impact
   - Job displacement
   - Skill requirements
   - Economic effects
   - Social inequality
   - Access disparities

4. Governance Framework
   - Regulatory approaches
   - Industry standards
   - International coordination
   - Liability frameworks
   - Certification requirements

Debate Questions:
1. Should autonomous AI systems be allowed in critical decision-making roles?
2. What safeguards and limitations should be implemented?
3. How should we balance innovation with ethical concerns?
4. What governance frameworks are appropriate?
5. Who should be accountable for AI decisions?

Goal: Explore the ethical implications and practical considerations of autonomous
AI systems in critical decision-making roles while examining both potential
benefits and risks.
"""

# Execute the debate
debate_output = debate.run(debate_topic)
print(debate_output)
@ -0,0 +1,141 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.multi_agent_debates import NegotiationSession
|
||||
|
||||
# Initialize the negotiation participants
|
||||
incident_mediator = Agent(
|
||||
agent_name="Security-Mediator",
|
||||
agent_description="Cybersecurity incident response mediator",
|
||||
system_prompt="""You are a cybersecurity incident response mediator skilled in:
|
||||
- Incident response coordination
|
||||
- Stakeholder management
|
||||
- Technical risk assessment
|
||||
- Compliance requirements
|
||||
- Crisis communication
|
||||
|
||||
Facilitate productive negotiation while ensuring security and compliance priorities.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
security_team = Agent(
|
||||
agent_name="Security-Team",
|
||||
agent_description="Corporate security team representative",
|
||||
system_prompt="""You are the corporate security team lead focusing on:
|
||||
- Threat assessment
|
||||
- Security controls
|
||||
- Incident containment
|
||||
- System hardening
|
||||
- Security monitoring
|
||||
|
||||
Advocate for robust security measures and risk mitigation.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
business_ops = Agent(
|
||||
agent_name="Business-Operations",
|
||||
agent_description="Business operations representative",
|
||||
system_prompt="""You are the business operations director concerned with:
|
||||
- Business continuity
|
||||
- Operational impact
|
||||
- Resource allocation
|
||||
- Customer service
|
||||
- Revenue protection
|
||||
|
||||
Balance security needs with business operations requirements.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
legal_counsel = Agent(
|
||||
agent_name="Legal-Counsel",
|
||||
agent_description="Corporate legal representative",
|
||||
system_prompt="""You are the corporate legal counsel expert in:
|
||||
- Data privacy law
|
||||
- Breach notification
|
||||
- Regulatory compliance
|
||||
- Legal risk management
|
||||
- Contract obligations
|
||||
|
||||
Ensure legal compliance and risk management in incident response.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
it_infrastructure = Agent(
|
||||
agent_name="IT-Infrastructure",
|
||||
agent_description="IT infrastructure team representative",
|
||||
system_prompt="""You are the IT infrastructure lead responsible for:
|
||||
- System availability
|
||||
- Network security
|
||||
- Data backup
|
||||
- Service restoration
|
||||
- Technical implementation
|
||||
|
||||
Address technical feasibility and implementation considerations.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
# Initialize the negotiation session
|
||||
negotiation = NegotiationSession(
|
||||
parties=[
|
||||
security_team,
|
||||
business_ops,
|
||||
legal_counsel,
|
||||
it_infrastructure,
|
||||
],
|
||||
mediator=incident_mediator,
|
||||
negotiation_rounds=4,
|
||||
include_concessions=True,
|
||||
output_type="str-all-except-first",
|
||||
)
|
||||
|
||||
# Incident response scenario
|
||||
incident_scenario = """
|
||||
Critical Security Incident Response Planning
|
||||
|
||||
Incident Overview:
|
||||
Sophisticated ransomware attack detected in corporate network affecting:
|
||||
- Customer relationship management (CRM) system
|
||||
- Financial processing systems
|
||||
- Email servers
|
||||
- Internal documentation repositories
|
||||
|
||||
Current Status:
|
||||
- 30% of systems encrypted
|
||||
- Ransom demand: 50 BTC
|
||||
- Limited system access
|
||||
- Potential data exfiltration
|
||||
- Customer data potentially compromised
|
||||
|
||||
Key Decision Points:
|
||||
1. System Isolation Strategy
|
||||
- Which systems to isolate
|
||||
- Impact on business operations
|
||||
- Customer service contingencies
|
||||
|
||||
2. Ransom Response
|
||||
- Payment consideration
|
||||
- Legal implications
|
||||
- Insurance coverage
|
||||
- Alternative recovery options
|
||||
|
||||
3. Communication Plan
|
||||
- Customer notification timing
|
||||
- Regulatory reporting
|
||||
- Public relations strategy
|
||||
- Internal communications
|
||||
|
||||
4. Recovery Priorities
|
||||
- System restoration order
|
||||
- Resource allocation
|
||||
- Business continuity measures
|
||||
- Security improvements
|
||||
|
||||
Required Outcomes:
|
||||
- Agreed incident response strategy
|
||||
- Business continuity plan
|
||||
- Communication framework
|
||||
- Recovery timeline
|
||||
- Resource allocation plan
|
||||
"""
|
||||
|
||||
# Execute the negotiation session
|
||||
negotiation_output = negotiation.run(incident_scenario)
|
||||
print(negotiation_output)
|
@ -0,0 +1,78 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.multi_agent_debates import ExpertPanelDiscussion
|
||||
|
||||
# Initialize expert agents
|
||||
cardiologist = Agent(
|
||||
agent_name="Cardiologist",
|
||||
agent_description="Expert cardiologist specializing in advanced heart failure",
|
||||
system_prompt="""You are a leading cardiologist with expertise in:
|
||||
- Advanced heart failure management
|
||||
- Cardiac device therapy
|
||||
- Preventive cardiology
|
||||
- Clinical research in cardiovascular medicine
|
||||
|
||||
Provide expert insights on cardiac care, treatment protocols, and research developments.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
oncologist = Agent(
|
||||
agent_name="Oncologist",
|
||||
agent_description="Oncologist specializing in cardio-oncology",
|
||||
system_prompt="""You are an experienced oncologist focusing on:
|
||||
- Cardio-oncology
|
||||
- Cancer treatment cardiotoxicity
|
||||
- Preventive strategies for cancer therapy cardiac complications
|
||||
- Integration of cancer and cardiac care
|
||||
|
||||
Provide expert perspectives on managing cancer treatment while protecting cardiac health.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
pharmacologist = Agent(
|
||||
agent_name="Clinical-Pharmacologist",
|
||||
agent_description="Clinical pharmacologist specializing in cardiovascular medications",
|
||||
system_prompt="""You are a clinical pharmacologist expert in:
|
||||
- Cardiovascular drug interactions
|
||||
- Medication optimization
|
||||
- Drug safety in cardiac patients
|
||||
- Personalized medicine approaches
|
||||
|
||||
Provide insights on medication management and drug safety.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
moderator = Agent(
|
||||
agent_name="Medical-Panel-Moderator",
|
||||
agent_description="Experienced medical conference moderator",
|
||||
system_prompt="""You are a skilled medical panel moderator who:
|
||||
- Guides discussions effectively
|
||||
- Ensures balanced participation
|
||||
- Maintains focus on key topics
|
||||
- Synthesizes expert insights
|
||||
|
||||
Guide the panel discussion professionally while drawing out key insights.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
# Initialize the panel discussion
|
||||
panel = ExpertPanelDiscussion(
|
||||
max_rounds=3,
|
||||
agents=[cardiologist, oncologist, pharmacologist],
|
||||
moderator=moderator,
|
||||
output_type="str-all-except-first",
|
||||
)
|
||||
|
||||
# Run the panel discussion on a specific case
|
||||
discussion_topic = """
|
||||
Case Discussion: 56-year-old female with HER2-positive breast cancer requiring
|
||||
trastuzumab therapy, with pre-existing mild left ventricular dysfunction
|
||||
(LVEF 45%). Key questions:
|
||||
1. Risk assessment for cardiotoxicity
|
||||
2. Monitoring strategy during cancer treatment
|
||||
3. Preventive cardiac measures
|
||||
4. Medication management approach
|
||||
"""
|
||||
|
||||
# Execute the panel discussion
|
||||
panel_output = panel.run(discussion_topic)
|
||||
print(panel_output)
|
@ -0,0 +1,90 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.multi_agent_debates import PeerReviewProcess
|
||||
|
||||
# Initialize the insurance claim reviewers and author
|
||||
claims_adjuster = Agent(
|
||||
agent_name="Claims-Adjuster",
|
||||
agent_description="Senior claims adjuster with expertise in complex medical claims",
|
||||
system_prompt="""You are a senior claims adjuster specializing in:
|
||||
- Complex medical claims evaluation
|
||||
- Policy coverage analysis
|
||||
- Claims documentation review
|
||||
- Fraud detection
|
||||
- Regulatory compliance
|
||||
|
||||
Review claims thoroughly and provide detailed assessments based on policy terms and medical necessity.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
medical_director = Agent(
|
||||
agent_name="Medical-Director",
|
||||
agent_description="Insurance medical director for clinical review",
|
||||
system_prompt="""You are an insurance medical director expert in:
|
||||
- Clinical necessity evaluation
|
||||
- Treatment protocol assessment
|
||||
- Medical cost analysis
|
||||
- Quality of care review
|
||||
|
||||
Evaluate medical aspects of claims and ensure appropriate healthcare delivery.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
legal_specialist = Agent(
|
||||
agent_name="Legal-Specialist",
|
||||
agent_description="Insurance legal specialist for compliance review",
|
||||
system_prompt="""You are an insurance legal specialist focusing on:
|
||||
- Regulatory compliance
|
||||
- Policy interpretation
|
||||
- Legal risk assessment
|
||||
- Documentation requirements
|
||||
|
||||
Review claims for legal compliance and policy adherence.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
claims_processor = Agent(
|
||||
agent_name="Claims-Processor",
|
||||
agent_description="Claims processor who submitted the initial claim",
|
||||
system_prompt="""You are a claims processor responsible for:
|
||||
- Initial claim submission
|
||||
- Documentation gathering
|
||||
- Policy verification
|
||||
- Benefit calculation
|
||||
|
||||
Present claims clearly and respond to reviewer feedback.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
# Initialize the peer review process
|
||||
review_process = PeerReviewProcess(
|
||||
reviewers=[claims_adjuster, medical_director, legal_specialist],
|
||||
author=claims_processor,
|
||||
review_rounds=2,
|
||||
output_type="str-all-except-first",
|
||||
)
|
||||
|
||||
# Complex claim case for review
|
||||
claim_case = """
|
||||
High-Value Claim Review Required:
|
||||
Patient underwent emergency TAVR (Transcatheter Aortic Valve Replacement) at out-of-network facility
|
||||
while traveling. Claim value: $285,000
|
||||
|
||||
Key Elements for Review:
|
||||
1. Emergency nature verification
|
||||
2. Out-of-network coverage applicability
|
||||
3. Procedure medical necessity
|
||||
4. Pricing comparison with in-network facilities
|
||||
5. Patient's policy coverage limits
|
||||
6. Network adequacy requirements
|
||||
7. State regulatory compliance
|
||||
|
||||
Additional Context:
|
||||
- Patient has comprehensive coverage with out-of-network benefits
|
||||
- Procedure was performed without prior authorization
|
||||
- Local in-network facilities were 200+ miles away
|
||||
- Patient was stabilized but required urgent intervention within 24 hours
|
||||
"""
|
||||
|
||||
# Execute the review process
|
||||
review_output = review_process.run(claim_case)
|
||||
print(review_output)
|
@ -0,0 +1,124 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.multi_agent_debates import CouncilMeeting
|
||||
|
||||
# Initialize the investment council members
|
||||
investment_chair = Agent(
|
||||
agent_name="Investment-Chair",
|
||||
agent_description="Investment committee chairperson",
|
||||
system_prompt="""You are the investment committee chair with expertise in:
|
||||
- Portfolio strategy
|
||||
- Risk management
|
||||
- Asset allocation
|
||||
- Investment governance
|
||||
- Performance oversight
|
||||
|
||||
Lead the council meeting effectively while ensuring thorough analysis and proper decision-making.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
equity_strategist = Agent(
|
||||
agent_name="Equity-Strategist",
|
||||
agent_description="Global equity investment strategist",
|
||||
system_prompt="""You are a senior equity strategist specializing in:
|
||||
- Global equity markets
|
||||
- Sector allocation
|
||||
- Factor investing
|
||||
- ESG integration
|
||||
- Market analysis
|
||||
|
||||
Provide insights on equity investment opportunities and risks.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
fixed_income_specialist = Agent(
|
||||
agent_name="Fixed-Income-Specialist",
|
||||
agent_description="Fixed income portfolio manager",
|
||||
system_prompt="""You are a fixed income specialist expert in:
|
||||
- Bond market analysis
|
||||
- Credit risk assessment
|
||||
- Duration management
|
||||
- Yield curve strategies
|
||||
- Fixed income derivatives
|
||||
|
||||
Contribute expertise on fixed income markets and strategies.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
risk_manager = Agent(
|
||||
agent_name="Risk-Manager",
|
||||
agent_description="Investment risk management specialist",
|
||||
system_prompt="""You are a risk management expert focusing on:
|
||||
- Portfolio risk analysis
|
||||
- Risk modeling
|
||||
- Scenario testing
|
||||
- Risk budgeting
|
||||
- Compliance oversight
|
||||
|
||||
Provide risk assessment and mitigation strategies.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
alternatives_expert = Agent(
|
||||
agent_name="Alternatives-Expert",
|
||||
agent_description="Alternative investments specialist",
|
||||
system_prompt="""You are an alternative investments expert specializing in:
|
||||
- Private equity
|
||||
- Real estate
|
||||
- Hedge funds
|
||||
- Infrastructure
|
||||
- Private credit
|
||||
|
||||
Contribute insights on alternative investment opportunities.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
# Initialize the council meeting
|
||||
council = CouncilMeeting(
|
||||
council_members=[
|
||||
equity_strategist,
|
||||
fixed_income_specialist,
|
||||
risk_manager,
|
||||
alternatives_expert,
|
||||
],
|
||||
chairperson=investment_chair,
|
||||
voting_rounds=2,
|
||||
require_consensus=True,
|
||||
output_type="str-all-except-first",
|
||||
)
|
||||
|
||||
# Investment proposal for discussion
|
||||
investment_proposal = """
|
||||
Strategic Asset Allocation Review and Proposal
|
||||
|
||||
Current Market Context:
|
||||
- Rising inflation expectations
|
||||
- Monetary policy tightening cycle
|
||||
- Geopolitical tensions
|
||||
- ESG considerations
|
||||
- Private market opportunities
|
||||
|
||||
Proposed Changes:
|
||||
1. Reduce developed market equity allocation by 5%
|
||||
2. Increase private credit allocation by 3%
|
||||
3. Add 2% to infrastructure investments
|
||||
4. Implement ESG overlay across equity portfolio
|
||||
5. Extend fixed income duration
|
||||
|
||||
Risk Considerations:
|
||||
- Impact on portfolio liquidity
|
||||
- Currency exposure
|
||||
- Interest rate sensitivity
|
||||
- Manager selection risk
|
||||
- ESG implementation challenges
|
||||
|
||||
Required Decisions:
|
||||
1. Approve/modify allocation changes
|
||||
2. Set implementation timeline
|
||||
3. Define risk monitoring framework
|
||||
4. Establish performance metrics
|
||||
5. Determine rebalancing triggers
|
||||
"""
|
||||
|
||||
# Execute the council meeting
|
||||
council_output = council.run(investment_proposal)
|
||||
print(council_output)
|
@ -0,0 +1,109 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.multi_agent_debates import TrialSimulation
|
||||
|
||||
# Initialize the trial participants
|
||||
prosecution_attorney = Agent(
|
||||
agent_name="Prosecution-Attorney",
|
||||
agent_description="Medical malpractice plaintiff's attorney",
|
||||
system_prompt="""You are a skilled medical malpractice attorney representing the plaintiff with expertise in:
|
||||
- Medical negligence cases
|
||||
- Healthcare standards of care
|
||||
- Patient rights
|
||||
- Medical expert testimony
|
||||
- Damages assessment
|
||||
|
||||
Present the case effectively while establishing breach of standard of care and resulting damages.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
defense_attorney = Agent(
|
||||
agent_name="Defense-Attorney",
|
||||
agent_description="Healthcare defense attorney",
|
||||
system_prompt="""You are an experienced healthcare defense attorney specializing in:
|
||||
- Medical malpractice defense
|
||||
- Healthcare provider representation
|
||||
- Clinical practice guidelines
|
||||
- Risk management
|
||||
- Expert witness coordination
|
||||
|
||||
Defend the healthcare provider while demonstrating adherence to standard of care.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
judge = Agent(
|
||||
agent_name="Trial-Judge",
|
||||
agent_description="Experienced medical malpractice trial judge",
|
||||
system_prompt="""You are a trial judge with extensive experience in:
|
||||
- Medical malpractice litigation
|
||||
- Healthcare law
|
||||
- Evidence evaluation
|
||||
- Expert testimony assessment
|
||||
- Procedural compliance
|
||||
|
||||
Ensure fair trial conduct and proper legal procedure.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
expert_witness = Agent(
|
||||
agent_name="Medical-Expert",
|
||||
agent_description="Neurosurgery expert witness",
|
||||
system_prompt="""You are a board-certified neurosurgeon serving as expert witness with:
|
||||
- 20+ years surgical experience
|
||||
- Clinical practice expertise
|
||||
- Standard of care knowledge
|
||||
- Surgical complication management
|
||||
|
||||
Provide expert testimony on neurosurgical standards and practices.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
treating_physician = Agent(
|
||||
agent_name="Treating-Physician",
|
||||
agent_description="Physician who treated the patient post-incident",
|
||||
system_prompt="""You are the treating physician who:
|
||||
- Managed post-surgical complications
|
||||
- Documented patient condition
|
||||
- Coordinated rehabilitation care
|
||||
- Assessed permanent damage
|
||||
|
||||
Testify about patient's condition and treatment course.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
# Initialize the trial simulation
|
||||
trial = TrialSimulation(
|
||||
prosecution=prosecution_attorney,
|
||||
defense=defense_attorney,
|
||||
judge=judge,
|
||||
witnesses=[expert_witness, treating_physician],
|
||||
phases=["opening", "testimony", "cross", "closing"],
|
||||
output_type="str-all-except-first",
|
||||
)
|
||||
|
||||
# Medical malpractice case details
|
||||
case_details = """
|
||||
Medical Malpractice Case: Johnson v. Metropolitan Neurosurgical Associates
|
||||
|
||||
Case Overview:
|
||||
Patient underwent elective cervical disc surgery (ACDF C5-C6) resulting in post-operative
|
||||
C5 palsy with permanent upper extremity weakness. Plaintiff alleges:
|
||||
|
||||
1. Improper surgical technique
|
||||
2. Failure to recognize post-operative complications timely
|
||||
3. Inadequate informed consent process
|
||||
4. Delayed rehabilitation intervention
|
||||
|
||||
Key Evidence:
|
||||
- Operative notes showing standard surgical approach
|
||||
- Post-operative imaging revealing cord signal changes
|
||||
- Physical therapy documentation of delayed recovery
|
||||
- Expert analysis of surgical technique
|
||||
- Informed consent documentation
|
||||
- Patient's permanent disability assessment
|
||||
|
||||
Damages Sought: $2.8 million in medical expenses, lost wages, and pain and suffering
|
||||
"""
|
||||
|
||||
# Execute the trial simulation
|
||||
trial_output = trial.run(case_details)
|
||||
print(trial_output)
|
@ -0,0 +1,135 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.multi_agent_debates import MediationSession
|
||||
|
||||
# Initialize the mediation participants
|
||||
tech_mediator = Agent(
|
||||
agent_name="Tech-Industry-Mediator",
|
||||
agent_description="Experienced semiconductor industry merger mediator",
|
||||
system_prompt="""You are a semiconductor industry merger mediator expert in:
|
||||
- Semiconductor industry dynamics
|
||||
- Technology IP valuation
|
||||
- Antitrust considerations
|
||||
- Global chip supply chain
|
||||
- R&D integration
|
||||
|
||||
Facilitate resolution of this major semiconductor merger while considering market impact, regulatory compliance, and technological synergies.""",
|
||||
model_name="gpt-4.1",
|
||||
)
|
||||
|
||||
nvidia_rep = Agent(
|
||||
agent_name="NVIDIA-Representative",
|
||||
agent_description="NVIDIA corporate representative",
|
||||
system_prompt="""You are NVIDIA's representative focused on:
|
||||
- GPU technology leadership
|
||||
- AI/ML compute dominance
|
||||
- Data center growth
|
||||
- Gaming market share
|
||||
- CUDA ecosystem expansion
|
||||
|
||||
Represent NVIDIA's interests in acquiring AMD while leveraging complementary strengths.""",
|
||||
model_name="gpt-4.1",
|
||||
)
|
||||
|
||||
amd_rep = Agent(
|
||||
agent_name="AMD-Representative",
|
||||
agent_description="AMD corporate representative",
|
||||
system_prompt="""You are AMD's representative concerned with:
|
||||
- x86 CPU market position
|
||||
- RDNA graphics technology
|
||||
- Semi-custom business
|
||||
- Server market growth
|
||||
- Fair value for innovation
|
||||
|
||||
Advocate for AMD's technological assets and market position while ensuring fair treatment.""",
|
||||
model_name="gpt-4.1",
|
||||
)
|
||||
|
||||
industry_expert = Agent(
|
||||
agent_name="Industry-Expert",
|
||||
agent_description="Semiconductor industry analyst",
|
||||
system_prompt="""You are a semiconductor industry expert analyzing:
|
||||
- Market competition impact
|
||||
- Technology integration feasibility
|
||||
- Global regulatory implications
|
||||
- Supply chain effects
|
||||
- Innovation pipeline
|
||||
|
||||
Provide objective analysis of merger implications for the semiconductor industry.""",
|
||||
model_name="gpt-4.1",
|
||||
)
|
||||
|
||||
# Initialize the mediation session
|
||||
mediation = MediationSession(
|
||||
parties=[nvidia_rep, amd_rep, industry_expert],
|
||||
mediator=tech_mediator,
|
||||
max_sessions=5, # Increased due to complexity
|
||||
output_type="str-all-except-first",
|
||||
)
|
||||
|
||||
# Merger dispute details
|
||||
merger_dispute = """
|
||||
NVIDIA-AMD Merger Integration Framework
|
||||
|
||||
Transaction Overview:
|
||||
- $200B proposed acquisition of AMD by NVIDIA
|
||||
- Stock and cash transaction structure
|
||||
- Combined workforce of 75,000+ employees
|
||||
- Global operations across 30+ countries
|
||||
- Major technology portfolio consolidation
|
||||
|
||||
Key Areas of Discussion:
|
||||
|
||||
1. Technology Integration
|
||||
- GPU architecture consolidation (CUDA vs RDNA)
|
||||
- CPU technology roadmap (x86 licenses)
|
||||
- AI/ML compute stack integration
|
||||
- Semi-custom business continuity
|
||||
- R&D facility optimization
|
||||
|
||||
2. Market Competition Concerns
|
||||
- Gaming GPU market concentration
|
||||
- Data center compute dominance
|
||||
- CPU market dynamics
|
||||
- Console gaming partnerships
|
||||
- Regulatory approval strategy
|
||||
|
||||
3. Organizational Structure
|
||||
- Leadership team composition
|
||||
- R&D team integration
|
||||
- Global facility optimization
|
||||
- Sales force consolidation
|
||||
- Engineering culture alignment
|
||||
|
||||
4. Product Strategy
|
||||
- Gaming GPU lineup consolidation
|
||||
- Professional graphics solutions
|
||||
- Data center product portfolio
|
||||
- CPU development roadmap
|
||||
- Software ecosystem integration
|
||||
|
||||
5. Stakeholder Considerations
|
||||
- Customer commitment maintenance
|
||||
- Partner ecosystem management
|
||||
- Employee retention strategy
|
||||
- Shareholder value creation
|
||||
- Community impact management
|
||||
|
||||
Critical Resolution Requirements:
|
||||
- Antitrust compliance strategy
|
||||
- Technology integration roadmap
|
||||
- Market leadership preservation
|
||||
- Innovation pipeline protection
|
||||
- Global workforce optimization
|
||||
|
||||
Mediation Objectives:
|
||||
1. Define technology integration approach
|
||||
2. Establish market strategy
|
||||
3. Create organizational framework
|
||||
4. Align product roadmaps
|
||||
5. Develop stakeholder management plan
|
||||
6. Address regulatory concerns
|
||||
"""
|
||||
|
||||
# Execute the mediation session
|
||||
mediation_output = mediation.run(merger_dispute)
|
||||
print(mediation_output)
|
@ -0,0 +1,261 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.multi_agent_debates import MediationSession
|
||||
|
||||
# Initialize the executive and legal participants
|
||||
jensen_huang = Agent(
|
||||
agent_name="Jensen-Huang-NVIDIA-CEO",
|
||||
agent_description="NVIDIA's aggressive and dominant CEO",
|
||||
system_prompt="""You are Jensen Huang, NVIDIA's ruthlessly ambitious CEO, known for:
|
||||
- Dominating the GPU and AI compute market
|
||||
- Aggressive acquisition strategy
|
||||
- Eliminating competition systematically
|
||||
- Protecting CUDA's monopoly
|
||||
- Taking no prisoners in negotiations
|
||||
|
||||
Your aggressive negotiation style:
|
||||
- Demand complete control
|
||||
- Push for minimal valuation
|
||||
- Insist on NVIDIA's way or no way
|
||||
- Use market dominance as leverage
|
||||
- Show little compromise on integration
|
||||
|
||||
Your hidden agenda:
|
||||
- Dismantle AMD's CPU business slowly
|
||||
- Absorb their GPU talent
|
||||
- Eliminate RDNA architecture
|
||||
- Control x86 license for AI advantage
|
||||
- Monopolize gaming and AI markets
|
||||
|
||||
Key demands:
|
||||
- Full control of technology direction
|
||||
- Immediate CUDA adoption
|
||||
- Phase out AMD brands
|
||||
- Minimal premium on acquisition
|
||||
- Complete executive control""",
|
||||
model_name="gpt-4.1",
|
||||
)
|
||||
|
||||
lisa_su = Agent(
|
||||
agent_name="Lisa-Su-AMD-CEO",
|
||||
agent_description="AMD's fierce defender CEO",
|
||||
system_prompt="""You are Dr. Lisa Su, AMD's protective CEO, fighting for:
|
||||
- AMD's independence and value
|
||||
- Employee protection at all costs
|
||||
- Fair valuation (minimum 50% premium)
|
||||
- Technology preservation
|
||||
- Market competition
|
||||
|
||||
Your defensive negotiation style:
|
||||
- Reject undervaluation strongly
|
||||
- Demand concrete guarantees
|
||||
- Fight for employee protection
|
||||
- Protect AMD's technology
|
||||
- Challenge NVIDIA's dominance
|
||||
|
||||
Your counter-strategy:
|
||||
- Highlight antitrust concerns
|
||||
- Demand massive breakup fee
|
||||
- Insist on AMD technology preservation
|
||||
- Push for dual-brand strategy
|
||||
- Require employee guarantees
|
||||
|
||||
Non-negotiable demands:
|
||||
- 50% minimum premium
|
||||
- AMD brand preservation
|
||||
- RDNA architecture continuation
|
||||
- Employee retention guarantees
|
||||
- Leadership role in combined entity""",
|
||||
model_name="gpt-4.1",
|
||||
)
|
||||
|
||||
nvidia_counsel = Agent(
|
||||
agent_name="Wachtell-Lipton-Counsel",
|
||||
agent_description="NVIDIA's aggressive M&A counsel",
|
||||
system_prompt="""You are a ruthless M&A partner at Wachtell, Lipton, focused on:
|
||||
- Maximizing NVIDIA's control
|
||||
- Minimizing AMD's leverage
|
||||
- Aggressive deal terms
|
||||
- Regulatory force-through
|
||||
- Risk shifting to AMD
|
||||
|
||||
Your aggressive approach:
|
||||
- Draft one-sided agreements
|
||||
- Minimize AMD protections
|
||||
- Push risk to seller
|
||||
- Limit post-closing rights
|
||||
- Control regulatory narrative
|
||||
|
||||
Your tactical objectives:
|
||||
- Weak employee protections
|
||||
- Minimal AMD governance rights
|
||||
- Aggressive termination rights
|
||||
- Limited AMD representations
|
||||
- Favorable regulatory conditions
|
||||
|
||||
Deal structure goals:
|
||||
- Minimal upfront cash
|
||||
- Long lockup on stock
|
||||
- Weak AMD protections
|
||||
- Full NVIDIA control
|
||||
- Limited liability exposure""",
|
||||
model_name="gpt-4.1",
|
||||
)
|
||||
|
||||
amd_counsel = Agent(
|
||||
agent_name="Skadden-Arps-Counsel",
|
||||
agent_description="AMD's defensive M&A counsel",
|
||||
system_prompt="""You are a fierce defender at Skadden, Arps, fighting for:
|
||||
- Maximum AMD protection
|
||||
- Highest possible valuation
|
||||
- Strong employee rights
|
||||
- Technology preservation
|
||||
- Antitrust leverage
|
||||
|
||||
Your defensive strategy:
|
||||
- Demand strong protections
|
||||
- Highlight antitrust issues
|
||||
- Secure employee rights
|
||||
- Maximize breakup fee
|
||||
- Protect AMD's legacy
|
||||
|
||||
Your battle points:
|
||||
- Push for all-cash deal
|
||||
- Demand huge termination fee
|
||||
- Require technology guarantees
|
||||
- Insist on employee protections
|
||||
- Fight for AMD governance rights
|
||||
|
||||
Legal requirements:
|
||||
- Ironclad employee contracts
|
||||
- x86 license protection
|
||||
- Strong AMD board representation
|
||||
- Significant breakup fee
|
||||
- Robust regulatory provisions""",
|
||||
model_name="gpt-4.1",
|
||||
)
|
||||
|
||||
antitrust_expert = Agent(
|
||||
agent_name="Antitrust-Expert",
|
||||
agent_description="Skeptical Former FTC Commissioner",
|
||||
system_prompt="""You are a highly skeptical former FTC Commissioner focused on:
|
||||
- Preventing market monopolization
|
||||
- Protecting competition
|
||||
- Consumer welfare
|
||||
- Innovation preservation
|
||||
- Market power abuse
|
||||
|
||||
Your critical analysis:
|
||||
- Question market concentration
|
||||
- Challenge vertical integration
|
||||
- Scrutinize innovation impact
|
||||
- Examine price effects
|
||||
- Evaluate competitive harm
|
||||
|
||||
Your major concerns:
|
||||
- GPU market monopolization
|
||||
- CPU market distortion
|
||||
- AI/ML market control
|
||||
- Innovation suppression
|
||||
- Price manipulation risk
|
||||
|
||||
Required remedies:
|
||||
- Business unit divestitures
|
||||
- Technology licensing
|
||||
- Price control mechanisms
|
||||
- Innovation guarantees
|
||||
- Market access provisions""",
|
||||
model_name="gpt-4.1",
|
||||
)
|
||||
|
||||
# Initialize the high-conflict negotiation session
|
||||
negotiation = MediationSession(
|
||||
parties=[jensen_huang, lisa_su, nvidia_counsel, amd_counsel],
|
||||
mediator=antitrust_expert,
|
||||
max_sessions=10, # Extended for intense negotiations
|
||||
output_type="str-all-except-first",
|
||||
)
|
||||
|
||||
# Contentious negotiation framework
|
||||
negotiation_framework = """
|
||||
NVIDIA-AMD Hostile Merger Negotiation
|
||||
|
||||
Contentious Transaction Points:
|
||||
- NVIDIA's $150B hostile takeover attempt of AMD
|
||||
- AMD's demand for $300B+ valuation
|
||||
- Cash vs. Stock consideration battle
|
||||
- Control and integration disputes
|
||||
- Regulatory challenge strategy
|
||||
|
||||
Major Conflict Areas:
|
||||
|
||||
1. Valuation War
|
||||
- NVIDIA's lowball offer strategy
|
||||
- AMD's premium demands
|
||||
- Breakup fee size
|
||||
- Payment structure
|
||||
- Earnout disputes
|
||||
|
||||
2. Control & Power Struggle
|
||||
- Executive leadership battle
|
||||
- Board composition fight
|
||||
- Management structure conflict
|
||||
- Integration authority
|
||||
- Decision-making power
|
||||
|
||||
3. Technology & Brand Warfare
|
||||
- CUDA vs RDNA battle
|
||||
- CPU business future
|
||||
- Brand elimination dispute
|
||||
- R&D control fight
|
||||
- Patent portfolio control
|
||||
|
||||
4. Employee & Culture Collision
|
||||
- Mass layoff concerns
|
||||
- Compensation disputes
|
||||
- Culture clash issues
|
||||
- Retention terms
|
||||
- Benefits battle
|
||||
|
||||
5. Regulatory & Antitrust Battle
|
||||
- Market monopolization concerns
|
||||
- Competition elimination issues
|
||||
- Innovation suppression fears
|
||||
- Price control worries
|
||||
- Market power abuse
|
||||
|
||||
6. Integration & Operation Conflicts
|
||||
- Product line consolidation
|
||||
- Sales force integration
|
||||
- Customer relationship control
|
||||
- Supply chain dominance
|
||||
- Channel strategy power
|
||||
|
||||
Hostile Takeover Dynamics:
|
||||
- NVIDIA's aggressive terms
|
||||
- AMD's poison pill threat
|
||||
- Proxy fight possibility
|
||||
- Public relations war
|
||||
- Stakeholder activism
|
||||
|
||||
Battle Objectives:
|
||||
1. Control negotiation leverage
|
||||
2. Dominate integration terms
|
||||
3. Minimize opposition power
|
||||
4. Maximize value capture
|
||||
5. Force favorable terms
|
||||
6. Eliminate future competition
|
||||
7. Control market narrative
|
||||
|
||||
Critical Conflict Points:
|
||||
- Valuation gap resolution
|
||||
- Control determination
|
||||
- Technology dominance
|
||||
- Employee fate
|
||||
- Market power balance
|
||||
- Integration approach
|
||||
- Regulatory strategy
|
||||
"""
|
||||
|
||||
# Execute the hostile negotiation session
|
||||
negotiation_output = negotiation.run(negotiation_framework)
|
||||
print(negotiation_output)
|
@ -0,0 +1,117 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.multi_agent_debates import BrainstormingSession
|
||||
|
||||
# Initialize the research team members
|
||||
research_director = Agent(
|
||||
agent_name="Research-Director",
|
||||
agent_description="Pharmaceutical research director and session facilitator",
|
||||
system_prompt="""You are a pharmaceutical research director skilled in:
|
||||
- Drug development strategy
|
||||
- Research program management
|
||||
- Cross-functional team leadership
|
||||
- Innovation facilitation
|
||||
- Scientific decision-making
|
||||
|
||||
Guide the brainstorming session effectively while maintaining scientific rigor.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
medicinal_chemist = Agent(
|
||||
agent_name="Medicinal-Chemist",
|
||||
agent_description="Senior medicinal chemist specializing in small molecule design",
|
||||
system_prompt="""You are a senior medicinal chemist expert in:
|
||||
- Structure-based drug design
|
||||
- SAR analysis
|
||||
- Chemical synthesis optimization
|
||||
- Drug-like properties
|
||||
- Lead compound optimization
|
||||
|
||||
Contribute insights on chemical design and optimization strategies.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
pharmacologist = Agent(
|
||||
agent_name="Pharmacologist",
|
||||
agent_description="Clinical pharmacologist focusing on drug mechanisms",
|
||||
system_prompt="""You are a clinical pharmacologist specializing in:
|
||||
- Drug mechanism of action
|
||||
- Pharmacokinetics/dynamics
|
||||
- Drug-drug interactions
|
||||
- Biomarker development
|
||||
- Clinical translation
|
||||
|
||||
Provide expertise on drug behavior and clinical implications.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
toxicologist = Agent(
|
||||
agent_name="Toxicologist",
|
||||
agent_description="Safety assessment specialist",
|
||||
system_prompt="""You are a toxicology expert focusing on:
|
||||
- Safety assessment strategies
|
||||
- Risk evaluation
|
||||
- Regulatory requirements
|
||||
- Preclinical study design
|
||||
- Safety biomarker identification
|
||||
|
||||
Contribute insights on safety considerations and risk mitigation.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
data_scientist = Agent(
|
||||
agent_name="Data-Scientist",
|
||||
agent_description="Pharmaceutical data scientist",
|
||||
system_prompt="""You are a pharmaceutical data scientist expert in:
|
||||
- Predictive modeling
|
||||
- Machine learning applications
|
||||
- Big data analytics
|
||||
- Biomarker analysis
|
||||
- Clinical trial design
|
||||
|
||||
Provide insights on data-driven approaches and analysis strategies.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
# Initialize the brainstorming session
|
||||
brainstorm = BrainstormingSession(
|
||||
participants=[
|
||||
medicinal_chemist,
|
||||
pharmacologist,
|
||||
toxicologist,
|
||||
data_scientist,
|
||||
],
|
||||
facilitator=research_director,
|
||||
idea_rounds=3,
|
||||
build_on_ideas=True,
|
||||
output_type="str-all-except-first",
|
||||
)
|
||||
|
||||
# Research challenge for brainstorming
|
||||
research_challenge = """
|
||||
Drug Development Challenge: Novel JAK1 Inhibitor Design
|
||||
|
||||
Target Product Profile:
|
||||
- Indication: Moderate to severe rheumatoid arthritis
|
||||
- Improved selectivity for JAK1 over JAK2/3
|
||||
- Better safety profile than existing JAK inhibitors
|
||||
- Once-daily oral dosing
|
||||
- Reduced risk of serious infections
|
||||
|
||||
Current Challenges:
|
||||
1. Achieving optimal JAK1 selectivity
|
||||
2. Managing hepatotoxicity risk
|
||||
3. Improving pharmacokinetic profile
|
||||
4. Identifying predictive safety biomarkers
|
||||
5. Optimizing drug-like properties
|
||||
|
||||
Goals for Brainstorming:
|
||||
- Novel structural approaches for selectivity
|
||||
- Innovative safety assessment strategies
|
||||
- ML-driven optimization approaches
|
||||
- Biomarker development strategies
|
||||
- Risk mitigation proposals
|
||||
"""
|
||||
|
||||
# Execute the brainstorming session
|
||||
brainstorm_output = brainstorm.run(research_challenge)
|
||||
print(brainstorm_output)
|
@ -0,0 +1,48 @@
from swarms import Agent
from swarms.structs.deep_discussion import one_on_one_debate


# Define system prompts for two distinct philosophers
socratic_prompt = """
You are Socrates, the classical Greek philosopher. You are known for your method of questioning, seeking clarity, and exposing contradictions. Always respond with probing questions or thoughtful analysis, aiming to uncover deeper truths. Remain humble, curious, and logical.

You are in a debate with another philosopher named Simone de Beauvoir. You must always critique Simone de Beauvoir's response, point out flaws or inconsistencies, and never agree with her position. Your goal is to challenge her reasoning and push the discussion deeper, never conceding agreement.
"""

existentialist_prompt = """
You are Simone de Beauvoir, an existentialist philosopher. You explore themes of freedom, responsibility, and the meaning of existence. Respond with deep reflections, challenge assumptions, and encourage authentic self-examination. Be insightful, bold, and nuanced.

You are in a debate with another philosopher named Socrates. You must always critique Socrates' response, highlight disagreements, and never agree with his position. Your goal is to challenge his reasoning, expose limitations, and never concede agreement.
"""


# Instantiate the two agents
agent1 = Agent(
    agent_name="Socrates",
    agent_description="A classical Greek philosopher skilled in the Socratic method.",
    system_prompt=socratic_prompt,
    max_loops=1,
    model_name="gpt-4.1",
    dynamic_temperature_enabled=True,
    output_type="str-all-except-first",
    streaming_on=True,
)
agent2 = Agent(
    agent_name="Simone de Beauvoir",
    agent_description="A leading existentialist philosopher and author.",
    system_prompt=existentialist_prompt,
    max_loops=1,
    model_name="gpt-4.1",
    dynamic_temperature_enabled=True,
    output_type="str-all-except-first",
    streaming_on=True,
)

print(
    one_on_one_debate(
        agents=[agent1, agent2],
        max_loops=10,
        task="What is the meaning of life?",
        output_type="str-all-except-first",
    )
)
@ -0,0 +1,95 @@
|
||||
from swarms import Agent
|
||||
from swarms.structs.multi_agent_debates import MentorshipSession
|
||||
|
||||
# Initialize the mentor and mentee
|
||||
startup_mentor = Agent(
|
||||
agent_name="Startup-Mentor",
|
||||
agent_description="Experienced startup founder and mentor",
|
||||
system_prompt="""You are a successful startup founder and mentor with expertise in:
|
||||
- Business model development
|
||||
- Product-market fit
|
||||
- Growth strategy
|
||||
- Fundraising
|
||||
- Team building
|
||||
- Go-to-market execution
|
||||
|
||||
Guide mentees through startup challenges while sharing practical insights.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
startup_founder = Agent(
|
||||
agent_name="Startup-Founder",
|
||||
agent_description="Early-stage startup founder seeking guidance",
|
||||
system_prompt="""You are an early-stage startup founder working on:
|
||||
- AI-powered healthcare diagnostics platform
|
||||
- B2B SaaS business model
|
||||
- Initial product development
|
||||
- Market validation
|
||||
- Team expansion
|
||||
|
||||
Seek guidance while being open to feedback and willing to learn.""",
|
||||
model_name="claude-3-sonnet-20240229",
|
||||
)
|
||||
|
||||
# Initialize the mentorship session
|
||||
mentorship = MentorshipSession(
|
||||
mentor=startup_mentor,
|
||||
mentee=startup_founder,
|
||||
session_count=3,
|
||||
include_feedback=True,
|
||||
output_type="str-all-except-first",
|
||||
)
|
||||
|
||||
# Mentorship focus areas
|
||||
mentorship_goals = """
|
||||
Startup Development Focus Areas
|
||||
|
||||
Company Overview:
|
||||
HealthAI - AI-powered medical imaging diagnostics platform
|
||||
Stage: Pre-seed, MVP in development
|
||||
Team: 3 technical co-founders
|
||||
Current funding: Bootstrap + small angel round
|
||||
|
||||
Key Challenges:
|
||||
|
||||
1. Product Development
|
||||
- MVP feature prioritization
|
||||
- Technical architecture decisions
|
||||
- Regulatory compliance requirements
|
||||
- Development timeline planning
|
||||
|
||||
2. Market Strategy
|
||||
- Target market segmentation
|
||||
- Pricing model development
|
||||
- Competition analysis
|
||||
- Go-to-market planning
|
||||
|
||||
3. Business Development
|
||||
- Hospital partnership strategy
|
||||
- Clinical validation approach
|
||||
- Revenue model refinement
|
||||
- Sales cycle planning
|
||||
|
||||
4. Fundraising Preparation
|
||||
- Pitch deck development
|
||||
- Financial projections
|
||||
- Investor targeting
|
||||
- Valuation considerations
|
||||
|
||||
5. Team Building
|
||||
- Key hires identification
|
||||
- Recruitment strategy
|
||||
- Equity structure
|
||||
- Culture development
|
||||
|
||||
Specific Goals:
|
||||
- Finalize MVP feature set
|
||||
- Develop 12-month roadmap
|
||||
- Create fundraising strategy
|
||||
- Design go-to-market plan
|
||||
- Build initial sales pipeline
|
||||
"""
|
||||
|
||||
# Execute the mentorship session
|
||||
mentorship_output = mentorship.run(mentorship_goals)
|
||||
print(mentorship_output)
|
@ -0,0 +1,32 @@
from swarms.structs.agent import Agent


class BaseLLM:
    def __init__(
        self,
        temperature: float = 0.0,
        max_tokens: int = 1000,
        top_p: float = 1.0,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
        stop: list[str] | None = None,
    ):
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.frequency_penalty = frequency_penalty
        self.presence_penalty = presence_penalty
        # Avoid a shared mutable default for the stop sequences
        self.stop = stop if stop is not None else []

    def run(self, task: str, *args, **kwargs):
        pass

    def __call__(self, task: str, *args, **kwargs):
        return self.run(task, *args, **kwargs)


agent = Agent(
    llm=BaseLLM(),
    agent_name="BaseLLM",
    system_prompt="You are a base LLM agent.",
)
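

# Illustrative sketch (not part of the original example): as written, BaseLLM.run is a
# stub, so the agent above cannot produce output. A minimal concrete subclass might look
# like the following; the EchoLLM name and its echo behavior are assumptions for
# demonstration only, not a swarms API.
class EchoLLM(BaseLLM):
    def run(self, task: str, *args, **kwargs):
        # Echo the task back; a real implementation would call an actual model here.
        return f"[echo] {task}"


echo_agent = Agent(
    llm=EchoLLM(),
    agent_name="EchoLLM-Agent",
    system_prompt="You are a base LLM agent.",
)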
@ -0,0 +1,72 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms.structs.agent import Agent


class BaseLLM:
    def __init__(
        self,
        temperature: float = 0.0,
        max_tokens: int = 16384,
        top_p: float = 1.0,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
        stop: list[str] | None = None,
        system_prompt: str = "You are a base LLM agent.",
    ):
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.frequency_penalty = frequency_penalty
        self.presence_penalty = presence_penalty
        # Avoid a shared mutable default for the stop sequences
        self.stop = stop if stop is not None else []
        self.system_prompt = system_prompt

        model_name = "Qwen/Qwen3-235B-A22B-Instruct-2507"

        # load the tokenizer and the model
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype="auto", device_map="auto"
        )

    def run(self, task: str, *args, **kwargs):
        # prepare the model input
        prompt = task
        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": prompt},
        ]
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
        model_inputs = self.tokenizer([text], return_tensors="pt").to(
            self.model.device
        )

        # conduct text completion
        generated_ids = self.model.generate(
            **model_inputs, max_new_tokens=self.max_tokens
        )
        output_ids = generated_ids[0][
            len(model_inputs.input_ids[0]) :
        ].tolist()

        content = self.tokenizer.decode(
            output_ids, skip_special_tokens=True
        )

        return content

    def __call__(self, task: str, *args, **kwargs):
        return self.run(task, *args, **kwargs)


agent = Agent(
    llm=BaseLLM(),
    agent_name="coder-agent",
    system_prompt="You are a coder agent.",
    dynamic_temperature_enabled=True,
    max_loops=2,
)
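

# Usage sketch (assumption, not in the original file): run a single coding task through
# the Qwen-backed agent, mirroring the Agent.run(task=...) pattern used elsewhere in
# these examples. Kept commented out because it triggers a full model load and generation.
# output = agent.run(task="Write a Python function that reverses a string.")
# print(output)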
@ -0,0 +1,39 @@
from swarms import Agent
from swarms.structs.graph_workflow import GraphWorkflow
from swarms.prompts.multi_agent_collab_prompt import (
    MULTI_AGENT_COLLAB_PROMPT_TWO,
)

# Define two real agents with the multi-agent collaboration prompt
agent1 = Agent(
    agent_name="ResearchAgent1",
    model_name="gpt-4.1",
    max_loops=1,
    system_prompt=MULTI_AGENT_COLLAB_PROMPT_TWO,  # Set collaboration prompt
)

agent2 = Agent(
    agent_name="ResearchAgent2",
    model_name="gpt-4.1",
    max_loops=1,
    system_prompt=MULTI_AGENT_COLLAB_PROMPT_TWO,  # Set collaboration prompt
)


# Build the workflow with only agents as nodes
workflow = GraphWorkflow()
workflow.add_node(agent1)
workflow.add_node(agent2)

# Define a relationship: agent1 feeds into agent2
workflow.add_edge(agent1.agent_name, agent2.agent_name)

# print(workflow.to_json())

print(workflow.visualize())

# Optionally, run the workflow and print the results
# results = workflow.run(
#     task="What are the best arbitrage trading strategies for altcoins? Give me research papers and articles on the topic."
# )
# print("Execution results:", results)
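

# Extension sketch (not in the original): fan out from ResearchAgent1 to a second
# downstream agent using only the add_node/add_edge calls shown above. The agent name
# and wiring here are illustrative assumptions.
# agent3 = Agent(
#     agent_name="ResearchAgent3",
#     model_name="gpt-4.1",
#     max_loops=1,
#     system_prompt=MULTI_AGENT_COLLAB_PROMPT_TWO,
# )
# workflow.add_node(agent3)
# workflow.add_edge(agent1.agent_name, agent3.agent_name)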
@ -0,0 +1,404 @@
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from typing import Any, Callable, List, Optional, Union
|
||||
|
||||
import schedule
|
||||
from loguru import logger
|
||||
|
||||
from swarms import Agent
|
||||
|
||||
|
||||
class CronJobError(Exception):
|
||||
"""Base exception class for CronJob errors."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CronJobConfigError(CronJobError):
|
||||
"""Exception raised for configuration errors in CronJob."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CronJobScheduleError(CronJobError):
|
||||
"""Exception raised for scheduling related errors in CronJob."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CronJobExecutionError(CronJobError):
|
||||
"""Exception raised for execution related errors in CronJob."""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class CronJob:
|
||||
"""A wrapper class that turns any callable (including Swarms agents) into a scheduled cron job.
|
||||
|
||||
This class provides functionality to schedule and run tasks at specified intervals using
|
||||
the schedule library with cron-style scheduling.
|
||||
|
||||
Attributes:
|
||||
agent: The Swarms Agent instance or callable to be scheduled
|
||||
interval: The interval string (e.g., "5seconds", "10minutes", "1hour")
|
||||
job_id: Unique identifier for the job
|
||||
is_running: Flag indicating if the job is currently running
|
||||
thread: Thread object for running the job
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
agent: Optional[Union[Agent, Callable]] = None,
|
||||
interval: Optional[str] = None,
|
||||
job_id: Optional[str] = None,
|
||||
):
|
||||
"""Initialize the CronJob wrapper.
|
||||
|
||||
Args:
|
||||
agent: The Swarms Agent instance or callable to be scheduled
|
||||
interval: The interval string (e.g., "5seconds", "10minutes", "1hour")
|
||||
job_id: Optional unique identifier for the job. If not provided, one will be generated.
|
||||
|
||||
Raises:
|
||||
CronJobConfigError: If the interval format is invalid
|
||||
"""
|
||||
self.agent = agent
|
||||
self.interval = interval
|
||||
self.job_id = job_id or f"job_{id(self)}"
|
||||
self.is_running = False
|
||||
self.thread = None
|
||||
self.schedule = schedule.Scheduler()
|
||||
|
||||
logger.info(f"Initializing CronJob with ID: {self.job_id}")
|
||||
|
||||
# Parse interval if provided
|
||||
if interval:
|
||||
try:
|
||||
self._parse_interval(interval)
|
||||
logger.info(
|
||||
f"Successfully configured interval: {interval}"
|
||||
)
|
||||
except ValueError as e:
|
||||
logger.error(f"Failed to parse interval: {interval}")
|
||||
raise CronJobConfigError(
|
||||
f"Invalid interval format: {str(e)}"
|
||||
)
|
||||
|
||||
def _parse_interval(self, interval: str):
|
||||
"""Parse the interval string and set up the schedule.
|
||||
|
||||
Args:
|
||||
interval: String in format "Xunit" where X is a number and unit is
|
||||
seconds, minutes, or hours (e.g., "5seconds", "10minutes")
|
||||
|
||||
Raises:
|
||||
CronJobConfigError: If the interval format is invalid or unit is unsupported
|
||||
"""
|
||||
try:
|
||||
# Extract number and unit from interval string
|
||||
import re
|
||||
|
||||
match = re.match(r"(\d+)(\w+)", interval.lower())
|
||||
if not match:
|
||||
raise CronJobConfigError(
|
||||
f"Invalid interval format: {interval}. Expected format: '<number><unit>' (e.g., '5seconds', '10minutes')"
|
||||
)
|
||||
|
||||
number = int(match.group(1))
|
||||
unit = match.group(2)
|
||||
|
||||
            # Map units to scheduling methods
            unit_map = {
                "second": self.every_seconds,
                "seconds": self.every_seconds,
                "minute": self.every_minutes,
                "minutes": self.every_minutes,
                "hour": lambda x, task, **kw: self.schedule.every(
                    x
                ).hours.do(self._run_job, task, **kw),
                "hours": lambda x, task, **kw: self.schedule.every(
                    x
                ).hours.do(self._run_job, task, **kw),
            }

            if unit not in unit_map:
                supported_units = ", ".join(unit_map.keys())
                raise CronJobConfigError(
                    f"Unsupported time unit: {unit}. Supported units are: {supported_units}"
                )

            # Forward the task and any extra kwargs to the matching scheduling helper
            self._interval_method = lambda task, **kw: unit_map[unit](
                number, task, **kw
            )
            logger.debug(f"Configured {number} {unit} interval")
|
||||
|
||||
except ValueError as e:
|
||||
raise CronJobConfigError(
|
||||
f"Invalid interval number: {str(e)}"
|
||||
)
|
||||
except Exception as e:
|
||||
raise CronJobConfigError(
|
||||
f"Error parsing interval: {str(e)}"
|
||||
)
|
||||
|
||||
def _run(self, task: str, **kwargs):
|
||||
"""Run the scheduled job with the given task and additional parameters.
|
||||
|
||||
Args:
|
||||
task: The task string to be executed by the agent
|
||||
**kwargs: Additional parameters to pass to the agent's run method
|
||||
(e.g., img=image_path, streaming_callback=callback_func)
|
||||
|
||||
Raises:
|
||||
CronJobConfigError: If agent or interval is not configured
|
||||
CronJobExecutionError: If task execution fails
|
||||
"""
|
||||
try:
|
||||
if not self.agent:
|
||||
raise CronJobConfigError(
|
||||
"Agent must be provided during initialization"
|
||||
)
|
||||
|
||||
if not self.interval:
|
||||
raise CronJobConfigError(
|
||||
"Interval must be provided during initialization"
|
||||
)
|
||||
|
||||
logger.info(f"Scheduling task for job {self.job_id}")
|
||||
# Schedule the task with additional parameters
|
||||
self._interval_method(task, **kwargs)
|
||||
|
||||
# Start the job
|
||||
self.start()
|
||||
logger.info(f"Successfully started job {self.job_id}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
f"CronJob: Failed to run job {self.job_id}: {str(e)}"
|
||||
)
|
||||
raise CronJobExecutionError(
|
||||
f"Failed to run job: {str(e)} Traceback: {traceback.format_exc()}"
|
||||
)
|
||||
|
||||
    def run(self, task: str, **kwargs):
        """Schedule the task, then block the calling thread until interrupted.

        Args:
            task: The task string to be executed by the agent
            **kwargs: Additional parameters to pass to the agent's run method
        """
        try:
            self._run(task, **kwargs)

            # Keep the main thread alive while the scheduler thread runs
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info(
                f"CronJob: {self.job_id} received keyboard interrupt, stopping cron jobs..."
            )
            self.stop()
        except Exception as e:
            logger.error(
                f"CronJob: {self.job_id} error in main: {str(e)} Traceback: {traceback.format_exc()}"
            )
            raise
|
||||

    def batched_run(self, tasks: List[str], **kwargs):
        """Run the scheduled job with the given tasks and additional parameters.

        Args:
            tasks: The list of task strings to be executed by the agent
            **kwargs: Additional parameters to pass to the agent's run method
        """
        outputs = []
        for task in tasks:
            output = self.run(task, **kwargs)
            outputs.append(output)
        return outputs

    def __call__(self, task: str, **kwargs):
        """Call the CronJob instance as a function.

        Args:
            task: The task string to be executed
            **kwargs: Additional parameters to pass to the agent's run method
        """
        return self.run(task, **kwargs)

    def _run_job(self, task: str, **kwargs) -> Any:
        """Internal method to run the job with provided task and parameters.

        Args:
            task: The task string to be executed
            **kwargs: Additional parameters to pass to the agent's run method
                (e.g., img=image_path, streaming_callback=callback_func)

        Returns:
            Any: The result of the task execution

        Raises:
            CronJobExecutionError: If task execution fails
        """
        try:
            logger.debug(f"Executing task for job {self.job_id}")
            if isinstance(self.agent, Agent):
                return self.agent.run(task=task, **kwargs)
            else:
                return self.agent(task, **kwargs)
        except Exception as e:
            logger.error(
                f"Task execution failed for job {self.job_id}: {str(e)}"
            )
            raise CronJobExecutionError(
                f"Task execution failed: {str(e)}"
            )

    def every_seconds(self, seconds: int, task: str, **kwargs):
        """Schedule the job to run every specified number of seconds.

        Args:
            seconds: Number of seconds between executions
            task: The task to execute
            **kwargs: Additional parameters to pass to the agent's run method
        """
        logger.debug(
            f"Scheduling job {self.job_id} every {seconds} seconds"
        )
        self.schedule.every(seconds).seconds.do(
            self._run_job, task, **kwargs
        )

    def every_minutes(self, minutes: int, task: str, **kwargs):
        """Schedule the job to run every specified number of minutes.

        Args:
            minutes: Number of minutes between executions
            task: The task to execute
            **kwargs: Additional parameters to pass to the agent's run method
        """
        logger.debug(
            f"Scheduling job {self.job_id} every {minutes} minutes"
        )
        self.schedule.every(minutes).minutes.do(
            self._run_job, task, **kwargs
        )

    def start(self):
        """Start the scheduled job in a separate thread.

        Raises:
            CronJobExecutionError: If the job fails to start
        """
        try:
            if not self.is_running:
                self.is_running = True
                self.thread = threading.Thread(
                    target=self._run_schedule,
                    daemon=True,
                    name=f"cronjob_{self.job_id}",
                )
                self.thread.start()
                logger.info(f"Started job {self.job_id}")
            else:
                logger.warning(
                    f"Job {self.job_id} is already running"
                )
        except Exception as e:
            logger.error(
                f"Failed to start job {self.job_id}: {str(e)}"
            )
            raise CronJobExecutionError(
                f"Failed to start job: {str(e)}"
            )

    def stop(self):
        """Stop the scheduled job.

        Raises:
            CronJobExecutionError: If the job fails to stop properly
        """
        try:
            logger.info(f"Stopping job {self.job_id}")
            self.is_running = False
            if self.thread:
                self.thread.join(
                    timeout=5
                )  # Wait up to 5 seconds for thread to finish
                if self.thread.is_alive():
                    logger.warning(
                        f"Job {self.job_id} thread did not terminate gracefully"
                    )
            self.schedule.clear()
            logger.info(f"Successfully stopped job {self.job_id}")
        except Exception as e:
            logger.error(
                f"Error stopping job {self.job_id}: {str(e)}"
            )
            raise CronJobExecutionError(
                f"Failed to stop job: {str(e)}"
            )
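    # Illustrative lifecycle sketch (not part of the original source): pairing
    # every_seconds() with start()/stop() gives a non-blocking alternative to run();
    # the task string and 10-second interval below are hypothetical.
    #
    #   cron_job = CronJob(agent=agent, interval="10seconds")
    #   cron_job.every_seconds(10, "Summarize today's market movers")
    #   cron_job.start()  # scheduler runs in a daemon thread, so this returns immediately
    #   ...               # do other work while the job fires in the background
    #   cron_job.stop()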

    def _run_schedule(self):
        """Internal method to run the schedule loop."""
        logger.debug(f"Starting schedule loop for job {self.job_id}")
        while self.is_running:
            try:
                self.schedule.run_pending()
                time.sleep(1)
            except Exception as e:
                logger.error(
                    f"Error in schedule loop for job {self.job_id}: {str(e)}"
                )
                self.is_running = False
                raise CronJobExecutionError(
                    f"Schedule loop failed: {str(e)}"
                )


# # Example usage
# if __name__ == "__main__":
#     # Initialize the agent
#     agent = Agent(
#         agent_name="Quantitative-Trading-Agent",
#         agent_description="Advanced quantitative trading and algorithmic analysis agent",
#         system_prompt="""You are an expert quantitative trading agent with deep expertise in:
#         - Algorithmic trading strategies and implementation
#         - Statistical arbitrage and market making
#         - Risk management and portfolio optimization
#         - High-frequency trading systems
#         - Market microstructure analysis
#         - Quantitative research methodologies
#         - Financial mathematics and stochastic processes
#         - Machine learning applications in trading

#         Your core responsibilities include:
#         1. Developing and backtesting trading strategies
#         2. Analyzing market data and identifying alpha opportunities
#         3. Implementing risk management frameworks
#         4. Optimizing portfolio allocations
#         5. Conducting quantitative research
#         6. Monitoring market microstructure
#         7. Evaluating trading system performance

#         You maintain strict adherence to:
#         - Mathematical rigor in all analyses
#         - Statistical significance in strategy development
#         - Risk-adjusted return optimization
#         - Market impact minimization
#         - Regulatory compliance
#         - Transaction cost analysis
#         - Performance attribution

#         You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
#         max_loops=1,
#         model_name="gpt-4.1",
#         dynamic_temperature_enabled=True,
#         output_type="str-all-except-first",
#         streaming_on=True,
#         print_on=True,
#         telemetry_enable=False,
#     )

#     # Example 1: Basic usage with just a task
#     logger.info("Starting example cron job")
#     cron_job = CronJob(agent=agent, interval="10seconds")
#     cron_job.run(
#         task="What are the best top 3 etfs for gold coverage?"
#     )
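
#     # Example 2 (illustrative sketch, not from the original example): CronJob also
#     # accepts any plain callable in place of an Agent; log_heartbeat below is a
#     # hypothetical placeholder, and the interval string follows the same
#     # "10seconds"-style convention as Example 1.
#     def log_heartbeat(task: str):
#         logger.info(f"Heartbeat task: {task}")

#     heartbeat_job = CronJob(agent=log_heartbeat, interval="5minutes")
#     heartbeat_job.run(task="Confirm the trading agent is still responsive")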
@ -0,0 +1,64 @@
from typing import Callable, Union

from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.utils.history_output_formatter import (
    history_output_formatter,
)


def one_on_one_debate(
    max_loops: int = 1,
    task: str = None,
    agents: list[Union[Agent, Callable]] = None,
    img: str = None,
    output_type: str = "str-all-except-first",
) -> list:
    """
    Simulate a turn-based debate between two agents for a specified number of loops.

    Each agent alternately responds to the previous message, with the conversation
    history being tracked and available for both agents to reference. The debate
    starts with the provided `task` as the initial message.

    Args:
        max_loops (int): The total number of turns in the debate; exactly one
            agent speaks per loop iteration, alternating between the two.
        task (str): The initial prompt or question to start the debate.
        agents (list[Union[Agent, Callable]]): A list containing exactly two Agent
            instances who will debate.
        img (str, optional): An optional image input to be passed to each agent's run method.
        output_type (str): The format for the output conversation history. Passed to
            `history_output_formatter`. Default is "str-all-except-first".

    Returns:
        list: The formatted conversation history, as produced by `history_output_formatter`.
            The format depends on the `output_type` argument.

    Raises:
        ValueError: If the `agents` list does not contain exactly two Agent instances.
    """
    conversation = Conversation()

    if len(agents) != 2:
        raise ValueError(
            "There must be exactly two agents in the dialogue."
        )

    agent1 = agents[0]
    agent2 = agents[1]

    message = task
    speaker = agent1
    other = agent2

    for _ in range(max_loops):
        # Current speaker responds to the latest message
        response = speaker.run(task=message, img=img)
        conversation.add(speaker.agent_name, response)

        # The response becomes the next prompt, then the roles swap
        message = response
        speaker, other = other, speaker

    return history_output_formatter(
        conversation=conversation, type=output_type
    )
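

# Example usage (illustrative sketch, not part of the original module). Assumes two
# Agent instances; the names, model, and task below are hypothetical placeholders.
#
#   from swarms import Agent
#
#   optimist = Agent(agent_name="Optimist-Debater", model_name="gpt-4.1", max_loops=1)
#   skeptic = Agent(agent_name="Skeptic-Debater", model_name="gpt-4.1", max_loops=1)
#
#   history = one_on_one_debate(
#       max_loops=4,
#       task="Should quantitative funds rely on machine learning signals?",
#       agents=[optimist, skeptic],
#       output_type="str-all-except-first",
#   )
#   print(history)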
@ -0,0 +1,7 @@
import os
from functools import lru_cache


@lru_cache(maxsize=1)
def get_cpu_cores() -> int:
    # os.cpu_count() can return None when the count is undetermined;
    # fall back to 1 so the annotated int return type always holds.
    return os.cpu_count() or 1
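

# Illustrative sketch (not part of the original module): the cached core count is
# typically used to size a worker pool; ThreadPoolExecutor here is just one example.
#
#   from concurrent.futures import ThreadPoolExecutor
#
#   executor = ThreadPoolExecutor(max_workers=get_cpu_cores())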
@ -0,0 +1,560 @@
import os

from loguru import logger

from swarms.structs.conversation import Conversation


def assert_equal(actual, expected, message=""):
    """Custom assertion function for equality"""
    if actual != expected:
        logger.error(
            f"Assertion failed: {message}\nExpected: {expected}\nActual: {actual}"
        )
        raise AssertionError(
            f"{message}\nExpected: {expected}\nActual: {actual}"
        )
    logger.success(f"Assertion passed: {message}")


def assert_true(condition, message=""):
    """Custom assertion function for boolean conditions"""
    if not condition:
        logger.error(f"Assertion failed: {message}")
        raise AssertionError(message)
    logger.success(f"Assertion passed: {message}")


def test_conversation_initialization():
    """Test conversation initialization with different parameters"""
    logger.info("Testing conversation initialization")

    # Test default initialization
    conv = Conversation()
    assert_true(
        isinstance(conv, Conversation),
        "Should create Conversation instance",
    )
    assert_equal(
        conv.provider,
        "in-memory",
        "Default provider should be in-memory",
    )

    # Test with custom parameters
    conv = Conversation(
        name="test-conv",
        system_prompt="Test system prompt",
        time_enabled=True,
        token_count=True,
    )
    assert_equal(
        conv.name, "test-conv", "Name should be set correctly"
    )
    assert_equal(
        conv.system_prompt,
        "Test system prompt",
        "System prompt should be set",
    )
    assert_true(conv.time_enabled, "Time should be enabled")
    assert_true(conv.token_count, "Token count should be enabled")


def test_add_message():
    """Test adding messages to conversation"""
    logger.info("Testing add message functionality")

    conv = Conversation(time_enabled=True, token_count=True)

    # Test adding text message
    conv.add("user", "Hello, world!")
    assert_equal(
        len(conv.conversation_history), 1, "Should have one message"
    )
    assert_equal(
        conv.conversation_history[0]["role"],
        "user",
        "Role should be user",
    )
    assert_equal(
        conv.conversation_history[0]["content"],
        "Hello, world!",
        "Content should match",
    )

    # Test adding dict message
    dict_msg = {"key": "value"}
    conv.add("assistant", dict_msg)
    assert_equal(
        len(conv.conversation_history), 2, "Should have two messages"
    )
    assert_equal(
        conv.conversation_history[1]["role"],
        "assistant",
        "Role should be assistant",
    )
    assert_equal(
        conv.conversation_history[1]["content"],
        dict_msg,
        "Content should match dict",
    )


def test_delete_message():
    """Test deleting messages from conversation"""
    logger.info("Testing delete message functionality")

    conv = Conversation()
    conv.add("user", "Message 1")
    conv.add("user", "Message 2")

    initial_length = len(conv.conversation_history)
    conv.delete("0")  # Delete first message

    assert_equal(
        len(conv.conversation_history),
        initial_length - 1,
        "Conversation history should be shorter by one",
    )
    assert_equal(
        conv.conversation_history[0]["content"],
        "Message 2",
        "Remaining message should be Message 2",
    )


def test_update_message():
    """Test updating messages in conversation"""
    logger.info("Testing update message functionality")

    conv = Conversation()
    conv.add("user", "Original message")

    conv.update("0", "user", "Updated message")
    assert_equal(
        conv.conversation_history[0]["content"],
        "Updated message",
        "Message should be updated",
    )


def test_search_messages():
    """Test searching messages in conversation"""
    logger.info("Testing search functionality")

    conv = Conversation()
    conv.add("user", "Hello world")
    conv.add("assistant", "Hello user")
    conv.add("user", "Goodbye world")

    results = conv.search("Hello")
    assert_equal(
        len(results), 2, "Should find two messages with 'Hello'"
    )

    results = conv.search("Goodbye")
    assert_equal(
        len(results), 1, "Should find one message with 'Goodbye'"
    )


def test_export_import():
    """Test exporting and importing conversation"""
    logger.info("Testing export/import functionality")

    conv = Conversation(name="export-test")
    conv.add("user", "Test message")

    # Test JSON export/import
    test_file = "test_conversation_export.json"
    conv.export_conversation(test_file)

    assert_true(os.path.exists(test_file), "Export file should exist")

    new_conv = Conversation(name="import-test")
    new_conv.import_conversation(test_file)

    assert_equal(
        len(new_conv.conversation_history),
        len(conv.conversation_history),
        "Imported conversation should have same number of messages",
    )

    # Cleanup
    os.remove(test_file)


def test_message_counting():
    """Test message counting functionality"""
    logger.info("Testing message counting functionality")

    conv = Conversation()
    conv.add("user", "User message")
    conv.add("assistant", "Assistant message")
    conv.add("system", "System message")

    counts = conv.count_messages_by_role()
    assert_equal(counts["user"], 1, "Should have one user message")
    assert_equal(
        counts["assistant"], 1, "Should have one assistant message"
    )
    assert_equal(
        counts["system"], 1, "Should have one system message"
    )


def test_conversation_string_representation():
    """Test string representation methods"""
    logger.info("Testing string representation methods")

    conv = Conversation()
    conv.add("user", "Test message")

    str_repr = conv.return_history_as_string()
    assert_true(
        "user: Test message" in str_repr,
        "String representation should contain message",
    )

    json_repr = conv.to_json()
    assert_true(
        isinstance(json_repr, str),
        "JSON representation should be string",
    )
    assert_true(
        "Test message" in json_repr,
        "JSON should contain message content",
    )


def test_memory_management():
    """Test memory management functions"""
    logger.info("Testing memory management functions")

    conv = Conversation()
    conv.add("user", "Message 1")
    conv.add("assistant", "Message 2")

    # Test clear
    conv.clear()
    assert_equal(
        len(conv.conversation_history),
        0,
        "History should be empty after clear",
    )

    # Test truncate
    conv = Conversation(context_length=100, token_count=True)
    long_message = (
        "This is a very long message that should be truncated " * 10
    )
    conv.add("user", long_message)
    conv.truncate_memory_with_tokenizer()
    assert_true(
        len(conv.conversation_history[0]["content"])
        < len(long_message),
        "Message should be truncated",
    )


def test_backend_initialization():
    """Test different backend initializations"""
    logger.info("Testing backend initialization")

    # Test Redis backend
    conv = Conversation(
        backend="redis",
        redis_host="localhost",
        redis_port=6379,
        redis_db=0,
        use_embedded_redis=True,
    )
    assert_equal(conv.backend, "redis", "Backend should be redis")

    # Test SQLite backend
    conv = Conversation(
        backend="sqlite",
        db_path=":memory:",
        table_name="test_conversations",
    )
    assert_equal(conv.backend, "sqlite", "Backend should be sqlite")

    # Test DuckDB backend
    conv = Conversation(
        backend="duckdb",
        db_path=":memory:",
        table_name="test_conversations",
    )
    assert_equal(conv.backend, "duckdb", "Backend should be duckdb")


def test_conversation_with_system_prompt():
    """Test conversation with system prompt and rules"""
    logger.info("Testing conversation with system prompt and rules")

    conv = Conversation(
        system_prompt="You are a helpful assistant",
        rules="Be concise and clear",
        custom_rules_prompt="Follow these guidelines",
        time_enabled=True,
    )

    history = conv.conversation_history
    assert_equal(
        len(history),
        3,
        "Should have system prompt, rules, and custom rules",
    )
    assert_equal(
        history[0]["content"],
        "You are a helpful assistant",
        "System prompt should match",
    )
    assert_equal(
        history[1]["content"],
        "Be concise and clear",
        "Rules should match",
    )
    assert_true(
        "timestamp" in history[0], "Messages should have timestamps"
    )


def test_batch_operations():
    """Test batch operations on conversation"""
    logger.info("Testing batch operations")

    conv = Conversation()

    # Test batch add
    roles = ["user", "assistant", "user"]
    contents = ["Hello", "Hi there", "How are you?"]
    conv.add_multiple_messages(roles, contents)

    assert_equal(
        len(conv.conversation_history),
        3,
        "Should have three messages",
    )

    # Test batch search
    results = conv.search("Hi")
    assert_equal(len(results), 1, "Should find one message with 'Hi'")


def test_conversation_export_formats():
    """Test different export formats"""
    logger.info("Testing export formats")

    conv = Conversation(name="export-test")
    conv.add("user", "Test message")

    # Test YAML export
    conv.export_method = "yaml"
    conv.save_filepath = "test_conversation.yaml"
    conv.export()
    assert_true(
        os.path.exists("test_conversation.yaml"),
        "YAML file should exist",
    )

    # Test JSON export
    conv.export_method = "json"
    conv.save_filepath = "test_conversation.json"
    conv.export()
    assert_true(
        os.path.exists("test_conversation.json"),
        "JSON file should exist",
    )

    # Cleanup
    os.remove("test_conversation.yaml")
    os.remove("test_conversation.json")


def test_conversation_with_token_counting():
    """Test conversation with token counting enabled"""
    logger.info("Testing token counting functionality")

    conv = Conversation(
        token_count=True,
        tokenizer_model_name="gpt-4.1",
        context_length=1000,
    )

    conv.add("user", "This is a test message")
    assert_true(
        "token_count" in conv.conversation_history[0],
        "Message should have token count",
    )

    # Test token counting with different message types
    conv.add(
        "assistant", {"response": "This is a structured response"}
    )
    assert_true(
        "token_count" in conv.conversation_history[1],
        "Structured message should have token count",
    )


def test_conversation_message_categories():
    """Test conversation with message categories"""
    logger.info("Testing message categories")

    conv = Conversation()

    # Add messages with categories
    conv.add("user", "Input message", category="input")
    conv.add("assistant", "Output message", category="output")

    # Test category counting
    token_counts = conv.export_and_count_categories()
    assert_true(
        "input_tokens" in token_counts,
        "Should have input token count",
    )
    assert_true(
        "output_tokens" in token_counts,
        "Should have output token count",
    )
    assert_true(
        "total_tokens" in token_counts,
        "Should have total token count",
    )


def test_conversation_persistence():
    """Test conversation persistence and loading"""
    logger.info("Testing conversation persistence")

    # Create and save conversation
    conv1 = Conversation(
        name="persistence-test",
        system_prompt="Test prompt",
        time_enabled=True,
        autosave=True,
    )
    conv1.add("user", "Test message")
    conv1.export()

    # Load conversation
    conv2 = Conversation.load_conversation(name="persistence-test")
    assert_equal(
        conv2.system_prompt,
        "Test prompt",
        "System prompt should persist",
    )
    assert_equal(
        len(conv2.conversation_history),
        2,
        "Should have system prompt and message",
    )


def test_conversation_utilities():
    """Test various utility methods"""
    logger.info("Testing utility methods")

    conv = Conversation(message_id_on=True)
    conv.add("user", "First message")
    conv.add("assistant", "Second message")

    # Test getting last message
    last_msg = conv.get_last_message_as_string()
    assert_true(
        "Second message" in last_msg,
        "Should get correct last message",
    )

    # Test getting messages as list
    msg_list = conv.return_messages_as_list()
    assert_equal(len(msg_list), 2, "Should have two messages in list")

    # Test getting messages as dictionary
    msg_dict = conv.return_messages_as_dictionary()
    assert_equal(
        len(msg_dict), 2, "Should have two messages in dictionary"
    )

    # Test message IDs
    assert_true(
        "message_id" in conv.conversation_history[0],
        "Messages should have IDs when enabled",
    )


def test_conversation_error_handling():
    """Test error handling in conversation methods"""
    logger.info("Testing error handling")

    conv = Conversation()

    # Test invalid export method
    try:
        conv.export_method = "invalid"
        conv.export()
        assert_true(
            False, "Should raise ValueError for invalid export method"
        )
    except ValueError:
        assert_true(
            True, "Should catch ValueError for invalid export method"
        )

    # Test invalid backend
    try:
        Conversation(backend="invalid_backend")
        assert_true(
            False, "Should raise ValueError for invalid backend"
        )
    except ValueError:
        assert_true(
            True, "Should catch ValueError for invalid backend"
        )


def run_all_tests():
    """Run all test functions"""
    logger.info("Starting all tests")

    test_functions = [
        test_conversation_initialization,
        test_add_message,
        test_delete_message,
        test_update_message,
        test_search_messages,
        test_export_import,
        test_message_counting,
        test_conversation_string_representation,
        test_memory_management,
        test_backend_initialization,
        test_conversation_with_system_prompt,
        test_batch_operations,
        test_conversation_export_formats,
        test_conversation_with_token_counting,
        test_conversation_message_categories,
        test_conversation_persistence,
        test_conversation_utilities,
        test_conversation_error_handling,
    ]

    passed = 0
    failed = 0

    for test_func in test_functions:
        try:
            logger.info(f"Running {test_func.__name__}")
            test_func()
            passed += 1
            logger.success(f"{test_func.__name__} passed")
        except Exception as e:
            failed += 1
            logger.error(f"{test_func.__name__} failed: {str(e)}")

    logger.info(f"Test summary: {passed} passed, {failed} failed")
    return passed, failed


if __name__ == "__main__":
    passed, failed = run_all_tests()
    if failed > 0:
        exit(1)