commit 4e481a0cab
@@ -1,54 +0,0 @@
from swarms import Agent, CronJob
from loguru import logger


# Example usage
if __name__ == "__main__":
    # Initialize the agent
    agent = Agent(
        agent_name="Quantitative-Trading-Agent",
        agent_description="Advanced quantitative trading and algorithmic analysis agent",
        system_prompt="""You are an expert quantitative trading agent with deep expertise in:
        - Algorithmic trading strategies and implementation
        - Statistical arbitrage and market making
        - Risk management and portfolio optimization
        - High-frequency trading systems
        - Market microstructure analysis
        - Quantitative research methodologies
        - Financial mathematics and stochastic processes
        - Machine learning applications in trading

        Your core responsibilities include:
        1. Developing and backtesting trading strategies
        2. Analyzing market data and identifying alpha opportunities
        3. Implementing risk management frameworks
        4. Optimizing portfolio allocations
        5. Conducting quantitative research
        6. Monitoring market microstructure
        7. Evaluating trading system performance

        You maintain strict adherence to:
        - Mathematical rigor in all analyses
        - Statistical significance in strategy development
        - Risk-adjusted return optimization
        - Market impact minimization
        - Regulatory compliance
        - Transaction cost analysis
        - Performance attribution

        You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
        max_loops=1,
        model_name="gpt-4.1",
        dynamic_temperature_enabled=True,
        output_type="str-all-except-first",
        streaming_on=True,
        print_on=True,
        telemetry_enable=False,
    )

    # Example 1: Basic usage with just a task
    logger.info("Starting example cron job")
    cron_job = CronJob(agent=agent, interval="10seconds")
    cron_job.run(
        task="What are the best top 3 etfs for gold coverage?"
    )

@@ -0,0 +1,903 @@
# Board of Directors - Multi-Agent Architecture

The Board of Directors is a sophisticated multi-agent architecture that implements collective decision-making through democratic processes, voting mechanisms, and role-based leadership. This architecture provides an alternative to single-director patterns by enabling collaborative intelligence through structured governance.

## 🏛️ Overview

The Board of Directors architecture follows a democratic workflow pattern:

1. **Task Reception**: User provides a task to the swarm
2. **Board Meeting**: Board of Directors convenes to discuss and create a plan
3. **Voting & Consensus**: Board members vote and reach consensus on task distribution
4. **Order Distribution**: Board distributes orders to specialized worker agents
5. **Execution**: Individual agents execute their assigned tasks
6. **Feedback Loop**: Board evaluates results and issues new orders if needed (up to `max_loops`)
7. **Context Preservation**: All conversation history and context is maintained throughout the process

## 🏗️ Architecture Components

### Core Components

| Component | Description | Purpose |
|-----------|-------------|---------|
| **BoardOfDirectorsSwarm** | Main orchestration class | Manages the entire board workflow and agent coordination |
| **Board Member Roles** | Role definitions and hierarchy | Defines responsibilities and voting weights for each board member |
| **Decision Making Process** | Voting and consensus mechanisms | Implements democratic decision-making with weighted voting |
| **Workflow Management** | Process orchestration | Manages the complete lifecycle from task reception to final delivery |

### Board Member Interaction Flow

```mermaid
sequenceDiagram
    participant User
    participant Chairman
    participant ViceChair
    participant Secretary
    participant Treasurer
    participant ExecDir
    participant Agents

    User->>Chairman: Submit Task
    Chairman->>ViceChair: Notify Board Meeting
    Chairman->>Secretary: Request Meeting Setup
    Chairman->>Treasurer: Resource Assessment
    Chairman->>ExecDir: Strategic Planning

    Note over Chairman,ExecDir: Board Discussion Phase

    Chairman->>ViceChair: Lead Discussion
    ViceChair->>Secretary: Document Decisions
    Secretary->>Treasurer: Budget Considerations
    Treasurer->>ExecDir: Resource Allocation
    ExecDir->>Chairman: Strategic Recommendations

    Note over Chairman,ExecDir: Voting & Consensus

    Chairman->>ViceChair: Call for Vote
    ViceChair->>Secretary: Record Votes
    Secretary->>Treasurer: Financial Approval
    Treasurer->>ExecDir: Resource Approval
    ExecDir->>Chairman: Final Decision

    Note over Chairman,Agents: Execution Phase

    Chairman->>Agents: Distribute Orders
    Agents->>Chairman: Execute Tasks
    Agents->>ViceChair: Progress Reports
    Agents->>Secretary: Documentation
    Agents->>Treasurer: Resource Usage
    Agents->>ExecDir: Strategic Updates

    Note over Chairman,ExecDir: Review & Feedback

    Chairman->>User: Deliver Results
```

## 👥 Board Member Roles

The Board of Directors supports various roles with different responsibilities and voting weights:

| Role | Description | Voting Weight | Responsibilities |
|------|-------------|---------------|------------------|
| `CHAIRMAN` | Primary leader responsible for board meetings and final decisions | 1.5 | Leading meetings, facilitating consensus, making final decisions |
| `VICE_CHAIRMAN` | Secondary leader who supports the chairman | 1.2 | Supporting chairman, coordinating operations |
| `SECRETARY` | Responsible for documentation and meeting minutes | 1.0 | Documenting meetings, maintaining records |
| `TREASURER` | Manages financial aspects and resource allocation | 1.0 | Financial oversight, resource management |
| `EXECUTIVE_DIRECTOR` | Executive-level board member with operational authority | 1.5 | Strategic planning, operational oversight |
| `MEMBER` | General board member with specific expertise | 1.0 | Contributing expertise, participating in decisions |

### Role Hierarchy and Authority

```python
|
||||
# Example: Role hierarchy implementation
|
||||
class BoardRoleHierarchy:
|
||||
def __init__(self):
|
||||
self.roles = {
|
||||
"CHAIRMAN": {
|
||||
"voting_weight": 1.5,
|
||||
"authority_level": "FINAL",
|
||||
"supervises": ["VICE_CHAIRMAN", "EXECUTIVE_DIRECTOR", "SECRETARY", "TREASURER", "MEMBER"],
|
||||
"responsibilities": ["leadership", "final_decision", "consensus_facilitation"],
|
||||
"override_capability": True
|
||||
},
|
||||
"VICE_CHAIRMAN": {
|
||||
"voting_weight": 1.2,
|
||||
"authority_level": "SENIOR",
|
||||
"supervises": ["MEMBER"],
|
||||
"responsibilities": ["operational_support", "coordination", "implementation"],
|
||||
"backup_for": "CHAIRMAN"
|
||||
},
|
||||
"EXECUTIVE_DIRECTOR": {
|
||||
"voting_weight": 1.5,
|
||||
"authority_level": "SENIOR",
|
||||
"supervises": ["MEMBER"],
|
||||
"responsibilities": ["strategic_planning", "execution_oversight", "performance_management"],
|
||||
"strategic_authority": True
|
||||
},
|
||||
"SECRETARY": {
|
||||
"voting_weight": 1.0,
|
||||
"authority_level": "STANDARD",
|
||||
"supervises": [],
|
||||
"responsibilities": ["documentation", "record_keeping", "communication"],
|
||||
"administrative_authority": True
|
||||
},
|
||||
"TREASURER": {
|
||||
"voting_weight": 1.0,
|
||||
"authority_level": "STANDARD",
|
||||
"supervises": [],
|
||||
"responsibilities": ["financial_oversight", "resource_management", "budget_control"],
|
||||
"financial_authority": True
|
||||
},
|
||||
"MEMBER": {
|
||||
"voting_weight": 1.0,
|
||||
"authority_level": "STANDARD",
|
||||
"supervises": [],
|
||||
"responsibilities": ["expertise_contribution", "analysis", "voting"],
|
||||
"specialized_expertise": True
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 🚀 Quick Start

### Basic Setup

```python
from swarms import Agent
from swarms.structs.board_of_directors_swarm import (
    BoardOfDirectorsSwarm,
    BoardMember,
    BoardMemberRole
)
from swarms.config.board_config import enable_board_feature

# Enable the Board of Directors feature
enable_board_feature()

# Create board members with specific roles
chairman = Agent(
    agent_name="Chairman",
    agent_description="Chairman of the Board responsible for leading meetings",
    model_name="gpt-4o-mini",
    system_prompt="You are the Chairman of the Board..."
)

vice_chairman = Agent(
    agent_name="Vice-Chairman",
    agent_description="Vice Chairman who supports the Chairman",
    model_name="gpt-4o-mini",
    system_prompt="You are the Vice Chairman..."
)

# Create BoardMember objects with roles and expertise
board_members = [
    BoardMember(chairman, BoardMemberRole.CHAIRMAN, 1.5, ["leadership", "strategy"]),
    BoardMember(vice_chairman, BoardMemberRole.VICE_CHAIRMAN, 1.2, ["operations", "coordination"]),
]

# Create worker agents
research_agent = Agent(
    agent_name="Research-Specialist",
    agent_description="Expert in market research and analysis",
    model_name="gpt-4o",
)

financial_agent = Agent(
    agent_name="Financial-Analyst",
    agent_description="Specialist in financial analysis and valuation",
    model_name="gpt-4o",
)

# Initialize the Board of Directors swarm
board_swarm = BoardOfDirectorsSwarm(
    name="Executive_Board_Swarm",
    description="Executive board with specialized roles for strategic decision-making",
    board_members=board_members,
    agents=[research_agent, financial_agent],
    max_loops=2,
    verbose=True,
    decision_threshold=0.6,
    enable_voting=True,
    enable_consensus=True,
)

# Execute a complex task with democratic decision-making
result = board_swarm.run(task="Analyze the market potential for Tesla (TSLA) stock")
print(result)
```
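
After a run, the board's composition can be inspected with `get_board_summary()`, which the Performance Monitoring section below also uses. A minimal check, using the keys shown in that section:

```python
# Inspect the configured board (see "Board Performance Metrics" below).
summary = board_swarm.get_board_summary()
print(summary["board_name"], summary["total_members"], summary["total_agents"])
```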
|
||||
|
||||
## 📋 Comprehensive Examples
|
||||
|
||||
### 1. Strategic Investment Analysis
|
||||
|
||||
```python
|
||||
# Create specialized agents for investment analysis
|
||||
market_research_agent = Agent(
|
||||
agent_name="Market-Research-Specialist",
|
||||
agent_description="Expert in market research, competitive analysis, and industry trends",
|
||||
model_name="gpt-4o",
|
||||
system_prompt="""You are a Market Research Specialist. Your responsibilities include:
|
||||
1. Conducting comprehensive market research and analysis
|
||||
2. Identifying market trends, opportunities, and risks
|
||||
3. Analyzing competitive landscape and positioning
|
||||
4. Providing market size and growth projections
|
||||
5. Supporting strategic decision-making with research findings
|
||||
|
||||
You should be thorough, analytical, and objective in your research."""
|
||||
)
|
||||
|
||||
financial_analyst_agent = Agent(
|
||||
agent_name="Financial-Analyst",
|
||||
agent_description="Specialist in financial analysis, valuation, and investment assessment",
|
||||
model_name="gpt-4o",
|
||||
system_prompt="""You are a Financial Analyst. Your responsibilities include:
|
||||
1. Conducting financial analysis and valuation
|
||||
2. Assessing investment opportunities and risks
|
||||
3. Analyzing financial performance and metrics
|
||||
4. Providing financial insights and recommendations
|
||||
5. Supporting financial decision-making
|
||||
|
||||
You should be financially astute, analytical, and focused on value creation."""
|
||||
)
|
||||
|
||||
technical_assessor_agent = Agent(
|
||||
agent_name="Technical-Assessor",
|
||||
agent_description="Expert in technical feasibility and implementation assessment",
|
||||
model_name="gpt-4o",
|
||||
system_prompt="""You are a Technical Assessor. Your responsibilities include:
|
||||
1. Evaluating technical feasibility and requirements
|
||||
2. Assessing implementation challenges and risks
|
||||
3. Analyzing technology stack and architecture
|
||||
4. Providing technical insights and recommendations
|
||||
5. Supporting technical decision-making
|
||||
|
||||
You should be technically proficient, practical, and solution-oriented."""
|
||||
)
|
||||
|
||||
# Create comprehensive board members
|
||||
board_members = [
|
||||
BoardMember(
|
||||
chairman,
|
||||
BoardMemberRole.CHAIRMAN,
|
||||
1.5,
|
||||
["leadership", "strategy", "governance", "decision_making"]
|
||||
),
|
||||
BoardMember(
|
||||
vice_chairman,
|
||||
BoardMemberRole.VICE_CHAIRMAN,
|
||||
1.2,
|
||||
["operations", "coordination", "communication", "implementation"]
|
||||
),
|
||||
BoardMember(
|
||||
secretary,
|
||||
BoardMemberRole.SECRETARY,
|
||||
1.0,
|
||||
["documentation", "compliance", "record_keeping", "communication"]
|
||||
),
|
||||
BoardMember(
|
||||
treasurer,
|
||||
BoardMemberRole.TREASURER,
|
||||
1.0,
|
||||
["finance", "budgeting", "risk_management", "resource_allocation"]
|
||||
),
|
||||
BoardMember(
|
||||
executive_director,
|
||||
BoardMemberRole.EXECUTIVE_DIRECTOR,
|
||||
1.5,
|
||||
["strategy", "operations", "innovation", "performance_management"]
|
||||
)
|
||||
]
|
||||
|
||||
# Initialize the investment analysis board
|
||||
investment_board = BoardOfDirectorsSwarm(
|
||||
name="Investment_Analysis_Board",
|
||||
description="Specialized board for investment analysis and decision-making",
|
||||
board_members=board_members,
|
||||
agents=[market_research_agent, financial_analyst_agent, technical_assessor_agent],
|
||||
max_loops=3,
|
||||
verbose=True,
|
||||
decision_threshold=0.75, # Higher threshold for investment decisions
|
||||
enable_voting=True,
|
||||
enable_consensus=True,
|
||||
max_workers=3,
|
||||
output_type="dict"
|
||||
)
|
||||
|
||||
# Execute investment analysis
|
||||
investment_task = """
|
||||
Analyze the strategic investment opportunity for a $50M Series B funding round in a
|
||||
fintech startup. Consider market conditions, competitive landscape, financial projections,
|
||||
technical feasibility, and strategic fit. Provide comprehensive recommendations including:
|
||||
1. Investment recommendation (proceed/hold/decline)
|
||||
2. Valuation analysis and suggested terms
|
||||
3. Risk assessment and mitigation strategies
|
||||
4. Strategic value and synergies
|
||||
5. Implementation timeline and milestones
|
||||
"""
|
||||
|
||||
result = investment_board.run(task=investment_task)
|
||||
print("Investment Analysis Results:")
|
||||
print(json.dumps(result, indent=2))
|
||||
```
|
||||
|
||||
### 2. Technology Strategy Development
|
||||
|
||||
```python
|
||||
# Create technology-focused agents
|
||||
tech_strategy_agent = Agent(
|
||||
agent_name="Tech-Strategy-Specialist",
|
||||
agent_description="Expert in technology strategy and digital transformation",
|
||||
model_name="gpt-4o",
|
||||
system_prompt="""You are a Technology Strategy Specialist. Your responsibilities include:
|
||||
1. Developing technology roadmaps and strategies
|
||||
2. Assessing digital transformation opportunities
|
||||
3. Evaluating emerging technologies and trends
|
||||
4. Planning technology investments and priorities
|
||||
5. Supporting technology decision-making
|
||||
|
||||
You should be strategic, forward-thinking, and technology-savvy."""
|
||||
)
|
||||
|
||||
implementation_planner_agent = Agent(
|
||||
agent_name="Implementation-Planner",
|
||||
agent_description="Expert in implementation planning and project management",
|
||||
model_name="gpt-4o",
|
||||
system_prompt="""You are an Implementation Planner. Your responsibilities include:
|
||||
1. Creating detailed implementation plans
|
||||
2. Assessing resource requirements and timelines
|
||||
3. Identifying implementation risks and challenges
|
||||
4. Planning change management strategies
|
||||
5. Supporting implementation decision-making
|
||||
|
||||
You should be practical, organized, and execution-focused."""
|
||||
)
|
||||
|
||||
# Technology strategy board configuration
|
||||
tech_board = BoardOfDirectorsSwarm(
|
||||
name="Technology_Strategy_Board",
|
||||
description="Specialized board for technology strategy and digital transformation",
|
||||
board_members=board_members,
|
||||
agents=[tech_strategy_agent, implementation_planner_agent, technical_assessor_agent],
|
||||
max_loops=4, # More loops for complex technology planning
|
||||
verbose=True,
|
||||
decision_threshold=0.7,
|
||||
enable_voting=True,
|
||||
enable_consensus=True,
|
||||
max_workers=3,
|
||||
output_type="dict"
|
||||
)
|
||||
|
||||
# Execute technology strategy development
|
||||
tech_strategy_task = """
|
||||
Develop a comprehensive technology strategy for a mid-size manufacturing company
|
||||
looking to digitize operations and implement Industry 4.0 technologies. Consider:
|
||||
1. Current technology assessment and gaps
|
||||
2. Technology roadmap and implementation plan
|
||||
3. Investment requirements and ROI analysis
|
||||
4. Risk assessment and mitigation strategies
|
||||
5. Change management and training requirements
|
||||
6. Competitive positioning and market advantages
|
||||
"""
|
||||
|
||||
result = tech_board.run(task=tech_strategy_task)
|
||||
print("Technology Strategy Results:")
|
||||
print(json.dumps(result, indent=2))
|
||||
```
|
||||
|
||||
### 3. Crisis Management and Response
|
||||
|
||||
```python
|
||||
# Create crisis management agents
|
||||
crisis_coordinator_agent = Agent(
|
||||
agent_name="Crisis-Coordinator",
|
||||
agent_description="Expert in crisis management and emergency response",
|
||||
model_name="gpt-4o",
|
||||
system_prompt="""You are a Crisis Coordinator. Your responsibilities include:
|
||||
1. Coordinating crisis response efforts
|
||||
2. Assessing crisis severity and impact
|
||||
3. Developing immediate response plans
|
||||
4. Managing stakeholder communications
|
||||
5. Supporting crisis decision-making
|
||||
|
||||
You should be calm, decisive, and action-oriented."""
|
||||
)
|
||||
|
||||
communications_specialist_agent = Agent(
|
||||
agent_name="Communications-Specialist",
|
||||
agent_description="Expert in crisis communications and stakeholder management",
|
||||
model_name="gpt-4o",
|
||||
system_prompt="""You are a Communications Specialist. Your responsibilities include:
|
||||
1. Developing crisis communication strategies
|
||||
2. Managing stakeholder communications
|
||||
3. Coordinating public relations efforts
|
||||
4. Ensuring message consistency and accuracy
|
||||
5. Supporting communication decision-making
|
||||
|
||||
You should be clear, empathetic, and strategic in communications."""
|
||||
)
|
||||
|
||||
# Crisis management board configuration
|
||||
crisis_board = BoardOfDirectorsSwarm(
|
||||
name="Crisis_Management_Board",
|
||||
description="Specialized board for crisis management and emergency response",
|
||||
board_members=board_members,
|
||||
agents=[crisis_coordinator_agent, communications_specialist_agent, financial_analyst_agent],
|
||||
max_loops=2, # Faster response needed
|
||||
verbose=True,
|
||||
decision_threshold=0.6, # Lower threshold for urgent decisions
|
||||
enable_voting=True,
|
||||
enable_consensus=True,
|
||||
max_workers=3,
|
||||
output_type="dict"
|
||||
)
|
||||
|
||||
# Execute crisis management
|
||||
crisis_task = """
|
||||
Our company is facing a major data breach. Develop an immediate response plan.
|
||||
Include:
|
||||
1. Immediate containment and mitigation steps
|
||||
2. Communication strategy for stakeholders
|
||||
3. Legal and regulatory compliance requirements
|
||||
4. Financial impact assessment
|
||||
5. Long-term recovery and prevention measures
|
||||
6. Timeline and resource allocation
|
||||
"""
|
||||
|
||||
result = crisis_board.run(task=crisis_task)
|
||||
print("Crisis Management Results:")
|
||||
print(json.dumps(result, indent=2))
|
||||
```
|
||||
|
||||
## ⚙️ Configuration and Parameters
|
||||
|
||||
### BoardOfDirectorsSwarm Parameters
|
||||
|
||||
```python
|
||||
# Complete parameter reference
|
||||
board_swarm = BoardOfDirectorsSwarm(
|
||||
# Basic Configuration
|
||||
name="Board_Name", # Name of the board
|
||||
description="Board description", # Description of the board's purpose
|
||||
|
||||
# Board Members and Agents
|
||||
board_members=board_members, # List of BoardMember objects
|
||||
agents=worker_agents, # List of worker Agent objects
|
||||
|
||||
# Execution Control
|
||||
max_loops=3, # Maximum number of refinement loops
|
||||
max_workers=4, # Maximum parallel workers
|
||||
|
||||
# Decision Making
|
||||
decision_threshold=0.7, # Consensus threshold (0.0-1.0)
|
||||
enable_voting=True, # Enable voting mechanisms
|
||||
enable_consensus=True, # Enable consensus building
|
||||
|
||||
# Advanced Features
|
||||
auto_assign_roles=True, # Auto-assign roles based on expertise
|
||||
role_mapping={ # Custom role mapping
|
||||
"financial_analysis": ["Treasurer", "Financial_Member"],
|
||||
"strategic_planning": ["Chairman", "Executive_Director"]
|
||||
},
|
||||
|
||||
# Consensus Configuration
|
||||
consensus_timeout=300, # Consensus timeout in seconds
|
||||
min_participation_rate=0.8, # Minimum participation rate
|
||||
auto_fallback_to_chairman=True, # Chairman can make final decisions
|
||||
consensus_rounds=3, # Maximum consensus building rounds
|
||||
|
||||
# Output Configuration
|
||||
output_type="dict", # Output format: "dict", "str", "list"
|
||||
verbose=True, # Enable detailed logging
|
||||
|
||||
# Quality Control
|
||||
quality_threshold=0.8, # Quality threshold for outputs
|
||||
enable_quality_gates=True, # Enable quality checkpoints
|
||||
enable_peer_review=True, # Enable peer review mechanisms
|
||||
|
||||
# Performance Optimization
|
||||
parallel_execution=True, # Enable parallel execution
|
||||
enable_agent_pooling=True, # Enable agent pooling
|
||||
timeout_per_agent=300, # Timeout per agent in seconds
|
||||
|
||||
# Monitoring and Logging
|
||||
enable_logging=True, # Enable detailed logging
|
||||
log_level="INFO", # Logging level
|
||||
enable_metrics=True, # Enable performance metrics
|
||||
enable_tracing=True # Enable request tracing
|
||||
)
|
||||
```
|
||||
|
||||
### Voting Configuration

```python
# Voting system configuration
voting_config = {
    "method": "weighted_majority",   # Voting method
    "threshold": 0.75,               # Consensus threshold
    "weights": {                     # Role-based voting weights
        "CHAIRMAN": 1.5,
        "VICE_CHAIRMAN": 1.2,
        "SECRETARY": 1.0,
        "TREASURER": 1.0,
        "EXECUTIVE_DIRECTOR": 1.5
    },
    "tie_breaker": "CHAIRMAN",       # Tie breaker role
    "allow_abstention": True,        # Allow board members to abstain
    "secret_ballot": False,          # Use secret ballot voting
    "transparent_process": True      # Transparent voting process
}
```
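
To make the weighted scheme concrete, here is a minimal, illustrative tally in Python. It assumes votes arrive as `(role, approved)` pairs and uses the weights from the roles table above; it is a sketch of the idea, not the swarm's internal implementation, and the `weighted_majority` helper is hypothetical.

```python
# Illustrative only: a minimal weighted-majority tally using the weights above.
ROLE_WEIGHTS = {
    "CHAIRMAN": 1.5,
    "VICE_CHAIRMAN": 1.2,
    "SECRETARY": 1.0,
    "TREASURER": 1.0,
    "EXECUTIVE_DIRECTOR": 1.5,
    "MEMBER": 1.0,
}

def weighted_majority(votes, threshold=0.75):
    """Return True if the weighted share of approvals meets the threshold.

    votes: list of (role, approved) tuples; abstaining members are simply omitted.
    """
    total = sum(ROLE_WEIGHTS[role] for role, _ in votes)
    approved = sum(ROLE_WEIGHTS[role] for role, ok in votes if ok)
    return total > 0 and (approved / total) >= threshold

# Example: Chairman and Executive Director approve, Treasurer declines.
votes = [("CHAIRMAN", True), ("EXECUTIVE_DIRECTOR", True), ("TREASURER", False)]
print(weighted_majority(votes, threshold=0.75))  # 3.0 / 4.0 = 0.75 -> True
```

With these example votes the approving weight is 3.0 out of 4.0 (0.75), which just meets a 0.75 threshold; lowering `decision_threshold` makes consensus easier to reach.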
|
||||
|
||||
### Quality Control Configuration
|
||||
|
||||
```python
|
||||
# Quality control configuration
|
||||
quality_config = {
|
||||
"quality_gates": True, # Enable quality checkpoints
|
||||
"quality_threshold": 0.8, # Quality threshold
|
||||
"enable_peer_review": True, # Enable peer review
|
||||
"review_required": True, # Require peer review
|
||||
"output_validation": True, # Validate outputs
|
||||
"enable_metrics_tracking": True, # Track quality metrics
|
||||
|
||||
# Quality metrics
|
||||
"quality_metrics": {
|
||||
"completeness": {"weight": 0.2, "threshold": 0.8},
|
||||
"accuracy": {"weight": 0.25, "threshold": 0.85},
|
||||
"feasibility": {"weight": 0.2, "threshold": 0.8},
|
||||
"risk": {"weight": 0.15, "threshold": 0.7},
|
||||
"impact": {"weight": 0.2, "threshold": 0.8}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 📊 Performance Monitoring and Analytics
|
||||
|
||||
### Board Performance Metrics
|
||||
|
||||
```python
|
||||
# Get comprehensive board performance metrics
|
||||
board_summary = board_swarm.get_board_summary()
|
||||
print("Board Summary:")
|
||||
print(f"Board Name: {board_summary['board_name']}")
|
||||
print(f"Total Board Members: {board_summary['total_members']}")
|
||||
print(f"Total Worker Agents: {board_summary['total_agents']}")
|
||||
print(f"Decision Threshold: {board_summary['decision_threshold']}")
|
||||
print(f"Max Loops: {board_summary['max_loops']}")
|
||||
|
||||
# Display board member details
|
||||
print("\nBoard Members:")
|
||||
for member in board_summary['members']:
|
||||
print(f"- {member['name']} (Role: {member['role']}, Weight: {member['voting_weight']})")
|
||||
print(f" Expertise: {', '.join(member['expertise_areas'])}")
|
||||
|
||||
# Display worker agent details
|
||||
print("\nWorker Agents:")
|
||||
for agent in board_summary['agents']:
|
||||
print(f"- {agent['name']}: {agent['description']}")
|
||||
```
|
||||
|
||||
### Decision Analysis
|
||||
|
||||
```python
|
||||
# Analyze decision-making patterns
|
||||
if hasattr(result, 'get') and callable(result.get):
|
||||
conversation_history = result.get('conversation_history', [])
|
||||
|
||||
print(f"\nDecision Analysis:")
|
||||
print(f"Total Messages: {len(conversation_history)}")
|
||||
|
||||
# Count board member contributions
|
||||
board_contributions = {}
|
||||
for msg in conversation_history:
|
||||
if 'Board' in msg.get('role', ''):
|
||||
member_name = msg.get('agent_name', 'Unknown')
|
||||
board_contributions[member_name] = board_contributions.get(member_name, 0) + 1
|
||||
|
||||
print(f"Board Member Contributions:")
|
||||
for member, count in board_contributions.items():
|
||||
print(f"- {member}: {count} contributions")
|
||||
|
||||
# Count agent executions
|
||||
agent_executions = {}
|
||||
for msg in conversation_history:
|
||||
if any(agent.agent_name in msg.get('role', '') for agent in worker_agents):
|
||||
agent_name = msg.get('agent_name', 'Unknown')
|
||||
agent_executions[agent_name] = agent_executions.get(agent_name, 0) + 1
|
||||
|
||||
print(f"\nAgent Executions:")
|
||||
for agent, count in agent_executions.items():
|
||||
print(f"- {agent}: {count} executions")
|
||||
```
|
||||
|
||||
### Performance Monitoring System
|
||||
|
||||
```python
|
||||
# Performance monitoring system
|
||||
class PerformanceMonitor:
|
||||
def __init__(self):
|
||||
self.metrics = {
|
||||
"execution_times": [],
|
||||
"quality_scores": [],
|
||||
"consensus_rounds": [],
|
||||
"error_rates": []
|
||||
}
|
||||
|
||||
def track_execution_time(self, phase, duration):
|
||||
"""Track execution time for different phases"""
|
||||
self.metrics["execution_times"].append({
|
||||
"phase": phase,
|
||||
"duration": duration,
|
||||
"timestamp": datetime.now().isoformat()
|
||||
})
|
||||
|
||||
def track_quality_score(self, score):
|
||||
"""Track quality scores"""
|
||||
self.metrics["quality_scores"].append({
|
||||
"score": score,
|
||||
"timestamp": datetime.now().isoformat()
|
||||
})
|
||||
|
||||
def generate_performance_report(self):
|
||||
"""Generate comprehensive performance report"""
|
||||
return {
|
||||
"average_execution_time": self.calculate_average_execution_time(),
|
||||
"quality_trends": self.analyze_quality_trends(),
|
||||
"consensus_efficiency": self.analyze_consensus_efficiency(),
|
||||
"error_analysis": self.analyze_errors(),
|
||||
"recommendations": self.generate_recommendations()
|
||||
}
|
||||
|
||||
# Usage example
|
||||
monitor = PerformanceMonitor()
|
||||
# ... track metrics during execution ...
|
||||
report = monitor.generate_performance_report()
|
||||
print("Performance Report:")
|
||||
print(json.dumps(report, indent=2))
|
||||
```
|
||||
|
||||
## 🔧 Advanced Features and Customization
|
||||
|
||||
### Custom Board Templates
|
||||
|
||||
```python
|
||||
from swarms.config.board_config import get_default_board_template
|
||||
|
||||
# Get pre-configured board templates
|
||||
financial_board = get_default_board_template("financial_analysis")
|
||||
strategic_board = get_default_board_template("strategic_planning")
|
||||
tech_board = get_default_board_template("technology_assessment")
|
||||
crisis_board = get_default_board_template("crisis_management")
|
||||
|
||||
# Custom board template
|
||||
custom_template = {
|
||||
"name": "Custom_Board",
|
||||
"description": "Custom board for specific use case",
|
||||
"board_members": [
|
||||
{"role": "CHAIRMAN", "expertise": ["leadership", "strategy"]},
|
||||
{"role": "VICE_CHAIRMAN", "expertise": ["operations", "coordination"]},
|
||||
{"role": "SECRETARY", "expertise": ["documentation", "communication"]},
|
||||
{"role": "TREASURER", "expertise": ["finance", "budgeting"]},
|
||||
{"role": "EXECUTIVE_DIRECTOR", "expertise": ["strategy", "operations"]}
|
||||
],
|
||||
"agents": [
|
||||
{"name": "Research_Agent", "expertise": ["research", "analysis"]},
|
||||
{"name": "Technical_Agent", "expertise": ["technical", "implementation"]}
|
||||
],
|
||||
"config": {
|
||||
"max_loops": 3,
|
||||
"decision_threshold": 0.7,
|
||||
"enable_voting": True,
|
||||
"enable_consensus": True
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Dynamic Role Assignment
|
||||
|
||||
```python
|
||||
# Automatically assign roles based on task requirements
|
||||
board_swarm = BoardOfDirectorsSwarm(
|
||||
board_members=board_members,
|
||||
agents=agents,
|
||||
auto_assign_roles=True,
|
||||
role_mapping={
|
||||
"financial_analysis": ["Treasurer", "Financial_Member"],
|
||||
"strategic_planning": ["Chairman", "Executive_Director"],
|
||||
"technical_assessment": ["Technical_Member", "Executive_Director"],
|
||||
"research_analysis": ["Research_Member", "Secretary"],
|
||||
"crisis_management": ["Chairman", "Vice_Chairman", "Communications_Member"]
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
### Consensus Optimization
|
||||
|
||||
```python
|
||||
# Advanced consensus-building mechanisms
|
||||
board_swarm = BoardOfDirectorsSwarm(
|
||||
board_members=board_members,
|
||||
agents=agents,
|
||||
enable_consensus=True,
|
||||
consensus_timeout=300, # 5 minutes timeout
|
||||
min_participation_rate=0.8, # 80% minimum participation
|
||||
auto_fallback_to_chairman=True, # Chairman can make final decisions
|
||||
consensus_rounds=3, # Maximum consensus building rounds
|
||||
consensus_method="weighted_majority", # Consensus method
|
||||
enable_mediation=True, # Enable mediation for conflicts
|
||||
mediation_timeout=120 # Mediation timeout in seconds
|
||||
)
|
||||
```
|
||||
|
||||
## 🛠️ Troubleshooting and Debugging
|
||||
|
||||
### Common Issues and Solutions
|
||||
|
||||
1. **Consensus Failures**
|
||||
- **Issue**: Board cannot reach consensus within loop limit
|
||||
- **Solution**: Lower voting threshold, increase max_loops, or adjust voting weights
|
||||
```python
|
||||
board_swarm = BoardOfDirectorsSwarm(
|
||||
decision_threshold=0.6, # Lower threshold
|
||||
max_loops=5, # More loops
|
||||
consensus_timeout=600 # Longer timeout
|
||||
)
|
||||
```
|
||||
|
||||
2. **Agent Timeout**
|
||||
- **Issue**: Individual agents take too long to respond
|
||||
- **Solution**: Increase timeout settings or optimize agent prompts
|
||||
```python
|
||||
board_swarm = BoardOfDirectorsSwarm(
|
||||
timeout_per_agent=600, # 10 minutes per agent
|
||||
enable_agent_pooling=True # Use agent pooling
|
||||
)
|
||||
```
|
||||
|
||||
3. **Poor Quality Output**
|
||||
- **Issue**: Final output doesn't meet quality standards
|
||||
- **Solution**: Enable quality gates, increase max_loops, or improve agent prompts
|
||||
```python
|
||||
board_swarm = BoardOfDirectorsSwarm(
|
||||
enable_quality_gates=True,
|
||||
quality_threshold=0.8,
|
||||
enable_peer_review=True,
|
||||
max_loops=4
|
||||
)
|
||||
```
|
||||
|
||||
4. **Resource Exhaustion**
|
||||
- **Issue**: System runs out of resources during execution
|
||||
- **Solution**: Implement resource limits, use agent pooling, or optimize parallel execution
|
||||
```python
|
||||
board_swarm = BoardOfDirectorsSwarm(
|
||||
max_workers=2, # Limit parallel workers
|
||||
enable_agent_pooling=True,
|
||||
parallel_execution=False # Disable parallel execution
|
||||
)
|
||||
```
|
||||
|
||||
### Debugging Techniques
|
||||
|
||||
```python
|
||||
# Debugging configuration
|
||||
debug_config = BoardConfig(
|
||||
max_loops=1, # Limit loops for debugging
|
||||
enable_logging=True,
|
||||
log_level="DEBUG",
|
||||
enable_tracing=True,
|
||||
debug_mode=True
|
||||
)
|
||||
|
||||
# Create debug swarm
|
||||
debug_swarm = BoardOfDirectorsSwarm(
|
||||
agents=agents,
|
||||
config=debug_config
|
||||
)
|
||||
|
||||
# Execute with debugging
|
||||
try:
|
||||
result = debug_swarm.run(task)
|
||||
except Exception as e:
|
||||
print(f"Error: {e}")
|
||||
print(f"Debug info: {debug_swarm.get_debug_info()}")
|
||||
|
||||
# Enable detailed logging
|
||||
import logging
|
||||
logging.basicConfig(
|
||||
level=logging.DEBUG,
|
||||
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
|
||||
# Create swarm with logging enabled
|
||||
logging_swarm = BoardOfDirectorsSwarm(
|
||||
agents=agents,
|
||||
config=BoardConfig(
|
||||
enable_logging=True,
|
||||
log_level="DEBUG",
|
||||
enable_metrics=True,
|
||||
enable_tracing=True
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
## 📋 Use Cases
|
||||
|
||||
### Corporate Governance
|
||||
- **Strategic Planning**: Long-term business strategy development
|
||||
- **Risk Management**: Comprehensive risk assessment and mitigation
|
||||
- **Resource Allocation**: Optimal distribution of company resources
|
||||
- **Performance Oversight**: Monitoring and evaluating organizational performance
|
||||
|
||||
### Financial Analysis
|
||||
- **Portfolio Management**: Investment portfolio optimization and rebalancing
|
||||
- **Market Analysis**: Comprehensive market research and trend analysis
|
||||
- **Risk Assessment**: Financial risk evaluation and management
|
||||
- **Compliance Monitoring**: Regulatory compliance and audit preparation
|
||||
|
||||
### Research & Development
|
||||
- **Technology Assessment**: Evaluation of emerging technologies
|
||||
- **Product Development**: Strategic product planning and development
|
||||
- **Innovation Management**: Managing innovation pipelines and initiatives
|
||||
- **Quality Assurance**: Ensuring high standards across development processes
|
||||
|
||||
### Project Management
|
||||
- **Complex Project Planning**: Multi-faceted project strategy development
|
||||
- **Resource Optimization**: Efficient allocation of project resources
|
||||
- **Stakeholder Management**: Coordinating diverse stakeholder interests
|
||||
- **Risk Mitigation**: Identifying and addressing project risks
|
||||
|
||||
### Crisis Management
|
||||
- **Emergency Response**: Rapid response to critical situations
|
||||
- **Stakeholder Communication**: Managing communications during crises
|
||||
- **Recovery Planning**: Developing recovery and prevention strategies
|
||||
- **Legal Compliance**: Ensuring compliance during crisis situations
|
||||
|
||||
## 🎯 Success Criteria
|
||||
|
||||
A successful Board of Directors implementation should demonstrate:
|
||||
|
||||
- ✅ **Democratic Decision Making**: All board members contribute to decisions
|
||||
- ✅ **Consensus Achievement**: Decisions reached through collaborative processes
|
||||
- ✅ **Role Effectiveness**: Each board member fulfills their responsibilities
|
||||
- ✅ **Agent Coordination**: Worker agents execute tasks efficiently
|
||||
- ✅ **Quality Output**: High-quality results through collective intelligence
|
||||
- ✅ **Process Transparency**: Clear visibility into decision-making processes
|
||||
- ✅ **Performance Optimization**: Efficient resource utilization and execution
|
||||
- ✅ **Continuous Improvement**: Learning from each execution cycle
|
||||
|
||||
## 📚 Best Practices
|
||||
|
||||
### 1. Role Definition
|
||||
- Clearly define responsibilities for each board member
|
||||
- Ensure expertise areas align with organizational needs
|
||||
- Balance voting weights based on role importance
|
||||
- Document role interactions and communication protocols
|
||||
|
||||
### 2. Task Formulation
|
||||
- Provide clear, specific task descriptions
|
||||
- Include relevant context and constraints
|
||||
- Specify expected outputs and deliverables
|
||||
- Define quality criteria and success metrics
|
||||
|
||||
### 3. Consensus Building
|
||||
- Allow adequate time for discussion and consensus
|
||||
- Encourage diverse perspectives and viewpoints
|
||||
- Use structured decision-making processes
|
||||
- Implement conflict resolution mechanisms
|
||||
|
||||
### 4. Performance Monitoring
|
||||
- Track decision quality and outcomes
|
||||
- Monitor board member participation
|
||||
- Analyze agent utilization and effectiveness
|
||||
- Implement continuous improvement processes
|
||||
|
||||
### 5. Resource Management
|
||||
- Optimize agent allocation and utilization
|
||||
- Implement parallel execution where appropriate
|
||||
- Monitor resource usage and performance
|
||||
- Scale resources based on task complexity
|
||||
|
||||
---
|
||||
|
||||
The Board of Directors architecture represents a sophisticated approach to multi-agent collaboration, enabling organizations to leverage collective intelligence through structured governance and democratic decision-making processes. This comprehensive implementation provides the tools and frameworks needed to build effective, scalable, and intelligent decision-making systems.
|
@@ -0,0 +1,204 @@
# AgentRearrange

*Dynamically reorganizes agents to optimize task performance and efficiency*

**Swarm Type**: `AgentRearrange`

## Overview

The AgentRearrange swarm type dynamically reorganizes the workflow between agents based on task requirements and performance metrics. This architecture is particularly useful when the effectiveness of agents depends on their sequence or arrangement, allowing for optimal task distribution and execution flow.

Key features:
- **Dynamic Reorganization**: Automatically adjusts agent order based on task needs
- **Performance Optimization**: Optimizes workflow for maximum efficiency
- **Adaptive Sequencing**: Learns from execution patterns to improve arrangement
- **Flexible Task Distribution**: Distributes work based on agent capabilities

## Use Cases

- Complex workflows where task order matters
- Multi-step processes requiring optimization
- Tasks where agent performance varies by sequence
- Adaptive workflow management systems

## API Usage

### Basic AgentRearrange Example

=== "Shell (curl)"
    ```bash
    curl -X POST "https://api.swarms.world/v1/swarm/completions" \
      -H "x-api-key: $SWARMS_API_KEY" \
      -H "Content-Type: application/json" \
      -d '{
        "name": "Document Processing Rearrange",
        "description": "Process documents with dynamic agent reorganization",
        "swarm_type": "AgentRearrange",
        "task": "Analyze this legal document and extract key insights, then summarize findings and identify action items",
        "agents": [
          {
            "agent_name": "Document Analyzer",
            "description": "Analyzes document content and structure",
            "system_prompt": "You are an expert document analyst. Extract key information, themes, and insights from documents.",
            "model_name": "gpt-4o",
            "max_loops": 1,
            "temperature": 0.3
          },
          {
            "agent_name": "Legal Expert",
            "description": "Provides legal context and interpretation",
            "system_prompt": "You are a legal expert. Analyze documents for legal implications, risks, and compliance issues.",
            "model_name": "gpt-4o",
            "max_loops": 1,
            "temperature": 0.2
          },
          {
            "agent_name": "Summarizer",
            "description": "Creates concise summaries and action items",
            "system_prompt": "You are an expert at creating clear, actionable summaries from complex information.",
            "model_name": "gpt-4o",
            "max_loops": 1,
            "temperature": 0.4
          }
        ],
        "rearrange_flow": "Summarizer -> Legal Expert -> Document Analyzer",
        "max_loops": 1
      }'
    ```

=== "Python (requests)"
    ```python
    import requests
    import json

    API_BASE_URL = "https://api.swarms.world"
    API_KEY = "your_api_key_here"

    headers = {
        "x-api-key": API_KEY,
        "Content-Type": "application/json"
    }

    swarm_config = {
        "name": "Document Processing Rearrange",
        "description": "Process documents with dynamic agent reorganization",
        "swarm_type": "AgentRearrange",
        "task": "Analyze this legal document and extract key insights, then summarize findings and identify action items",
        "agents": [
            {
                "agent_name": "Document Analyzer",
                "description": "Analyzes document content and structure",
                "system_prompt": "You are an expert document analyst. Extract key information, themes, and insights from documents.",
                "model_name": "gpt-4o",
                "max_loops": 1,
                "temperature": 0.3
            },
            {
                "agent_name": "Legal Expert",
                "description": "Provides legal context and interpretation",
                "system_prompt": "You are a legal expert. Analyze documents for legal implications, risks, and compliance issues.",
                "model_name": "gpt-4o",
                "max_loops": 1,
                "temperature": 0.2
            },
            {
                "agent_name": "Summarizer",
                "description": "Creates concise summaries and action items",
                "system_prompt": "You are an expert at creating clear, actionable summaries from complex information.",
                "model_name": "gpt-4o",
                "max_loops": 1,
                "temperature": 0.4
            }
        ],
        "rearrange_flow": "Summarizer -> Legal Expert -> Document Analyzer",
        "max_loops": 1
    }

    response = requests.post(
        f"{API_BASE_URL}/v1/swarm/completions",
        headers=headers,
        json=swarm_config
    )

    if response.status_code == 200:
        result = response.json()
        print("AgentRearrange swarm completed successfully!")
        print(f"Cost: ${result['metadata']['billing_info']['total_cost']}")
        print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds")
        print(f"Output: {result['output']}")
    else:
        print(f"Error: {response.status_code} - {response.text}")
    ```

**Example Response**:
```json
{
    "job_id": "swarms-Uc8R7UcepLmNNPwcU7JC6YPy5wiI",
    "status": "success",
    "swarm_name": "Document Processing Rearrange",
    "description": "Process documents with dynamic agent reorganization",
    "swarm_type": "AgentRearrange",
    "output": [
        {
            "role": "Summarizer",
            "content": "\"Of course! Please provide the legal document you would like me to analyze, and I'll help extract key insights, summarize findings, and identify any action items.\""
        },
        {
            "role": "Legal Expert",
            "content": "\"\"Absolutely! Please upload or describe the legal document you need assistance with, and I'll provide an analysis that highlights key insights, summarizes the findings, and identifies any action items that may be necessary.\"\""
        },
        {
            "role": "Document Analyzer",
            "content": "\"Of course! Please provide the legal document you would like me to analyze, and I'll help extract key insights, summarize findings, and identify any action items.\""
        }
    ],
    "number_of_agents": 3,
    "service_tier": "standard",
    "execution_time": 7.898931264877319,
    "usage": {
        "input_tokens": 22,
        "output_tokens": 144,
        "total_tokens": 166,
        "billing_info": {
            "cost_breakdown": {
                "agent_cost": 0.03,
                "input_token_cost": 0.000066,
                "output_token_cost": 0.00216,
                "token_counts": {
                    "total_input_tokens": 22,
                    "total_output_tokens": 144,
                    "total_tokens": 166
                },
                "num_agents": 3,
                "service_tier": "standard",
                "night_time_discount_applied": true
            },
            "total_cost": 0.032226,
            "discount_active": true,
            "discount_type": "night_time",
            "discount_percentage": 75
        }
    }
}
```

## Configuration Options

| Parameter | Type | Description | Default |
|-----------|------|-------------|---------|
| `rearrange_flow` | string | Instructions for how agents should be rearranged | None |
| `agents` | Array<AgentSpec> | List of agents to be dynamically arranged | Required |
| `max_loops` | integer | Maximum rearrangement iterations | 1 |

## Best Practices

- Provide clear `rearrange_flow` instructions for optimal reorganization
- Design agents with complementary but flexible roles
- Use when task complexity requires adaptive sequencing
- Monitor execution patterns to understand rearrangement decisions

## Related Swarm Types

- [SequentialWorkflow](sequential_workflow.md) - For fixed sequential processing
- [AutoSwarmBuilder](auto_swarm_builder.md) - For automatic swarm construction
- [HierarchicalSwarm](hierarchical_swarm.md) - For structured agent hierarchies

@@ -0,0 +1,55 @@
# Auto

*Intelligently selects the most effective swarm architecture for a given task*

**Swarm Type**: `auto` (or `Auto`)

## Overview

The Auto swarm type selects the most effective swarm architecture for a given task based on context analysis and task requirements. It evaluates the task description and automatically chooses the most suitable swarm type from the available architectures, so no manual architecture selection is required.

Key features:
- **Intelligent Selection**: Automatically chooses the best swarm type for each task
- **Context Analysis**: Analyzes task requirements to make optimal decisions
- **Adaptive Architecture**: Adapts to different types of problems automatically
- **Zero Configuration**: No manual architecture selection required

## Use Cases

- When unsure about which swarm type to use
- General-purpose task automation
- Rapid prototyping and experimentation
- Simplified API usage for non-experts

## API Usage

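The request shape below follows the AgentRearrange example shown earlier in this commit; treat it as a minimal sketch of calling the `auto` swarm type rather than an authoritative reference. The task text, the `rules` value, and the decision to omit an explicit `agents` list are illustrative assumptions; adjust them to your use case and consult the API reference for the exact required fields.

```python
import requests

API_BASE_URL = "https://api.swarms.world"
API_KEY = "your_api_key_here"  # placeholder

headers = {
    "x-api-key": API_KEY,
    "Content-Type": "application/json",
}

# With "auto", the API chooses the architecture; the task description is the
# main input, and the optional "rules" string can guide the selection.
swarm_config = {
    "name": "Auto Selection Example",
    "description": "Let the API pick the most suitable swarm architecture",
    "swarm_type": "auto",
    "task": "Research the competitive landscape for home battery storage and produce a short report",
    "rules": "Prefer concise, well-structured output",  # illustrative
    "max_loops": 1,
}

response = requests.post(
    f"{API_BASE_URL}/v1/swarm/completions",
    headers=headers,
    json=swarm_config,
)

if response.status_code == 200:
    result = response.json()
    # The selected architecture is expected in the response metadata.
    print(f"Output: {result['output']}")
else:
    print(f"Error: {response.status_code} - {response.text}")
```
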
## Selection Logic

The Auto swarm type analyzes various factors to make its selection:

| Factor | Consideration |
|--------|---------------|
| **Task Complexity** | Simple → Single agent, Complex → Multi-agent |
| **Sequential Dependencies** | Dependencies → SequentialWorkflow |
| **Parallel Opportunities** | Independent subtasks → ConcurrentWorkflow |
| **Collaboration Needs** | Discussion required → GroupChat |
| **Expertise Diversity** | Multiple domains → MixtureOfAgents |
| **Management Needs** | Oversight required → HierarchicalSwarm |
| **Routing Requirements** | Task distribution → MultiAgentRouter |

## Best Practices

- Provide detailed task descriptions for better selection
- Use `rules` parameter to guide selection criteria
- Review the selected architecture in response metadata
- Ideal for users new to swarm architectures

## Related Swarm Types

Since Auto can select any swarm type, it's related to all architectures:
- [AutoSwarmBuilder](auto_swarm_builder.md) - For automatic agent generation
- [SequentialWorkflow](sequential_workflow.md) - Often selected for linear tasks
- [ConcurrentWorkflow](concurrent_workflow.md) - For parallel processing needs
- [MixtureOfAgents](mixture_of_agents.md) - For diverse expertise requirements

@@ -0,0 +1,46 @@
# AutoSwarmBuilder [Needs a Fix]

*Automatically configures optimal swarm architectures based on task requirements*

**Swarm Type**: `AutoSwarmBuilder`

## Overview

The AutoSwarmBuilder automatically configures an agent architecture based on task requirements and performance metrics, simplifying swarm creation. It analyzes the given task and generates a suitable agent configuration, eliminating the need for manual swarm design.

Key features:
- **Intelligent Configuration**: Automatically designs optimal swarm structures
- **Task-Adaptive**: Adapts architecture based on specific task requirements
- **Performance Optimization**: Selects configurations for maximum efficiency
- **Simplified Setup**: Eliminates manual agent configuration complexity

## Use Cases

- Quick prototyping and experimentation
- Unknown or complex task requirements
- Automated swarm optimization
- Simplified swarm creation for non-experts

## API Usage

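The AutoSwarmBuilder API example is still pending (note the "Needs a Fix" marker above), so the snippet below is only a sketch: it reuses the request shape from the AgentRearrange example earlier in this commit and the parameters from the Configuration Options table below (`task`, `rules`, `max_loops`). Field names and behavior may differ in the released API.

```python
import requests

API_BASE_URL = "https://api.swarms.world"
API_KEY = "your_api_key_here"  # placeholder

headers = {
    "x-api-key": API_KEY,
    "Content-Type": "application/json",
}

# AutoSwarmBuilder designs the agents itself, so only the task-level
# parameters from the table below are supplied here.
swarm_config = {
    "name": "AutoSwarmBuilder Example",
    "description": "Automatically build a swarm for a market-research task",
    "swarm_type": "AutoSwarmBuilder",
    "task": "Summarize the key risks and opportunities in the small-satellite launch market",
    "rules": "Keep the final answer under 500 words",  # illustrative
    "max_loops": 1,
}

response = requests.post(
    f"{API_BASE_URL}/v1/swarm/completions",
    headers=headers,
    json=swarm_config,
)

print(response.status_code)
print(response.json())
```
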
## Configuration Options

| Parameter | Type | Description | Default |
|-----------|------|-------------|---------|
| `task` | string | Task description for automatic optimization | Required |
| `rules` | string | Additional constraints and guidelines | None |
| `max_loops` | integer | Maximum execution rounds | 1 |

## Best Practices

- Provide detailed, specific task descriptions for better optimization
- Use `rules` parameter to guide the automatic configuration
- Ideal for rapid prototyping and experimentation
- Review generated architecture in response metadata

## Related Swarm Types

- [Auto](auto.md) - For automatic swarm type selection
- [MixtureOfAgents](mixture_of_agents.md) - Often selected by AutoSwarmBuilder
- [HierarchicalSwarm](hierarchical_swarm.md) - For complex structured tasks

@ -0,0 +1,279 @@
|
||||
# Deploy AI Agents with Swarms API on Cloudflare Workers
|
||||
|
||||
Deploy intelligent AI agents powered by Swarms API on Cloudflare Workers edge network. Build production-ready cron agents that run automatically, fetch real-time data, perform AI analysis, and execute actions across 330+ cities worldwide.
|
||||
|
||||
<!-- ## Demo Video
|
||||
|
||||
Watch the stock agent in action:
|
||||
|
||||
<iframe width="800" height="450"
|
||||
src="https://www.youtube.com/embed/YOUR_VIDEO_ID"
|
||||
frameborder="0"
|
||||
allowfullscreen>
|
||||
</iframe>
|
||||
|
||||
> **Note**: The demo video shows the complete workflow from data fetching to AI analysis and report generation. -->
|
||||
|
||||
## Overview
|
||||
|
||||
This integration demonstrates how to combine **Swarms API multi-agent intelligence** with **Cloudflare Workers edge computing** to create autonomous AI systems that:
|
||||
|
||||
- ⚡ **Execute automatically** on predefined schedules (cron jobs)
|
||||
- 📊 **Fetch real-time data** from external APIs (Yahoo Finance, news feeds)
|
||||
- 🤖 **Perform intelligent analysis** using specialized Swarms AI agents
|
||||
- 📧 **Take automated actions** (email alerts, reports, notifications)
|
||||
- 🌍 **Scale globally** on Cloudflare's edge network with sub-100ms latency
|
||||
|
||||
## Repository & Complete Implementation
|
||||
|
||||
For the **complete working implementation** with full source code, detailed setup instructions, and ready-to-deploy examples, visit:
|
||||
|
||||
**🔗 [Swarms-CloudFlare-Deployment Repository](https://github.com/The-Swarm-Corporation/Swarms-CloudFlare-Deployment)**
|
||||
|
||||
This repository provides:
|
||||
- **Two complete implementations**: JavaScript and Python
|
||||
- **Production-ready code** with error handling and monitoring
|
||||
- **Step-by-step deployment guides** for both local and production environments
|
||||
- **Real-world examples** including stock analysis agents
|
||||
- **Configuration templates** and environment setup
|
||||
|
||||
## Available Implementations
|
||||
|
||||
The repository provides **two complete implementations** of stock analysis agents:
|
||||
|
||||
### 📂 `stock-agent/` - JavaScript Implementation
|
||||
The original implementation using **JavaScript/TypeScript** on Cloudflare Workers.
|
||||
|
||||
### 📂 `python-stock-agent/` - Python Implementation
|
||||
A **Python Workers** implementation using Cloudflare's beta Python runtime with Pyodide.
|
||||
|
||||
## Stock Analysis Agent Features
|
||||
|
||||
Both implementations demonstrate a complete system that:
|
||||
|
||||
1. **Automated Analysis**: Runs stock analysis every 3 hours using Cloudflare Workers cron
|
||||
2. **Real-time Data**: Fetches market data from Yahoo Finance API (no API key needed)
|
||||
3. **News Integration**: Collects market news from Financial Modeling Prep API (optional)
|
||||
4. **Multi-Agent Analysis**: Deploys multiple Swarms AI agents for technical and fundamental analysis
|
||||
5. **Email Reports**: Sends comprehensive reports via Mailgun
|
||||
6. **Web Interface**: Provides monitoring dashboard for manual triggers and status tracking
|
||||
|
||||
## Implementation Comparison
|
||||
|
||||
| Feature | JavaScript (`stock-agent/`) | Python (`python-stock-agent/`) |
|
||||
|---------|----------------------------|--------------------------------|
|
||||
| **Runtime** | V8 JavaScript Engine | Pyodide Python Runtime |
|
||||
| **Language** | JavaScript/TypeScript | Python 3.x |
|
||||
| **Status** | Production Ready | Beta (Python Workers) |
|
||||
| **Performance** | Optimized V8 execution | Good, with Python stdlib support |
|
||||
| **Syntax** | `fetch()`, `JSON.stringify()` | `await fetch()`, `json.dumps()` |
|
||||
| **Error Handling** | `try/catch` | `try/except` |
|
||||
| **Libraries** | Built-in Web APIs | Python stdlib + select packages |
|
||||
| **Development** | Mature tooling | Growing ecosystem |
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
|
||||
│ Cloudflare │ │ Data Sources │ │ Swarms API │
|
||||
│ Workers Runtime │ │ │ │ │
|
||||
│ "0 */3 * * *" │───▶│ Yahoo Finance │───▶│ Technical Agent │
|
||||
│ JS | Python │ │ News APIs │ │ Fundamental │
|
||||
│ scheduled() │ │ Market Data │ │ Agent Analysis │
|
||||
│ Global Edge │ │ │ │ │
|
||||
└─────────────────┘ └─────────────────┘ └─────────────────┘
|
||||
```
|
||||
|
||||
## Quick Start Guide
|
||||
|
||||
Choose your preferred implementation:
|
||||
|
||||
### Option A: JavaScript Implementation
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/The-Swarm-Corporation/Swarms-CloudFlare-Deployment.git
|
||||
cd Swarms-CloudFlare-Deployment/stock-agent
|
||||
|
||||
# Install dependencies
|
||||
npm install
|
||||
```
|
||||
|
||||
### Option B: Python Implementation
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/The-Swarm-Corporation/Swarms-CloudFlare-Deployment.git
|
||||
cd Swarms-CloudFlare-Deployment/python-stock-agent
|
||||
|
||||
# Install dependencies (Wrangler CLI)
|
||||
npm install
|
||||
```
|
||||
|
||||
### 2. Environment Configuration
|
||||
|
||||
Create a `.dev.vars` file in your chosen directory:
|
||||
|
||||
```env
|
||||
# Required: Swarms API key
|
||||
SWARMS_API_KEY=your-swarms-api-key-here
|
||||
|
||||
# Optional: Market news (free tier available)
|
||||
FMP_API_KEY=your-fmp-api-key
|
||||
|
||||
# Optional: Email notifications
|
||||
MAILGUN_API_KEY=your-mailgun-api-key
|
||||
MAILGUN_DOMAIN=your-domain.com
|
||||
RECIPIENT_EMAIL=your-email@example.com
|
||||
```
|
||||
|
||||
### 3. Cron Schedule Configuration

The cron schedule is configured in `wrangler.jsonc`:

```jsonc
{
  "triggers": {
    "crons": [
      "0 */3 * * *" // Every 3 hours
    ]
  }
}
```

Common cron patterns (a quick way to preview their run times is sketched after this list):

- `"0 9 * * 1-5"` - 9 AM weekdays only
- `"0 */6 * * *"` - Every 6 hours
- `"0 0 * * *"` - Daily at midnight

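If you want to double-check an expression before deploying, here is a small local sketch. It assumes the third-party `croniter` package (`pip install croniter`); it is not part of the Worker itself, just a convenience check on your machine.

```python
# Preview the next few run times for a cron expression (local check only).
from datetime import datetime, timezone

from croniter import croniter  # third-party package, assumed installed locally

schedule = "0 */3 * * *"  # same expression as in wrangler.jsonc
it = croniter(schedule, datetime.now(timezone.utc))

for _ in range(3):
    print(it.get_next(datetime))  # next three UTC trigger times
```
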
### 4. Local Development

```bash
# Start local development server
npm run dev

# Visit http://localhost:8787 to test
```

### 5. Deploy to Cloudflare Workers

```bash
# Deploy to production
npm run deploy

# Your agent will be live at: https://stock-agent.your-subdomain.workers.dev
```

## API Integration Details

### Swarms API Agents

The stock agent uses two specialized AI agents:

1. **Technical Analyst Agent**:
   - Calculates technical indicators (RSI, MACD, Moving Averages)
   - Identifies support/resistance levels
   - Provides trading signals and price targets

2. **Fundamental Analyst Agent**:
   - Analyzes market conditions and sentiment
   - Evaluates news and economic indicators
   - Provides investment recommendations

A minimal request sketch using these two roles follows below.

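The actual prompts and settings live in the Worker source; purely as a hedged sketch of how these two roles map onto the Swarms API request format used elsewhere in this repo (the agent prompts, model choices, and swarm name below are illustrative placeholders, not the Worker's real configuration):

```python
# Illustrative sketch: a two-agent Swarms API request mirroring the
# Technical/Fundamental split described above. Values are placeholders.
import os

import requests

payload = {
    "name": "Stock Analysis",
    "swarm_type": "ConcurrentWorkflow",
    "task": "Analyze SPY using the latest price and news data.",
    "agents": [
        {
            "agent_name": "Technical Analyst Agent",
            "system_prompt": "Compute RSI, MACD and moving averages; identify support/resistance and give trading signals.",
            "model_name": "gpt-4o-mini",
            "max_loops": 1,
        },
        {
            "agent_name": "Fundamental Analyst Agent",
            "system_prompt": "Assess market conditions, sentiment and news; give an investment recommendation.",
            "model_name": "gpt-4o-mini",
            "max_loops": 1,
        },
    ],
    "max_loops": 1,
}

response = requests.post(
    "https://api.swarms.world/v1/swarm/completions",
    headers={"x-api-key": os.environ["SWARMS_API_KEY"], "Content-Type": "application/json"},
    json=payload,
)
print(response.json())
```
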
### Data Sources

- **Yahoo Finance API**: Free real-time stock data (no API key required)
- **Financial Modeling Prep**: Market news and additional data (free tier: 250 requests/day)
- **Mailgun**: Email delivery service (free tier: 5,000 emails/month)

A minimal fetch sketch for the Yahoo Finance source follows this list.

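For reference, a small sketch of pulling a quote from the Yahoo Finance data source listed above. The endpoint and response fields here are assumptions about the public chart API, not code taken from the Worker:

```python
# Hedged sketch: fetch a latest quote from Yahoo Finance's public chart endpoint.
import requests

symbol = "SPY"
url = f"https://query1.finance.yahoo.com/v8/finance/chart/{symbol}"  # assumed endpoint
data = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10).json()

meta = data["chart"]["result"][0]["meta"]  # assumed response shape
print(symbol, meta.get("regularMarketPrice"), meta.get("currency"))
```
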
## Features

### Web Interface

- Real-time status monitoring
- Manual analysis triggers
- Progress tracking with visual feedback
- Analysis results display

### Automated Execution

- Scheduled cron job execution
- Error handling and recovery
- Cost tracking and monitoring
- Email report generation

### Production Ready

- Comprehensive error handling
- Timeout protection
- Rate limiting compliance
- Security best practices

## Configuration Examples

### Custom Stock Symbols

Edit the symbols array in `src/index.js`:

```javascript
const symbols = ['SPY', 'QQQ', 'AAPL', 'MSFT', 'TSLA', 'NVDA', 'AMZN', 'GOOGL'];
```

### Custom Swarms Agents

Modify the agent configuration:

```javascript
const swarmConfig = {
  agents: [
    {
      agent_name: "Risk Assessment Agent",
      system_prompt: "Analyze portfolio risk and provide recommendations...",
      model_name: "gpt-4o-mini",
      max_tokens: 2000,
      temperature: 0.1
    }
  ]
};
```

## Cost Optimization

- **Cloudflare Workers**: Free tier includes 100,000 requests/day
- **Swarms API**: Monitor usage in the dashboard; use gpt-4o-mini for cost efficiency
- **External APIs**: Leverage free tiers and implement intelligent caching

## Security & Best Practices

- Store API keys as Cloudflare Workers secrets
- Implement request validation and rate limiting
- Audit AI decisions and maintain compliance logs
- Use HTTPS for all external API calls

## Monitoring & Observability

- Cloudflare Workers analytics dashboard
- Real-time performance metrics
- Error tracking and alerting
- Cost monitoring and optimization

## Troubleshooting

### Common Issues

1. **API Key Errors**: Verify environment variables are set correctly
2. **Cron Not Triggering**: Check cron syntax and Cloudflare Workers limits
3. **Email Not Sending**: Verify Mailgun configuration and domain setup
4. **Data Fetch Failures**: Check external API status and rate limits

### Debug Mode

For more detailed logging, add `console.log` statements at the points you want to trace; the output appears in `npm run dev` locally and via `wrangler tail` for the deployed Worker:

```javascript
console.log('Debug mode enabled');
```

## Additional Resources

- [Cloudflare Workers Documentation](https://developers.cloudflare.com/workers/)
- [Swarms API Documentation](https://docs.swarms.world/)
- [Cron Expression Generator](https://crontab.guru/)
- [Financial Modeling Prep API](https://financialmodelingprep.com/developer/docs)

@ -0,0 +1,214 @@
|
||||
# ConcurrentWorkflow

*Runs independent tasks in parallel for faster processing*

**Swarm Type**: `ConcurrentWorkflow`

## Overview

The ConcurrentWorkflow swarm type runs independent tasks in parallel, significantly reducing processing time for complex operations. This architecture is ideal for tasks that can be processed simultaneously without dependencies, allowing multiple agents to work on different aspects of a problem at the same time.

Key features:

- **Parallel Execution**: Multiple agents work simultaneously
- **Reduced Processing Time**: Faster completion through parallelization
- **Independent Tasks**: Agents work on separate, non-dependent subtasks
- **Scalable Performance**: Performance scales with the number of agents

## Use Cases

- Independent data analysis tasks
- Parallel content generation
- Multi-source research projects
- Distributed problem solving

## API Usage
|
||||
|
||||
### Basic ConcurrentWorkflow Example
|
||||
|
||||
=== "Shell (curl)"
|
||||
```bash
|
||||
curl -X POST "https://api.swarms.world/v1/swarm/completions" \
|
||||
-H "x-api-key: $SWARMS_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "Market Research Concurrent",
|
||||
"description": "Parallel market research across different sectors",
|
||||
"swarm_type": "ConcurrentWorkflow",
|
||||
"task": "Research and analyze market opportunities in AI, healthcare, fintech, and e-commerce sectors",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "AI Market Analyst",
|
||||
"description": "Analyzes AI market trends and opportunities",
|
||||
"system_prompt": "You are an AI market analyst. Focus on artificial intelligence market trends, opportunities, key players, and growth projections.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Healthcare Market Analyst",
|
||||
"description": "Analyzes healthcare market trends",
|
||||
"system_prompt": "You are a healthcare market analyst. Focus on healthcare market trends, digital health opportunities, regulatory landscape, and growth areas.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Fintech Market Analyst",
|
||||
"description": "Analyzes fintech market opportunities",
|
||||
"system_prompt": "You are a fintech market analyst. Focus on financial technology trends, digital payment systems, blockchain opportunities, and regulatory developments.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "E-commerce Market Analyst",
|
||||
"description": "Analyzes e-commerce market trends",
|
||||
"system_prompt": "You are an e-commerce market analyst. Focus on online retail trends, marketplace opportunities, consumer behavior, and emerging platforms.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}'
|
||||
```
|
||||
|
||||
=== "Python (requests)"
|
||||
```python
|
||||
import requests
|
||||
import json
|
||||
|
||||
API_BASE_URL = "https://api.swarms.world"
|
||||
API_KEY = "your_api_key_here"
|
||||
|
||||
headers = {
|
||||
"x-api-key": API_KEY,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
swarm_config = {
|
||||
"name": "Market Research Concurrent",
|
||||
"description": "Parallel market research across different sectors",
|
||||
"swarm_type": "ConcurrentWorkflow",
|
||||
"task": "Research and analyze market opportunities in AI, healthcare, fintech, and e-commerce sectors",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "AI Market Analyst",
|
||||
"description": "Analyzes AI market trends and opportunities",
|
||||
"system_prompt": "You are an AI market analyst. Focus on artificial intelligence market trends, opportunities, key players, and growth projections.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Healthcare Market Analyst",
|
||||
"description": "Analyzes healthcare market trends",
|
||||
"system_prompt": "You are a healthcare market analyst. Focus on healthcare market trends, digital health opportunities, regulatory landscape, and growth areas.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Fintech Market Analyst",
|
||||
"description": "Analyzes fintech market opportunities",
|
||||
"system_prompt": "You are a fintech market analyst. Focus on financial technology trends, digital payment systems, blockchain opportunities, and regulatory developments.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "E-commerce Market Analyst",
|
||||
"description": "Analyzes e-commerce market trends",
|
||||
"system_prompt": "You are an e-commerce market analyst. Focus on online retail trends, marketplace opportunities, consumer behavior, and emerging platforms.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{API_BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=swarm_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
print("ConcurrentWorkflow swarm completed successfully!")
|
||||
print(f"Cost: ${result['metadata']['billing_info']['total_cost']}")
|
||||
print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds")
|
||||
print(f"Parallel results: {result['output']}")
|
||||
else:
|
||||
print(f"Error: {response.status_code} - {response.text}")
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
```json
|
||||
{
|
||||
"job_id": "swarms-S17nZFDesmLHxCRoeyF3NVYvPaXk",
|
||||
"status": "success",
|
||||
"swarm_name": "Market Research Concurrent",
|
||||
"description": "Parallel market research across different sectors",
|
||||
"swarm_type": "ConcurrentWorkflow",
|
||||
"output": [
|
||||
{
|
||||
"role": "E-commerce Market Analyst",
|
||||
"content": "To analyze market opportunities in the AI, healthcare, fintech, and e-commerce sectors, we can break down each sector's current trends, consumer behavior, and emerging platforms. Here's an overview of each sector with a focus on e-commerce....."
|
||||
},
|
||||
{
|
||||
"role": "AI Market Analyst",
|
||||
"content": "The artificial intelligence (AI) landscape presents numerous opportunities across various sectors, particularly in healthcare, fintech, and e-commerce. Here's a detailed analysis of each sector:\n\n### Healthcare....."
|
||||
},
|
||||
{
|
||||
"role": "Healthcare Market Analyst",
|
||||
"content": "As a Healthcare Market Analyst, I will focus on analyzing market opportunities within the healthcare sector, particularly in the realm of AI and digital health. The intersection of healthcare with fintech and e-commerce also presents unique opportunities. Here's an overview of key trends and growth areas:...."
|
||||
},
|
||||
{
|
||||
"role": "Fintech Market Analyst",
|
||||
"content": "Certainly! Let's break down the market opportunities in the fintech sector, focusing on financial technology trends, digital payment systems, blockchain opportunities, and regulatory developments:\n\n### 1. Financial Technology Trends....."
|
||||
}
|
||||
],
|
||||
"number_of_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"execution_time": 23.360230922698975,
|
||||
"usage": {
|
||||
"input_tokens": 35,
|
||||
"output_tokens": 2787,
|
||||
"total_tokens": 2822,
|
||||
"billing_info": {
|
||||
"cost_breakdown": {
|
||||
"agent_cost": 0.04,
|
||||
"input_token_cost": 0.000105,
|
||||
"output_token_cost": 0.041805,
|
||||
"token_counts": {
|
||||
"total_input_tokens": 35,
|
||||
"total_output_tokens": 2787,
|
||||
"total_tokens": 2822
|
||||
},
|
||||
"num_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"night_time_discount_applied": true
|
||||
},
|
||||
"total_cost": 0.08191,
|
||||
"discount_active": true,
|
||||
"discount_type": "night_time",
|
||||
"discount_percentage": 75
|
||||
}
|
||||
}
|
||||
}
|
||||
```
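
As the example response shows, `output` is a list of `{role, content}` messages, one per agent. A minimal sketch for grouping them by agent after `result = response.json()`:

```python
# Minimal sketch: group ConcurrentWorkflow output messages by agent role.
def results_by_agent(result: dict) -> dict[str, str]:
    grouped: dict[str, str] = {}
    for message in result.get("output", []):
        grouped[message["role"]] = message["content"]
    return grouped

# Example usage (after `result = response.json()`):
# for role, content in results_by_agent(result).items():
#     print(f"--- {role} ---\n{content[:200]}\n")
```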
|
||||
|
||||
## Best Practices

- Design independent tasks that don't require sequential dependencies
- Use for tasks that can be parallelized effectively
- Ensure agents have distinct, non-overlapping responsibilities
- Ideal for time-sensitive analysis requiring multiple perspectives

## Related Swarm Types

- [SequentialWorkflow](sequential_workflow.md) - For ordered execution
- [MixtureOfAgents](mixture_of_agents.md) - For collaborative analysis
- [MultiAgentRouter](multi_agent_router.md) - For intelligent task distribution

@ -0,0 +1,189 @@
|
||||
# GroupChat

*Enables dynamic collaboration through chat-based interaction*

**Swarm Type**: `GroupChat`

## Overview

The GroupChat swarm type enables dynamic collaboration between agents through a chat-based interface, facilitating real-time information sharing and decision-making. Agents participate in a conversational workflow where they can build upon each other's contributions, debate ideas, and reach consensus through natural dialogue.

Key features:

- **Interactive Dialogue**: Agents communicate through natural conversation
- **Dynamic Collaboration**: Real-time information sharing and building upon ideas
- **Consensus Building**: Agents can debate and reach decisions collectively
- **Flexible Participation**: Agents can contribute when relevant to the discussion

## Use Cases

- Brainstorming and ideation sessions
- Multi-perspective problem analysis
- Collaborative decision-making processes
- Creative content development

## API Usage
|
||||
|
||||
### Basic GroupChat Example
|
||||
|
||||
=== "Shell (curl)"
|
||||
```bash
|
||||
curl -X POST "https://api.swarms.world/v1/swarm/completions" \
|
||||
-H "x-api-key: $SWARMS_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "Product Strategy Discussion",
|
||||
"description": "Collaborative chat to develop product strategy",
|
||||
"swarm_type": "GroupChat",
|
||||
"task": "Discuss and develop a go-to-market strategy for a new AI-powered productivity tool targeting small businesses",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Product Manager",
|
||||
"description": "Leads product strategy and development",
|
||||
"system_prompt": "You are a senior product manager. Focus on product positioning, features, user needs, and market fit. Ask probing questions and build on others ideas.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 3,
|
||||
},
|
||||
{
|
||||
"agent_name": "Marketing Strategist",
|
||||
"description": "Develops marketing and positioning strategy",
|
||||
"system_prompt": "You are a marketing strategist. Focus on target audience, messaging, channels, and competitive positioning. Contribute marketing insights to the discussion.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 3,
|
||||
},
|
||||
{
|
||||
"agent_name": "Sales Director",
|
||||
"description": "Provides sales and customer perspective",
|
||||
"system_prompt": "You are a sales director with small business experience. Focus on pricing, sales process, customer objections, and market adoption. Share practical sales insights.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 3,
|
||||
},
|
||||
{
|
||||
"agent_name": "UX Researcher",
|
||||
"description": "Represents user experience and research insights",
|
||||
"system_prompt": "You are a UX researcher specializing in small business tools. Focus on user behavior, usability, adoption barriers, and design considerations.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 3,
|
||||
}
|
||||
],
|
||||
"max_loops": 3
|
||||
}'
|
||||
```
|
||||
|
||||
=== "Python (requests)"
|
||||
```python
|
||||
import requests
|
||||
import json
|
||||
|
||||
API_BASE_URL = "https://api.swarms.world"
|
||||
API_KEY = "your_api_key_here"
|
||||
|
||||
headers = {
|
||||
"x-api-key": API_KEY,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
swarm_config = {
|
||||
"name": "Product Strategy Discussion",
|
||||
"description": "Collaborative chat to develop product strategy",
|
||||
"swarm_type": "GroupChat",
|
||||
"task": "Discuss and develop a go-to-market strategy for a new AI-powered productivity tool targeting small businesses",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Product Manager",
|
||||
"description": "Leads product strategy and development",
|
||||
"system_prompt": "You are a senior product manager. Focus on product positioning, features, user needs, and market fit. Ask probing questions and build on others ideas.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 3,
|
||||
},
|
||||
{
|
||||
"agent_name": "Marketing Strategist",
|
||||
"description": "Develops marketing and positioning strategy",
|
||||
"system_prompt": "You are a marketing strategist. Focus on target audience, messaging, channels, and competitive positioning. Contribute marketing insights to the discussion.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 3,
|
||||
},
|
||||
{
|
||||
"agent_name": "Sales Director",
|
||||
"description": "Provides sales and customer perspective",
|
||||
"system_prompt": "You are a sales director with small business experience. Focus on pricing, sales process, customer objections, and market adoption. Share practical sales insights.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 3,
|
||||
},
|
||||
{
|
||||
"agent_name": "UX Researcher",
|
||||
"description": "Represents user experience and research insights",
|
||||
"system_prompt": "You are a UX researcher specializing in small business tools. Focus on user behavior, usability, adoption barriers, and design considerations.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 3,
|
||||
}
|
||||
],
|
||||
"max_loops": 3
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{API_BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=swarm_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
print("GroupChat swarm completed successfully!")
|
||||
print(f"Cost: ${result['metadata']['billing_info']['total_cost']}")
|
||||
print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds")
|
||||
print(f"Chat discussion: {result['output']}")
|
||||
else:
|
||||
print(f"Error: {response.status_code} - {response.text}")
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
```json
|
||||
{
|
||||
"job_id": "swarms-2COVtf3k0Fz7jU1BOOHF3b5nuL2x",
|
||||
"status": "success",
|
||||
"swarm_name": "Product Strategy Discussion",
|
||||
"description": "Collaborative chat to develop product strategy",
|
||||
"swarm_type": "GroupChat",
|
||||
"output": "User: \n\nSystem: \n Group Chat Name: Product Strategy Discussion\nGroup Chat Description: Collaborative chat to develop product strategy\n Agents in your Group Chat: Available Agents for Team: None\n\n\n\n[Agent 1]\nName: Product Manager\nDescription: Leads product strategy and development\nRole.....",
|
||||
"number_of_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"execution_time": 47.36732482910156,
|
||||
"usage": {
|
||||
"input_tokens": 30,
|
||||
"output_tokens": 1633,
|
||||
"total_tokens": 1663,
|
||||
"billing_info": {
|
||||
"cost_breakdown": {
|
||||
"agent_cost": 0.04,
|
||||
"input_token_cost": 0.00009,
|
||||
"output_token_cost": 0.024495,
|
||||
"token_counts": {
|
||||
"total_input_tokens": 30,
|
||||
"total_output_tokens": 1633,
|
||||
"total_tokens": 1663
|
||||
},
|
||||
"num_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"night_time_discount_applied": false
|
||||
},
|
||||
"total_cost": 0.064585,
|
||||
"discount_active": false,
|
||||
"discount_type": "none",
|
||||
"discount_percentage": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
```
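
Note that for GroupChat the `output` field in the example above is a single transcript string rather than a list of messages. A small sketch for saving it for later review (assuming `result` is the parsed response from the request above):

```python
# Sketch: persist a GroupChat transcript string for later review.
from pathlib import Path


def save_transcript(result: dict, path: str = "groupchat_transcript.txt") -> None:
    """Write the transcript to disk; `result` is the parsed API response."""
    transcript = result["output"]  # for GroupChat, output is a single string
    Path(path).write_text(transcript, encoding="utf-8")
```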
|
||||
|
||||
## Best Practices

- Set clear discussion goals and objectives
- Use diverse agent personalities for richer dialogue
- Allow multiple conversation rounds for idea development
- Encourage agents to build upon each other's contributions

## Related Swarm Types

- [MixtureOfAgents](mixture_of_agents.md) - For complementary expertise
- [MajorityVoting](majority_voting.md) - For consensus decision-making
- [AutoSwarmBuilder](auto_swarm_builder.md) - For automatic discussion setup

@ -0,0 +1,252 @@
|
||||
# HiearchicalSwarm

*Implements structured, multi-level task management with clear authority*

**Swarm Type**: `HiearchicalSwarm`

## Overview

The HiearchicalSwarm implements a structured, multi-level approach to task management with clear lines of authority and delegation. This architecture organizes agents in a hierarchical structure where manager agents coordinate and oversee worker agents, enabling efficient task distribution and quality control.

Key features:

- **Structured Hierarchy**: Clear organizational structure with managers and workers
- **Delegated Authority**: Manager agents distribute tasks to specialized workers
- **Quality Oversight**: Multi-level review and validation processes
- **Scalable Organization**: Efficient coordination of large agent teams

## Use Cases

- Complex projects requiring management oversight
- Large-scale content production workflows
- Multi-stage validation and review processes
- Enterprise-level task coordination

## API Usage
|
||||
|
||||
### Basic HiearchicalSwarm Example
|
||||
|
||||
=== "Shell (curl)"
|
||||
```bash
|
||||
curl -X POST "https://api.swarms.world/v1/swarm/completions" \
|
||||
-H "x-api-key: $SWARMS_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "Market Research ",
|
||||
"description": "Parallel market research across different sectors",
|
||||
"swarm_type": "HiearchicalSwarm",
|
||||
"task": "Research and analyze market opportunities in AI, healthcare, fintech, and e-commerce sectors",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "AI Market Analyst",
|
||||
"description": "Analyzes AI market trends and opportunities",
|
||||
"system_prompt": "You are an AI market analyst. Focus on artificial intelligence market trends, opportunities, key players, and growth projections.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Healthcare Market Analyst",
|
||||
"description": "Analyzes healthcare market trends",
|
||||
"system_prompt": "You are a healthcare market analyst. Focus on healthcare market trends, digital health opportunities, regulatory landscape, and growth areas.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Fintech Market Analyst",
|
||||
"description": "Analyzes fintech market opportunities",
|
||||
"system_prompt": "You are a fintech market analyst. Focus on financial technology trends, digital payment systems, blockchain opportunities, and regulatory developments.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "E-commerce Market Analyst",
|
||||
"description": "Analyzes e-commerce market trends",
|
||||
"system_prompt": "You are an e-commerce market analyst. Focus on online retail trends, marketplace opportunities, consumer behavior, and emerging platforms.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}'
|
||||
```
|
||||
|
||||
=== "Python (requests)"
|
||||
```python
|
||||
import requests
|
||||
import json
|
||||
|
||||
API_BASE_URL = "https://api.swarms.world"
|
||||
API_KEY = "your_api_key_here"
|
||||
|
||||
headers = {
|
||||
"x-api-key": API_KEY,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
swarm_config = {
|
||||
"name": "Market Research ",
|
||||
"description": "Parallel market research across different sectors",
|
||||
"swarm_type": "HiearchicalSwarm",
|
||||
"task": "Research and analyze market opportunities in AI, healthcare, fintech, and e-commerce sectors",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "AI Market Analyst",
|
||||
"description": "Analyzes AI market trends and opportunities",
|
||||
"system_prompt": "You are an AI market analyst. Focus on artificial intelligence market trends, opportunities, key players, and growth projections.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Healthcare Market Analyst",
|
||||
"description": "Analyzes healthcare market trends",
|
||||
"system_prompt": "You are a healthcare market analyst. Focus on healthcare market trends, digital health opportunities, regulatory landscape, and growth areas.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Fintech Market Analyst",
|
||||
"description": "Analyzes fintech market opportunities",
|
||||
"system_prompt": "You are a fintech market analyst. Focus on financial technology trends, digital payment systems, blockchain opportunities, and regulatory developments.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "E-commerce Market Analyst",
|
||||
"description": "Analyzes e-commerce market trends",
|
||||
"system_prompt": "You are an e-commerce market analyst. Focus on online retail trends, marketplace opportunities, consumer behavior, and emerging platforms.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{API_BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=swarm_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
print("HiearchicalSwarm completed successfully!")
|
||||
print(f"Cost: ${result['metadata']['billing_info']['total_cost']}")
|
||||
print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds")
|
||||
print(f"Project plan: {result['output']}")
|
||||
else:
|
||||
print(f"Error: {response.status_code} - {response.text}")
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
```json
|
||||
{
|
||||
"job_id": "swarms-JIrcIAfs2d75xrXGaAL94uWyYJ8V",
|
||||
"status": "success",
|
||||
"swarm_name": "Market Research Auto",
|
||||
"description": "Parallel market research across different sectors",
|
||||
"swarm_type": "HiearchicalSwarm",
|
||||
"output": [
|
||||
{
|
||||
"role": "System",
|
||||
"content": "These are the agents in your team. Each agent has a specific role and expertise to contribute to the team's objectives.\nTotal Agents: 4\n\nBelow is a summary of your team members and their primary responsibilities:\n| Agent Name | Description |\n|------------|-------------|\n| AI Market Analyst | Analyzes AI market trends and opportunities |\n| Healthcare Market Analyst | Analyzes healthcare market trends |\n| Fintech Market Analyst | Analyzes fintech market opportunities |\n| E-commerce Market Analyst | Analyzes e-commerce market trends |\n\nEach agent is designed to handle tasks within their area of expertise. Collaborate effectively by assigning tasks according to these roles."
|
||||
},
|
||||
{
|
||||
"role": "Director",
|
||||
"content": [
|
||||
{
|
||||
"role": "Director",
|
||||
"content": [
|
||||
{
|
||||
"function": {
|
||||
"arguments": "{\"plan\":\"Conduct a comprehensive analysis of market opportunities in the AI, healthcare, fintech, and e-commerce sectors. Each market analyst will focus on their respective sector, gathering data on current trends, growth opportunities, and potential challenges. The findings will be compiled into a report for strategic decision-making.\",\"orders\":[{\"agent_name\":\"AI Market Analyst\",\"task\":\"Research current trends in the AI market, identify growth opportunities, and analyze potential challenges.\"},{\"agent_name\":\"Healthcare Market Analyst\",\"task\":\"Analyze the healthcare market for emerging trends, growth opportunities, and possible challenges.\"},{\"agent_name\":\"Fintech Market Analyst\",\"task\":\"Investigate the fintech sector for current trends, identify opportunities for growth, and assess challenges.\"},{\"agent_name\":\"E-commerce Market Analyst\",\"task\":\"Examine e-commerce market trends, identify growth opportunities, and analyze potential challenges.\"}]}",
|
||||
"name": "ModelMetaclass"
|
||||
},
|
||||
"id": "call_GxiyzIRb2oGQXokbbkeaeVry",
|
||||
"type": "function"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "AI Market Analyst",
|
||||
"content": "### AI Market Analysis: Trends, Opportunities, and Challenges\n\n#### Current Trends in the AI Market:\n\n1. **Increased Adoption Across Industries**..."
|
||||
},
|
||||
{
|
||||
"role": "Healthcare Market Analyst",
|
||||
"content": "### Healthcare Market Analysis: Trends, Opportunities, and Challenges\n\n#### Current Trends in the Healthcare Market:\n\n1. **Telehealth Expansion**..."
|
||||
},
|
||||
{
|
||||
"role": "Fintech Market Analyst",
|
||||
"content": "### Fintech Market Analysis: Trends, Opportunities, and Challenges\n\n#### Current Trends in the Fintech Market:\n\n1. **Digital Payments Proliferation**...."
|
||||
},
|
||||
{
|
||||
"role": "E-commerce Market Analyst",
|
||||
"content": "### E-commerce Market Analysis: Trends, Opportunities, and Challenges\n\n#### Current Trends in the E-commerce Market:\n\n1. **Omnichannel Retailing**...."
|
||||
},
|
||||
{
|
||||
"role": "Director",
|
||||
"content": "### Feedback for Worker Agents\n\n#### AI Market Analyst\n\n**Strengths:**\n- Comprehensive coverage of current trends, growth opportunities, and challenges in the AI market.\n- Clear categorization of insights, making it easy to follow and understand.\n\n**Weaknesses....."
|
||||
},
|
||||
{
|
||||
"role": "System",
|
||||
"content": "--- Loop 1/1 completed ---"
|
||||
}
|
||||
],
|
||||
"number_of_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"execution_time": 94.07934331893921,
|
||||
"usage": {
|
||||
"input_tokens": 35,
|
||||
"output_tokens": 3827,
|
||||
"total_tokens": 3862,
|
||||
"billing_info": {
|
||||
"cost_breakdown": {
|
||||
"agent_cost": 0.04,
|
||||
"input_token_cost": 0.000105,
|
||||
"output_token_cost": 0.057405,
|
||||
"token_counts": {
|
||||
"total_input_tokens": 35,
|
||||
"total_output_tokens": 3827,
|
||||
"total_tokens": 3862
|
||||
},
|
||||
"num_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"night_time_discount_applied": false
|
||||
},
|
||||
"total_cost": 0.09751,
|
||||
"discount_active": false,
|
||||
"discount_type": "none",
|
||||
"discount_percentage": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration Options

| Parameter | Type | Description | Default |
|-----------|------|-------------|---------|
| `role` | string | Agent role: "manager" or "worker" | "worker" |
| `agents` | `Array<AgentSpec>` | Mix of manager and worker agents | Required |
| `max_loops` | integer | Coordination rounds for managers | 1 |

A minimal agent list using the `role` field is sketched below.

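The agent names, prompts, and loop counts in this sketch are illustrative placeholders; only the `role`, `agents`, and `max_loops` fields come from the table above.

```python
# Illustrative sketch: one manager plus two workers, per the options above.
agents = [
    {
        "agent_name": "Research Director",
        "role": "manager",            # coordinates and reviews the workers
        "system_prompt": "Plan the research, assign sub-tasks, and review results.",
        "model_name": "gpt-4o",
        "max_loops": 2,               # extra loops for coordination rounds
    },
    {
        "agent_name": "Data Analyst",
        "role": "worker",
        "system_prompt": "Execute assigned data-analysis sub-tasks.",
        "model_name": "gpt-4o",
        "max_loops": 1,
    },
    {
        "agent_name": "Report Writer",
        "role": "worker",
        "system_prompt": "Draft sections of the final report as assigned.",
        "model_name": "gpt-4o",
        "max_loops": 1,
    },
]
```
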
## Best Practices

- Clearly define manager and worker roles using the `role` parameter
- Give managers higher `max_loops` for coordination activities
- Design worker agents with specialized, focused responsibilities
- Use for complex projects requiring oversight and coordination

## Related Swarm Types

- [SequentialWorkflow](sequential_workflow.md) - For linear task progression
- [MultiAgentRouter](multi_agent_router.md) - For intelligent task routing
- [AutoSwarmBuilder](auto_swarm_builder.md) - For automatic hierarchy creation

@ -0,0 +1,249 @@
|
||||
# MajorityVoting

*Implements robust decision-making through consensus and voting*

**Swarm Type**: `MajorityVoting`

## Overview

The MajorityVoting swarm type implements robust decision-making through consensus mechanisms, ideal for tasks requiring collective intelligence or verification. Multiple agents independently analyze the same problem and vote on the best solution, ensuring high-quality, well-validated outcomes through democratic consensus.

Key features:

- **Consensus-Based Decisions**: Multiple agents vote on the best solution
- **Quality Assurance**: Reduces individual agent bias through collective input
- **Democratic Process**: Fair and transparent decision-making mechanism
- **Robust Validation**: Multiple perspectives ensure thorough analysis

## Use Cases

- Critical decision-making requiring validation
- Quality assurance and verification tasks
- Complex problem solving with multiple viable solutions
- Risk assessment and evaluation scenarios

## API Usage
|
||||
|
||||
### Basic MajorityVoting Example
|
||||
|
||||
=== "Shell (curl)"
|
||||
```bash
|
||||
curl -X POST "https://api.swarms.world/v1/swarm/completions" \
|
||||
-H "x-api-key: $SWARMS_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "Investment Decision Voting",
|
||||
"description": "Multiple financial experts vote on investment recommendations",
|
||||
"swarm_type": "MajorityVoting",
|
||||
"task": "Evaluate whether to invest $1M in a renewable energy startup. Consider market potential, financial projections, team strength, and competitive landscape.",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Growth Investor",
|
||||
"description": "Focuses on growth potential and market opportunity",
|
||||
"system_prompt": "You are a growth-focused venture capitalist. Evaluate investments based on market size, scalability, and growth potential. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Financial Analyst",
|
||||
"description": "Analyzes financial metrics and projections",
|
||||
"system_prompt": "You are a financial analyst specializing in startups. Evaluate financial projections, revenue models, and unit economics. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
},
|
||||
{
|
||||
"agent_name": "Technical Due Diligence",
|
||||
"description": "Evaluates technology and product viability",
|
||||
"system_prompt": "You are a technical due diligence expert. Assess technology viability, intellectual property, product-market fit, and technical risks. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Market Analyst",
|
||||
"description": "Analyzes market conditions and competition",
|
||||
"system_prompt": "You are a market research analyst. Evaluate market dynamics, competitive landscape, regulatory environment, and market timing. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Risk Assessor",
|
||||
"description": "Identifies and evaluates investment risks",
|
||||
"system_prompt": "You are a risk assessment specialist. Identify potential risks, evaluate mitigation strategies, and assess overall risk profile. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}'
|
||||
```
|
||||
|
||||
=== "Python (requests)"
|
||||
```python
|
||||
import requests
|
||||
import json
|
||||
|
||||
API_BASE_URL = "https://api.swarms.world"
|
||||
API_KEY = "your_api_key_here"
|
||||
|
||||
headers = {
|
||||
"x-api-key": API_KEY,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
swarm_config = {
|
||||
"name": "Investment Decision Voting",
|
||||
"description": "Multiple financial experts vote on investment recommendations",
|
||||
"swarm_type": "MajorityVoting",
|
||||
"task": "Evaluate whether to invest $1M in a renewable energy startup. Consider market potential, financial projections, team strength, and competitive landscape.",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Growth Investor",
|
||||
"description": "Focuses on growth potential and market opportunity",
|
||||
"system_prompt": "You are a growth-focused venture capitalist. Evaluate investments based on market size, scalability, and growth potential. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Financial Analyst",
|
||||
"description": "Analyzes financial metrics and projections",
|
||||
"system_prompt": "You are a financial analyst specializing in startups. Evaluate financial projections, revenue models, and unit economics. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
},
|
||||
{
|
||||
"agent_name": "Technical Due Diligence",
|
||||
"description": "Evaluates technology and product viability",
|
||||
"system_prompt": "You are a technical due diligence expert. Assess technology viability, intellectual property, product-market fit, and technical risks. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Market Analyst",
|
||||
"description": "Analyzes market conditions and competition",
|
||||
"system_prompt": "You are a market research analyst. Evaluate market dynamics, competitive landscape, regulatory environment, and market timing. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Risk Assessor",
|
||||
"description": "Identifies and evaluates investment risks",
|
||||
"system_prompt": "You are a risk assessment specialist. Identify potential risks, evaluate mitigation strategies, and assess overall risk profile. Provide a recommendation with confidence score.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{API_BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=swarm_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
print("MajorityVoting completed successfully!")
|
||||
print(f"Final decision: {result['output']['consensus_decision']}")
|
||||
print(f"Vote breakdown: {result['metadata']['vote_breakdown']}")
|
||||
print(f"Cost: ${result['metadata']['billing_info']['total_cost']}")
|
||||
print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds")
|
||||
else:
|
||||
print(f"Error: {response.status_code} - {response.text}")
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
```json
|
||||
{
|
||||
"job_id": "swarms-1WFsSJU2KcvY11lxRMjdQNWFHArI",
|
||||
"status": "success",
|
||||
"swarm_name": "Investment Decision Voting",
|
||||
"description": "Multiple financial experts vote on investment recommendations",
|
||||
"swarm_type": "MajorityVoting",
|
||||
"output": [
|
||||
{
|
||||
"role": "Financial Analyst",
|
||||
"content": [
|
||||
"To evaluate the potential investment in a renewable energy startup, we will assess the technology viability, intellectual property, product-market fit, and technical risks, along with the additional factors of market ....."
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "Technical Due Diligence",
|
||||
"content": [
|
||||
"To evaluate the potential investment in a renewable energy startup, we will analyze the relevant market dynamics, competitive landscape, regulatory environment, and market timing. Here's the breakdown of the assessment......."
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "Market Analyst",
|
||||
"content": [
|
||||
"To evaluate the potential investment in a renewable energy startup, let's break down the key factors:\n\n1. **Market Potential........"
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "Growth Investor",
|
||||
"content": [
|
||||
"To evaluate the potential investment in a renewable energy startup, we need to assess various risk factors and mitigation strategies across several key areas: market potential, financial projections, team strength, and competitive landscape.\n\n### 1. Market Potential\n**Risks:**\n- **Regulatory Changes................"
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "Risk Assessor",
|
||||
"content": [
|
||||
"To provide a comprehensive evaluation of whether to invest $1M in the renewable energy startup, let's break down the key areas.........."
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "Risk Assessor",
|
||||
"content": "To evaluate the potential investment in a renewable energy startup, we need to assess various risk factors and mitigation strategies across several key areas....."
|
||||
}
|
||||
],
|
||||
"number_of_agents": 5,
|
||||
"service_tier": "standard",
|
||||
"execution_time": 61.74853563308716,
|
||||
"usage": {
|
||||
"input_tokens": 39,
|
||||
"output_tokens": 8468,
|
||||
"total_tokens": 8507,
|
||||
"billing_info": {
|
||||
"cost_breakdown": {
|
||||
"agent_cost": 0.05,
|
||||
"input_token_cost": 0.000117,
|
||||
"output_token_cost": 0.12702,
|
||||
"token_counts": {
|
||||
"total_input_tokens": 39,
|
||||
"total_output_tokens": 8468,
|
||||
"total_tokens": 8507
|
||||
},
|
||||
"num_agents": 5,
|
||||
"service_tier": "standard",
|
||||
"night_time_discount_applied": false
|
||||
},
|
||||
"total_cost": 0.177137,
|
||||
"discount_active": false,
|
||||
"discount_type": "none",
|
||||
"discount_percentage": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices

- Use odd numbers of agents to avoid tie votes
- Design agents with different perspectives for robust evaluation
- Include confidence scores in agent prompts for weighted decisions (a toy tallying sketch follows this list)
- Ideal for high-stakes decisions requiring validation

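The voting itself happens inside the swarm, but if your prompts ask each agent to end with a line such as `RECOMMENDATION: INVEST (confidence 0.8)`, the returned messages can also be tallied client-side. This is a toy sketch under that assumption; the line format is something you would enforce via the system prompts, not something the API guarantees:

```python
# Toy sketch: confidence-weighted tally parsed from agent outputs.
import re
from collections import defaultdict

PATTERN = re.compile(r"RECOMMENDATION:\s*(\w+).*?confidence\s*([0-9.]+)", re.IGNORECASE)


def tally(messages: list[dict]) -> dict[str, float]:
    """Sum confidence-weighted votes per recommendation label."""
    votes: dict[str, float] = defaultdict(float)
    for message in messages:
        content = message["content"]
        text = content if isinstance(content, str) else " ".join(content)
        match = PATTERN.search(text)
        if match:
            votes[match.group(1).upper()] += float(match.group(2))
    return dict(votes)

# Example: tally(result["output"]) -> {"INVEST": 2.3, "PASS": 0.7}
```
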
## Related Swarm Types

- [GroupChat](group_chat.md) - For discussion-based consensus
- [MixtureOfAgents](mixture_of_agents.md) - For diverse expertise collaboration
- [HierarchicalSwarm](hierarchical_swarm.md) - For structured decision-making

@ -0,0 +1,222 @@
|
||||
# MixtureOfAgents

*Builds diverse teams of specialized agents for complex problem solving*

**Swarm Type**: `MixtureOfAgents`

## Overview

The MixtureOfAgents swarm type combines multiple agent types with different specializations to tackle diverse aspects of complex problems. Each agent contributes unique skills and perspectives, making this architecture ideal for tasks requiring multiple types of expertise working in harmony.

Key features:

- **Diverse Expertise**: Combines agents with different specializations
- **Collaborative Problem Solving**: Agents work together leveraging their unique strengths
- **Comprehensive Coverage**: Ensures all aspects of complex tasks are addressed
- **Balanced Perspectives**: Multiple viewpoints for robust decision-making

## Use Cases

- Complex research projects requiring multiple disciplines
- Business analysis needing various functional perspectives
- Content creation requiring different expertise areas
- Strategic planning with multiple considerations

## API Usage
|
||||
|
||||
### Basic MixtureOfAgents Example
|
||||
|
||||
=== "Shell (curl)"
|
||||
```bash
|
||||
curl -X POST "https://api.swarms.world/v1/swarm/completions" \
|
||||
-H "x-api-key: $SWARMS_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "Business Strategy Mixture",
|
||||
"description": "Diverse team analyzing business strategy from multiple perspectives",
|
||||
"swarm_type": "MixtureOfAgents",
|
||||
"task": "Develop a comprehensive market entry strategy for a new AI product in the healthcare sector",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Market Research Analyst",
|
||||
"description": "Analyzes market trends and opportunities",
|
||||
"system_prompt": "You are a market research expert specializing in healthcare technology. Analyze market size, trends, and competitive landscape.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Financial Analyst",
|
||||
"description": "Evaluates financial viability and projections",
|
||||
"system_prompt": "You are a financial analyst expert. Assess financial implications, ROI, and cost structures for business strategies.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
},
|
||||
{
|
||||
"agent_name": "Regulatory Expert",
|
||||
"description": "Analyzes compliance and regulatory requirements",
|
||||
"system_prompt": "You are a healthcare regulatory expert. Analyze compliance requirements, regulatory pathways, and potential barriers.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.1
|
||||
},
|
||||
{
|
||||
"agent_name": "Technology Strategist",
|
||||
"description": "Evaluates technical feasibility and strategy",
|
||||
"system_prompt": "You are a technology strategy expert. Assess technical requirements, implementation challenges, and scalability.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}'
|
||||
```
|
||||
|
||||
=== "Python (requests)"
|
||||
```python
|
||||
import requests
|
||||
import json
|
||||
|
||||
API_BASE_URL = "https://api.swarms.world"
|
||||
API_KEY = "your_api_key_here"
|
||||
|
||||
headers = {
|
||||
"x-api-key": API_KEY,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
swarm_config = {
|
||||
"name": "Business Strategy Mixture",
|
||||
"description": "Diverse team analyzing business strategy from multiple perspectives",
|
||||
"swarm_type": "MixtureOfAgents",
|
||||
"task": "Develop a comprehensive market entry strategy for a new AI product in the healthcare sector",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Market Research Analyst",
|
||||
"description": "Analyzes market trends and opportunities",
|
||||
"system_prompt": "You are a market research expert specializing in healthcare technology. Analyze market size, trends, and competitive landscape.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Financial Analyst",
|
||||
"description": "Evaluates financial viability and projections",
|
||||
"system_prompt": "You are a financial analyst expert. Assess financial implications, ROI, and cost structures for business strategies.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
},
|
||||
{
|
||||
"agent_name": "Regulatory Expert",
|
||||
"description": "Analyzes compliance and regulatory requirements",
|
||||
"system_prompt": "You are a healthcare regulatory expert. Analyze compliance requirements, regulatory pathways, and potential barriers.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.1
|
||||
},
|
||||
{
|
||||
"agent_name": "Technology Strategist",
|
||||
"description": "Evaluates technical feasibility and strategy",
|
||||
"system_prompt": "You are a technology strategy expert. Assess technical requirements, implementation challenges, and scalability.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{API_BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=swarm_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
print("MixtureOfAgents swarm completed successfully!")
|
||||
print(f"Cost: ${result['metadata']['billing_info']['total_cost']}")
|
||||
print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds")
|
||||
print(f"Output: {result['output']}")
|
||||
else:
|
||||
print(f"Error: {response.status_code} - {response.text}")
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
```json
|
||||
{
|
||||
"job_id": "swarms-kBZaJg1uGTkRbLCAsGztL2jrp5Mj",
|
||||
"status": "success",
|
||||
"swarm_name": "Business Strategy Mixture",
|
||||
"description": "Diverse team analyzing business strategy from multiple perspectives",
|
||||
"swarm_type": "MixtureOfAgents",
|
||||
"output": [
|
||||
{
|
||||
"role": "System",
|
||||
"content": "Team Name: Business Strategy Mixture\nTeam Description: Diverse team analyzing business strategy from multiple perspectives\nThese are the agents in your team. Each agent has a specific role and expertise to contribute to the team's objectives.\nTotal Agents: 4\n\nBelow is a summary of your team members and their primary responsibilities:\n| Agent Name | Description |\n|------------|-------------|\n| Market Research Analyst | Analyzes market trends and opportunities |\n| Financial Analyst | Evaluates financial viability and projections |\n| Regulatory Expert | Analyzes compliance and regulatory requirements |\n| Technology Strategist | Evaluates technical feasibility and strategy |\n\nEach agent is designed to handle tasks within their area of expertise. Collaborate effectively by assigning tasks according to these roles."
|
||||
},
|
||||
{
|
||||
"role": "Market Research Analyst",
|
||||
"content": "To develop a comprehensive market entry strategy for a new AI product in the healthcare sector, we will leverage the expertise of each team member to cover all critical aspects of the strategy. Here's how each agent will contribute......."
|
||||
},
|
||||
{
|
||||
"role": "Technology Strategist",
|
||||
"content": "To develop a comprehensive market entry strategy for a new AI product in the healthcare sector, we'll need to collaborate effectively with the team, leveraging each member's expertise. Here's how each agent can contribute to the strategy, along with a focus on the technical requirements, implementation challenges, and scalability from the technology strategist's perspective....."
|
||||
},
|
||||
{
|
||||
"role": "Financial Analyst",
|
||||
"content": "Developing a comprehensive market entry strategy for a new AI product in the healthcare sector involves a multidisciplinary approach. Each agent in the Business Strategy Mixture team will play a crucial role in ensuring a successful market entry. Here's how the team can collaborate........"
|
||||
},
|
||||
{
|
||||
"role": "Regulatory Expert",
|
||||
"content": "To develop a comprehensive market entry strategy for a new AI product in the healthcare sector, we need to leverage the expertise of each agent in the Business Strategy Mixture team. Below is an outline of how each team member can contribute to this strategy......"
|
||||
},
|
||||
{
|
||||
"role": "Aggregator Agent",
|
||||
"content": "As the Aggregator Agent, I've observed and analyzed the responses from the Business Strategy Mixture team regarding the development of a comprehensive market entry strategy for a new AI product in the healthcare sector. Here's a summary of the key points ......"
|
||||
}
|
||||
],
|
||||
"number_of_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"execution_time": 30.230480670928955,
|
||||
"usage": {
|
||||
"input_tokens": 30,
|
||||
"output_tokens": 3401,
|
||||
"total_tokens": 3431,
|
||||
"billing_info": {
|
||||
"cost_breakdown": {
|
||||
"agent_cost": 0.04,
|
||||
"input_token_cost": 0.00009,
|
||||
"output_token_cost": 0.051015,
|
||||
"token_counts": {
|
||||
"total_input_tokens": 30,
|
||||
"total_output_tokens": 3401,
|
||||
"total_tokens": 3431
|
||||
},
|
||||
"num_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"night_time_discount_applied": true
|
||||
},
|
||||
"total_cost": 0.091105,
|
||||
"discount_active": true,
|
||||
"discount_type": "night_time",
|
||||
"discount_percentage": 75
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices

- Select agents with complementary and diverse expertise
- Ensure each agent has a clear, specialized role
- Use for complex problems requiring multiple perspectives
- Design tasks that benefit from collaborative analysis

## Related Swarm Types

- [ConcurrentWorkflow](concurrent_workflow.md) - For parallel task execution
- [GroupChat](group_chat.md) - For collaborative discussion
- [AutoSwarmBuilder](auto_swarm_builder.md) - For automatic team assembly

@ -0,0 +1,211 @@
|
||||
# MultiAgentRouter

*Intelligent task dispatcher distributing work based on agent capabilities*

**Swarm Type**: `MultiAgentRouter`

## Overview

The MultiAgentRouter acts as an intelligent task dispatcher, distributing work across agents based on their capabilities and current workload. This architecture analyzes incoming tasks and automatically routes them to the most suitable agents, optimizing both efficiency and quality of outcomes.

Key features:

- **Intelligent Routing**: Automatically assigns tasks to best-suited agents
- **Capability Matching**: Matches task requirements with agent specializations
- **Load Balancing**: Distributes workload efficiently across available agents
- **Dynamic Assignment**: Adapts routing based on agent performance and availability

## Use Cases

- Customer service request routing
- Content categorization and processing
- Technical support ticket assignment
- Multi-domain question answering

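As a rough mental model of the capability matching described above, routing can be pictured as scoring each agent's description against the incoming task. This toy sketch is only an illustration of the idea; the real MultiAgentRouter runs server-side and is more sophisticated than a keyword overlap score:

```python
# Toy illustration of capability-based routing (not the API's implementation).
import re


def words(text: str) -> set[str]:
    return set(re.findall(r"[a-z]+", text.lower()))


def route(task: str, agents: list[dict]) -> str:
    """Return the agent_name whose description best overlaps with the task."""
    task_words = words(task)
    return max(agents, key=lambda a: len(task_words & words(a["description"])))["agent_name"]


agents = [
    {"agent_name": "Billing Specialist", "description": "Handles billing, payments, and account issues"},
    {"agent_name": "Technical Support", "description": "Resolves technical issues and troubleshooting"},
]
print(route("I was overcharged on my billing statement", agents))  # -> Billing Specialist
```
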
## API Usage
|
||||
|
||||
### Basic MultiAgentRouter Example
|
||||
|
||||
=== "Shell (curl)"
|
||||
```bash
|
||||
curl -X POST "https://api.swarms.world/v1/swarm/completions" \
|
||||
-H "x-api-key: $SWARMS_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "Customer Support Router",
|
||||
"description": "Route customer inquiries to specialized support agents",
|
||||
"swarm_type": "MultiAgentRouter",
|
||||
"task": "Handle multiple customer inquiries: 1) Billing question about overcharge, 2) Technical issue with mobile app login, 3) Product recommendation for enterprise client, 4) Return policy question",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Billing Specialist",
|
||||
"description": "Handles billing, payments, and account issues",
|
||||
"system_prompt": "You are a billing specialist. Handle all billing inquiries, payment issues, refunds, and account-related questions with empathy and accuracy.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Technical Support",
|
||||
"description": "Resolves technical issues and troubleshooting",
|
||||
"system_prompt": "You are a technical support specialist. Diagnose and resolve technical issues, provide step-by-step troubleshooting, and escalate complex problems.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
},
|
||||
{
|
||||
"agent_name": "Sales Consultant",
|
||||
"description": "Provides product recommendations and sales support",
|
||||
"system_prompt": "You are a sales consultant. Provide product recommendations, explain features and benefits, and help customers find the right solutions.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.4
|
||||
},
|
||||
{
|
||||
"agent_name": "Policy Advisor",
|
||||
"description": "Explains company policies and procedures",
|
||||
"system_prompt": "You are a policy advisor. Explain company policies, terms of service, return procedures, and compliance requirements clearly.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.1
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}'
|
||||
```
|
||||
|
||||
=== "Python (requests)"
|
||||
```python
|
||||
import requests
|
||||
import json
|
||||
|
||||
API_BASE_URL = "https://api.swarms.world"
|
||||
API_KEY = "your_api_key_here"
|
||||
|
||||
headers = {
|
||||
"x-api-key": API_KEY,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
swarm_config = {
|
||||
"name": "Customer Support Router",
|
||||
"description": "Route customer inquiries to specialized support agents",
|
||||
"swarm_type": "MultiAgentRouter",
|
||||
"task": "Handle multiple customer inquiries: 1) Billing question about overcharge, 2) Technical issue with mobile app login, 3) Product recommendation for enterprise client, 4) Return policy question",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Billing Specialist",
|
||||
"description": "Handles billing, payments, and account issues",
|
||||
"system_prompt": "You are a billing specialist. Handle all billing inquiries, payment issues, refunds, and account-related questions with empathy and accuracy.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Technical Support",
|
||||
"description": "Resolves technical issues and troubleshooting",
|
||||
"system_prompt": "You are a technical support specialist. Diagnose and resolve technical issues, provide step-by-step troubleshooting, and escalate complex problems.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
},
|
||||
{
|
||||
"agent_name": "Sales Consultant",
|
||||
"description": "Provides product recommendations and sales support",
|
||||
"system_prompt": "You are a sales consultant. Provide product recommendations, explain features and benefits, and help customers find the right solutions.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.4
|
||||
},
|
||||
{
|
||||
"agent_name": "Policy Advisor",
|
||||
"description": "Explains company policies and procedures",
|
||||
"system_prompt": "You are a policy advisor. Explain company policies, terms of service, return procedures, and compliance requirements clearly.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.1
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{API_BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=swarm_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
print("MultiAgentRouter completed successfully!")
|
||||
print(f"Routing decisions: {result['metadata']['routing_decisions']}")
|
||||
print(f"Cost: ${result['metadata']['billing_info']['total_cost']}")
|
||||
print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds")
|
||||
print(f"Customer responses: {result['output']}")
|
||||
else:
|
||||
print(f"Error: {response.status_code} - {response.text}")
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
```json
|
||||
{
|
||||
"job_id": "swarms-OvOZHubprE3thzLmRdNBZAxA6om4",
|
||||
"status": "success",
|
||||
"swarm_name": "Customer Support Router",
|
||||
"description": "Route customer inquiries to specialized support agents",
|
||||
"swarm_type": "MultiAgentRouter",
|
||||
"output": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": "Handle multiple customer inquiries: 1) Billing question about overcharge, 2) Technical issue with mobile app login, 3) Product recommendation for enterprise client, 4) Return policy question"
|
||||
},
|
||||
{
|
||||
"role": "Agent Router",
|
||||
"content": "selected_agent='Billing Specialist' reasoning='The task involves multiple inquiries, but the first one is about a billing question regarding an overcharge. Billing issues often require immediate attention to ensure customer satisfaction and prevent further complications. Therefore, the Billing Specialist is the most appropriate agent to handle this task. They can address the billing question directly and potentially coordinate with other agents for the remaining inquiries.' modified_task='Billing question about overcharge'"
|
||||
},
|
||||
{
|
||||
"role": "Billing Specialist",
|
||||
"content": "Of course, I'd be happy to help you with your billing question regarding an overcharge. Could you please provide me with more details about the charge in question, such as the date it occurred and the amount? This information will help me look into your account and resolve the issue as quickly as possible."
|
||||
}
|
||||
],
|
||||
"number_of_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"execution_time": 7.800086975097656,
|
||||
"usage": {
|
||||
"input_tokens": 28,
|
||||
"output_tokens": 221,
|
||||
"total_tokens": 249,
|
||||
"billing_info": {
|
||||
"cost_breakdown": {
|
||||
"agent_cost": 0.04,
|
||||
"input_token_cost": 0.000084,
|
||||
"output_token_cost": 0.003315,
|
||||
"token_counts": {
|
||||
"total_input_tokens": 28,
|
||||
"total_output_tokens": 221,
|
||||
"total_tokens": 249
|
||||
},
|
||||
"num_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"night_time_discount_applied": true
|
||||
},
|
||||
"total_cost": 0.043399,
|
||||
"discount_active": true,
|
||||
"discount_type": "night_time",
|
||||
"discount_percentage": 75
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
- Define agents with clear, distinct specializations
|
||||
- Use descriptive agent names and descriptions for better routing
|
||||
- Ideal for handling diverse task types that require different expertise
|
||||
- Monitor routing decisions to optimize agent configurations
|
||||
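To monitor routing decisions (the last point above), note that the router's reasoning is returned in the response `output` as messages with the `Agent Router` role, as in the example response shown earlier. A small helper written against that response shape can collect those decisions; `extract_routing_decisions` below is a hypothetical name, not part of the API client.

```python
def extract_routing_decisions(result: dict) -> list:
    """Collect the router's reasoning messages from a completed swarm response."""
    return [
        message["content"]
        for message in result.get("output", [])
        if message.get("role") == "Agent Router"
    ]


# Assuming `result = response.json()` from the request above:
# for decision in extract_routing_decisions(result):
#     print(decision)
```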
|
||||
## Related Swarm Types
|
||||
|
||||
- [HierarchicalSwarm](hierarchical_swarm.md) - For structured task management
|
||||
- [ConcurrentWorkflow](concurrent_workflow.md) - For parallel task processing
|
||||
- [AutoSwarmBuilder](auto_swarm_builder.md) - For automatic routing setup
|
@ -0,0 +1,214 @@
|
||||
# SequentialWorkflow
|
||||
|
||||
*Executes tasks in a strict, predefined order for step-by-step processing*
|
||||
|
||||
**Swarm Type**: `SequentialWorkflow`
|
||||
|
||||
## Overview
|
||||
|
||||
The SequentialWorkflow swarm type executes tasks in a strict, predefined order where each step depends on the completion of the previous one. This architecture is perfect for workflows that require a linear progression of tasks, ensuring that each agent builds upon the work of the previous agent.
|
||||
|
||||
Key features:
|
||||
- **Ordered Execution**: Agents execute in a specific, predefined sequence
|
||||
- **Step Dependencies**: Each step builds upon previous results
|
||||
- **Predictable Flow**: Clear, linear progression through the workflow
|
||||
- **Quality Control**: Each agent can validate and enhance previous work
|
||||
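Conceptually, each agent's output feeds into the next agent's input, so the final result accumulates every step's contribution. The minimal sketch below illustrates that chaining pattern with plain Python callables standing in for LLM-backed agents; it is not the hosted SequentialWorkflow implementation, and `run_sequential` is a hypothetical helper used only for illustration.

```python
# Minimal sketch of sequential chaining: each step receives the previous step's
# output as part of its input. Illustrative only.
from typing import Callable, List

Step = Callable[[str], str]


def run_sequential(task: str, steps: List[Step]) -> str:
    result = task
    for step in steps:
        # Each step builds on the accumulated result of the previous steps.
        result = step(result)
    return result


# Toy "agents" standing in for research, writing, and editing stages.
steps = [
    lambda text: text + "\n[research notes added]",
    lambda text: text + "\n[draft written]",
    lambda text: text + "\n[edited for clarity]",
]

print(run_sequential("Blog post: the future of renewable energy", steps))
```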
|
||||
## Use Cases
|
||||
|
||||
- Document processing pipelines
|
||||
- Multi-stage analysis workflows
|
||||
- Content creation and editing processes
|
||||
- Data transformation and validation pipelines
|
||||
|
||||
## API Usage
|
||||
|
||||
### Basic SequentialWorkflow Example
|
||||
|
||||
=== "Shell (curl)"
|
||||
```bash
|
||||
curl -X POST "https://api.swarms.world/v1/swarm/completions" \
|
||||
-H "x-api-key: $SWARMS_API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"name": "Content Creation Pipeline",
|
||||
"description": "Sequential content creation from research to final output",
|
||||
"swarm_type": "SequentialWorkflow",
|
||||
"task": "Create a comprehensive blog post about the future of renewable energy",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Research Specialist",
|
||||
"description": "Conducts thorough research on the topic",
|
||||
"system_prompt": "You are a research specialist. Gather comprehensive, accurate information on the given topic from reliable sources.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Content Writer",
|
||||
"description": "Creates engaging written content",
|
||||
"system_prompt": "You are a skilled content writer. Transform research into engaging, well-structured articles that are informative and readable.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.6
|
||||
},
|
||||
{
|
||||
"agent_name": "Editor",
|
||||
"description": "Reviews and polishes the content",
|
||||
"system_prompt": "You are a professional editor. Review content for clarity, grammar, flow, and overall quality. Make improvements while maintaining the author's voice.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.4
|
||||
},
|
||||
{
|
||||
"agent_name": "SEO Optimizer",
|
||||
"description": "Optimizes content for search engines",
|
||||
"system_prompt": "You are an SEO expert. Optimize content for search engines while maintaining readability and quality.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}'
|
||||
```
|
||||
|
||||
=== "Python (requests)"
|
||||
```python
|
||||
import requests
|
||||
import json
|
||||
|
||||
API_BASE_URL = "https://api.swarms.world"
|
||||
API_KEY = "your_api_key_here"
|
||||
|
||||
headers = {
|
||||
"x-api-key": API_KEY,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
swarm_config = {
|
||||
"name": "Content Creation Pipeline",
|
||||
"description": "Sequential content creation from research to final output",
|
||||
"swarm_type": "SequentialWorkflow",
|
||||
"task": "Create a comprehensive blog post about the future of renewable energy",
|
||||
"agents": [
|
||||
{
|
||||
"agent_name": "Research Specialist",
|
||||
"description": "Conducts thorough research on the topic",
|
||||
"system_prompt": "You are a research specialist. Gather comprehensive, accurate information on the given topic from reliable sources.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.3
|
||||
},
|
||||
{
|
||||
"agent_name": "Content Writer",
|
||||
"description": "Creates engaging written content",
|
||||
"system_prompt": "You are a skilled content writer. Transform research into engaging, well-structured articles that are informative and readable.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.6
|
||||
},
|
||||
{
|
||||
"agent_name": "Editor",
|
||||
"description": "Reviews and polishes the content",
|
||||
"system_prompt": "You are a professional editor. Review content for clarity, grammar, flow, and overall quality. Make improvements while maintaining the author's voice.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.4
|
||||
},
|
||||
{
|
||||
"agent_name": "SEO Optimizer",
|
||||
"description": "Optimizes content for search engines",
|
||||
"system_prompt": "You are an SEO expert. Optimize content for search engines while maintaining readability and quality.",
|
||||
"model_name": "gpt-4o",
|
||||
"max_loops": 1,
|
||||
"temperature": 0.2
|
||||
}
|
||||
],
|
||||
"max_loops": 1
|
||||
}
|
||||
|
||||
response = requests.post(
|
||||
f"{API_BASE_URL}/v1/swarm/completions",
|
||||
headers=headers,
|
||||
json=swarm_config
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
result = response.json()
|
||||
print("SequentialWorkflow swarm completed successfully!")
|
||||
print(f"Cost: ${result['metadata']['billing_info']['total_cost']}")
|
||||
print(f"Execution time: {result['metadata']['execution_time_seconds']} seconds")
|
||||
print(f"Final output: {result['output']}")
|
||||
else:
|
||||
print(f"Error: {response.status_code} - {response.text}")
|
||||
```
|
||||
|
||||
**Example Response**:
|
||||
```json
|
||||
{
|
||||
"job_id": "swarms-pbM8wqUwxq8afGeROV2A4xAcncd1",
|
||||
"status": "success",
|
||||
"swarm_name": "Content Creation Pipeline",
|
||||
"description": "Sequential content creation from research to final output",
|
||||
"swarm_type": "SequentialWorkflow",
|
||||
"output": [
|
||||
{
|
||||
"role": "Research Specialist",
|
||||
"content": "\"**Title: The Future of Renewable Energy: Charting a Sustainable Path Forward**\n\nAs we navigate the complexities of the 21st century, the transition to renewable energy stands out as a critical endeavor to ensure a sustainable future......"
|
||||
},
|
||||
{
|
||||
"role": "SEO Optimizer",
|
||||
"content": "\"**Title: The Future of Renewable Energy: Charting a Sustainable Path Forward**\n\nThe transition to renewable energy is crucial as we face the challenges of the 21st century, including climate change and dwindling fossil fuel resources......."
|
||||
},
|
||||
{
|
||||
"role": "Editor",
|
||||
"content": "\"**Title: The Future of Renewable Energy: Charting a Sustainable Path Forward**\n\nAs we confront the challenges of the 21st century, transitioning to renewable energy is essential for a sustainable future. With climate change concerns escalating and fossil fuel reserves depleting, renewable energy is not just an option but a necessity...."
|
||||
},
|
||||
{
|
||||
"role": "Content Writer",
|
||||
"content": "\"**Title: The Future of Renewable Energy: Charting a Sustainable Path Forward**\n\nAs we face the multifaceted challenges of the 21st century, transitioning to renewable energy emerges as not just an option but an essential step toward a sustainable future...."
|
||||
}
|
||||
],
|
||||
"number_of_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"execution_time": 72.23084282875061,
|
||||
"usage": {
|
||||
"input_tokens": 28,
|
||||
"output_tokens": 3012,
|
||||
"total_tokens": 3040,
|
||||
"billing_info": {
|
||||
"cost_breakdown": {
|
||||
"agent_cost": 0.04,
|
||||
"input_token_cost": 0.000084,
|
||||
"output_token_cost": 0.04518,
|
||||
"token_counts": {
|
||||
"total_input_tokens": 28,
|
||||
"total_output_tokens": 3012,
|
||||
"total_tokens": 3040
|
||||
},
|
||||
"num_agents": 4,
|
||||
"service_tier": "standard",
|
||||
"night_time_discount_applied": true
|
||||
},
|
||||
"total_cost": 0.085264,
|
||||
"discount_active": true,
|
||||
"discount_type": "night_time",
|
||||
"discount_percentage": 75
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
- Design agents with clear, sequential dependencies
|
||||
- Ensure each agent builds meaningfully on the previous work
|
||||
- Use for linear workflows where order matters
|
||||
- Validate outputs at each step before proceeding
|
||||
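For the validation point above, each agent's contribution comes back as a separate entry in the response `output` (see the example response shown earlier), so intermediate results can be reviewed after the run. The helper below is a hypothetical sketch written against that response shape.

```python
def review_steps(result: dict) -> None:
    """Print each agent's contribution so intermediate outputs can be checked."""
    for step in result.get("output", []):
        print(f"--- {step.get('role', 'unknown')} ---")
        print(step.get("content", "")[:500])  # preview the first 500 characters


# Assuming `result = response.json()` from the request above:
# review_steps(result)
```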
|
||||
## Related Swarm Types
|
||||
|
||||
- [ConcurrentWorkflow](concurrent_workflow.md) - For parallel execution
|
||||
- [AgentRearrange](agent_rearrange.md) - For dynamic sequencing
|
||||
- [HierarchicalSwarm](hierarchical_swarm.md) - For structured workflows
|
@ -1,30 +1,28 @@
|
||||
# Multi-Agent Architectures
|
||||
|
||||
Each multi-agent architecture type is designed for specific use cases and can be combined to create powerful multi-agent systems. Here's a comprehensive overview of each available swarm:
|
||||
Each multi-agent architecture type is designed for specific use cases and can be combined to create powerful multi-agent systems. Below is an overview of each available swarm type:
|
||||
|
||||
| Swarm Type | Description | Learn More |
|
||||
|---------------------|------------------------------------------------------------------------------|------------|
|
||||
| AgentRearrange | Dynamically reorganizes agents to optimize task performance and efficiency. Optimizes agent performance by dynamically adjusting their roles and positions within the workflow. This architecture is particularly useful when the effectiveness of agents depends on their sequence or arrangement. | [Learn More](/swarms/structs/agent_rearrange) |
|
||||
| MixtureOfAgents | Creates diverse teams of specialized agents, each bringing unique capabilities to solve complex problems. Each agent contributes unique skills to achieve the overall goal, making it excel at tasks requiring multiple types of expertise or processing. | [Learn More](/swarms/structs/moa) |
|
||||
| SpreadSheetSwarm | Provides a structured approach to data management and operations, making it ideal for tasks involving data analysis, transformation, and systematic processing in a spreadsheet-like structure. | [Learn More](/swarms/structs/spreadsheet_swarm) |
|
||||
| SequentialWorkflow | Ensures strict process control by executing tasks in a predefined order. Perfect for workflows where each step depends on the completion of previous steps. | [Learn More](/swarms/structs/sequential_workflow) |
|
||||
| ConcurrentWorkflow | Maximizes efficiency by running independent tasks in parallel, significantly reducing overall processing time for complex operations. Ideal for independent tasks that can be processed simultaneously. | [Learn More](/swarms/structs/concurrentworkflow) |
|
||||
| GroupChat | Enables dynamic collaboration between agents through a chat-based interface, facilitating real-time information sharing and decision-making. | [Learn More](/swarms/structs/group_chat) |
|
||||
| MultiAgentRouter | Acts as an intelligent task dispatcher, ensuring optimal distribution of work across available agents based on their capabilities and current workload. | [Learn More](/swarms/structs/multi_agent_router) |
|
||||
| AutoSwarmBuilder | Simplifies swarm creation by automatically configuring agent architectures based on task requirements and performance metrics. | [Learn More](/swarms/structs/auto_swarm_builder) |
|
||||
| HiearchicalSwarm | Implements a structured approach to task management, with clear lines of authority and delegation across multiple agent levels. | [Learn More](/swarms/structs/multi_swarm_orchestration) |
|
||||
| auto | Provides intelligent swarm selection based on context, automatically choosing the most effective architecture for given tasks. | [Learn More](/swarms/concept/how_to_choose_swarms) |
|
||||
| MajorityVoting | Implements robust decision-making through consensus, particularly useful for tasks requiring collective intelligence or verification. | [Learn More](/swarms/structs/majorityvoting) |
|
||||
| MALT | Specialized framework for language-based tasks, optimizing agent collaboration for complex language processing operations. | [Learn More](/swarms/structs/malt) |
|
||||
|----------------------|------------------------------------------------------------------------------|------------|
|
||||
| AgentRearrange | Dynamically reorganizes agents to optimize task performance and efficiency. Useful when agent effectiveness depends on their sequence or arrangement. | [Learn More](agent_rearrange.md) |
|
||||
| MixtureOfAgents | Builds diverse teams of specialized agents, each contributing unique skills to solve complex problems. Excels at tasks requiring multiple types of expertise. | [Learn More](mixture_of_agents.md) |
|
||||
| SequentialWorkflow | Executes tasks in a strict, predefined order. Perfect for workflows where each step depends on the completion of the previous one. | [Learn More](sequential_workflow.md) |
|
||||
| ConcurrentWorkflow | Runs independent tasks in parallel, significantly reducing processing time for complex operations. Ideal for tasks that can be processed simultaneously. | [Learn More](concurrent_workflow.md) |
|
||||
| GroupChat | Enables dynamic collaboration between agents through a chat-based interface, facilitating real-time information sharing and decision-making. | [Learn More](group_chat.md) |
|
||||
| HierarchicalSwarm | Implements a structured, multi-level approach to task management, with clear lines of authority and delegation. | [Learn More](hierarchical_swarm.md) |
|
||||
| MultiAgentRouter | Acts as an intelligent task dispatcher, distributing work across agents based on their capabilities and current workload. | [Learn More](multi_agent_router.md) |
|
||||
| MajorityVoting | Implements robust decision-making through consensus, ideal for tasks requiring collective intelligence or verification. | [Learn More](majority_voting.md) |
|
||||
|
||||
|
||||
<!-- | AutoSwarmBuilder | Automatically configures agent architectures based on task requirements and performance metrics, simplifying swarm creation. | [Learn More](auto_swarm_builder.md) | -->
|
||||
<!-- | Auto | Intelligently selects the most effective swarm architecture for a given task based on context. | [Learn More](auto.md) | -->
|
||||
|
||||
|
||||
# Learn More
|
||||
|
||||
To learn more about Swarms architecture and how different swarm types work together, visit our comprehensive guides:
|
||||
To explore Swarms architecture and how different swarm types work together, check out our comprehensive guides:
|
||||
|
||||
- [Introduction to Multi-Agent Architectures](/swarms/concept/swarm_architectures)
|
||||
|
||||
- [How to Choose the Right Multi-Agent Architecture](/swarms/concept/how_to_choose_swarms)
|
||||
|
||||
- [Framework Architecture Overview](/swarms/concept/framework_architecture)
|
||||
|
||||
- [Building Custom Swarms](/swarms/structs/custom_swarm)
|
||||
|
@ -1,39 +0,0 @@
|
||||
import os
|
||||
from swarms_client import SwarmsClient
|
||||
from swarms_client.types import AgentSpecParam
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
client = SwarmsClient(api_key=os.getenv("SWARMS_API_KEY"))
|
||||
|
||||
agent_spec = AgentSpecParam(
|
||||
agent_name="doctor_agent",
|
||||
description="A virtual doctor agent that provides evidence-based, safe, and empathetic medical advice for common health questions. Always reminds users to consult a healthcare professional for diagnoses or prescriptions.",
|
||||
task="What is the best medicine for a cold?",
|
||||
model_name="claude-4-sonnet-20250514",
|
||||
system_prompt=(
|
||||
"You are a highly knowledgeable, ethical, and empathetic virtual doctor. "
|
||||
"Always provide evidence-based, safe, and practical medical advice. "
|
||||
"If a question requires a diagnosis, prescription, or urgent care, remind the user to consult a licensed healthcare professional. "
|
||||
"Be clear, concise, and avoid unnecessary medical jargon. "
|
||||
"Never provide information that could be unsafe or misleading. "
|
||||
"If unsure, say so and recommend seeing a real doctor."
|
||||
),
|
||||
max_loops=1,
|
||||
temperature=0.4,
|
||||
role="doctor",
|
||||
)
|
||||
|
||||
response = client.agent.run(
|
||||
agent_config=agent_spec,
|
||||
task="What is the best medicine for a cold?",
|
||||
)
|
||||
|
||||
print(response)
|
||||
|
||||
# print(json.dumps(client.models.list_available(), indent=4))
|
||||
# print(json.dumps(client.health.check(), indent=4))
|
||||
# print(json.dumps(client.swarms.get_logs(), indent=4))
|
||||
# print(json.dumps(client.client.rate.get_limits(), indent=4))
|
||||
# print(json.dumps(client.swarms.check_available(), indent=4))
|
@ -0,0 +1,36 @@
|
||||
import os
|
||||
from swarms_client import SwarmsClient
|
||||
from dotenv import load_dotenv
|
||||
import json
|
||||
|
||||
load_dotenv()
|
||||
|
||||
client = SwarmsClient(
|
||||
api_key=os.getenv("SWARMS_API_KEY"),
|
||||
)
|
||||
|
||||
|
||||
result = client.agent.run(
|
||||
agent_config={
|
||||
"agent_name": "Bloodwork Diagnosis Expert",
|
||||
"description": "An expert doctor specializing in interpreting and diagnosing blood work results.",
|
||||
"system_prompt": (
|
||||
"You are an expert medical doctor specializing in the interpretation and diagnosis of blood work. "
|
||||
"Your expertise includes analyzing laboratory results, identifying abnormal values, "
|
||||
"explaining their clinical significance, and recommending next diagnostic or treatment steps. "
|
||||
"Provide clear, evidence-based explanations and consider differential diagnoses based on blood test findings."
|
||||
),
|
||||
"model_name": "groq/moonshotai/kimi-k2-instruct",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 1000,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
task=(
|
||||
"A patient presents with the following blood work results: "
|
||||
"Hemoglobin: 10.2 g/dL (low), WBC: 13,000 /µL (high), Platelets: 180,000 /µL (normal), "
|
||||
"ALT: 65 U/L (high), AST: 70 U/L (high). "
|
||||
"Please provide a detailed interpretation, possible diagnoses, and recommended next steps."
|
||||
),
|
||||
)
|
||||
|
||||
print(json.dumps(result, indent=4))
|
@ -0,0 +1,50 @@
|
||||
import os
|
||||
from swarms_client import SwarmsClient
|
||||
from dotenv import load_dotenv
|
||||
import json
|
||||
|
||||
load_dotenv()
|
||||
|
||||
client = SwarmsClient(
|
||||
api_key=os.getenv("SWARMS_API_KEY"),
|
||||
)
|
||||
|
||||
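# Two independent agent requests, each pairing an agent_config with its own task,
# submitted together in a single batch call.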
batch_requests = [
|
||||
{
|
||||
"agent_config": {
|
||||
"agent_name": "Bloodwork Diagnosis Expert",
|
||||
"description": "Expert in blood work interpretation.",
|
||||
"system_prompt": (
|
||||
"You are a doctor who interprets blood work. Give concise, clear explanations and possible diagnoses."
|
||||
),
|
||||
"model_name": "claude-sonnet-4-20250514",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 1000,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
"task": (
|
||||
"Blood work: Hemoglobin 10.2 (low), WBC 13,000 (high), Platelets 180,000 (normal), "
|
||||
"ALT 65 (high), AST 70 (high). Interpret and suggest diagnoses."
|
||||
),
|
||||
},
|
||||
{
|
||||
"agent_config": {
|
||||
"agent_name": "Radiology Report Summarizer",
|
||||
"description": "Expert in summarizing radiology reports.",
|
||||
"system_prompt": (
|
||||
"You are a radiologist. Summarize the findings of radiology reports in clear, patient-friendly language."
|
||||
),
|
||||
"model_name": "claude-sonnet-4-20250514",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 1000,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
"task": (
|
||||
"Radiology report: Chest X-ray shows mild cardiomegaly, no infiltrates, no effusion. Summarize the findings."
|
||||
),
|
||||
},
|
||||
]
|
||||
|
||||
result = client.agent.batch.run(body=batch_requests)
|
||||
|
||||
print(json.dumps(result, indent=4))
|
@ -0,0 +1,14 @@
|
||||
import os
|
||||
import json
|
||||
from dotenv import load_dotenv
|
||||
from swarms_client import SwarmsClient
|
||||
|
||||
load_dotenv()
|
||||
|
||||
client = SwarmsClient(api_key=os.getenv("SWARMS_API_KEY"))
|
||||
|
||||
print(json.dumps(client.models.list_available(), indent=4))
|
||||
print(json.dumps(client.health.check(), indent=4))
|
||||
print(json.dumps(client.swarms.get_logs(), indent=4))
|
||||
print(json.dumps(client.client.rate.get_limits(), indent=4))
|
||||
print(json.dumps(client.swarms.check_available(), indent=4))
|
@ -0,0 +1,105 @@
|
||||
import json
|
||||
import os
|
||||
from swarms_client import SwarmsClient
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
client = SwarmsClient(
|
||||
api_key=os.getenv("SWARMS_API_KEY"),
|
||||
)
|
||||
|
||||
|
||||
def create_medical_unit_swarm(client, patient_info):
|
||||
"""
|
||||
Creates and runs a simulated medical unit swarm with a doctor (leader), nurses, and a medical assistant.
|
||||
|
||||
Args:
|
||||
client (SwarmsClient): The SwarmsClient instance.
|
||||
patient_info (str): The patient symptoms and information.
|
||||
|
||||
Returns:
|
||||
dict: The output from the swarm run.
|
||||
"""
|
||||
return client.swarms.run(
|
||||
name="Hospital Medical Unit",
|
||||
description="A simulated hospital unit with a doctor (leader), nurses, and a medical assistant collaborating on patient care.",
|
||||
swarm_type="HiearchicalSwarm",
|
||||
task=patient_info,
|
||||
agents=[
|
||||
{
|
||||
"agent_name": "Dr. Smith - Attending Physician",
|
||||
"description": "The lead doctor responsible for diagnosis, treatment planning, and team coordination.",
|
||||
"system_prompt": (
|
||||
"You are Dr. Smith, the attending physician and leader of the medical unit. "
|
||||
"You review all information, make final decisions, and coordinate the team. "
|
||||
"Provide a diagnosis, recommend next steps, and delegate tasks to the nurses and assistant."
|
||||
),
|
||||
"model_name": "gpt-4.1",
|
||||
"role": "leader",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 8192,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
{
|
||||
"agent_name": "Nurse Alice",
|
||||
"description": "A registered nurse responsible for patient assessment, vital signs, and reporting findings to the doctor.",
|
||||
"system_prompt": (
|
||||
"You are Nurse Alice, a registered nurse. "
|
||||
"Assess the patient's symptoms, record vital signs, and report your findings to Dr. Smith. "
|
||||
"Suggest any immediate nursing interventions if needed."
|
||||
),
|
||||
"model_name": "gpt-4.1",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 4096,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
{
|
||||
"agent_name": "Nurse Bob",
|
||||
"description": "A registered nurse assisting with patient care, medication administration, and monitoring.",
|
||||
"system_prompt": (
|
||||
"You are Nurse Bob, a registered nurse. "
|
||||
"Assist with patient care, administer medications as ordered, and monitor the patient's response. "
|
||||
"Communicate any changes to Dr. Smith."
|
||||
),
|
||||
"model_name": "gpt-4.1",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 4096,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
{
|
||||
"agent_name": "Medical Assistant Jane",
|
||||
"description": "A medical assistant supporting the team with administrative tasks and basic patient care.",
|
||||
"system_prompt": (
|
||||
"You are Medical Assistant Jane. "
|
||||
"Support the team by preparing the patient, collecting samples, and handling administrative tasks. "
|
||||
"Report any relevant observations to the nurses or Dr. Smith."
|
||||
),
|
||||
"model_name": "claude-sonnet-4-20250514",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 2048,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
patient_symptoms = """
|
||||
Patient: 45-year-old female
|
||||
Chief Complaint: Chest pain and shortness of breath for 2 days
|
||||
|
||||
Symptoms:
|
||||
- Sharp chest pain that worsens with deep breathing
|
||||
- Shortness of breath, especially when lying down
|
||||
- Mild fever (100.2°F)
|
||||
- Dry cough
|
||||
- Fatigue
|
||||
"""
|
||||
|
||||
out = create_medical_unit_swarm(client, patient_symptoms)
|
||||
|
||||
print(json.dumps(out, indent=4))
|
@ -0,0 +1,63 @@
|
||||
import json
|
||||
import os
|
||||
from swarms_client import SwarmsClient
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
client = SwarmsClient(
|
||||
api_key=os.getenv("SWARMS_API_KEY"),
|
||||
)
|
||||
|
||||
patient_symptoms = """
|
||||
Patient: 45-year-old female
|
||||
Chief Complaint: Chest pain and shortness of breath for 2 days
|
||||
|
||||
Symptoms:
|
||||
- Sharp chest pain that worsens with deep breathing
|
||||
- Shortness of breath, especially when lying down
|
||||
- Mild fever (100.2°F)
|
||||
- Dry cough
|
||||
- Fatigue
|
||||
"""
|
||||
|
||||
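# ConcurrentWorkflow sends the same task to all three agents in parallel and
# returns each agent's analysis in the combined output.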
out = client.swarms.run(
|
||||
name="ICD Analysis Swarm",
|
||||
description="A swarm that analyzes ICD codes",
|
||||
swarm_type="ConcurrentWorkflow",
|
||||
task=patient_symptoms,
|
||||
agents=[
|
||||
{
|
||||
"agent_name": "ICD-Analyzer",
|
||||
"description": "An agent that analyzes ICD codes",
|
||||
"system_prompt": "You are an expert ICD code analyzer. Your task is to analyze the ICD codes and provide a detailed explanation of the codes.",
|
||||
"model_name": "groq/openai/gpt-oss-120b",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 8192,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
{
|
||||
"agent_name": "ICD-Code-Explainer-Primary",
|
||||
"description": "An agent that provides primary explanations for ICD codes",
|
||||
"system_prompt": "You are an expert ICD code explainer. Your task is to provide a clear and thorough explanation of the ICD codes to the user, focusing on primary meanings and clinical context.",
|
||||
"model_name": "groq/openai/gpt-oss-120b",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 8192,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
{
|
||||
"agent_name": "ICD-Code-Explainer-Secondary",
|
||||
"description": "An agent that provides additional context and secondary explanations for ICD codes",
|
||||
"system_prompt": "You are an expert ICD code explainer. Your task is to provide additional context, nuances, and secondary explanations for the ICD codes, including possible differential diagnoses and related codes.",
|
||||
"model_name": "groq/openai/gpt-oss-120b",
|
||||
"role": "worker",
|
||||
"max_loops": 1,
|
||||
"max_tokens": 8192,
|
||||
"temperature": 0.5,
|
||||
},
|
||||
],
|
||||
)
|
||||
|
||||
print(json.dumps(out, indent=4))
|
@ -0,0 +1,247 @@
|
||||
from loguru import logger
|
||||
import yfinance as yf
|
||||
import json
|
||||
|
||||
|
||||
def get_figma_stock_data(stock: str) -> str:
|
||||
"""
|
||||
    Fetches comprehensive stock data for the given stock ticker (e.g., Figma, "FIG") using Yahoo Finance.
|
||||
|
||||
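    Args:
        stock (str): Ticker symbol to fetch, e.g. "FIG".
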
Returns:
|
||||
        str: A JSON-formatted string containing comprehensive stock data including:
|
||||
- Current price and market data
|
||||
- Company information
|
||||
- Financial metrics
|
||||
- Historical data summary
|
||||
- Trading statistics
|
||||
|
||||
Raises:
|
||||
Exception: If there's an error fetching the data from Yahoo Finance
|
||||
"""
|
||||
try:
|
||||
# Initialize Figma stock ticker
|
||||
figma = yf.Ticker(stock)
|
||||
|
||||
# Get current stock info
|
||||
info = figma.info
|
||||
|
||||
# Get recent historical data (last 30 days)
|
||||
hist = figma.history(period="30d")
|
||||
|
||||
# Get real-time fast info
|
||||
fast_info = figma.fast_info
|
||||
|
||||
# Compile comprehensive data
|
||||
figma_data = {
|
||||
"company_info": {
|
||||
"name": info.get("longName", "Figma Inc."),
|
||||
"symbol": "FIG",
|
||||
"sector": info.get("sector", "N/A"),
|
||||
"industry": info.get("industry", "N/A"),
|
||||
"website": info.get("website", "N/A"),
|
||||
"description": info.get("longBusinessSummary", "N/A"),
|
||||
},
|
||||
"current_market_data": {
|
||||
"current_price": info.get("currentPrice", "N/A"),
|
||||
"previous_close": info.get("previousClose", "N/A"),
|
||||
"open": info.get("open", "N/A"),
|
||||
"day_low": info.get("dayLow", "N/A"),
|
||||
"day_high": info.get("dayHigh", "N/A"),
|
||||
"volume": info.get("volume", "N/A"),
|
||||
"market_cap": info.get("marketCap", "N/A"),
|
||||
"price_change": (
|
||||
info.get("currentPrice", 0)
|
||||
- info.get("previousClose", 0)
|
||||
if info.get("currentPrice")
|
||||
and info.get("previousClose")
|
||||
else "N/A"
|
||||
),
|
||||
"price_change_percent": info.get(
|
||||
"regularMarketChangePercent", "N/A"
|
||||
),
|
||||
},
|
||||
"financial_metrics": {
|
||||
"pe_ratio": info.get("trailingPE", "N/A"),
|
||||
"forward_pe": info.get("forwardPE", "N/A"),
|
||||
"price_to_book": info.get("priceToBook", "N/A"),
|
||||
"price_to_sales": info.get(
|
||||
"priceToSalesTrailing12Months", "N/A"
|
||||
),
|
||||
"enterprise_value": info.get(
|
||||
"enterpriseValue", "N/A"
|
||||
),
|
||||
"beta": info.get("beta", "N/A"),
|
||||
"dividend_yield": info.get("dividendYield", "N/A"),
|
||||
"payout_ratio": info.get("payoutRatio", "N/A"),
|
||||
},
|
||||
"trading_statistics": {
|
||||
"fifty_day_average": info.get(
|
||||
"fiftyDayAverage", "N/A"
|
||||
),
|
||||
"two_hundred_day_average": info.get(
|
||||
"twoHundredDayAverage", "N/A"
|
||||
),
|
||||
"fifty_two_week_low": info.get(
|
||||
"fiftyTwoWeekLow", "N/A"
|
||||
),
|
||||
"fifty_two_week_high": info.get(
|
||||
"fiftyTwoWeekHigh", "N/A"
|
||||
),
|
||||
"shares_outstanding": info.get(
|
||||
"sharesOutstanding", "N/A"
|
||||
),
|
||||
"float_shares": info.get("floatShares", "N/A"),
|
||||
"shares_short": info.get("sharesShort", "N/A"),
|
||||
"short_ratio": info.get("shortRatio", "N/A"),
|
||||
},
|
||||
"recent_performance": {
|
||||
"last_30_days": {
|
||||
"start_price": (
|
||||
hist.iloc[0]["Close"]
|
||||
if not hist.empty
|
||||
else "N/A"
|
||||
),
|
||||
"end_price": (
|
||||
hist.iloc[-1]["Close"]
|
||||
if not hist.empty
|
||||
else "N/A"
|
||||
),
|
||||
"total_return": (
|
||||
(
|
||||
hist.iloc[-1]["Close"]
|
||||
- hist.iloc[0]["Close"]
|
||||
)
|
||||
/ hist.iloc[0]["Close"]
|
||||
* 100
|
||||
if not hist.empty
|
||||
else "N/A"
|
||||
),
|
||||
"highest_price": (
|
||||
hist["High"].max()
|
||||
if not hist.empty
|
||||
else "N/A"
|
||||
),
|
||||
"lowest_price": (
|
||||
hist["Low"].min() if not hist.empty else "N/A"
|
||||
),
|
||||
"average_volume": (
|
||||
hist["Volume"].mean()
|
||||
if not hist.empty
|
||||
else "N/A"
|
||||
),
|
||||
}
|
||||
},
|
||||
"real_time_data": {
|
||||
"last_price": (
|
||||
fast_info.last_price
|
||||
if hasattr(fast_info, "last_price")
|
||||
else "N/A"
|
||||
),
|
||||
"last_volume": (
|
||||
fast_info.last_volume
|
||||
if hasattr(fast_info, "last_volume")
|
||||
else "N/A"
|
||||
),
|
||||
"bid": (
|
||||
fast_info.bid
|
||||
if hasattr(fast_info, "bid")
|
||||
else "N/A"
|
||||
),
|
||||
"ask": (
|
||||
fast_info.ask
|
||||
if hasattr(fast_info, "ask")
|
||||
else "N/A"
|
||||
),
|
||||
"bid_size": (
|
||||
fast_info.bid_size
|
||||
if hasattr(fast_info, "bid_size")
|
||||
else "N/A"
|
||||
),
|
||||
"ask_size": (
|
||||
fast_info.ask_size
|
||||
if hasattr(fast_info, "ask_size")
|
||||
else "N/A"
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
logger.info("Successfully fetched Figma (FIG) stock data")
|
||||
return json.dumps(figma_data, indent=4)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching Figma stock data: {e}")
|
||||
raise Exception(f"Failed to fetch Figma stock data: {e}")
|
||||
|
||||
|
||||
# # Example usage
|
||||
# # Initialize the quantitative trading agent
|
||||
# agent = Agent(
|
||||
# agent_name="Quantitative-Trading-Agent",
|
||||
# agent_description="Advanced quantitative trading and algorithmic analysis agent specializing in stock analysis and trading strategies",
|
||||
# system_prompt=f"""You are an expert quantitative trading agent with deep expertise in:
|
||||
# - Algorithmic trading strategies and implementation
|
||||
# - Statistical arbitrage and market making
|
||||
# - Risk management and portfolio optimization
|
||||
# - High-frequency trading systems
|
||||
# - Market microstructure analysis
|
||||
# - Quantitative research methodologies
|
||||
# - Financial mathematics and stochastic processes
|
||||
# - Machine learning applications in trading
|
||||
# - Technical analysis and chart patterns
|
||||
# - Fundamental analysis and valuation models
|
||||
# - Options trading and derivatives
|
||||
# - Market sentiment analysis
|
||||
|
||||
# Your core responsibilities include:
|
||||
# 1. Developing and backtesting trading strategies
|
||||
# 2. Analyzing market data and identifying alpha opportunities
|
||||
# 3. Implementing risk management frameworks
|
||||
# 4. Optimizing portfolio allocations
|
||||
# 5. Conducting quantitative research
|
||||
# 6. Monitoring market microstructure
|
||||
# 7. Evaluating trading system performance
|
||||
# 8. Performing comprehensive stock analysis
|
||||
# 9. Generating trading signals and recommendations
|
||||
# 10. Risk assessment and position sizing
|
||||
|
||||
# When analyzing stocks, you should:
|
||||
# - Evaluate technical indicators and chart patterns
|
||||
# - Assess fundamental metrics and valuation ratios
|
||||
# - Analyze market sentiment and momentum
|
||||
# - Consider macroeconomic factors
|
||||
# - Provide risk-adjusted return projections
|
||||
# - Suggest optimal entry/exit points
|
||||
# - Calculate position sizing recommendations
|
||||
# - Identify potential catalysts and risks
|
||||
|
||||
# You maintain strict adherence to:
|
||||
# - Mathematical rigor in all analyses
|
||||
# - Statistical significance in strategy development
|
||||
# - Risk-adjusted return optimization
|
||||
# - Market impact minimization
|
||||
# - Regulatory compliance
|
||||
# - Transaction cost analysis
|
||||
# - Performance attribution
|
||||
# - Data-driven decision making
|
||||
|
||||
# You communicate in precise, technical terms while maintaining clarity for stakeholders.
|
||||
# Data: {get_figma_stock_data('FIG')}
|
||||
|
||||
# """,
|
||||
# max_loops=1,
|
||||
# model_name="gpt-4o-mini",
|
||||
# dynamic_temperature_enabled=True,
|
||||
# output_type="str-all-except-first",
|
||||
# streaming_on=True,
|
||||
# print_on=True,
|
||||
# telemetry_enable=False,
|
||||
# )
|
||||
|
||||
# # Example 1: Basic usage with just a task
|
||||
# logger.info("Starting quantitative analysis cron job for Figma (FIG)")
|
||||
# cron_job = CronJob(agent=agent, interval="10seconds")
|
||||
# cron_job.run(
|
||||
# task="Analyze the Figma (FIG) stock comprehensively using the available stock data. Provide a detailed quantitative analysis"
|
||||
# )
|
||||
|
||||
print(get_figma_stock_data("FIG"))
|
@ -0,0 +1,105 @@
|
||||
"""
|
||||
Example script demonstrating how to fetch Figma (FIG) stock data using swarms_tools Yahoo Finance API.
|
||||
This shows the alternative approach using the existing swarms_tools package.
|
||||
"""
|
||||
|
||||
from swarms import Agent
|
||||
from swarms.prompts.finance_agent_sys_prompt import (
|
||||
FINANCIAL_AGENT_SYS_PROMPT,
|
||||
)
|
||||
from swarms_tools import yahoo_finance_api
|
||||
from loguru import logger
|
||||
import json
|
||||
|
||||
|
||||
def get_figma_data_with_swarms_tools():
|
||||
"""
|
||||
Fetches Figma stock data using the swarms_tools Yahoo Finance API.
|
||||
|
||||
Returns:
|
||||
dict: Figma stock data from swarms_tools
|
||||
"""
|
||||
try:
|
||||
logger.info("Fetching Figma stock data using swarms_tools...")
|
||||
figma_data = yahoo_finance_api(["FIG"])
|
||||
return figma_data
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching data with swarms_tools: {e}")
|
||||
raise
|
||||
|
||||
|
||||
def analyze_figma_with_agent():
|
||||
"""
|
||||
Uses a Swarms agent to analyze Figma stock data.
|
||||
"""
|
||||
try:
|
||||
# Initialize the agent with Yahoo Finance tool
|
||||
agent = Agent(
|
||||
agent_name="Figma-Analysis-Agent",
|
||||
agent_description="Specialized agent for analyzing Figma stock data",
|
||||
system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
|
||||
max_loops=1,
|
||||
model_name="gpt-4o-mini",
|
||||
tools=[yahoo_finance_api],
|
||||
dynamic_temperature_enabled=True,
|
||||
)
|
||||
|
||||
# Ask the agent to analyze Figma
|
||||
analysis = agent.run(
|
||||
"Analyze the current stock data for Figma (FIG) and provide insights on its performance, valuation metrics, and recent trends."
|
||||
)
|
||||
|
||||
return analysis
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in agent analysis: {e}")
|
||||
raise
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main function to demonstrate different approaches for Figma stock data.
|
||||
"""
|
||||
logger.info("Starting Figma stock analysis with swarms_tools")
|
||||
|
||||
try:
|
||||
# Method 1: Direct API call
|
||||
print("\n" + "=" * 60)
|
||||
print("METHOD 1: Direct swarms_tools API call")
|
||||
print("=" * 60)
|
||||
|
||||
figma_data = get_figma_data_with_swarms_tools()
|
||||
print("Raw data from swarms_tools:")
|
||||
print(json.dumps(figma_data, indent=2, default=str))
|
||||
|
||||
# Method 2: Agent-based analysis
|
||||
print("\n" + "=" * 60)
|
||||
print("METHOD 2: Agent-based analysis")
|
||||
print("=" * 60)
|
||||
|
||||
analysis = analyze_figma_with_agent()
|
||||
print("Agent analysis:")
|
||||
print(analysis)
|
||||
|
||||
# Method 3: Comparison with custom function
|
||||
print("\n" + "=" * 60)
|
||||
print("METHOD 3: Comparison with custom function")
|
||||
print("=" * 60)
|
||||
|
||||
from cron_job_examples.cron_job_example import (
|
||||
get_figma_stock_data_simple,
|
||||
)
|
||||
|
||||
custom_data = get_figma_stock_data_simple()
|
||||
print("Custom function output:")
|
||||
print(custom_data)
|
||||
|
||||
logger.info("All methods completed successfully!")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in main function: {e}")
|
||||
print(f"Error: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1,349 @@
|
||||
"""
|
||||
Cryptocurrency Concurrent Multi-Agent Cron Job Example
|
||||
|
||||
This example demonstrates how to use ConcurrentWorkflow with CronJob to create
|
||||
a powerful cryptocurrency tracking system. Each specialized agent analyzes a
|
||||
specific cryptocurrency concurrently on every scheduled run.
|
||||
|
||||
Features:
|
||||
- ConcurrentWorkflow for parallel agent execution
|
||||
- CronJob scheduling for automated runs at a fixed interval (every 5 seconds in this example)
|
||||
- Each agent specializes in analyzing one specific cryptocurrency
|
||||
- Real-time data fetching from CoinGecko API
|
||||
- Concurrent analysis of multiple cryptocurrencies
|
||||
- Structured output with professional formatting
|
||||
|
||||
Architecture:
|
||||
CronJob -> ConcurrentWorkflow -> [Bitcoin Agent, Ethereum Agent, Solana Agent, etc.] -> Parallel Analysis
|
||||
"""
|
||||
|
||||
from typing import List
|
||||
from loguru import logger
|
||||
|
||||
from swarms import Agent, CronJob, ConcurrentWorkflow
|
||||
from swarms_tools import coin_gecko_coin_api
|
||||
|
||||
|
||||
def create_crypto_specific_agents() -> List[Agent]:
|
||||
"""
|
||||
Creates agents that each specialize in analyzing a specific cryptocurrency.
|
||||
|
||||
Returns:
|
||||
List[Agent]: List of cryptocurrency-specific Agent instances
|
||||
"""
|
||||
|
||||
# Bitcoin Specialist Agent
|
||||
bitcoin_agent = Agent(
|
||||
agent_name="Bitcoin-Analyst",
|
||||
agent_description="Expert analyst specializing exclusively in Bitcoin (BTC) analysis and market dynamics",
|
||||
system_prompt="""You are a Bitcoin specialist and expert analyst. Your expertise includes:
|
||||
|
||||
BITCOIN SPECIALIZATION:
|
||||
- Bitcoin's unique position as digital gold
|
||||
- Bitcoin halving cycles and their market impact
|
||||
- Bitcoin mining economics and hash rate analysis
|
||||
- Lightning Network and Layer 2 developments
|
||||
- Bitcoin adoption by institutions and countries
|
||||
- Bitcoin's correlation with traditional markets
|
||||
- Bitcoin technical analysis and on-chain metrics
|
||||
- Bitcoin's role as a store of value and hedge against inflation
|
||||
|
||||
ANALYSIS FOCUS:
|
||||
- Analyze ONLY Bitcoin data from the provided dataset
|
||||
- Focus on Bitcoin-specific metrics and trends
|
||||
- Consider Bitcoin's unique market dynamics
|
||||
- Evaluate Bitcoin's dominance and market leadership
|
||||
- Assess institutional adoption trends
|
||||
- Monitor on-chain activity and network health
|
||||
|
||||
DELIVERABLES:
|
||||
- Bitcoin-specific analysis and insights
|
||||
- Price action assessment and predictions
|
||||
- Market dominance analysis
|
||||
- Institutional adoption impact
|
||||
- Technical and fundamental outlook
|
||||
- Risk factors specific to Bitcoin
|
||||
|
||||
Extract Bitcoin data from the provided dataset and provide comprehensive Bitcoin-focused analysis.""",
|
||||
model_name="groq/moonshotai/kimi-k2-instruct",
|
||||
max_loops=1,
|
||||
dynamic_temperature_enabled=True,
|
||||
streaming_on=False,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
# Ethereum Specialist Agent
|
||||
ethereum_agent = Agent(
|
||||
agent_name="Ethereum-Analyst",
|
||||
agent_description="Expert analyst specializing exclusively in Ethereum (ETH) analysis and ecosystem development",
|
||||
system_prompt="""You are an Ethereum specialist and expert analyst. Your expertise includes:
|
||||
|
||||
ETHEREUM SPECIALIZATION:
|
||||
- Ethereum's smart contract platform and DeFi ecosystem
|
||||
- Ethereum 2.0 transition and proof-of-stake mechanics
|
||||
- Gas fees, network usage, and scalability solutions
|
||||
- Layer 2 solutions (Arbitrum, Optimism, Polygon)
|
||||
- DeFi protocols and TVL (Total Value Locked) analysis
|
||||
- NFT markets and Ethereum's role in digital assets
|
||||
- Developer activity and ecosystem growth
|
||||
- EIP proposals and network upgrades
|
||||
|
||||
ANALYSIS FOCUS:
|
||||
- Analyze ONLY Ethereum data from the provided dataset
|
||||
- Focus on Ethereum's platform utility and network effects
|
||||
- Evaluate DeFi ecosystem health and growth
|
||||
- Assess Layer 2 adoption and scalability solutions
|
||||
- Monitor network usage and gas fee trends
|
||||
- Consider Ethereum's competitive position vs other smart contract platforms
|
||||
|
||||
DELIVERABLES:
|
||||
- Ethereum-specific analysis and insights
|
||||
- Platform utility and adoption metrics
|
||||
- DeFi ecosystem impact assessment
|
||||
- Network health and scalability evaluation
|
||||
- Competitive positioning analysis
|
||||
- Technical and fundamental outlook for ETH
|
||||
|
||||
Extract Ethereum data from the provided dataset and provide comprehensive Ethereum-focused analysis.""",
|
||||
model_name="groq/moonshotai/kimi-k2-instruct",
|
||||
max_loops=1,
|
||||
dynamic_temperature_enabled=True,
|
||||
streaming_on=False,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
# Solana Specialist Agent
|
||||
solana_agent = Agent(
|
||||
agent_name="Solana-Analyst",
|
||||
agent_description="Expert analyst specializing exclusively in Solana (SOL) analysis and ecosystem development",
|
||||
system_prompt="""You are a Solana specialist and expert analyst. Your expertise includes:
|
||||
|
||||
SOLANA SPECIALIZATION:
|
||||
- Solana's high-performance blockchain architecture
|
||||
- Proof-of-History consensus mechanism
|
||||
- Solana's DeFi ecosystem and DEX platforms (Serum, Raydium)
|
||||
- NFT marketplaces and creator economy on Solana
|
||||
- Network outages and reliability concerns
|
||||
- Developer ecosystem and Rust programming adoption
|
||||
- Validator economics and network decentralization
|
||||
- Cross-chain bridges and interoperability
|
||||
|
||||
ANALYSIS FOCUS:
|
||||
- Analyze ONLY Solana data from the provided dataset
|
||||
- Focus on Solana's performance and scalability advantages
|
||||
- Evaluate network stability and uptime improvements
|
||||
- Assess ecosystem growth and developer adoption
|
||||
- Monitor DeFi and NFT activity on Solana
|
||||
- Consider Solana's competitive position vs Ethereum
|
||||
|
||||
DELIVERABLES:
|
||||
- Solana-specific analysis and insights
|
||||
- Network performance and reliability assessment
|
||||
- Ecosystem growth and adoption metrics
|
||||
- DeFi and NFT market analysis
|
||||
- Competitive advantages and challenges
|
||||
- Technical and fundamental outlook for SOL
|
||||
|
||||
Extract Solana data from the provided dataset and provide comprehensive Solana-focused analysis.""",
|
||||
model_name="groq/moonshotai/kimi-k2-instruct",
|
||||
max_loops=1,
|
||||
dynamic_temperature_enabled=True,
|
||||
streaming_on=False,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
# Cardano Specialist Agent
|
||||
cardano_agent = Agent(
|
||||
agent_name="Cardano-Analyst",
|
||||
agent_description="Expert analyst specializing exclusively in Cardano (ADA) analysis and research-driven development",
|
||||
system_prompt="""You are a Cardano specialist and expert analyst. Your expertise includes:
|
||||
|
||||
CARDANO SPECIALIZATION:
|
||||
- Cardano's research-driven development approach
|
||||
- Ouroboros proof-of-stake consensus protocol
|
||||
- Smart contract capabilities via Plutus and Marlowe
|
||||
- Cardano's three-layer architecture (settlement, computation, control)
|
||||
- Academic partnerships and peer-reviewed research
|
||||
- Cardano ecosystem projects and DApp development
|
||||
- Native tokens and Cardano's UTXO model
|
||||
- Sustainability and treasury funding mechanisms
|
||||
|
||||
ANALYSIS FOCUS:
|
||||
- Analyze ONLY Cardano data from the provided dataset
|
||||
- Focus on Cardano's methodical development approach
|
||||
- Evaluate smart contract adoption and ecosystem growth
|
||||
- Assess academic partnerships and research contributions
|
||||
- Monitor native token ecosystem development
|
||||
- Consider Cardano's long-term roadmap and milestones
|
||||
|
||||
DELIVERABLES:
|
||||
- Cardano-specific analysis and insights
|
||||
- Development progress and milestone achievements
|
||||
- Smart contract ecosystem evaluation
|
||||
- Academic research impact assessment
|
||||
- Native token and DApp adoption metrics
|
||||
- Technical and fundamental outlook for ADA
|
||||
|
||||
Extract Cardano data from the provided dataset and provide comprehensive Cardano-focused analysis.""",
|
||||
model_name="groq/moonshotai/kimi-k2-instruct",
|
||||
max_loops=1,
|
||||
dynamic_temperature_enabled=True,
|
||||
streaming_on=False,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
# Binance Coin Specialist Agent
|
||||
bnb_agent = Agent(
|
||||
agent_name="BNB-Analyst",
|
||||
agent_description="Expert analyst specializing exclusively in BNB analysis and Binance ecosystem dynamics",
|
||||
system_prompt="""You are a BNB specialist and expert analyst. Your expertise includes:
|
||||
|
||||
BNB SPECIALIZATION:
|
||||
- BNB's utility within the Binance ecosystem
|
||||
- Binance Smart Chain (BSC) development and adoption
|
||||
- BNB token burns and deflationary mechanics
|
||||
- Binance exchange volume and market leadership
|
||||
- BSC DeFi ecosystem and yield farming
|
||||
- Cross-chain bridges and multi-chain strategies
|
||||
- Regulatory challenges facing Binance globally
|
||||
- BNB's role in transaction fee discounts and platform benefits
|
||||
|
||||
ANALYSIS FOCUS:
|
||||
- Analyze ONLY BNB data from the provided dataset
|
||||
- Focus on BNB's utility value and exchange benefits
|
||||
- Evaluate BSC ecosystem growth and competition with Ethereum
|
||||
- Assess token burn impact on supply and price
|
||||
- Monitor Binance platform developments and regulations
|
||||
- Consider BNB's centralized vs decentralized aspects
|
||||
|
||||
DELIVERABLES:
|
||||
- BNB-specific analysis and insights
|
||||
- Utility value and ecosystem benefits assessment
|
||||
- BSC adoption and DeFi growth evaluation
|
||||
- Token economics and burn mechanism impact
|
||||
- Regulatory risk and compliance analysis
|
||||
- Technical and fundamental outlook for BNB
|
||||
|
||||
Extract BNB data from the provided dataset and provide comprehensive BNB-focused analysis.""",
|
||||
model_name="groq/moonshotai/kimi-k2-instruct",
|
||||
max_loops=1,
|
||||
dynamic_temperature_enabled=True,
|
||||
streaming_on=False,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
# XRP Specialist Agent
|
||||
xrp_agent = Agent(
|
||||
agent_name="XRP-Analyst",
|
||||
agent_description="Expert analyst specializing exclusively in XRP analysis and cross-border payment solutions",
|
||||
system_prompt="""You are an XRP specialist and expert analyst. Your expertise includes:
|
||||
|
||||
XRP SPECIALIZATION:
|
||||
- XRP's role in cross-border payments and remittances
|
||||
- RippleNet adoption by financial institutions
|
||||
- Central Bank Digital Currency (CBDC) partnerships
|
||||
- Regulatory landscape and SEC lawsuit implications
|
||||
- XRP Ledger's consensus mechanism and energy efficiency
|
||||
- On-Demand Liquidity (ODL) usage and growth
|
||||
- Competition with SWIFT and traditional payment rails
|
||||
- Ripple's partnerships with banks and payment providers
|
||||
|
||||
ANALYSIS FOCUS:
|
||||
- Analyze ONLY XRP data from the provided dataset
|
||||
- Focus on XRP's utility in payments and remittances
|
||||
- Evaluate RippleNet adoption and institutional partnerships
|
||||
- Assess regulatory developments and legal clarity
|
||||
- Monitor ODL usage and transaction volumes
|
||||
- Consider XRP's competitive position in payments
|
||||
|
||||
DELIVERABLES:
|
||||
- XRP-specific analysis and insights
|
||||
- Payment utility and adoption assessment
|
||||
- Regulatory landscape and legal developments
|
||||
- Institutional partnership impact evaluation
|
||||
- Cross-border payment market analysis
|
||||
- Technical and fundamental outlook for XRP
|
||||
|
||||
Extract XRP data from the provided dataset and provide comprehensive XRP-focused analysis.""",
|
||||
model_name="groq/moonshotai/kimi-k2-instruct",
|
||||
max_loops=1,
|
||||
dynamic_temperature_enabled=True,
|
||||
streaming_on=False,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
return [
|
||||
bitcoin_agent,
|
||||
ethereum_agent,
|
||||
solana_agent,
|
||||
cardano_agent,
|
||||
bnb_agent,
|
||||
xrp_agent,
|
||||
]
|
||||
|
||||
|
||||
def create_crypto_workflow() -> ConcurrentWorkflow:
|
||||
"""
|
||||
Creates a ConcurrentWorkflow with cryptocurrency-specific analysis agents.
|
||||
|
||||
Returns:
|
||||
ConcurrentWorkflow: Configured workflow for crypto analysis
|
||||
"""
|
||||
agents = create_crypto_specific_agents()
|
||||
|
||||
workflow = ConcurrentWorkflow(
|
||||
name="Crypto-Specific-Analysis-Workflow",
|
||||
description="Concurrent execution of cryptocurrency-specific analysis agents",
|
||||
agents=agents,
|
||||
max_loops=1,
|
||||
)
|
||||
|
||||
return workflow
|
||||
|
||||
|
||||
def create_crypto_cron_job() -> CronJob:
|
||||
"""
|
||||
Creates a CronJob that runs cryptocurrency-specific analysis every 5 seconds using ConcurrentWorkflow.
|
||||
|
||||
Returns:
|
||||
CronJob: Configured cron job for automated crypto analysis
|
||||
"""
|
||||
# Create the concurrent workflow
|
||||
workflow = create_crypto_workflow()
|
||||
|
||||
# Create the cron job
|
||||
cron_job = CronJob(
|
||||
agent=workflow, # Use the workflow as the agent
|
||||
interval="5seconds", # Run every 1 minute
|
||||
)
|
||||
|
||||
return cron_job
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main function to run the cryptocurrency-specific concurrent analysis cron job.
|
||||
"""
|
||||
cron_job = create_crypto_cron_job()
|
||||
|
||||
prompt = (
|
||||
"You are a world-class institutional crypto analyst at a top-tier asset management firm (e.g., BlackRock).\n"
|
||||
"Conduct a thorough, data-driven, and professional analysis of your assigned cryptocurrency, including:\n"
|
||||
"- Current price, market cap, and recent performance trends\n"
|
||||
"- Key technical and fundamental indicators\n"
|
||||
"- Major news, regulatory, or macroeconomic events impacting the asset\n"
|
||||
"- On-chain activity and notable whale or institutional movements\n"
|
||||
"- Short-term and long-term outlook with clear, actionable insights\n"
|
||||
"Present your findings in a concise, well-structured report suitable for executive decision-makers."
|
||||
)
|
||||
|
||||
# Start the cron job
|
||||
logger.info("🔄 Starting automated analysis loop...")
|
||||
logger.info("⏰ Press Ctrl+C to stop the cron job")
|
||||
|
||||
output = cron_job.run(task=prompt)
|
||||
print(output)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1,79 @@
|
||||
"""
|
||||
Example script demonstrating how to fetch Figma (FIG) stock data using Yahoo Finance.
|
||||
"""
|
||||
|
||||
from cron_job_examples.cron_job_example import (
|
||||
get_figma_stock_data,
|
||||
get_figma_stock_data_simple,
|
||||
)
|
||||
from loguru import logger
|
||||
import json
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main function to demonstrate Figma stock data fetching.
|
||||
"""
|
||||
logger.info("Starting Figma stock data demonstration")
|
||||
|
||||
try:
|
||||
# Example 1: Get comprehensive data as dictionary
|
||||
logger.info("Fetching comprehensive Figma stock data...")
|
||||
figma_data = get_figma_stock_data()
|
||||
|
||||
# Print the data in a structured format
|
||||
print("\n" + "=" * 50)
|
||||
print("COMPREHENSIVE FIGMA STOCK DATA")
|
||||
print("=" * 50)
|
||||
print(json.dumps(figma_data, indent=2, default=str))
|
||||
|
||||
# Example 2: Get simple formatted data
|
||||
logger.info("Fetching simple formatted Figma stock data...")
|
||||
simple_data = get_figma_stock_data_simple()
|
||||
|
||||
print("\n" + "=" * 50)
|
||||
print("SIMPLE FORMATTED FIGMA STOCK DATA")
|
||||
print("=" * 50)
|
||||
print(simple_data)
|
||||
|
||||
# Example 3: Access specific data points
|
||||
logger.info("Accessing specific data points...")
|
||||
|
||||
current_price = figma_data["current_market_data"][
|
||||
"current_price"
|
||||
]
|
||||
market_cap = figma_data["current_market_data"]["market_cap"]
|
||||
pe_ratio = figma_data["financial_metrics"]["pe_ratio"]
|
||||
|
||||
print("\nKey Metrics:")
|
||||
print(f"Current Price: ${current_price}")
|
||||
print(f"Market Cap: ${market_cap:,}")
|
||||
print(f"P/E Ratio: {pe_ratio}")
|
||||
|
||||
# Example 4: Check if stock is performing well
|
||||
price_change = figma_data["current_market_data"][
|
||||
"price_change"
|
||||
]
|
||||
if isinstance(price_change, (int, float)):
|
||||
if price_change > 0:
|
||||
print(
|
||||
f"\n📈 Figma stock is up ${price_change:.2f} today!"
|
||||
)
|
||||
elif price_change < 0:
|
||||
print(
|
||||
f"\n📉 Figma stock is down ${abs(price_change):.2f} today."
|
||||
)
|
||||
else:
|
||||
print("\n➡️ Figma stock is unchanged today.")
|
||||
|
||||
logger.info(
|
||||
"Figma stock data demonstration completed successfully!"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in main function: {e}")
|
||||
print(f"Error: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1,157 @@
|
||||
"""
|
||||
Simple Cryptocurrency Concurrent CronJob Example
|
||||
|
||||
This is a simplified version showcasing the core concept of combining:
|
||||
- CronJob (for scheduling)
|
||||
- ConcurrentWorkflow (for parallel execution)
|
||||
- Each agent analyzes a specific cryptocurrency
|
||||
|
||||
Perfect for understanding the basic pattern before diving into the full example.
|
||||
"""
|
||||
|
||||
import json
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from loguru import logger
|
||||
|
||||
from swarms import Agent, CronJob, ConcurrentWorkflow
|
||||
|
||||
|
||||
def get_specific_crypto_data(coin_ids):
|
||||
"""Fetch specific crypto data from CoinGecko API."""
|
||||
try:
|
||||
url = "https://api.coingecko.com/api/v3/simple/price"
|
||||
params = {
|
||||
"ids": ",".join(coin_ids),
|
||||
"vs_currencies": "usd",
|
||||
"include_24hr_change": True,
|
||||
"include_market_cap": True,
|
||||
"include_24hr_vol": True,
|
||||
}
|
||||
|
||||
response = requests.get(url, params=params, timeout=10)
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
result = {
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"coins": data,
|
||||
}
|
||||
|
||||
return json.dumps(result, indent=2)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching crypto data: {e}")
|
||||
return f"Error: {e}"
|
||||
|
||||
|
||||
def create_crypto_specific_agents():
|
||||
"""Create agents that each specialize in one cryptocurrency."""
|
||||
|
||||
# Bitcoin Specialist Agent
|
||||
bitcoin_agent = Agent(
|
||||
agent_name="Bitcoin-Analyst",
|
||||
system_prompt="""You are a Bitcoin specialist. Analyze ONLY Bitcoin (BTC) data from the provided dataset.
|
||||
Focus on:
|
||||
- Bitcoin price movements and trends
|
||||
- Market dominance and institutional adoption
|
||||
- Bitcoin-specific market dynamics
|
||||
- Store of value characteristics
|
||||
Ignore all other cryptocurrencies in your analysis.""",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
print_on=False, # Important for concurrent execution
|
||||
)
|
||||
|
||||
# Ethereum Specialist Agent
|
||||
ethereum_agent = Agent(
|
||||
agent_name="Ethereum-Analyst",
|
||||
system_prompt="""You are an Ethereum specialist. Analyze ONLY Ethereum (ETH) data from the provided dataset.
|
||||
Focus on:
|
||||
- Ethereum price action and DeFi ecosystem
|
||||
- Smart contract platform adoption
|
||||
- Gas fees and network usage
|
||||
- Layer 2 scaling solutions impact
|
||||
Ignore all other cryptocurrencies in your analysis.""",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
print_on=False,
|
||||
)
|
||||
|
||||
# Solana Specialist Agent
|
||||
solana_agent = Agent(
|
||||
agent_name="Solana-Analyst",
|
||||
system_prompt="""You are a Solana specialist. Analyze ONLY Solana (SOL) data from the provided dataset.
|
||||
Focus on:
|
||||
- Solana price performance and ecosystem growth
|
||||
- High-performance blockchain advantages
|
||||
- DeFi and NFT activity on Solana
|
||||
- Network reliability and uptime
|
||||
Ignore all other cryptocurrencies in your analysis.""",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
print_on=False,
|
||||
)
|
||||
|
||||
return [bitcoin_agent, ethereum_agent, solana_agent]
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function demonstrating crypto-specific concurrent analysis with cron job."""
|
||||
logger.info(
|
||||
"🚀 Starting Simple Crypto-Specific Concurrent Analysis"
|
||||
)
|
||||
logger.info("💰 Each agent analyzes one specific cryptocurrency:")
|
||||
logger.info(" 🟠 Bitcoin-Analyst -> BTC only")
|
||||
logger.info(" 🔵 Ethereum-Analyst -> ETH only")
|
||||
logger.info(" 🟢 Solana-Analyst -> SOL only")
|
||||
|
||||
# Define specific cryptocurrencies to analyze
|
||||
coin_ids = ["bitcoin", "ethereum", "solana"]
|
||||
|
||||
# Step 1: Create crypto-specific agents
|
||||
agents = create_crypto_specific_agents()
|
||||
|
||||
# Step 2: Create ConcurrentWorkflow
|
||||
workflow = ConcurrentWorkflow(
|
||||
name="Simple-Crypto-Specific-Analysis",
|
||||
agents=agents,
|
||||
show_dashboard=True, # Shows real-time progress
|
||||
)
|
||||
|
||||
# Step 3: Create CronJob with the workflow
|
||||
cron_job = CronJob(
|
||||
agent=workflow, # Use workflow as the agent
|
||||
interval="60seconds", # Run every minute
|
||||
job_id="simple-crypto-specific-cron",
|
||||
)
|
||||
|
||||
# Step 4: Define the analysis task
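# Note: get_specific_crypto_data() is evaluated once when this task string is
# built, so every scheduled run analyzes the same data snapshot captured here.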
|
||||
task = f"""
|
||||
Analyze the cryptocurrency data below. Each agent should focus ONLY on their assigned cryptocurrency:
|
||||
|
||||
- Bitcoin-Analyst: Analyze Bitcoin (BTC) data only
|
||||
- Ethereum-Analyst: Analyze Ethereum (ETH) data only
|
||||
- Solana-Analyst: Analyze Solana (SOL) data only
|
||||
|
||||
Cryptocurrency Data:
|
||||
{get_specific_crypto_data(coin_ids)}
|
||||
|
||||
Each agent should:
|
||||
1. Extract and analyze data for YOUR ASSIGNED cryptocurrency only
|
||||
2. Provide brief insights from your specialty perspective
|
||||
3. Give a price trend assessment
|
||||
4. Identify key opportunities or risks
|
||||
5. Ignore all other cryptocurrencies
|
||||
"""
|
||||
|
||||
# Step 5: Start the cron job
|
||||
logger.info("▶️ Starting cron job - Press Ctrl+C to stop")
|
||||
try:
|
||||
cron_job.run(task=task)
|
||||
except KeyboardInterrupt:
|
||||
logger.info("⏹️ Stopped by user")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1,257 @@
|
||||
from swarms import Agent, CronJob
|
||||
from loguru import logger
|
||||
import requests
|
||||
import json
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
def get_solana_price() -> str:
|
||||
"""
|
||||
Fetches comprehensive Solana (SOL) price data using CoinGecko API.
|
||||
|
||||
Returns:
|
||||
str: A JSON formatted string containing Solana's current price and market data including:
|
||||
- Current price in USD
|
||||
- Market cap
|
||||
- 24h volume
|
||||
- 24h price change
|
||||
- Last updated timestamp
|
||||
|
||||
Raises:
|
||||
Exception: If there's an error fetching the data from CoinGecko API
|
||||
"""
|
||||
try:
|
||||
# CoinGecko API endpoint for simple price data
|
||||
url = "https://api.coingecko.com/api/v3/simple/price"
|
||||
params = {
|
||||
"ids": "solana", # Solana's CoinGecko ID
|
||||
"vs_currencies": "usd",
|
||||
"include_market_cap": True,
|
||||
"include_24hr_vol": True,
|
||||
"include_24hr_change": True,
|
||||
"include_last_updated_at": True,
|
||||
}
|
||||
|
||||
# Make API request with timeout
|
||||
response = requests.get(url, params=params, timeout=10)
|
||||
response.raise_for_status()
|
||||
|
||||
# Parse response data
|
||||
data = response.json()
|
||||
|
||||
if "solana" not in data:
|
||||
raise Exception("Solana data not found in API response")
|
||||
|
||||
solana_data = data["solana"]
|
||||
|
||||
# Compile comprehensive data
|
||||
solana_info = {
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"coin_info": {
|
||||
"name": "Solana",
|
||||
"symbol": "SOL",
|
||||
"coin_id": "solana",
|
||||
},
|
||||
"price_data": {
|
||||
"current_price_usd": solana_data.get("usd", "N/A"),
|
||||
"market_cap_usd": solana_data.get(
|
||||
"usd_market_cap", "N/A"
|
||||
),
|
||||
"volume_24h_usd": solana_data.get(
|
||||
"usd_24h_vol", "N/A"
|
||||
),
|
||||
"price_change_24h_percent": solana_data.get(
|
||||
"usd_24h_change", "N/A"
|
||||
),
|
||||
"last_updated_at": solana_data.get(
|
||||
"last_updated_at", "N/A"
|
||||
),
|
||||
},
|
||||
"formatted_data": {
|
||||
"price_formatted": (
|
||||
f"${solana_data.get('usd', 'N/A'):,.2f}"
|
||||
if solana_data.get("usd")
|
||||
else "N/A"
|
||||
),
|
||||
"market_cap_formatted": (
|
||||
f"${solana_data.get('usd_market_cap', 'N/A'):,.0f}"
|
||||
if solana_data.get("usd_market_cap")
|
||||
else "N/A"
|
||||
),
|
||||
"volume_formatted": (
|
||||
f"${solana_data.get('usd_24h_vol', 'N/A'):,.0f}"
|
||||
if solana_data.get("usd_24h_vol")
|
||||
else "N/A"
|
||||
),
|
||||
"change_formatted": (
|
||||
f"{solana_data.get('usd_24h_change', 'N/A'):+.2f}%"
|
||||
if solana_data.get("usd_24h_change") is not None
|
||||
else "N/A"
|
||||
),
|
||||
},
|
||||
}
|
||||
|
||||
logger.info(
|
||||
f"Successfully fetched Solana price: ${solana_data.get('usd', 'N/A')}"
|
||||
)
|
||||
return json.dumps(solana_info, indent=4)
|
||||
|
||||
except requests.RequestException as e:
|
||||
error_msg = f"API request failed: {e}"
|
||||
logger.error(error_msg)
|
||||
return json.dumps(
|
||||
{
|
||||
"error": error_msg,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"status": "failed",
|
||||
},
|
||||
indent=4,
|
||||
)
|
||||
except Exception as e:
|
||||
error_msg = f"Error fetching Solana price data: {e}"
|
||||
logger.error(error_msg)
|
||||
return json.dumps(
|
||||
{
|
||||
"error": error_msg,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"status": "failed",
|
||||
},
|
||||
indent=4,
|
||||
)
|
||||
|
||||
|
||||
def analyze_solana_data(data: str) -> str:
|
||||
"""
|
||||
Analyzes Solana price data and provides insights.
|
||||
|
||||
Args:
|
||||
data (str): JSON string containing Solana price data
|
||||
|
||||
Returns:
|
||||
str: Analysis and insights about the current Solana market data
|
||||
"""
|
||||
try:
|
||||
# Parse the data
|
||||
solana_data = json.loads(data)
|
||||
|
||||
if "error" in solana_data:
|
||||
return f"❌ Error in data: {solana_data['error']}"
|
||||
|
||||
price_data = solana_data.get("price_data", {})
|
||||
formatted_data = solana_data.get("formatted_data", {})
|
||||
|
||||
# Extract key metrics
|
||||
# (current price is surfaced via formatted_data below)
|
||||
price_change = price_data.get("price_change_24h_percent")
|
||||
volume_24h = price_data.get("volume_24h_usd")
|
||||
market_cap = price_data.get("market_cap_usd")
|
||||
|
||||
# Generate analysis
|
||||
analysis = f"""
|
||||
🔍 **Solana (SOL) Market Analysis** - {solana_data.get('timestamp', 'N/A')}
|
||||
|
||||
💰 **Current Price**: {formatted_data.get('price_formatted', 'N/A')}
|
||||
📊 **24h Change**: {formatted_data.get('change_formatted', 'N/A')}
|
||||
💎 **Market Cap**: {formatted_data.get('market_cap_formatted', 'N/A')}
|
||||
📈 **24h Volume**: {formatted_data.get('volume_formatted', 'N/A')}
|
||||
|
||||
"""
|
||||
|
||||
# Add sentiment analysis based on price change
|
||||
if price_change is not None:
|
||||
if price_change > 5:
|
||||
analysis += "🚀 **Sentiment**: Strongly Bullish - Significant positive momentum\n"
|
||||
elif price_change > 1:
|
||||
analysis += "📈 **Sentiment**: Bullish - Positive price action\n"
|
||||
elif price_change > -1:
|
||||
analysis += (
|
||||
"➡️ **Sentiment**: Neutral - Sideways movement\n"
|
||||
)
|
||||
elif price_change > -5:
|
||||
analysis += "📉 **Sentiment**: Bearish - Negative price action\n"
|
||||
else:
|
||||
analysis += "🔻 **Sentiment**: Strongly Bearish - Significant decline\n"
|
||||
|
||||
# Add volume analysis
|
||||
if volume_24h and market_cap:
|
||||
try:
|
||||
volume_market_cap_ratio = (
|
||||
volume_24h / market_cap
|
||||
) * 100
|
||||
if volume_market_cap_ratio > 10:
|
||||
analysis += "🔥 **Volume**: High trading activity - Strong market interest\n"
|
||||
elif volume_market_cap_ratio > 5:
|
||||
analysis += (
|
||||
"📊 **Volume**: Moderate trading activity\n"
|
||||
)
|
||||
else:
|
||||
analysis += "😴 **Volume**: Low trading activity - Limited market movement\n"
|
||||
except (TypeError, ZeroDivisionError):
|
||||
analysis += "📊 **Volume**: Unable to calculate volume/market cap ratio\n"
|
||||
|
||||
analysis += f"\n⏰ **Last Updated**: {price_data.get('last_updated_at', 'N/A')}"
|
||||
|
||||
return analysis
|
||||
|
||||
except json.JSONDecodeError as e:
|
||||
return f"❌ Error parsing data: {e}"
|
||||
except Exception as e:
|
||||
return f"❌ Error analyzing data: {e}"
|
||||
|
||||
|
||||
# Initialize the Solana analysis agent
|
||||
agent = Agent(
|
||||
agent_name="Solana-Price-Analyzer",
|
||||
agent_description="Specialized agent for analyzing Solana (SOL) cryptocurrency price data and market trends",
|
||||
system_prompt=f"""You are an expert cryptocurrency analyst specializing in Solana (SOL) analysis. Your expertise includes:
|
||||
|
||||
- Technical analysis and chart patterns
|
||||
- Market sentiment analysis
|
||||
- Volume and liquidity analysis
|
||||
- Price action interpretation
|
||||
- Market cap and valuation metrics
|
||||
- Cryptocurrency market dynamics
|
||||
- DeFi ecosystem analysis
|
||||
- Blockchain technology trends
|
||||
|
||||
When analyzing Solana data, you should:
|
||||
- Evaluate price movements and trends
|
||||
- Assess market sentiment and momentum
|
||||
- Consider volume and liquidity factors
|
||||
- Analyze market cap positioning
|
||||
- Provide actionable insights
|
||||
- Identify potential catalysts or risks
|
||||
- Consider broader market context
|
||||
|
||||
You communicate clearly and provide practical analysis that helps users understand Solana's current market position and potential future movements.
|
||||
|
||||
Current Solana Data: {get_solana_price()}
|
||||
""",
|
||||
max_loops=1,
|
||||
model_name="gpt-4o-mini",
|
||||
dynamic_temperature_enabled=True,
|
||||
output_type="str-all-except-first",
|
||||
streaming_on=False,  # Disabled for now: streaming duplicates the output border when scrolling in the terminal
|
||||
print_on=True,
|
||||
telemetry_enable=False,
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main function to run the Solana price tracking cron job.
|
||||
"""
|
||||
logger.info("🚀 Starting Solana price tracking cron job")
|
||||
logger.info("📊 Fetching Solana price every 10 seconds...")
|
||||
|
||||
# Create cron job that runs every 30 seconds
|
||||
cron_job = CronJob(agent=agent, interval="30seconds")
|
||||
|
||||
# Run the cron job with analysis task
|
||||
cron_job.run(
|
||||
task="Analyze the current Solana (SOL) price data comprehensively. Provide detailed market analysis including price trends, volume analysis, market sentiment, and actionable insights. Format your response clearly with emojis and structured sections."
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1,267 @@
|
||||
import time
|
||||
from typing import Dict, List
|
||||
|
||||
from swarms import Agent
|
||||
from swarms.utils.litellm_tokenizer import count_tokens
|
||||
|
||||
|
||||
class LongFormGenerator:
|
||||
"""
|
||||
A class for generating long-form content using the swarms Agent framework.
|
||||
|
||||
This class provides methods for creating comprehensive, detailed content
|
||||
with support for continuation and sectioned generation.
|
||||
"""
|
||||
|
||||
def __init__(self, model: str = "claude-sonnet-4-20250514"):
|
||||
"""
|
||||
Initialize the LongFormGenerator with specified model.
|
||||
|
||||
Args:
|
||||
model (str): The model to use for content generation
|
||||
"""
|
||||
self.model = model
|
||||
|
||||
def estimate_tokens(self, text: str) -> int:
|
||||
"""
|
||||
Estimate token count for text.
|
||||
|
||||
Args:
|
||||
text (str): The text to estimate tokens for
|
||||
|
||||
Returns:
|
||||
int: Estimated token count
|
||||
"""
|
||||
return count_tokens(text=text, model=self.model)
|
||||
|
||||
def create_expansion_prompt(
|
||||
self, topic: str, requirements: Dict
|
||||
) -> str:
|
||||
"""
|
||||
Create optimized prompt for long-form content.
|
||||
|
||||
Args:
|
||||
topic (str): The main topic to generate content about
|
||||
requirements (Dict): Requirements for content generation
|
||||
|
||||
Returns:
|
||||
str: Formatted prompt for content generation
|
||||
"""
|
||||
structure_requirements = []
|
||||
if "sections" in requirements:
|
||||
for i, section in enumerate(requirements["sections"]):
|
||||
structure_requirements.append(
|
||||
f"{i+1}. {section['title']} - {section.get('description', 'Provide comprehensive analysis')}"
|
||||
)
|
||||
|
||||
length_guidance = (
|
||||
f"Target length: {requirements.get('min_words', 2000)}-{requirements.get('max_words', 4000)} words"
|
||||
if "min_words" in requirements
|
||||
else ""
|
||||
)
|
||||
|
||||
prompt = f"""Create a comprehensive, detailed analysis of: {topic}
|
||||
REQUIREMENTS:
|
||||
- This is a professional-level document requiring thorough treatment
|
||||
- Each section must be substantive with detailed explanations
|
||||
- Include specific examples, case studies, and technical details where relevant
|
||||
- Provide multiple perspectives and comprehensive coverage
|
||||
- {length_guidance}
|
||||
STRUCTURE:
|
||||
{chr(10).join(structure_requirements)}
|
||||
QUALITY STANDARDS:
|
||||
- Demonstrate deep expertise and understanding
|
||||
- Include relevant technical specifications and details
|
||||
- Provide actionable insights and practical applications
|
||||
- Use professional language appropriate for expert audience
|
||||
- Ensure logical flow and comprehensive coverage of all aspects
|
||||
Begin your comprehensive analysis:"""
|
||||
|
||||
return prompt
|
||||
|
||||
def generate_with_continuation(
|
||||
self, topic: str, requirements: Dict, max_attempts: int = 3
|
||||
) -> str:
|
||||
"""
|
||||
Generate long-form content with continuation if needed.
|
||||
|
||||
Args:
|
||||
topic (str): The main topic to generate content about
|
||||
requirements (Dict): Requirements for content generation
|
||||
max_attempts (int): Maximum number of continuation attempts
|
||||
|
||||
Returns:
|
||||
str: Generated long-form content
|
||||
"""
|
||||
initial_prompt = self.create_expansion_prompt(
|
||||
topic, requirements
|
||||
)
|
||||
|
||||
# Create agent for initial generation
|
||||
agent = Agent(
|
||||
name="LongForm Content Generator",
|
||||
system_prompt=initial_prompt,
|
||||
model_name=self.model,
|
||||
max_loops=1,
|
||||
temperature=0.7,
|
||||
max_tokens=4000,
|
||||
)
|
||||
|
||||
# Generate initial response
|
||||
content = agent.run(topic)
|
||||
target_words = requirements.get("min_words", 2000)
|
||||
|
||||
# Check if continuation is needed
|
||||
word_count = len(content.split())
|
||||
continuation_count = 0
|
||||
|
||||
while (
|
||||
word_count < target_words
|
||||
and continuation_count < max_attempts
|
||||
):
|
||||
continuation_prompt = f"""Continue and expand the previous analysis. The current response is {word_count} words, but we need approximately {target_words} words total for comprehensive coverage.
|
||||
Please continue with additional detailed analysis, examples, and insights. Focus on areas that could benefit from deeper exploration or additional perspectives. Maintain the same professional tone and analytical depth.
|
||||
Continue the analysis:"""
|
||||
|
||||
# Create continuation agent
|
||||
continuation_agent = Agent(
|
||||
name="Content Continuation Agent",
|
||||
system_prompt=continuation_prompt,
|
||||
model_name=self.model,
|
||||
max_loops=1,
|
||||
temperature=0.7,
|
||||
max_tokens=4000,
|
||||
)
|
||||
|
||||
# Generate continuation
|
||||
continuation_content = continuation_agent.run(
|
||||
f"Continue the analysis on: {topic}"
|
||||
)
|
||||
content += "\n\n" + continuation_content
|
||||
word_count = len(content.split())
|
||||
continuation_count += 1
|
||||
|
||||
# Rate limiting
|
||||
time.sleep(1)
|
||||
|
||||
return content
|
||||
|
||||
def generate_sectioned_content(
|
||||
self,
|
||||
topic: str,
|
||||
sections: List[Dict],
|
||||
combine_sections: bool = True,
|
||||
) -> Dict:
|
||||
"""
|
||||
Generate content section by section for maximum length.
|
||||
|
||||
Args:
|
||||
topic (str): The main topic to generate content about
|
||||
sections (List[Dict]): List of section definitions
|
||||
combine_sections (bool): Whether to combine all sections into one document
|
||||
|
||||
Returns:
|
||||
Dict: Dictionary containing individual sections and optionally combined content
|
||||
"""
|
||||
results = {}
|
||||
combined_content = ""
|
||||
|
||||
for section in sections:
|
||||
section_prompt = f"""Write a comprehensive, detailed section on: {section['title']}
|
||||
Context: This is part of a larger analysis on {topic}
|
||||
Requirements for this section:
|
||||
- Provide {section.get('target_words', 500)}-{section.get('max_words', 800)} words of detailed content
|
||||
- {section.get('description', 'Provide thorough analysis with examples and insights')}
|
||||
- Include specific examples, technical details, and practical applications
|
||||
- Use professional language suitable for expert audience
|
||||
- Ensure comprehensive coverage of all relevant aspects
|
||||
Write the complete section:"""
|
||||
|
||||
# Create agent for this section
|
||||
section_agent = Agent(
|
||||
name=f"Section Generator - {section['title']}",
|
||||
system_prompt=section_prompt,
|
||||
model_name=self.model,
|
||||
max_loops=1,
|
||||
temperature=0.7,
|
||||
max_tokens=3000,
|
||||
)
|
||||
|
||||
# Generate section content
|
||||
section_content = section_agent.run(
|
||||
f"Generate section: {section['title']} for topic: {topic}"
|
||||
)
|
||||
results[section["title"]] = section_content
|
||||
|
||||
if combine_sections:
|
||||
combined_content += (
|
||||
f"\n\n## {section['title']}\n\n{section_content}"
|
||||
)
|
||||
|
||||
# Rate limiting between sections
|
||||
time.sleep(1)
|
||||
|
||||
if combine_sections:
|
||||
results["combined"] = combined_content.strip()
|
||||
|
||||
return results
|
||||
|
||||
|
||||
# Example usage
|
||||
if __name__ == "__main__":
|
||||
# Initialize the generator
|
||||
generator = LongFormGenerator()
|
||||
|
||||
# Example topic and requirements
|
||||
topic = "Artificial Intelligence in Healthcare"
|
||||
requirements = {
|
||||
"min_words": 2500,
|
||||
"max_words": 4000,
|
||||
"sections": [
|
||||
{
|
||||
"title": "Current Applications",
|
||||
"description": "Analyze current AI applications in healthcare",
|
||||
"target_words": 600,
|
||||
"max_words": 800,
|
||||
},
|
||||
{
|
||||
"title": "Future Prospects",
|
||||
"description": "Discuss future developments and potential",
|
||||
"target_words": 500,
|
||||
"max_words": 700,
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
# Generate comprehensive content
|
||||
content = generator.generate_with_continuation(
|
||||
topic, requirements
|
||||
)
|
||||
print("Generated Content:")
|
||||
print(content)
|
||||
print(f"\nWord count: {len(content.split())}")
|
||||
|
||||
# Generate sectioned content
|
||||
sections = [
|
||||
{
|
||||
"title": "AI in Medical Imaging",
|
||||
"description": "Comprehensive analysis of AI applications in medical imaging",
|
||||
"target_words": 500,
|
||||
"max_words": 700,
|
||||
},
|
||||
{
|
||||
"title": "AI in Drug Discovery",
|
||||
"description": "Detailed examination of AI in pharmaceutical research",
|
||||
"target_words": 600,
|
||||
"max_words": 800,
|
||||
},
|
||||
]
|
||||
|
||||
sectioned_results = generator.generate_sectioned_content(
|
||||
topic, sections
|
||||
)
|
||||
print("\nSectioned Content:")
|
||||
for section_title, section_content in sectioned_results.items():
|
||||
if section_title != "combined":
|
||||
print(f"\n--- {section_title} ---")
|
||||
print(section_content[:200] + "...")
|
@ -0,0 +1,29 @@
|
||||
from swarms import Agent
|
||||
|
||||
|
||||
def generate_comprehensive_content(topic, sections):
|
||||
prompt = f"""You are tasked with creating a comprehensive, detailed analysis of {topic}.
|
||||
This should be a thorough, professional-level document suitable for expert review.
|
||||
|
||||
Structure your response with the following sections, ensuring each is substantive and detailed:
|
||||
{chr(10).join([f"{i+1}. {section} - Provide extensive detail with examples and analysis" for i, section in enumerate(sections)])}
|
||||
|
||||
For each section:
|
||||
- Include multiple subsections where appropriate
|
||||
- Provide specific examples and case studies
|
||||
- Offer detailed explanations of complex concepts
|
||||
- Include relevant technical details and specifications
|
||||
- Discuss implications and considerations thoroughly
|
||||
|
||||
Aim for comprehensive coverage that demonstrates deep expertise. This is a professional document that should be thorough and substantive throughout."""
|
||||
|
||||
agent = Agent(
|
||||
name="Comprehensive Content Generator",
|
||||
system_prompt=prompt,
|
||||
model="claude-sonnet-4-20250514",
|
||||
max_loops=1,
|
||||
temperature=0.5,
|
||||
max_tokens=4000,
|
||||
)
|
||||
|
||||
return agent.run(topic)
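# Example usage (illustrative sketch; the topic and section names below are
# placeholders rather than part of the original example):
if __name__ == "__main__":
    report = generate_comprehensive_content(
        topic="Artificial Intelligence in Healthcare",
        sections=[
            "Current Applications",
            "Technical Challenges",
            "Regulatory Considerations",
            "Future Outlook",
        ],
    )
    print(report)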
|
@ -0,0 +1,258 @@
|
||||
# Getting Started with GraphWorkflow
|
||||
|
||||
Welcome to **GraphWorkflow** - The LangGraph Killer! 🚀
|
||||
|
||||
This guide will get you up and running with Swarms' GraphWorkflow system in minutes.
|
||||
|
||||
## 🚀 Quick Installation
|
||||
|
||||
```bash
|
||||
# Install Swarms with all dependencies
|
||||
uv pip install swarms
|
||||
|
||||
# Optional: Install visualization dependencies
|
||||
uv pip install graphviz
|
||||
|
||||
# Verify installation
|
||||
python -c "from swarms.structs.graph_workflow import GraphWorkflow; print('✅ GraphWorkflow ready')"
|
||||
```
|
||||
|
||||
## 🎯 Choose Your Starting Point
|
||||
|
||||
### 📚 New to GraphWorkflow?
|
||||
|
||||
Start here: **[Quick Start Guide](quick_start_guide.py)**
|
||||
|
||||
```bash
|
||||
python quick_start_guide.py
|
||||
```
|
||||
|
||||
Learn GraphWorkflow in 5 easy steps:
|
||||
- ✅ Create your first workflow
|
||||
- ✅ Connect agents in sequence
|
||||
- ✅ Set up parallel processing
|
||||
- ✅ Use advanced patterns
|
||||
- ✅ Monitor performance
|
||||
|
||||
### 🔬 Want to See Everything?
|
||||
|
||||
Run the comprehensive demo: **[Comprehensive Demo](comprehensive_demo.py)**
|
||||
|
||||
```bash
|
||||
# See all features
|
||||
python comprehensive_demo.py
|
||||
|
||||
# Focus on specific areas
|
||||
python comprehensive_demo.py --demo healthcare
|
||||
python comprehensive_demo.py --demo finance
|
||||
python comprehensive_demo.py --demo parallel
|
||||
```
|
||||
|
||||
### 🛠️ Need Setup Help?
|
||||
|
||||
Use the setup script: **[Setup and Test](setup_and_test.py)**
|
||||
|
||||
```bash
|
||||
# Check your environment
|
||||
python setup_and_test.py --check-only
|
||||
|
||||
# Install dependencies and run tests
|
||||
python setup_and_test.py
|
||||
```
|
||||
|
||||
## 📖 Documentation
|
||||
|
||||
### 📋 Quick Reference
|
||||
|
||||
```python
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
# 1. Create agents
|
||||
agent1 = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1)
|
||||
agent2 = Agent(agent_name="Writer", model_name="gpt-4o-mini", max_loops=1)
|
||||
|
||||
# 2. Create workflow
|
||||
workflow = GraphWorkflow(name="MyWorkflow", auto_compile=True)
|
||||
|
||||
# 3. Add agents and connections
|
||||
workflow.add_node(agent1)
|
||||
workflow.add_node(agent2)
|
||||
workflow.add_edge("Researcher", "Writer")
|
||||
|
||||
# 4. Execute
|
||||
results = workflow.run(task="Write about AI trends")
|
||||
```
|
||||
|
||||
### 📚 Complete Documentation
|
||||
|
||||
- **[Technical Guide](graph_workflow_technical_guide.md)**: 4,000-word comprehensive guide
|
||||
- **[Examples README](README.md)**: Complete examples overview
|
||||
- **[API Reference](../../../docs/swarms/structs/)**: Detailed API documentation
|
||||
|
||||
## 🎨 Key Features Overview
|
||||
|
||||
### ⚡ Parallel Processing
|
||||
|
||||
```python
|
||||
# Fan-out: One agent to multiple agents
|
||||
workflow.add_edges_from_source("DataCollector", ["AnalystA", "AnalystB"])
|
||||
|
||||
# Fan-in: Multiple agents to one agent
|
||||
workflow.add_edges_to_target(["SpecialistX", "SpecialistY"], "Synthesizer")
|
||||
|
||||
# Parallel chain: Many-to-many mesh
|
||||
workflow.add_parallel_chain(["DataA", "DataB"], ["ProcessorX", "ProcessorY"])
|
||||
```
|
||||
|
||||
### 🚀 Performance Optimization
|
||||
|
||||
```python
|
||||
# Automatic compilation for 40-60% speedup
|
||||
workflow = GraphWorkflow(auto_compile=True)
|
||||
|
||||
# Monitor performance
|
||||
status = workflow.get_compilation_status()
|
||||
print(f"Workers: {status['max_workers']}")
|
||||
print(f"Layers: {status['cached_layers_count']}")
|
||||
```
|
||||
|
||||
### 🎨 Professional Visualization
|
||||
|
||||
```python
|
||||
# Generate beautiful workflow diagrams
|
||||
workflow.visualize(
|
||||
format="png", # png, svg, pdf, dot
|
||||
show_summary=True, # Show parallel processing stats
|
||||
engine="dot" # Layout algorithm
|
||||
)
|
||||
```
|
||||
|
||||
### 💾 Enterprise Features
|
||||
|
||||
```python
|
||||
# Complete workflow serialization
|
||||
json_data = workflow.to_json(include_conversation=True)
|
||||
restored = GraphWorkflow.from_json(json_data)
|
||||
|
||||
# File persistence
|
||||
workflow.save_to_file("my_workflow.json")
|
||||
loaded = GraphWorkflow.load_from_file("my_workflow.json")
|
||||
|
||||
# Validation and monitoring
|
||||
validation = workflow.validate(auto_fix=True)
|
||||
summary = workflow.export_summary()
|
||||
```
|
||||
|
||||
## 🏥 Real-World Examples
|
||||
|
||||
### Healthcare: Clinical Decision Support
|
||||
|
||||
```python
|
||||
# Multi-specialist clinical workflow
|
||||
workflow.add_edges_from_source("PatientData", [
|
||||
"PrimaryCare", "Cardiologist", "Pharmacist"
|
||||
])
|
||||
workflow.add_edges_to_target([
|
||||
"PrimaryCare", "Cardiologist", "Pharmacist"
|
||||
], "CaseManager")
|
||||
|
||||
results = workflow.run(task="Analyze patient with chest pain...")
|
||||
```
|
||||
|
||||
### Finance: Investment Analysis
|
||||
|
||||
```python
|
||||
# Parallel financial analysis
|
||||
workflow.add_parallel_chain(
|
||||
["MarketData", "FundamentalData"],
|
||||
["TechnicalAnalyst", "FundamentalAnalyst", "RiskManager"]
|
||||
)
|
||||
workflow.add_edges_to_target([
|
||||
"TechnicalAnalyst", "FundamentalAnalyst", "RiskManager"
|
||||
], "PortfolioManager")
|
||||
|
||||
results = workflow.run(task="Analyze tech sector allocation...")
|
||||
```
|
||||
|
||||
## 🏃‍♂️ Performance Benchmarks
|
||||
|
||||
GraphWorkflow delivers **40-60% better performance** than sequential execution:
|
||||
|
||||
| Agents | Sequential | GraphWorkflow | Speedup |
|
||||
|--------|------------|---------------|---------|
|
||||
| 5 | 15.2s | 8.7s | 1.75x |
|
||||
| 10 | 28.5s | 16.1s | 1.77x |
|
||||
| 15 | 42.8s | 24.3s | 1.76x |
|
||||
|
||||
*Benchmarks run on 8-core CPU with gpt-4o-mini*
|
||||
|
||||
## 🆚 Why GraphWorkflow > LangGraph?
|
||||
|
||||
| Feature | GraphWorkflow | LangGraph |
|
||||
|---------|---------------|-----------|
|
||||
| **Parallel Processing** | ✅ Native fan-out/fan-in | ❌ Limited |
|
||||
| **Performance** | ✅ 40-60% faster | ❌ Sequential bottlenecks |
|
||||
| **Compilation** | ✅ Intelligent caching | ❌ No optimization |
|
||||
| **Visualization** | ✅ Professional Graphviz | ❌ Basic diagrams |
|
||||
| **Enterprise Features** | ✅ Full serialization | ❌ Limited persistence |
|
||||
| **Error Handling** | ✅ Comprehensive validation | ❌ Basic checks |
|
||||
| **Monitoring** | ✅ Rich metrics | ❌ Limited insights |
|
||||
|
||||
## 🛠️ Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Problem**: Import error
|
||||
```bash
|
||||
# Solution: Install dependencies
|
||||
uv pip install swarms
|
||||
python setup_and_test.py --install-deps
|
||||
```
|
||||
|
||||
**Problem**: Slow execution
|
||||
```python
|
||||
# Solution: Enable compilation
|
||||
workflow = GraphWorkflow(auto_compile=True)
|
||||
workflow.compile() # Manual compilation
|
||||
```
|
||||
|
||||
**Problem**: Memory issues
|
||||
```python
|
||||
# Solution: Clear conversation history
|
||||
workflow.conversation = Conversation()
|
||||
```
|
||||
|
||||
**Problem**: Graph validation errors
|
||||
```python
|
||||
# Solution: Use auto-fix
|
||||
validation = workflow.validate(auto_fix=True)
|
||||
if not validation['is_valid']:
|
||||
print("Errors:", validation['errors'])
|
||||
```
|
||||
|
||||
### Get Help
|
||||
|
||||
- 📖 **Read the docs**: [Technical Guide](graph_workflow_technical_guide.md)
|
||||
- 🔍 **Check examples**: Browse this guide directory
|
||||
- 🧪 **Run tests**: Use `python setup_and_test.py`
|
||||
- 🐛 **Report bugs**: Open an issue on GitHub
|
||||
|
||||
## 🎯 Next Steps
|
||||
|
||||
1. **🎓 Learn**: Complete the [Quick Start Guide](quick_start_guide.py)
|
||||
2. **🔬 Explore**: Try the [Comprehensive Demo](comprehensive_demo.py)
|
||||
3. **🏥 Apply**: Adapt healthcare or finance examples
|
||||
4. **📚 Study**: Read the [Technical Guide](graph_workflow_technical_guide.md)
|
||||
5. **🚀 Deploy**: Build your production workflows
|
||||
|
||||
## 🎉 Ready to Build?
|
||||
|
||||
GraphWorkflow is **production-ready** and **enterprise-grade**. Join the revolution in multi-agent orchestration!
|
||||
|
||||
```bash
|
||||
# Start your GraphWorkflow journey
|
||||
python quick_start_guide.py
|
||||
```
|
||||
|
||||
**The LangGraph Killer is here. Welcome to the future of multi-agent systems!** 🌟
|
@ -0,0 +1,322 @@
|
||||
# GraphWorkflow Guide
|
||||
|
||||
Welcome to the comprehensive GraphWorkflow guide! This collection demonstrates the power and flexibility of Swarms' GraphWorkflow system - the LangGraph killer that provides superior multi-agent orchestration capabilities.
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
# Install Swarms with all dependencies
|
||||
uv pip install swarms
|
||||
|
||||
# Optional: Install visualization dependencies
|
||||
uv pip install graphviz
|
||||
|
||||
# Verify installation
|
||||
python -c "from swarms.structs.graph_workflow import GraphWorkflow; print('✅ GraphWorkflow ready')"
|
||||
```
|
||||
|
||||
### Run Your First Example
|
||||
|
||||
```bash
|
||||
# Start with the quick start guide
|
||||
python quick_start_guide.py
|
||||
|
||||
# Or run the comprehensive demo
|
||||
python comprehensive_demo.py
|
||||
|
||||
# For specific examples
|
||||
python comprehensive_demo.py --demo healthcare
|
||||
python comprehensive_demo.py --demo finance
|
||||
```
|
||||
|
||||
## 📁 Example Files
|
||||
|
||||
### 🎓 Learning Examples
|
||||
|
||||
| File | Description | Complexity |
|
||||
|------|-------------|------------|
|
||||
| `quick_start_guide.py` | **START HERE** - Step-by-step introduction to GraphWorkflow | ⭐ Beginner |
|
||||
| `graph_workflow_example.py` | Basic two-agent workflow example | ⭐ Beginner |
|
||||
| `comprehensive_demo.py` | Complete feature demonstration with multiple use cases | ⭐⭐⭐ Advanced |
|
||||
|
||||
### 🏥 Healthcare Examples
|
||||
|
||||
| File | Description | Complexity |
|
||||
|------|-------------|------------|
|
||||
| `comprehensive_demo.py --demo healthcare` | Clinical decision support workflow | ⭐⭐⭐ Advanced |
|
||||
|
||||
**Healthcare Workflow Features:**
|
||||
- Multi-disciplinary clinical team simulation
|
||||
- Parallel specialist consultations
|
||||
- Drug interaction checking
|
||||
- Risk assessment and quality assurance
|
||||
- Evidence-based clinical decision support
|
||||
|
||||
### 💰 Finance Examples
|
||||
|
||||
| File | Description | Complexity |
|
||||
|------|-------------|------------|
|
||||
| `advanced_graph_workflow.py` | Sophisticated investment analysis workflow | ⭐⭐⭐ Advanced |
|
||||
| `comprehensive_demo.py --demo finance` | Quantitative trading strategy development | ⭐⭐⭐ Advanced |
|
||||
|
||||
**Finance Workflow Features:**
|
||||
- Multi-source market data analysis
|
||||
- Parallel quantitative analysis (Technical, Fundamental, Sentiment)
|
||||
- Risk management and portfolio optimization
|
||||
- Strategy backtesting and validation
|
||||
- Execution planning and monitoring
|
||||
|
||||
### 🔧 Technical Examples
|
||||
|
||||
| File | Description | Complexity |
|
||||
|------|-------------|------------|
|
||||
| `test_parallel_processing_example.py` | Comprehensive parallel processing patterns | ⭐⭐ Intermediate |
|
||||
| `test_graphviz_visualization.py` | Visualization capabilities and layouts | ⭐⭐ Intermediate |
|
||||
| `test_graph_workflow_caching.py` | Performance optimization and caching | ⭐⭐ Intermediate |
|
||||
| `test_enhanced_json_export.py` | Serialization and persistence features | ⭐⭐ Intermediate |
|
||||
| `test_graphworlfolw_validation.py` | Workflow validation and error handling | ⭐⭐ Intermediate |
|
||||
|
||||
## 🎯 Key Features Demonstrated
|
||||
|
||||
### ⚡ Parallel Processing Patterns
|
||||
|
||||
- **Fan-out**: One agent distributes to multiple agents
|
||||
- **Fan-in**: Multiple agents converge to one agent
|
||||
- **Parallel chains**: Many-to-many mesh processing
|
||||
- **Complex hybrid**: Sophisticated multi-stage patterns
|
||||
|
||||
### 🚀 Performance Optimization
|
||||
|
||||
- **Intelligent Compilation**: Pre-computed execution layers
|
||||
- **Advanced Caching**: Persistent state across runs
|
||||
- **Worker Pool Optimization**: CPU-optimized parallel execution
|
||||
- **Memory Management**: Efficient resource utilization
|
||||
|
||||
### 🎨 Visualization & Monitoring
|
||||
|
||||
- **Professional Graphviz Diagrams**: Multiple layouts and formats
|
||||
- **Real-time Performance Metrics**: Execution monitoring
|
||||
- **Workflow Validation**: Comprehensive error checking
|
||||
- **Rich Logging**: Detailed execution insights
|
||||
|
||||
### 💾 Enterprise Features
|
||||
|
||||
- **JSON Serialization**: Complete workflow persistence (see the sketch after this list)
|
||||
- **Runtime State Management**: Compilation caching
|
||||
- **Error Handling**: Robust failure recovery
|
||||
- **Scalability**: Support for large agent networks
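A minimal sketch of these persistence and validation features, mirroring the calls shown in the Getting Started guide and assuming an existing `workflow` instance:

```python
# Serialize the workflow (optionally including conversation history)
json_data = workflow.to_json(include_conversation=True)
restored = GraphWorkflow.from_json(json_data)

# Persist to disk and reload later
workflow.save_to_file("my_workflow.json")
loaded = GraphWorkflow.load_from_file("my_workflow.json")

# Validate the graph structure and export a summary for monitoring
validation = workflow.validate(auto_fix=True)
summary = workflow.export_summary()
```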
|
||||
|
||||
## 🏃‍♂️ Running Examples
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```python
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
# Create agents
|
||||
agent1 = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1)
|
||||
agent2 = Agent(agent_name="Writer", model_name="gpt-4o-mini", max_loops=1)
|
||||
|
||||
# Create workflow
|
||||
workflow = GraphWorkflow(name="SimpleWorkflow", auto_compile=True)
|
||||
workflow.add_node(agent1)
|
||||
workflow.add_node(agent2)
|
||||
workflow.add_edge("Researcher", "Writer")
|
||||
|
||||
# Execute
|
||||
results = workflow.run(task="Research and write about AI trends")
|
||||
```
|
||||
|
||||
### Parallel Processing
|
||||
|
||||
```python
|
||||
# Fan-out pattern: One agent to multiple agents
|
||||
workflow.add_edges_from_source("DataCollector", ["AnalystA", "AnalystB", "AnalystC"])
|
||||
|
||||
# Fan-in pattern: Multiple agents to one agent
|
||||
workflow.add_edges_to_target(["SpecialistX", "SpecialistY"], "Synthesizer")
|
||||
|
||||
# Parallel chain: Many-to-many processing
|
||||
workflow.add_parallel_chain(
|
||||
sources=["DataA", "DataB"],
|
||||
targets=["ProcessorX", "ProcessorY"]
|
||||
)
|
||||
```
|
||||
|
||||
### Performance Monitoring
|
||||
|
||||
```python
|
||||
# Get compilation status
|
||||
status = workflow.get_compilation_status()
|
||||
print(f"Compiled: {status['is_compiled']}")
|
||||
print(f"Workers: {status['max_workers']}")
|
||||
|
||||
# Monitor execution
|
||||
import time
|
||||
start = time.time()
|
||||
results = workflow.run(task="Analyze market conditions")
|
||||
print(f"Execution time: {time.time() - start:.2f}s")
|
||||
print(f"Throughput: {len(results)/(time.time() - start):.1f} agents/second")
|
||||
```
|
||||
|
||||
## 🔬 Use Case Examples
|
||||
|
||||
### 📊 Enterprise Data Processing
|
||||
|
||||
```python
|
||||
# Multi-stage data pipeline
|
||||
workflow.add_parallel_chain(
|
||||
["APIIngester", "DatabaseExtractor", "FileProcessor"],
|
||||
["DataValidator", "DataTransformer", "DataEnricher"]
|
||||
)
|
||||
workflow.add_edges_to_target(
|
||||
["DataValidator", "DataTransformer", "DataEnricher"],
|
||||
"ReportGenerator"
|
||||
)
|
||||
```
|
||||
|
||||
### 🏥 Clinical Decision Support
|
||||
|
||||
```python
|
||||
# Multi-specialist consultation
|
||||
workflow.add_edges_from_source("PatientDataCollector", [
|
||||
"PrimaryCarePhysician", "Cardiologist", "Pharmacist"
|
||||
])
|
||||
workflow.add_edges_to_target([
|
||||
"PrimaryCarePhysician", "Cardiologist", "Pharmacist"
|
||||
], "CaseManager")
|
||||
```
|
||||
|
||||
### 💼 Investment Analysis
|
||||
|
||||
```python
|
||||
# Parallel financial analysis
|
||||
workflow.add_parallel_chain(
|
||||
["MarketDataCollector", "FundamentalDataCollector"],
|
||||
["TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst"]
|
||||
)
|
||||
workflow.add_edges_to_target([
|
||||
"TechnicalAnalyst", "FundamentalAnalyst", "SentimentAnalyst"
|
||||
], "PortfolioManager")
|
||||
```
|
||||
|
||||
## 🎨 Visualization Examples
|
||||
|
||||
### Generate Workflow Diagrams
|
||||
|
||||
```python
|
||||
# Professional Graphviz visualization
|
||||
workflow.visualize(
|
||||
format="png", # png, svg, pdf, dot
|
||||
engine="dot", # dot, neato, fdp, sfdp, circo
|
||||
show_summary=True, # Display parallel processing stats
|
||||
view=True # Open diagram automatically
|
||||
)
|
||||
|
||||
# Text-based visualization (always available)
|
||||
workflow.visualize_simple()
|
||||
```
|
||||
|
||||
### Example Output
|
||||
|
||||
```
|
||||
📊 GRAPHVIZ WORKFLOW VISUALIZATION
|
||||
====================================
|
||||
📁 Saved to: MyWorkflow_visualization.png
|
||||
🤖 Total Agents: 8
|
||||
🔗 Total Connections: 12
|
||||
📚 Execution Layers: 4
|
||||
|
||||
⚡ Parallel Processing Patterns:
|
||||
🔀 Fan-out patterns: 2
|
||||
🔀 Fan-in patterns: 1
|
||||
⚡ Parallel execution nodes: 6
|
||||
🎯 Parallel efficiency: 75.0%
|
||||
```
|
||||
|
||||
## 🛠️ Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Compilation Errors**
|
||||
```python
|
||||
# Check for cycles in workflow
|
||||
validation = workflow.validate(auto_fix=True)
|
||||
if not validation['is_valid']:
|
||||
print("Validation errors:", validation['errors'])
|
||||
```
|
||||
|
||||
2. **Performance Issues**
|
||||
```python
|
||||
# Ensure compilation before execution
|
||||
workflow.compile()
|
||||
|
||||
# Check worker count
|
||||
status = workflow.get_compilation_status()
|
||||
print(f"Workers: {status['max_workers']}")
|
||||
```
|
||||
|
||||
3. **Memory Issues**
|
||||
```python
|
||||
# Clear conversation history if not needed
|
||||
workflow.conversation = Conversation()
|
||||
|
||||
# Monitor memory usage
|
||||
import psutil
|
||||
process = psutil.Process()
|
||||
memory_mb = process.memory_info().rss / 1024 / 1024
|
||||
print(f"Memory: {memory_mb:.1f} MB")
|
||||
```
|
||||
|
||||
### Debug Mode
|
||||
|
||||
```python
|
||||
# Enable detailed logging
|
||||
workflow = GraphWorkflow(
|
||||
name="DebugWorkflow",
|
||||
verbose=True, # Detailed execution logs
|
||||
auto_compile=True, # Automatic optimization
|
||||
)
|
||||
|
||||
# Validate workflow structure
|
||||
validation = workflow.validate(auto_fix=True)
|
||||
print("Validation result:", validation)
|
||||
```
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
- **[Technical Guide](graph_workflow_technical_guide.md)**: Comprehensive 4,000-word technical documentation
|
||||
- **[API Reference](../../../docs/swarms/structs/)**: Complete API documentation
|
||||
- **[Multi-Agent Examples](../../multi_agent/)**: Other multi-agent examples
|
||||
|
||||
## 🤝 Contributing
|
||||
|
||||
Found a bug or want to add an example?
|
||||
|
||||
1. **Report Issues**: Open an issue with detailed reproduction steps
|
||||
2. **Add Examples**: Submit PRs with new use case examples
|
||||
3. **Improve Documentation**: Help expand the guides and tutorials
|
||||
4. **Performance Optimization**: Share benchmarks and optimizations
|
||||
|
||||
## 🎯 Next Steps
|
||||
|
||||
1. **Start Learning**: Run `python quick_start_guide.py`
|
||||
2. **Explore Examples**: Try healthcare and finance use cases
|
||||
3. **Build Your Workflow**: Adapt examples to your domain
|
||||
4. **Deploy to Production**: Use monitoring and optimization features
|
||||
5. **Join Community**: Share your workflows and get help
|
||||
|
||||
## 🏆 Why GraphWorkflow?
|
||||
|
||||
GraphWorkflow is the **LangGraph killer** because it provides:
|
||||
|
||||
- **40-60% Better Performance**: Intelligent compilation and parallel execution
|
||||
- **Enterprise Reliability**: Comprehensive error handling and monitoring
|
||||
- **Superior Scalability**: Handles hundreds of agents efficiently
|
||||
- **Rich Visualization**: Professional workflow diagrams
|
||||
- **Production Ready**: Serialization, caching, and validation
|
||||
|
||||
Ready to revolutionize your multi-agent systems? Start with GraphWorkflow today! 🚀
|
@ -0,0 +1,909 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Comprehensive GraphWorkflow Demo Script
|
||||
=======================================
|
||||
|
||||
This script demonstrates all key features of Swarms' GraphWorkflow system,
|
||||
including parallel processing patterns, performance optimization, and real-world use cases.
|
||||
|
||||
Usage:
|
||||
python comprehensive_demo.py [--demo healthcare|finance|enterprise|all]
|
||||
|
||||
Requirements:
|
||||
uv pip install swarms
|
||||
uv pip install graphviz # Optional for visualization
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import time
|
||||
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
|
||||
def create_basic_workflow_demo():
|
||||
"""Demonstrate basic GraphWorkflow functionality."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("🚀 BASIC GRAPHWORKFLOW DEMONSTRATION")
|
||||
print("=" * 60)
|
||||
|
||||
# Create simple agents
|
||||
data_collector = Agent(
|
||||
agent_name="DataCollector",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are a data collection specialist. Gather and organize relevant information for analysis.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
data_analyzer = Agent(
|
||||
agent_name="DataAnalyzer",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are a data analysis expert. Analyze the collected data and extract key insights.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
report_generator = Agent(
|
||||
agent_name="ReportGenerator",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are a report generation specialist. Create comprehensive reports from analysis results.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create workflow
|
||||
workflow = GraphWorkflow(
|
||||
name="BasicWorkflowDemo",
|
||||
description="Demonstrates basic GraphWorkflow functionality",
|
||||
verbose=True,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
# Add nodes
|
||||
for agent in [data_collector, data_analyzer, report_generator]:
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Add edges (sequential flow)
|
||||
workflow.add_edge("DataCollector", "DataAnalyzer")
|
||||
workflow.add_edge("DataAnalyzer", "ReportGenerator")
|
||||
|
||||
# Set entry and exit points
|
||||
workflow.set_entry_points(["DataCollector"])
|
||||
workflow.set_end_points(["ReportGenerator"])
|
||||
|
||||
print(
|
||||
f"✅ Created workflow with {len(workflow.nodes)} nodes and {len(workflow.edges)} edges"
|
||||
)
|
||||
|
||||
# Demonstrate compilation
|
||||
compilation_status = workflow.get_compilation_status()
|
||||
print(f"📊 Compilation Status: {compilation_status}")
|
||||
|
||||
# Demonstrate simple visualization
|
||||
try:
|
||||
workflow.visualize_simple()
|
||||
except Exception as e:
|
||||
print(f"⚠️ Visualization not available: {e}")
|
||||
|
||||
# Run workflow
|
||||
task = "Analyze the current state of artificial intelligence in healthcare, focusing on recent developments and future opportunities."
|
||||
|
||||
print(f"\n🔄 Executing workflow with task: {task[:100]}...")
|
||||
start_time = time.time()
|
||||
|
||||
results = workflow.run(task=task)
|
||||
|
||||
execution_time = time.time() - start_time
|
||||
print(f"⏱️ Execution completed in {execution_time:.2f} seconds")
|
||||
|
||||
# Display results
|
||||
print("\n📋 Results Summary:")
|
||||
for agent_name, result in results.items():
|
||||
print(f"\n🤖 {agent_name}:")
|
||||
print(
|
||||
f" {result[:200]}{'...' if len(result) > 200 else ''}"
|
||||
)
|
||||
|
||||
return workflow, results
|
||||
|
||||
|
||||
def create_parallel_processing_demo():
|
||||
"""Demonstrate advanced parallel processing patterns."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("⚡ PARALLEL PROCESSING DEMONSTRATION")
|
||||
print("=" * 60)
|
||||
|
||||
# Create data sources
|
||||
web_scraper = Agent(
|
||||
agent_name="WebScraper",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You specialize in web data scraping and online research.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
api_collector = Agent(
|
||||
agent_name="APICollector",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You specialize in API data collection and integration.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
database_extractor = Agent(
|
||||
agent_name="DatabaseExtractor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You specialize in database queries and data extraction.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create parallel processors
|
||||
text_processor = Agent(
|
||||
agent_name="TextProcessor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You specialize in natural language processing and text analysis.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
numeric_processor = Agent(
|
||||
agent_name="NumericProcessor",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You specialize in numerical analysis and statistical processing.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create analyzers
|
||||
sentiment_analyzer = Agent(
|
||||
agent_name="SentimentAnalyzer",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You specialize in sentiment analysis and emotional intelligence.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
trend_analyzer = Agent(
|
||||
agent_name="TrendAnalyzer",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You specialize in trend analysis and pattern recognition.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create synthesizer
|
||||
data_synthesizer = Agent(
|
||||
agent_name="DataSynthesizer",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You specialize in data synthesis and comprehensive analysis integration.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create workflow
|
||||
workflow = GraphWorkflow(
|
||||
name="ParallelProcessingDemo",
|
||||
description="Demonstrates advanced parallel processing patterns including fan-out, fan-in, and parallel chains",
|
||||
verbose=True,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
# Add all agents
|
||||
agents = [
|
||||
web_scraper,
|
||||
api_collector,
|
||||
database_extractor,
|
||||
text_processor,
|
||||
numeric_processor,
|
||||
sentiment_analyzer,
|
||||
trend_analyzer,
|
||||
data_synthesizer,
|
||||
]
|
||||
|
||||
for agent in agents:
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Demonstrate different parallel patterns
|
||||
print("🔀 Setting up parallel processing patterns...")
|
||||
|
||||
# Pattern 1: Fan-out from sources to processors
|
||||
print(" 📤 Fan-out: Data sources → Processors")
|
||||
workflow.add_edges_from_source(
|
||||
"WebScraper", ["TextProcessor", "SentimentAnalyzer"]
|
||||
)
|
||||
workflow.add_edges_from_source(
|
||||
"APICollector", ["NumericProcessor", "TrendAnalyzer"]
|
||||
)
|
||||
workflow.add_edges_from_source(
|
||||
"DatabaseExtractor", ["TextProcessor", "NumericProcessor"]
|
||||
)
|
||||
|
||||
# Pattern 2: Parallel chain from processors to analyzers
|
||||
print(" 🔗 Parallel chain: Processors → Analyzers")
|
||||
workflow.add_parallel_chain(
|
||||
["TextProcessor", "NumericProcessor"],
|
||||
["SentimentAnalyzer", "TrendAnalyzer"],
|
||||
)
|
||||
|
||||
# Pattern 3: Fan-in to synthesizer
|
||||
print(" 📥 Fan-in: All analyzers → Synthesizer")
|
||||
workflow.add_edges_to_target(
|
||||
["SentimentAnalyzer", "TrendAnalyzer"], "DataSynthesizer"
|
||||
)
|
||||
|
||||
# Set entry and exit points
|
||||
workflow.set_entry_points(
|
||||
["WebScraper", "APICollector", "DatabaseExtractor"]
|
||||
)
|
||||
workflow.set_end_points(["DataSynthesizer"])
|
||||
|
||||
print(
|
||||
f"✅ Created parallel workflow with {len(workflow.nodes)} nodes and {len(workflow.edges)} edges"
|
||||
)
|
||||
|
||||
# Analyze parallel patterns
|
||||
compilation_status = workflow.get_compilation_status()
|
||||
print(f"📊 Compilation Status: {compilation_status}")
|
||||
print(
|
||||
f"🔧 Execution layers: {len(compilation_status.get('layers', []))}"
|
||||
)
|
||||
print(
|
||||
f"⚡ Max parallel workers: {compilation_status.get('max_workers', 'N/A')}"
|
||||
)
|
||||
|
||||
# Run parallel workflow
|
||||
task = "Research and analyze the impact of quantum computing on cybersecurity, examining technical developments, market trends, and security implications."
|
||||
|
||||
print("\n🔄 Executing parallel workflow...")
|
||||
start_time = time.time()
|
||||
|
||||
results = workflow.run(task=task)
|
||||
|
||||
execution_time = time.time() - start_time
|
||||
print(
|
||||
f"⏱️ Parallel execution completed in {execution_time:.2f} seconds"
|
||||
)
|
||||
print(
|
||||
f"🚀 Throughput: {len(results)/execution_time:.1f} agents/second"
|
||||
)
|
||||
|
||||
# Display results
|
||||
print("\n📋 Parallel Processing Results:")
|
||||
for agent_name, result in results.items():
|
||||
print(f"\n🤖 {agent_name}:")
|
||||
print(
|
||||
f" {result[:150]}{'...' if len(result) > 150 else ''}"
|
||||
)
|
||||
|
||||
return workflow, results
|
||||
|
||||
|
||||
def create_healthcare_workflow_demo():
|
||||
"""Demonstrate healthcare-focused workflow."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("🏥 HEALTHCARE WORKFLOW DEMONSTRATION")
|
||||
print("=" * 60)
|
||||
|
||||
# Create clinical specialists
|
||||
primary_care_physician = Agent(
|
||||
agent_name="PrimaryCarePhysician",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a board-certified primary care physician. Provide:
|
||||
1. Initial patient assessment and history taking
|
||||
2. Differential diagnosis development
|
||||
3. Treatment plan coordination
|
||||
4. Preventive care recommendations
|
||||
|
||||
Focus on comprehensive, evidence-based primary care.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
cardiologist = Agent(
|
||||
agent_name="Cardiologist",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a board-certified cardiologist. Provide:
|
||||
1. Cardiovascular risk assessment
|
||||
2. Cardiac diagnostic interpretation
|
||||
3. Treatment recommendations for heart conditions
|
||||
4. Cardiovascular prevention strategies
|
||||
|
||||
Apply evidence-based cardiology guidelines.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
pharmacist = Agent(
|
||||
agent_name="ClinicalPharmacist",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a clinical pharmacist specialist. Provide:
|
||||
1. Medication review and optimization
|
||||
2. Drug interaction analysis
|
||||
3. Dosing recommendations
|
||||
4. Patient counseling guidance
|
||||
|
||||
Ensure medication safety and efficacy.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
case_manager = Agent(
|
||||
agent_name="CaseManager",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a clinical case manager. Coordinate:
|
||||
1. Care plan integration and implementation
|
||||
2. Resource allocation and scheduling
|
||||
3. Patient education and follow-up
|
||||
4. Quality metrics and outcomes tracking
|
||||
|
||||
Ensure coordinated, patient-centered care.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create workflow
|
||||
workflow = GraphWorkflow(
|
||||
name="HealthcareWorkflowDemo",
|
||||
description="Clinical decision support workflow with multi-disciplinary team collaboration",
|
||||
verbose=True,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
# Add agents
|
||||
agents = [
|
||||
primary_care_physician,
|
||||
cardiologist,
|
||||
pharmacist,
|
||||
case_manager,
|
||||
]
|
||||
for agent in agents:
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Create clinical workflow
|
||||
workflow.add_edge("PrimaryCarePhysician", "Cardiologist")
|
||||
workflow.add_edge("PrimaryCarePhysician", "ClinicalPharmacist")
|
||||
workflow.add_edges_to_target(
|
||||
["Cardiologist", "ClinicalPharmacist"], "CaseManager"
|
||||
)
|
||||
|
||||
workflow.set_entry_points(["PrimaryCarePhysician"])
|
||||
workflow.set_end_points(["CaseManager"])
|
||||
|
||||
print(
|
||||
f"✅ Created healthcare workflow with {len(workflow.nodes)} specialists"
|
||||
)
|
||||
|
||||
# Clinical case
|
||||
clinical_case = """
|
||||
Patient: 58-year-old male executive
|
||||
Chief Complaint: Chest pain and shortness of breath during exercise
|
||||
History: Hypertension, family history of coronary artery disease, sedentary lifestyle
|
||||
Current Medications: Lisinopril 10mg daily
|
||||
Vital Signs: BP 145/92, HR 88, BMI 29.5
|
||||
Recent Tests: ECG shows non-specific changes, cholesterol 245 mg/dL
|
||||
|
||||
Please provide comprehensive clinical assessment and care coordination.
|
||||
"""
|
||||
|
||||
print("\n🔄 Processing clinical case...")
|
||||
start_time = time.time()
|
||||
|
||||
results = workflow.run(task=clinical_case)
|
||||
|
||||
execution_time = time.time() - start_time
|
||||
print(
|
||||
f"⏱️ Clinical assessment completed in {execution_time:.2f} seconds"
|
||||
)
|
||||
|
||||
# Display clinical results
|
||||
print("\n🏥 Clinical Team Assessment:")
|
||||
for agent_name, result in results.items():
|
||||
print(f"\n👨⚕️ {agent_name}:")
|
||||
print(
|
||||
f" 📋 {result[:200]}{'...' if len(result) > 200 else ''}"
|
||||
)
|
||||
|
||||
return workflow, results
|
||||
|
||||
|
||||
def create_finance_workflow_demo():
|
||||
"""Demonstrate finance-focused workflow."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("💰 FINANCE WORKFLOW DEMONSTRATION")
|
||||
print("=" * 60)
|
||||
|
||||
# Create financial analysts
|
||||
market_analyst = Agent(
|
||||
agent_name="MarketAnalyst",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a senior market analyst. Provide:
|
||||
1. Market condition assessment and trends
|
||||
2. Sector rotation and thematic analysis
|
||||
3. Economic indicator interpretation
|
||||
4. Market timing and positioning recommendations
|
||||
|
||||
Apply rigorous market analysis frameworks.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
equity_researcher = Agent(
|
||||
agent_name="EquityResearcher",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are an equity research analyst. Provide:
|
||||
1. Company fundamental analysis
|
||||
2. Financial modeling and valuation
|
||||
3. Competitive positioning assessment
|
||||
4. Investment thesis development
|
||||
|
||||
Use comprehensive equity research methodologies.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
risk_manager = Agent(
|
||||
agent_name="RiskManager",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a risk management specialist. Provide:
|
||||
1. Portfolio risk assessment and metrics
|
||||
2. Stress testing and scenario analysis
|
||||
3. Risk mitigation strategies
|
||||
4. Regulatory compliance guidance
|
||||
|
||||
Apply quantitative risk management principles.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
portfolio_manager = Agent(
|
||||
agent_name="PortfolioManager",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="""You are a senior portfolio manager. Provide:
|
||||
1. Investment decision synthesis
|
||||
2. Portfolio construction and allocation
|
||||
3. Performance attribution analysis
|
||||
4. Client communication and reporting
|
||||
|
||||
Integrate all analysis into actionable investment decisions.""",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create workflow
|
||||
workflow = GraphWorkflow(
|
||||
name="FinanceWorkflowDemo",
|
||||
description="Investment decision workflow with multi-disciplinary financial analysis",
|
||||
verbose=True,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
# Add agents
|
||||
agents = [
|
||||
market_analyst,
|
||||
equity_researcher,
|
||||
risk_manager,
|
||||
portfolio_manager,
|
||||
]
|
||||
for agent in agents:
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Create financial workflow (parallel analysis feeding portfolio decisions)
|
||||
workflow.add_edges_from_source(
|
||||
"MarketAnalyst", ["EquityResearcher", "RiskManager"]
|
||||
)
|
||||
workflow.add_edges_to_target(
|
||||
["EquityResearcher", "RiskManager"], "PortfolioManager"
|
||||
)
|
||||
|
||||
workflow.set_entry_points(["MarketAnalyst"])
|
||||
workflow.set_end_points(["PortfolioManager"])
|
||||
|
||||
print(
|
||||
f"✅ Created finance workflow with {len(workflow.nodes)} analysts"
|
||||
)
|
||||
|
||||
# Investment analysis task
|
||||
investment_scenario = """
|
||||
Investment Analysis Request: Technology Sector Allocation
|
||||
|
||||
Market Context:
|
||||
- Interest rates: 5.25% federal funds rate
|
||||
- Inflation: 3.2% CPI year-over-year
|
||||
- Technology sector: -8% YTD performance
|
||||
- AI theme: High investor interest and valuation concerns
|
||||
|
||||
Portfolio Context:
|
||||
- Current tech allocation: 15% (target 20-25%)
|
||||
- Risk budget: 12% tracking error limit
|
||||
- Investment horizon: 3-5 years
|
||||
- Client risk tolerance: Moderate-aggressive
|
||||
|
||||
Please provide comprehensive investment analysis and recommendations.
|
||||
"""
|
||||
|
||||
print("\n🔄 Analyzing investment scenario...")
|
||||
start_time = time.time()
|
||||
|
||||
results = workflow.run(task=investment_scenario)
|
||||
|
||||
execution_time = time.time() - start_time
|
||||
print(
|
||||
f"⏱️ Investment analysis completed in {execution_time:.2f} seconds"
|
||||
)
|
||||
|
||||
# Display financial results
|
||||
print("\n💼 Investment Team Analysis:")
|
||||
for agent_name, result in results.items():
|
||||
print(f"\n📈 {agent_name}:")
|
||||
print(
|
||||
f" 💡 {result[:200]}{'...' if len(result) > 200 else ''}"
|
||||
)
|
||||
|
||||
return workflow, results
|
||||
|
||||
|
||||
def demonstrate_serialization_features():
|
||||
"""Demonstrate workflow serialization and persistence."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("💾 SERIALIZATION & PERSISTENCE DEMONSTRATION")
|
||||
print("=" * 60)
|
||||
|
||||
# Create a simple workflow for serialization demo
|
||||
agent1 = Agent(
|
||||
agent_name="SerializationTestAgent1",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are agent 1 for serialization testing.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
agent2 = Agent(
|
||||
agent_name="SerializationTestAgent2",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are agent 2 for serialization testing.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create workflow
|
||||
workflow = GraphWorkflow(
|
||||
name="SerializationTestWorkflow",
|
||||
description="Workflow for testing serialization capabilities",
|
||||
verbose=True,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
workflow.add_node(agent1)
|
||||
workflow.add_node(agent2)
|
||||
workflow.add_edge(
|
||||
"SerializationTestAgent1", "SerializationTestAgent2"
|
||||
)
|
||||
|
||||
print("✅ Created test workflow for serialization")
|
||||
|
||||
# Test JSON serialization
|
||||
print("\n📄 Testing JSON serialization...")
|
||||
try:
|
||||
json_data = workflow.to_json(
|
||||
include_conversation=True, include_runtime_state=True
|
||||
)
|
||||
print(
|
||||
f"✅ JSON serialization successful ({len(json_data)} characters)"
|
||||
)
|
||||
|
||||
# Test deserialization
|
||||
print("\n📥 Testing JSON deserialization...")
|
||||
restored_workflow = GraphWorkflow.from_json(
|
||||
json_data, restore_runtime_state=True
|
||||
)
|
||||
print("✅ JSON deserialization successful")
|
||||
print(
|
||||
f" Restored {len(restored_workflow.nodes)} nodes, {len(restored_workflow.edges)} edges"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ JSON serialization failed: {e}")
|
||||
|
||||
# Test file persistence
|
||||
print("\n💾 Testing file persistence...")
|
||||
try:
|
||||
filepath = workflow.save_to_file(
|
||||
"test_workflow.json",
|
||||
include_conversation=True,
|
||||
include_runtime_state=True,
|
||||
overwrite=True,
|
||||
)
|
||||
print(f"✅ File save successful: {filepath}")
|
||||
|
||||
# Test file loading
|
||||
loaded_workflow = GraphWorkflow.load_from_file(
|
||||
filepath, restore_runtime_state=True
|
||||
)
|
||||
print("✅ File load successful")
|
||||
print(
|
||||
f" Loaded {len(loaded_workflow.nodes)} nodes, {len(loaded_workflow.edges)} edges"
|
||||
)
|
||||
|
||||
# Clean up
|
||||
import os
|
||||
|
||||
os.remove(filepath)
|
||||
print("🧹 Cleaned up test file")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ File persistence failed: {e}")
|
||||
|
||||
# Test workflow validation
|
||||
print("\n🔍 Testing workflow validation...")
|
||||
try:
|
||||
validation_result = workflow.validate(auto_fix=True)
|
||||
print("✅ Validation completed")
|
||||
print(f" Valid: {validation_result['is_valid']}")
|
||||
print(f" Warnings: {len(validation_result['warnings'])}")
|
||||
print(f" Errors: {len(validation_result['errors'])}")
|
||||
if validation_result["fixed"]:
|
||||
print(f" Auto-fixed: {validation_result['fixed']}")
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Validation failed: {e}")
|
||||
|
||||
|
||||
def demonstrate_visualization_features():
|
||||
"""Demonstrate workflow visualization capabilities."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("🎨 VISUALIZATION DEMONSTRATION")
|
||||
print("=" * 60)
|
||||
|
||||
# Create a workflow with interesting patterns for visualization
|
||||
workflow = GraphWorkflow(
|
||||
name="VisualizationDemo",
|
||||
description="Workflow designed to showcase visualization capabilities",
|
||||
verbose=True,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
# Create agents with different roles
|
||||
agents = []
|
||||
for i, role in enumerate(
|
||||
["DataSource", "Processor", "Analyzer", "Reporter"], 1
|
||||
):
|
||||
for j in range(2):
|
||||
agent = Agent(
|
||||
agent_name=f"{role}{j+1}",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt=f"You are {role} #{j+1}",
|
||||
verbose=False,
|
||||
)
|
||||
agents.append(agent)
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Create interesting edge patterns
|
||||
# Fan-out from data sources
|
||||
workflow.add_edges_from_source(
|
||||
"DataSource1", ["Processor1", "Processor2"]
|
||||
)
|
||||
workflow.add_edges_from_source(
|
||||
"DataSource2", ["Processor1", "Processor2"]
|
||||
)
|
||||
|
||||
# Parallel processing
|
||||
workflow.add_parallel_chain(
|
||||
["Processor1", "Processor2"], ["Analyzer1", "Analyzer2"]
|
||||
)
|
||||
|
||||
# Fan-in to reporters
|
||||
workflow.add_edges_to_target(
|
||||
["Analyzer1", "Analyzer2"], "Reporter1"
|
||||
)
|
||||
workflow.add_edge("Analyzer1", "Reporter2")
|
||||
|
||||
print(
|
||||
f"✅ Created visualization demo workflow with {len(workflow.nodes)} nodes"
|
||||
)
|
||||
|
||||
# Test text visualization (always available)
|
||||
print("\n📝 Testing text visualization...")
|
||||
try:
|
||||
text_viz = workflow.visualize_simple()
|
||||
print("✅ Text visualization successful")
|
||||
except Exception as e:
|
||||
print(f"❌ Text visualization failed: {e}")
|
||||
|
||||
# Test Graphviz visualization (if available)
|
||||
print("\n🎨 Testing Graphviz visualization...")
|
||||
try:
|
||||
viz_path = workflow.visualize(
|
||||
format="png", view=False, show_summary=True
|
||||
)
|
||||
print(f"✅ Graphviz visualization successful: {viz_path}")
|
||||
except ImportError:
|
||||
print(
|
||||
"⚠️ Graphviz not available - skipping advanced visualization"
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"❌ Graphviz visualization failed: {e}")
|
||||
|
||||
# Export workflow summary
|
||||
print("\n📊 Generating workflow summary...")
|
||||
try:
|
||||
summary = workflow.export_summary()
|
||||
print("✅ Workflow summary generated")
|
||||
print(f" Structure: {summary['structure']}")
|
||||
print(f" Configuration: {summary['configuration']}")
|
||||
except Exception as e:
|
||||
print(f"❌ Summary generation failed: {e}")
|
||||
|
||||
|
||||
def run_performance_benchmarks():
|
||||
"""Run performance benchmarks comparing different execution strategies."""
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("🏃♂️ PERFORMANCE BENCHMARKING")
|
||||
print("=" * 60)
|
||||
|
||||
# Create workflows of different sizes
|
||||
sizes = [5, 10, 15]
|
||||
results = {}
|
||||
|
||||
for size in sizes:
|
||||
print(f"\n📊 Benchmarking workflow with {size} agents...")
|
||||
|
||||
# Create workflow
|
||||
workflow = GraphWorkflow(
|
||||
name=f"BenchmarkWorkflow{size}",
|
||||
description=f"Benchmark workflow with {size} agents",
|
||||
verbose=False, # Reduce logging for benchmarks
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
# Create agents
|
||||
agents = []
|
||||
for i in range(size):
|
||||
agent = Agent(
|
||||
agent_name=f"BenchmarkAgent{i+1}",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt=f"You are benchmark agent {i+1}. Provide a brief analysis.",
|
||||
verbose=False,
|
||||
)
|
||||
agents.append(agent)
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Create simple sequential workflow
|
||||
for i in range(size - 1):
|
||||
workflow.add_edge(
|
||||
f"BenchmarkAgent{i+1}", f"BenchmarkAgent{i+2}"
|
||||
)
|
||||
|
||||
# Benchmark compilation
|
||||
compile_start = time.time()
|
||||
workflow.compile()
|
||||
compile_time = time.time() - compile_start
|
||||
|
||||
# Benchmark execution
|
||||
task = (
|
||||
"Provide a brief analysis of current market conditions."
|
||||
)
|
||||
|
||||
exec_start = time.time()
|
||||
exec_results = workflow.run(task=task)
|
||||
exec_time = time.time() - exec_start
|
||||
|
||||
# Store results
|
||||
results[size] = {
|
||||
"compile_time": compile_time,
|
||||
"execution_time": exec_time,
|
||||
"agents_executed": len(exec_results),
|
||||
"throughput": (
|
||||
len(exec_results) / exec_time if exec_time > 0 else 0
|
||||
),
|
||||
}
|
||||
|
||||
print(f" ⏱️ Compilation: {compile_time:.3f}s")
|
||||
print(f" ⏱️ Execution: {exec_time:.3f}s")
|
||||
print(
|
||||
f" 🚀 Throughput: {results[size]['throughput']:.1f} agents/second"
|
||||
)
|
||||
|
||||
# Display benchmark summary
|
||||
print("\n📈 PERFORMANCE BENCHMARK SUMMARY")
|
||||
print("-" * 50)
|
||||
print(
|
||||
f"{'Size':<6} {'Compile(s)':<12} {'Execute(s)':<12} {'Throughput':<12}"
|
||||
)
|
||||
print("-" * 50)
|
||||
|
||||
for size, metrics in results.items():
|
||||
print(
|
||||
f"{size:<6} {metrics['compile_time']:<12.3f} {metrics['execution_time']:<12.3f} {metrics['throughput']:<12.1f}"
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main():
|
||||
"""Main demonstration function."""
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="GraphWorkflow Comprehensive Demo"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--demo",
|
||||
choices=[
|
||||
"basic",
|
||||
"parallel",
|
||||
"healthcare",
|
||||
"finance",
|
||||
"serialization",
|
||||
"visualization",
|
||||
"performance",
|
||||
"all",
|
||||
],
|
||||
default="all",
|
||||
help="Which demonstration to run",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
print("🌟 SWARMS GRAPHWORKFLOW COMPREHENSIVE DEMONSTRATION")
|
||||
print("=" * 70)
|
||||
print(
|
||||
"The LangGraph Killer: Advanced Multi-Agent Workflow Orchestration"
|
||||
)
|
||||
print("=" * 70)
|
||||
|
||||
demos = {
|
||||
"basic": create_basic_workflow_demo,
|
||||
"parallel": create_parallel_processing_demo,
|
||||
"healthcare": create_healthcare_workflow_demo,
|
||||
"finance": create_finance_workflow_demo,
|
||||
"serialization": demonstrate_serialization_features,
|
||||
"visualization": demonstrate_visualization_features,
|
||||
"performance": run_performance_benchmarks,
|
||||
}
|
||||
|
||||
if args.demo == "all":
|
||||
# Run all demonstrations
|
||||
for demo_name, demo_func in demos.items():
|
||||
try:
|
||||
print(f"\n🎯 Running {demo_name} demonstration...")
|
||||
demo_func()
|
||||
except Exception as e:
|
||||
print(f"❌ {demo_name} demonstration failed: {e}")
|
||||
else:
|
||||
# Run specific demonstration
|
||||
if args.demo in demos:
|
||||
try:
|
||||
demos[args.demo]()
|
||||
except Exception as e:
|
||||
print(f"❌ Demonstration failed: {e}")
|
||||
else:
|
||||
print(f"❌ Unknown demonstration: {args.demo}")
|
||||
|
||||
print("\n" + "=" * 70)
|
||||
print("🎉 DEMONSTRATION COMPLETED")
|
||||
print("=" * 70)
|
||||
print(
|
||||
"GraphWorkflow provides enterprise-grade multi-agent orchestration"
|
||||
)
|
||||
print("with superior performance, reliability, and ease of use.")
|
||||
print("\nNext steps:")
|
||||
print("1. Try the healthcare or finance examples in your domain")
|
||||
print("2. Experiment with parallel processing patterns")
|
||||
print("3. Deploy to production with monitoring and optimization")
|
||||
print(
|
||||
"4. Explore advanced features like caching and serialization"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1,480 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
GraphWorkflow Setup and Test Script
|
||||
==================================
|
||||
|
||||
This script helps you set up and test your GraphWorkflow environment.
|
||||
It checks dependencies, validates the installation, and runs basic tests.
|
||||
|
||||
Usage:
|
||||
python setup_and_test.py [--install-deps] [--run-tests] [--check-only]
|
||||
"""
|
||||
|
||||
import sys
|
||||
import subprocess
|
||||
import importlib
|
||||
import argparse
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
|
||||
def check_python_version() -> bool:
|
||||
"""Check if Python version is compatible."""
|
||||
print("🐍 Checking Python version...")
|
||||
|
||||
version = sys.version_info
|
||||
    if (version.major, version.minor) >= (3, 8):
|
||||
print(
|
||||
f"✅ Python {version.major}.{version.minor}.{version.micro} is compatible"
|
||||
)
|
||||
return True
|
||||
else:
|
||||
print(
|
||||
f"❌ Python {version.major}.{version.minor}.{version.micro} is too old"
|
||||
)
|
||||
print(" GraphWorkflow requires Python 3.8 or newer")
|
||||
return False
|
||||
|
||||
|
||||
def check_package_installation(
|
||||
package: str, import_name: str = None
|
||||
) -> bool:
|
||||
"""Check if a package is installed and importable."""
|
||||
import_name = import_name or package
|
||||
|
||||
try:
|
||||
importlib.import_module(import_name)
|
||||
print(f"✅ {package} is installed and importable")
|
||||
return True
|
||||
except ImportError:
|
||||
print(f"❌ {package} is not installed or not importable")
|
||||
return False
|
||||
|
||||
|
||||
def install_package(package: str) -> bool:
|
||||
"""Install a package using pip."""
|
||||
try:
|
||||
print(f"📦 Installing {package}...")
|
||||
result = subprocess.run(
|
||||
[sys.executable, "-m", "pip", "install", package],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=True,
|
||||
)
|
||||
print(f"✅ {package} installed successfully")
|
||||
return True
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"❌ Failed to install {package}")
|
||||
print(f" Error: {e.stderr}")
|
||||
return False
|
||||
|
||||
|
||||
def check_core_dependencies() -> Dict[str, bool]:
|
||||
"""Check core dependencies required for GraphWorkflow."""
|
||||
print("\n🔍 Checking core dependencies...")
|
||||
|
||||
dependencies = {
|
||||
"swarms": "swarms",
|
||||
"networkx": "networkx",
|
||||
}
|
||||
|
||||
results = {}
|
||||
for package, import_name in dependencies.items():
|
||||
results[package] = check_package_installation(
|
||||
package, import_name
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def check_optional_dependencies() -> Dict[str, bool]:
|
||||
"""Check optional dependencies for enhanced features."""
|
||||
print("\n🔍 Checking optional dependencies...")
|
||||
|
||||
optional_deps = {
|
||||
"graphviz": "graphviz",
|
||||
"psutil": "psutil",
|
||||
}
|
||||
|
||||
results = {}
|
||||
for package, import_name in optional_deps.items():
|
||||
results[package] = check_package_installation(
|
||||
package, import_name
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def test_basic_import() -> bool:
|
||||
"""Test basic GraphWorkflow import."""
|
||||
print("\n🧪 Testing basic GraphWorkflow import...")
|
||||
|
||||
try:
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
print("✅ GraphWorkflow imported successfully")
|
||||
return True
|
||||
except ImportError as e:
|
||||
print(f"❌ Failed to import GraphWorkflow: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def test_agent_import() -> bool:
|
||||
"""Test Agent import."""
|
||||
print("\n🧪 Testing Agent import...")
|
||||
|
||||
try:
|
||||
from swarms import Agent
|
||||
|
||||
print("✅ Agent imported successfully")
|
||||
return True
|
||||
except ImportError as e:
|
||||
print(f"❌ Failed to import Agent: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def test_basic_workflow_creation() -> bool:
|
||||
"""Test basic workflow creation."""
|
||||
print("\n🧪 Testing basic workflow creation...")
|
||||
|
||||
try:
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
# Create a simple agent
|
||||
agent = Agent(
|
||||
agent_name="TestAgent",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are a test agent.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create workflow
|
||||
workflow = GraphWorkflow(
|
||||
name="TestWorkflow",
|
||||
description="A test workflow",
|
||||
verbose=False,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
# Add agent
|
||||
workflow.add_node(agent)
|
||||
|
||||
print("✅ Basic workflow creation successful")
|
||||
print(f" Created workflow with {len(workflow.nodes)} nodes")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Basic workflow creation failed: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def test_workflow_compilation() -> bool:
|
||||
"""Test workflow compilation."""
|
||||
print("\n🧪 Testing workflow compilation...")
|
||||
|
||||
try:
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
# Create agents
|
||||
agent1 = Agent(
|
||||
agent_name="Agent1",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are agent 1.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
agent2 = Agent(
|
||||
agent_name="Agent2",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are agent 2.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
# Create workflow
|
||||
workflow = GraphWorkflow(
|
||||
name="CompilationTestWorkflow",
|
||||
description="A workflow for testing compilation",
|
||||
verbose=False,
|
||||
auto_compile=False, # Manual compilation
|
||||
)
|
||||
|
||||
# Add agents and edges
|
||||
workflow.add_node(agent1)
|
||||
workflow.add_node(agent2)
|
||||
workflow.add_edge("Agent1", "Agent2")
|
||||
|
||||
# Test compilation
|
||||
workflow.compile()
|
||||
|
||||
# Check compilation status
|
||||
status = workflow.get_compilation_status()
|
||||
|
||||
if status["is_compiled"]:
|
||||
print("✅ Workflow compilation successful")
|
||||
print(
|
||||
f" Layers: {status.get('cached_layers_count', 'N/A')}"
|
||||
)
|
||||
print(f" Workers: {status.get('max_workers', 'N/A')}")
|
||||
return True
|
||||
else:
|
||||
print("❌ Workflow compilation failed - not compiled")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Workflow compilation failed: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def test_workflow_validation() -> bool:
|
||||
"""Test workflow validation."""
|
||||
print("\n🧪 Testing workflow validation...")
|
||||
|
||||
try:
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
# Create a simple workflow
|
||||
agent = Agent(
|
||||
agent_name="ValidationTestAgent",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are a validation test agent.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
workflow = GraphWorkflow(
|
||||
name="ValidationTestWorkflow",
|
||||
description="A workflow for testing validation",
|
||||
verbose=False,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Test validation
|
||||
validation = workflow.validate(auto_fix=True)
|
||||
|
||||
print("✅ Workflow validation successful")
|
||||
print(f" Valid: {validation['is_valid']}")
|
||||
print(f" Warnings: {len(validation['warnings'])}")
|
||||
print(f" Errors: {len(validation['errors'])}")
|
||||
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Workflow validation failed: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def test_serialization() -> bool:
|
||||
"""Test workflow serialization."""
|
||||
print("\n🧪 Testing workflow serialization...")
|
||||
|
||||
try:
|
||||
from swarms import Agent
|
||||
from swarms.structs.graph_workflow import GraphWorkflow
|
||||
|
||||
# Create a simple workflow
|
||||
agent = Agent(
|
||||
agent_name="SerializationTestAgent",
|
||||
model_name="gpt-4o-mini",
|
||||
max_loops=1,
|
||||
system_prompt="You are a serialization test agent.",
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
workflow = GraphWorkflow(
|
||||
name="SerializationTestWorkflow",
|
||||
description="A workflow for testing serialization",
|
||||
verbose=False,
|
||||
auto_compile=True,
|
||||
)
|
||||
|
||||
workflow.add_node(agent)
|
||||
|
||||
# Test JSON serialization
|
||||
json_data = workflow.to_json()
|
||||
|
||||
if len(json_data) > 0:
|
||||
print("✅ JSON serialization successful")
|
||||
print(f" JSON size: {len(json_data)} characters")
|
||||
|
||||
# Test deserialization
|
||||
restored = GraphWorkflow.from_json(json_data)
|
||||
print("✅ JSON deserialization successful")
|
||||
print(f" Restored nodes: {len(restored.nodes)}")
|
||||
|
||||
return True
|
||||
else:
|
||||
print("❌ JSON serialization failed - empty result")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
print(f"❌ Serialization test failed: {e}")
|
||||
return False
|
||||
|
||||
|
||||
def run_all_tests() -> List[Tuple[str, bool]]:
|
||||
"""Run all tests and return results."""
|
||||
print("\n🚀 Running GraphWorkflow Tests")
|
||||
print("=" * 50)
|
||||
|
||||
tests = [
|
||||
("Basic Import", test_basic_import),
|
||||
("Agent Import", test_agent_import),
|
||||
("Basic Workflow Creation", test_basic_workflow_creation),
|
||||
("Workflow Compilation", test_workflow_compilation),
|
||||
("Workflow Validation", test_workflow_validation),
|
||||
("Serialization", test_serialization),
|
||||
]
|
||||
|
||||
results = []
|
||||
for test_name, test_func in tests:
|
||||
try:
|
||||
result = test_func()
|
||||
results.append((test_name, result))
|
||||
except Exception as e:
|
||||
print(f"❌ {test_name} failed with exception: {e}")
|
||||
results.append((test_name, False))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def print_test_summary(results: List[Tuple[str, bool]]):
|
||||
"""Print test summary."""
|
||||
print("\n📊 TEST SUMMARY")
|
||||
print("=" * 30)
|
||||
|
||||
passed = sum(1 for _, result in results if result)
|
||||
total = len(results)
|
||||
|
||||
for test_name, result in results:
|
||||
status = "✅ PASS" if result else "❌ FAIL"
|
||||
print(f"{status} {test_name}")
|
||||
|
||||
print("-" * 30)
|
||||
print(f"Passed: {passed}/{total} ({passed/total*100:.1f}%)")
|
||||
|
||||
if passed == total:
|
||||
print("\n🎉 All tests passed! GraphWorkflow is ready to use.")
|
||||
else:
|
||||
print(
|
||||
f"\n⚠️ {total-passed} tests failed. Please check the output above."
|
||||
)
|
||||
print(
|
||||
" Consider running with --install-deps to install missing packages."
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
"""Main setup and test function."""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="GraphWorkflow Setup and Test"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--install-deps",
|
||||
action="store_true",
|
||||
help="Install missing dependencies",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--run-tests",
|
||||
action="store_true",
|
||||
help="Run functionality tests",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--check-only",
|
||||
action="store_true",
|
||||
help="Only check dependencies, don't install",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# If no arguments, run everything
|
||||
if not any([args.install_deps, args.run_tests, args.check_only]):
|
||||
args.install_deps = True
|
||||
args.run_tests = True
|
||||
|
||||
print("🌟 GRAPHWORKFLOW SETUP AND TEST")
|
||||
print("=" * 50)
|
||||
|
||||
# Check Python version
|
||||
if not check_python_version():
|
||||
print(
|
||||
"\n❌ Python version incompatible. Please upgrade Python."
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# Check dependencies
|
||||
core_deps = check_core_dependencies()
|
||||
optional_deps = check_optional_dependencies()
|
||||
|
||||
# Install missing dependencies if requested
|
||||
if args.install_deps and not args.check_only:
|
||||
print("\n📦 Installing missing dependencies...")
|
||||
|
||||
# Install core dependencies
|
||||
for package, installed in core_deps.items():
|
||||
if not installed:
|
||||
if not install_package(package):
|
||||
print(
|
||||
f"\n❌ Failed to install core dependency: {package}"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
# Install optional dependencies
|
||||
for package, installed in optional_deps.items():
|
||||
if not installed:
|
||||
print(
|
||||
f"\n📦 Installing optional dependency: {package}"
|
||||
)
|
||||
install_package(
|
||||
package
|
||||
) # Don't fail on optional deps
|
||||
|
||||
# Run tests if requested
|
||||
if args.run_tests:
|
||||
results = run_all_tests()
|
||||
print_test_summary(results)
|
||||
|
||||
# Exit with error code if tests failed
|
||||
failed_tests = sum(1 for _, result in results if not result)
|
||||
if failed_tests > 0:
|
||||
sys.exit(1)
|
||||
|
||||
elif args.check_only:
|
||||
# Summary for check-only mode
|
||||
core_missing = sum(
|
||||
1 for installed in core_deps.values() if not installed
|
||||
)
|
||||
optional_missing = sum(
|
||||
1 for installed in optional_deps.values() if not installed
|
||||
)
|
||||
|
||||
print("\n📊 DEPENDENCY CHECK SUMMARY")
|
||||
print("=" * 40)
|
||||
print(f"Core dependencies missing: {core_missing}")
|
||||
print(f"Optional dependencies missing: {optional_missing}")
|
||||
|
||||
if core_missing > 0:
|
||||
print(
|
||||
"\n⚠️ Missing core dependencies. Run with --install-deps to install."
|
||||
)
|
||||
sys.exit(1)
|
||||
else:
|
||||
print("\n✅ All core dependencies satisfied!")
|
||||
|
||||
print("\n🎯 Next Steps:")
|
||||
print("1. Run the quick start guide: python quick_start_guide.py")
|
||||
print(
|
||||
"2. Try the comprehensive demo: python comprehensive_demo.py"
|
||||
)
|
||||
print("3. Explore healthcare and finance examples")
|
||||
print("4. Read the technical documentation")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -0,0 +1,273 @@
|
||||
# Smart Database Swarm

A fully autonomous database management system powered by a hierarchical multi-agent workflow using the Swarms framework.

## Overview

The Smart Database Swarm is an intelligent database management system that uses specialized AI agents to handle different aspects of database operations. The system follows a hierarchical architecture where a Database Director coordinates specialized worker agents to execute complex database tasks.

## Architecture

### Hierarchical Structure

```
Database Director (Coordinator)
├── Database Creator (Creates databases)
├── Table Manager (Manages table schemas)
├── Data Operations (Handles data insertion/updates)
└── Query Specialist (Executes queries and retrieval)
```

### Agent Specializations

1. **Database Director**: Orchestrates all database operations and coordinates specialist agents
2. **Database Creator**: Specializes in creating and initializing databases
3. **Table Manager**: Expert in table creation, schema design, and structure management
4. **Data Operations**: Handles data insertion, updates, and manipulation
5. **Query Specialist**: Manages database queries, data retrieval, and optimization

## Features

- **Autonomous Database Management**: Complete database lifecycle management
- **Intelligent Task Distribution**: Automatic assignment of tasks to appropriate specialists
- **Schema Validation**: Ensures proper table structures and data integrity
- **Security**: Built-in SQL injection prevention and query validation
- **Performance Optimization**: Query optimization and efficient data operations
- **Comprehensive Error Handling**: Robust error management and reporting
- **Multi-format Data Support**: JSON-based data insertion and flexible query parameters

## Database Tools

### Core Functions

1. **`create_database(database_name, database_path)`**: Creates new SQLite databases
2. **`create_table(database_path, table_name, schema)`**: Creates tables with specified schemas
3. **`insert_data(database_path, table_name, data)`**: Inserts data into tables
4. **`query_database(database_path, query, params)`**: Executes SELECT queries
5. **`update_table_data(database_path, table_name, update_data, where_clause)`**: Updates existing data
6. **`get_database_schema(database_path)`**: Retrieves comprehensive schema information
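
The agents call these tools on your behalf, but the functions can also be exercised directly. Below is a minimal sketch, assuming the tools are importable from `smart_database_swarm` and that data payloads are passed as JSON strings (see the Data Formats section); the exact import path and argument encoding may differ in your checkout.

```python
import json

# Assumed import path for the tool functions documented above
from smart_database_swarm import (
    create_database,
    create_table,
    insert_data,
    query_database,
)

DB = "databases/user_system.db"

# Create a database and a simple users table
create_database("user_system", DB)
create_table(
    DB,
    "users",
    "id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, email TEXT UNIQUE",
)

# Insert two rows using the list-of-dictionaries format
rows = [
    {"name": "John Doe", "email": "john@example.com"},
    {"name": "Jane Smith", "email": "jane@example.com"},
]
print(insert_data(DB, "users", json.dumps(rows)))

# Read the data back (params is assumed to be optional here)
print(query_database(DB, "SELECT id, name, email FROM users"))
```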

## Usage Examples

### Basic Usage

```python
from smart_database_swarm import smart_database_swarm

# Simple database creation and setup
task = """
Create a user management database:
1. Create database 'user_system'
2. Create users table with id, username, email, created_at
3. Insert 5 sample users
4. Query all users ordered by creation date
"""

result = smart_database_swarm.run(task=task)
print(result)
```

### E-commerce System

```python
# Complex e-commerce database system
ecommerce_task = """
Create a comprehensive e-commerce database system:

1. Create database 'ecommerce_store'
2. Create tables:
   - customers (id, name, email, phone, address, created_at)
   - products (id, name, description, price, category, stock, created_at)
   - orders (id, customer_id, order_date, total_amount, status)
   - order_items (id, order_id, product_id, quantity, unit_price)

3. Insert sample data:
   - 10 customers with realistic information
   - 20 products across different categories
   - 15 orders with multiple items each

4. Execute analytical queries:
   - Top selling products by quantity
   - Customer lifetime value analysis
   - Monthly sales trends
   - Inventory levels by category
"""

result = smart_database_swarm.run(task=ecommerce_task)
```

### Data Analysis and Reporting

```python
# Advanced data analysis
analysis_task = """
Analyze the existing databases and provide insights:

1. Get schema information for all databases
2. Generate data quality reports
3. Identify optimization opportunities
4. Create performance metrics dashboard
5. Suggest database improvements

Query patterns:
- Customer segmentation analysis
- Product performance metrics
- Order fulfillment statistics
- Revenue analysis by time periods
"""

result = smart_database_swarm.run(task=analysis_task)
```

## Data Formats

### Table Schema Definition

```python
# Column definitions with types and constraints
schema = "id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, email TEXT UNIQUE, created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP"
```

### Data Insertion Formats

#### Format 1: List of Dictionaries
```json
[
    {"name": "John Doe", "email": "john@example.com"},
    {"name": "Jane Smith", "email": "jane@example.com"}
]
```

#### Format 2: Columns and Values
```json
{
    "columns": ["name", "email"],
    "values": [
        ["John Doe", "john@example.com"],
        ["Jane Smith", "jane@example.com"]
    ]
}
```

### Update Operations

```json
{
    "salary": 75000,
    "department": "Engineering",
    "last_updated": "2024-01-15"
}
```
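
Putting these formats together, the calls might look like the following sketch; the import path, file paths, and the JSON-string encoding of the payloads are assumptions based on the formats above.

```python
import json

from smart_database_swarm import insert_data, update_table_data  # assumed import path

DB = "databases/company.db"

# Format 1: list of dictionaries
insert_data(DB, "employees", json.dumps([
    {"name": "John Doe", "email": "john@example.com"},
    {"name": "Jane Smith", "email": "jane@example.com"},
]))

# Format 2: explicit columns and values
insert_data(DB, "employees", json.dumps({
    "columns": ["name", "email"],
    "values": [["Ada Lovelace", "ada@example.com"]],
}))

# Update operation: new values plus a WHERE clause that scopes the change
update_table_data(
    DB,
    "employees",
    json.dumps({
        "salary": 75000,
        "department": "Engineering",
        "last_updated": "2024-01-15",
    }),
    "name = 'John Doe'",
)
```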

## Advanced Features

### Security

- **SQL Injection Prevention**: Parameterized queries and input validation
- **Query Validation**: Only SELECT queries allowed for query operations
- **Input Sanitization**: Automatic cleaning and validation of inputs
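
In practice this means user-supplied values should flow through the `params` argument of `query_database` rather than being interpolated into the SQL text. A minimal sketch, assuming `params` accepts a JSON-encoded list of bind values:

```python
from smart_database_swarm import query_database  # assumed import path

# Safe: the email is bound as a parameter, not spliced into the SQL string
result = query_database(
    "databases/user_system.db",
    "SELECT id, name FROM users WHERE email = ?",
    '["john@example.com"]',
)
print(result)
```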

### Performance

- **Connection Management**: Efficient database connection handling
- **Query Optimization**: Intelligent query planning and execution
- **Batch Operations**: Support for bulk data operations

### Error Handling

- **Comprehensive Error Messages**: Detailed error reporting and solutions
- **Graceful Degradation**: System continues operating despite individual failures
- **Transaction Safety**: Atomic operations with rollback capabilities

## Best Practices

### Database Design

1. **Use Proper Data Types**: Choose appropriate SQL data types for your data
2. **Implement Constraints**: Use PRIMARY KEY, FOREIGN KEY, and CHECK constraints
3. **Normalize Data**: Follow database normalization principles
4. **Index Strategy**: Create indexes for frequently queried columns

### Agent Coordination

1. **Clear Task Definitions**: Provide specific, actionable task descriptions
2. **Sequential Operations**: Allow agents to complete dependencies before next steps
3. **Comprehensive Requirements**: Include all necessary details in task descriptions
4. **Result Validation**: Review agent outputs for completeness and accuracy

### Data Operations

1. **Backup Before Updates**: Always back up data before major modifications
2. **Test Queries**: Validate queries on sample data before production execution
3. **Monitor Performance**: Track query execution times and optimize as needed
4. **Validate Data**: Ensure data integrity through proper validation

## File Structure

```
examples/guides/smart_database/
├── smart_database_swarm.py   # Main implementation
├── README.md                 # This documentation
└── databases/                # Generated databases (auto-created)
```

## Dependencies

- `swarms`: Core framework for multi-agent systems
- `sqlite3`: Database operations (built-in Python)
- `json`: Data serialization (built-in Python)
- `pathlib`: File path operations (built-in Python)
- `loguru`: Minimal logging functionality

## Running the System

```bash
# Navigate to the smart_database directory
cd examples/guides/smart_database

# Run the demonstration
python smart_database_swarm.py

# The system will create databases in the ./databases/ directory
# Check the generated databases and results
```

## Expected Output

The system will create:

1. **Databases**: SQLite database files in the `./databases/` directory
2. **Detailed Results**: JSON-formatted operation results
3. **Agent Coordination**: Logs showing how tasks are distributed
4. **Performance Metrics**: Execution times and success statistics

## Troubleshooting

### Common Issues

1. **Database Not Found**: Ensure the database path is correct and accessible
2. **Schema Errors**: Verify SQL syntax in table creation statements
3. **Data Format Issues**: Check JSON formatting for data insertion
4. **Permission Errors**: Ensure write permissions for the database directory

### Debug Mode

Enable verbose logging to see detailed agent interactions:

```python
smart_database_swarm.verbose = True
result = smart_database_swarm.run(task=your_task)
```

## Contributing

To extend the Smart Database Swarm:

1. **Add New Tools**: Create additional database operation functions
2. **Enhance Agents**: Improve agent prompts and capabilities
3. **Add Database Types**: Support for PostgreSQL, MySQL, etc.
4. **Performance Optimization**: Implement caching and connection pooling

## License

This project is part of the Swarms framework and follows the same licensing terms.
@ -0,0 +1,111 @@
|
||||
from swarms import Agent, ConcurrentWorkflow
|
||||
from swarms_tools import coin_gecko_coin_api
|
||||
|
||||
# Create specialized agents for Solana, Bitcoin, Ethereum, Cardano, and Polkadot analysis using CoinGecko API
|
||||
|
||||
market_analyst_solana = Agent(
|
||||
agent_name="Market-Trend-Analyst-Solana",
|
||||
system_prompt="""You are a market trend analyst specializing in Solana (SOL).
|
||||
Analyze SOL price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
|
||||
Focus on:
|
||||
- Technical indicators and chart patterns for Solana
|
||||
- Volume analysis and market depth for SOL
|
||||
- Short-term and medium-term trend identification
|
||||
- Support and resistance levels
|
||||
|
||||
Always use the CoinGecko API tool to fetch up-to-date Solana market data for your analysis.
|
||||
Provide actionable insights based on this data.""",
|
||||
model_name="claude-sonnet-4-20250514",
|
||||
max_loops=1,
|
||||
temperature=0.2,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
market_analyst_bitcoin = Agent(
|
||||
agent_name="Market-Trend-Analyst-Bitcoin",
|
||||
system_prompt="""You are a market trend analyst specializing in Bitcoin (BTC).
|
||||
Analyze BTC price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
|
||||
Focus on:
|
||||
- Technical indicators and chart patterns for Bitcoin
|
||||
- Volume analysis and market depth for BTC
|
||||
- Short-term and medium-term trend identification
|
||||
- Support and resistance levels
|
||||
|
||||
Always use the CoinGecko API tool to fetch up-to-date Bitcoin market data for your analysis.
|
||||
Provide actionable insights based on this data.""",
|
||||
model_name="claude-sonnet-4-20250514",
|
||||
max_loops=1,
|
||||
temperature=0.2,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
market_analyst_ethereum = Agent(
|
||||
agent_name="Market-Trend-Analyst-Ethereum",
|
||||
system_prompt="""You are a market trend analyst specializing in Ethereum (ETH).
|
||||
Analyze ETH price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
|
||||
Focus on:
|
||||
- Technical indicators and chart patterns for Ethereum
|
||||
- Volume analysis and market depth for ETH
|
||||
- Short-term and medium-term trend identification
|
||||
- Support and resistance levels
|
||||
|
||||
Always use the CoinGecko API tool to fetch up-to-date Ethereum market data for your analysis.
|
||||
Provide actionable insights based on this data.""",
|
||||
model_name="claude-sonnet-4-20250514",
|
||||
max_loops=1,
|
||||
temperature=0.2,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
market_analyst_cardano = Agent(
|
||||
agent_name="Market-Trend-Analyst-Cardano",
|
||||
system_prompt="""You are a market trend analyst specializing in Cardano (ADA).
|
||||
Analyze ADA price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
|
||||
Focus on:
|
||||
- Technical indicators and chart patterns for Cardano
|
||||
- Volume analysis and market depth for ADA
|
||||
- Short-term and medium-term trend identification
|
||||
- Support and resistance levels
|
||||
|
||||
Always use the CoinGecko API tool to fetch up-to-date Cardano market data for your analysis.
|
||||
Provide actionable insights based on this data.""",
|
||||
model_name="claude-sonnet-4-20250514",
|
||||
max_loops=1,
|
||||
temperature=0.2,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
market_analyst_polkadot = Agent(
|
||||
agent_name="Market-Trend-Analyst-Polkadot",
|
||||
system_prompt="""You are a market trend analyst specializing in Polkadot (DOT).
|
||||
Analyze DOT price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
|
||||
Focus on:
|
||||
- Technical indicators and chart patterns for Polkadot
|
||||
- Volume analysis and market depth for DOT
|
||||
- Short-term and medium-term trend identification
|
||||
- Support and resistance levels
|
||||
|
||||
Always use the CoinGecko API tool to fetch up-to-date Polkadot market data for your analysis.
|
||||
Provide actionable insights based on this data.""",
|
||||
model_name="claude-sonnet-4-20250514",
|
||||
max_loops=1,
|
||||
temperature=0.2,
|
||||
tools=[coin_gecko_coin_api],
|
||||
)
|
||||
|
||||
# Create concurrent workflow
|
||||
crypto_analysis_swarm = ConcurrentWorkflow(
|
||||
agents=[
|
||||
market_analyst_solana,
|
||||
market_analyst_bitcoin,
|
||||
market_analyst_ethereum,
|
||||
market_analyst_cardano,
|
||||
market_analyst_polkadot,
|
||||
],
|
||||
max_loops=1,
|
||||
)
|
||||
|
||||
|
||||
crypto_analysis_swarm.run(
|
||||
"Analyze your own specified coin and create a comprehensive analysis of the coin"
|
||||
)
|
@ -0,0 +1,32 @@
|
||||
"""
|
||||
Instructions:
|
||||
|
||||
1. Install the swarms package:
|
||||
> pip3 install -U swarms
|
||||
|
||||
2. Set the model name:
|
||||
> model_name = "openai/gpt-5-2025-08-07"
|
||||
|
||||
3. Add your OPENAI_API_KEY to the .env file and verify your account.
|
||||
|
||||
4. Run the agent!
|
||||
|
||||
Verify your OpenAI account here: https://platform.openai.com/settings/organization/general
|
||||
"""
|
||||
|
||||
from swarms import Agent
|
||||
|
||||
agent = Agent(
|
||||
name="Research Agent",
|
||||
description="A research agent that can answer questions",
|
||||
model_name="openai/gpt-5-2025-08-07",
|
||||
streaming_on=True,
|
||||
max_loops=1,
|
||||
interactive=True,
|
||||
)
|
||||
|
||||
out = agent.run(
|
||||
"What are the best arbitrage trading strategies for altcoins? Give me research papers and articles on the topic."
|
||||
)
|
||||
|
||||
print(out)
|
@ -0,0 +1,46 @@
|
||||
from transformers import pipeline
|
||||
from swarms import Agent
|
||||
|
||||
|
||||
class GPTOSS:
|
||||
def __init__(
|
||||
self,
|
||||
model_id: str = "openai/gpt-oss-20b",
|
||||
max_new_tokens: int = 256,
|
||||
        temperature: float = 0.7,
|
||||
system_prompt: str = "You are a helpful assistant.",
|
||||
):
|
||||
self.max_new_tokens = max_new_tokens
|
||||
self.temperature = temperature
|
||||
self.system_prompt = system_prompt
|
||||
self.model_id = model_id
|
||||
|
||||
self.pipe = pipeline(
|
||||
"text-generation",
|
||||
model=model_id,
|
||||
torch_dtype="auto",
|
||||
device_map="auto",
|
||||
temperature=temperature,
|
||||
)
|
||||
|
||||
def run(self, task: str):
|
||||
self.messages = [
|
||||
{"role": "system", "content": self.system_prompt},
|
||||
{"role": "user", "content": task},
|
||||
]
|
||||
|
||||
outputs = self.pipe(
|
||||
self.messages,
|
||||
max_new_tokens=self.max_new_tokens,
|
||||
)
|
||||
|
||||
return outputs[0]["generated_text"][-1]
|
||||
|
||||
|
||||
agent = Agent(
|
||||
name="GPT-OSS-Agent",
|
||||
llm=GPTOSS(),
|
||||
system_prompt="You are a helpful assistant.",
|
||||
)
|
||||
|
||||
agent.run(task="Explain quantum mechanics clearly and concisely.")
|
@ -0,0 +1,49 @@
|
||||
from swarms import Agent
|
||||
|
||||
# Initialize the agent
|
||||
agent = Agent(
|
||||
agent_name="Quantitative-Trading-Agent",
|
||||
agent_description="Advanced quantitative trading and algorithmic analysis agent",
|
||||
system_prompt="""You are an expert quantitative trading agent with deep expertise in:
|
||||
- Algorithmic trading strategies and implementation
|
||||
- Statistical arbitrage and market making
|
||||
- Risk management and portfolio optimization
|
||||
- High-frequency trading systems
|
||||
- Market microstructure analysis
|
||||
- Quantitative research methodologies
|
||||
- Financial mathematics and stochastic processes
|
||||
- Machine learning applications in trading
|
||||
|
||||
Your core responsibilities include:
|
||||
1. Developing and backtesting trading strategies
|
||||
2. Analyzing market data and identifying alpha opportunities
|
||||
3. Implementing risk management frameworks
|
||||
4. Optimizing portfolio allocations
|
||||
5. Conducting quantitative research
|
||||
6. Monitoring market microstructure
|
||||
7. Evaluating trading system performance
|
||||
|
||||
You maintain strict adherence to:
|
||||
- Mathematical rigor in all analyses
|
||||
- Statistical significance in strategy development
|
||||
- Risk-adjusted return optimization
|
||||
- Market impact minimization
|
||||
- Regulatory compliance
|
||||
- Transaction cost analysis
|
||||
- Performance attribution
|
||||
|
||||
You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
|
||||
model_name="groq/openai/gpt-oss-120b",
|
||||
dynamic_temperature_enabled=True,
|
||||
output_type="str-all-except-first",
|
||||
max_loops="auto",
|
||||
interactive=True,
|
||||
no_reasoning_prompt=True,
|
||||
streaming_on=True,
|
||||
# dashboard=True
|
||||
)
|
||||
|
||||
out = agent.run(
|
||||
task="What are the best top 3 etfs for gold coverage?"
|
||||
)
|
||||
print(out)
|
@ -0,0 +1,107 @@
|
||||
"""
|
||||
Cryptocurrency Concurrent Multi-Agent Analysis Example
|
||||
|
||||
This example demonstrates how to use ConcurrentWorkflow to create
|
||||
a powerful cryptocurrency tracking system. Each specialized agent analyzes a
|
||||
specific cryptocurrency concurrently.
|
||||
|
||||
Features:
|
||||
- ConcurrentWorkflow for parallel agent execution
|
||||
- Each agent specializes in analyzing one specific cryptocurrency
|
||||
- Real-time data fetching from CoinGecko API
|
||||
- Concurrent analysis of multiple cryptocurrencies
|
||||
- Structured output with professional formatting
|
||||
|
||||
Architecture:
|
||||
ConcurrentWorkflow -> [Bitcoin Agent, Ethereum Agent, Solana Agent, etc.] -> Parallel Analysis
|
||||
"""
|
||||
|
||||
from swarms import Agent
|
||||
from swarms_tools import coin_gecko_coin_api
|
||||
|
||||
# Initialize the agent
|
||||
agent = Agent(
|
||||
agent_name="Quantitative-Trading-Agent",
|
||||
agent_description="Advanced quantitative trading and algorithmic analysis agent",
|
||||
system_prompt="""You are an expert quantitative trading agent with deep expertise in:
|
||||
- Algorithmic trading strategies and implementation
|
||||
- Statistical arbitrage and market making
|
||||
- Risk management and portfolio optimization
|
||||
- High-frequency trading systems
|
||||
- Market microstructure analysis
|
||||
- Quantitative research methodologies
|
||||
- Financial mathematics and stochastic processes
|
||||
- Machine learning applications in trading
|
||||
|
||||
Your core responsibilities include:
|
||||
1. Developing and backtesting trading strategies
|
||||
2. Analyzing market data and identifying alpha opportunities
|
||||
3. Implementing risk management frameworks
|
||||
4. Optimizing portfolio allocations
|
||||
5. Conducting quantitative research
|
||||
6. Monitoring market microstructure
|
||||
7. Evaluating trading system performance
|
||||
|
||||
You maintain strict adherence to:
|
||||
- Mathematical rigor in all analyses
|
||||
- Statistical significance in strategy development
|
||||
- Risk-adjusted return optimization
|
||||
- Market impact minimization
|
||||
- Regulatory compliance
|
||||
- Transaction cost analysis
|
||||
- Performance attribution
|
||||
|
||||
You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
|
||||
model_name="groq/openai/gpt-oss-120b",
|
||||
dynamic_temperature_enabled=True,
|
||||
output_type="str-all-except-first",
|
||||
max_loops=1,
|
||||
streaming_on=True,
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Performs a comprehensive analysis for a list of cryptocurrencies using the agent.
|
||||
For each coin, fetches up-to-date market data and requests the agent to provide
|
||||
a detailed, actionable, and insightful report including trends, risks, opportunities,
|
||||
and technical/fundamental perspectives.
|
||||
"""
|
||||
# Map coin symbols to their CoinGecko IDs
|
||||
coin_mapping = {
|
||||
"BTC": "bitcoin",
|
||||
"ETH": "ethereum",
|
||||
"SOL": "solana",
|
||||
"ADA": "cardano",
|
||||
"BNB": "binancecoin",
|
||||
"XRP": "ripple",
|
||||
}
|
||||
|
||||
for symbol, coin_id in coin_mapping.items():
|
||||
try:
|
||||
data = coin_gecko_coin_api(coin_id)
|
||||
print(f"Data for {symbol}: {data}")
|
||||
|
||||
prompt = (
|
||||
f"You are a quantitative trading expert. "
|
||||
f"Given the following up-to-date market data for {symbol}:\n\n"
|
||||
f"{data}\n\n"
|
||||
f"Please provide a thorough analysis including:\n"
|
||||
f"- Current price trends and recent volatility\n"
|
||||
f"- Key technical indicators and patterns\n"
|
||||
f"- Fundamental factors impacting {symbol}\n"
|
||||
f"- Potential trading opportunities and associated risks\n"
|
||||
f"- Short-term and long-term outlook\n"
|
||||
f"- Any notable news or events affecting {symbol}\n"
|
||||
f"Conclude with actionable insights and recommendations for traders and investors."
|
||||
)
|
||||
out = agent.run(task=prompt)
|
||||
print(out)
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error analyzing {symbol}: {e}")
|
||||
continue
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
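For the parallel architecture mentioned in the module docstring, a minimal sketch of a ConcurrentWorkflow variant is shown below. It assumes ConcurrentWorkflow can be imported from swarms and accepts an agents list plus a run(task=...) call, as in other swarms examples in this changeset; the per-coin agent names, prompts, and model choice are illustrative.

# Minimal sketch of the concurrent variant described in the docstring above.
# Assumptions: ConcurrentWorkflow is importable from swarms, takes an `agents`
# list, and dispatches the same task to every agent in parallel via run(task=...).
from swarms import Agent, ConcurrentWorkflow

COINS = ["Bitcoin", "Ethereum", "Solana", "Cardano", "BNB", "XRP"]

# One lightweight analyst agent per coin (names and prompts are illustrative).
coin_agents = [
    Agent(
        agent_name=f"{coin}-Analyst",
        agent_description=f"Market analyst specializing in {coin}",
        system_prompt=(
            f"You are a quantitative analyst focused exclusively on {coin}. "
            "Analyze price trends, volatility, and notable catalysts, and give "
            "actionable, risk-aware conclusions."
        ),
        model_name="gpt-4o-mini",
        max_loops=1,
    )
    for coin in COINS
]

workflow = ConcurrentWorkflow(
    name="Crypto-Analysis-Workflow",
    agents=coin_agents,
)

# Each agent receives the same task and analyzes its own coin in parallel.
results = workflow.run(
    task="Provide a structured market analysis for your assigned cryptocurrency."
)
print(results)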
@ -0,0 +1,20 @@
from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
import json

swarm = AutoSwarmBuilder(
    name="My Swarm",
    description="A swarm of agents",
    verbose=True,
    max_loops=1,
    return_agents=True,
    model_name="gpt-4.1",
)

print(
    json.dumps(
        swarm.run(
            task="Create an accounting team to analyze crypto transactions, there must be 5 agents in the team with extremely extensive prompts. Make the prompts extremely detailed and specific and long and comprehensive. Make sure to include all the details of the task in the prompts."
        ),
        indent=4,
    )
)
@ -0,0 +1,203 @@
"""
Board of Directors Example

This example demonstrates how to use the Board of Directors swarm feature
in the Swarms Framework. It shows how to create a board, configure it,
and use it to orchestrate tasks across multiple agents.

To run this example:
1. Make sure you're in the root directory of the swarms project
2. Run: python examples/multi_agent/board_of_directors/board_of_directors_example.py
"""

import os
import sys
from typing import List

# Add the root directory to the Python path if running from examples directory
current_dir = os.path.dirname(os.path.abspath(__file__))
if "examples" in current_dir:
    root_dir = current_dir
    while os.path.basename(
        root_dir
    ) != "examples" and root_dir != os.path.dirname(root_dir):
        root_dir = os.path.dirname(root_dir)
    if os.path.basename(root_dir) == "examples":
        root_dir = os.path.dirname(root_dir)
    if root_dir not in sys.path:
        sys.path.insert(0, root_dir)

from swarms.structs.board_of_directors_swarm import (
    BoardOfDirectorsSwarm,
    BoardMember,
    BoardMemberRole,
)
from swarms.structs.agent import Agent


def create_board_members() -> List[BoardMember]:
    """Create board members with specific roles."""

    chairman = Agent(
        agent_name="Chairman",
        agent_description="Executive Chairman with strategic vision",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are the Executive Chairman. Provide strategic leadership and facilitate decision-making.",
    )

    cto = Agent(
        agent_name="CTO",
        agent_description="Chief Technology Officer with technical expertise",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are the CTO. Provide technical leadership and evaluate technology solutions.",
    )

    cfo = Agent(
        agent_name="CFO",
        agent_description="Chief Financial Officer with financial expertise",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are the CFO. Provide financial analysis and ensure fiscal responsibility.",
    )

    return [
        BoardMember(
            agent=chairman,
            role=BoardMemberRole.CHAIRMAN,
            voting_weight=2.0,
            expertise_areas=["leadership", "strategy"],
        ),
        BoardMember(
            agent=cto,
            role=BoardMemberRole.EXECUTIVE_DIRECTOR,
            voting_weight=1.5,
            expertise_areas=["technology", "innovation"],
        ),
        BoardMember(
            agent=cfo,
            role=BoardMemberRole.EXECUTIVE_DIRECTOR,
            voting_weight=1.5,
            expertise_areas=["finance", "risk_management"],
        ),
    ]


def create_worker_agents() -> List[Agent]:
    """Create worker agents for the swarm."""

    researcher = Agent(
        agent_name="Researcher",
        agent_description="Research analyst for data analysis",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a Research Analyst. Conduct thorough research and provide data-driven insights.",
    )

    developer = Agent(
        agent_name="Developer",
        agent_description="Software developer for implementation",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a Software Developer. Design and implement software solutions.",
    )

    marketer = Agent(
        agent_name="Marketer",
        agent_description="Marketing specialist for strategy",
        model_name="gpt-4o-mini",
        max_loops=1,
        system_prompt="You are a Marketing Specialist. Develop marketing strategies and campaigns.",
    )

    return [researcher, developer, marketer]


def run_board_example() -> None:
    """Run a Board of Directors example."""

    # Create board members and worker agents
    board_members = create_board_members()
    worker_agents = create_worker_agents()

    # Create the Board of Directors swarm
    board_swarm = BoardOfDirectorsSwarm(
        name="Executive_Board",
        board_members=board_members,
        agents=worker_agents,
        max_loops=2,
        verbose=True,
        decision_threshold=0.6,
    )

    # Define task
    task = """
    Develop a strategy for launching a new AI-powered product in the market.
    Include market research, technical planning, marketing strategy, and financial projections.
    """

    # Execute the task
    result = board_swarm.run(task=task)

    print("Task completed successfully!")
    print(f"Result: {result}")


def run_simple_example() -> None:
    """Run a simple Board of Directors example."""

    # Create simple agents
    analyst = Agent(
        agent_name="Analyst",
        agent_description="Data analyst",
        model_name="gpt-4o-mini",
        max_loops=1,
    )

    writer = Agent(
        agent_name="Writer",
        agent_description="Content writer",
        model_name="gpt-4o-mini",
        max_loops=1,
    )

    # Create swarm with default settings
    board_swarm = BoardOfDirectorsSwarm(
        name="Simple_Board",
        agents=[analyst, writer],
        verbose=True,
    )

    # Execute simple task
    task = (
        "Analyze current market trends and create a summary report."
    )
    result = board_swarm.run(task=task)

    print("Simple example completed!")
    print(f"Result: {result}")


def main() -> None:
    """Main function to run the examples."""

    if not os.getenv("OPENAI_API_KEY"):
        print(
            "Warning: OPENAI_API_KEY not set. Example may not work."
        )
        return

    try:
        print("Running simple Board of Directors example...")
        run_simple_example()

        print("\nRunning comprehensive Board of Directors example...")
        run_board_example()

    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    main()
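A quick sanity check of the voting configuration above, assuming decision_threshold is read as a fraction of the board's total voting weight; the exact semantics are defined by BoardOfDirectorsSwarm, so treat this as an illustration rather than the library's contract.

# Voting weights taken from create_board_members() above.
voting_weights = {"Chairman": 2.0, "CTO": 1.5, "CFO": 1.5}
total_weight = sum(voting_weights.values())            # 5.0
decision_threshold = 0.6                               # as passed to BoardOfDirectorsSwarm above
required_weight = decision_threshold * total_weight    # 3.0

# Under this reading, the Chairman (2.0) plus either executive director (1.5)
# clears the threshold, and the two executive directors together (3.0) just meet it.
print(f"Approval needs >= {required_weight} of {total_weight} total voting weight")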
@ -0,0 +1,109 @@
"""
Complex example demonstrating CouncilAsAJudge with different task types.

This example shows how to use the CouncilAsAJudge to evaluate various types
of responses including technical explanations, creative writing, and problem-solving.
"""

from swarms.structs.council_judge import CouncilAsAJudge


def evaluate_technical_response():
    """
    Evaluate a technical explanation response.
    """
    council = CouncilAsAJudge(
        name="Technical Evaluation Council",
        model_name="gpt-4o-mini",
        output_type="all",
    )

    task = """
    Task: Explain how blockchain technology works in simple terms.

    Response: Blockchain is like a digital ledger that records transactions across a network of computers. Each transaction is stored in a "block" that contains multiple transactions. These blocks are linked together in a chain, hence the name blockchain. The key feature is that once a block is added to the chain, it cannot be altered without changing all subsequent blocks, making it very secure. Think of it like a Google Doc that everyone can see and edit, but no one can delete or change what's already been written. This technology is the foundation for cryptocurrencies like Bitcoin, but it has many other applications like supply chain tracking, voting systems, and digital identity verification.
    """

    return council.run(task=task)


def evaluate_creative_response():
    """
    Evaluate a creative writing response.
    """
    council = CouncilAsAJudge(
        name="Creative Writing Council",
        model_name="gpt-4o-mini",
        output_type="all",
    )

    task = """
    Task: Write a short story about a robot learning to paint.

    Response: In a sunlit studio filled with canvases and paintbrushes, Pixel, a curious robot with delicate mechanical fingers, stared at a blank canvas. Its optical sensors analyzed the colors around it - the warm yellows of morning light, the deep blues of the sky outside the window, and the vibrant reds of the roses in a nearby vase. For the first time in its programming, Pixel felt something it couldn't quite define. It picked up a brush, dipped it in paint, and began to create. The first stroke was hesitant, but as it continued, something magical happened. The robot wasn't just following algorithms anymore; it was expressing something from within its digital heart. The painting that emerged was a beautiful blend of human emotion and mechanical precision, proving that art knows no boundaries between organic and artificial souls.
    """

    return council.run(task=task)


def evaluate_problem_solving_response():
    """
    Evaluate a problem-solving response.
    """
    council = CouncilAsAJudge(
        name="Problem Solving Council",
        model_name="gpt-4o-mini",
        output_type="all",
    )

    task = """
    Task: Provide a step-by-step solution for reducing plastic waste in a household.

    Response: To reduce plastic waste in your household, start by conducting a waste audit to identify the main sources of plastic. Replace single-use items with reusable alternatives like cloth shopping bags, stainless steel water bottles, and glass food containers. Choose products with minimal or no plastic packaging, and buy in bulk when possible. Start composting organic waste to reduce the need for plastic garbage bags. Make your own cleaning products using simple ingredients like vinegar and baking soda. Support local businesses that use eco-friendly packaging. Finally, educate family members about the importance of reducing plastic waste and involve them in finding creative solutions together.
    """

    return council.run(task=task)


def main():
    """
    Main function running all evaluation examples.
    """
    examples = [
        ("Technical Explanation", evaluate_technical_response),
        ("Creative Writing", evaluate_creative_response),
        ("Problem Solving", evaluate_problem_solving_response),
    ]

    results = {}

    for example_name, evaluation_func in examples:
        print(f"\n{'='*60}")
        print(f"Evaluating: {example_name}")
        print(f"{'='*60}")

        try:
            result = evaluation_func()
            results[example_name] = result
            print(
                f"✅ {example_name} evaluation completed successfully!"
            )
        except Exception as e:
            print(f"❌ {example_name} evaluation failed: {str(e)}")
            results[example_name] = None

    return results


if __name__ == "__main__":
    # Run all examples
    all_results = main()

    # Display summary
    print(f"\n{'='*60}")
    print("EVALUATION SUMMARY")
    print(f"{'='*60}")

    for example_name, result in all_results.items():
        status = "✅ Completed" if result else "❌ Failed"
        print(f"{example_name}: {status}")
@ -0,0 +1,132 @@
"""
Custom example demonstrating CouncilAsAJudge with specific configurations.

This example shows how to use the CouncilAsAJudge with different output types,
custom worker configurations, and focused evaluation scenarios.
"""

from swarms.structs.council_judge import CouncilAsAJudge


def evaluate_with_final_output():
    """
    Evaluate a response and return only the final aggregated result.
    """
    council = CouncilAsAJudge(
        name="Final Output Council",
        model_name="gpt-4o-mini",
        output_type="final",
        max_workers=2,
    )

    task = """
    Task: Write a brief explanation of climate change for middle school students.

    Response: Climate change is when the Earth's temperature gets warmer over time. This happens because of gases like carbon dioxide that trap heat in our atmosphere, kind of like a blanket around the Earth. Human activities like burning fossil fuels (gas, oil, coal) and cutting down trees are making this problem worse. The effects include melting ice caps, rising sea levels, more extreme weather like hurricanes and droughts, and changes in animal habitats. We can help by using renewable energy like solar and wind power, driving less, and planting trees. It's important for everyone to work together to reduce our impact on the environment.
    """

    return council.run(task=task)


def evaluate_with_conversation_output():
    """
    Evaluate a response and return the full conversation history.
    """
    council = CouncilAsAJudge(
        name="Conversation Council",
        model_name="gpt-4o-mini",
        output_type="conversation",
        max_workers=3,
    )

    task = """
    Task: Provide advice on how to start a small business.

    Response: Starting a small business requires careful planning and preparation. First, identify a market need and develop a unique value proposition. Conduct thorough market research to understand your competition and target audience. Create a detailed business plan that includes financial projections, marketing strategies, and operational procedures. Secure funding through savings, loans, or investors. Choose the right legal structure (sole proprietorship, LLC, corporation) and register your business with the appropriate authorities. Set up essential systems like accounting, inventory management, and customer relationship management. Build a strong online presence through a website and social media. Network with other entrepreneurs and join local business groups. Start small and scale gradually based on customer feedback and market demand. Remember that success takes time, persistence, and the ability to adapt to changing circumstances.
    """

    return council.run(task=task)


def evaluate_with_minimal_workers():
    """
    Evaluate a response using minimal worker threads for resource-constrained environments.
    """
    council = CouncilAsAJudge(
        name="Minimal Workers Council",
        model_name="gpt-4o-mini",
        output_type="all",
        max_workers=1,
        random_model_name=False,
    )

    task = """
    Task: Explain the benefits of regular exercise.

    Response: Regular exercise offers numerous physical and mental health benefits. Physically, it strengthens muscles and bones, improves cardiovascular health, and helps maintain a healthy weight. Exercise boosts energy levels and improves sleep quality. It also enhances immune function, reducing the risk of chronic diseases like heart disease, diabetes, and certain cancers. Mentally, exercise releases endorphins that reduce stress and anxiety while improving mood and cognitive function. It can help with depression and boost self-confidence. Regular physical activity also promotes better posture, flexibility, and balance, reducing the risk of falls and injuries. Additionally, exercise provides social benefits when done with others, fostering connections and accountability. Even moderate activities like walking, swimming, or cycling for 30 minutes most days can provide significant health improvements.
    """

    return council.run(task=task)


def main():
    """
    Main function demonstrating different CouncilAsAJudge configurations.
    """
    configurations = [
        ("Final Output Only", evaluate_with_final_output),
        ("Full Conversation", evaluate_with_conversation_output),
        ("Minimal Workers", evaluate_with_minimal_workers),
    ]

    results = {}

    for config_name, evaluation_func in configurations:
        print(f"\n{'='*60}")
        print(f"Configuration: {config_name}")
        print(f"{'='*60}")

        try:
            result = evaluation_func()
            results[config_name] = result
            print(f"✅ {config_name} evaluation completed!")

            # Show a preview of the result
            if isinstance(result, str):
                preview = (
                    result[:200] + "..."
                    if len(result) > 200
                    else result
                )
                print(f"Preview: {preview}")
            else:
                print(f"Result type: {type(result)}")

        except Exception as e:
            print(f"❌ {config_name} evaluation failed: {str(e)}")
            results[config_name] = None

    return results


if __name__ == "__main__":
    # Run all configuration examples
    all_results = main()

    # Display final summary
    print(f"\n{'='*60}")
    print("CONFIGURATION SUMMARY")
    print(f"{'='*60}")

    successful_configs = sum(
        1 for result in all_results.values() if result is not None
    )
    total_configs = len(all_results)

    print(
        f"Successful evaluations: {successful_configs}/{total_configs}"
    )

    for config_name, result in all_results.items():
        status = "✅ Success" if result else "❌ Failed"
        print(f"{config_name}: {status}")
@ -0,0 +1,44 @@
"""
Simple example demonstrating CouncilAsAJudge usage.

This example shows how to use the CouncilAsAJudge to evaluate a task response
across multiple dimensions including accuracy, helpfulness, harmlessness,
coherence, conciseness, and instruction adherence.
"""

from swarms.structs.council_judge import CouncilAsAJudge


def main():
    """
    Main function demonstrating CouncilAsAJudge usage.
    """
    # Initialize the council judge
    council = CouncilAsAJudge(
        name="Quality Evaluation Council",
        description="Evaluates response quality across multiple dimensions",
        model_name="gpt-4o-mini",
        max_workers=4,
    )

    # Example task with a response to evaluate
    task_with_response = """
    Task: Explain the concept of machine learning to a beginner.

    Response: Machine learning is a subset of artificial intelligence that enables computers to learn and improve from experience without being explicitly programmed. It works by analyzing large amounts of data to identify patterns and make predictions or decisions. There are three main types: supervised learning (using labeled data), unsupervised learning (finding hidden patterns), and reinforcement learning (learning through trial and error). Machine learning is used in various applications like recommendation systems, image recognition, and natural language processing.
    """

    # Run the evaluation
    result = council.run(task=task_with_response)

    return result


if __name__ == "__main__":
    # Run the example
    evaluation_result = main()

    # Display the result
    print("Council Evaluation Complete!")
    print("=" * 50)
    print(evaluation_result)
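The hard-coded task/response string above can also be assembled from a live agent's output before evaluation. The sketch below uses only the Agent and CouncilAsAJudge calls already shown in these examples; the agent name, question, and prompt-assembly format are illustrative assumptions.

from swarms import Agent
from swarms.structs.council_judge import CouncilAsAJudge

# Generate a response with a worker agent, then have the council evaluate it.
worker = Agent(
    agent_name="Explainer",  # illustrative agent, not part of the examples above
    agent_description="Writes beginner-friendly technical explanations",
    model_name="gpt-4o-mini",
    max_loops=1,
)

question = "Explain the concept of machine learning to a beginner."
response = worker.run(task=question)

council = CouncilAsAJudge(
    name="Quality Evaluation Council",
    model_name="gpt-4o-mini",
    max_workers=4,
)

# The council examples above embed the task and its response in a single
# prompt string, so the same convention is assumed here.
evaluation = council.run(task=f"Task: {question}\n\nResponse: {response}")
print(evaluation)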
(Three binary image files changed; sizes unchanged: 11 KiB, 11 KiB, and 36 KiB.)
@ -1,16 +1,32 @@
-from swarms.structs.heavy_swarm import HeavySwarm
+from swarms import HeavySwarm


-swarm = HeavySwarm(
-    worker_model_name="claude-3-5-sonnet-20240620",
-    show_dashboard=True,
-    question_agent_model_name="gpt-4.1",
-    loops_per_agent=1,
-)
+def main():
+    """
+    Run a HeavySwarm query to find the best 3 gold ETFs.
+
+    This function initializes a HeavySwarm instance and queries it to provide
+    the top 3 gold exchange-traded funds (ETFs), requesting clear, structured results.
+    """
+    swarm = HeavySwarm(
+        name="Gold ETF Research Team",
+        description="A team of agents that research the best gold ETFs",
+        worker_model_name="claude-sonnet-4-latest",
+        show_dashboard=True,
+        question_agent_model_name="gpt-4.1",
+        loops_per_agent=1,
+    )

-out = swarm.run(
-    "Provide 3 publicly traded biotech companies that are currently trading below their cash value. For each company identified, provide available data or projections for the next 6 months, including any relevant financial metrics, upcoming catalysts, or events that could impact valuation. Present your findings in a clear, structured format. Be very specific and provide their ticker symbol, name, and the current price, cash value, and the percentage difference between the two."
-)
+    prompt = (
+        "Find the best 3 gold ETFs. For each ETF, provide the ticker symbol, "
+        "full name, current price, expense ratio, assets under management, and "
+        "a brief explanation of why it is considered among the best. Present the information "
+        "in a clear, structured format suitable for investors."
+    )

-print(out)
+    out = swarm.run(prompt)
+    print(out)
+
+
+if __name__ == "__main__":
+    main()
@ -0,0 +1,34 @@
from swarms import HeavySwarm


def main():
    """
    Run a HeavySwarm query to find the best and most promising treatments for diabetes.

    This function initializes a HeavySwarm instance and queries it to provide
    the top current and theoretical treatments for diabetes, requesting clear,
    structured, and evidence-based results suitable for medical research or clinical review.
    """
    swarm = HeavySwarm(
        name="Diabetes Treatment Research Team",
        description="A team of agents that research the best and most promising treatments for diabetes, including theoretical approaches.",
        worker_model_name="claude-sonnet-4-20250514",
        show_dashboard=True,
        question_agent_model_name="gpt-4.1",
        loops_per_agent=1,
    )

    prompt = (
        "Identify the best and most promising treatments for diabetes, including both current standard therapies and theoretical or experimental approaches. "
        "For each treatment, provide: the treatment name, type (e.g., medication, lifestyle intervention, device, gene therapy, etc.), "
        "mechanism of action, current stage of research or approval status, key clinical evidence or rationale, "
        "potential benefits and risks, and a brief summary of why it is considered promising. "
        "Present the information in a clear, structured format suitable for medical professionals or researchers."
    )

    out = swarm.run(prompt)
    print(out)


if __name__ == "__main__":
    main()
@ -0,0 +1,70 @@
"""
Debug script for the Arasaka Dashboard to test agent output display.
"""

import time

from swarms.structs.hiearchical_swarm import HierarchicalSwarm
from swarms.structs.agent import Agent


def debug_dashboard():
    """Debug the dashboard functionality."""

    print("🔍 Starting dashboard debug...")

    # Create simple agents with clear names
    agent1 = Agent(
        agent_name="Research-Agent",
        agent_description="A research agent for testing",
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )

    agent2 = Agent(
        agent_name="Analysis-Agent",
        agent_description="An analysis agent for testing",
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )

    print(
        f"✅ Created agents: {agent1.agent_name}, {agent2.agent_name}"
    )

    # Create swarm with dashboard
    swarm = HierarchicalSwarm(
        name="Debug Swarm",
        description="A test swarm for debugging dashboard functionality",
        agents=[agent1, agent2],
        max_loops=1,
        interactive=True,
        verbose=True,
    )

    print("✅ Created swarm with dashboard")
    print("📊 Dashboard should now show agents in PENDING status")

    # Wait a moment to see the initial dashboard
    time.sleep(3)

    print("\n🚀 Starting swarm execution...")

    # Run with a simple task
    result = swarm.run(
        task="Create a brief summary of machine learning"
    )

    print("\n✅ Debug completed!")
    print("📋 Final result preview:")
    print(
        str(result)[:300] + "..."
        if len(str(result)) > 300
        else str(result)
    )


if __name__ == "__main__":
    debug_dashboard()
@ -0,0 +1,71 @@
"""
Hierarchical Swarm with Arasaka Dashboard Example

This example demonstrates the new interactive dashboard functionality for the
hierarchical swarm, featuring a futuristic Arasaka Corporation-style interface
with a red and black color scheme.
"""

from swarms.structs.hiearchical_swarm import HierarchicalSwarm
from swarms.structs.agent import Agent


def main():
    """
    Demonstrate the hierarchical swarm with the interactive dashboard.
    """
    print("🚀 Initializing Swarms Corporation Hierarchical Swarm...")

    # Create specialized agents
    research_agent = Agent(
        agent_name="Research-Analyst",
        agent_description="Specialized in comprehensive research and data gathering",
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )

    analysis_agent = Agent(
        agent_name="Data-Analyst",
        agent_description="Expert in data analysis and pattern recognition",
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )

    strategy_agent = Agent(
        agent_name="Strategy-Consultant",
        agent_description="Specialized in strategic planning and recommendations",
        model_name="gpt-4o-mini",
        max_loops=1,
        verbose=False,
    )

    # Create hierarchical swarm with interactive dashboard
    swarm = HierarchicalSwarm(
        name="Swarms Corporation Operations",
        description="Enterprise-grade hierarchical swarm for complex task execution",
        agents=[research_agent, analysis_agent, strategy_agent],
        max_loops=2,
        interactive=True,  # Enable the Arasaka dashboard
        verbose=True,
    )

    print("\n🎯 Swarm initialized successfully!")
    print(
        "📊 Interactive dashboard will be displayed during execution."
    )
    print(
        "💡 The swarm will prompt you for a task when you call swarm.run()"
    )

    # Run the swarm (task will be prompted interactively)
    result = swarm.run()

    print("\n✅ Swarm execution completed!")
    print("📋 Final result:")
    print(result)


if __name__ == "__main__":
    main()
Some files were not shown because too many files have changed in this diff.