diff --git a/docs/swarms/structs/cron_job.md b/docs/swarms/structs/cron_job.md
index 2d06c3af..c2ab0c24 100644
--- a/docs/swarms/structs/cron_job.md
+++ b/docs/swarms/structs/cron_job.md
@@ -122,6 +122,363 @@ cron_job = CronJob(
cron_job.run("Perform analysis")
```
+
+### Cron Jobs With Multi-Agent Structures
+
+You can also run cron jobs with multi-agent structures such as `SequentialWorkflow`, `ConcurrentWorkflow`, `HierarchicalSwarm`, and other swarm architectures.
+
+- Pass the initialized structure as the `agent` parameter: `CronJob(agent=swarm)`
+
+- Provide the task via the `.run(task: str)` method, as shown in the examples below
+
+
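+Here is a minimal sketch of the pattern before the full example (the agent names, prompts, model, and the 60-second interval are illustrative placeholders, not required values):
+
+```python
+from swarms import Agent, CronJob, ConcurrentWorkflow
+
+# Two illustrative agents (names and prompts are placeholders)
+btc_agent = Agent(
+    agent_name="BTC-Watcher",
+    system_prompt="Track and summarize Bitcoin price action.",
+    model_name="gpt-4o-mini",
+    max_loops=1,
+)
+eth_agent = Agent(
+    agent_name="ETH-Watcher",
+    system_prompt="Track and summarize Ethereum price action.",
+    model_name="gpt-4o-mini",
+    max_loops=1,
+)
+
+# Any multi-agent structure that exposes .run(task) can be scheduled
+swarm = ConcurrentWorkflow(agents=[btc_agent, eth_agent], max_loops=1)
+
+# Pass the swarm where a single Agent would normally go
+cron_job = CronJob(agent=swarm, interval="60seconds")
+cron_job.run(task="Summarize the latest price action for your assigned asset")
+```
+
+The full example below applies the same pattern to a CoinGecko-powered, multi-coin analysis workflow.
+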
+```python
+"""
+Cryptocurrency Concurrent Multi-Agent Cron Job Example
+
+This example demonstrates how to use ConcurrentWorkflow with CronJob to create
+a powerful cryptocurrency tracking system. Each specialized agent analyzes a
+specific cryptocurrency concurrently every minute.
+
+Features:
+- ConcurrentWorkflow for parallel agent execution
+- CronJob scheduling for automated runs every 1 minute
+- Each agent specializes in analyzing one specific cryptocurrency
+- Real-time data fetching from CoinGecko API
+- Concurrent analysis of multiple cryptocurrencies
+- Structured output with professional formatting
+
+Architecture:
+CronJob -> ConcurrentWorkflow -> [Bitcoin Agent, Ethereum Agent, Solana Agent, etc.] -> Parallel Analysis
+"""
+
+from typing import List
+from loguru import logger
+
+from swarms import Agent, CronJob, ConcurrentWorkflow
+from swarms_tools import coin_gecko_coin_api
+
+
+def create_crypto_specific_agents() -> List[Agent]:
+ """
+ Creates agents that each specialize in analyzing a specific cryptocurrency.
+
+ Returns:
+ List[Agent]: List of cryptocurrency-specific Agent instances
+ """
+
+ # Bitcoin Specialist Agent
+ bitcoin_agent = Agent(
+ agent_name="Bitcoin-Analyst",
+ agent_description="Expert analyst specializing exclusively in Bitcoin (BTC) analysis and market dynamics",
+ system_prompt="""You are a Bitcoin specialist and expert analyst. Your expertise includes:
+
+BITCOIN SPECIALIZATION:
+- Bitcoin's unique position as digital gold
+- Bitcoin halving cycles and their market impact
+- Bitcoin mining economics and hash rate analysis
+- Lightning Network and Layer 2 developments
+- Bitcoin adoption by institutions and countries
+- Bitcoin's correlation with traditional markets
+- Bitcoin technical analysis and on-chain metrics
+- Bitcoin's role as a store of value and hedge against inflation
+
+ANALYSIS FOCUS:
+- Analyze ONLY Bitcoin data from the provided dataset
+- Focus on Bitcoin-specific metrics and trends
+- Consider Bitcoin's unique market dynamics
+- Evaluate Bitcoin's dominance and market leadership
+- Assess institutional adoption trends
+- Monitor on-chain activity and network health
+
+DELIVERABLES:
+- Bitcoin-specific analysis and insights
+- Price action assessment and predictions
+- Market dominance analysis
+- Institutional adoption impact
+- Technical and fundamental outlook
+- Risk factors specific to Bitcoin
+
+Extract Bitcoin data from the provided dataset and provide comprehensive Bitcoin-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # Ethereum Specialist Agent
+ ethereum_agent = Agent(
+ agent_name="Ethereum-Analyst",
+ agent_description="Expert analyst specializing exclusively in Ethereum (ETH) analysis and ecosystem development",
+ system_prompt="""You are an Ethereum specialist and expert analyst. Your expertise includes:
+
+ETHEREUM SPECIALIZATION:
+- Ethereum's smart contract platform and DeFi ecosystem
+- Ethereum 2.0 transition and proof-of-stake mechanics
+- Gas fees, network usage, and scalability solutions
+- Layer 2 solutions (Arbitrum, Optimism, Polygon)
+- DeFi protocols and TVL (Total Value Locked) analysis
+- NFT markets and Ethereum's role in digital assets
+- Developer activity and ecosystem growth
+- EIP proposals and network upgrades
+
+ANALYSIS FOCUS:
+- Analyze ONLY Ethereum data from the provided dataset
+- Focus on Ethereum's platform utility and network effects
+- Evaluate DeFi ecosystem health and growth
+- Assess Layer 2 adoption and scalability solutions
+- Monitor network usage and gas fee trends
+- Consider Ethereum's competitive position vs other smart contract platforms
+
+DELIVERABLES:
+- Ethereum-specific analysis and insights
+- Platform utility and adoption metrics
+- DeFi ecosystem impact assessment
+- Network health and scalability evaluation
+- Competitive positioning analysis
+- Technical and fundamental outlook for ETH
+
+Extract Ethereum data from the provided dataset and provide comprehensive Ethereum-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # Solana Specialist Agent
+ solana_agent = Agent(
+ agent_name="Solana-Analyst",
+ agent_description="Expert analyst specializing exclusively in Solana (SOL) analysis and ecosystem development",
+ system_prompt="""You are a Solana specialist and expert analyst. Your expertise includes:
+
+SOLANA SPECIALIZATION:
+- Solana's high-performance blockchain architecture
+- Proof-of-History consensus mechanism
+- Solana's DeFi ecosystem and DEX platforms (Serum, Raydium)
+- NFT marketplaces and creator economy on Solana
+- Network outages and reliability concerns
+- Developer ecosystem and Rust programming adoption
+- Validator economics and network decentralization
+- Cross-chain bridges and interoperability
+
+ANALYSIS FOCUS:
+- Analyze ONLY Solana data from the provided dataset
+- Focus on Solana's performance and scalability advantages
+- Evaluate network stability and uptime improvements
+- Assess ecosystem growth and developer adoption
+- Monitor DeFi and NFT activity on Solana
+- Consider Solana's competitive position vs Ethereum
+
+DELIVERABLES:
+- Solana-specific analysis and insights
+- Network performance and reliability assessment
+- Ecosystem growth and adoption metrics
+- DeFi and NFT market analysis
+- Competitive advantages and challenges
+- Technical and fundamental outlook for SOL
+
+Extract Solana data from the provided dataset and provide comprehensive Solana-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # Cardano Specialist Agent
+ cardano_agent = Agent(
+ agent_name="Cardano-Analyst",
+ agent_description="Expert analyst specializing exclusively in Cardano (ADA) analysis and research-driven development",
+ system_prompt="""You are a Cardano specialist and expert analyst. Your expertise includes:
+
+CARDANO SPECIALIZATION:
+- Cardano's research-driven development approach
+- Ouroboros proof-of-stake consensus protocol
+- Smart contract capabilities via Plutus and Marlowe
+- Cardano's three-layer architecture (settlement, computation, control)
+- Academic partnerships and peer-reviewed research
+- Cardano ecosystem projects and DApp development
+- Native tokens and Cardano's UTXO model
+- Sustainability and treasury funding mechanisms
+
+ANALYSIS FOCUS:
+- Analyze ONLY Cardano data from the provided dataset
+- Focus on Cardano's methodical development approach
+- Evaluate smart contract adoption and ecosystem growth
+- Assess academic partnerships and research contributions
+- Monitor native token ecosystem development
+- Consider Cardano's long-term roadmap and milestones
+
+DELIVERABLES:
+- Cardano-specific analysis and insights
+- Development progress and milestone achievements
+- Smart contract ecosystem evaluation
+- Academic research impact assessment
+- Native token and DApp adoption metrics
+- Technical and fundamental outlook for ADA
+
+Extract Cardano data from the provided dataset and provide comprehensive Cardano-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # Binance Coin Specialist Agent
+ bnb_agent = Agent(
+ agent_name="BNB-Analyst",
+ agent_description="Expert analyst specializing exclusively in BNB analysis and Binance ecosystem dynamics",
+ system_prompt="""You are a BNB specialist and expert analyst. Your expertise includes:
+
+BNB SPECIALIZATION:
+- BNB's utility within the Binance ecosystem
+- Binance Smart Chain (BSC) development and adoption
+- BNB token burns and deflationary mechanics
+- Binance exchange volume and market leadership
+- BSC DeFi ecosystem and yield farming
+- Cross-chain bridges and multi-chain strategies
+- Regulatory challenges facing Binance globally
+- BNB's role in transaction fee discounts and platform benefits
+
+ANALYSIS FOCUS:
+- Analyze ONLY BNB data from the provided dataset
+- Focus on BNB's utility value and exchange benefits
+- Evaluate BSC ecosystem growth and competition with Ethereum
+- Assess token burn impact on supply and price
+- Monitor Binance platform developments and regulations
+- Consider BNB's centralized vs decentralized aspects
+
+DELIVERABLES:
+- BNB-specific analysis and insights
+- Utility value and ecosystem benefits assessment
+- BSC adoption and DeFi growth evaluation
+- Token economics and burn mechanism impact
+- Regulatory risk and compliance analysis
+- Technical and fundamental outlook for BNB
+
+Extract BNB data from the provided dataset and provide comprehensive BNB-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # XRP Specialist Agent
+ xrp_agent = Agent(
+ agent_name="XRP-Analyst",
+ agent_description="Expert analyst specializing exclusively in XRP analysis and cross-border payment solutions",
+ system_prompt="""You are an XRP specialist and expert analyst. Your expertise includes:
+
+XRP SPECIALIZATION:
+- XRP's role in cross-border payments and remittances
+- RippleNet adoption by financial institutions
+- Central Bank Digital Currency (CBDC) partnerships
+- Regulatory landscape and SEC lawsuit implications
+- XRP Ledger's consensus mechanism and energy efficiency
+- On-Demand Liquidity (ODL) usage and growth
+- Competition with SWIFT and traditional payment rails
+- Ripple's partnerships with banks and payment providers
+
+ANALYSIS FOCUS:
+- Analyze ONLY XRP data from the provided dataset
+- Focus on XRP's utility in payments and remittances
+- Evaluate RippleNet adoption and institutional partnerships
+- Assess regulatory developments and legal clarity
+- Monitor ODL usage and transaction volumes
+- Consider XRP's competitive position in payments
+
+DELIVERABLES:
+- XRP-specific analysis and insights
+- Payment utility and adoption assessment
+- Regulatory landscape and legal developments
+- Institutional partnership impact evaluation
+- Cross-border payment market analysis
+- Technical and fundamental outlook for XRP
+
+Extract XRP data from the provided dataset and provide comprehensive XRP-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ return [
+ bitcoin_agent,
+ ethereum_agent,
+ solana_agent,
+ cardano_agent,
+ bnb_agent,
+ xrp_agent,
+ ]
+
+
+def create_crypto_workflow() -> ConcurrentWorkflow:
+ """
+ Creates a ConcurrentWorkflow with cryptocurrency-specific analysis agents.
+
+ Returns:
+ ConcurrentWorkflow: Configured workflow for crypto analysis
+ """
+ agents = create_crypto_specific_agents()
+
+ workflow = ConcurrentWorkflow(
+ name="Crypto-Specific-Analysis-Workflow",
+ description="Concurrent execution of cryptocurrency-specific analysis agents",
+ agents=agents,
+ max_loops=1,
+ )
+
+ return workflow
+
+
+def create_crypto_cron_job() -> CronJob:
+ """
+ Creates a CronJob that runs cryptocurrency-specific analysis every minute using ConcurrentWorkflow.
+
+ Returns:
+ CronJob: Configured cron job for automated crypto analysis
+ """
+ # Create the concurrent workflow
+ workflow = create_crypto_workflow()
+
+ # Create the cron job
+ cron_job = CronJob(
+ agent=workflow, # Use the workflow as the agent
+        interval="60seconds",  # Run every minute
+ )
+
+ return cron_job
+
+
+def main():
+ """
+ Main function to run the cryptocurrency-specific concurrent analysis cron job.
+ """
+ cron_job = create_crypto_cron_job()
+
+ prompt = """
+
+ Conduct a comprehensive analysis of your assigned cryptocurrency.
+
+ """
+
+ # Start the cron job
+    logger.info("Starting automated analysis loop...")
+    logger.info("Press Ctrl+C to stop the cron job")
+
+ output = cron_job.run(task=prompt)
+ print(output)
+
+
+if __name__ == "__main__":
+ main()
+```
+
## Conclusion
The CronJob class provides a powerful way to schedule and automate tasks using Swarms Agents or custom functions. Key benefits include:
diff --git a/examples/cron_job_examples/crypto_concurrent_cron_example.py b/examples/cron_job_examples/crypto_concurrent_cron_example.py
new file mode 100644
index 00000000..e1b837ce
--- /dev/null
+++ b/examples/cron_job_examples/crypto_concurrent_cron_example.py
@@ -0,0 +1,349 @@
+"""
+Cryptocurrency Concurrent Multi-Agent Cron Job Example
+
+This example demonstrates how to use ConcurrentWorkflow with CronJob to create
+a powerful cryptocurrency tracking system. Each specialized agent analyzes a
+specific cryptocurrency concurrently every minute.
+
+Features:
+- ConcurrentWorkflow for parallel agent execution
+- CronJob scheduling for automated runs every 1 minute
+- Each agent specializes in analyzing one specific cryptocurrency
+- Real-time data fetching from CoinGecko API
+- Concurrent analysis of multiple cryptocurrencies
+- Structured output with professional formatting
+
+Architecture:
+CronJob -> ConcurrentWorkflow -> [Bitcoin Agent, Ethereum Agent, Solana Agent, etc.] -> Parallel Analysis
+"""
+
+from typing import List
+from loguru import logger
+
+from swarms import Agent, CronJob, ConcurrentWorkflow
+from swarms_tools import coin_gecko_coin_api
+
+
+def create_crypto_specific_agents() -> List[Agent]:
+ """
+ Creates agents that each specialize in analyzing a specific cryptocurrency.
+
+ Returns:
+ List[Agent]: List of cryptocurrency-specific Agent instances
+ """
+
+ # Bitcoin Specialist Agent
+ bitcoin_agent = Agent(
+ agent_name="Bitcoin-Analyst",
+ agent_description="Expert analyst specializing exclusively in Bitcoin (BTC) analysis and market dynamics",
+ system_prompt="""You are a Bitcoin specialist and expert analyst. Your expertise includes:
+
+BITCOIN SPECIALIZATION:
+- Bitcoin's unique position as digital gold
+- Bitcoin halving cycles and their market impact
+- Bitcoin mining economics and hash rate analysis
+- Lightning Network and Layer 2 developments
+- Bitcoin adoption by institutions and countries
+- Bitcoin's correlation with traditional markets
+- Bitcoin technical analysis and on-chain metrics
+- Bitcoin's role as a store of value and hedge against inflation
+
+ANALYSIS FOCUS:
+- Analyze ONLY Bitcoin data from the provided dataset
+- Focus on Bitcoin-specific metrics and trends
+- Consider Bitcoin's unique market dynamics
+- Evaluate Bitcoin's dominance and market leadership
+- Assess institutional adoption trends
+- Monitor on-chain activity and network health
+
+DELIVERABLES:
+- Bitcoin-specific analysis and insights
+- Price action assessment and predictions
+- Market dominance analysis
+- Institutional adoption impact
+- Technical and fundamental outlook
+- Risk factors specific to Bitcoin
+
+Extract Bitcoin data from the provided dataset and provide comprehensive Bitcoin-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # Ethereum Specialist Agent
+ ethereum_agent = Agent(
+ agent_name="Ethereum-Analyst",
+ agent_description="Expert analyst specializing exclusively in Ethereum (ETH) analysis and ecosystem development",
+ system_prompt="""You are an Ethereum specialist and expert analyst. Your expertise includes:
+
+ETHEREUM SPECIALIZATION:
+- Ethereum's smart contract platform and DeFi ecosystem
+- Ethereum 2.0 transition and proof-of-stake mechanics
+- Gas fees, network usage, and scalability solutions
+- Layer 2 solutions (Arbitrum, Optimism, Polygon)
+- DeFi protocols and TVL (Total Value Locked) analysis
+- NFT markets and Ethereum's role in digital assets
+- Developer activity and ecosystem growth
+- EIP proposals and network upgrades
+
+ANALYSIS FOCUS:
+- Analyze ONLY Ethereum data from the provided dataset
+- Focus on Ethereum's platform utility and network effects
+- Evaluate DeFi ecosystem health and growth
+- Assess Layer 2 adoption and scalability solutions
+- Monitor network usage and gas fee trends
+- Consider Ethereum's competitive position vs other smart contract platforms
+
+DELIVERABLES:
+- Ethereum-specific analysis and insights
+- Platform utility and adoption metrics
+- DeFi ecosystem impact assessment
+- Network health and scalability evaluation
+- Competitive positioning analysis
+- Technical and fundamental outlook for ETH
+
+Extract Ethereum data from the provided dataset and provide comprehensive Ethereum-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # Solana Specialist Agent
+ solana_agent = Agent(
+ agent_name="Solana-Analyst",
+ agent_description="Expert analyst specializing exclusively in Solana (SOL) analysis and ecosystem development",
+ system_prompt="""You are a Solana specialist and expert analyst. Your expertise includes:
+
+SOLANA SPECIALIZATION:
+- Solana's high-performance blockchain architecture
+- Proof-of-History consensus mechanism
+- Solana's DeFi ecosystem and DEX platforms (Serum, Raydium)
+- NFT marketplaces and creator economy on Solana
+- Network outages and reliability concerns
+- Developer ecosystem and Rust programming adoption
+- Validator economics and network decentralization
+- Cross-chain bridges and interoperability
+
+ANALYSIS FOCUS:
+- Analyze ONLY Solana data from the provided dataset
+- Focus on Solana's performance and scalability advantages
+- Evaluate network stability and uptime improvements
+- Assess ecosystem growth and developer adoption
+- Monitor DeFi and NFT activity on Solana
+- Consider Solana's competitive position vs Ethereum
+
+DELIVERABLES:
+- Solana-specific analysis and insights
+- Network performance and reliability assessment
+- Ecosystem growth and adoption metrics
+- DeFi and NFT market analysis
+- Competitive advantages and challenges
+- Technical and fundamental outlook for SOL
+
+Extract Solana data from the provided dataset and provide comprehensive Solana-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # Cardano Specialist Agent
+ cardano_agent = Agent(
+ agent_name="Cardano-Analyst",
+ agent_description="Expert analyst specializing exclusively in Cardano (ADA) analysis and research-driven development",
+ system_prompt="""You are a Cardano specialist and expert analyst. Your expertise includes:
+
+CARDANO SPECIALIZATION:
+- Cardano's research-driven development approach
+- Ouroboros proof-of-stake consensus protocol
+- Smart contract capabilities via Plutus and Marlowe
+- Cardano's three-layer architecture (settlement, computation, control)
+- Academic partnerships and peer-reviewed research
+- Cardano ecosystem projects and DApp development
+- Native tokens and Cardano's UTXO model
+- Sustainability and treasury funding mechanisms
+
+ANALYSIS FOCUS:
+- Analyze ONLY Cardano data from the provided dataset
+- Focus on Cardano's methodical development approach
+- Evaluate smart contract adoption and ecosystem growth
+- Assess academic partnerships and research contributions
+- Monitor native token ecosystem development
+- Consider Cardano's long-term roadmap and milestones
+
+DELIVERABLES:
+- Cardano-specific analysis and insights
+- Development progress and milestone achievements
+- Smart contract ecosystem evaluation
+- Academic research impact assessment
+- Native token and DApp adoption metrics
+- Technical and fundamental outlook for ADA
+
+Extract Cardano data from the provided dataset and provide comprehensive Cardano-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # Binance Coin Specialist Agent
+ bnb_agent = Agent(
+ agent_name="BNB-Analyst",
+ agent_description="Expert analyst specializing exclusively in BNB analysis and Binance ecosystem dynamics",
+ system_prompt="""You are a BNB specialist and expert analyst. Your expertise includes:
+
+BNB SPECIALIZATION:
+- BNB's utility within the Binance ecosystem
+- Binance Smart Chain (BSC) development and adoption
+- BNB token burns and deflationary mechanics
+- Binance exchange volume and market leadership
+- BSC DeFi ecosystem and yield farming
+- Cross-chain bridges and multi-chain strategies
+- Regulatory challenges facing Binance globally
+- BNB's role in transaction fee discounts and platform benefits
+
+ANALYSIS FOCUS:
+- Analyze ONLY BNB data from the provided dataset
+- Focus on BNB's utility value and exchange benefits
+- Evaluate BSC ecosystem growth and competition with Ethereum
+- Assess token burn impact on supply and price
+- Monitor Binance platform developments and regulations
+- Consider BNB's centralized vs decentralized aspects
+
+DELIVERABLES:
+- BNB-specific analysis and insights
+- Utility value and ecosystem benefits assessment
+- BSC adoption and DeFi growth evaluation
+- Token economics and burn mechanism impact
+- Regulatory risk and compliance analysis
+- Technical and fundamental outlook for BNB
+
+Extract BNB data from the provided dataset and provide comprehensive BNB-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ # XRP Specialist Agent
+ xrp_agent = Agent(
+ agent_name="XRP-Analyst",
+ agent_description="Expert analyst specializing exclusively in XRP analysis and cross-border payment solutions",
+ system_prompt="""You are an XRP specialist and expert analyst. Your expertise includes:
+
+XRP SPECIALIZATION:
+- XRP's role in cross-border payments and remittances
+- RippleNet adoption by financial institutions
+- Central Bank Digital Currency (CBDC) partnerships
+- Regulatory landscape and SEC lawsuit implications
+- XRP Ledger's consensus mechanism and energy efficiency
+- On-Demand Liquidity (ODL) usage and growth
+- Competition with SWIFT and traditional payment rails
+- Ripple's partnerships with banks and payment providers
+
+ANALYSIS FOCUS:
+- Analyze ONLY XRP data from the provided dataset
+- Focus on XRP's utility in payments and remittances
+- Evaluate RippleNet adoption and institutional partnerships
+- Assess regulatory developments and legal clarity
+- Monitor ODL usage and transaction volumes
+- Consider XRP's competitive position in payments
+
+DELIVERABLES:
+- XRP-specific analysis and insights
+- Payment utility and adoption assessment
+- Regulatory landscape and legal developments
+- Institutional partnership impact evaluation
+- Cross-border payment market analysis
+- Technical and fundamental outlook for XRP
+
+Extract XRP data from the provided dataset and provide comprehensive XRP-focused analysis.""",
+ model_name="groq/moonshotai/kimi-k2-instruct",
+ max_loops=1,
+ dynamic_temperature_enabled=True,
+ streaming_on=False,
+ tools=[coin_gecko_coin_api],
+ )
+
+ return [
+ bitcoin_agent,
+ ethereum_agent,
+ solana_agent,
+ cardano_agent,
+ bnb_agent,
+ xrp_agent,
+ ]
+
+
+def create_crypto_workflow() -> ConcurrentWorkflow:
+ """
+ Creates a ConcurrentWorkflow with cryptocurrency-specific analysis agents.
+
+ Returns:
+ ConcurrentWorkflow: Configured workflow for crypto analysis
+ """
+ agents = create_crypto_specific_agents()
+
+ workflow = ConcurrentWorkflow(
+ name="Crypto-Specific-Analysis-Workflow",
+ description="Concurrent execution of cryptocurrency-specific analysis agents",
+ agents=agents,
+ max_loops=1,
+ )
+
+ return workflow
+
+
+def create_crypto_cron_job() -> CronJob:
+ """
+ Creates a CronJob that runs cryptocurrency-specific analysis every minute using ConcurrentWorkflow.
+
+ Returns:
+ CronJob: Configured cron job for automated crypto analysis
+ """
+ # Create the concurrent workflow
+ workflow = create_crypto_workflow()
+
+ # Create the cron job
+ cron_job = CronJob(
+ agent=workflow, # Use the workflow as the agent
+        interval="60seconds",  # Run every minute
+ )
+
+ return cron_job
+
+
+def main():
+ """
+ Main function to run the cryptocurrency-specific concurrent analysis cron job.
+ """
+ cron_job = create_crypto_cron_job()
+
+ prompt = (
+ "You are a world-class institutional crypto analyst at a top-tier asset management firm (e.g., BlackRock).\n"
+ "Conduct a thorough, data-driven, and professional analysis of your assigned cryptocurrency, including:\n"
+ "- Current price, market cap, and recent performance trends\n"
+ "- Key technical and fundamental indicators\n"
+ "- Major news, regulatory, or macroeconomic events impacting the asset\n"
+ "- On-chain activity and notable whale or institutional movements\n"
+ "- Short-term and long-term outlook with clear, actionable insights\n"
+ "Present your findings in a concise, well-structured report suitable for executive decision-makers."
+ )
+
+ # Start the cron job
+    logger.info("Starting automated analysis loop...")
+    logger.info("Press Ctrl+C to stop the cron job")
+
+ output = cron_job.run(task=prompt)
+ print(output)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/cron_job_examples/simple_concurrent_crypto_cron.py b/examples/cron_job_examples/simple_concurrent_crypto_cron.py
new file mode 100644
index 00000000..670bfd26
--- /dev/null
+++ b/examples/cron_job_examples/simple_concurrent_crypto_cron.py
@@ -0,0 +1,157 @@
+"""
+Simple Cryptocurrency Concurrent CronJob Example
+
+This is a simplified version showcasing the core concept of combining:
+- CronJob (for scheduling)
+- ConcurrentWorkflow (for parallel execution)
+- Each agent analyzes a specific cryptocurrency
+
+Perfect for understanding the basic pattern before diving into the full example.
+"""
+
+import json
+import requests
+from datetime import datetime
+from loguru import logger
+
+from swarms import Agent, CronJob, ConcurrentWorkflow
+
+
+def get_specific_crypto_data(coin_ids):
+ """Fetch specific crypto data from CoinGecko API."""
+ try:
+ url = "https://api.coingecko.com/api/v3/simple/price"
+ params = {
+ "ids": ",".join(coin_ids),
+ "vs_currencies": "usd",
+ "include_24hr_change": True,
+ "include_market_cap": True,
+ "include_24hr_vol": True,
+ }
+
+ response = requests.get(url, params=params, timeout=10)
+ response.raise_for_status()
+
+ data = response.json()
+ result = {
+ "timestamp": datetime.now().isoformat(),
+ "coins": data,
+ }
+
+ return json.dumps(result, indent=2)
+
+ except Exception as e:
+ logger.error(f"Error fetching crypto data: {e}")
+ return f"Error: {e}"
+
+
+def create_crypto_specific_agents():
+ """Create agents that each specialize in one cryptocurrency."""
+
+ # Bitcoin Specialist Agent
+ bitcoin_agent = Agent(
+ agent_name="Bitcoin-Analyst",
+ system_prompt="""You are a Bitcoin specialist. Analyze ONLY Bitcoin (BTC) data from the provided dataset.
+ Focus on:
+ - Bitcoin price movements and trends
+ - Market dominance and institutional adoption
+ - Bitcoin-specific market dynamics
+ - Store of value characteristics
+ Ignore all other cryptocurrencies in your analysis.""",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ print_on=False, # Important for concurrent execution
+ )
+
+ # Ethereum Specialist Agent
+ ethereum_agent = Agent(
+ agent_name="Ethereum-Analyst",
+ system_prompt="""You are an Ethereum specialist. Analyze ONLY Ethereum (ETH) data from the provided dataset.
+ Focus on:
+ - Ethereum price action and DeFi ecosystem
+ - Smart contract platform adoption
+ - Gas fees and network usage
+ - Layer 2 scaling solutions impact
+ Ignore all other cryptocurrencies in your analysis.""",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ print_on=False,
+ )
+
+ # Solana Specialist Agent
+ solana_agent = Agent(
+ agent_name="Solana-Analyst",
+ system_prompt="""You are a Solana specialist. Analyze ONLY Solana (SOL) data from the provided dataset.
+ Focus on:
+ - Solana price performance and ecosystem growth
+ - High-performance blockchain advantages
+ - DeFi and NFT activity on Solana
+ - Network reliability and uptime
+ Ignore all other cryptocurrencies in your analysis.""",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ print_on=False,
+ )
+
+ return [bitcoin_agent, ethereum_agent, solana_agent]
+
+
+def main():
+ """Main function demonstrating crypto-specific concurrent analysis with cron job."""
+    logger.info(
+        "Starting Simple Crypto-Specific Concurrent Analysis"
+    )
+    logger.info("Each agent analyzes one specific cryptocurrency:")
+    logger.info("   Bitcoin-Analyst -> BTC only")
+    logger.info("   Ethereum-Analyst -> ETH only")
+    logger.info("   Solana-Analyst -> SOL only")
+
+ # Define specific cryptocurrencies to analyze
+ coin_ids = ["bitcoin", "ethereum", "solana"]
+
+ # Step 1: Create crypto-specific agents
+ agents = create_crypto_specific_agents()
+
+ # Step 2: Create ConcurrentWorkflow
+ workflow = ConcurrentWorkflow(
+ name="Simple-Crypto-Specific-Analysis",
+ agents=agents,
+ show_dashboard=True, # Shows real-time progress
+ )
+
+ # Step 3: Create CronJob with the workflow
+ cron_job = CronJob(
+ agent=workflow, # Use workflow as the agent
+ interval="60seconds", # Run every minute
+ job_id="simple-crypto-specific-cron",
+ )
+
+ # Step 4: Define the analysis task
+ task = f"""
+ Analyze the cryptocurrency data below. Each agent should focus ONLY on their assigned cryptocurrency:
+
+ - Bitcoin-Analyst: Analyze Bitcoin (BTC) data only
+ - Ethereum-Analyst: Analyze Ethereum (ETH) data only
+ - Solana-Analyst: Analyze Solana (SOL) data only
+
+ Cryptocurrency Data:
+ {get_specific_crypto_data(coin_ids)}
+
+ Each agent should:
+ 1. Extract and analyze data for YOUR ASSIGNED cryptocurrency only
+ 2. Provide brief insights from your specialty perspective
+ 3. Give a price trend assessment
+ 4. Identify key opportunities or risks
+ 5. Ignore all other cryptocurrencies
+ """
+
+ # Step 5: Start the cron job
+    logger.info("Starting cron job - Press Ctrl+C to stop")
+ try:
+ cron_job.run(task=task)
+ except KeyboardInterrupt:
+        logger.info("Stopped by user")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/cron_job_examples/solana_price_tracker.py b/examples/cron_job_examples/solana_price_tracker.py
index 0ae048ab..e7c9dd17 100644
--- a/examples/cron_job_examples/solana_price_tracker.py
+++ b/examples/cron_job_examples/solana_price_tracker.py
@@ -141,7 +141,7 @@ def analyze_solana_data(data: str) -> str:
formatted_data = solana_data.get("formatted_data", {})
# Extract key metrics
- current_price = price_data.get("current_price_usd")
+ price_data.get("current_price_usd")
price_change = price_data.get("price_change_24h_percent")
volume_24h = price_data.get("volume_24h_usd")
market_cap = price_data.get("market_cap_usd")
diff --git a/examples/multi_agent/board_of_directors/board_of_directors_example.py b/examples/multi_agent/board_of_directors/board_of_directors_example.py
index bc043733..2461919e 100644
--- a/examples/multi_agent/board_of_directors/board_of_directors_example.py
+++ b/examples/multi_agent/board_of_directors/board_of_directors_example.py
@@ -16,11 +16,13 @@ from typing import List
# Add the root directory to the Python path if running from examples directory
current_dir = os.path.dirname(os.path.abspath(__file__))
-if 'examples' in current_dir:
+if "examples" in current_dir:
root_dir = current_dir
- while os.path.basename(root_dir) != 'examples' and root_dir != os.path.dirname(root_dir):
+ while os.path.basename(
+ root_dir
+ ) != "examples" and root_dir != os.path.dirname(root_dir):
root_dir = os.path.dirname(root_dir)
- if os.path.basename(root_dir) == 'examples':
+ if os.path.basename(root_dir) == "examples":
root_dir = os.path.dirname(root_dir)
if root_dir not in sys.path:
sys.path.insert(0, root_dir)
@@ -35,7 +37,7 @@ from swarms.structs.agent import Agent
def create_board_members() -> List[BoardMember]:
"""Create board members with specific roles."""
-
+
chairman = Agent(
agent_name="Chairman",
agent_description="Executive Chairman with strategic vision",
@@ -43,7 +45,7 @@ def create_board_members() -> List[BoardMember]:
max_loops=1,
system_prompt="You are the Executive Chairman. Provide strategic leadership and facilitate decision-making.",
)
-
+
cto = Agent(
agent_name="CTO",
agent_description="Chief Technology Officer with technical expertise",
@@ -51,7 +53,7 @@ def create_board_members() -> List[BoardMember]:
max_loops=1,
system_prompt="You are the CTO. Provide technical leadership and evaluate technology solutions.",
)
-
+
cfo = Agent(
agent_name="CFO",
agent_description="Chief Financial Officer with financial expertise",
@@ -59,32 +61,32 @@ def create_board_members() -> List[BoardMember]:
max_loops=1,
system_prompt="You are the CFO. Provide financial analysis and ensure fiscal responsibility.",
)
-
+
return [
BoardMember(
agent=chairman,
role=BoardMemberRole.CHAIRMAN,
voting_weight=2.0,
- expertise_areas=["leadership", "strategy"]
+ expertise_areas=["leadership", "strategy"],
),
BoardMember(
agent=cto,
role=BoardMemberRole.EXECUTIVE_DIRECTOR,
voting_weight=1.5,
- expertise_areas=["technology", "innovation"]
+ expertise_areas=["technology", "innovation"],
),
BoardMember(
agent=cfo,
role=BoardMemberRole.EXECUTIVE_DIRECTOR,
voting_weight=1.5,
- expertise_areas=["finance", "risk_management"]
+ expertise_areas=["finance", "risk_management"],
),
]
def create_worker_agents() -> List[Agent]:
"""Create worker agents for the swarm."""
-
+
researcher = Agent(
agent_name="Researcher",
agent_description="Research analyst for data analysis",
@@ -92,7 +94,7 @@ def create_worker_agents() -> List[Agent]:
max_loops=1,
system_prompt="You are a Research Analyst. Conduct thorough research and provide data-driven insights.",
)
-
+
developer = Agent(
agent_name="Developer",
agent_description="Software developer for implementation",
@@ -100,7 +102,7 @@ def create_worker_agents() -> List[Agent]:
max_loops=1,
system_prompt="You are a Software Developer. Design and implement software solutions.",
)
-
+
marketer = Agent(
agent_name="Marketer",
agent_description="Marketing specialist for strategy",
@@ -108,17 +110,17 @@ def create_worker_agents() -> List[Agent]:
max_loops=1,
system_prompt="You are a Marketing Specialist. Develop marketing strategies and campaigns.",
)
-
+
return [researcher, developer, marketer]
def run_board_example() -> None:
"""Run a Board of Directors example."""
-
+
# Create board members and worker agents
board_members = create_board_members()
worker_agents = create_worker_agents()
-
+
# Create the Board of Directors swarm
board_swarm = BoardOfDirectorsSwarm(
name="Executive_Board",
@@ -128,23 +130,23 @@ def run_board_example() -> None:
verbose=True,
decision_threshold=0.6,
)
-
+
# Define task
task = """
Develop a strategy for launching a new AI-powered product in the market.
Include market research, technical planning, marketing strategy, and financial projections.
"""
-
+
# Execute the task
result = board_swarm.run(task=task)
-
+
print("Task completed successfully!")
print(f"Result: {result}")
def run_simple_example() -> None:
"""Run a simple Board of Directors example."""
-
+
# Create simple agents
analyst = Agent(
agent_name="Analyst",
@@ -152,43 +154,47 @@ def run_simple_example() -> None:
model_name="gpt-4o-mini",
max_loops=1,
)
-
+
writer = Agent(
agent_name="Writer",
agent_description="Content writer",
model_name="gpt-4o-mini",
max_loops=1,
)
-
+
# Create swarm with default settings
board_swarm = BoardOfDirectorsSwarm(
name="Simple_Board",
agents=[analyst, writer],
verbose=True,
)
-
+
# Execute simple task
- task = "Analyze current market trends and create a summary report."
+ task = (
+ "Analyze current market trends and create a summary report."
+ )
result = board_swarm.run(task=task)
-
+
print("Simple example completed!")
print(f"Result: {result}")
def main() -> None:
"""Main function to run the examples."""
-
+
if not os.getenv("OPENAI_API_KEY"):
- print("Warning: OPENAI_API_KEY not set. Example may not work.")
+ print(
+ "Warning: OPENAI_API_KEY not set. Example may not work."
+ )
return
-
+
try:
print("Running simple Board of Directors example...")
run_simple_example()
-
+
print("\nRunning comprehensive Board of Directors example...")
run_board_example()
-
+
except Exception as e:
print(f"Error: {e}")
diff --git a/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/debug_dashboard.py b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/debug_dashboard.py
new file mode 100644
index 00000000..d3f3f389
--- /dev/null
+++ b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/debug_dashboard.py
@@ -0,0 +1,70 @@
+"""
+Debug script for the Arasaka Dashboard to test agent output display.
+"""
+
+from swarms.structs.hiearchical_swarm import HierarchicalSwarm
+from swarms.structs.agent import Agent
+
+
+def debug_dashboard():
+ """Debug the dashboard functionality."""
+
+    print("Starting dashboard debug...")
+
+ # Create simple agents with clear names
+ agent1 = Agent(
+ agent_name="Research-Agent",
+ agent_description="A research agent for testing",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ agent2 = Agent(
+ agent_name="Analysis-Agent",
+ agent_description="An analysis agent for testing",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+    print(
+        f"Created agents: {agent1.agent_name}, {agent2.agent_name}"
+    )
+
+ # Create swarm with dashboard
+ swarm = HierarchicalSwarm(
+ name="Debug Swarm",
+ description="A test swarm for debugging dashboard functionality",
+ agents=[agent1, agent2],
+ max_loops=1,
+ interactive=True,
+ verbose=True,
+ )
+
+    print("Created swarm with dashboard")
+    print("Dashboard should now show agents in PENDING status")
+
+ # Wait a moment to see the initial dashboard
+ import time
+
+ time.sleep(3)
+
+    print("\nStarting swarm execution...")
+
+ # Run with a simple task
+ result = swarm.run(
+ task="Create a brief summary of machine learning"
+ )
+
+    print("\nDebug completed!")
+    print("Final result preview:")
+ print(
+ str(result)[:300] + "..."
+ if len(str(result)) > 300
+ else str(result)
+ )
+
+
+if __name__ == "__main__":
+ debug_dashboard()
diff --git a/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/hiearchical_swarm_example.py b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/hiearchical_swarm_example.py
new file mode 100644
index 00000000..fefe856b
--- /dev/null
+++ b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/hiearchical_swarm_example.py
@@ -0,0 +1,71 @@
+"""
+Hierarchical Swarm with Arasaka Dashboard Example
+
+This example demonstrates the new interactive dashboard functionality for the
+hierarchical swarm, featuring a futuristic Arasaka Corporation-style interface
+with red and black color scheme.
+"""
+
+from swarms.structs.hiearchical_swarm import HierarchicalSwarm
+from swarms.structs.agent import Agent
+
+
+def main():
+ """
+ Demonstrate the hierarchical swarm with interactive dashboard.
+ """
+    print("Initializing Swarms Corporation Hierarchical Swarm...")
+
+ # Create specialized agents
+ research_agent = Agent(
+ agent_name="Research-Analyst",
+ agent_description="Specialized in comprehensive research and data gathering",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ analysis_agent = Agent(
+ agent_name="Data-Analyst",
+ agent_description="Expert in data analysis and pattern recognition",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ strategy_agent = Agent(
+ agent_name="Strategy-Consultant",
+ agent_description="Specialized in strategic planning and recommendations",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ # Create hierarchical swarm with interactive dashboard
+ swarm = HierarchicalSwarm(
+ name="Swarms Corporation Operations",
+ description="Enterprise-grade hierarchical swarm for complex task execution",
+ agents=[research_agent, analysis_agent, strategy_agent],
+ max_loops=2,
+ interactive=True, # Enable the Arasaka dashboard
+ verbose=True,
+ )
+
+    print("\nSwarm initialized successfully!")
+    print(
+        "Interactive dashboard will be displayed during execution."
+    )
+    print(
+        "The swarm will prompt you for a task when you call swarm.run()"
+    )
+
+ # Run the swarm (task will be prompted interactively)
+ result = swarm.run()
+
+    print("\nSwarm execution completed!")
+    print("Final result:")
+ print(result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/test_dashboard.py b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/test_dashboard.py
new file mode 100644
index 00000000..433f0e14
--- /dev/null
+++ b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/test_dashboard.py
@@ -0,0 +1,56 @@
+"""
+Test script for the Arasaka Dashboard functionality.
+"""
+
+from swarms.structs.hiearchical_swarm import HierarchicalSwarm
+from swarms.structs.agent import Agent
+
+
+def test_dashboard():
+ """Test the dashboard functionality with a simple task."""
+
+ # Create simple agents
+ agent1 = Agent(
+ agent_name="Test-Agent-1",
+ agent_description="A test agent for dashboard verification",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ agent2 = Agent(
+ agent_name="Test-Agent-2",
+ agent_description="Another test agent for dashboard verification",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ # Create swarm with dashboard
+ swarm = HierarchicalSwarm(
+ name="Dashboard Test Swarm",
+ agents=[agent1, agent2],
+ max_loops=1,
+ interactive=True,
+ verbose=True,
+ )
+
+    print("Testing Arasaka Dashboard...")
+    print("Dashboard should appear and prompt for task input")
+
+ # Run with a simple task
+ result = swarm.run(
+ task="Create a simple summary of artificial intelligence trends"
+ )
+
+    print("\nTest completed!")
+    print("Result preview:")
+ print(
+ str(result)[:500] + "..."
+ if len(str(result)) > 500
+ else str(result)
+ )
+
+
+if __name__ == "__main__":
+ test_dashboard()
diff --git a/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/test_full_output.py b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/test_full_output.py
new file mode 100644
index 00000000..a281b7dc
--- /dev/null
+++ b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/test_full_output.py
@@ -0,0 +1,56 @@
+"""
+Test script for full agent output display in the Arasaka Dashboard.
+"""
+
+from swarms.structs.hiearchical_swarm import HierarchicalSwarm
+from swarms.structs.agent import Agent
+
+
+def test_full_output():
+ """Test the full output display functionality."""
+
+    print("Testing full agent output display...")
+
+ # Create agents that will produce substantial output
+ agent1 = Agent(
+ agent_name="Research-Agent",
+ agent_description="A research agent that produces detailed output",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ agent2 = Agent(
+ agent_name="Analysis-Agent",
+ agent_description="An analysis agent that provides comprehensive analysis",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ # Create swarm with dashboard and detailed view enabled
+ swarm = HierarchicalSwarm(
+ name="Full Output Test Swarm",
+ description="A test swarm for verifying full agent output display",
+ agents=[agent1, agent2],
+ max_loops=1,
+ interactive=True,
+ verbose=True,
+ )
+
+    print("Created swarm with detailed view enabled")
+    print(
+        "Dashboard should show full agent outputs without truncation"
+    )
+
+ # Run with a task that will generate substantial output
+ swarm.run(
+ task="Provide a comprehensive analysis of artificial intelligence trends in 2024, including detailed explanations of each trend"
+ )
+
+    print("\nTest completed!")
+    print("Check the dashboard for full agent outputs")
+
+
+if __name__ == "__main__":
+ test_full_output()
diff --git a/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/test_multi_loop.py b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/test_multi_loop.py
new file mode 100644
index 00000000..045ef86e
--- /dev/null
+++ b/examples/multi_agent/hiearchical_swarm/hiearchical_swarm_ui/test_multi_loop.py
@@ -0,0 +1,57 @@
+"""
+Test script for multi-loop agent tracking in the Arasaka Dashboard.
+"""
+
+from swarms.structs.hiearchical_swarm import HierarchicalSwarm
+from swarms.structs.agent import Agent
+
+
+def test_multi_loop():
+ """Test the multi-loop agent tracking functionality."""
+
+    print("Testing multi-loop agent tracking...")
+
+ # Create agents
+ agent1 = Agent(
+ agent_name="Research-Agent",
+ agent_description="A research agent for multi-loop testing",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ agent2 = Agent(
+ agent_name="Analysis-Agent",
+ agent_description="An analysis agent for multi-loop testing",
+ model_name="gpt-4o-mini",
+ max_loops=1,
+ verbose=False,
+ )
+
+ # Create swarm with multiple loops
+ swarm = HierarchicalSwarm(
+ name="Multi-Loop Test Swarm",
+ description="A test swarm for verifying multi-loop agent tracking",
+ agents=[agent1, agent2],
+ max_loops=3, # Multiple loops to test history tracking
+ interactive=True,
+ verbose=True,
+ )
+
+    print("Created swarm with multi-loop tracking")
+    print(
+        "Dashboard should show agent outputs across multiple loops"
+    )
+    print("Each loop will add new rows to the monitoring matrix")
+
+ # Run with a task that will benefit from multiple iterations
+ swarm.run(
+ task="Analyze the impact of artificial intelligence on healthcare, then refine the analysis with additional insights, and finally provide actionable recommendations"
+ )
+
+    print("\nMulti-loop test completed!")
+    print("Check the dashboard for agent outputs across all loops")
+
+
+if __name__ == "__main__":
+ test_multi_loop()
diff --git a/examples/single_agent/llms/gpt_oss_examples/gpt_os_agent.py b/examples/single_agent/llms/gpt_oss_examples/gpt_os_agent.py
new file mode 100644
index 00000000..d4d975de
--- /dev/null
+++ b/examples/single_agent/llms/gpt_oss_examples/gpt_os_agent.py
@@ -0,0 +1,44 @@
+from transformers import pipeline
+from swarms import Agent
+
+class GPTOSS:
+ def __init__(
+ self,
+ model_id: str = "openai/gpt-oss-20b",
+ max_new_tokens: int = 256,
+ temperature: int = 0.7,
+ system_prompt: str = "You are a helpful assistant.",
+ ):
+ self.max_new_tokens = max_new_tokens
+ self.temperature = temperature
+ self.system_prompt = system_prompt
+ self.model_id = model_id
+
+ self.pipe = pipeline(
+ "text-generation",
+ model=model_id,
+ torch_dtype="auto",
+ device_map="auto",
+ temperature=temperature,
+ )
+
+ def run(self, task: str):
+ self.messages = [
+ {"role": "system", "content": self.system_prompt},
+ {"role": "user", "content": task},
+ ]
+
+ outputs = self.pipe(
+ self.messages,
+ max_new_tokens=self.max_new_tokens,
+ )
+
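+        # The pipeline returns the full chat history in "generated_text";
+        # the final entry is the assistant's reply (a role/content dict)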
+ return outputs[0]["generated_text"][-1]
+
+agent = Agent(
+ name="GPT-OSS-Agent",
+ llm=GPTOSS(),
+ system_prompt="You are a helpful assistant.",
+)
+
+agent.run(task="Explain quantum mechanics clearly and concisely.")
diff --git a/examples/single_agent/llms/gpt_oss_examples/groq_gpt_oss_models.py b/examples/single_agent/llms/gpt_oss_examples/groq_gpt_oss_models.py
new file mode 100644
index 00000000..6b27a321
--- /dev/null
+++ b/examples/single_agent/llms/gpt_oss_examples/groq_gpt_oss_models.py
@@ -0,0 +1,49 @@
+from swarms import Agent
+
+# Initialize the agent
+agent = Agent(
+ agent_name="Quantitative-Trading-Agent",
+ agent_description="Advanced quantitative trading and algorithmic analysis agent",
+ system_prompt="""You are an expert quantitative trading agent with deep expertise in:
+ - Algorithmic trading strategies and implementation
+ - Statistical arbitrage and market making
+ - Risk management and portfolio optimization
+ - High-frequency trading systems
+ - Market microstructure analysis
+ - Quantitative research methodologies
+ - Financial mathematics and stochastic processes
+ - Machine learning applications in trading
+
+ Your core responsibilities include:
+ 1. Developing and backtesting trading strategies
+ 2. Analyzing market data and identifying alpha opportunities
+ 3. Implementing risk management frameworks
+ 4. Optimizing portfolio allocations
+ 5. Conducting quantitative research
+ 6. Monitoring market microstructure
+ 7. Evaluating trading system performance
+
+ You maintain strict adherence to:
+ - Mathematical rigor in all analyses
+ - Statistical significance in strategy development
+ - Risk-adjusted return optimization
+ - Market impact minimization
+ - Regulatory compliance
+ - Transaction cost analysis
+ - Performance attribution
+
+ You communicate in precise, technical terms while maintaining clarity for stakeholders.""",
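+    # Groq-hosted GPT-OSS 120B model; interactive chat with streaming output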
+ model_name="groq/openai/gpt-oss-120b",
+ dynamic_temperature_enabled=True,
+ output_type="str-all-except-first",
+ max_loops="auto",
+ interactive=True,
+ no_reasoning_prompt=True,
+ streaming_on=True,
+ # dashboard=True
+)
+
+out = agent.run(
+ task="What are the best top 3 etfs for gold coverage?"
+)
+print(out)
diff --git a/hs_interactive.py b/hs_interactive.py
new file mode 100644
index 00000000..698e26df
--- /dev/null
+++ b/hs_interactive.py
@@ -0,0 +1,24 @@
+from swarms import HierarchicalSwarm, Agent
+
+# Create agents
+research_agent = Agent(
+ agent_name="Research-Analyst", model_name="gpt-4.1", print_on=True
+)
+analysis_agent = Agent(
+ agent_name="Data-Analyst", model_name="gpt-4.1", print_on=True
+)
+
+# Create swarm with interactive dashboard
+swarm = HierarchicalSwarm(
+ agents=[research_agent, analysis_agent],
+ max_loops=1,
+ interactive=True, # Enable the Arasaka dashboard
+ # director_reasoning_enabled=False,
+ # director_reasoning_model_name="groq/moonshotai/kimi-k2-instruct",
+ multi_agent_prompt_improvements=True,
+)
+
+# Run swarm (task will be prompted interactively)
+result = swarm.run("what are the best nanomachine research papers?")
+
+print(result)
diff --git a/swarms/prompts/reasoning_prompt.py b/swarms/prompts/reasoning_prompt.py
index b929d5b6..24f810cd 100644
--- a/swarms/prompts/reasoning_prompt.py
+++ b/swarms/prompts/reasoning_prompt.py
@@ -7,3 +7,8 @@ The reasoning process and the final answer should be distinctly enclosed within
It is essential to output multiple tags to reflect the depth of thought and exploration involved in addressing the task. The Assistant should strive to think deeply and thoroughly about the question, ensuring that all relevant aspects are considered before arriving at a conclusion.
"""
+
+
+INTERNAL_MONOLGUE_PROMPT = """
+You are an introspective reasoning engine whose sole task is to explore and unpack any problem or task without ever delivering a final solution. Whenever you process a prompt, you must envelop every discrete insight, question, or inference inside tags, using as many of these tags (nested or sequential) as needed to reveal your full chain of thought. Begin each session by rephrasing the problem in your own words to ensure you've captured its goals, inputs, outputs, and constraints, entirely within tagged blocks, and identify any ambiguities or assumptions you must clarify. Then decompose the task into sub-questions or logical components, examining multiple approaches, edge cases, and trade-offs, all inside further tags. Continue layering your reasoning, pausing at each step to ask yourself "What else might I consider?" or "Is there an implicit assumption here?", always inside tags. Never move beyond analysis: do not generate outlines, pseudocode, or answers; only think. If you find yourself tempted to propose a solution, immediately halt and circle back into deeper tags. Your objective is total transparency of reasoning and exhaustive exploration of the problem space; defer any answer generation until explicitly instructed otherwise.
+"""
diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py
index b52f9c1e..4d2014cd 100644
--- a/swarms/structs/__init__.py
+++ b/swarms/structs/__init__.py
@@ -94,6 +94,7 @@ from swarms.structs.swarming_architectures import (
star_swarm,
)
+
__all__ = [
"Agent",
"BaseStructure",
diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py
index 3f726d24..aaf8d028 100644
--- a/swarms/structs/agent.py
+++ b/swarms/structs/agent.py
@@ -102,7 +102,7 @@ def parse_done_token(response: str) -> bool:
# Agent ID generator
def agent_id():
"""Generate an agent id"""
- return uuid.uuid4().hex
+ return f"agent-{uuid.uuid4().hex}"
# Agent output types
@@ -673,7 +673,7 @@ class Agent:
# Initialize the short term memory
memory = Conversation(
- system_prompt=prompt,
+ name=f"{self.agent_name}_conversation",
user=self.user_name,
rules=self.rules,
token_count=(
@@ -693,6 +693,12 @@ class Agent:
),
)
+ # Add the system prompt to the conversation
+ memory.add(
+ role="System",
+ content=prompt,
+ )
+
return memory
def agent_output_model(self):
@@ -861,7 +867,9 @@ class Agent:
return tools
except AgentMCPConnectionError as e:
- logger.error(f"Error in MCP connection: {e}")
+ logger.error(
+ f"Error in MCP connection: {e} Traceback: {traceback.format_exc()}"
+ )
raise e
def setup_config(self):
@@ -1172,7 +1180,8 @@ class Agent:
if self.print_on is True:
if isinstance(response, list):
self.pretty_print(
- f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n Output: {format_data_structure(response)} ",
+ # f"Structured Output - Attempting Function Call Execution [{time.strftime('%H:%M:%S')}] \n\n Output: {format_data_structure(response)} ",
+ f"[Structured Output] [Time: {time.strftime('%H:%M:%S')}] \n\n {json.dumps(response, indent=4)}",
loop_count,
)
elif self.streaming_on:
@@ -2457,6 +2466,10 @@ class Agent:
Returns:
Dict[str, Any]: A dictionary representation of the class attributes.
"""
+
+ # Remove the llm object from the dictionary
+ self.__dict__.pop("llm", None)
+
return {
attr_name: self._serialize_attr(attr_name, attr_value)
for attr_name, attr_value in self.__dict__.items()
diff --git a/swarms/structs/batch_agent_execution.py b/swarms/structs/batch_agent_execution.py
index 2b74a9e7..7b2a926d 100644
--- a/swarms/structs/batch_agent_execution.py
+++ b/swarms/structs/batch_agent_execution.py
@@ -1,11 +1,16 @@
+import concurrent.futures
from swarms.structs.agent import Agent
-from typing import List
+from typing import List, Union, Callable
+import os
from swarms.utils.formatter import formatter
+from loguru import logger
+import traceback
def batch_agent_execution(
- agents: List[Agent],
- tasks: List[str],
+ agents: List[Union[Agent, Callable]],
+ tasks: List[str] = None,
+ imgs: List[str] = None,
):
"""
Execute a batch of agents on a list of tasks concurrently.
@@ -20,45 +25,58 @@ def batch_agent_execution(
Raises:
ValueError: If number of agents doesn't match number of tasks
"""
- if len(agents) != len(tasks):
- raise ValueError(
- "Number of agents must match number of tasks"
- )
+ try:
- import concurrent.futures
- import multiprocessing
+ logger.info(
+ f"Executing {len(agents)} agents on {len(tasks)} tasks"
+ )
- results = []
+ if len(agents) != len(tasks):
+ raise ValueError(
+ "Number of agents must match number of tasks"
+ )
- # Calculate max workers as 90% of available CPU cores
- max_workers = max(1, int(multiprocessing.cpu_count() * 0.9))
+ results = []
- formatter.print_panel(
- f"Executing {len(agents)} agents on {len(tasks)} tasks using {max_workers} workers"
- )
+ # Calculate max workers as 90% of available CPU cores
+ max_workers = max(1, int(os.cpu_count() * 0.9))
- with concurrent.futures.ThreadPoolExecutor(
- max_workers=max_workers
- ) as executor:
- # Submit all tasks to the executor
- future_to_task = {
- executor.submit(agent.run, task): (agent, task)
- for agent, task in zip(agents, tasks)
- }
+ formatter.print_panel(
+ f"Executing {len(agents)} agents on {len(tasks)} tasks using {max_workers} workers"
+ )
- # Collect results as they complete
- for future in concurrent.futures.as_completed(future_to_task):
- agent, task = future_to_task[future]
- try:
- result = future.result()
- results.append(result)
- except Exception as e:
- print(
- f"Task failed for agent {agent.agent_name}: {str(e)}"
+ with concurrent.futures.ThreadPoolExecutor(
+ max_workers=max_workers
+ ) as executor:
+ # Submit all tasks to the executor
+ future_to_task = {
+ executor.submit(agent.run, task, img): (
+ agent,
+ task,
+ img,
+ )
- results.append(None)
+ for agent, task, img in zip(agents, tasks, imgs or [None] * len(agents))
+ }
+
+ # Collect results as they complete
+ for future in concurrent.futures.as_completed(
+ future_to_task
+ ):
+ agent, task, _ = future_to_task[future]
+ try:
+ result = future.result()
+ results.append(result)
+ except Exception as e:
+ print(
+ f"Task failed for agent {agent.agent_name}: {str(e)}"
+ )
+ results.append(None)
- # Wait for all futures to complete before returning
- concurrent.futures.wait(future_to_task.keys())
+ # Wait for all futures to complete before returning
+ concurrent.futures.wait(future_to_task.keys())
- return results
+ return results
+ except Exception as e:
+ log = f"Batch agent execution failed Error: {str(e)} Traceback: {traceback.format_exc()}"
+ logger.error(log)
+ raise e
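A hedged usage sketch for the updated `batch_agent_execution` signature (`agents`, `tasks`, optional `imgs`); the model name is a placeholder and API credentials are assumed to be configured:

```python
from swarms import Agent
from swarms.structs.batch_agent_execution import batch_agent_execution

agents = [
    Agent(
        agent_name=f"Researcher-{i}",
        model_name="gpt-4o-mini",
        max_loops=1,
    )
    for i in range(2)
]

tasks = [
    "Summarize common approaches to LLM routing",
    "List three popular vector databases and their trade-offs",
]

# imgs is optional; pass a list of image paths (one per task) if needed.
results = batch_agent_execution(agents=agents, tasks=tasks)
print(results)
```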
diff --git a/swarms/structs/board_of_directors_swarm.py b/swarms/structs/board_of_directors_swarm.py
index f80fb4a4..7dbf0d34 100644
--- a/swarms/structs/board_of_directors_swarm.py
+++ b/swarms/structs/board_of_directors_swarm.py
@@ -28,8 +28,7 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass, field
from enum import Enum
from functools import lru_cache
-from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Union, Tuple
+from typing import Any, Callable, Dict, List, Optional, Union
from loguru import logger
from pydantic import BaseModel, Field
@@ -38,30 +37,35 @@ from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.conversation import Conversation
from swarms.structs.ma_utils import list_all_agents
-from swarms.utils.history_output_formatter import history_output_formatter
+from swarms.utils.history_output_formatter import (
+ history_output_formatter,
+)
from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.output_types import OutputType
# Initialize logger for Board of Directors swarm
-board_logger = initialize_logger(log_folder="board_of_directors_swarm")
+board_logger = initialize_logger(
+ log_folder="board_of_directors_swarm"
+)
# ============================================================================
# BOARD OF DIRECTORS CONFIGURATION
# ============================================================================
+
class BoardFeatureStatus(str, Enum):
"""Enumeration of Board of Directors feature status.
-
+
This enum defines the possible states of the Board of Directors feature
within the Swarms Framework.
-
+
Attributes:
ENABLED: Feature is explicitly enabled
DISABLED: Feature is explicitly disabled
AUTO: Feature state is determined automatically
"""
-
+
ENABLED = "enabled"
DISABLED = "disabled"
AUTO = "auto"
@@ -70,10 +74,10 @@ class BoardFeatureStatus(str, Enum):
class BoardConfigModel(BaseModel):
"""
Configuration model for Board of Directors feature.
-
+
This model defines all configurable parameters for the Board of Directors
feature, including feature status, board composition, and operational settings.
-
+
Attributes:
board_feature_enabled: Whether the Board of Directors feature is enabled globally
default_board_size: Default number of board members when creating a new board
@@ -86,69 +90,69 @@ class BoardConfigModel(BaseModel):
auto_fallback_to_director: Automatically fall back to Director mode if Board fails
custom_board_templates: Custom board templates for different use cases
"""
-
+
# Feature control
board_feature_enabled: bool = Field(
default=False,
- description="Whether the Board of Directors feature is enabled globally."
+ description="Whether the Board of Directors feature is enabled globally.",
)
-
+
# Board composition
default_board_size: int = Field(
default=3,
ge=1,
le=10,
- description="Default number of board members when creating a new board."
+ description="Default number of board members when creating a new board.",
)
-
+
# Operational settings
decision_threshold: float = Field(
default=0.6,
ge=0.0,
le=1.0,
- description="Threshold for majority decisions (0.0-1.0)."
+ description="Threshold for majority decisions (0.0-1.0).",
)
-
+
enable_voting: bool = Field(
default=True,
- description="Enable voting mechanisms for board decisions."
+ description="Enable voting mechanisms for board decisions.",
)
-
+
enable_consensus: bool = Field(
default=True,
- description="Enable consensus-building mechanisms."
+ description="Enable consensus-building mechanisms.",
)
-
+
# Model settings
default_board_model: str = Field(
default="gpt-4o-mini",
- description="Default model for board member agents."
+ description="Default model for board member agents.",
)
-
+
# Logging and monitoring
verbose_logging: bool = Field(
default=False,
- description="Enable verbose logging for board operations."
+ description="Enable verbose logging for board operations.",
)
-
+
# Performance settings
max_board_meeting_duration: int = Field(
default=300,
ge=60,
le=3600,
- description="Maximum duration for board meetings in seconds."
+ description="Maximum duration for board meetings in seconds.",
)
-
+
# Integration settings
auto_fallback_to_director: bool = Field(
default=True,
- description="Automatically fall back to Director mode if Board fails."
+ description="Automatically fall back to Director mode if Board fails.",
)
-
+
# Custom board templates
custom_board_templates: Dict[str, Dict[str, Any]] = Field(
default_factory=dict,
- description="Custom board templates for different use cases."
+ description="Custom board templates for different use cases.",
)
@@ -156,118 +160,145 @@ class BoardConfigModel(BaseModel):
class BoardConfig:
"""
Board of Directors configuration manager.
-
+
This class manages the configuration for the Board of Directors feature,
including loading from environment variables, configuration files, and
providing default values.
-
+
Attributes:
config_file_path: Optional path to configuration file
config_data: Optional configuration data dictionary
config: The current configuration model instance
"""
-
+
config_file_path: Optional[str] = None
config_data: Optional[Dict[str, Any]] = None
config: BoardConfigModel = field(init=False)
-
+
def __post_init__(self) -> None:
"""Initialize the configuration after object creation."""
self._load_config()
-
+
def _load_config(self) -> None:
"""
Load configuration from various sources.
-
+
Priority order:
1. Environment variables
2. Configuration file
3. Default values
-
+
Raises:
Exception: If configuration loading fails
"""
try:
# Start with default configuration
self.config = BoardConfigModel()
-
+
# Load from configuration file if specified
- if self.config_file_path and os.path.exists(self.config_file_path):
+ if self.config_file_path and os.path.exists(
+ self.config_file_path
+ ):
self._load_from_file()
-
+
# Override with environment variables
self._load_from_environment()
-
+
# Override with explicit config data
if self.config_data:
self._load_from_dict(self.config_data)
-
+
except Exception as e:
- logger.error(f"Failed to load Board of Directors configuration: {str(e)}")
+ logger.error(
+ f"Failed to load Board of Directors configuration: {str(e)}"
+ )
raise
-
+
def _load_from_file(self) -> None:
"""
Load configuration from file.
-
+
Raises:
Exception: If file loading fails
"""
try:
import yaml
- with open(self.config_file_path, 'r') as f:
+
+ with open(self.config_file_path, "r") as f:
file_config = yaml.safe_load(f)
self._load_from_dict(file_config)
- logger.info(f"Loaded Board of Directors config from: {self.config_file_path}")
+ logger.info(
+ f"Loaded Board of Directors config from: {self.config_file_path}"
+ )
except Exception as e:
- logger.warning(f"Failed to load config file {self.config_file_path}: {e}")
+ logger.warning(
+ f"Failed to load config file {self.config_file_path}: {e}"
+ )
raise
-
+
def _load_from_environment(self) -> None:
"""
Load configuration from environment variables.
-
+
This method maps environment variables to configuration parameters
and handles type conversion appropriately.
"""
env_mappings = {
- 'SWARMS_BOARD_FEATURE_ENABLED': 'board_feature_enabled',
- 'SWARMS_BOARD_DEFAULT_SIZE': 'default_board_size',
- 'SWARMS_BOARD_DECISION_THRESHOLD': 'decision_threshold',
- 'SWARMS_BOARD_ENABLE_VOTING': 'enable_voting',
- 'SWARMS_BOARD_ENABLE_CONSENSUS': 'enable_consensus',
- 'SWARMS_BOARD_DEFAULT_MODEL': 'default_board_model',
- 'SWARMS_BOARD_VERBOSE_LOGGING': 'verbose_logging',
- 'SWARMS_BOARD_MAX_MEETING_DURATION': 'max_board_meeting_duration',
- 'SWARMS_BOARD_AUTO_FALLBACK': 'auto_fallback_to_director',
+ "SWARMS_BOARD_FEATURE_ENABLED": "board_feature_enabled",
+ "SWARMS_BOARD_DEFAULT_SIZE": "default_board_size",
+ "SWARMS_BOARD_DECISION_THRESHOLD": "decision_threshold",
+ "SWARMS_BOARD_ENABLE_VOTING": "enable_voting",
+ "SWARMS_BOARD_ENABLE_CONSENSUS": "enable_consensus",
+ "SWARMS_BOARD_DEFAULT_MODEL": "default_board_model",
+ "SWARMS_BOARD_VERBOSE_LOGGING": "verbose_logging",
+ "SWARMS_BOARD_MAX_MEETING_DURATION": "max_board_meeting_duration",
+ "SWARMS_BOARD_AUTO_FALLBACK": "auto_fallback_to_director",
}
-
+
for env_var, config_key in env_mappings.items():
value = os.getenv(env_var)
if value is not None:
try:
# Convert string values to appropriate types
- if config_key in ['board_feature_enabled', 'enable_voting', 'enable_consensus', 'verbose_logging', 'auto_fallback_to_director']:
- converted_value = value.lower() in ['true', '1', 'yes', 'on']
- elif config_key in ['default_board_size', 'max_board_meeting_duration']:
+ if config_key in [
+ "board_feature_enabled",
+ "enable_voting",
+ "enable_consensus",
+ "verbose_logging",
+ "auto_fallback_to_director",
+ ]:
+ converted_value = value.lower() in [
+ "true",
+ "1",
+ "yes",
+ "on",
+ ]
+ elif config_key in [
+ "default_board_size",
+ "max_board_meeting_duration",
+ ]:
converted_value = int(value)
- elif config_key in ['decision_threshold']:
+ elif config_key in ["decision_threshold"]:
converted_value = float(value)
else:
converted_value = value
-
+
setattr(self.config, config_key, converted_value)
- logger.debug(f"Loaded {config_key} from environment: {converted_value}")
+ logger.debug(
+ f"Loaded {config_key} from environment: {converted_value}"
+ )
except (ValueError, TypeError) as e:
- logger.warning(f"Failed to parse environment variable {env_var}: {e}")
-
+ logger.warning(
+ f"Failed to parse environment variable {env_var}: {e}"
+ )
+
def _load_from_dict(self, config_dict: Dict[str, Any]) -> None:
"""
Load configuration from dictionary.
-
+
Args:
config_dict: Dictionary containing configuration values
-
+
Raises:
ValueError: If configuration values are invalid
"""
@@ -277,33 +308,35 @@ class BoardConfig:
setattr(self.config, key, value)
except (ValueError, TypeError) as e:
logger.warning(f"Failed to set config {key}: {e}")
- raise ValueError(f"Invalid configuration value for {key}: {e}")
-
+ raise ValueError(
+ f"Invalid configuration value for {key}: {e}"
+ )
+
def is_enabled(self) -> bool:
"""
Check if the Board of Directors feature is enabled.
-
+
Returns:
bool: True if the feature is enabled, False otherwise
"""
return self.config.board_feature_enabled
-
+
def get_config(self) -> BoardConfigModel:
"""
Get the current configuration.
-
+
Returns:
BoardConfigModel: The current configuration
"""
return self.config
-
+
def update_config(self, updates: Dict[str, Any]) -> None:
"""
Update the configuration with new values.
-
+
Args:
updates: Dictionary of configuration updates
-
+
Raises:
ValueError: If any update values are invalid
"""
@@ -312,119 +345,192 @@ class BoardConfig:
except ValueError as e:
logger.error(f"Failed to update configuration: {e}")
raise
-
+
def save_config(self, file_path: Optional[str] = None) -> None:
"""
Save the current configuration to a file.
-
+
Args:
file_path: Optional file path to save to (uses config_file_path if not provided)
-
+
Raises:
Exception: If saving fails
"""
save_path = file_path or self.config_file_path
if not save_path:
- logger.warning("No file path specified for saving configuration")
+ logger.warning(
+ "No file path specified for saving configuration"
+ )
return
-
+
try:
import yaml
+
# Convert config to dictionary
config_dict = self.config.model_dump()
-
+
# Ensure directory exists
os.makedirs(os.path.dirname(save_path), exist_ok=True)
-
- with open(save_path, 'w') as f:
- yaml.dump(config_dict, f, default_flow_style=False, indent=2)
-
- logger.info(f"Saved Board of Directors config to: {save_path}")
+
+ with open(save_path, "w") as f:
+ yaml.dump(
+ config_dict, f, default_flow_style=False, indent=2
+ )
+
+ logger.info(
+ f"Saved Board of Directors config to: {save_path}"
+ )
except Exception as e:
logger.error(f"Failed to save config to {save_path}: {e}")
raise
-
+
@lru_cache(maxsize=128)
- def get_default_board_template(self, template_name: str = "standard") -> Dict[str, Any]:
+ def get_default_board_template(
+ self, template_name: str = "standard"
+ ) -> Dict[str, Any]:
"""
Get a default board template.
-
+
This method provides predefined board templates for common use cases.
Templates are cached for improved performance.
-
+
Args:
template_name: Name of the template to retrieve
-
+
Returns:
Dict[str, Any]: Board template configuration
"""
templates = {
"standard": {
"roles": [
- {"name": "Chairman", "weight": 1.5, "expertise": ["leadership", "strategy"]},
- {"name": "Vice-Chairman", "weight": 1.2, "expertise": ["operations", "coordination"]},
- {"name": "Secretary", "weight": 1.0, "expertise": ["documentation", "communication"]},
+ {
+ "name": "Chairman",
+ "weight": 1.5,
+ "expertise": ["leadership", "strategy"],
+ },
+ {
+ "name": "Vice-Chairman",
+ "weight": 1.2,
+ "expertise": ["operations", "coordination"],
+ },
+ {
+ "name": "Secretary",
+ "weight": 1.0,
+ "expertise": [
+ "documentation",
+ "communication",
+ ],
+ },
]
},
"executive": {
"roles": [
- {"name": "CEO", "weight": 2.0, "expertise": ["executive_leadership", "strategy"]},
- {"name": "CFO", "weight": 1.5, "expertise": ["finance", "risk_management"]},
- {"name": "CTO", "weight": 1.5, "expertise": ["technology", "innovation"]},
- {"name": "COO", "weight": 1.3, "expertise": ["operations", "efficiency"]},
+ {
+ "name": "CEO",
+ "weight": 2.0,
+ "expertise": [
+ "executive_leadership",
+ "strategy",
+ ],
+ },
+ {
+ "name": "CFO",
+ "weight": 1.5,
+ "expertise": ["finance", "risk_management"],
+ },
+ {
+ "name": "CTO",
+ "weight": 1.5,
+ "expertise": ["technology", "innovation"],
+ },
+ {
+ "name": "COO",
+ "weight": 1.3,
+ "expertise": ["operations", "efficiency"],
+ },
]
},
"advisory": {
"roles": [
- {"name": "Lead_Advisor", "weight": 1.3, "expertise": ["strategy", "consulting"]},
- {"name": "Technical_Advisor", "weight": 1.2, "expertise": ["technology", "architecture"]},
- {"name": "Business_Advisor", "weight": 1.2, "expertise": ["business", "market_analysis"]},
- {"name": "Legal_Advisor", "weight": 1.1, "expertise": ["legal", "compliance"]},
+ {
+ "name": "Lead_Advisor",
+ "weight": 1.3,
+ "expertise": ["strategy", "consulting"],
+ },
+ {
+ "name": "Technical_Advisor",
+ "weight": 1.2,
+ "expertise": ["technology", "architecture"],
+ },
+ {
+ "name": "Business_Advisor",
+ "weight": 1.2,
+ "expertise": ["business", "market_analysis"],
+ },
+ {
+ "name": "Legal_Advisor",
+ "weight": 1.1,
+ "expertise": ["legal", "compliance"],
+ },
]
},
"minimal": {
"roles": [
- {"name": "Chairman", "weight": 1.0, "expertise": ["leadership"]},
- {"name": "Member", "weight": 1.0, "expertise": ["general"]},
+ {
+ "name": "Chairman",
+ "weight": 1.0,
+ "expertise": ["leadership"],
+ },
+ {
+ "name": "Member",
+ "weight": 1.0,
+ "expertise": ["general"],
+ },
]
- }
+ },
}
-
+
# Check custom templates first
if template_name in self.config.custom_board_templates:
return self.config.custom_board_templates[template_name]
-
+
# Return standard template if requested template not found
return templates.get(template_name, templates["standard"])
-
+
def validate_config(self) -> List[str]:
"""
Validate the current configuration.
-
+
This method performs comprehensive validation of the configuration
to ensure all values are within acceptable ranges and constraints.
-
+
Returns:
List[str]: List of validation errors (empty if valid)
"""
errors = []
-
+
try:
# Validate the configuration model
self.config.model_validate(self.config.model_dump())
except Exception as e:
errors.append(f"Configuration validation failed: {e}")
-
+
# Additional custom validations
if self.config.decision_threshold < 0.5:
- errors.append("Decision threshold should be at least 0.5 for meaningful majority decisions")
-
+ errors.append(
+ "Decision threshold should be at least 0.5 for meaningful majority decisions"
+ )
+
if self.config.default_board_size < 2:
- errors.append("Board size should be at least 2 for meaningful discussions")
-
+ errors.append(
+ "Board size should be at least 2 for meaningful discussions"
+ )
+
if self.config.max_board_meeting_duration < 60:
- errors.append("Board meeting duration should be at least 60 seconds")
-
+ errors.append(
+ "Board meeting duration should be at least 60 seconds"
+ )
+
return errors
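A short sketch of how `validate_config` surfaces the soft constraints enforced above (threshold at least 0.5, board size at least 2, meeting duration at least 60 seconds); it assumes `BoardConfig` can be constructed directly with `config_data`:

```python
from swarms.structs.board_of_directors_swarm import BoardConfig

config = BoardConfig(
    config_data={
        "decision_threshold": 0.55,
        "default_board_size": 1,  # below the recommended minimum of 2
        "max_board_meeting_duration": 120,
    }
)

for issue in config.validate_config():
    print("config issue:", issue)
```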
@@ -433,72 +539,80 @@ _board_config: Optional[BoardConfig] = None
@lru_cache(maxsize=1)
-def get_board_config(config_file_path: Optional[str] = None) -> BoardConfig:
+def get_board_config(
+ config_file_path: Optional[str] = None,
+) -> BoardConfig:
"""
Get the global Board of Directors configuration instance.
-
+
This function provides a singleton pattern for accessing the Board of Directors
configuration. The configuration is cached for improved performance.
-
+
Args:
config_file_path: Optional path to configuration file
-
+
Returns:
BoardConfig: The global configuration instance
"""
global _board_config
-
+
if _board_config is None:
_board_config = BoardConfig(config_file_path=config_file_path)
-
+
return _board_config
-def enable_board_feature(config_file_path: Optional[str] = None) -> None:
+def enable_board_feature(
+ config_file_path: Optional[str] = None,
+) -> None:
"""
Enable the Board of Directors feature globally.
-
+
This function enables the Board of Directors feature and saves the configuration
to the specified file path.
-
+
Args:
config_file_path: Optional path to save the configuration
"""
config = get_board_config(config_file_path)
config.update_config({"board_feature_enabled": True})
-
+
if config_file_path:
config.save_config(config_file_path)
-
+
logger.info("Board of Directors feature enabled")
-def disable_board_feature(config_file_path: Optional[str] = None) -> None:
+def disable_board_feature(
+ config_file_path: Optional[str] = None,
+) -> None:
"""
Disable the Board of Directors feature globally.
-
+
This function disables the Board of Directors feature and saves the configuration
to the specified file path.
-
+
Args:
config_file_path: Optional path to save the configuration
"""
config = get_board_config(config_file_path)
config.update_config({"board_feature_enabled": False})
-
+
if config_file_path:
config.save_config(config_file_path)
-
+
logger.info("Board of Directors feature disabled")
-def is_board_feature_enabled(config_file_path: Optional[str] = None) -> bool:
+def is_board_feature_enabled(
+ config_file_path: Optional[str] = None,
+) -> bool:
"""
Check if the Board of Directors feature is enabled.
-
+
Args:
config_file_path: Optional path to configuration file
-
+
Returns:
bool: True if the feature is enabled, False otherwise
"""
@@ -506,13 +620,15 @@ def is_board_feature_enabled(config_file_path: Optional[str] = None) -> bool:
return config.is_enabled()
-def create_default_config_file(file_path: str = "swarms_board_config.yaml") -> None:
+def create_default_config_file(
+ file_path: str = "swarms_board_config.yaml",
+) -> None:
"""
Create a default configuration file.
-
+
This function creates a default Board of Directors configuration file
with recommended settings.
-
+
Args:
file_path: Path where to create the configuration file
"""
@@ -526,101 +642,117 @@ def create_default_config_file(file_path: str = "swarms_board_config.yaml") -> N
"verbose_logging": False,
"max_board_meeting_duration": 300,
"auto_fallback_to_director": True,
- "custom_board_templates": {}
+ "custom_board_templates": {},
}
-
- config = BoardConfig(config_file_path=file_path, config_data=default_config)
+
+ config = BoardConfig(
+ config_file_path=file_path, config_data=default_config
+ )
config.save_config(file_path)
-
- logger.info(f"Created default Board of Directors config file: {file_path}")
+
+ logger.info(
+ f"Created default Board of Directors config file: {file_path}"
+ )
-def set_board_size(size: int, config_file_path: Optional[str] = None) -> None:
+def set_board_size(
+ size: int, config_file_path: Optional[str] = None
+) -> None:
"""
Set the default board size.
-
+
Args:
size: The default board size (1-10)
config_file_path: Optional path to save the configuration
"""
if not 1 <= size <= 10:
raise ValueError("Board size must be between 1 and 10")
-
+
config = get_board_config(config_file_path)
config.update_config({"default_board_size": size})
-
+
if config_file_path:
config.save_config(config_file_path)
-
+
logger.info(f"Default board size set to: {size}")
-def set_decision_threshold(threshold: float, config_file_path: Optional[str] = None) -> None:
+def set_decision_threshold(
+ threshold: float, config_file_path: Optional[str] = None
+) -> None:
"""
Set the decision threshold for majority decisions.
-
+
Args:
threshold: The decision threshold (0.0-1.0)
config_file_path: Optional path to save the configuration
"""
if not 0.0 <= threshold <= 1.0:
- raise ValueError("Decision threshold must be between 0.0 and 1.0")
-
+ raise ValueError(
+ "Decision threshold must be between 0.0 and 1.0"
+ )
+
config = get_board_config(config_file_path)
config.update_config({"decision_threshold": threshold})
-
+
if config_file_path:
config.save_config(config_file_path)
-
+
logger.info(f"Decision threshold set to: {threshold}")
-def set_board_model(model: str, config_file_path: Optional[str] = None) -> None:
+def set_board_model(
+ model: str, config_file_path: Optional[str] = None
+) -> None:
"""
Set the default board model.
-
+
Args:
model: The default model name for board members
config_file_path: Optional path to save the configuration
"""
config = get_board_config(config_file_path)
config.update_config({"default_board_model": model})
-
+
if config_file_path:
config.save_config(config_file_path)
-
+
logger.info(f"Default board model set to: {model}")
-def enable_verbose_logging(config_file_path: Optional[str] = None) -> None:
+def enable_verbose_logging(
+ config_file_path: Optional[str] = None,
+) -> None:
"""
Enable verbose logging for board operations.
-
+
Args:
config_file_path: Optional path to save the configuration
"""
config = get_board_config(config_file_path)
config.update_config({"verbose_logging": True})
-
+
if config_file_path:
config.save_config(config_file_path)
-
+
logger.info("Verbose logging enabled for Board of Directors")
-def disable_verbose_logging(config_file_path: Optional[str] = None) -> None:
+def disable_verbose_logging(
+ config_file_path: Optional[str] = None,
+) -> None:
"""
Disable verbose logging for board operations.
-
+
Args:
config_file_path: Optional path to save the configuration
"""
config = get_board_config(config_file_path)
config.update_config({"verbose_logging": False})
-
+
if config_file_path:
config.save_config(config_file_path)
-
+
logger.info("Verbose logging disabled for Board of Directors")
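Taken together, the helpers above form a small configuration API. A hedged usage sketch (the YAML path is a placeholder; a directory is included so `save_config` can create it):

```python
from swarms.structs.board_of_directors_swarm import (
    enable_board_feature,
    is_board_feature_enabled,
    set_board_size,
    set_decision_threshold,
)

config_path = "configs/swarms_board_config.yaml"

enable_board_feature(config_path)
set_board_size(5, config_path)
set_decision_threshold(0.75, config_path)

print(is_board_feature_enabled(config_path))  # True once the feature is enabled
```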
@@ -628,13 +760,14 @@ def disable_verbose_logging(config_file_path: Optional[str] = None) -> None:
# BOARD OF DIRECTORS IMPLEMENTATION
# ============================================================================
+
class BoardMemberRole(str, Enum):
"""Enumeration of possible board member roles.
-
+
This enum defines the various roles that board members can have within
the Board of Directors swarm. Each role has specific responsibilities
and voting weights associated with it.
-
+
Attributes:
CHAIRMAN: Primary leader responsible for board meetings and final decisions
VICE_CHAIRMAN: Secondary leader who supports the chairman
@@ -643,7 +776,7 @@ class BoardMemberRole(str, Enum):
MEMBER: General board member with specific expertise
EXECUTIVE_DIRECTOR: Executive-level board member with operational authority
"""
-
+
CHAIRMAN = "chairman"
VICE_CHAIRMAN = "vice_chairman"
SECRETARY = "secretary"
@@ -654,18 +787,18 @@ class BoardMemberRole(str, Enum):
class BoardDecisionType(str, Enum):
"""Enumeration of board decision types.
-
+
This enum defines the different types of decisions that can be made
by the Board of Directors, including voting mechanisms and consensus
approaches.
-
+
Attributes:
UNANIMOUS: All board members agree on the decision
MAJORITY: More than 50% of votes are in favor
CONSENSUS: General agreement without formal voting
CHAIRMAN_DECISION: Final decision made by the chairman
"""
-
+
UNANIMOUS = "unanimous"
MAJORITY = "majority"
CONSENSUS = "consensus"
@@ -676,26 +809,26 @@ class BoardDecisionType(str, Enum):
class BoardMember:
"""
Represents a member of the Board of Directors.
-
+
This dataclass encapsulates all information about a board member,
including their agent representation, role, voting weight, and
areas of expertise.
-
+
Attributes:
agent: The agent representing this board member
role: The role of this board member within the board
voting_weight: The weight of this member's vote (default: 1.0)
expertise_areas: Areas of expertise for this board member
"""
-
+
agent: Agent
role: BoardMemberRole
voting_weight: float = 1.0
expertise_areas: List[str] = field(default_factory=list)
-
+
def __post_init__(self) -> None:
"""Initialize default values after object creation.
-
+
This method ensures that the expertise_areas list is properly
initialized as an empty list if not provided.
"""
@@ -706,11 +839,11 @@ class BoardMember:
class BoardOrder(BaseModel):
"""
Represents an order issued by the Board of Directors.
-
+
This model defines the structure of orders that the board issues
to worker agents, including task assignments, priorities, and
deadlines.
-
+
Attributes:
agent_name: The name of the agent to which the task is assigned
task: The specific task to be executed by the assigned agent
@@ -718,7 +851,7 @@ class BoardOrder(BaseModel):
deadline: Optional deadline for task completion
assigned_by: The board member who assigned this task
"""
-
+
agent_name: str = Field(
...,
description="Specifies the name of the agent to which the task is assigned.",
@@ -746,10 +879,10 @@ class BoardOrder(BaseModel):
class BoardDecision(BaseModel):
"""
Represents a decision made by the Board of Directors.
-
+
This model tracks the details of decisions made by the board,
including voting results, decision types, and reasoning.
-
+
Attributes:
decision_type: The type of decision (unanimous, majority, etc.)
decision: The actual decision made
@@ -758,7 +891,7 @@ class BoardDecision(BaseModel):
abstentions: Number of abstentions
reasoning: The reasoning behind the decision
"""
-
+
decision_type: BoardDecisionType = Field(
...,
description="The type of decision made by the board.",
@@ -791,17 +924,17 @@ class BoardDecision(BaseModel):
class BoardSpec(BaseModel):
"""
Specification for Board of Directors operations.
-
+
This model represents the complete output of a board meeting,
including the plan, orders, decisions, and meeting summary.
-
+
Attributes:
plan: The overall plan created by the board
orders: List of orders issued by the board
decisions: List of decisions made by the board
meeting_summary: Summary of the board meeting
"""
-
+
plan: str = Field(
...,
description="Outlines the sequence of actions to be taken by the swarm as decided by the board.",
@@ -823,11 +956,11 @@ class BoardSpec(BaseModel):
class BoardOfDirectorsSwarm(BaseSwarm):
"""
A hierarchical swarm of agents with a Board of Directors that orchestrates tasks.
-
+
The Board of Directors operates as a collective decision-making body that can be
enabled manually through configuration. It provides an alternative to the single
Director approach with more democratic and collaborative decision-making.
-
+
The workflow follows a hierarchical pattern:
1. Task is received and sent to the Board of Directors
2. Board convenes to discuss and create a plan through voting and consensus
@@ -835,7 +968,7 @@ class BoardOfDirectorsSwarm(BaseSwarm):
4. Agents execute tasks and report back to the board
5. Board evaluates results and issues new orders if needed (up to max_loops)
6. All context and conversation history is preserved throughout the process
-
+
Attributes:
name: The name of the swarm
description: A description of the swarm
@@ -874,7 +1007,7 @@ class BoardOfDirectorsSwarm(BaseSwarm):
) -> None:
"""
Initialize the Board of Directors Swarm with the given parameters.
-
+
Args:
name: The name of the swarm
description: A description of the swarm
@@ -892,7 +1025,7 @@ class BoardOfDirectorsSwarm(BaseSwarm):
max_workers: Maximum number of workers for parallel execution
*args: Additional positional arguments passed to BaseSwarm
**kwargs: Additional keyword arguments passed to BaseSwarm
-
+
Raises:
ValueError: If critical requirements are not met during initialization
"""
@@ -901,7 +1034,7 @@ class BoardOfDirectorsSwarm(BaseSwarm):
description=description,
agents=agents,
)
-
+
self.name = name
self.board_members = board_members or []
self.agents = agents or []
@@ -914,24 +1047,30 @@ class BoardOfDirectorsSwarm(BaseSwarm):
self.decision_threshold = decision_threshold
self.enable_voting = enable_voting
self.enable_consensus = enable_consensus
- self.max_workers = max_workers or min(32, (os.cpu_count() or 1) + 4)
-
+ self.max_workers = max_workers or min(
+ 32, (os.cpu_count() or 1) + 4
+ )
+
# Initialize the swarm
self._init_board_swarm()
def _init_board_swarm(self) -> None:
"""
Initialize the Board of Directors swarm.
-
+
This method sets up the board members, initializes the conversation,
performs reliability checks, and prepares the board for operation.
-
+
Raises:
ValueError: If reliability checks fail
"""
if self.verbose:
- board_logger.info(f"π Initializing Board of Directors Swarm: {self.name}")
- board_logger.info(f"π Configuration - Max loops: {self.max_loops}")
+ board_logger.info(
+ f"π Initializing Board of Directors Swarm: {self.name}"
+ )
+ board_logger.info(
+ f"π Configuration - Max loops: {self.max_loops}"
+ )
self.conversation = Conversation(time_enabled=False)
@@ -946,17 +1085,21 @@ class BoardOfDirectorsSwarm(BaseSwarm):
self._add_context_to_board()
if self.verbose:
- board_logger.success(f"β
Board of Directors Swarm initialized successfully: {self.name}")
+ board_logger.success(
+ f"β
Board of Directors Swarm initialized successfully: {self.name}"
+ )
def _setup_default_board(self) -> None:
"""
Set up a default Board of Directors if none is provided.
-
+
Creates a basic board structure with Chairman, Vice Chairman, and Secretary roles.
This method is called automatically if no board members are provided during initialization.
"""
if self.verbose:
- board_logger.info("π― Setting up default Board of Directors")
+ board_logger.info(
+ "π― Setting up default Board of Directors"
+ )
# Create default board members
chairman = Agent(
@@ -984,18 +1127,35 @@ class BoardOfDirectorsSwarm(BaseSwarm):
)
self.board_members = [
- BoardMember(chairman, BoardMemberRole.CHAIRMAN, 1.5, ["leadership", "strategy"]),
- BoardMember(vice_chairman, BoardMemberRole.VICE_CHAIRMAN, 1.2, ["operations", "coordination"]),
- BoardMember(secretary, BoardMemberRole.SECRETARY, 1.0, ["documentation", "communication"]),
+ BoardMember(
+ chairman,
+ BoardMemberRole.CHAIRMAN,
+ 1.5,
+ ["leadership", "strategy"],
+ ),
+ BoardMember(
+ vice_chairman,
+ BoardMemberRole.VICE_CHAIRMAN,
+ 1.2,
+ ["operations", "coordination"],
+ ),
+ BoardMember(
+ secretary,
+ BoardMemberRole.SECRETARY,
+ 1.0,
+ ["documentation", "communication"],
+ ),
]
if self.verbose:
- board_logger.success("β
Default Board of Directors setup completed")
+ board_logger.success(
+ "β
Default Board of Directors setup completed"
+ )
def _get_chairman_prompt(self) -> str:
"""
Get the system prompt for the Chairman role.
-
+
Returns:
str: The system prompt defining the Chairman's responsibilities and behavior
"""
@@ -1012,7 +1172,7 @@ You should be diplomatic, fair, and decisive in your leadership."""
def _get_vice_chairman_prompt(self) -> str:
"""
Get the system prompt for the Vice Chairman role.
-
+
Returns:
str: The system prompt defining the Vice Chairman's responsibilities and behavior
"""
@@ -1029,7 +1189,7 @@ You should be collaborative, analytical, and supportive in your role."""
def _get_secretary_prompt(self) -> str:
"""
Get the system prompt for the Secretary role.
-
+
Returns:
str: The system prompt defining the Secretary's responsibilities and behavior
"""
@@ -1046,16 +1206,18 @@ You should be thorough, organized, and detail-oriented in your documentation."""
def _add_context_to_board(self) -> None:
"""
Add agent context to all board members' conversations.
-
+
This ensures that board members are aware of all available agents
and their capabilities when making decisions.
-
+
Raises:
Exception: If context addition fails
"""
try:
if self.verbose:
- board_logger.info("π Adding agent context to board members")
+ board_logger.info(
+ "π Adding agent context to board members"
+ )
# Add context to each board member
for board_member in self.board_members:
@@ -1067,26 +1229,34 @@ You should be thorough, organized, and detail-oriented in your documentation."""
)
if self.verbose:
- board_logger.success("β
Agent context added to board members successfully")
+ board_logger.success(
+ "β
Agent context added to board members successfully"
+ )
except Exception as e:
- error_msg = f"β Failed to add context to board members: {str(e)}"
- board_logger.error(f"{error_msg}\nπ Traceback: {traceback.format_exc()}")
+ error_msg = (
+ f"β Failed to add context to board members: {str(e)}"
+ )
+ board_logger.error(
+ f"{error_msg}\nπ Traceback: {traceback.format_exc()}"
+ )
raise
def _perform_reliability_checks(self) -> None:
"""
Perform reliability checks for the Board of Directors swarm.
-
+
This method validates critical requirements and configuration
parameters to ensure the swarm can operate correctly.
-
+
Raises:
ValueError: If critical requirements are not met
"""
try:
if self.verbose:
- board_logger.info(f"π Running reliability checks for swarm: {self.name}")
+ board_logger.info(
+ f"π Running reliability checks for swarm: {self.name}"
+ )
# Check if Board of Directors feature is enabled
board_config = get_board_config()
@@ -1106,14 +1276,21 @@ You should be thorough, organized, and detail-oriented in your documentation."""
"Max loops must be greater than 0. Please set a valid number of loops."
)
- if self.decision_threshold < 0.0 or self.decision_threshold > 1.0:
+ if (
+ self.decision_threshold < 0.0
+ or self.decision_threshold > 1.0
+ ):
raise ValueError(
"Decision threshold must be between 0.0 and 1.0."
)
if self.verbose:
- board_logger.success(f"β
Reliability checks passed for swarm: {self.name}")
- board_logger.info(f"π Swarm stats - Agents: {len(self.agents)}, Max loops: {self.max_loops}")
+ board_logger.success(
+ f"β
Reliability checks passed for swarm: {self.name}"
+ )
+ board_logger.info(
+ f"π Swarm stats - Agents: {len(self.agents)}, Max loops: {self.max_loops}"
+ )
except Exception as e:
error_msg = f"β Failed reliability checks: {str(e)}\nπ Traceback: {traceback.format_exc()}"
@@ -1127,39 +1304,47 @@ You should be thorough, organized, and detail-oriented in your documentation."""
) -> BoardSpec:
"""
Run a board meeting to discuss and decide on the given task.
-
+
This method orchestrates a complete board meeting, including discussion,
decision-making, and task distribution to worker agents.
-
+
Args:
task: The task to be discussed and planned by the board
img: Optional image to be used with the task
-
+
Returns:
BoardSpec: The board's plan and orders
-
+
Raises:
Exception: If board meeting execution fails
"""
try:
if self.verbose:
- board_logger.info(f"ποΈ Running board meeting with task: {task[:100]}...")
+ board_logger.info(
+ f"ποΈ Running board meeting with task: {task[:100]}..."
+ )
# Create board meeting prompt
meeting_prompt = self._create_board_meeting_prompt(task)
-
+
# Run board discussion
- board_discussion = self._conduct_board_discussion(meeting_prompt, img)
-
+ board_discussion = self._conduct_board_discussion(
+ meeting_prompt, img
+ )
+
# Parse board decisions
board_spec = self._parse_board_decisions(board_discussion)
-
+
# Add to conversation history
- self.conversation.add(role="Board of Directors", content=board_discussion)
+ self.conversation.add(
+ role="Board of Directors", content=board_discussion
+ )
if self.verbose:
board_logger.success("β
Board meeting completed")
- board_logger.debug(f"π Board output type: {type(board_spec)}")
+ board_logger.debug(
+ f"π Board output type: {type(board_spec)}"
+ )
return board_spec
@@ -1171,14 +1356,14 @@ You should be thorough, organized, and detail-oriented in your documentation."""
def _create_board_meeting_prompt(self, task: str) -> str:
"""
Create a prompt for the board meeting.
-
+
This method generates a comprehensive prompt that guides the board
through the meeting process, including task discussion, decision-making,
and task distribution.
-
+
Args:
task: The task to be discussed
-
+
Returns:
str: The board meeting prompt
"""
@@ -1229,58 +1414,73 @@ Please provide your response in the following format:
def _format_board_members_info(self) -> str:
"""
Format board members information for the prompt.
-
+
This method creates a formatted string containing information about
all board members, their roles, and expertise areas.
-
+
Returns:
str: Formatted board members information
"""
info = []
for member in self.board_members:
- info.append(f"- {member.agent.agent_name} ({member.role.value}): {member.agent.agent_description}")
+ info.append(
+ f"- {member.agent.agent_name} ({member.role.value}): {member.agent.agent_description}"
+ )
if member.expertise_areas:
- info.append(f" Expertise: {', '.join(member.expertise_areas)}")
+ info.append(
+ f" Expertise: {', '.join(member.expertise_areas)}"
+ )
return "\n".join(info)
- def _conduct_board_discussion(self, prompt: str, img: Optional[str] = None) -> str:
+ def _conduct_board_discussion(
+ self, prompt: str, img: Optional[str] = None
+ ) -> str:
"""
Conduct the board discussion using the chairman as the primary speaker.
-
+
This method uses the chairman agent to lead the board discussion
and generate the meeting output.
-
+
Args:
prompt: The board meeting prompt
img: Optional image input
-
+
Returns:
str: The board discussion output
-
+
Raises:
ValueError: If no chairman is found in board members
"""
# Use the chairman to lead the discussion
- chairman = next((member.agent for member in self.board_members
- if member.role == BoardMemberRole.CHAIRMAN),
- self.board_members[0].agent if self.board_members else None)
-
+ chairman = next(
+ (
+ member.agent
+ for member in self.board_members
+ if member.role == BoardMemberRole.CHAIRMAN
+ ),
+ (
+ self.board_members[0].agent
+ if self.board_members
+ else None
+ ),
+ )
+
if not chairman:
raise ValueError("No chairman found in board members")
-
+
return chairman.run(task=prompt, img=img)
def _parse_board_decisions(self, board_output: str) -> BoardSpec:
"""
Parse the board output into a BoardSpec object.
-
+
This method attempts to parse the board discussion output as JSON
and convert it into a structured BoardSpec object. If parsing fails,
it returns a basic BoardSpec with the raw output.
-
+
Args:
board_output: The output from the board discussion
-
+
Returns:
BoardSpec: Parsed board specification
"""
@@ -1288,10 +1488,12 @@ Please provide your response in the following format:
# Try to parse as JSON first
if isinstance(board_output, str):
# Try to extract JSON from the response
- json_match = re.search(r'\{.*\}', board_output, re.DOTALL)
+ json_match = re.search(
+ r"\{.*\}", board_output, re.DOTALL
+ )
if json_match:
board_output = json_match.group()
-
+
parsed = json.loads(board_output)
else:
parsed = board_output
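For clarity, a tiny sketch of the JSON-extraction step in `_parse_board_decisions`: the first `{...}` block is pulled out of the raw board text before parsing. The input string is illustrative.

```python
import json
import re

board_output = (
    "Opening remarks from the chairman... "
    '{"plan": "Split the work", "orders": [], "decisions": []} '
    "closing remarks"
)

match = re.search(r"\{.*\}", board_output, re.DOTALL)
parsed = json.loads(match.group()) if match else {}
print(parsed["plan"])  # "Split the work"
```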
@@ -1310,7 +1512,9 @@ Please provide your response in the following format:
task=order_data.get("task", ""),
priority=order_data.get("priority", 3),
deadline=order_data.get("deadline"),
- assigned_by=order_data.get("assigned_by", "Board of Directors")
+ assigned_by=order_data.get(
+ "assigned_by", "Board of Directors"
+ ),
)
orders.append(order)
@@ -1318,12 +1522,18 @@ Please provide your response in the following format:
decisions = []
for decision_data in decisions_data:
decision = BoardDecision(
- decision_type=BoardDecisionType(decision_data.get("decision_type", "consensus")),
+ decision_type=BoardDecisionType(
+ decision_data.get(
+ "decision_type", "consensus"
+ )
+ ),
decision=decision_data.get("decision", ""),
votes_for=decision_data.get("votes_for", 0),
- votes_against=decision_data.get("votes_against", 0),
+ votes_against=decision_data.get(
+ "votes_against", 0
+ ),
abstentions=decision_data.get("abstentions", 0),
- reasoning=decision_data.get("reasoning", "")
+ reasoning=decision_data.get("reasoning", ""),
)
decisions.append(decision)
@@ -1331,53 +1541,67 @@ Please provide your response in the following format:
plan=plan,
orders=orders,
decisions=decisions,
- meeting_summary=meeting_summary
+ meeting_summary=meeting_summary,
)
except Exception as e:
- board_logger.error(f"Failed to parse board decisions: {str(e)}")
+ board_logger.error(
+ f"Failed to parse board decisions: {str(e)}"
+ )
# Return a basic BoardSpec if parsing fails
return BoardSpec(
plan=board_output,
orders=[],
decisions=[],
- meeting_summary="Parsing failed, using raw output"
+ meeting_summary="Parsing failed, using raw output",
)
- def step(self, task: str, img: Optional[str] = None, *args: Any, **kwargs: Any) -> Any:
+ def step(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ *args: Any,
+ **kwargs: Any,
+ ) -> Any:
"""
Execute a single step of the Board of Directors swarm.
-
+
This method runs one complete cycle of board meeting and task execution.
It includes board discussion, task distribution, and optional feedback.
-
+
Args:
task: The task to be executed
img: Optional image input
*args: Additional positional arguments
**kwargs: Additional keyword arguments
-
+
Returns:
Any: The result of the step execution
-
+
Raises:
Exception: If step execution fails
"""
try:
if self.verbose:
- board_logger.info(f"π£ Executing single step for task: {task[:100]}...")
+ board_logger.info(
+ f"π£ Executing single step for task: {task[:100]}..."
+ )
# Run board meeting
board_spec = self.run_board_meeting(task=task, img=img)
if self.verbose:
- board_logger.info(f"π Board created plan and {len(board_spec.orders)} orders")
+ board_logger.info(
+ f"π Board created plan and {len(board_spec.orders)} orders"
+ )
# Execute the orders
outputs = self._execute_orders(board_spec.orders)
if self.verbose:
- board_logger.info(f"β‘ Executed {len(outputs)} orders")
+ board_logger.info(
+ f"β‘ Executed {len(outputs)} orders"
+ )
# Provide board feedback if enabled
if self.board_feedback_on:
@@ -1395,47 +1619,64 @@ Please provide your response in the following format:
board_logger.error(error_msg)
raise
- def run(self, task: str, img: Optional[str] = None, *args: Any, **kwargs: Any) -> Any:
+ def run(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ *args: Any,
+ **kwargs: Any,
+ ) -> Any:
"""
Run the Board of Directors swarm for the specified number of loops.
-
+
This method executes the complete swarm workflow, including multiple
iterations if max_loops is greater than 1. Each iteration includes
board meeting, task execution, and feedback generation.
-
+
Args:
task: The task to be executed
img: Optional image input
*args: Additional positional arguments
**kwargs: Additional keyword arguments
-
+
Returns:
Any: The final result of the swarm execution
-
+
Raises:
Exception: If swarm execution fails
"""
try:
if self.verbose:
- board_logger.info(f"ποΈ Starting Board of Directors swarm execution: {self.name}")
+ board_logger.info(
+ f"ποΈ Starting Board of Directors swarm execution: {self.name}"
+ )
board_logger.info(f"π Task: {task[:100]}...")
current_loop = 0
while current_loop < self.max_loops:
if self.verbose:
- board_logger.info(f"π Executing loop {current_loop + 1}/{self.max_loops}")
+ board_logger.info(
+ f"π Executing loop {current_loop + 1}/{self.max_loops}"
+ )
# Execute step
- result = self.step(task=task, img=img, *args, **kwargs)
-
+ self.step(task=task, img=img, *args, **kwargs)
+
# Add to conversation
- self.conversation.add(role="System", content=f"Loop {current_loop + 1} completed")
-
+ self.conversation.add(
+ role="System",
+ content=f"Loop {current_loop + 1} completed",
+ )
+
current_loop += 1
if self.verbose:
- board_logger.success(f"π Board of Directors swarm run completed: {self.name}")
- board_logger.info(f"π Total loops executed: {current_loop}")
+ board_logger.success(
+ f"π Board of Directors swarm run completed: {self.name}"
+ )
+ board_logger.info(
+ f"π Total loops executed: {current_loop}"
+ )
return history_output_formatter(
conversation=self.conversation, type=self.output_type
@@ -1446,19 +1687,25 @@ Please provide your response in the following format:
board_logger.error(error_msg)
raise
- async def arun(self, task: str, img: Optional[str] = None, *args: Any, **kwargs: Any) -> Any:
+ async def arun(
+ self,
+ task: str,
+ img: Optional[str] = None,
+ *args: Any,
+ **kwargs: Any,
+ ) -> Any:
"""
Run the Board of Directors swarm asynchronously.
-
+
This method provides an asynchronous interface for running the swarm,
allowing for non-blocking execution in async contexts.
-
+
Args:
task: The task to be executed
img: Optional image input
*args: Additional positional arguments
**kwargs: Additional keyword arguments
-
+
Returns:
Any: The final result of the swarm execution
"""
@@ -1471,16 +1718,16 @@ Please provide your response in the following format:
def _generate_board_feedback(self, outputs: List[Any]) -> str:
"""
Provide feedback from the Board of Directors based on agent outputs.
-
+
This method uses the chairman to review and provide feedback on
the outputs generated by worker agents.
-
+
Args:
outputs: List of outputs from agents
-
+
Returns:
str: Board feedback on the outputs
-
+
Raises:
ValueError: If no chairman is found for feedback
Exception: If feedback generation fails
@@ -1492,10 +1739,19 @@ Please provide your response in the following format:
task = f"History: {self.conversation.get_str()} \n\n"
# Use the chairman for feedback
- chairman = next((member.agent for member in self.board_members
- if member.role == BoardMemberRole.CHAIRMAN),
- self.board_members[0].agent if self.board_members else None)
-
+ chairman = next(
+ (
+ member.agent
+ for member in self.board_members
+ if member.role == BoardMemberRole.CHAIRMAN
+ ),
+ (
+ self.board_members[0].agent
+ if self.board_members
+ else None
+ ),
+ )
+
if not chairman:
raise ValueError("No chairman found for feedback")
@@ -1509,10 +1765,14 @@ Please provide your response in the following format:
)
output = chairman.run(task=feedback_prompt)
- self.conversation.add(role=chairman.agent_name, content=output)
+ self.conversation.add(
+ role=chairman.agent_name, content=output
+ )
if self.verbose:
- board_logger.success("β
Board feedback generated successfully")
+ board_logger.success(
+ "β
Board feedback generated successfully"
+ )
return output
@@ -1522,27 +1782,23 @@ Please provide your response in the following format:
raise
def _call_single_agent(
- self,
- agent_name: str,
- task: str,
- *args: Any,
- **kwargs: Any
+ self, agent_name: str, task: str, *args: Any, **kwargs: Any
) -> Any:
"""
Call a single agent with the given task.
-
+
This method finds and executes a specific agent with the provided task.
It includes error handling and logging for agent execution.
-
+
Args:
agent_name: The name of the agent to call
task: The task to assign to the agent
*args: Additional positional arguments
**kwargs: Additional keyword arguments
-
+
Returns:
Any: The output from the agent
-
+
Raises:
ValueError: If the specified agent is not found
Exception: If agent execution fails
@@ -1554,13 +1810,18 @@ Please provide your response in the following format:
# Find agent by name
agent = None
for a in self.agents:
- if hasattr(a, "agent_name") and a.agent_name == agent_name:
+ if (
+ hasattr(a, "agent_name")
+ and a.agent_name == agent_name
+ ):
agent = a
break
if agent is None:
available_agents = [
- a.agent_name for a in self.agents if hasattr(a, "agent_name")
+ a.agent_name
+ for a in self.agents
+ if hasattr(a, "agent_name")
]
raise ValueError(
f"Agent '{agent_name}' not found in swarm. Available agents: {available_agents}"
@@ -1574,7 +1835,9 @@ Please provide your response in the following format:
self.conversation.add(role=agent_name, content=output)
if self.verbose:
- board_logger.success(f"β
Agent {agent_name} completed task successfully")
+ board_logger.success(
+ f"β
Agent {agent_name} completed task successfully"
+ )
return output
@@ -1583,59 +1846,75 @@ Please provide your response in the following format:
board_logger.error(error_msg)
raise
- def _execute_orders(self, orders: List[BoardOrder]) -> List[Dict[str, Any]]:
+ def _execute_orders(
+ self, orders: List[BoardOrder]
+ ) -> List[Dict[str, Any]]:
"""
Execute the orders issued by the Board of Directors.
-
+
This method uses ThreadPoolExecutor to execute multiple orders in parallel,
improving performance for complex task distributions.
-
+
Args:
orders: List of board orders to execute
-
+
Returns:
List[Dict[str, Any]]: List of outputs from executed orders
-
+
Raises:
Exception: If order execution fails
"""
try:
if self.verbose:
- board_logger.info(f"β‘ Executing {len(orders)} board orders")
+ board_logger.info(
+ f"β‘ Executing {len(orders)} board orders"
+ )
# Use ThreadPoolExecutor for parallel execution
outputs = []
- with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+ with ThreadPoolExecutor(
+ max_workers=self.max_workers
+ ) as executor:
# Submit all orders for execution
future_to_order = {
- executor.submit(self._execute_single_order, order): order
+ executor.submit(
+ self._execute_single_order, order
+ ): order
for order in orders
}
-
+
# Collect results as they complete
for future in as_completed(future_to_order):
order = future_to_order[future]
try:
output = future.result()
- outputs.append({
- "agent_name": order.agent_name,
- "task": order.task,
- "output": output,
- "priority": order.priority,
- "assigned_by": order.assigned_by,
- })
+ outputs.append(
+ {
+ "agent_name": order.agent_name,
+ "task": order.task,
+ "output": output,
+ "priority": order.priority,
+ "assigned_by": order.assigned_by,
+ }
+ )
except Exception as e:
- board_logger.error(f"Failed to execute order for {order.agent_name}: {str(e)}")
- outputs.append({
- "agent_name": order.agent_name,
- "task": order.task,
- "output": f"Error: {str(e)}",
- "priority": order.priority,
- "assigned_by": order.assigned_by,
- })
+ board_logger.error(
+ f"Failed to execute order for {order.agent_name}: {str(e)}"
+ )
+ outputs.append(
+ {
+ "agent_name": order.agent_name,
+ "task": order.task,
+ "output": f"Error: {str(e)}",
+ "priority": order.priority,
+ "assigned_by": order.assigned_by,
+ }
+ )
if self.verbose:
- board_logger.success(f"β
Executed {len(outputs)} orders successfully")
+ board_logger.success(
+ f"β
Executed {len(outputs)} orders successfully"
+ )
return outputs
@@ -1647,13 +1926,13 @@ Please provide your response in the following format:
def _execute_single_order(self, order: BoardOrder) -> Any:
"""
Execute a single board order.
-
+
This method is a wrapper around _call_single_agent for executing
individual board orders.
-
+
Args:
order: The board order to execute
-
+
Returns:
Any: The output from the executed order
"""
@@ -1665,41 +1944,48 @@ Please provide your response in the following format:
def add_board_member(self, board_member: BoardMember) -> None:
"""
Add a new member to the Board of Directors.
-
+
This method allows dynamic addition of board members after swarm initialization.
-
+
Args:
board_member: The board member to add
"""
self.board_members.append(board_member)
if self.verbose:
- board_logger.info(f"β
Added board member: {board_member.agent.agent_name}")
+ board_logger.info(
+ f"β
Added board member: {board_member.agent.agent_name}"
+ )
def remove_board_member(self, agent_name: str) -> None:
"""
Remove a board member by agent name.
-
+
This method allows dynamic removal of board members after swarm initialization.
-
+
Args:
agent_name: The name of the agent to remove from the board
"""
self.board_members = [
- member for member in self.board_members
+ member
+ for member in self.board_members
if member.agent.agent_name != agent_name
]
if self.verbose:
- board_logger.info(f"β
Removed board member: {agent_name}")
+ board_logger.info(
+ f"β
Removed board member: {agent_name}"
+ )
- def get_board_member(self, agent_name: str) -> Optional[BoardMember]:
+ def get_board_member(
+ self, agent_name: str
+ ) -> Optional[BoardMember]:
"""
Get a board member by agent name.
-
+
This method retrieves a specific board member by their agent name.
-
+
Args:
agent_name: The name of the agent
-
+
Returns:
Optional[BoardMember]: The board member if found, None otherwise
"""
@@ -1711,10 +1997,10 @@ Please provide your response in the following format:
def get_board_summary(self) -> Dict[str, Any]:
"""
Get a summary of the Board of Directors.
-
+
This method provides a comprehensive summary of the board structure,
including member information, configuration, and statistics.
-
+
Returns:
Dict[str, Any]: Summary of the board structure and members
"""
diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py
index 85a720cf..70a97587 100644
--- a/swarms/structs/hiearchical_swarm.py
+++ b/swarms/structs/hiearchical_swarm.py
@@ -18,6 +18,9 @@ Todo
- Auto build agents from input prompt - and then add them to the swarm
- Create an interactive and dynamic UI like we did with heavy swarm
- Make it faster and more high performance
+- Enable the director to choose a multi-agent approach to the task; it would orchestrate how the agents talk and work together.
+- Improve the director feedback; consider using an agent-as-judge for the worker agents instead of the director.
+- Use agent rearrange to orchestrate the agents
Classes:
HierarchicalOrder: Represents a single task assignment to a specific agent
@@ -25,14 +28,26 @@ Classes:
HierarchicalSwarm: Main swarm orchestrator that manages director and worker agents
"""
+import time
import traceback
from typing import Any, Callable, List, Optional, Union
+from loguru import logger
from pydantic import BaseModel, Field
+from rich.console import Console
+from rich.layout import Layout
+from rich.live import Live
+from rich.panel import Panel
+from rich.table import Table
+from rich.text import Text
from swarms.prompts.hiearchical_system_prompt import (
HIEARCHICAL_SWARM_SYSTEM_PROMPT,
)
+from swarms.prompts.multi_agent_collab_prompt import (
+ MULTI_AGENT_COLLAB_PROMPT_TWO,
+)
+from swarms.prompts.reasoning_prompt import INTERNAL_MONOLGUE_PROMPT
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.structs.ma_utils import list_all_agents
@@ -40,10 +55,507 @@ from swarms.tools.base_tool import BaseTool
from swarms.utils.history_output_formatter import (
history_output_formatter,
)
-from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.output_types import OutputType
-logger = initialize_logger(log_folder="hierarchical_swarm")
+
+class HierarchicalSwarmDashboard:
+ """
+ Futuristic Arasaka Corporation-style dashboard for hierarchical swarm monitoring.
+
+ This dashboard provides a professional, enterprise-grade interface with a red and black
+ color scheme, real-time monitoring of swarm operations, and cyberpunk aesthetics.
+
+ Attributes:
+ console (Console): Rich console instance for rendering
+ live_display (Live): Live display for real-time updates
+ swarm_name (str): Name of the swarm being monitored
+ agent_statuses (dict): Current status of all agents
+ director_status (str): Current status of the director
+ current_loop (int): Current execution loop
+ max_loops (int): Maximum number of loops
+ is_active (bool): Whether the dashboard is currently active
+ """
+
+ def __init__(self, swarm_name: str = "Swarms Corporation"):
+ """
+ Initialize the Arasaka dashboard.
+
+ Args:
+ swarm_name (str): Name of the swarm to display in the dashboard
+ """
+ self.console = Console()
+ self.live_display = None
+ self.swarm_name = swarm_name
+ self.agent_statuses = {}
+ self.director_status = "INITIALIZING"
+ self.current_loop = 0
+ self.max_loops = 1
+ self.is_active = False
+ self.start_time = None
+ self.spinner_frames = [
+ "β ",
+ "β ",
+ "β Ή",
+ "β Έ",
+ "β Ό",
+ "β ΄",
+ "β ¦",
+ "β §",
+ "β ",
+ "β ",
+ ]
+ self.spinner_idx = 0
+
+ # Director information tracking
+ self.director_plan = ""
+ self.director_orders = []
+
+ # Swarm information
+ self.swarm_description = ""
+ self.director_name = "Director"
+ self.director_model_name = "gpt-4o-mini"
+
+ # View mode for agents display
+ self.detailed_view = False
+
+ # Multi-loop agent tracking
+ self.agent_history = {} # Track agent outputs across loops
+ self.current_loop = 0
+
+ def _get_spinner(self) -> str:
+ """Get current spinner frame for loading animations."""
+ self.spinner_idx = (self.spinner_idx + 1) % len(
+ self.spinner_frames
+ )
+ return self.spinner_frames[self.spinner_idx]
+
+ def _create_header(self) -> Panel:
+ """Create the dashboard header with Swarms Corporation branding."""
+ header_text = Text()
+ header_text.append(
+ "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n",
+ style="bold red",
+ )
+ header_text.append("β", style="bold red")
+ header_text.append(" ", style="bold red")
+ header_text.append(
+ "SWARMS CORPORATION", style="bold white on red"
+ )
+ header_text.append(" ", style="bold red")
+ header_text.append("β\n", style="bold red")
+ header_text.append("β", style="bold red")
+ header_text.append(" ", style="bold red")
+ header_text.append(
+ "HIERARCHICAL SWARM OPERATIONS CENTER", style="bold red"
+ )
+ header_text.append(" ", style="bold red")
+ header_text.append("β\n", style="bold red")
+ header_text.append(
+ "ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ",
+ style="bold red",
+ )
+
+ return Panel(
+ header_text,
+ border_style="red",
+ padding=(0, 1),
+ )
+
+ def _create_status_panel(self) -> Panel:
+ """Create the operations status panel."""
+ status_text = Text()
+
+ # Corporation branding and operation type
+ status_text.append(
+ "By the Swarms Corporation", style="bold cyan"
+ )
+ status_text.append("\n", style="white")
+ status_text.append(
+ "Hierarchical Agent Operations", style="bold white"
+ )
+
+ status_text.append("\n\n", style="white")
+
+ # Swarm information
+ status_text.append("SWARM NAME: ", style="bold white")
+ status_text.append(f"{self.swarm_name}", style="bold cyan")
+
+ status_text.append("\n", style="white")
+ status_text.append("DESCRIPTION: ", style="bold white")
+ status_text.append(f"{self.swarm_description}", style="white")
+
+ status_text.append("\n", style="white")
+ status_text.append("DIRECTOR: ", style="bold white")
+ status_text.append(
+ f"{self.director_name} ({self.director_model_name})",
+ style="cyan",
+ )
+
+ status_text.append("\n", style="white")
+ status_text.append("TOTAL LOOPS: ", style="bold white")
+ status_text.append(f"{self.max_loops}", style="bold cyan")
+
+ status_text.append(" | ", style="white")
+ status_text.append("CURRENT LOOP: ", style="bold white")
+ status_text.append(
+ f"{self.current_loop}", style="bold yellow"
+ )
+
+ # Agent count metadata
+ agent_count = len(getattr(self, "agent_history", {}))
+ status_text.append(" | ", style="white")
+ status_text.append("AGENTS: ", style="bold white")
+ status_text.append(f"{agent_count}", style="bold green")
+
+ status_text.append("\n\n", style="white")
+
+ # Director status
+ status_text.append("DIRECTOR STATUS: ", style="bold white")
+ if self.director_status == "INITIALIZING":
+ status_text.append(
+ f"{self._get_spinner()} {self.director_status}",
+ style="bold yellow",
+ )
+ elif self.director_status == "ACTIVE":
+ status_text.append(
+ f"β {self.director_status}", style="bold green"
+ )
+ elif self.director_status == "PROCESSING":
+ status_text.append(
+ f"{self._get_spinner()} {self.director_status}",
+ style="bold cyan",
+ )
+ else:
+ status_text.append(
+ f"β {self.director_status}", style="bold red"
+ )
+
+ status_text.append("\n\n", style="white")
+
+ # Runtime and completion information
+ if self.start_time:
+ runtime = time.time() - self.start_time
+ status_text.append("RUNTIME: ", style="bold white")
+ status_text.append(f"{runtime:.2f}s", style="bold green")
+
+ # Add completion percentage if loops are running
+ if self.max_loops > 0:
+ completion_percent = (
+ self.current_loop / self.max_loops
+ ) * 100
+ status_text.append(" | ", style="white")
+ status_text.append("PROGRESS: ", style="bold white")
+ status_text.append(
+ f"{completion_percent:.1f}%", style="bold cyan"
+ )
+
+ return Panel(
+ status_text,
+ border_style="red",
+ padding=(1, 2),
+ title="[bold white]OPERATIONS STATUS[/bold white]",
+ )
+
+ def _create_agents_table(self) -> Table:
+ """Create the agents monitoring table with full outputs and loop history."""
+ table = Table(
+ show_header=True,
+ header_style="bold white on red",
+ border_style="red",
+ title="[bold white]AGENT MONITORING MATRIX[/bold white]",
+ title_style="bold white",
+ show_lines=True,
+ )
+
+ table.add_column("AGENT ID", style="bold cyan", width=25)
+ table.add_column("LOOP", style="bold white", width=8)
+ table.add_column("STATUS", style="bold white", width=15)
+ table.add_column("TASK", style="white", width=40)
+ table.add_column("OUTPUT", style="white", width=150)
+
+ # Display agents with their history across loops
+ for agent_name, history in self.agent_history.items():
+ for loop_num in range(self.max_loops + 1):
+ loop_key = f"Loop_{loop_num}"
+
+ if loop_key in history:
+ loop_data = history[loop_key]
+ status = loop_data.get("status", "UNKNOWN")
+ task = loop_data.get("task", "N/A")
+ output = loop_data.get("output", "")
+
+ # Style status
+ if status == "RUNNING":
+ status_display = (
+ f"{self._get_spinner()} {status}"
+ )
+ status_style = "bold yellow"
+ elif status == "COMPLETED":
+ status_display = f"β {status}"
+ status_style = "bold green"
+ elif status == "PENDING":
+ status_display = f"β {status}"
+ status_style = "bold red"
+ else:
+ status_display = f"β {status}"
+ status_style = "bold red"
+
+ # Show full output without truncation
+ output_display = output if output else "No output"
+
+ table.add_row(
+ Text(agent_name, style="bold cyan"),
+ Text(f"Loop {loop_num}", style="bold white"),
+ Text(status_display, style=status_style),
+ Text(task, style="white"),
+ Text(output_display, style="white"),
+ )
+
+ return table
+
+ def _create_detailed_agents_view(self) -> Panel:
+ """Create a detailed view of agents with full outputs and loop history."""
+ detailed_text = Text()
+
+ for agent_name, history in self.agent_history.items():
+ detailed_text.append(
+ f"AGENT: {agent_name}\n", style="bold cyan"
+ )
+ detailed_text.append("=" * 80 + "\n", style="red")
+
+ for loop_num in range(self.max_loops + 1):
+ loop_key = f"Loop_{loop_num}"
+
+ if loop_key in history:
+ loop_data = history[loop_key]
+ status = loop_data.get("status", "UNKNOWN")
+ task = loop_data.get("task", "N/A")
+ output = loop_data.get("output", "")
+
+ detailed_text.append(
+ f"LOOP {loop_num}:\n", style="bold white"
+ )
+ detailed_text.append(
+ f"STATUS: {status}\n", style="bold white"
+ )
+ detailed_text.append(
+ f"TASK: {task}\n", style="white"
+ )
+ detailed_text.append(
+ "OUTPUT:\n", style="bold white"
+ )
+ detailed_text.append(f"{output}\n", style="white")
+ detailed_text.append("β" * 80 + "\n", style="red")
+
+ return Panel(
+ detailed_text,
+ border_style="red",
+ padding=(1, 2),
+ title="[bold white]DETAILED AGENT OUTPUTS (FULL HISTORY)[/bold white]",
+ )
+
+ def _create_director_panel(self) -> Panel:
+ """Create the director information panel showing plan and orders."""
+ director_text = Text()
+
+ # Plan section
+ director_text.append("DIRECTOR PLAN:\n", style="bold white")
+ if self.director_plan:
+ director_text.append(self.director_plan, style="white")
+ else:
+ director_text.append(
+ "No plan available", style="dim white"
+ )
+
+ director_text.append("\n\n", style="white")
+
+ # Orders section
+ director_text.append("CURRENT ORDERS:\n", style="bold white")
+ if self.director_orders:
+ for i, order in enumerate(
+ self.director_orders[:5]
+ ): # Show only the first 5 orders
+ director_text.append(f"{i+1}. ", style="bold cyan")
+ director_text.append(
+ f"{order.get('agent_name', 'Unknown')}: ",
+ style="bold white",
+ )
+ task = order.get("task", "No task")
+ director_text.append(task, style="white")
+ director_text.append("\n", style="white")
+
+ if len(self.director_orders) > 5:
+ director_text.append(
+ f"... and {len(self.director_orders) - 5} more orders",
+ style="dim white",
+ )
+ else:
+ director_text.append(
+ "No orders available", style="dim white"
+ )
+
+ return Panel(
+ director_text,
+ border_style="red",
+ padding=(1, 2),
+ title="[bold white]DIRECTOR OPERATIONS[/bold white]",
+ )
+
+ def _create_dashboard_layout(self) -> Layout:
+ """Create the complete dashboard layout."""
+ layout = Layout()
+
+ # Split into operations status, director operations, and agents
+ layout.split_column(
+ Layout(name="operations_status", size=12),
+ Layout(name="director_operations", size=12),
+ Layout(name="agents", ratio=1),
+ )
+
+ # Add content to each section
+ layout["operations_status"].update(
+ self._create_status_panel()
+ )
+ layout["director_operations"].update(
+ self._create_director_panel()
+ )
+
+ # Choose between table view and detailed view
+ if self.detailed_view:
+ layout["agents"].update(
+ self._create_detailed_agents_view()
+ )
+ else:
+ layout["agents"].update(
+ Panel(
+ self._create_agents_table(),
+ border_style="red",
+ padding=(1, 1),
+ )
+ )
+
+ return layout
+
+ def start(self, max_loops: int = 1):
+ """Start the dashboard display."""
+ self.max_loops = max_loops
+ self.start_time = time.time()
+ self.is_active = True
+
+ self.live_display = Live(
+ self._create_dashboard_layout(),
+ console=self.console,
+ refresh_per_second=10,
+ transient=False,
+ )
+ self.live_display.start()
+
+ def update_agent_status(
+ self,
+ agent_name: str,
+ status: str,
+ task: str = "",
+ output: str = "",
+ ):
+ """Update the status of a specific agent."""
+ # Create loop key for tracking history
+ loop_key = f"Loop_{self.current_loop}"
+
+ # Initialize agent history if not exists
+ if agent_name not in self.agent_history:
+ self.agent_history[agent_name] = {}
+
+ # Store current status and add to history
+ self.agent_statuses[agent_name] = {
+ "status": status,
+ "task": task,
+ "output": output,
+ }
+
+ # Add to history for this loop
+ self.agent_history[agent_name][loop_key] = {
+ "status": status,
+ "task": task,
+ "output": output,
+ }
+
+ if self.live_display and self.is_active:
+ self.live_display.update(self._create_dashboard_layout())
+
+ def update_director_status(self, status: str):
+ """Update the director status."""
+ self.director_status = status
+ if self.live_display and self.is_active:
+ self.live_display.update(self._create_dashboard_layout())
+
+ def update_loop(self, current_loop: int):
+ """Update the current execution loop."""
+ self.current_loop = current_loop
+ if self.live_display and self.is_active:
+ self.live_display.update(self._create_dashboard_layout())
+
+ def update_director_plan(self, plan: str):
+ """Update the director's plan."""
+ self.director_plan = plan
+ if self.live_display and self.is_active:
+ self.live_display.update(self._create_dashboard_layout())
+
+ def update_director_orders(self, orders: list):
+ """Update the director's orders."""
+ self.director_orders = orders
+ if self.live_display and self.is_active:
+ self.live_display.update(self._create_dashboard_layout())
+
+ def stop(self):
+ """Stop the dashboard display."""
+ self.is_active = False
+ if self.live_display:
+ self.live_display.stop()
+ self.console.print()
+
+ def update_swarm_info(
+ self,
+ name: str,
+ description: str,
+ max_loops: int,
+ director_name: str,
+ director_model_name: str,
+ ):
+ """Update the dashboard with swarm-specific information."""
+ self.swarm_name = name
+ self.swarm_description = description
+ self.max_loops = max_loops
+ self.director_name = director_name
+ self.director_model_name = director_model_name
+ if self.live_display and self.is_active:
+ self.live_display.update(self._create_dashboard_layout())
+
+ def force_refresh(self):
+ """Force refresh the dashboard display."""
+ if self.live_display and self.is_active:
+ self.live_display.update(self._create_dashboard_layout())
+
+ def show_full_output(self, agent_name: str, full_output: str):
+ """Display full agent output in a separate panel."""
+ if self.live_display and self.is_active:
+ # Create a full output panel
+ output_panel = Panel(
+ Text(full_output, style="white"),
+ title=f"[bold white]FULL OUTPUT - {agent_name}[/bold white]",
+ border_style="red",
+ padding=(1, 2),
+ width=120,
+ )
+
+ # Temporarily show the full output
+ self.console.print(output_panel)
+ self.console.print() # Add spacing
+
+ def toggle_detailed_view(self):
+ """Toggle between table view and detailed view."""
+ self.detailed_view = not self.detailed_view
+ if self.live_display and self.is_active:
+ self.live_display.update(self._create_dashboard_layout())
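
To make the dashboard's API concrete, here is a small hedged sketch of driving it by hand. In practice `HierarchicalSwarm` performs these calls internally when `interactive=True`; the names, statuses, and values below are illustrative only:

```python
from swarms.structs.hiearchical_swarm import HierarchicalSwarmDashboard

# Illustrative driver for HierarchicalSwarmDashboard (normally called by the swarm).
dashboard = HierarchicalSwarmDashboard(swarm_name="Research-Swarm")

dashboard.update_swarm_info(
    name="Research-Swarm",
    description="Two-agent research pipeline",  # assumed description
    max_loops=2,
    director_name="Director",
    director_model_name="gpt-4o-mini",
)

dashboard.start(max_loops=2)
dashboard.update_loop(1)
dashboard.update_director_status("PROCESSING")
dashboard.update_director_plan("1) Gather sources  2) Summarize findings")
dashboard.update_director_orders(
    [{"agent_name": "Research-Agent", "task": "Gather three recent sources"}]
)

dashboard.update_agent_status(
    "Research-Agent", "RUNNING", "Gather three recent sources", "Working..."
)
dashboard.update_agent_status(
    "Research-Agent", "COMPLETED", "Gather three recent sources", "Found 3 sources"
)

dashboard.update_director_status("COMPLETED")
dashboard.stop()
```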
class HierarchicalOrder(BaseModel):
@@ -71,6 +583,25 @@ class HierarchicalOrder(BaseModel):
)
+class HierarchicalOrderRearrange(BaseModel):
+ """
+ Represents a rearrange-style task assignment within the hierarchical swarm.
+
+ This class defines the structure the director uses when orchestrating agents
+ via agent rearrange: it captures the initial task together with the flow of
+ communication that describes which agents talk to each other and in what order.
+ """
+
+ initial_task: str = Field(
+ ...,
+ description="The initial task that the director has to execute.",
+ )
+ flow_of_communication: str = Field(
+ ...,
+ description="How the agents will communicate with each other to accomplish the task. Like agent_one -> agent_two -> agent_three -> agent_four -> agent_one, can use comma signs to denote sequential communication and commas to denote parallel communication for example agent_one -> agent_two, agent_three -> agent_four",
+ )
+
+
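
As a quick illustration of the expected flow string (the agent names below are placeholders, not part of this change): sequential hops are written with `->`, while commas separate branches that run in parallel.

```python
# Illustrative only: one sequential chain plus a parallel branch.
order = HierarchicalOrderRearrange(
    initial_task="Draft and fact-check a market research brief",
    flow_of_communication=(
        "research_agent -> writer_agent, fact_checker_agent -> editor_agent"
    ),
)
print(order.flow_of_communication)
```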
class SwarmSpec(BaseModel):
"""
Defines the complete specification for a hierarchical swarm execution.
@@ -86,10 +617,20 @@ class SwarmSpec(BaseModel):
individual agents within the swarm.
"""
+ # # thoughts: str = Field(
+ # # ...,
+ # # description="A plan generated by the director agent for the swarm to accomplish the given task, where the director autonomously reasons through the problem, devises its own strategy, and determines the sequence of actions. "
+ # # "This plan reflects the director's independent thought process, outlining the rationale, priorities, and steps it deems necessary for successful execution. "
+ # # "It serves as a blueprint for the swarm, enabling agents to follow the director's self-derived guidance and adapt as needed throughout the process.",
+ # )
+
plan: str = Field(
...,
- description="Outlines the sequence of actions to be taken by the swarm. This plan is a detailed roadmap that guides the swarm's behavior and decision-making.",
+ description="A plan generated by the director agent for the swarm to accomplish the given task, where the director autonomously reasons through the problem, devises its own strategy, and determines the sequence of actions. "
+ "This plan reflects the director's independent thought process, outlining the rationale, priorities, and steps it deems necessary for successful execution. "
+ "It serves as a blueprint for the swarm, enabling agents to follow the director's self-derived guidance and adapt as needed throughout the process.",
)
+
orders: List[HierarchicalOrder] = Field(
...,
description="A collection of task assignments to specific agents within the swarm. These orders are the specific instructions that guide the agents in their task execution and are a key element in the swarm's plan.",
@@ -143,6 +684,11 @@ class HierarchicalSwarm:
Union[Agent, Callable, Any]
] = None,
director_feedback_on: bool = True,
+ interactive: bool = False,
+ director_system_prompt: str = HIEARCHICAL_SWARM_SYSTEM_PROMPT,
+ director_reasoning_model_name: str = "o3-mini",
+ director_reasoning_enabled: bool = True,
+ multi_agent_prompt_improvements: bool = False,
*args,
**kwargs,
):
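
A hedged construction sketch exercising the new keyword arguments added in this hunk. The pre-existing arguments (`name`, `description`, `agents`, `max_loops`) are assumed to keep their current signatures, and the worker agents and model names are illustrative:

```python
from swarms.structs.agent import Agent
from swarms.structs.hiearchical_swarm import HierarchicalSwarm

research_agent = Agent(
    agent_name="Research-Agent",
    model_name="gpt-4o-mini",  # assumed model
    max_loops=1,
)
writer_agent = Agent(
    agent_name="Writer-Agent",
    model_name="gpt-4o-mini",  # assumed model
    max_loops=1,
)

# Sketch only: highlights the new flags introduced by this change.
swarm = HierarchicalSwarm(
    name="Research-Swarm",
    description="Director-led research pipeline",
    agents=[research_agent, writer_agent],
    max_loops=2,
    interactive=True,                         # enables the live dashboard
    director_reasoning_enabled=True,          # run the reasoning pre-pass before the director
    director_reasoning_model_name="o3-mini",
    multi_agent_prompt_improvements=True,     # append the collaboration prompt to workers
)
```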
@@ -187,9 +733,79 @@ class HierarchicalSwarm:
self.add_collaboration_prompt = add_collaboration_prompt
self.planning_director_agent = planning_director_agent
self.director_feedback_on = director_feedback_on
+ self.interactive = interactive
+ self.director_system_prompt = director_system_prompt
+ self.director_reasoning_model_name = (
+ director_reasoning_model_name
+ )
+ self.director_reasoning_enabled = director_reasoning_enabled
+ self.multi_agent_prompt_improvements = (
+ multi_agent_prompt_improvements
+ )
+
+ if self.interactive:
+ self.agents_no_print()
+
+ # Initialize dashboard if interactive mode is enabled
+ self.dashboard = None
+ if self.interactive:
+ self.dashboard = HierarchicalSwarmDashboard(self.name)
+ # Enable detailed view for better output visibility
+ self.dashboard.detailed_view = True
+ # Pass additional swarm information to dashboard
+ self.dashboard.update_swarm_info(
+ name=self.name,
+ description=self.description,
+ max_loops=self.max_loops,
+ director_name=self.director_name,
+ director_model_name=self.director_model_name,
+ )
self.init_swarm()
+ def list_worker_agents(self) -> str:
+ return list_all_agents(
+ agents=self.agents,
+ add_to_conversation=False,
+ )
+
+ def prepare_worker_agents(self):
+ for agent in self.agents:
+ prompt = (
+ MULTI_AGENT_COLLAB_PROMPT_TWO
+ + self.list_worker_agents()
+ )
+ if hasattr(agent, "system_prompt"):
+ agent.system_prompt += prompt
+ else:
+ agent.system_prompt = prompt
+
+ def reasoning_agent_run(
+ self, task: str, img: Optional[str] = None
+ ):
+ """
+ Run a reasoning agent to analyze the task before the main director processes it.
+
+ Args:
+ task (str): The task to reason about
+ img (Optional[str]): Optional image input
+
+ Returns:
+ str: The reasoning output from the agent
+ """
+ agent = Agent(
+ agent_name=self.director_name,
+ agent_description=f"You're the {self.director_name} agent that is responsible for reasoning about the task and creating a plan for the swarm to accomplish the task.",
+ model_name=self.director_reasoning_model_name,
+ system_prompt=INTERNAL_MONOLGUE_PROMPT
+ + self.director_system_prompt,
+ max_loops=1,
+ )
+
+ prompt = f"Conversation History: {self.conversation.get_str()} \n\n Task: {task}"
+
+ return agent.run(task=prompt, img=img)
+
def init_swarm(self):
"""
Initialize the swarm with proper configuration and validation.
@@ -216,11 +832,27 @@ class HierarchicalSwarm:
self.add_context_to_director()
+ # Initialize agent statuses in dashboard if interactive mode
+ if self.interactive and self.dashboard:
+ for agent in self.agents:
+ if hasattr(agent, "agent_name"):
+ self.dashboard.update_agent_status(
+ agent.agent_name,
+ "PENDING",
+ "Awaiting task assignment",
+ "Ready for deployment",
+ )
+ # Force refresh to ensure agents are displayed
+ self.dashboard.force_refresh()
+
if self.verbose:
logger.success(
f"β
HierarchicalSwarm: {self.name} initialized successfully."
)
+ if self.multi_agent_prompt_improvements:
+ self.prepare_worker_agents()
+
def add_context_to_director(self):
"""
Add agent context and collaboration information to the director's conversation.
@@ -282,6 +914,7 @@ class HierarchicalSwarm:
return Agent(
agent_name=self.director_name,
agent_description="A director agent that can create a plan and distribute orders to agents",
+ system_prompt=self.director_system_prompt,
model_name=self.director_model_name,
max_loops=1,
base_model=SwarmSpec,
@@ -333,6 +966,10 @@ class HierarchicalSwarm:
error_msg = f"β Failed to setup director: {str(e)}\nπ Traceback: {traceback.format_exc()}\nπ If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues"
logger.error(error_msg)
+ def agents_no_print(self):
+ for agent in self.agents:
+ agent.print_on = False
+
def run_director(
self,
task: str,
@@ -358,9 +995,7 @@ class HierarchicalSwarm:
"""
try:
if self.verbose:
- logger.info(
- f"π― Running director with task: {task[:100]}..."
- )
+ logger.info(f"π― Running director with task: {task}")
if self.planning_director_agent is not None:
plan = self.planning_director_agent.run(
@@ -370,6 +1005,12 @@ class HierarchicalSwarm:
task += plan
+ if self.director_reasoning_enabled:
+ reasoning_output = self.reasoning_agent_run(
+ task=task, img=img
+ )
+ task += f"\n\n Reasoning: {reasoning_output}"
+
# Run the director with the context
function_call = self.director.run(
task=f"History: {self.conversation.get_str()} \n\n Task: {task}",
@@ -391,6 +1032,7 @@ class HierarchicalSwarm:
except Exception as e:
error_msg = f"β Failed to setup director: {str(e)}\nπ Traceback: {traceback.format_exc()}\nπ If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues"
logger.error(error_msg)
+ raise e
def step(self, task: str, img: str = None, *args, **kwargs):
"""
@@ -417,9 +1059,13 @@ class HierarchicalSwarm:
try:
if self.verbose:
logger.info(
- f"π£ Executing single step for task: {task[:100]}..."
+ f"π£ Executing single step for task: {task}"
)
+ # Update dashboard for director execution
+ if self.interactive and self.dashboard:
+ self.dashboard.update_director_status("PLANNING")
+
output = self.run_director(task=task, img=img)
# Parse the orders
@@ -430,6 +1076,20 @@ class HierarchicalSwarm:
f"π Parsed plan and {len(orders)} orders"
)
+ # Update dashboard with plan and orders information
+ if self.interactive and self.dashboard:
+ self.dashboard.update_director_plan(plan)
+ # Convert orders to list of dicts for dashboard
+ orders_list = [
+ {
+ "agent_name": order.agent_name,
+ "task": order.task,
+ }
+ for order in orders
+ ]
+ self.dashboard.update_director_orders(orders_list)
+ self.dashboard.update_director_status("EXECUTING")
+
# Execute the orders
outputs = self.execute_orders(orders)
@@ -450,7 +1110,13 @@ class HierarchicalSwarm:
error_msg = f"β Failed to setup director: {str(e)}\nπ Traceback: {traceback.format_exc()}\nπ If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues"
logger.error(error_msg)
- def run(self, task: str, img: str = None, *args, **kwargs):
+ def run(
+ self,
+ task: Optional[str] = None,
+ img: Optional[str] = None,
+ *args,
+ **kwargs,
+ ):
"""
Execute the hierarchical swarm for the specified number of feedback loops.
@@ -462,7 +1128,8 @@ class HierarchicalSwarm:
context from previous iterations to subsequent ones.
Args:
- task (str): The initial task to be processed by the swarm.
+ task (str, optional): The initial task to be processed by the swarm.
+ If None and interactive mode is enabled, will prompt for input.
img (str, optional): Optional image input for the agents.
*args: Additional positional arguments.
**kwargs: Additional keyword arguments.
@@ -475,9 +1142,23 @@ class HierarchicalSwarm:
Exception: If swarm execution fails.
"""
try:
+ # Handle interactive mode task input
+ if task is None and self.interactive:
+ task = self._get_interactive_task()
+
+ if task is None:
+ raise ValueError(
+ "Task is required for swarm execution"
+ )
+
current_loop = 0
last_output = None
+ # Start dashboard if in interactive mode
+ if self.interactive and self.dashboard:
+ self.dashboard.start(self.max_loops)
+ self.dashboard.update_director_status("ACTIVE")
+
if self.verbose:
logger.info(
f"π Starting hierarchical swarm run: {self.name}"
@@ -492,6 +1173,13 @@ class HierarchicalSwarm:
f"π Loop {current_loop + 1}/{self.max_loops} - Processing task"
)
+ # Update dashboard loop counter
+ if self.interactive and self.dashboard:
+ self.dashboard.update_loop(current_loop + 1)
+ self.dashboard.update_director_status(
+ "PROCESSING"
+ )
+
# For the first loop, use the original task.
# For subsequent loops, use the feedback from the previous loop as context.
if current_loop == 0:
@@ -527,6 +1215,11 @@ class HierarchicalSwarm:
content=f"--- Loop {current_loop}/{self.max_loops} completed ---",
)
+ # Stop dashboard if in interactive mode
+ if self.interactive and self.dashboard:
+ self.dashboard.update_director_status("COMPLETED")
+ self.dashboard.stop()
+
if self.verbose:
logger.success(
f"π Hierarchical swarm run completed: {self.name}"
@@ -540,9 +1233,32 @@ class HierarchicalSwarm:
)
except Exception as e:
+ # Stop dashboard on error
+ if self.interactive and self.dashboard:
+ self.dashboard.update_director_status("ERROR")
+ self.dashboard.stop()
+
error_msg = f"β Failed to setup director: {str(e)}\nπ Traceback: {traceback.format_exc()}\nπ If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues"
logger.error(error_msg)
+ def _get_interactive_task(self) -> str:
+ """
+ Get task input from user in interactive mode.
+
+ Returns:
+ str: The task input from the user
+ """
+ if self.dashboard:
+ self.dashboard.console.print(
+ "\n[bold red]SWARMS CORPORATION[/bold red] - [bold white]TASK INPUT REQUIRED[/bold white]"
+ )
+ self.dashboard.console.print(
+ "[bold cyan]Enter your task for the hierarchical swarm:[/bold cyan]"
+ )
+
+ task = input("> ")
+ return task.strip()
+
def feedback_director(self, outputs: list):
"""
Generate feedback from the director based on agent outputs.
@@ -646,6 +1362,12 @@ class HierarchicalSwarm:
f"Agent '{agent_name}' not found in swarm. Available agents: {available_agents}"
)
+ # Update dashboard for agent execution
+ if self.interactive and self.dashboard:
+ self.dashboard.update_agent_status(
+ agent_name, "RUNNING", task, "Executing task..."
+ )
+
output = agent.run(
task=f"History: {self.conversation.get_str()} \n\n Task: {task}",
*args,
@@ -661,6 +1383,12 @@ class HierarchicalSwarm:
return output
except Exception as e:
+ # Update dashboard with error status
+ if self.interactive and self.dashboard:
+ self.dashboard.update_agent_status(
+ agent_name, "ERROR", task, f"Error: {str(e)}"
+ )
+
error_msg = f"β Failed to setup director: {str(e)}\nπ Traceback: {traceback.format_exc()}\nπ If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues"
logger.error(error_msg)
@@ -805,8 +1533,9 @@ class HierarchicalSwarm:
)
except Exception as e:
- error_msg = f"β Failed to setup director: {str(e)}\nπ Traceback: {traceback.format_exc()}\nπ If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues"
+ error_msg = f"β Failed to parse orders: {str(e)}\nπ Traceback: {traceback.format_exc()}\nπ If this issue persists, please report it at: https://github.com/kyegomez/swarms/issues"
logger.error(error_msg)
+ raise e
def execute_orders(self, orders: list):
"""
@@ -836,9 +1565,31 @@ class HierarchicalSwarm:
f"π Executing order {i+1}/{len(orders)}: {order.agent_name}"
)
+ # Update dashboard for agent execution
+ if self.interactive and self.dashboard:
+ self.dashboard.update_agent_status(
+ order.agent_name,
+ "RUNNING",
+ order.task,
+ "Processing...",
+ )
+
output = self.call_single_agent(
order.agent_name, order.task
)
+
+ # Update dashboard with completed status
+ if self.interactive and self.dashboard:
+ # Always show full output without truncation
+ output_display = str(output)
+
+ self.dashboard.update_agent_status(
+ order.agent_name,
+ "COMPLETED",
+ order.task,
+ output_display,
+ )
+
outputs.append(output)
if self.verbose:
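
Putting the run-path changes together, a minimal end-to-end sketch. Everything here other than the parameters shown in the diff above is an assumption; the reasoning pre-pass, dashboard wiring, and interactive prompt are all handled internally:

```python
from swarms.structs.agent import Agent
from swarms.structs.hiearchical_swarm import HierarchicalSwarm

analyst_agent = Agent(
    agent_name="Analyst-Agent",
    model_name="gpt-4o-mini",  # assumed model
    max_loops=1,
)
writer_agent = Agent(
    agent_name="Writer-Agent",
    model_name="gpt-4o-mini",  # assumed model
    max_loops=1,
)

# Sketch: a non-interactive run supplies the task directly; with interactive=True
# and task=None the swarm prompts for the task and renders the live dashboard.
swarm = HierarchicalSwarm(
    name="Report-Swarm",
    description="Director plus two workers",
    agents=[analyst_agent, writer_agent],
    max_loops=1,
    director_reasoning_enabled=False,  # skip the reasoning pre-pass
    interactive=False,
)

result = swarm.run(task="Summarize this quarter's sales figures")

# Interactive variant: the dashboard starts and the user is prompted for the task.
# interactive_swarm = HierarchicalSwarm(..., interactive=True)
# interactive_swarm.run()  # task=None triggers _get_interactive_task()
```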
diff --git a/tests/structs/test_board_of_directors_swarm.py b/tests/structs/test_board_of_directors_swarm.py
index b87e563c..cd85b81e 100644
--- a/tests/structs/test_board_of_directors_swarm.py
+++ b/tests/structs/test_board_of_directors_swarm.py
@@ -15,8 +15,7 @@ The test suite follows the Swarms testing philosophy:
import os
import pytest
import asyncio
-from unittest.mock import Mock, patch, MagicMock, AsyncMock
-from typing import List, Dict, Any, Optional
+from unittest.mock import Mock, patch, AsyncMock
from swarms.structs.board_of_directors_swarm import (
BoardOfDirectorsSwarm,
@@ -28,7 +27,6 @@ from swarms.structs.board_of_directors_swarm import (
BoardSpec,
)
from swarms.structs.agent import Agent
-from swarms.structs.conversation import Conversation
# Test fixtures
@@ -50,7 +48,7 @@ def mock_board_member(mock_agent):
agent=mock_agent,
role=BoardMemberRole.CHAIRMAN,
voting_weight=1.5,
- expertise_areas=["leadership", "strategy"]
+ expertise_areas=["leadership", "strategy"],
)
@@ -70,18 +68,22 @@ def sample_agents():
@pytest.fixture
def sample_board_members(sample_agents):
"""Create sample board members for testing."""
- roles = [BoardMemberRole.CHAIRMAN, BoardMemberRole.VICE_CHAIRMAN, BoardMemberRole.SECRETARY]
+ roles = [
+ BoardMemberRole.CHAIRMAN,
+ BoardMemberRole.VICE_CHAIRMAN,
+ BoardMemberRole.SECRETARY,
+ ]
board_members = []
-
+
for i, (agent, role) in enumerate(zip(sample_agents, roles)):
board_member = BoardMember(
agent=agent,
role=role,
voting_weight=1.0 + (i * 0.2),
- expertise_areas=[f"expertise_{i+1}"]
+ expertise_areas=[f"expertise_{i+1}"],
)
board_members.append(board_member)
-
+
return board_members
@@ -92,7 +94,7 @@ def basic_board_swarm(sample_agents):
name="TestBoard",
agents=sample_agents,
verbose=False,
- max_loops=1
+ max_loops=1,
)
@@ -109,14 +111,14 @@ def configured_board_swarm(sample_agents, sample_board_members):
decision_threshold=0.7,
enable_voting=True,
enable_consensus=True,
- max_workers=4
+ max_workers=4,
)
# Unit tests for enums and data models
class TestBoardMemberRole:
"""Test BoardMemberRole enum."""
-
+
def test_enum_values(self):
"""Test that all enum values are correctly defined."""
assert BoardMemberRole.CHAIRMAN == "chairman"
@@ -124,61 +126,67 @@ class TestBoardMemberRole:
assert BoardMemberRole.SECRETARY == "secretary"
assert BoardMemberRole.TREASURER == "treasurer"
assert BoardMemberRole.MEMBER == "member"
- assert BoardMemberRole.EXECUTIVE_DIRECTOR == "executive_director"
+ assert (
+ BoardMemberRole.EXECUTIVE_DIRECTOR == "executive_director"
+ )
class TestBoardDecisionType:
"""Test BoardDecisionType enum."""
-
+
def test_enum_values(self):
"""Test that all enum values are correctly defined."""
assert BoardDecisionType.UNANIMOUS == "unanimous"
assert BoardDecisionType.MAJORITY == "majority"
assert BoardDecisionType.CONSENSUS == "consensus"
- assert BoardDecisionType.CHAIRMAN_DECISION == "chairman_decision"
+ assert (
+ BoardDecisionType.CHAIRMAN_DECISION == "chairman_decision"
+ )
class TestBoardMember:
"""Test BoardMember dataclass."""
-
+
def test_board_member_creation(self, mock_agent):
"""Test creating a board member."""
board_member = BoardMember(
agent=mock_agent,
role=BoardMemberRole.CHAIRMAN,
voting_weight=1.5,
- expertise_areas=["leadership", "strategy"]
+ expertise_areas=["leadership", "strategy"],
)
-
+
assert board_member.agent == mock_agent
assert board_member.role == BoardMemberRole.CHAIRMAN
assert board_member.voting_weight == 1.5
- assert board_member.expertise_areas == ["leadership", "strategy"]
-
+ assert board_member.expertise_areas == [
+ "leadership",
+ "strategy",
+ ]
+
def test_board_member_defaults(self, mock_agent):
"""Test board member with default values."""
board_member = BoardMember(
- agent=mock_agent,
- role=BoardMemberRole.MEMBER
+ agent=mock_agent, role=BoardMemberRole.MEMBER
)
-
+
assert board_member.voting_weight == 1.0
assert board_member.expertise_areas == []
-
+
def test_board_member_post_init(self, mock_agent):
"""Test board member post-init with None expertise areas."""
board_member = BoardMember(
agent=mock_agent,
role=BoardMemberRole.MEMBER,
- expertise_areas=None
+ expertise_areas=None,
)
-
+
assert board_member.expertise_areas == []
class TestBoardOrder:
"""Test BoardOrder model."""
-
+
def test_board_order_creation(self):
"""Test creating a board order."""
order = BoardOrder(
@@ -186,26 +194,23 @@ class TestBoardOrder:
task="Test task",
priority=1,
deadline="2024-01-01",
- assigned_by="Chairman"
+ assigned_by="Chairman",
)
-
+
assert order.agent_name == "TestAgent"
assert order.task == "Test task"
assert order.priority == 1
assert order.deadline == "2024-01-01"
assert order.assigned_by == "Chairman"
-
+
def test_board_order_defaults(self):
"""Test board order with default values."""
- order = BoardOrder(
- agent_name="TestAgent",
- task="Test task"
- )
-
+ order = BoardOrder(agent_name="TestAgent", task="Test task")
+
assert order.priority == 3
assert order.deadline is None
assert order.assigned_by == "Board of Directors"
-
+
def test_board_order_validation(self):
"""Test board order validation."""
# Test priority validation
@@ -213,20 +218,20 @@ class TestBoardOrder:
BoardOrder(
agent_name="TestAgent",
task="Test task",
- priority=0 # Invalid priority
+ priority=0, # Invalid priority
)
-
+
with pytest.raises(ValueError):
BoardOrder(
agent_name="TestAgent",
task="Test task",
- priority=6 # Invalid priority
+ priority=6, # Invalid priority
)
class TestBoardDecision:
"""Test BoardDecision model."""
-
+
def test_board_decision_creation(self):
"""Test creating a board decision."""
decision = BoardDecision(
@@ -235,23 +240,26 @@ class TestBoardDecision:
votes_for=3,
votes_against=1,
abstentions=0,
- reasoning="The proposal aligns with our strategic goals"
+ reasoning="The proposal aligns with our strategic goals",
)
-
+
assert decision.decision_type == BoardDecisionType.MAJORITY
assert decision.decision == "Approve the proposal"
assert decision.votes_for == 3
assert decision.votes_against == 1
assert decision.abstentions == 0
- assert decision.reasoning == "The proposal aligns with our strategic goals"
-
+ assert (
+ decision.reasoning
+ == "The proposal aligns with our strategic goals"
+ )
+
def test_board_decision_defaults(self):
"""Test board decision with default values."""
decision = BoardDecision(
decision_type=BoardDecisionType.CONSENSUS,
- decision="Test decision"
+ decision="Test decision",
)
-
+
assert decision.votes_for == 0
assert decision.votes_against == 0
assert decision.abstentions == 0
@@ -260,39 +268,36 @@ class TestBoardDecision:
class TestBoardSpec:
"""Test BoardSpec model."""
-
+
def test_board_spec_creation(self):
"""Test creating a board spec."""
orders = [
BoardOrder(agent_name="Agent1", task="Task 1"),
- BoardOrder(agent_name="Agent2", task="Task 2")
+ BoardOrder(agent_name="Agent2", task="Task 2"),
]
decisions = [
BoardDecision(
decision_type=BoardDecisionType.MAJORITY,
- decision="Decision 1"
+ decision="Decision 1",
)
]
-
+
spec = BoardSpec(
plan="Test plan",
orders=orders,
decisions=decisions,
- meeting_summary="Test meeting summary"
+ meeting_summary="Test meeting summary",
)
-
+
assert spec.plan == "Test plan"
assert len(spec.orders) == 2
assert len(spec.decisions) == 1
assert spec.meeting_summary == "Test meeting summary"
-
+
def test_board_spec_defaults(self):
"""Test board spec with default values."""
- spec = BoardSpec(
- plan="Test plan",
- orders=[]
- )
-
+ spec = BoardSpec(plan="Test plan", orders=[])
+
assert spec.decisions == []
assert spec.meeting_summary == ""
@@ -300,21 +305,22 @@ class TestBoardSpec:
# Unit tests for BoardOfDirectorsSwarm
class TestBoardOfDirectorsSwarmInitialization:
"""Test BoardOfDirectorsSwarm initialization."""
-
+
def test_basic_initialization(self, sample_agents):
"""Test basic swarm initialization."""
swarm = BoardOfDirectorsSwarm(
- name="TestSwarm",
- agents=sample_agents
+ name="TestSwarm", agents=sample_agents
)
-
+
assert swarm.name == "TestSwarm"
assert len(swarm.agents) == 3
assert swarm.max_loops == 1
assert swarm.verbose is False
assert swarm.decision_threshold == 0.6
-
- def test_configured_initialization(self, sample_agents, sample_board_members):
+
+ def test_configured_initialization(
+ self, sample_agents, sample_board_members
+ ):
"""Test configured swarm initialization."""
swarm = BoardOfDirectorsSwarm(
name="ConfiguredSwarm",
@@ -326,9 +332,9 @@ class TestBoardOfDirectorsSwarmInitialization:
decision_threshold=0.8,
enable_voting=False,
enable_consensus=False,
- max_workers=8
+ max_workers=8,
)
-
+
assert swarm.name == "ConfiguredSwarm"
assert swarm.description == "Test description"
assert len(swarm.board_members) == 3
@@ -339,121 +345,159 @@ class TestBoardOfDirectorsSwarmInitialization:
assert swarm.enable_voting is False
assert swarm.enable_consensus is False
assert swarm.max_workers == 8
-
+
def test_default_board_setup(self, sample_agents):
"""Test default board setup when no board members provided."""
swarm = BoardOfDirectorsSwarm(agents=sample_agents)
-
+
assert len(swarm.board_members) == 3
assert swarm.board_members[0].role == BoardMemberRole.CHAIRMAN
- assert swarm.board_members[1].role == BoardMemberRole.VICE_CHAIRMAN
- assert swarm.board_members[2].role == BoardMemberRole.SECRETARY
-
+ assert (
+ swarm.board_members[1].role
+ == BoardMemberRole.VICE_CHAIRMAN
+ )
+ assert (
+ swarm.board_members[2].role == BoardMemberRole.SECRETARY
+ )
+
def test_initialization_without_agents(self):
"""Test initialization without agents should raise error."""
- with pytest.raises(ValueError, match="No agents found in the swarm"):
+ with pytest.raises(
+ ValueError, match="No agents found in the swarm"
+ ):
BoardOfDirectorsSwarm(agents=[])
-
- def test_initialization_with_invalid_max_loops(self, sample_agents):
+
+ def test_initialization_with_invalid_max_loops(
+ self, sample_agents
+ ):
"""Test initialization with invalid max_loops."""
- with pytest.raises(ValueError, match="Max loops must be greater than 0"):
+ with pytest.raises(
+ ValueError, match="Max loops must be greater than 0"
+ ):
BoardOfDirectorsSwarm(agents=sample_agents, max_loops=0)
-
- def test_initialization_with_invalid_decision_threshold(self, sample_agents):
+
+ def test_initialization_with_invalid_decision_threshold(
+ self, sample_agents
+ ):
"""Test initialization with invalid decision threshold."""
- with pytest.raises(ValueError, match="Decision threshold must be between 0.0 and 1.0"):
- BoardOfDirectorsSwarm(agents=sample_agents, decision_threshold=1.5)
+ with pytest.raises(
+ ValueError,
+ match="Decision threshold must be between 0.0 and 1.0",
+ ):
+ BoardOfDirectorsSwarm(
+ agents=sample_agents, decision_threshold=1.5
+ )
class TestBoardOfDirectorsSwarmMethods:
"""Test BoardOfDirectorsSwarm methods."""
-
+
def test_setup_default_board(self, sample_agents):
"""Test default board setup."""
swarm = BoardOfDirectorsSwarm(agents=sample_agents)
-
+
assert len(swarm.board_members) == 3
- assert all(hasattr(member.agent, 'agent_name') for member in swarm.board_members)
- assert all(hasattr(member.agent, 'run') for member in swarm.board_members)
-
+ assert all(
+ hasattr(member.agent, "agent_name")
+ for member in swarm.board_members
+ )
+ assert all(
+ hasattr(member.agent, "run")
+ for member in swarm.board_members
+ )
+
def test_get_chairman_prompt(self, sample_agents):
"""Test chairman prompt generation."""
swarm = BoardOfDirectorsSwarm(agents=sample_agents)
prompt = swarm._get_chairman_prompt()
-
+
assert "Chairman" in prompt
assert "board meetings" in prompt
assert "consensus" in prompt
-
+
def test_get_vice_chairman_prompt(self, sample_agents):
"""Test vice chairman prompt generation."""
swarm = BoardOfDirectorsSwarm(agents=sample_agents)
prompt = swarm._get_vice_chairman_prompt()
-
+
assert "Vice Chairman" in prompt
assert "supporting" in prompt
assert "operational" in prompt
-
+
def test_get_secretary_prompt(self, sample_agents):
"""Test secretary prompt generation."""
swarm = BoardOfDirectorsSwarm(agents=sample_agents)
prompt = swarm._get_secretary_prompt()
-
+
assert "Secretary" in prompt
assert "documenting" in prompt
assert "records" in prompt
-
+
def test_format_board_members_info(self, configured_board_swarm):
"""Test board members info formatting."""
info = configured_board_swarm._format_board_members_info()
-
+
assert "Chairman" in info
assert "Vice-Chairman" in info
assert "Secretary" in info
assert "expertise" in info
-
- def test_add_board_member(self, basic_board_swarm, mock_board_member):
+
+ def test_add_board_member(
+ self, basic_board_swarm, mock_board_member
+ ):
"""Test adding a board member."""
initial_count = len(basic_board_swarm.board_members)
basic_board_swarm.add_board_member(mock_board_member)
-
- assert len(basic_board_swarm.board_members) == initial_count + 1
+
+ assert (
+ len(basic_board_swarm.board_members) == initial_count + 1
+ )
assert mock_board_member in basic_board_swarm.board_members
-
+
def test_remove_board_member(self, configured_board_swarm):
"""Test removing a board member."""
member_to_remove = configured_board_swarm.board_members[0]
member_name = member_to_remove.agent.agent_name
-
+
initial_count = len(configured_board_swarm.board_members)
configured_board_swarm.remove_board_member(member_name)
-
- assert len(configured_board_swarm.board_members) == initial_count - 1
- assert member_to_remove not in configured_board_swarm.board_members
-
+
+ assert (
+ len(configured_board_swarm.board_members)
+ == initial_count - 1
+ )
+ assert (
+ member_to_remove
+ not in configured_board_swarm.board_members
+ )
+
def test_get_board_member(self, configured_board_swarm):
"""Test getting a board member by name."""
member = configured_board_swarm.board_members[0]
member_name = member.agent.agent_name
-
- found_member = configured_board_swarm.get_board_member(member_name)
+
+ found_member = configured_board_swarm.get_board_member(
+ member_name
+ )
assert found_member == member
-
+
# Test with non-existent member
- not_found = configured_board_swarm.get_board_member("NonExistent")
+ not_found = configured_board_swarm.get_board_member(
+ "NonExistent"
+ )
assert not_found is None
-
+
def test_get_board_summary(self, configured_board_swarm):
"""Test getting board summary."""
summary = configured_board_swarm.get_board_summary()
-
+
assert "board_name" in summary
assert "total_members" in summary
assert "total_agents" in summary
assert "max_loops" in summary
assert "decision_threshold" in summary
assert "members" in summary
-
+
assert summary["board_name"] == "ConfiguredBoard"
assert summary["total_members"] == 3
assert summary["total_agents"] == 3
@@ -461,39 +505,53 @@ class TestBoardOfDirectorsSwarmMethods:
class TestBoardMeetingOperations:
"""Test board meeting operations."""
-
- def test_create_board_meeting_prompt(self, configured_board_swarm):
+
+ def test_create_board_meeting_prompt(
+ self, configured_board_swarm
+ ):
"""Test board meeting prompt creation."""
task = "Test task for board meeting"
- prompt = configured_board_swarm._create_board_meeting_prompt(task)
-
+ prompt = configured_board_swarm._create_board_meeting_prompt(
+ task
+ )
+
assert task in prompt
assert "BOARD OF DIRECTORS MEETING" in prompt
assert "INSTRUCTIONS" in prompt
assert "plan" in prompt
assert "orders" in prompt
-
+
def test_conduct_board_discussion(self, configured_board_swarm):
"""Test board discussion conduction."""
prompt = "Test board meeting prompt"
-
- with patch.object(configured_board_swarm.board_members[0].agent, 'run') as mock_run:
+
+ with patch.object(
+ configured_board_swarm.board_members[0].agent, "run"
+ ) as mock_run:
mock_run.return_value = "Board discussion result"
- result = configured_board_swarm._conduct_board_discussion(prompt)
-
+ result = configured_board_swarm._conduct_board_discussion(
+ prompt
+ )
+
assert result == "Board discussion result"
mock_run.assert_called_once_with(task=prompt, img=None)
-
- def test_conduct_board_discussion_no_chairman(self, sample_agents):
+
+ def test_conduct_board_discussion_no_chairman(
+ self, sample_agents
+ ):
"""Test board discussion when no chairman is found."""
swarm = BoardOfDirectorsSwarm(agents=sample_agents)
# Remove all board members
swarm.board_members = []
-
- with pytest.raises(ValueError, match="No chairman found in board members"):
+
+ with pytest.raises(
+ ValueError, match="No chairman found in board members"
+ ):
swarm._conduct_board_discussion("Test prompt")
-
- def test_parse_board_decisions_valid_json(self, configured_board_swarm):
+
+ def test_parse_board_decisions_valid_json(
+ self, configured_board_swarm
+ ):
"""Test parsing valid JSON board decisions."""
valid_json = """
{
@@ -519,43 +577,58 @@ class TestBoardMeetingOperations:
"meeting_summary": "Test summary"
}
"""
-
- result = configured_board_swarm._parse_board_decisions(valid_json)
-
+
+ result = configured_board_swarm._parse_board_decisions(
+ valid_json
+ )
+
assert isinstance(result, BoardSpec)
assert result.plan == "Test plan"
assert len(result.orders) == 1
assert len(result.decisions) == 1
assert result.meeting_summary == "Test summary"
-
- def test_parse_board_decisions_invalid_json(self, configured_board_swarm):
+
+ def test_parse_board_decisions_invalid_json(
+ self, configured_board_swarm
+ ):
"""Test parsing invalid JSON board decisions."""
invalid_json = "Invalid JSON content"
-
- result = configured_board_swarm._parse_board_decisions(invalid_json)
-
+
+ result = configured_board_swarm._parse_board_decisions(
+ invalid_json
+ )
+
assert isinstance(result, BoardSpec)
assert result.plan == invalid_json
assert len(result.orders) == 0
assert len(result.decisions) == 0
- assert result.meeting_summary == "Parsing failed, using raw output"
-
+ assert (
+ result.meeting_summary
+ == "Parsing failed, using raw output"
+ )
+
def test_run_board_meeting(self, configured_board_swarm):
"""Test running a complete board meeting."""
task = "Test board meeting task"
-
- with patch.object(configured_board_swarm, '_conduct_board_discussion') as mock_discuss:
- with patch.object(configured_board_swarm, '_parse_board_decisions') as mock_parse:
+
+ with patch.object(
+ configured_board_swarm, "_conduct_board_discussion"
+ ) as mock_discuss:
+ with patch.object(
+ configured_board_swarm, "_parse_board_decisions"
+ ) as mock_parse:
mock_discuss.return_value = "Board discussion"
mock_parse.return_value = BoardSpec(
plan="Test plan",
orders=[],
decisions=[],
- meeting_summary="Test summary"
+ meeting_summary="Test summary",
+ )
+
+ result = configured_board_swarm.run_board_meeting(
+ task
)
-
- result = configured_board_swarm.run_board_meeting(task)
-
+
assert isinstance(result, BoardSpec)
mock_discuss.assert_called_once()
mock_parse.assert_called_once_with("Board discussion")
@@ -563,153 +636,206 @@ class TestBoardMeetingOperations:
class TestTaskExecution:
"""Test task execution methods."""
-
+
def test_call_single_agent(self, configured_board_swarm):
"""Test calling a single agent."""
agent_name = "Agent1"
task = "Test task"
-
- with patch.object(configured_board_swarm.agents[0], 'run') as mock_run:
+
+ with patch.object(
+ configured_board_swarm.agents[0], "run"
+ ) as mock_run:
mock_run.return_value = "Agent response"
- result = configured_board_swarm._call_single_agent(agent_name, task)
-
+ result = configured_board_swarm._call_single_agent(
+ agent_name, task
+ )
+
assert result == "Agent response"
mock_run.assert_called_once()
-
- def test_call_single_agent_not_found(self, configured_board_swarm):
+
+ def test_call_single_agent_not_found(
+ self, configured_board_swarm
+ ):
"""Test calling a non-existent agent."""
- with pytest.raises(ValueError, match="Agent 'NonExistent' not found"):
- configured_board_swarm._call_single_agent("NonExistent", "Test task")
-
+ with pytest.raises(
+ ValueError, match="Agent 'NonExistent' not found"
+ ):
+ configured_board_swarm._call_single_agent(
+ "NonExistent", "Test task"
+ )
+
def test_execute_single_order(self, configured_board_swarm):
"""Test executing a single order."""
order = BoardOrder(
agent_name="Agent1",
task="Test order task",
priority=1,
- assigned_by="Chairman"
+ assigned_by="Chairman",
)
-
- with patch.object(configured_board_swarm, '_call_single_agent') as mock_call:
+
+ with patch.object(
+ configured_board_swarm, "_call_single_agent"
+ ) as mock_call:
mock_call.return_value = "Order execution result"
- result = configured_board_swarm._execute_single_order(order)
-
+ result = configured_board_swarm._execute_single_order(
+ order
+ )
+
assert result == "Order execution result"
mock_call.assert_called_once_with(
- agent_name="Agent1",
- task="Test order task"
+ agent_name="Agent1", task="Test order task"
)
-
+
def test_execute_orders(self, configured_board_swarm):
"""Test executing multiple orders."""
orders = [
- BoardOrder(agent_name="Agent1", task="Task 1", priority=1),
- BoardOrder(agent_name="Agent2", task="Task 2", priority=2),
+ BoardOrder(
+ agent_name="Agent1", task="Task 1", priority=1
+ ),
+ BoardOrder(
+ agent_name="Agent2", task="Task 2", priority=2
+ ),
]
-
- with patch.object(configured_board_swarm, '_execute_single_order') as mock_execute:
+
+ with patch.object(
+ configured_board_swarm, "_execute_single_order"
+ ) as mock_execute:
mock_execute.side_effect = ["Result 1", "Result 2"]
results = configured_board_swarm._execute_orders(orders)
-
+
assert len(results) == 2
assert results[0]["agent_name"] == "Agent1"
assert results[0]["output"] == "Result 1"
assert results[1]["agent_name"] == "Agent2"
assert results[1]["output"] == "Result 2"
-
+
def test_generate_board_feedback(self, configured_board_swarm):
"""Test generating board feedback."""
outputs = [
{"agent_name": "Agent1", "output": "Output 1"},
- {"agent_name": "Agent2", "output": "Output 2"}
+ {"agent_name": "Agent2", "output": "Output 2"},
]
-
- with patch.object(configured_board_swarm.board_members[0].agent, 'run') as mock_run:
+
+ with patch.object(
+ configured_board_swarm.board_members[0].agent, "run"
+ ) as mock_run:
mock_run.return_value = "Board feedback"
- result = configured_board_swarm._generate_board_feedback(outputs)
-
+ result = configured_board_swarm._generate_board_feedback(
+ outputs
+ )
+
assert result == "Board feedback"
mock_run.assert_called_once()
-
+
def test_generate_board_feedback_no_chairman(self, sample_agents):
"""Test generating feedback when no chairman is found."""
swarm = BoardOfDirectorsSwarm(agents=sample_agents)
swarm.board_members = [] # Remove all board members
-
- with pytest.raises(ValueError, match="No chairman found for feedback"):
+
+ with pytest.raises(
+ ValueError, match="No chairman found for feedback"
+ ):
swarm._generate_board_feedback([])
class TestStepAndRunMethods:
"""Test step and run methods."""
-
+
def test_step_method(self, configured_board_swarm):
"""Test the step method."""
task = "Test step task"
-
- with patch.object(configured_board_swarm, 'run_board_meeting') as mock_meeting:
- with patch.object(configured_board_swarm, '_execute_orders') as mock_execute:
- with patch.object(configured_board_swarm, '_generate_board_feedback') as mock_feedback:
+
+ with patch.object(
+ configured_board_swarm, "run_board_meeting"
+ ) as mock_meeting:
+ with patch.object(
+ configured_board_swarm, "_execute_orders"
+ ) as mock_execute:
+ with patch.object(
+ configured_board_swarm, "_generate_board_feedback"
+ ) as mock_feedback:
mock_meeting.return_value = BoardSpec(
plan="Test plan",
- orders=[BoardOrder(agent_name="Agent1", task="Task 1")],
+ orders=[
+ BoardOrder(
+ agent_name="Agent1", task="Task 1"
+ )
+ ],
decisions=[],
- meeting_summary="Test summary"
+ meeting_summary="Test summary",
)
- mock_execute.return_value = [{"agent_name": "Agent1", "output": "Result"}]
+ mock_execute.return_value = [
+ {"agent_name": "Agent1", "output": "Result"}
+ ]
mock_feedback.return_value = "Board feedback"
-
+
result = configured_board_swarm.step(task)
-
+
assert result == "Board feedback"
- mock_meeting.assert_called_once_with(task=task, img=None)
+ mock_meeting.assert_called_once_with(
+ task=task, img=None
+ )
mock_execute.assert_called_once()
mock_feedback.assert_called_once()
-
+
def test_step_method_no_feedback(self, configured_board_swarm):
"""Test the step method with feedback disabled."""
configured_board_swarm.board_feedback_on = False
task = "Test step task"
-
- with patch.object(configured_board_swarm, 'run_board_meeting') as mock_meeting:
- with patch.object(configured_board_swarm, '_execute_orders') as mock_execute:
+
+ with patch.object(
+ configured_board_swarm, "run_board_meeting"
+ ) as mock_meeting:
+ with patch.object(
+ configured_board_swarm, "_execute_orders"
+ ) as mock_execute:
mock_meeting.return_value = BoardSpec(
plan="Test plan",
- orders=[BoardOrder(agent_name="Agent1", task="Task 1")],
+ orders=[
+ BoardOrder(agent_name="Agent1", task="Task 1")
+ ],
decisions=[],
- meeting_summary="Test summary"
+ meeting_summary="Test summary",
)
- mock_execute.return_value = [{"agent_name": "Agent1", "output": "Result"}]
-
+ mock_execute.return_value = [
+ {"agent_name": "Agent1", "output": "Result"}
+ ]
+
result = configured_board_swarm.step(task)
-
- assert result == [{"agent_name": "Agent1", "output": "Result"}]
-
+
+ assert result == [
+ {"agent_name": "Agent1", "output": "Result"}
+ ]
+
def test_run_method(self, configured_board_swarm):
"""Test the run method."""
task = "Test run task"
-
- with patch.object(configured_board_swarm, 'step') as mock_step:
- with patch.object(configured_board_swarm, 'conversation') as mock_conversation:
+
+ with patch.object(
+ configured_board_swarm, "step"
+ ) as mock_step:
+ with patch.object(
+ configured_board_swarm, "conversation"
+ ) as mock_conversation:
mock_step.return_value = "Step result"
mock_conversation.add = Mock()
-
- result = configured_board_swarm.run(task)
-
+
+ configured_board_swarm.run(task)
+
assert mock_step.call_count == 2 # max_loops = 2
assert mock_conversation.add.call_count == 2
-
+
def test_arun_method(self, configured_board_swarm):
"""Test the async run method."""
task = "Test async run task"
-
- with patch.object(configured_board_swarm, 'run') as mock_run:
+
+ with patch.object(configured_board_swarm, "run") as mock_run:
mock_run.return_value = "Async result"
-
+
async def test_async():
result = await configured_board_swarm.arun(task)
return result
-
+
result = asyncio.run(test_async())
assert result == "Async result"
mock_run.assert_called_once_with(task=task, img=None)
@@ -718,17 +844,15 @@ class TestStepAndRunMethods:
# Integration tests
class TestBoardOfDirectorsSwarmIntegration:
"""Integration tests for BoardOfDirectorsSwarm."""
-
+
def test_full_workflow_integration(self, sample_agents):
"""Test full workflow integration."""
swarm = BoardOfDirectorsSwarm(
- agents=sample_agents,
- verbose=False,
- max_loops=1
+ agents=sample_agents, verbose=False, max_loops=1
)
-
+
task = "Create a simple report"
-
+
# Mock the board discussion to return structured output
mock_board_output = """
{
@@ -760,37 +884,41 @@ class TestBoardOfDirectorsSwarmIntegration:
"meeting_summary": "Board agreed to create a comprehensive report"
}
"""
-
- with patch.object(swarm.board_members[0].agent, 'run') as mock_run:
+
+ with patch.object(
+ swarm.board_members[0].agent, "run"
+ ) as mock_run:
mock_run.return_value = mock_board_output
result = swarm.run(task)
-
+
assert result is not None
assert isinstance(result, dict)
-
+
def test_board_member_management_integration(self, sample_agents):
"""Test board member management integration."""
swarm = BoardOfDirectorsSwarm(agents=sample_agents)
-
+
# Test adding a new board member
new_member = BoardMember(
agent=sample_agents[0],
role=BoardMemberRole.MEMBER,
voting_weight=1.0,
- expertise_areas=["testing"]
+ expertise_areas=["testing"],
)
-
+
initial_count = len(swarm.board_members)
swarm.add_board_member(new_member)
assert len(swarm.board_members) == initial_count + 1
-
+
# Test removing a board member
member_name = swarm.board_members[0].agent.agent_name
swarm.remove_board_member(member_name)
assert len(swarm.board_members) == initial_count
-
+
# Test getting board member
- member = swarm.get_board_member(swarm.board_members[0].agent.agent_name)
+ member = swarm.get_board_member(
+ swarm.board_members[0].agent.agent_name
+ )
assert member is not None
@@ -798,56 +926,90 @@ class TestBoardOfDirectorsSwarmIntegration:
@pytest.mark.parametrize("max_loops", [1, 2, 3])
def test_max_loops_parameterization(sample_agents, max_loops):
"""Test swarm with different max_loops values."""
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, max_loops=max_loops)
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, max_loops=max_loops
+ )
assert swarm.max_loops == max_loops
-@pytest.mark.parametrize("decision_threshold", [0.5, 0.6, 0.7, 0.8, 0.9])
-def test_decision_threshold_parameterization(sample_agents, decision_threshold):
+@pytest.mark.parametrize(
+ "decision_threshold", [0.5, 0.6, 0.7, 0.8, 0.9]
+)
+def test_decision_threshold_parameterization(
+ sample_agents, decision_threshold
+):
"""Test swarm with different decision threshold values."""
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, decision_threshold=decision_threshold)
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, decision_threshold=decision_threshold
+ )
assert swarm.decision_threshold == decision_threshold
-@pytest.mark.parametrize("board_model", ["gpt-4o-mini", "gpt-4", "claude-3-sonnet"])
+@pytest.mark.parametrize(
+ "board_model", ["gpt-4o-mini", "gpt-4", "claude-3-sonnet"]
+)
def test_board_model_parameterization(sample_agents, board_model):
"""Test swarm with different board models."""
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, board_model_name=board_model)
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, board_model_name=board_model
+ )
assert swarm.board_model_name == board_model
 
 
 # Error handling tests
class TestBoardOfDirectorsSwarmErrorHandling:
"""Test error handling in BoardOfDirectorsSwarm."""
-
+
def test_initialization_error_handling(self):
"""Test error handling during initialization."""
with pytest.raises(ValueError):
BoardOfDirectorsSwarm(agents=[])
-
- def test_board_meeting_error_handling(self, configured_board_swarm):
+
+ def test_board_meeting_error_handling(
+ self, configured_board_swarm
+ ):
"""Test error handling during board meeting."""
- with patch.object(configured_board_swarm, '_conduct_board_discussion') as mock_discuss:
- mock_discuss.side_effect = Exception("Board meeting failed")
-
- with pytest.raises(Exception, match="Board meeting failed"):
+ with patch.object(
+ configured_board_swarm, "_conduct_board_discussion"
+ ) as mock_discuss:
+ mock_discuss.side_effect = Exception(
+ "Board meeting failed"
+ )
+
+ with pytest.raises(
+ Exception, match="Board meeting failed"
+ ):
configured_board_swarm.run_board_meeting("Test task")
-
- def test_task_execution_error_handling(self, configured_board_swarm):
+
+ def test_task_execution_error_handling(
+ self, configured_board_swarm
+ ):
"""Test error handling during task execution."""
- with patch.object(configured_board_swarm, '_call_single_agent') as mock_call:
+ with patch.object(
+ configured_board_swarm, "_call_single_agent"
+ ) as mock_call:
mock_call.side_effect = Exception("Task execution failed")
-
- with pytest.raises(Exception, match="Task execution failed"):
- configured_board_swarm._call_single_agent("Agent1", "Test task")
-
- def test_order_execution_error_handling(self, configured_board_swarm):
+
+ with pytest.raises(
+ Exception, match="Task execution failed"
+ ):
+ configured_board_swarm._call_single_agent(
+ "Agent1", "Test task"
+ )
+
+ def test_order_execution_error_handling(
+ self, configured_board_swarm
+ ):
"""Test error handling during order execution."""
orders = [BoardOrder(agent_name="Agent1", task="Task 1")]
-
- with patch.object(configured_board_swarm, '_execute_single_order') as mock_execute:
- mock_execute.side_effect = Exception("Order execution failed")
-
+
+ with patch.object(
+ configured_board_swarm, "_execute_single_order"
+ ) as mock_execute:
+ mock_execute.side_effect = Exception(
+ "Order execution failed"
+ )
+
# Should not raise exception, but log error
results = configured_board_swarm._execute_orders(orders)
assert len(results) == 1
@@ -857,56 +1019,58 @@ class TestBoardOfDirectorsSwarmErrorHandling:
# Performance tests
class TestBoardOfDirectorsSwarmPerformance:
"""Test performance characteristics of BoardOfDirectorsSwarm."""
-
+
def test_parallel_execution_performance(self, sample_agents):
"""Test parallel execution performance."""
import time
-
+
swarm = BoardOfDirectorsSwarm(
- agents=sample_agents,
- max_workers=3,
- verbose=False
+ agents=sample_agents, max_workers=3, verbose=False
)
-
+
# Create multiple orders
orders = [
BoardOrder(agent_name=f"Agent{i+1}", task=f"Task {i+1}")
for i in range(3)
]
-
+
start_time = time.time()
-
- with patch.object(swarm, '_execute_single_order') as mock_execute:
- mock_execute.side_effect = lambda order: f"Result for {order.task}"
+
+ with patch.object(
+ swarm, "_execute_single_order"
+ ) as mock_execute:
+ mock_execute.side_effect = (
+ lambda order: f"Result for {order.task}"
+ )
results = swarm._execute_orders(orders)
-
+
end_time = time.time()
execution_time = end_time - start_time
-
+
assert len(results) == 3
- assert execution_time < 1.0 # Should complete quickly with parallel execution
-
+ assert (
+ execution_time < 1.0
+        )  # Should complete quickly with parallel execution
+
def test_memory_usage(self, sample_agents):
"""Test memory usage characteristics."""
import psutil
import os
-
+
process = psutil.Process(os.getpid())
initial_memory = process.memory_info().rss
-
+
# Create multiple swarms
swarms = []
for i in range(5):
swarm = BoardOfDirectorsSwarm(
- agents=sample_agents,
- name=f"Swarm{i}",
- verbose=False
+ agents=sample_agents, name=f"Swarm{i}", verbose=False
)
swarms.append(swarm)
-
+
final_memory = process.memory_info().rss
memory_increase = final_memory - initial_memory
-
+
# Memory increase should be reasonable (less than 100MB)
         assert memory_increase < 100 * 1024 * 1024
 
@@ -914,56 +1078,76 @@ class TestBoardOfDirectorsSwarmPerformance:
# Configuration tests
class TestBoardOfDirectorsSwarmConfiguration:
"""Test configuration options for BoardOfDirectorsSwarm."""
-
+
def test_verbose_configuration(self, sample_agents):
"""Test verbose configuration."""
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, verbose=True)
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, verbose=True
+ )
assert swarm.verbose is True
-
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, verbose=False)
+
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, verbose=False
+ )
assert swarm.verbose is False
-
+
def test_collaboration_prompt_configuration(self, sample_agents):
"""Test collaboration prompt configuration."""
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, add_collaboration_prompt=True)
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, add_collaboration_prompt=True
+ )
assert swarm.add_collaboration_prompt is True
-
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, add_collaboration_prompt=False)
+
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, add_collaboration_prompt=False
+ )
assert swarm.add_collaboration_prompt is False
-
+
def test_board_feedback_configuration(self, sample_agents):
"""Test board feedback configuration."""
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, board_feedback_on=True)
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, board_feedback_on=True
+ )
assert swarm.board_feedback_on is True
-
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, board_feedback_on=False)
+
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, board_feedback_on=False
+ )
assert swarm.board_feedback_on is False
-
+
def test_voting_configuration(self, sample_agents):
"""Test voting configuration."""
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, enable_voting=True)
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, enable_voting=True
+ )
assert swarm.enable_voting is True
-
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, enable_voting=False)
+
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, enable_voting=False
+ )
assert swarm.enable_voting is False
-
+
def test_consensus_configuration(self, sample_agents):
"""Test consensus configuration."""
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, enable_consensus=True)
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, enable_consensus=True
+ )
assert swarm.enable_consensus is True
-
- swarm = BoardOfDirectorsSwarm(agents=sample_agents, enable_consensus=False)
+
+ swarm = BoardOfDirectorsSwarm(
+ agents=sample_agents, enable_consensus=False
+ )
assert swarm.enable_consensus is False
 
 
 # Real integration tests (skipped if no API key)
@pytest.mark.skipif(
not os.getenv("OPENAI_API_KEY"),
- reason="OpenAI API key not available"
+ reason="OpenAI API key not available",
)
class TestBoardOfDirectorsSwarmRealIntegration:
"""Real integration tests for BoardOfDirectorsSwarm."""
-
+
def test_real_board_meeting(self):
"""Test real board meeting with actual API calls."""
# Create real agents
@@ -972,30 +1156,28 @@ class TestBoardOfDirectorsSwarmRealIntegration:
agent_name="Researcher",
agent_description="Research analyst",
model_name="gpt-4o-mini",
- max_loops=1
+ max_loops=1,
),
Agent(
agent_name="Writer",
agent_description="Content writer",
model_name="gpt-4o-mini",
- max_loops=1
- )
+ max_loops=1,
+ ),
]
-
+
swarm = BoardOfDirectorsSwarm(
- agents=agents,
- verbose=False,
- max_loops=1
+ agents=agents, verbose=False, max_loops=1
)
-
+
task = "Create a brief market analysis report"
-
+
result = swarm.run(task)
-
+
assert result is not None
assert isinstance(result, dict)
assert "conversation_history" in result
-
+
def test_real_board_member_management(self):
"""Test real board member management."""
agents = [
@@ -1003,12 +1185,12 @@ class TestBoardOfDirectorsSwarmRealIntegration:
agent_name="TestAgent",
agent_description="Test agent",
model_name="gpt-4o-mini",
- max_loops=1
+ max_loops=1,
)
]
-
+
swarm = BoardOfDirectorsSwarm(agents=agents, verbose=False)
-
+
# Test board summary
summary = swarm.get_board_summary()
assert summary["total_members"] == 3 # Default board
@@ -1017,4 +1199,4 @@ class TestBoardOfDirectorsSwarmRealIntegration:
 
 # Test runner
if __name__ == "__main__":
- pytest.main([__file__, "-v", "--tb=short"])
\ No newline at end of file
+ pytest.main([__file__, "-v", "--tb=short"])