From 6c6b9911e0882cba04841ff98ec655cb6e7743fb Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sat, 18 Oct 2025 20:07:21 -0700 Subject: [PATCH] [Improvement][AgentRouter][ Now leverages litellm embedding instead chromadb] [CLEANUP][Removed un-used util files and cleaned up artifacts] --- docs/examples/index.md | 18 +- examples/README.md | 33 +- .../concurrent_examples/concurrent_example.py | 0 examples/network_error_example.py | 119 +++++ examples/network_management_example.py | 223 +++++++++ examples/persistence_example.py | 62 +++ examples/persistence_management_example.py | 141 ++++++ swarms/structs/__init__.py | 2 +- swarms/structs/agent_router.py | 237 ++++++---- swarms/structs/aop.py | 436 +++++++++++++++++- swarms/structs/social_algorithms.py | 20 - swarms/utils/audio_processing.py | 343 -------------- swarms/utils/auto_download_check_packages.py | 151 ------ swarms/utils/image_generator.py | 54 --- tests/structs/test_agent_router.py | 387 ++++++++++++++++ tests/utils/test_auto_check_download.py | 104 ----- 16 files changed, 1565 insertions(+), 765 deletions(-) rename concurrent_example.py => examples/multi_agent/concurrent_examples/concurrent_example.py (100%) create mode 100644 examples/network_error_example.py create mode 100644 examples/network_management_example.py create mode 100644 examples/persistence_example.py create mode 100644 examples/persistence_management_example.py delete mode 100644 swarms/utils/audio_processing.py delete mode 100644 swarms/utils/auto_download_check_packages.py delete mode 100644 swarms/utils/image_generator.py create mode 100644 tests/structs/test_agent_router.py delete mode 100644 tests/utils/test_auto_check_download.py diff --git a/docs/examples/index.md b/docs/examples/index.md index a23f7b06..bb1ed712 100644 --- a/docs/examples/index.md +++ b/docs/examples/index.md @@ -242,12 +242,20 @@ This index organizes **100+ production-ready examples** from our [Swarms Example | Business | [Business Strategy](https://github.com/The-Swarm-Corporation/swarms-examples/blob/main/examples/applications/business_strategy/business_strategy_graph/growth_agent.py) | Strategic planning and business development swarm | | Research | [Astronomy Research](https://github.com/The-Swarm-Corporation/swarms-examples/blob/main/examples/applications/astronomy/multiversal_detection/test.py) | Collaborative space research and astronomical analysis | -## Additional Resources -- [Github](https://github.com/kyegomez/swarms) +------- -- Discord (https://t.co/zlLe07AqUX) +## Connect With Us -- Telegram (https://t.co/dSRy143zQv) +Join our community of agent engineers and researchers for technical support, cutting-edge updates, and exclusive access to world-class agent engineering insights! 
-- X Community (https://x.com/i/communities/1875452887414804745) \ No newline at end of file +| Platform | Description | Link | +|----------|-------------|------| +| šŸ“š Documentation | Official documentation and guides | [docs.swarms.world](https://docs.swarms.world) | +| šŸ“ Blog | Latest updates and technical articles | [Medium](https://medium.com/@kyeg) | +| šŸ’¬ Discord | Live chat and community support | [Join Discord](https://discord.gg/EamjgSaEQf) | +| 🐦 Twitter | Latest news and announcements | [@swarms_corp](https://twitter.com/swarms_corp) | +| šŸ‘„ LinkedIn | Professional network and updates | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | +| šŸ“ŗ YouTube | Tutorials and demos | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | +| šŸŽ« Events | Join our community events | [Sign up here](https://lu.ma/5p2jnc2v) | +| šŸš€ Onboarding Session | Get onboarded with Kye Gomez, creator and lead maintainer of Swarms | [Book Session](https://cal.com/swarms/swarms-onboarding-session) | diff --git a/examples/README.md b/examples/README.md index d5b7b150..ad229945 100644 --- a/examples/README.md +++ b/examples/README.md @@ -5,43 +5,61 @@ This directory contains comprehensive examples demonstrating various capabilitie ## šŸ“ Directory Overview ### šŸ¤– Multi-Agent Systems -- **[multi_agent/](multi_agent/)** - Advanced multi-agent patterns including agent rearrangement, auto swarm builder (ASB), batched workflows, board of directors, caching, concurrent processing, councils, debates, elections, forest swarms, graph workflows, group chats, heavy swarms, hierarchical swarms, majority voting, and orchestration examples. + +- **[multi_agent/](multi_agent/)** - Advanced multi-agent patterns including agent rearrangement, auto swarm builder (ASB), batched workflows, board of directors, caching, concurrent processing, councils, debates, elections, forest swarms, graph workflows, group chats, heavy swarms, hierarchical swarms, majority voting, orchestration examples, social algorithms, simulations, spreadsheet examples, and swarm routing. ### šŸ‘¤ Single Agent Systems + - **[single_agent/](single_agent/)** - Single agent implementations including demos, external agent integrations, LLM integrations (Azure, Claude, DeepSeek, Mistral, OpenAI, Qwen), onboarding, RAG, reasoning agents, tools integration, utils, and vision capabilities. ### šŸ› ļø Tools & Integrations + - **[tools/](tools/)** - Tool integration examples including agent-as-tools, base tool implementations, browser automation, Claude integration, Exa search, Firecrawl, multi-tool usage, and Stagehand integration. ### šŸŽÆ Model Integrations -- **[models/](models/)** - Various model integrations including Cerebras, GPT-5, GPT-OSS, Llama 4, Lumo, Ollama, and VLLM implementations. + +- **[models/](models/)** - Various model integrations including Cerebras, GPT-5, GPT-OSS, Llama 4, Lumo, Ollama, and VLLM implementations with concurrent processing examples and provider-specific configurations. ### šŸ”Œ API & Protocols + - **[swarms_api_examples/](swarms_api_examples/)** - Swarms API usage examples including agent overview, batch processing, client integration, team examples, analysis, and rate limiting. - **[mcp/](mcp/)** - Model Context Protocol (MCP) integration examples including agent implementations, multi-connection setups, server configurations, and utility functions. 
+- **[aop_examples/](aop_examples/)** - Agents over Protocol (AOP) examples demonstrating MCP server setup, agent discovery, client interactions, queue-based task submission, and medical AOP implementations. + ### 🧠 Advanced Capabilities -- **[reasoning_agents/](reasoning_agents/)** - Advanced reasoning capabilities including agent judge evaluation systems and O3 model integration. -- **[rag/](rag/)** - Retrieval Augmented Generation (RAG) implementations with vector database integrations. +- **[reasoning_agents/](reasoning_agents/)** - Advanced reasoning capabilities including agent judge evaluation systems, O3 model integration, and mixture of agents (MOA) sequential examples. + +- **[rag/](rag/)** - Retrieval Augmented Generation (RAG) implementations with vector database integrations including Qdrant examples. ### šŸ“š Guides & Tutorials -- **[guides/](guides/)** - Comprehensive guides and tutorials including generation length blog, geo guesser agent, graph workflow guide, hierarchical marketing team, nano banana Jarvis agent, smart database, and web scraper agents. + +- **[guides/](guides/)** - Comprehensive guides and tutorials including generation length blog, geo guesser agent, graph workflow guide, hierarchical marketing team, nano banana Jarvis agent, smart database, web scraper agents, and workshop examples (840_update, 850_workshop). ### šŸŽŖ Demonstrations + - **[demos/](demos/)** - Domain-specific demonstrations across various industries including apps, charts, crypto, CUDA, finance, hackathon projects, insurance, legal, medical, news, privacy, real estate, science, and synthetic data generation. +### šŸ† Hackathons + +- **[hackathons/](hackathons/)** - Hackathon projects and implementations including September 27 hackathon examples with diet coach agents, nutritional content analysis swarms, and API client integrations. + ### šŸš€ Deployment + - **[deployment/](deployment/)** - Deployment strategies and patterns including cron job implementations and FastAPI deployment examples. ### šŸ› ļø Utilities + - **[utils/](utils/)** - Utility functions and helper implementations including agent loader, communication examples, concurrent wrappers, miscellaneous utilities, and telemetry. ### šŸŽ“ Educational + - **[workshops/](workshops/)** - Workshop examples and educational sessions including agent tools, batched grids, geo guesser, and Jarvis agent implementations. ### šŸ–„ļø User Interface + - **[ui/](ui/)** - User interface examples and implementations including chat interfaces. ## šŸš€ Quick Start @@ -49,7 +67,10 @@ This directory contains comprehensive examples demonstrating various capabilitie 1. **New to Swarms?** Start with [single_agent/simple_agent.py](single_agent/simple_agent.py) for basic concepts 2. **Want multi-agent workflows?** Check out [multi_agent/duo_agent.py](multi_agent/duo_agent.py) 3. **Need tool integration?** Explore [tools/agent_as_tools.py](tools/agent_as_tools.py) -4. **Looking for guides?** Visit [guides/](guides/) for comprehensive tutorials +4. **Interested in AOP?** Try [aop_examples/example_new_agent_tools.py](aop_examples/example_new_agent_tools.py) for agent discovery +5. **Want to see social algorithms?** Check out [multi_agent/social_algorithms_examples/](multi_agent/social_algorithms_examples/) +6. **Looking for guides?** Visit [guides/](guides/) for comprehensive tutorials +7. 
**Hackathon projects?** Explore [hackathons/hackathon_sep_27/](hackathons/hackathon_sep_27/) for real-world implementations ## šŸ“– Documentation diff --git a/concurrent_example.py b/examples/multi_agent/concurrent_examples/concurrent_example.py similarity index 100% rename from concurrent_example.py rename to examples/multi_agent/concurrent_examples/concurrent_example.py diff --git a/examples/network_error_example.py b/examples/network_error_example.py new file mode 100644 index 00000000..2de42abe --- /dev/null +++ b/examples/network_error_example.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 +""" +Example demonstrating the AOP network error handling feature. + +This example shows how the AOP server handles network connection issues +with custom error messages and automatic retry logic. +""" + +from swarms import Agent +from swarms.structs.aop import AOP + + +def main(): + """Demonstrate AOP network error handling functionality.""" + + # Create a simple agent + agent = Agent( + agent_name="network_test_agent", + agent_description="An agent for testing network error handling", + system_prompt="You are a helpful assistant for network testing.", + ) + + # Create AOP with network monitoring enabled + aop = AOP( + server_name="Network Resilient AOP Server", + description="An AOP server with network error handling and retry logic", + agents=[agent], + port=8003, + host="localhost", + persistence=True, # Enable persistence for automatic restart + max_restart_attempts=3, + restart_delay=2.0, + network_monitoring=True, # Enable network monitoring + max_network_retries=5, # Allow up to 5 network retries + network_retry_delay=3.0, # Wait 3 seconds between network retries + network_timeout=10.0, # 10 second network timeout + verbose=True, + ) + + print("AOP Network Error Handling Demo") + print("=" * 40) + print() + + # Show initial network status + print("Initial network status:") + network_status = aop.get_network_status() + for key, value in network_status.items(): + print(f" {key}: {value}") + print() + + # Show persistence status + print("Persistence status:") + persistence_status = aop.get_persistence_status() + for key, value in persistence_status.items(): + print(f" {key}: {value}") + print() + + print("Network error handling features:") + print("āœ… Custom error messages with emojis") + print("āœ… Automatic network connectivity testing") + print("āœ… Configurable retry attempts and delays") + print("āœ… Network error detection and classification") + print("āœ… Graceful degradation and recovery") + print() + + print("To test network error handling:") + print("1. Start the server (it will run on localhost:8003)") + print("2. Simulate network issues by:") + print(" - Disconnecting your network") + print(" - Blocking the port with firewall") + print(" - Stopping the network service") + print("3. Watch the custom error messages and retry attempts") + print("4. 
Reconnect and see automatic recovery") + print() + + try: + print("Starting server with network monitoring...") + print("Press Ctrl+C to stop the demo") + print() + + # This will run with network monitoring enabled + aop.run() + + except KeyboardInterrupt: + print("\nDemo interrupted by user") + print("Network status at shutdown:") + network_status = aop.get_network_status() + for key, value in network_status.items(): + print(f" {key}: {value}") + except Exception as e: + print(f"\nUnexpected error: {e}") + print("This demonstrates how non-network errors are handled") + + +def simulate_network_issues(): + """ + Simulate various network issues for testing. + + This function can be used to test the network error handling + in a controlled environment. + """ + print("Network Issue Simulation:") + print("1. Connection Refused - Server not running") + print("2. Connection Reset - Server closed connection") + print("3. Timeout - Server not responding") + print("4. Host Resolution Failed - Invalid hostname") + print("5. Network Unreachable - No route to host") + print() + print("The AOP server will detect these errors and:") + print("- Display custom error messages with emojis") + print("- Attempt automatic reconnection") + print("- Test network connectivity before retry") + print("- Give up after max retry attempts") + + +if __name__ == "__main__": + main() + print("\n" + "=" * 40) + simulate_network_issues() diff --git a/examples/network_management_example.py b/examples/network_management_example.py new file mode 100644 index 00000000..5f3ec136 --- /dev/null +++ b/examples/network_management_example.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python3 +""" +Example demonstrating AOP network management and monitoring. + +This example shows how to monitor and manage network connectivity +in an AOP server with real-time status updates. 
+""" + +import time +import threading +from swarms import Agent +from swarms.structs.aop import AOP + + +def monitor_network_status(aop_instance): + """Monitor network status in a separate thread.""" + while True: + try: + network_status = aop_instance.get_network_status() + persistence_status = aop_instance.get_persistence_status() + + print(f"\n{'='*60}") + print( + f"šŸ“Š REAL-TIME STATUS MONITOR - {time.strftime('%H:%M:%S')}" + ) + print(f"{'='*60}") + + # Network Status + print("🌐 NETWORK STATUS:") + print( + f" Monitoring: {'āœ… Enabled' if network_status['network_monitoring_enabled'] else 'āŒ Disabled'}" + ) + print( + f" Connected: {'āœ… Yes' if network_status['network_connected'] else 'āŒ No'}" + ) + print( + f" Retry Count: {network_status['network_retry_count']}/{network_status['max_network_retries']}" + ) + print( + f" Remaining Retries: {network_status['remaining_network_retries']}" + ) + print( + f" Host: {network_status['host']}:{network_status['port']}" + ) + print(f" Timeout: {network_status['network_timeout']}s") + print( + f" Retry Delay: {network_status['network_retry_delay']}s" + ) + + if network_status["last_network_error"]: + print( + f" Last Error: {network_status['last_network_error']}" + ) + + # Persistence Status + print("\nšŸ”„ PERSISTENCE STATUS:") + print( + f" Enabled: {'āœ… Yes' if persistence_status['persistence_enabled'] else 'āŒ No'}" + ) + print( + f" Shutdown Requested: {'āŒ Yes' if persistence_status['shutdown_requested'] else 'āœ… No'}" + ) + print( + f" Restart Count: {persistence_status['restart_count']}/{persistence_status['max_restart_attempts']}" + ) + print( + f" Remaining Restarts: {persistence_status['remaining_restarts']}" + ) + print( + f" Restart Delay: {persistence_status['restart_delay']}s" + ) + + # Connection Health + if network_status["network_connected"]: + print("\nšŸ’š CONNECTION HEALTH: Excellent") + elif network_status["network_retry_count"] == 0: + print("\n🟔 CONNECTION HEALTH: Unknown") + elif network_status["remaining_network_retries"] > 0: + print( + f"\n🟠 CONNECTION HEALTH: Recovering ({network_status['remaining_network_retries']} retries left)" + ) + else: + print( + "\nšŸ”“ CONNECTION HEALTH: Critical (No retries left)" + ) + + print(f"{'='*60}") + + # Check if we should stop monitoring + if ( + persistence_status["shutdown_requested"] + and not persistence_status["persistence_enabled"] + ): + print("šŸ›‘ Shutdown requested, stopping monitor...") + break + + time.sleep(5) # Update every 5 seconds + + except Exception as e: + print(f"āŒ Monitor error: {e}") + time.sleep(5) + + +def main(): + """Demonstrate AOP network management.""" + + # Create a simple agent + agent = Agent( + agent_name="network_monitor_agent", + agent_description="An agent for network monitoring demo", + system_prompt="You are a helpful assistant for network monitoring.", + ) + + # Create AOP with comprehensive network monitoring + aop = AOP( + server_name="Network Managed AOP Server", + description="An AOP server with comprehensive network management", + agents=[agent], + port=8004, + host="localhost", + persistence=True, + max_restart_attempts=5, + restart_delay=3.0, + network_monitoring=True, + max_network_retries=10, + network_retry_delay=2.0, + network_timeout=5.0, + verbose=True, + ) + + print("AOP Network Management Demo") + print("=" * 50) + print() + + # Show initial configuration + print("Initial Configuration:") + print(f" Server: {aop.server_name}") + print(f" Host: {aop.host}:{aop.port}") + print(f" Persistence: {aop.persistence}") + 
print(f" Network Monitoring: {aop.network_monitoring}") + print(f" Max Network Retries: {aop.max_network_retries}") + print(f" Network Timeout: {aop.network_timeout}s") + print() + + # Start monitoring in background + print("Starting network status monitor...") + monitor_thread = threading.Thread( + target=monitor_network_status, args=(aop,), daemon=True + ) + monitor_thread.start() + + print("Available commands:") + print(" 'start' - Start the server") + print(" 'status' - Show current status") + print(" 'reset_network' - Reset network retry counter") + print(" 'disable_network' - Disable network monitoring") + print(" 'enable_network' - Enable network monitoring") + print(" 'shutdown' - Request graceful shutdown") + print(" 'quit' - Exit the program") + print() + + try: + while True: + command = input("Enter command: ").strip().lower() + + if command == "start": + print( + "Starting server... (Press Ctrl+C to test network error handling)" + ) + try: + aop.run() + except KeyboardInterrupt: + print("Server interrupted!") + + elif command == "status": + print("\nCurrent Status:") + network_status = aop.get_network_status() + persistence_status = aop.get_persistence_status() + + print("Network:") + for key, value in network_status.items(): + print(f" {key}: {value}") + + print("\nPersistence:") + for key, value in persistence_status.items(): + print(f" {key}: {value}") + + elif command == "reset_network": + aop.reset_network_retry_count() + print("Network retry counter reset!") + + elif command == "disable_network": + aop.network_monitoring = False + print("Network monitoring disabled!") + + elif command == "enable_network": + aop.network_monitoring = True + print("Network monitoring enabled!") + + elif command == "shutdown": + aop.request_shutdown() + print("Shutdown requested!") + + elif command == "quit": + print("Exiting...") + break + + else: + print( + "Unknown command. Try: start, status, reset_network, disable_network, enable_network, shutdown, quit" + ) + + except KeyboardInterrupt: + print("\nExiting...") + finally: + # Clean shutdown + aop.disable_persistence() + aop.request_shutdown() + print("Cleanup completed") + + +if __name__ == "__main__": + main() diff --git a/examples/persistence_example.py b/examples/persistence_example.py new file mode 100644 index 00000000..9f56401b --- /dev/null +++ b/examples/persistence_example.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +""" +Example demonstrating the AOP persistence feature. + +This example shows how to use the persistence mode to create a server +that automatically restarts when stopped, with failsafe protection. 
+""" + +from swarms import Agent +from swarms.structs.aop import AOP + + +def main(): + """Demonstrate AOP persistence functionality.""" + + # Create a simple agent + agent = Agent( + agent_name="example_agent", + agent_description="An example agent for persistence demo", + system_prompt="You are a helpful assistant.", + ) + + # Create AOP with persistence enabled + aop = AOP( + server_name="Persistent AOP Server", + description="A persistent AOP server that auto-restarts", + agents=[agent], + port=8001, + persistence=True, # Enable persistence + max_restart_attempts=5, # Allow up to 5 restarts + restart_delay=3.0, # Wait 3 seconds between restarts + verbose=True, + ) + + print("Starting persistent AOP server...") + print("Press Ctrl+C to test the restart functionality") + print("The server will restart automatically up to 5 times") + print("After 5 failed restarts, it will shut down permanently") + print() + + # Show persistence status + status = aop.get_persistence_status() + print(f"Persistence Status: {status}") + print() + + try: + # This will run with persistence enabled + aop.run() + except KeyboardInterrupt: + print("\nReceived interrupt signal") + print( + "In persistence mode, the server would normally restart" + ) + print( + "To disable persistence and shut down gracefully, call:" + ) + print(" aop.disable_persistence()") + print(" aop.request_shutdown()") + + +if __name__ == "__main__": + main() diff --git a/examples/persistence_management_example.py b/examples/persistence_management_example.py new file mode 100644 index 00000000..e58d4314 --- /dev/null +++ b/examples/persistence_management_example.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python3 +""" +Example demonstrating AOP persistence management methods. + +This example shows how to control persistence mode at runtime, +including enabling/disabling persistence and monitoring status. 
+""" + +import time +import threading +from swarms import Agent +from swarms.structs.aop import AOP + + +def monitor_persistence(aop_instance): + """Monitor persistence status in a separate thread.""" + while True: + status = aop_instance.get_persistence_status() + print("\n[Monitor] Persistence Status:") + print(f" - Enabled: {status['persistence_enabled']}") + print( + f" - Shutdown Requested: {status['shutdown_requested']}" + ) + print(f" - Restart Count: {status['restart_count']}") + print( + f" - Remaining Restarts: {status['remaining_restarts']}" + ) + print( + f" - Max Restart Attempts: {status['max_restart_attempts']}" + ) + print(f" - Restart Delay: {status['restart_delay']}s") + + if status["shutdown_requested"]: + break + + time.sleep(10) # Check every 10 seconds + + +def main(): + """Demonstrate AOP persistence management.""" + + # Create a simple agent + agent = Agent( + agent_name="management_agent", + agent_description="An agent for persistence management demo", + system_prompt="You are a helpful assistant for testing persistence.", + ) + + # Create AOP with persistence initially disabled + aop = AOP( + server_name="Managed AOP Server", + description="An AOP server with runtime persistence management", + agents=[agent], + port=8002, + persistence=False, # Start with persistence disabled + max_restart_attempts=3, + restart_delay=2.0, + verbose=True, + ) + + print("AOP Persistence Management Demo") + print("=" * 40) + print() + + # Show initial status + print("Initial persistence status:") + status = aop.get_persistence_status() + for key, value in status.items(): + print(f" {key}: {value}") + print() + + # Start monitoring in background + monitor_thread = threading.Thread( + target=monitor_persistence, args=(aop,), daemon=True + ) + monitor_thread.start() + + print("Available commands:") + print(" 'enable' - Enable persistence mode") + print(" 'disable' - Disable persistence mode") + print(" 'shutdown' - Request graceful shutdown") + print(" 'reset' - Reset restart counter") + print(" 'status' - Show current status") + print(" 'start' - Start the server") + print(" 'quit' - Exit the program") + print() + + try: + while True: + command = input("Enter command: ").strip().lower() + + if command == "enable": + aop.enable_persistence() + print("Persistence enabled!") + + elif command == "disable": + aop.disable_persistence() + print("Persistence disabled!") + + elif command == "shutdown": + aop.request_shutdown() + print("Shutdown requested!") + + elif command == "reset": + aop.reset_restart_count() + print("Restart counter reset!") + + elif command == "status": + status = aop.get_persistence_status() + print("Current status:") + for key, value in status.items(): + print(f" {key}: {value}") + + elif command == "start": + print( + "Starting server... (Press Ctrl+C to test restart)" + ) + try: + aop.run() + except KeyboardInterrupt: + print("Server interrupted!") + + elif command == "quit": + print("Exiting...") + break + + else: + print( + "Unknown command. 
Try: enable, disable, shutdown, reset, status, start, quit" + ) + + except KeyboardInterrupt: + print("\nExiting...") + finally: + # Clean shutdown + aop.disable_persistence() + aop.request_shutdown() + + +if __name__ == "__main__": + main() diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index a4993d41..ea90feb2 100644 --- a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -59,6 +59,7 @@ from swarms.structs.multi_agent_exec import ( ) from swarms.structs.multi_agent_router import MultiAgentRouter from swarms.structs.round_robin import RoundRobinSwarm +from swarms.structs.self_moa_seq import SelfMoASeq from swarms.structs.sequential_workflow import SequentialWorkflow from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm from swarms.structs.stopping_conditions import ( @@ -98,7 +99,6 @@ from swarms.structs.swarming_architectures import ( staircase_swarm, star_swarm, ) -from swarms.structs.self_moa_seq import SelfMoASeq __all__ = [ "Agent", diff --git a/swarms/structs/agent_router.py b/swarms/structs/agent_router.py index b9362798..ea11ed5f 100644 --- a/swarms/structs/agent_router.py +++ b/swarms/structs/agent_router.py @@ -1,87 +1,146 @@ -from typing import List, Optional +import math +from typing import Any, Callable, List, Optional, Union +from litellm import embedding from tenacity import retry, stop_after_attempt, wait_exponential -from typing import Union, Callable, Any -from swarms import Agent -from swarms.utils.loguru_logger import initialize_logger -from swarms.utils.auto_download_check_packages import ( - auto_check_and_download_package, -) +from swarms.structs.omni_agent_types import AgentType +from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger(log_folder="agent_router") class AgentRouter: """ - Initialize the AgentRouter. + Initialize the AgentRouter using LiteLLM embeddings for agent matching. Args: - collection_name (str): Name of the collection in the vector database. - persist_directory (str): Directory to persist the vector database. + embedding_model (str): The embedding model to use for generating embeddings. + Examples: 'text-embedding-ada-002', 'text-embedding-3-small', 'text-embedding-3-large', + 'cohere/embed-english-v3.0', 'huggingface/microsoft/codebert-base', etc. n_agents (int): Number of agents to return in queries. - *args: Additional arguments to pass to the chromadb Client. - **kwargs: Additional keyword arguments to pass to the chromadb Client. + api_key (str, optional): API key for the embedding service. If not provided, + will use environment variables. + api_base (str, optional): Custom API base URL for the embedding service. + agents (List[AgentType], optional): List of agents to initialize the router with. """ def __init__( self, - collection_name: str = "agents", - persist_directory: str = "./vector_db", + embedding_model: str = "text-embedding-ada-002", n_agents: int = 1, - *args, - **kwargs, + api_key: Optional[str] = None, + api_base: Optional[str] = None, + agents: Optional[List[AgentType]] = None, ): + self.embedding_model = embedding_model + self.n_agents = n_agents + self.api_key = api_key + self.api_base = api_base + self.agents: List[AgentType] = [] + self.agent_embeddings: List[List[float]] = [] + self.agent_metadata: List[dict] = [] + + # Add agents if provided during initialization + if agents: + self.add_agents(agents) + + def _generate_embedding(self, text: str) -> List[float]: + """ + Generate embedding for the given text using the specified model. 
+ + Args: + text (str): The text to generate embedding for. + + Returns: + List[float]: The embedding vector as a list of floats. + """ try: - import chromadb - except ImportError: - auto_check_and_download_package( - "chromadb", package_manager="pip", upgrade=True - ) - import chromadb + # Prepare parameters for the embedding call + params = {"model": self.embedding_model, "input": [text]} - self.collection_name = collection_name - self.n_agents = n_agents - self.persist_directory = persist_directory - self.client = chromadb.Client(*args, **kwargs) - self.collection = self.client.create_collection( - collection_name - ) - self.agents: List[Agent] = [] + if self.api_key: + params["api_key"] = self.api_key + if self.api_base: + params["api_base"] = self.api_base + + response = embedding(**params) + + # Extract the embedding from the response + embedding_vector = response.data[0].embedding + return embedding_vector + + except Exception as e: + logger.error(f"Error generating embedding: {str(e)}") + raise + + def _cosine_similarity( + self, vec1: List[float], vec2: List[float] + ) -> float: + """ + Calculate cosine similarity between two vectors. + + Args: + vec1 (List[float]): First vector. + vec2 (List[float]): Second vector. + + Returns: + float: Cosine similarity between the vectors. + """ + if len(vec1) != len(vec2): + raise ValueError("Vectors must have the same length") + + # Calculate dot product + dot_product = sum(a * b for a, b in zip(vec1, vec2)) + + # Calculate magnitudes + magnitude1 = math.sqrt(sum(a * a for a in vec1)) + magnitude2 = math.sqrt(sum(a * a for a in vec2)) + + # Avoid division by zero + if magnitude1 == 0 or magnitude2 == 0: + return 0.0 + + return dot_product / (magnitude1 * magnitude2) @retry( stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10), ) - def add_agent(self, agent: Agent) -> None: + def add_agent(self, agent: AgentType) -> None: """ - Add an agent to the vector database. + Add an agent to the embedding-based agent router. Args: agent (Agent): The agent to add. Raises: - Exception: If there's an error adding the agent to the vector database. + Exception: If there's an error adding the agent to the router. """ try: agent_text = f"{agent.name} {agent.description} {agent.system_prompt}" - self.collection.add( - documents=[agent_text], - metadatas=[{"name": agent.name}], - ids=[agent.name], - ) + + # Generate embedding for the agent + agent_embedding = self._generate_embedding(agent_text) + + # Store agent and its embedding self.agents.append(agent) + self.agent_embeddings.append(agent_embedding) + self.agent_metadata.append( + {"name": agent.name, "text": agent_text} + ) + logger.info( - f"Added agent {agent.name} to the vector database." + f"Added agent {agent.name} to the embedding-based router." ) except Exception as e: logger.error( - f"Error adding agent {agent.name} to the vector database: {str(e)}" + f"Error adding agent {agent.name} to the router: {str(e)}" ) raise def add_agents( - self, agents: List[Union[Agent, Callable, Any]] + self, agents: List[Union[AgentType, Callable, Any]] ) -> None: """ Add multiple agents to the vector database. @@ -94,7 +153,7 @@ class AgentRouter: def update_agent_history(self, agent_name: str) -> None: """ - Update the agent's entry in the vector database with its interaction history. + Update the agent's entry in the router with its interaction history. Args: agent_name (str): The name of the agent to update. 
@@ -107,17 +166,39 @@ class AgentRouter: history_text = " ".join(history) updated_text = f"{agent.name} {agent.description} {agent.system_prompt} {history_text}" - self.collection.update( - ids=[agent_name], - documents=[updated_text], - metadatas=[{"name": agent_name}], - ) - logger.info( - f"Updated agent {agent_name} with interaction history." + # Find the agent's index + agent_index = next( + ( + i + for i, a in enumerate(self.agents) + if a.name == agent_name + ), + None, ) + + if agent_index is not None: + # Generate new embedding with updated text + updated_embedding = self._generate_embedding( + updated_text + ) + + # Update the stored data + self.agent_embeddings[agent_index] = updated_embedding + self.agent_metadata[agent_index] = { + "name": agent_name, + "text": updated_text, + } + + logger.info( + f"Updated agent {agent_name} with interaction history." + ) + else: + logger.warning( + f"Agent {agent_name} not found in the agents list." + ) else: logger.warning( - f"Agent {agent_name} not found in the database." + f"Agent {agent_name} not found in the router." ) @retry( @@ -126,14 +207,14 @@ class AgentRouter: ) def find_best_agent( self, task: str, *args, **kwargs - ) -> Optional[Agent]: + ) -> Optional[AgentType]: """ - Find the best agent for a given task. + Find the best agent for a given task using cosine similarity. Args: task (str): The task description. - *args: Additional arguments to pass to the collection.query method. - **kwargs: Additional keyword arguments to pass to the collection.query method. + *args: Additional arguments (unused, kept for compatibility). + **kwargs: Additional keyword arguments (unused, kept for compatibility). Returns: Optional[Agent]: The best matching agent, if found. @@ -142,32 +223,32 @@ class AgentRouter: Exception: If there's an error finding the best agent. """ try: - results = self.collection.query( - query_texts=[task], - n_results=self.n_agents, - *args, - **kwargs, - ) + if not self.agents or not self.agent_embeddings: + logger.warning("No agents available in the router.") + return None + + # Generate embedding for the task + task_embedding = self._generate_embedding(task) + + # Calculate cosine similarities + similarities = [] + for agent_embedding in self.agent_embeddings: + similarity = self._cosine_similarity( + task_embedding, agent_embedding + ) + similarities.append(similarity) + + # Find the best matching agent(s) + if similarities: + # Get index of the best similarity + best_index = similarities.index(max(similarities)) + best_agent = self.agents[best_index] + best_similarity = similarities[best_index] - if results["ids"]: - best_match_name = results["ids"][0][0] - best_agent = next( - ( - a - for a in self.agents - if a.name == best_match_name - ), - None, + logger.info( + f"Found best matching agent: {best_agent.name} (similarity: {best_similarity:.4f})" ) - if best_agent: - logger.info( - f"Found best matching agent: {best_match_name}" - ) - return best_agent - else: - logger.warning( - f"Agent {best_match_name} found in index but not in agents list." - ) + return best_agent else: logger.warning( "No matching agent found for the given task." diff --git a/swarms/structs/aop.py b/swarms/structs/aop.py index 044ab563..b95acb77 100644 --- a/swarms/structs/aop.py +++ b/swarms/structs/aop.py @@ -1,4 +1,5 @@ import asyncio +import socket import sys import threading import time @@ -556,6 +557,7 @@ class AOP: 3. Handle tool execution with proper error handling 4. Manage the MCP server lifecycle 5. 
Queue-based task execution for improved performance and reliability + 6. Persistence mode with automatic restart and failsafe protection Attributes: mcp_server: The FastMCP server instance @@ -564,6 +566,13 @@ class AOP: task_queues: Dictionary mapping tool names to their task queues server_name: Name of the MCP server queue_enabled: Whether queue-based execution is enabled + persistence: Whether persistence mode is enabled + max_restart_attempts: Maximum number of restart attempts before giving up + restart_delay: Delay between restart attempts in seconds + network_monitoring: Whether network connection monitoring is enabled + max_network_retries: Maximum number of network reconnection attempts + network_retry_delay: Delay between network retry attempts in seconds + network_timeout: Network connection timeout in seconds """ def __init__( @@ -581,6 +590,13 @@ class AOP: max_queue_size_per_agent: int = 1000, processing_timeout: int = 30, retry_delay: float = 1.0, + persistence: bool = False, + max_restart_attempts: int = 10, + restart_delay: float = 5.0, + network_monitoring: bool = True, + max_network_retries: int = 5, + network_retry_delay: float = 10.0, + network_timeout: float = 30.0, log_level: Literal[ "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" ] = "INFO", @@ -605,6 +621,13 @@ class AOP: max_queue_size_per_agent: Maximum queue size per agent processing_timeout: Timeout for task processing in seconds retry_delay: Delay between retries in seconds + persistence: Enable automatic restart on shutdown (with failsafe) + max_restart_attempts: Maximum number of restart attempts before giving up + restart_delay: Delay between restart attempts in seconds + network_monitoring: Enable network connection monitoring and retry + max_network_retries: Maximum number of network reconnection attempts + network_retry_delay: Delay between network retry attempts in seconds + network_timeout: Network connection timeout in seconds """ self.server_name = server_name self.description = description @@ -618,6 +641,23 @@ class AOP: self.max_queue_size_per_agent = max_queue_size_per_agent self.processing_timeout = processing_timeout self.retry_delay = retry_delay + self.persistence = persistence + self.max_restart_attempts = max_restart_attempts + self.restart_delay = restart_delay + self.network_monitoring = network_monitoring + self.max_network_retries = max_network_retries + self.network_retry_delay = network_retry_delay + self.network_timeout = network_timeout + + # Persistence state tracking + self._restart_count = 0 + self._persistence_enabled = persistence + self._shutdown_requested = False + + # Network state tracking + self._network_retry_count = 0 + self._last_network_error = None + self._network_connected = True self.agents: Dict[str, Agent] = {} self.tool_configs: Dict[str, AgentToolConfig] = {} @@ -641,7 +681,7 @@ class AOP: ) logger.info( - f"Initialized AOP with server name: {server_name}, verbose: {verbose}, traceback: {traceback_enabled}" + f"Initialized AOP with server name: {server_name}, verbose: {verbose}, traceback: {traceback_enabled}, persistence: {persistence}, network_monitoring: {network_monitoring}" ) # Add initial agents if provided @@ -2262,9 +2302,397 @@ class AOP: def run(self) -> None: """ - Run the MCP server. + Run the MCP server with optional persistence. + + If persistence is enabled, the server will automatically restart + when stopped, up to max_restart_attempts times. This includes + a failsafe mechanism to prevent infinite restart loops. 
+ """ + if not self._persistence_enabled: + # Standard run without persistence + self.start_server() + return + + # Persistence-enabled run + logger.info( + f"Starting AOP server with persistence enabled (max restarts: {self.max_restart_attempts})" + ) + + while ( + not self._shutdown_requested + and self._restart_count <= self.max_restart_attempts + ): + try: + if self._restart_count > 0: + logger.info( + f"Restarting server (attempt {self._restart_count}/{self.max_restart_attempts})" + ) + # Wait before restarting + time.sleep(self.restart_delay) + + # Reset restart count on successful start + self._restart_count = 0 + self.start_server() + + except KeyboardInterrupt: + if ( + self._persistence_enabled + and not self._shutdown_requested + ): + logger.warning( + "Server interrupted by user, but persistence is enabled. Restarting..." + ) + self._restart_count += 1 + continue + else: + logger.info("Server shutdown requested by user") + break + + except Exception as e: + if ( + self._persistence_enabled + and not self._shutdown_requested + ): + # Check if it's a network error + if self._is_network_error(e): + logger.warning( + "🌐 Network error detected, attempting reconnection..." + ) + if self._handle_network_error(e): + # Network retry successful, continue with restart + self._restart_count += 1 + continue + else: + # Network retry failed, give up + logger.critical( + "šŸ’€ Network reconnection failed permanently" + ) + break + else: + # Non-network error, use standard restart logic + logger.error( + f"Server crashed with error: {e}" + ) + self._restart_count += 1 + + if ( + self._restart_count + > self.max_restart_attempts + ): + logger.critical( + f"Maximum restart attempts ({self.max_restart_attempts}) exceeded. Shutting down permanently." + ) + break + else: + logger.info( + f"Will restart in {self.restart_delay} seconds..." + ) + continue + else: + # Check if it's a network error even without persistence + if self._is_network_error(e): + logger.error( + "🌐 Network error detected but persistence is disabled" + ) + if self.network_monitoring: + logger.info( + "šŸ”„ Attempting network reconnection..." + ) + if self._handle_network_error(e): + # Try to start server again after network recovery + try: + self.start_server() + return + except Exception as retry_error: + logger.error( + f"Server failed after network recovery: {retry_error}" + ) + raise + else: + logger.critical( + "šŸ’€ Network reconnection failed" + ) + raise + else: + logger.error( + "Network monitoring is disabled, cannot retry" + ) + raise + else: + logger.error( + f"Server failed and persistence is disabled: {e}" + ) + raise + + if self._restart_count > self.max_restart_attempts: + logger.critical( + "Server failed permanently due to exceeding maximum restart attempts" + ) + elif self._shutdown_requested: + logger.info("Server shutdown completed as requested") + else: + logger.info("Server stopped normally") + + def _is_network_error(self, error: Exception) -> bool: + """ + Check if an error is network-related. 
+ + Args: + error: The exception to check + + Returns: + bool: True if the error is network-related + """ + network_errors = ( + ConnectionError, + ConnectionRefusedError, + ConnectionResetError, + ConnectionAbortedError, + TimeoutError, + socket.gaierror, + socket.timeout, + OSError, + ) + + # Check if it's a direct network error + if isinstance(error, network_errors): + return True + + # Check error message for network-related keywords + error_msg = str(error).lower() + network_keywords = [ + "connection refused", + "connection reset", + "connection aborted", + "network is unreachable", + "no route to host", + "timeout", + "socket", + "network", + "connection", + "refused", + "reset", + "aborted", + "unreachable", + "timeout", + ] + + return any( + keyword in error_msg for keyword in network_keywords + ) + + def _get_network_error_message( + self, error: Exception, attempt: int + ) -> str: + """ + Get a custom error message for network-related errors. + + Args: + error: The network error that occurred + attempt: Current retry attempt number + + Returns: + str: Custom error message + """ + error_type = type(error).__name__ + error_msg = str(error) + + if isinstance(error, ConnectionRefusedError): + return f"🌐 NETWORK ERROR: Connection refused to {self.host}:{self.port} (attempt {attempt}/{self.max_network_retries})" + elif isinstance(error, ConnectionResetError): + return f"🌐 NETWORK ERROR: Connection was reset by remote host (attempt {attempt}/{self.max_network_retries})" + elif isinstance(error, ConnectionAbortedError): + return f"🌐 NETWORK ERROR: Connection was aborted (attempt {attempt}/{self.max_network_retries})" + elif isinstance(error, TimeoutError): + return f"🌐 NETWORK ERROR: Connection timeout after {self.network_timeout}s (attempt {attempt}/{self.max_network_retries})" + elif isinstance(error, socket.gaierror): + return f"🌐 NETWORK ERROR: Host resolution failed for {self.host} (attempt {attempt}/{self.max_network_retries})" + elif isinstance(error, OSError): + return f"🌐 NETWORK ERROR: OS-level network error - {error_msg} (attempt {attempt}/{self.max_network_retries})" + else: + return f"🌐 NETWORK ERROR: {error_type} - {error_msg} (attempt {attempt}/{self.max_network_retries})" + + def _test_network_connectivity(self) -> bool: + """ + Test network connectivity to the server host and port. + + Returns: + bool: True if network is reachable, False otherwise + """ + try: + # Test if we can resolve the host + socket.gethostbyname(self.host) + + # Test if we can connect to the port + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(self.network_timeout) + result = sock.connect_ex((self.host, self.port)) + sock.close() + + return result == 0 + except Exception as e: + if self.verbose: + logger.debug(f"Network connectivity test failed: {e}") + return False + + def _handle_network_error(self, error: Exception) -> bool: + """ + Handle network errors with retry logic. + + Args: + error: The network error that occurred + + Returns: + bool: True if should retry, False if should give up + """ + if not self.network_monitoring: + return False + + self._network_retry_count += 1 + self._last_network_error = error + self._network_connected = False + + # Get custom error message + error_msg = self._get_network_error_message( + error, self._network_retry_count + ) + logger.error(error_msg) + + # Check if we should retry + if self._network_retry_count <= self.max_network_retries: + logger.warning( + f"šŸ”„ Attempting to reconnect in {self.network_retry_delay} seconds..." 
+ ) + logger.info( + f"šŸ“Š Network retry {self._network_retry_count}/{self.max_network_retries}" + ) + + # Wait before retry + time.sleep(self.network_retry_delay) + + # Test connectivity before retry + if self._test_network_connectivity(): + logger.info("āœ… Network connectivity restored!") + self._network_connected = True + self._network_retry_count = ( + 0 # Reset on successful test + ) + return True + else: + logger.warning( + "āŒ Network connectivity test failed, will retry..." + ) + return True + else: + logger.critical( + f"šŸ’€ Maximum network retry attempts ({self.max_network_retries}) exceeded!" + ) + logger.critical( + "🚫 Giving up on network reconnection. Server will shut down." + ) + return False + + def get_network_status(self) -> Dict[str, Any]: + """ + Get current network status and statistics. + + Returns: + Dict containing network status information + """ + return { + "network_monitoring_enabled": self.network_monitoring, + "network_connected": self._network_connected, + "network_retry_count": self._network_retry_count, + "max_network_retries": self.max_network_retries, + "network_retry_delay": self.network_retry_delay, + "network_timeout": self.network_timeout, + "last_network_error": ( + str(self._last_network_error) + if self._last_network_error + else None + ), + "remaining_network_retries": max( + 0, + self.max_network_retries - self._network_retry_count, + ), + "host": self.host, + "port": self.port, + } + + def reset_network_retry_count(self) -> None: + """ + Reset the network retry counter. + + This can be useful if you want to give the server a fresh + set of network retry attempts. + """ + self._network_retry_count = 0 + self._last_network_error = None + self._network_connected = True + logger.info("Network retry counter reset") + + def enable_persistence(self) -> None: + """ + Enable persistence mode for the server. + + This allows the server to automatically restart when stopped, + up to the maximum number of restart attempts. + """ + self._persistence_enabled = True + logger.info("Persistence mode enabled") + + def disable_persistence(self) -> None: + """ + Disable persistence mode for the server. + + This will allow the server to shut down normally without + automatic restarts. + """ + self._persistence_enabled = False + self._shutdown_requested = True + logger.info( + "Persistence mode disabled - server will shut down on next stop" + ) + + def request_shutdown(self) -> None: + """ + Request a graceful shutdown of the server. + + If persistence is enabled, this will prevent automatic restarts + and allow the server to shut down normally. + """ + self._shutdown_requested = True + logger.info( + "Shutdown requested - server will stop after current operations complete" + ) + + def get_persistence_status(self) -> Dict[str, Any]: + """ + Get the current persistence status and statistics. + + Returns: + Dict containing persistence configuration and status + """ + return { + "persistence_enabled": self._persistence_enabled, + "shutdown_requested": self._shutdown_requested, + "restart_count": self._restart_count, + "max_restart_attempts": self.max_restart_attempts, + "restart_delay": self.restart_delay, + "remaining_restarts": max( + 0, self.max_restart_attempts - self._restart_count + ), + } + + def reset_restart_count(self) -> None: + """ + Reset the restart counter. + + This can be useful if you want to give the server a fresh + set of restart attempts. 
""" - self.start_server() + self._restart_count = 0 + logger.info("Restart counter reset") def get_server_info(self) -> Dict[str, Any]: """ @@ -2283,6 +2711,8 @@ class AOP: "log_level": self.log_level, "transport": self.transport, "queue_enabled": self.queue_enabled, + "persistence": self.get_persistence_status(), + "network": self.get_network_status(), "tool_details": { tool_name: self.get_agent_info(tool_name) for tool_name in self.agents.keys() diff --git a/swarms/structs/social_algorithms.py b/swarms/structs/social_algorithms.py index aeee7a69..c432419a 100644 --- a/swarms/structs/social_algorithms.py +++ b/swarms/structs/social_algorithms.py @@ -1,15 +1,7 @@ -""" -Social Algorithms for Multi-Agent Communication - -This module provides a flexible framework for defining custom social algorithms -that control how agents communicate and interact with each other in multi-agent systems. -""" - import time import uuid from typing import Any, Callable, Dict, List, Optional from dataclasses import dataclass -from enum import Enum from swarms.structs.agent import Agent from swarms.structs.omni_agent_types import AgentType @@ -19,18 +11,6 @@ from swarms.utils.output_types import OutputType logger = initialize_logger(log_folder="social_algorithms") -class SocialAlgorithmType(Enum): - """Types of social algorithms supported.""" - - CUSTOM = "custom" - SEQUENTIAL = "sequential" - CONCURRENT = "concurrent" - HIERARCHICAL = "hierarchical" - MESH = "mesh" - ROUND_ROBIN = "round_robin" - BROADCAST = "broadcast" - - @dataclass class CommunicationStep: """Represents a single step in a social algorithm.""" diff --git a/swarms/utils/audio_processing.py b/swarms/utils/audio_processing.py deleted file mode 100644 index 1f746923..00000000 --- a/swarms/utils/audio_processing.py +++ /dev/null @@ -1,343 +0,0 @@ -import base64 -from typing import Union, Dict, Any, Tuple -import requests -from pathlib import Path -import wave -import numpy as np - - -def encode_audio_to_base64(audio_path: Union[str, Path]) -> str: - """ - Encode a WAV file to base64 string. - - Args: - audio_path (Union[str, Path]): Path to the WAV file - - Returns: - str: Base64 encoded string of the audio file - - Raises: - FileNotFoundError: If the audio file doesn't exist - ValueError: If the file is not a valid WAV file - """ - try: - audio_path = Path(audio_path) - if not audio_path.exists(): - raise FileNotFoundError( - f"Audio file not found: {audio_path}" - ) - - if not audio_path.suffix.lower() == ".wav": - raise ValueError("File must be a WAV file") - - with open(audio_path, "rb") as audio_file: - audio_data = audio_file.read() - return base64.b64encode(audio_data).decode("utf-8") - except Exception as e: - raise Exception(f"Error encoding audio file: {str(e)}") - - -def decode_base64_to_audio( - base64_string: str, output_path: Union[str, Path] -) -> None: - """ - Decode a base64 string to a WAV file. 
- - Args: - base64_string (str): Base64 encoded audio data - output_path (Union[str, Path]): Path where the WAV file should be saved - - Raises: - ValueError: If the base64 string is invalid - IOError: If there's an error writing the file - """ - try: - output_path = Path(output_path) - output_path.parent.mkdir(parents=True, exist_ok=True) - - audio_data = base64.b64decode(base64_string) - with open(output_path, "wb") as audio_file: - audio_file.write(audio_data) - except Exception as e: - raise Exception(f"Error decoding audio data: {str(e)}") - - -def download_audio_from_url( - url: str, output_path: Union[str, Path] -) -> None: - """ - Download an audio file from a URL and save it locally. - - Args: - url (str): URL of the audio file - output_path (Union[str, Path]): Path where the audio file should be saved - - Raises: - requests.RequestException: If there's an error downloading the file - IOError: If there's an error saving the file - """ - try: - output_path = Path(output_path) - output_path.parent.mkdir(parents=True, exist_ok=True) - - response = requests.get(url) - response.raise_for_status() - - with open(output_path, "wb") as audio_file: - audio_file.write(response.content) - except Exception as e: - raise Exception(f"Error downloading audio file: {str(e)}") - - -def process_audio_with_model( - audio_path: Union[str, Path], - model: str, - prompt: str, - voice: str = "alloy", - format: str = "wav", -) -> Dict[str, Any]: - """ - Process an audio file with a model that supports audio input/output. - - Args: - audio_path (Union[str, Path]): Path to the input WAV file - model (str): Model name to use for processing - prompt (str): Text prompt to accompany the audio - voice (str, optional): Voice to use for audio output. Defaults to "alloy" - format (str, optional): Audio format. Defaults to "wav" - - Returns: - Dict[str, Any]: Model response containing both text and audio if applicable - - Raises: - ImportError: If litellm is not installed - ValueError: If the model doesn't support audio processing - """ - try: - from litellm import ( - completion, - supports_audio_input, - supports_audio_output, - ) - - if not supports_audio_input(model): - raise ValueError( - f"Model {model} does not support audio input" - ) - - # Encode the audio file - encoded_audio = encode_audio_to_base64(audio_path) - - # Prepare the messages - messages = [ - { - "role": "user", - "content": [ - {"type": "text", "text": prompt}, - { - "type": "input_audio", - "input_audio": { - "data": encoded_audio, - "format": format, - }, - }, - ], - } - ] - - # Make the API call - response = completion( - model=model, - modalities=["text", "audio"], - audio={"voice": voice, "format": format}, - messages=messages, - ) - - return response - except ImportError: - raise ImportError( - "Please install litellm: pip install litellm" - ) - except Exception as e: - raise Exception( - f"Error processing audio with model: {str(e)}" - ) - - -def read_wav_file( - file_path: Union[str, Path], -) -> Tuple[np.ndarray, int]: - """ - Read a WAV file and return its audio data and sample rate. 
- - Args: - file_path (Union[str, Path]): Path to the WAV file - - Returns: - Tuple[np.ndarray, int]: Audio data as numpy array and sample rate - - Raises: - FileNotFoundError: If the file doesn't exist - ValueError: If the file is not a valid WAV file - """ - try: - file_path = Path(file_path) - if not file_path.exists(): - raise FileNotFoundError( - f"Audio file not found: {file_path}" - ) - - with wave.open(str(file_path), "rb") as wav_file: - # Get audio parameters - n_channels = wav_file.getnchannels() - sample_width = wav_file.getsampwidth() - frame_rate = wav_file.getframerate() - n_frames = wav_file.getnframes() - - # Read audio data - frames = wav_file.readframes(n_frames) - - # Convert to numpy array - dtype = np.int16 if sample_width == 2 else np.int8 - audio_data = np.frombuffer(frames, dtype=dtype) - - # Reshape if stereo - if n_channels == 2: - audio_data = audio_data.reshape(-1, 2) - - return audio_data, frame_rate - - except Exception as e: - raise Exception(f"Error reading WAV file: {str(e)}") - - -def write_wav_file( - audio_data: np.ndarray, - file_path: Union[str, Path], - sample_rate: int, - sample_width: int = 2, -) -> None: - """ - Write audio data to a WAV file. - - Args: - audio_data (np.ndarray): Audio data as numpy array - file_path (Union[str, Path]): Path where to save the WAV file - sample_rate (int): Sample rate of the audio - sample_width (int, optional): Sample width in bytes. Defaults to 2 (16-bit) - - Raises: - ValueError: If the audio data is invalid - IOError: If there's an error writing the file - """ - try: - file_path = Path(file_path) - file_path.parent.mkdir(parents=True, exist_ok=True) - - # Ensure audio data is in the correct format - if audio_data.dtype != np.int16 and sample_width == 2: - audio_data = (audio_data * 32767).astype(np.int16) - elif audio_data.dtype != np.int8 and sample_width == 1: - audio_data = (audio_data * 127).astype(np.int8) - - # Determine number of channels - n_channels = ( - 2 - if len(audio_data.shape) > 1 and audio_data.shape[1] == 2 - else 1 - ) - - with wave.open(str(file_path), "wb") as wav_file: - wav_file.setnchannels(n_channels) - wav_file.setsampwidth(sample_width) - wav_file.setframerate(sample_rate) - wav_file.writeframes(audio_data.tobytes()) - - except Exception as e: - raise Exception(f"Error writing WAV file: {str(e)}") - - -def normalize_audio(audio_data: np.ndarray) -> np.ndarray: - """ - Normalize audio data to have maximum amplitude of 1.0. - - Args: - audio_data (np.ndarray): Input audio data - - Returns: - np.ndarray: Normalized audio data - """ - return audio_data / np.max(np.abs(audio_data)) - - -def convert_to_mono(audio_data: np.ndarray) -> np.ndarray: - """ - Convert stereo audio to mono by averaging channels. - - Args: - audio_data (np.ndarray): Input audio data (stereo) - - Returns: - np.ndarray: Mono audio data - """ - if len(audio_data.shape) == 1: - return audio_data - return np.mean(audio_data, axis=1) - - -def encode_wav_to_base64( - audio_data: np.ndarray, sample_rate: int -) -> str: - """ - Convert audio data to base64 encoded WAV string. 
- - Args: - audio_data (np.ndarray): Audio data - sample_rate (int): Sample rate of the audio - - Returns: - str: Base64 encoded WAV data - """ - # Create a temporary WAV file in memory - with wave.open("temp.wav", "wb") as wav_file: - wav_file.setnchannels(1 if len(audio_data.shape) == 1 else 2) - wav_file.setsampwidth(2) # 16-bit - wav_file.setframerate(sample_rate) - wav_file.writeframes(audio_data.tobytes()) - - # Read the file and encode to base64 - with open("temp.wav", "rb") as f: - wav_bytes = f.read() - - # Clean up temporary file - Path("temp.wav").unlink() - - return base64.b64encode(wav_bytes).decode("utf-8") - - -def decode_base64_to_wav( - base64_string: str, -) -> Tuple[np.ndarray, int]: - """ - Convert base64 encoded WAV string to audio data and sample rate. - - Args: - base64_string (str): Base64 encoded WAV data - - Returns: - Tuple[np.ndarray, int]: Audio data and sample rate - """ - # Decode base64 string - wav_bytes = base64.b64decode(base64_string) - - # Write to temporary file - with open("temp.wav", "wb") as f: - f.write(wav_bytes) - - # Read the WAV file - audio_data, sample_rate = read_wav_file("temp.wav") - - # Clean up temporary file - Path("temp.wav").unlink() - - return audio_data, sample_rate diff --git a/swarms/utils/auto_download_check_packages.py b/swarms/utils/auto_download_check_packages.py deleted file mode 100644 index 187e2b11..00000000 --- a/swarms/utils/auto_download_check_packages.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -Package installation utility that checks for package existence and installs if needed. -Supports both pip and conda package managers. -""" - -import importlib.util -import subprocess -import sys -from typing import Literal, Optional, Union -from swarms.utils.loguru_logger import initialize_logger - - -from importlib.metadata import distribution, PackageNotFoundError - -logger = initialize_logger("autocheckpackages") - - -def check_and_install_package( - package_name: str, - package_manager: Literal["pip", "conda"] = "pip", - version: Optional[str] = None, - upgrade: bool = False, -) -> bool: - """ - Check if a package is installed and install it if not found. - - Args: - package_name: Name of the package to check/install - package_manager: Package manager to use ('pip' or 'conda') - version: Specific version to install (optional) - upgrade: Whether to upgrade the package if it exists - - Returns: - bool: True if package is available after check/install, False if installation failed - - Raises: - ValueError: If invalid package manager is specified - """ - try: - # Check if package exists - if package_manager == "pip": - try: - distribution(package_name) - if not upgrade: - logger.info( - f"Package {package_name} is already installed" - ) - return True - except PackageNotFoundError: - pass - - # Construct installation command - cmd = [sys.executable, "-m", "pip", "install"] - if upgrade: - cmd.append("--upgrade") - - if version: - cmd.append(f"{package_name}=={version}") - else: - cmd.append(package_name) - - elif package_manager == "conda": - # Check if conda is available - try: - subprocess.run( - ["conda", "--version"], - check=True, - capture_output=True, - ) - except (subprocess.CalledProcessError, FileNotFoundError): - logger.error( - "Conda is not available. Please install conda first." 
- ) - return False - - # Construct conda command - cmd = ["conda", "install", "-y"] - if version: - cmd.append(f"{package_name}={version}") - else: - cmd.append(package_name) - else: - raise ValueError( - f"Invalid package manager: {package_manager}" - ) - - # Run installation - logger.info(f"Installing {package_name}...") - subprocess.run( - cmd, check=True, capture_output=True, text=True - ) - - # Verify installation - try: - importlib.import_module(package_name) - logger.info(f"Successfully installed {package_name}") - return True - except ImportError: - logger.error( - f"Package {package_name} was installed but cannot be imported" - ) - return False - - except subprocess.CalledProcessError as e: - logger.error(f"Failed to install {package_name}: {e.stderr}") - return False - except Exception as e: - logger.error( - f"Unexpected error while installing {package_name}: {str(e)}" - ) - return False - - -def auto_check_and_download_package( - packages: Union[str, list[str]], - package_manager: Literal["pip", "conda"] = "pip", - upgrade: bool = False, -) -> bool: - """ - Ensure multiple packages are installed. - - Args: - packages: Single package name or list of package names - package_manager: Package manager to use ('pip' or 'conda') - upgrade: Whether to upgrade existing packages - - Returns: - bool: True if all packages are available, False if any installation failed - """ - if isinstance(packages, str): - packages = [packages] - - success = True - for package in packages: - if ":" in package: - name, version = package.split(":") - if not check_and_install_package( - name, package_manager, version, upgrade - ): - success = False - else: - if not check_and_install_package( - package, package_manager, upgrade=upgrade - ): - success = False - - return success - - -# if __name__ == "__main__": -# print(auto_check_and_download_package("torch")) diff --git a/swarms/utils/image_generator.py b/swarms/utils/image_generator.py deleted file mode 100644 index 1fffdb30..00000000 --- a/swarms/utils/image_generator.py +++ /dev/null @@ -1,54 +0,0 @@ -from typing import Any -from litellm import image_generation - - -class ImageGenerator: - def __init__( - self, - model: str | None = None, - n: int | None = 2, - quality: Any = None, - response_format: str | None = None, - size: str | None = 10, - style: str | None = None, - user: str | None = None, - input_fidelity: str | None = None, - timeout: int = 600, - output_path_folder: str | None = "images", - api_key: str | None = None, - api_base: str | None = None, - ): - self.model = model - self.n = n - self.quality = quality - self.response_format = response_format - self.size = size - self.style = style - self.user = user - self.input_fidelity = input_fidelity - self.timeout = timeout - self.output_path_folder = output_path_folder - self.api_key = api_key - self.api_base = api_base - - def run(self, task: str = None): - - return image_generation( - prompt=task, - model=self.model, - n=self.n, - quality=self.quality, - response_format=self.response_format, - size=self.size, - style=self.style, - user=self.user, - input_fidelity=self.input_fidelity, - timeout=self.timeout, - ) - - -# if __name__ == "__main__": -# image_generator = ImageGenerator() -# print(image_generator.run(task="A beautiful sunset over a calm ocean")) - -# print(model_list) diff --git a/tests/structs/test_agent_router.py b/tests/structs/test_agent_router.py new file mode 100644 index 00000000..105644ac --- /dev/null +++ b/tests/structs/test_agent_router.py @@ -0,0 +1,387 @@ +""" 
+Simplified test suite for AgentRouter class using pytest. + +This module contains focused tests for the core functionality of the AgentRouter class. +""" + +import pytest +from unittest.mock import Mock, patch + +from swarms.structs.agent_router import AgentRouter +from swarms.structs.agent import Agent + + +@pytest.fixture +def test_agent(): + """Create a real agent for testing.""" + with patch("swarms.structs.agent.LiteLLM") as mock_llm: + mock_llm.return_value.run.return_value = "Test response" + return Agent( + agent_name="test_agent", + agent_description="A test agent", + system_prompt="You are a test agent", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + print_on=False, + ) + + +def test_agent_router_initialization_default(): + """Test AgentRouter initialization with default parameters.""" + with patch("swarms.structs.agent_router.embedding"): + router = AgentRouter() + + assert router.embedding_model == "text-embedding-ada-002" + assert router.n_agents == 1 + assert router.api_key is None + assert router.api_base is None + assert router.agents == [] + assert router.agent_embeddings == [] + assert router.agent_metadata == [] + + +def test_agent_router_initialization_custom(): + """Test AgentRouter initialization with custom parameters.""" + with patch("swarms.structs.agent_router.embedding"), patch( + "swarms.structs.agent.LiteLLM" + ) as mock_llm: + mock_llm.return_value.run.return_value = "Test response" + agents = [ + Agent( + agent_name="test1", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + print_on=False, + ), + Agent( + agent_name="test2", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + print_on=False, + ), + ] + router = AgentRouter( + embedding_model="custom-model", + n_agents=3, + api_key="custom_key", + api_base="custom_base", + agents=agents, + ) + + assert router.embedding_model == "custom-model" + assert router.n_agents == 3 + assert router.api_key == "custom_key" + assert router.api_base == "custom_base" + assert len(router.agents) == 2 + + +def test_cosine_similarity_identical_vectors(): + """Test cosine similarity with identical vectors.""" + router = AgentRouter() + vec1 = [1.0, 0.0, 0.0] + vec2 = [1.0, 0.0, 0.0] + + result = router._cosine_similarity(vec1, vec2) + assert result == 1.0 + + +def test_cosine_similarity_orthogonal_vectors(): + """Test cosine similarity with orthogonal vectors.""" + router = AgentRouter() + vec1 = [1.0, 0.0, 0.0] + vec2 = [0.0, 1.0, 0.0] + + result = router._cosine_similarity(vec1, vec2) + assert result == 0.0 + + +def test_cosine_similarity_opposite_vectors(): + """Test cosine similarity with opposite vectors.""" + router = AgentRouter() + vec1 = [1.0, 0.0, 0.0] + vec2 = [-1.0, 0.0, 0.0] + + result = router._cosine_similarity(vec1, vec2) + assert result == -1.0 + + +def test_cosine_similarity_different_lengths(): + """Test cosine similarity with vectors of different lengths.""" + router = AgentRouter() + vec1 = [1.0, 0.0] + vec2 = [1.0, 0.0, 0.0] + + with pytest.raises( + ValueError, match="Vectors must have the same length" + ): + router._cosine_similarity(vec1, vec2) + + +@patch("swarms.structs.agent_router.embedding") +def test_generate_embedding_success(mock_embedding): + """Test successful embedding generation.""" + mock_embedding.return_value.data = [ + Mock(embedding=[0.1, 0.2, 0.3, 0.4]) + ] + + router = AgentRouter() + result = router._generate_embedding("test text") + + assert result == [0.1, 0.2, 0.3, 0.4] + mock_embedding.assert_called_once() + + 
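+# The mocked responses above assume litellm's embedding payload shape: a `.data`
+# list whose first element exposes an `.embedding` attribute holding a list of
+# floats. A hedged sketch (names and arguments presumed, not taken from the
+# router implementation) of the call `_generate_embedding` is expected to make:
+#
+#     from litellm import embedding
+#     response = embedding(model="text-embedding-ada-002", input=["test text"])
+#     vector = response.data[0].embedding  # e.g. [0.1, 0.2, 0.3, 0.4]
+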
+@patch("swarms.structs.agent_router.embedding") +def test_generate_embedding_error(mock_embedding): + """Test embedding generation error handling.""" + mock_embedding.side_effect = Exception("API Error") + + router = AgentRouter() + + with pytest.raises(Exception, match="API Error"): + router._generate_embedding("test text") + + +@patch("swarms.structs.agent_router.embedding") +def test_add_agent_success(mock_embedding, test_agent): + """Test successful agent addition.""" + mock_embedding.return_value.data = [ + Mock(embedding=[0.1, 0.2, 0.3]) + ] + + router = AgentRouter() + router.add_agent(test_agent) + + assert len(router.agents) == 1 + assert len(router.agent_embeddings) == 1 + assert len(router.agent_metadata) == 1 + assert router.agents[0] == test_agent + assert router.agent_embeddings[0] == [0.1, 0.2, 0.3] + assert router.agent_metadata[0]["name"] == "test_agent" + + +@patch("swarms.structs.agent_router.embedding") +def test_add_agent_retry_error(mock_embedding, test_agent): + """Test agent addition with retry mechanism failure.""" + mock_embedding.side_effect = Exception("Embedding error") + + router = AgentRouter() + + # Should raise RetryError after retries are exhausted + with pytest.raises(Exception) as exc_info: + router.add_agent(test_agent) + + # Check that it's a retry error or contains the original error + assert "Embedding error" in str( + exc_info.value + ) or "RetryError" in str(exc_info.value) + + +@patch("swarms.structs.agent_router.embedding") +def test_add_agents_multiple(mock_embedding): + """Test adding multiple agents.""" + mock_embedding.return_value.data = [ + Mock(embedding=[0.1, 0.2, 0.3]) + ] + + with patch("swarms.structs.agent.LiteLLM") as mock_llm: + mock_llm.return_value.run.return_value = "Test response" + router = AgentRouter() + agents = [ + Agent( + agent_name="agent1", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + print_on=False, + ), + Agent( + agent_name="agent2", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + print_on=False, + ), + Agent( + agent_name="agent3", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + print_on=False, + ), + ] + + router.add_agents(agents) + + assert len(router.agents) == 3 + assert len(router.agent_embeddings) == 3 + assert len(router.agent_metadata) == 3 + + +@patch("swarms.structs.agent_router.embedding") +def test_find_best_agent_success(mock_embedding): + """Test successful best agent finding.""" + # Mock embeddings for agents and task + mock_embedding.side_effect = [ + Mock(data=[Mock(embedding=[0.1, 0.2, 0.3])]), # agent1 + Mock(data=[Mock(embedding=[0.4, 0.5, 0.6])]), # agent2 + Mock(data=[Mock(embedding=[0.7, 0.8, 0.9])]), # task + ] + + with patch("swarms.structs.agent.LiteLLM") as mock_llm: + mock_llm.return_value.run.return_value = "Test response" + router = AgentRouter() + agent1 = Agent( + agent_name="agent1", + agent_description="First agent", + system_prompt="Prompt 1", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + print_on=False, + ) + agent2 = Agent( + agent_name="agent2", + agent_description="Second agent", + system_prompt="Prompt 2", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + print_on=False, + ) + + router.add_agent(agent1) + router.add_agent(agent2) + + # Mock the similarity calculation to return predictable results + with patch.object( + router, "_cosine_similarity" + ) as mock_similarity: + mock_similarity.side_effect = [ + 0.8, + 0.6, + ] # agent1 more similar + + result = router.find_best_agent("test task") + + assert 
result == agent1 + + +def test_find_best_agent_no_agents(): + """Test finding best agent when no agents are available.""" + with patch("swarms.structs.agent_router.embedding"): + router = AgentRouter() + + result = router.find_best_agent("test task") + + assert result is None + + +@patch("swarms.structs.agent_router.embedding") +def test_find_best_agent_retry_error(mock_embedding): + """Test error handling in find_best_agent with retry mechanism.""" + mock_embedding.side_effect = Exception("API Error") + + with patch("swarms.structs.agent.LiteLLM") as mock_llm: + mock_llm.return_value.run.return_value = "Test response" + router = AgentRouter() + router.agents = [ + Agent( + agent_name="agent1", + model_name="gpt-4o-mini", + max_loops=1, + verbose=False, + print_on=False, + ) + ] + router.agent_embeddings = [[0.1, 0.2, 0.3]] + + # Should raise RetryError after retries are exhausted + with pytest.raises(Exception) as exc_info: + router.find_best_agent("test task") + + # Check that it's a retry error or contains the original error + assert "API Error" in str( + exc_info.value + ) or "RetryError" in str(exc_info.value) + + +@patch("swarms.structs.agent_router.embedding") +def test_update_agent_history_success(mock_embedding, test_agent): + """Test successful agent history update.""" + mock_embedding.return_value.data = [ + Mock(embedding=[0.1, 0.2, 0.3]) + ] + + router = AgentRouter() + router.add_agent(test_agent) + + # Update agent history + router.update_agent_history("test_agent") + + # Verify the embedding was regenerated + assert ( + mock_embedding.call_count == 2 + ) # Once for add, once for update + + +def test_update_agent_history_agent_not_found(): + """Test updating history for non-existent agent.""" + with patch( + "swarms.structs.agent_router.embedding" + ) as mock_embedding: + mock_embedding.return_value.data = [ + Mock(embedding=[0.1, 0.2, 0.3]) + ] + router = AgentRouter() + + # Should not raise an exception, just log a warning + router.update_agent_history("non_existent_agent") + + +@patch("swarms.structs.agent_router.embedding") +def test_agent_metadata_structure(mock_embedding, test_agent): + """Test the structure of agent metadata.""" + mock_embedding.return_value.data = [ + Mock(embedding=[0.1, 0.2, 0.3]) + ] + + router = AgentRouter() + router.add_agent(test_agent) + + metadata = router.agent_metadata[0] + assert "name" in metadata + assert "text" in metadata + assert metadata["name"] == "test_agent" + assert ( + "test_agent A test agent You are a test agent" + in metadata["text"] + ) + + +def test_agent_router_edge_cases(): + """Test various edge cases.""" + with patch( + "swarms.structs.agent_router.embedding" + ) as mock_embedding: + mock_embedding.return_value.data = [ + Mock(embedding=[0.1, 0.2, 0.3]) + ] + + router = AgentRouter() + + # Test with empty string task + result = router.find_best_agent("") + assert result is None + + # Test with very long task description + long_task = "test " * 1000 + result = router.find_best_agent(long_task) + assert result is None + + +if __name__ == "__main__": + pytest.main([__file__]) diff --git a/tests/utils/test_auto_check_download.py b/tests/utils/test_auto_check_download.py deleted file mode 100644 index ac8fee3d..00000000 --- a/tests/utils/test_auto_check_download.py +++ /dev/null @@ -1,104 +0,0 @@ -from swarms.utils.auto_download_check_packages import ( - auto_check_and_download_package, - check_and_install_package, -) - - -def test_check_and_install_package_pip(): - result = check_and_install_package("numpy", 
package_manager="pip") - print(f"Test result for 'numpy' installation using pip: {result}") - assert result, "Failed to install or verify 'numpy' using pip" - - -def test_check_and_install_package_conda(): - result = check_and_install_package( - "numpy", package_manager="conda" - ) - print( - f"Test result for 'numpy' installation using conda: {result}" - ) - assert result, "Failed to install or verify 'numpy' using conda" - - -def test_check_and_install_specific_version(): - result = check_and_install_package( - "numpy", package_manager="pip", version="1.21.0" - ) - print( - f"Test result for specific version of 'numpy' installation using pip: {result}" - ) - assert ( - result - ), "Failed to install or verify specific version of 'numpy' using pip" - - -def test_check_and_install_with_upgrade(): - result = check_and_install_package( - "numpy", package_manager="pip", upgrade=True - ) - print(f"Test result for 'numpy' upgrade using pip: {result}") - assert result, "Failed to upgrade 'numpy' using pip" - - -def test_auto_check_and_download_single_package(): - result = auto_check_and_download_package( - "scipy", package_manager="pip" - ) - print(f"Test result for 'scipy' installation using pip: {result}") - assert result, "Failed to install or verify 'scipy' using pip" - - -def test_auto_check_and_download_multiple_packages(): - packages = ["scipy", "pandas"] - result = auto_check_and_download_package( - packages, package_manager="pip" - ) - print( - f"Test result for multiple packages installation using pip: {result}" - ) - assert ( - result - ), f"Failed to install or verify one or more packages in {packages} using pip" - - -def test_auto_check_and_download_multiple_packages_with_versions(): - packages = ["numpy:1.21.0", "pandas:1.3.0"] - result = auto_check_and_download_package( - packages, package_manager="pip" - ) - print( - f"Test result for multiple packages with versions installation using pip: {result}" - ) - assert ( - result - ), f"Failed to install or verify one or more packages in {packages} with specific versions using pip" - - -# Example of running tests -if __name__ == "__main__": - try: - test_check_and_install_package_pip() - print("test_check_and_install_package_pip passed") - - test_check_and_install_package_conda() - print("test_check_and_install_package_conda passed") - - test_check_and_install_specific_version() - print("test_check_and_install_specific_version passed") - - test_check_and_install_with_upgrade() - print("test_check_and_install_with_upgrade passed") - - test_auto_check_and_download_single_package() - print("test_auto_check_and_download_single_package passed") - - test_auto_check_and_download_multiple_packages() - print("test_auto_check_and_download_multiple_packages passed") - - test_auto_check_and_download_multiple_packages_with_versions() - print( - "test_auto_check_and_download_multiple_packages_with_versions passed" - ) - - except AssertionError as e: - print(f"Test failed: {str(e)}")
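
The routing behavior exercised by tests/structs/test_agent_router.py reduces to embedding the task, embedding each agent's profile, and returning the agent whose embedding has the highest cosine similarity to the task. The sketch below is a minimal, self-contained illustration of that selection step only; the length-mismatch ValueError message and the "no agents means None" behavior mirror what the tests assert, while the function names and everything else are assumptions rather than the shipped AgentRouter implementation.

    import math
    from typing import List, Optional, Sequence


    def cosine_similarity(vec1: Sequence[float], vec2: Sequence[float]) -> float:
        # Reject mismatched lengths, matching the message the tests expect.
        if len(vec1) != len(vec2):
            raise ValueError("Vectors must have the same length")
        dot = sum(a * b for a, b in zip(vec1, vec2))
        norm1 = math.sqrt(sum(a * a for a in vec1))
        norm2 = math.sqrt(sum(b * b for b in vec2))
        if norm1 == 0.0 or norm2 == 0.0:
            return 0.0
        return dot / (norm1 * norm2)


    def pick_best_agent_index(
        task_embedding: Sequence[float],
        agent_embeddings: List[Sequence[float]],
    ) -> Optional[int]:
        # No registered agents -> nothing to route to, mirroring find_best_agent returning None.
        if not agent_embeddings:
            return None
        scores = [cosine_similarity(task_embedding, vec) for vec in agent_embeddings]
        return max(range(len(scores)), key=scores.__getitem__)


    # Usage: index 0 wins because its embedding is closer to the task embedding.
    # pick_best_agent_index([1.0, 0.0], [[0.9, 0.1], [0.0, 1.0]]) -> 0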