diff --git a/docs/swarms/agents/create_agents_yaml.md b/docs/swarms/agents/create_agents_yaml.md index c8f89b93..980364b5 100644 --- a/docs/swarms/agents/create_agents_yaml.md +++ b/docs/swarms/agents/create_agents_yaml.md @@ -122,24 +122,13 @@ load_dotenv() yaml_file = "agents_multi_agent.yaml" -# Get the OpenAI API key from the environment variable -api_key = os.getenv("GROQ_API_KEY") - -# Model -model = OpenAIChat( - openai_api_base="https://api.groq.com/openai/v1", - openai_api_key=api_key, - model_name="llama-3.1-70b-versatile", - temperature=0.1, -) - try: - # Create agents and run tasks (using 'both' to return agents and task results) - task_results = create_agents_from_yaml( - model=model, yaml_file=yaml_file, return_type="run_swarm" - ) + # Create agents and run tasks (using 'both' to return agents and task results) + task_results = create_agents_from_yaml( + model=model, yaml_file=yaml_file, return_type="run_swarm" + ) - logger.info(f"Results from agents: {task_results}") + logger.info(f"Results from agents: {task_results}") except Exception as e: logger.error(f"An error occurred: {e}") diff --git a/docs/swarms/examples/groq.md b/docs/swarms/examples/groq.md index 1c23e775..e39ad359 100644 --- a/docs/swarms/examples/groq.md +++ b/docs/swarms/examples/groq.md @@ -9,8 +9,6 @@ ```python import os -from swarm_models import OpenAIChat - from swarms import Agent company = "NVDA" diff --git a/docs/swarms/structs/group_chat.md b/docs/swarms/structs/group_chat.md index ffd632af..8b24aad2 100644 --- a/docs/swarms/structs/group_chat.md +++ b/docs/swarms/structs/group_chat.md @@ -32,24 +32,6 @@ A production-grade multi-agent system enabling sophisticated group conversations | max_loops | int | 10 | Maximum conversation turns | -## Table of Contents - -- [Installation](#installation) -- [Core Concepts](#core-concepts) -- [Basic Usage](#basic-usage) -- [Advanced Configuration](#advanced-configuration) -- [Speaker Functions](#speaker-functions) -- [Response Models](#response-models) -- [Advanced Examples](#advanced-examples) -- [API Reference](#api-reference) -- [Best Practices](#best-practices) - -## Installation - -```bash -pip3 install swarms swarm-models loguru -``` - ## Core Concepts The GroupChat system consists of several key components: @@ -65,55 +47,23 @@ The GroupChat system consists of several key components: import os from dotenv import load_dotenv -from swarm_models import OpenAIChat from swarms import Agent, GroupChat, expertise_based - if __name__ == "__main__": - load_dotenv() - - # Get the OpenAI API key from the environment variable - api_key = os.getenv("OPENAI_API_KEY") - - # Create an instance of the OpenAIChat class - model = OpenAIChat( - openai_api_key=api_key, - model_name="gpt-4o-mini", - temperature=0.1, - ) - # Example agents agent1 = Agent( agent_name="Financial-Analysis-Agent", system_prompt="You are a financial analyst specializing in investment strategies.", - llm=model, + model_name="gpt-4.1", max_loops=1, - autosave=False, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - output_type="string", - streaming_on=False, ) agent2 = Agent( agent_name="Tax-Adviser-Agent", system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.", - llm=model, + model_name="gpt-4.1", max_loops=1, - autosave=False, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - user_name="swarms_corp", - retry_attempts=1, - 
context_length=200000, - output_type="string", - streaming_on=False, ) agents = [agent1, agent2] @@ -212,36 +162,6 @@ chat = GroupChat( ) ``` -## Response Models - -### Complete Schema - -```python -class AgentResponse(BaseModel): - """Individual agent response in a conversation turn""" - agent_name: str - role: str - message: str - timestamp: datetime = Field(default_factory=datetime.now) - turn_number: int - preceding_context: List[str] = Field(default_factory=list) - -class ChatTurn(BaseModel): - """Single turn in the conversation""" - turn_number: int - responses: List[AgentResponse] - task: str - timestamp: datetime = Field(default_factory=datetime.now) - -class ChatHistory(BaseModel): - """Complete conversation history""" - turns: List[ChatTurn] - total_messages: int - name: str - description: str - start_time: datetime = Field(default_factory=datetime.now) -``` - ## Advanced Examples ### Multi-Agent Analysis Team @@ -251,19 +171,19 @@ class ChatHistory(BaseModel): data_analyst = Agent( agent_name="Data-Analyst", system_prompt="You analyze numerical data and patterns", - llm=model + model_name="gpt-4.1", ) market_expert = Agent( agent_name="Market-Expert", system_prompt="You provide market insights and trends", - llm=model + model_name="gpt-4.1", ) strategy_advisor = Agent( agent_name="Strategy-Advisor", system_prompt="You formulate strategic recommendations", - llm=model + model_name="gpt-4.1", ) # Create analysis team @@ -308,29 +228,12 @@ for task, history in zip(tasks, histories): ## Best Practices -1. **Agent Design** - - Give agents clear, specific roles - - Use detailed system prompts - - Set appropriate context lengths - - Enable retries for reliability - -2. **Speaker Functions** - - Match function to use case - - Consider conversation flow - - Handle edge cases - - Add appropriate logging - -3. **Error Handling** - - Use try-except blocks - - Log errors appropriately - - Implement retry logic - - Provide fallback responses - -4. **Performance** - - Use concurrent processing for multiple tasks - - Monitor context lengths - - Implement proper cleanup - - Cache responses when appropriate +| Category | Recommendations | +|---------------------|--------------------------------------------------------------------------------------------------| +| **Agent Design** | - Give agents clear, specific roles
- Use detailed system prompts<br>- Set appropriate context lengths<br>- Enable retries for reliability |
+| **Speaker Functions** | - Match function to use case<br>- Consider conversation flow<br>- Handle edge cases<br>- Add appropriate logging |
+| **Error Handling** | - Use try-except blocks<br>- Log errors appropriately<br>- Implement retry logic<br>- Provide fallback responses |
+| **Performance** | - Use concurrent processing for multiple tasks<br>- Monitor context lengths<br>- Implement proper cleanup<br>
- Cache responses when appropriate | ## API Reference diff --git a/docs/swarms/structs/moa.md b/docs/swarms/structs/moa.md index 0a1af8bc..c153ab54 100644 --- a/docs/swarms/structs/moa.md +++ b/docs/swarms/structs/moa.md @@ -1,7 +1,5 @@ # MixtureOfAgents Class Documentation -## Architecture Overview - ```mermaid graph TD A[Input Task] --> B[Initialize MixtureOfAgents] @@ -26,7 +24,6 @@ graph TD end ``` -## Overview The `MixtureOfAgents` class represents a mixture of agents operating within a swarm. The workflow of the swarm follows a parallel → sequential → parallel → final output agent process. This implementation is inspired by concepts discussed in the paper: [https://arxiv.org/pdf/2406.04692](https://arxiv.org/pdf/2406.04692). @@ -73,45 +70,8 @@ class MixtureOfAgents(BaseSwarm): | `auto_save` | `bool` | Flag indicating whether to auto-save the metadata to a file. | `False` | | `saved_file_name`| `str` | The name of the file where the metadata will be saved. | `"moe_swarm.json"` | -### `agent_check` -```python -def agent_check(self): -``` -#### Description - -Checks if the provided `agents` attribute is a list of `Agent` instances. Raises a `TypeError` if the validation fails. - -#### Example Usage - -```python -moe_swarm = MixtureOfAgents(agents=[agent1, agent2]) -moe_swarm.agent_check() # Validates the agents -``` - -### `final_agent_check` - -```python -def final_agent_check(self): -``` - -#### Description - -Checks if the provided `final_agent` attribute is an instance of `Agent`. Raises a `TypeError` if the validation fails. - -#### Example Usage - -```python -moe_swarm = MixtureOfAgents(final_agent=final_agent) -moe_swarm.final_agent_check() # Validates the final agent -``` - -### `swarm_initialization` - -```python -def swarm_initialization(self): -``` #### Description @@ -280,48 +240,28 @@ For further reading and background information on the concepts used in the `Mixt ```python from swarms import MixtureOfAgents, Agent -from swarm_models import OpenAIChat - # Define agents director = Agent( agent_name="Director", system_prompt="Directs the tasks for the accountants", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="director.json", ) # Initialize accountant 1 accountant1 = Agent( agent_name="Accountant1", system_prompt="Prepares financial statements", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="accountant1.json", ) # Initialize accountant 2 accountant2 = Agent( agent_name="Accountant2", system_prompt="Audits financial records", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="accountant2.json", ) @@ -338,49 +278,28 @@ print(history) ```python from swarms import MixtureOfAgents, Agent -from swarm_models import OpenAIChat - # Define Agents -# Define agents director = Agent( agent_name="Director", system_prompt="Directs the tasks for the accountants", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="director.json", ) # Initialize accountant 1 accountant1 = Agent( agent_name="Accountant1", system_prompt="Prepares financial statements", - 
llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="accountant1.json", ) # Initialize accountant 2 accountant2 = Agent( agent_name="Accountant2", system_prompt="Audits financial records", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="accountant2.json", ) # Initialize the MixtureOfAgents with verbose output and auto-save enabled @@ -401,49 +320,30 @@ print(history) ```python from swarms import MixtureOfAgents, Agent -from swarm_models import OpenAIChat # Define agents # Initialize the director agent director = Agent( agent_name="Director", system_prompt="Directs the tasks for the accountants", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="director.json", ) # Initialize accountant 1 accountant1 = Agent( agent_name="Accountant1", system_prompt="Prepares financial statements", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="accountant1.json", ) # Initialize accountant 2 accountant2 = Agent( agent_name="Accountant2", system_prompt="Audits financial records", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="accountant2.json", ) # Initialize the MixtureOfAgents with custom rules and multiple layers @@ -468,11 +368,13 @@ The `MixtureOfAgents` class is a powerful and flexible framework for managing an ### Key Takeaways -1. **Flexible Initialization**: The class allows for customizable initialization with various parameters, enabling users to tailor the swarm's configuration to their specific needs. -2. **Robust Agent Management**: With built-in validation methods, the class ensures that all agents and the final agent are correctly instantiated, preventing runtime errors and facilitating smooth execution. -3. **Layered Processing**: The layered approach to processing allows for intermediate results to be iteratively refined, enhancing the overall output quality. -4. **Verbose Logging and Auto-Save**: These features aid in debugging, monitoring, and record-keeping, providing transparency and ease of management. -5. **Comprehensive Documentation**: The detailed class and method documentation, along with numerous usage examples, provide a clear and thorough understanding of how to leverage the `MixtureOfAgents` class effectively. +| Feature | Description | +|-----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **Flexible Initialization** | The class allows for customizable initialization with various parameters, enabling users to tailor the swarm's configuration to their specific needs. | +| **Robust Agent Management** | Built-in validation methods ensure that all agents and the final agent are correctly instantiated, preventing runtime errors and facilitating smooth execution. 
| +| **Layered Processing** | The layered approach to processing allows for intermediate results to be iteratively refined, enhancing the overall output quality. | +| **Verbose Logging and Auto-Save** | Features such as verbose logging and auto-save aid in debugging, monitoring, and record-keeping, providing transparency and ease of management. | +| **Comprehensive Documentation** | Detailed class and method documentation, along with numerous usage examples, provide a clear and thorough understanding of how to leverage the `MixtureOfAgents` class effectively. | ### Practical Applications @@ -499,46 +401,28 @@ In conclusion, the `MixtureOfAgents` class represents a versatile and efficient ```python from swarms import MixtureOfAgents, Agent -from swarm_models import OpenAIChat # Initialize agents as in previous examples director = Agent( agent_name="Director", system_prompt="Directs the tasks for the accountants", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="director.json", + ) accountant1 = Agent( agent_name="Accountant1", system_prompt="Prepares financial statements", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="accountant1.json", ) accountant2 = Agent( agent_name="Accountant2", system_prompt="Audits financial records", - llm=OpenAIChat(), + model_name="gpt-4.1", max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="accountant2.json", ) # Initialize MixtureOfAgents @@ -562,7 +446,6 @@ for task, result in zip(tasks, results): ```python from swarms import MixtureOfAgents, Agent -from swarm_models import OpenAIChat # Initialize agents as before # ... agent initialization code ... @@ -585,19 +468,3 @@ for task, result in zip(tasks, results): print(f"Task: {task}\nResult: {result}\n") ``` -## Advanced Features - -### Context Preservation - -The `MixtureOfAgents` class maintains context between iterations when running multiple loops. Each subsequent iteration receives the context from previous runs, allowing for more sophisticated and context-aware processing. - -### Asynchronous Processing - -The class implements asynchronous processing internally using Python's `asyncio`, enabling efficient handling of concurrent operations and improved performance for complex workflows. - -### Telemetry and Logging - -Built-in telemetry and logging capabilities help track agent performance and maintain detailed execution records: -- Automatic logging of agent outputs -- Structured data capture using Pydantic models -- JSON-formatted output options \ No newline at end of file diff --git a/docs/swarms/structs/round_robin_swarm.md b/docs/swarms/structs/round_robin_swarm.md index 33ad7e2b..0a8215ea 100644 --- a/docs/swarms/structs/round_robin_swarm.md +++ b/docs/swarms/structs/round_robin_swarm.md @@ -1,16 +1,12 @@ # RoundRobin: Round-Robin Task Execution in a Swarm -## Introduction - The `RoundRobinSwarm` class is designed to manage and execute tasks among multiple agents in a round-robin fashion. This approach ensures that each agent in a swarm receives an equal opportunity to execute tasks, which promotes fairness and efficiency in distributed systems. 
It is particularly useful in environments where collaborative, sequential task execution is needed among various agents. -## Conceptual Overview - -### What is Round-Robin? +## What is Round-Robin? Round-robin is a scheduling technique commonly used in computing for managing processes in shared systems. It involves assigning a fixed time slot to each process and cycling through all processes in a circular order without prioritization. In the context of swarms of agents, this method ensures equitable distribution of tasks and resource usage among all agents. -### Application in Swarms +## Application in Swarms In swarms, `RoundRobinSwarm` utilizes the round-robin scheduling to manage tasks among agents like software components, autonomous robots, or virtual entities. This strategy is beneficial where tasks are interdependent or require sequential processing. @@ -28,73 +24,57 @@ In swarms, `RoundRobinSwarm` utilizes the round-robin scheduling to manage tasks Initializes the swarm with the provided list of agents, verbosity setting, and operational parameters. **Parameters:** -- `agents`: Optional list of agents in the swarm. -- `verbose`: Boolean flag for detailed logging. -- `max_loops`: Maximum number of execution cycles. -- `callback`: Optional function called after each loop. +| Parameter | Type | Description | +|-------------|---------------------|-----------------------------------------------------| +| agents | List[Agent], optional | List of agents in the swarm. | +| verbose | bool | Boolean flag for detailed logging. | +| max_loops | int | Maximum number of execution cycles. | +| callback | Callable, optional | Function called after each loop. | ### `run` Executes a specified task across all agents in a round-robin manner, cycling through each agent repeatedly for the number of specified loops. **Conceptual Behavior:** -- Distribute the task sequentially among all agents starting from the current index. -- Each agent processes the task and potentially modifies it or produces new output. -- After an agent completes its part of the task, the index moves to the next agent. -- This cycle continues until the specified maximum number of loops is completed. -- Optionally, a callback function can be invoked after each loop to handle intermediate results or perform additional actions. + +| Step | Description | +|------|-------------| +| 1 | Distribute the task sequentially among all agents starting from the current index. | +| 2 | Each agent processes the task and potentially modifies it or produces new output. | +| 3 | After an agent completes its part of the task, the index moves to the next agent. | +| 4 | This cycle continues until the specified maximum number of loops is completed. | +| 5 | Optionally, a callback function can be invoked after each loop to handle intermediate results or perform additional actions. | ## Examples -### Example 1: Load Balancing Among Servers In this example, `RoundRobinSwarm` is used to distribute network requests evenly among a group of servers. This is common in scenarios where load balancing is crucial for maintaining system responsiveness and scalability. 
```python from swarms import Agent, RoundRobinSwarm -from swarm_models import OpenAIChat - - -# Initialize the LLM -llm = OpenAIChat() # Define sales agents sales_agent1 = Agent( agent_name="Sales Agent 1 - Automation Specialist", system_prompt="You're Sales Agent 1, your purpose is to generate sales for a company by focusing on the benefits of automating accounting processes!", agent_description="Generate sales by focusing on the benefits of automation!", - llm=llm, + model_name="gpt-4.1", max_loops=1, - autosave=True, - dashboard=False, - verbose=True, - streaming_on=True, - context_length=1000, ) sales_agent2 = Agent( agent_name="Sales Agent 2 - Cost Saving Specialist", system_prompt="You're Sales Agent 2, your purpose is to generate sales for a company by emphasizing the cost savings of using swarms of agents!", agent_description="Generate sales by emphasizing cost savings!", - llm=llm, + model_name="gpt-4.1", max_loops=1, - autosave=True, - dashboard=False, - verbose=True, - streaming_on=True, - context_length=1000, ) sales_agent3 = Agent( agent_name="Sales Agent 3 - Efficiency Specialist", system_prompt="You're Sales Agent 3, your purpose is to generate sales for a company by highlighting the efficiency and accuracy of our swarms of agents in accounting processes!", agent_description="Generate sales by highlighting efficiency and accuracy!", - llm=llm, + model_name="gpt-4.1", max_loops=1, - autosave=True, - dashboard=False, - verbose=True, - streaming_on=True, - context_length=1000, ) # Initialize the swarm with sales agents @@ -103,14 +83,11 @@ sales_swarm = RoundRobinSwarm(agents=[sales_agent1, sales_agent2, sales_agent3], # Define a sales task task = "Generate a sales email for an accountant firm executive to sell swarms of agents to automate their accounting processes." -# Distribute sales tasks to different agents -for _ in range(5): # Repeat the task 5 times - results = sales_swarm.run(task) - print("Sales generated:", results) +out = sales_swarm.run(task) +print(out) ``` - ## Conclusion The RoundRobinSwarm class provides a robust and flexible framework for managing tasks among multiple agents in a fair and efficient manner. This class is especially useful in environments where tasks need to be distributed evenly among a group of agents, ensuring that all tasks are handled timely and effectively. Through the round-robin algorithm, each agent in the swarm is guaranteed an equal opportunity to contribute to the overall task, promoting efficiency and collaboration. diff --git a/docs/swarms/structs/swarm_router.md b/docs/swarms/structs/swarm_router.md index fcf467bb..1edccf6b 100644 --- a/docs/swarms/structs/swarm_router.md +++ b/docs/swarms/structs/swarm_router.md @@ -122,20 +122,6 @@ pip install swarms swarm_models import os from dotenv import load_dotenv from swarms import Agent, SwarmRouter, SwarmType -from swarm_models import OpenAIChat - -load_dotenv() - -# Get the OpenAI API key from the environment variable -api_key = os.getenv("GROQ_API_KEY") - -# Model -model = OpenAIChat( - openai_api_base="https://api.groq.com/openai/v1", - openai_api_key=api_key, - model_name="llama-3.1-70b-versatile", - temperature=0.1, -) # Define specialized system prompts for each agent DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. 
Your expertise includes: @@ -158,31 +144,15 @@ Deliver clear, concise summaries that capture the essence of various documents w data_extractor_agent = Agent( agent_name="Data-Extractor", system_prompt=DATA_EXTRACTOR_PROMPT, - llm=model, + model_name="gpt-4.1", max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="data_extractor_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", ) summarizer_agent = Agent( agent_name="Document-Summarizer", system_prompt=SUMMARIZER_PROMPT, - llm=model, + model_name="gpt-4.1", max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="summarizer_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", ) # Initialize the SwarmRouter @@ -192,8 +162,6 @@ router = SwarmRouter( max_loops=1, agents=[data_extractor_agent, summarizer_agent], swarm_type="ConcurrentWorkflow", - autosave=True, - return_json=True, ) # Example usage @@ -203,10 +171,6 @@ if __name__ == "__main__": "Where is the best place to find template term sheets for series A startups? Provide links and references" ) print(result) - - # Retrieve and print logs - for log in router.get_logs(): - print(f"{log.timestamp} - {log.level}: {log.message}") ``` ## Advanced Usage @@ -243,40 +207,6 @@ auto_router = SwarmRouter( result = auto_router.run("Analyze and summarize the quarterly financial report") ``` -### Loading Agents from CSV - -To load agents from a CSV file: - -```python -csv_router = SwarmRouter( - name="CSVAgentRouter", - load_agents_from_csv=True, - csv_file_path="agents.csv", - swarm_type="SequentialWorkflow" -) - -result = csv_router.run("Process the client data") -``` - -### Using Shared Memory System - -To enable shared memory across agents: - -```python -from swarms.memory import SemanticMemory - -memory_system = SemanticMemory() - -memory_router = SwarmRouter( - name="MemoryRouter", - agents=[agent1, agent2], - shared_memory_system=memory_system, - swarm_type="SequentialWorkflow" -) - -result = memory_router.run("Analyze historical data and make predictions") -``` - ### Injecting Rules to All Agents To inject common rules into all agents: @@ -454,6 +384,7 @@ result = voting_router.run("Should we invest in Company X based on the available ``` ### Auto Select (Experimental) + Autonomously selects the right swarm by conducting vector search on your input task or name or description or all 3. 
```python @@ -551,18 +482,3 @@ router = SwarmRouter( result = router("Analyze the market data") # Equivalent to router.run("Analyze the market data") ``` - -### Using the swarm_router Function - -For quick one-off tasks, you can use the swarm_router function: - -```python -from swarms import swarm_router - -result = swarm_router( - name="QuickRouter", - agents=[agent1, agent2], - swarm_type="ConcurrentWorkflow", - task="Analyze the quarterly report" -) -``` diff --git a/swarms/structs/round_robin.py b/swarms/structs/round_robin.py index 19198d3d..21261b3b 100644 --- a/swarms/structs/round_robin.py +++ b/swarms/structs/round_robin.py @@ -1,12 +1,13 @@ import random -from swarms.structs.base_swarm import BaseSwarm -from typing import List -from swarms.structs.agent import Agent -from pydantic import BaseModel, Field -from typing import Optional from datetime import datetime -from swarms.schemas.agent_step_schemas import ManySteps +from typing import List, Optional + import tenacity +from pydantic import BaseModel, Field + +from swarms.schemas.agent_step_schemas import ManySteps +from swarms.structs.agent import Agent +from swarms.structs.base_swarm import BaseSwarm from swarms.utils.loguru_logger import initialize_logger logger = initialize_logger("round-robin") diff --git a/swarms/structs/swarm_id_generator.py b/swarms/structs/swarm_id_generator.py deleted file mode 100644 index aeaa5999..00000000 --- a/swarms/structs/swarm_id_generator.py +++ /dev/null @@ -1,5 +0,0 @@ -import uuid - - -def generate_swarm_id(): - return f"swarm-{uuid.uuid4().hex}" diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py index b612bfaf..e2d53aaa 100644 --- a/swarms/utils/formatter.py +++ b/swarms/utils/formatter.py @@ -16,7 +16,6 @@ from rich.text import Text from rich.spinner import Spinner from rich.markdown import Markdown -from rich.syntax import Syntax # Global lock to ensure only a single Rich Live context is active at any moment. 
# Rich's Live render is **not** thread-safe; concurrent Live contexts on the same @@ -34,86 +33,106 @@ spinner = Spinner("dots", style="yellow") class MarkdownOutputHandler: """Custom output handler to render content as markdown with simplified syntax highlighting""" - + def __init__(self, console: "Console"): self.console = console - + def _clean_output(self, output: str) -> str: """Clean up the output for better markdown rendering""" if not output: return "" - + # Remove log prefixes and timestamps - output = re.sub(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \| INFO.*?\|.*?\|', '', output) - output = re.sub(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \| DEBUG.*?\|.*?\|', '', output) - output = re.sub(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \| WARNING.*?\|.*?\|', '', output) - output = re.sub(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \| ERROR.*?\|.*?\|', '', output) - + output = re.sub( + r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \| INFO.*?\|.*?\|", + "", + output, + ) + output = re.sub( + r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \| DEBUG.*?\|.*?\|", + "", + output, + ) + output = re.sub( + r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \| WARNING.*?\|.*?\|", + "", + output, + ) + output = re.sub( + r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} \| ERROR.*?\|.*?\|", + "", + output, + ) + # Remove spinner characters and progress indicators - output = re.sub(r'[⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏]', '', output) - output = re.sub(r'⠋ Processing\.\.\.', '', output) - output = re.sub(r'⠙ Processing\.\.\.', '', output) - output = re.sub(r'⠹ Processing\.\.\.', '', output) - output = re.sub(r'⠸ Processing\.\.\.', '', output) - output = re.sub(r'⠼ Processing\.\.\.', '', output) - output = re.sub(r'⠴ Processing\.\.\.', '', output) - output = re.sub(r'⠦ Processing\.\.\.', '', output) - output = re.sub(r'⠧ Processing\.\.\.', '', output) - output = re.sub(r'⠇ Processing\.\.\.', '', output) - output = re.sub(r'⠏ Processing\.\.\.', '', output) - + output = re.sub(r"[⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏]", "", output) + output = re.sub(r"⠋ Processing\.\.\.", "", output) + output = re.sub(r"⠙ Processing\.\.\.", "", output) + output = re.sub(r"⠹ Processing\.\.\.", "", output) + output = re.sub(r"⠸ Processing\.\.\.", "", output) + output = re.sub(r"⠼ Processing\.\.\.", "", output) + output = re.sub(r"⠴ Processing\.\.\.", "", output) + output = re.sub(r"⠦ Processing\.\.\.", "", output) + output = re.sub(r"⠧ Processing\.\.\.", "", output) + output = re.sub(r"⠇ Processing\.\.\.", "", output) + output = re.sub(r"⠏ Processing\.\.\.", "", output) + # Remove loop indicators - output = re.sub(r'⠋ Loop \d+/\d+', '', output) - output = re.sub(r'⠙ Loop \d+/\d+', '', output) - output = re.sub(r'⠹ Loop \d+/\d+', '', output) - output = re.sub(r'⠸ Loop \d+/\d+', '', output) - output = re.sub(r'⠼ Loop \d+/\d+', '', output) - output = re.sub(r'⠴ Loop \d+/\d+', '', output) - output = re.sub(r'⠦ Loop \d+/\d+', '', output) - output = re.sub(r'⠧ Loop \d+/\d+', '', output) - output = re.sub(r'⠇ Loop \d+/\d+', '', output) - output = re.sub(r'⠏ Loop \d+/\d+', '', output) - + output = re.sub(r"⠋ Loop \d+/\d+", "", output) + output = re.sub(r"⠙ Loop \d+/\d+", "", output) + output = re.sub(r"⠹ Loop \d+/\d+", "", output) + output = re.sub(r"⠸ Loop \d+/\d+", "", output) + output = re.sub(r"⠼ Loop \d+/\d+", "", output) + output = re.sub(r"⠴ Loop \d+/\d+", "", output) + output = re.sub(r"⠦ Loop \d+/\d+", "", output) + output = re.sub(r"⠧ Loop \d+/\d+", "", output) + output = re.sub(r"⠇ Loop \d+/\d+", "", output) + output = re.sub(r"⠏ Loop \d+/\d+", "", output) + # Remove any remaining log messages - output = 
re.sub(r'INFO.*?\|.*?\|.*?\|', '', output) - output = re.sub(r'DEBUG.*?\|.*?\|.*?\|', '', output) - output = re.sub(r'WARNING.*?\|.*?\|.*?\|', '', output) - output = re.sub(r'ERROR.*?\|.*?\|.*?\|', '', output) - + output = re.sub(r"INFO.*?\|.*?\|.*?\|", "", output) + output = re.sub(r"DEBUG.*?\|.*?\|.*?\|", "", output) + output = re.sub(r"WARNING.*?\|.*?\|.*?\|", "", output) + output = re.sub(r"ERROR.*?\|.*?\|.*?\|", "", output) + # Clean up extra whitespace and empty lines - output = re.sub(r'\n\s*\n\s*\n', '\n\n', output) - output = re.sub(r'^\s+', '', output, flags=re.MULTILINE) - output = re.sub(r'\s+$', '', output, flags=re.MULTILINE) - + output = re.sub(r"\n\s*\n\s*\n", "\n\n", output) + output = re.sub(r"^\s+", "", output, flags=re.MULTILINE) + output = re.sub(r"\s+$", "", output, flags=re.MULTILINE) + # Remove any remaining plaintext artifacts - output = re.sub(r'Generated content:', '', output) - output = re.sub(r'Evaluation result:', '', output) - output = re.sub(r'Refined content:', '', output) - + output = re.sub(r"Generated content:", "", output) + output = re.sub(r"Evaluation result:", "", output) + output = re.sub(r"Refined content:", "", output) + # Ensure proper markdown formatting - if not output.strip().startswith('#'): + if not output.strip().startswith("#"): # If no headers, add some structure - lines = output.strip().split('\n') + lines = output.strip().split("\n") if len(lines) > 0: # Add a header for the first meaningful line first_line = lines[0].strip() - if first_line and not first_line.startswith('**'): - output = f"## {first_line}\n\n" + '\n'.join(lines[1:]) - + if first_line and not first_line.startswith("**"): + output = f"## {first_line}\n\n" + "\n".join( + lines[1:] + ) + return output.strip() - - def render_with_simple_syntax_highlighting(self, content: str) -> list: + + def render_with_simple_syntax_highlighting( + self, content: str + ) -> list: """Render content with simplified syntax highlighting for code blocks""" # For now, let's just render everything as markdown to ensure it works # We can add code block detection back later if needed - return [('markdown', content)] - + return [("markdown", content)] + def render_content_parts(self, parts: list) -> list: """Render different content parts with appropriate formatting""" rendered_parts = [] - + for part in parts: - if part[0] == 'markdown': + if part[0] == "markdown": # Render markdown try: md = Markdown(part[1]) @@ -121,45 +140,57 @@ class MarkdownOutputHandler: except Exception: # Fallback to plain text rendered_parts.append(Text(part[1])) - - elif part[0] == 'code': + + elif part[0] == "code": # Code is already rendered as Syntax or Text object rendered_parts.append(part[1]) - + return rendered_parts - - def render_markdown_output(self, content: str, title: str = "", border_style: str = "blue"): + + def render_markdown_output( + self, + content: str, + title: str = "", + border_style: str = "blue", + ): """Render content as markdown with syntax highlighting""" if not content or content.strip() == "": return - + # Clean up the output cleaned_content = self._clean_output(content) - + # Render with syntax highlighting try: # Split content into parts (markdown and code blocks) - parts = self.render_with_simple_syntax_highlighting(cleaned_content) - + parts = self.render_with_simple_syntax_highlighting( + cleaned_content + ) + # Render each part appropriately rendered_parts = self.render_content_parts(parts) - + # Create a group of rendered parts from rich.console import Group + content_group = 
Group(*rendered_parts) - - self.console.print(Panel( - content_group, - title=title, - border_style=border_style - )) - except Exception as e: + + self.console.print( + Panel( + content_group, + title=title, + border_style=border_style, + ) + ) + except Exception: # Fallback to plain text if rendering fails - self.console.print(Panel( - cleaned_content, - title=title, - border_style="yellow" - )) + self.console.print( + Panel( + cleaned_content, + title=title, + border_style="yellow", + ) + ) def choose_random_color(): @@ -206,9 +237,11 @@ class Formatter: "⠏", ] self._spinner_idx = 0 - + # Set markdown capability based on user preference - self.markdown_handler = MarkdownOutputHandler(self.console) if md else None + self.markdown_handler = ( + MarkdownOutputHandler(self.console) if md else None + ) def _get_status_with_loading(self, status: str) -> Text: """ @@ -284,7 +317,9 @@ class Formatter: # Use markdown rendering if enabled if self.markdown_handler: - self.markdown_handler.render_markdown_output(content, title, style) + self.markdown_handler.render_markdown_output( + content, title, style + ) else: # Fallback to original panel printing try: @@ -293,7 +328,7 @@ class Formatter: # Fallback to basic printing if panel fails print(f"\n{title}:") print(content) - + def print_markdown( self, content: str, @@ -308,7 +343,9 @@ class Formatter: border_style (str): The border style for the panel """ if self.markdown_handler: - self.markdown_handler.render_markdown_output(content, title, border_style) + self.markdown_handler.render_markdown_output( + content, title, border_style + ) else: # Fallback to regular panel if markdown is disabled self.print_panel(content, title, border_style) diff --git a/tests/utils/test_md_output.py b/tests/utils/test_md_output.py index 814c1323..d1693739 100644 --- a/tests/utils/test_md_output.py +++ b/tests/utils/test_md_output.py @@ -4,30 +4,32 @@ Test script demonstrating markdown output functionality with a real swarm Uses the current state of formatter.py to show agent markdown output capabilities """ -import sys import os -import asyncio -from typing import List, Dict, Any from dotenv import load_dotenv # Load environment variables load_dotenv() from swarms import Agent -from swarms.structs import SequentialWorkflow, ConcurrentWorkflow, GroupChat +from swarms.structs import ( + SequentialWorkflow, + ConcurrentWorkflow, + GroupChat, +) from swarms.utils.formatter import Formatter + class MarkdownTestSwarm: """A test swarm that demonstrates markdown output capabilities""" - + def __init__(self): self.formatter = Formatter(markdown=True) self.setup_agents() self.setup_swarm() - + def setup_agents(self): """Setup specialized agents for markdown testing""" - + # Research Agent - Generates structured markdown reports self.research_agent = Agent( agent_name="Research Agent", @@ -45,9 +47,9 @@ class MarkdownTestSwarm: max_tokens=4000, max_loops=1, context_length=8000, # Limit context to prevent overflow - return_history=False # Don't return history to reduce context + return_history=False, # Don't return history to reduce context ) - + # Code Analysis Agent - Generates code-heavy markdown self.code_agent = Agent( agent_name="Code Analysis Agent", @@ -64,9 +66,9 @@ class MarkdownTestSwarm: max_tokens=4000, max_loops=1, context_length=8000, # Limit context to prevent overflow - return_history=False # Don't return history to reduce context + return_history=False, # Don't return history to reduce context ) - + # Data Visualization Agent - Creates data-focused markdown 
self.data_agent = Agent( agent_name="Data Visualization Agent", @@ -83,65 +85,75 @@ class MarkdownTestSwarm: max_tokens=4000, max_loops=1, context_length=8000, # Limit context to prevent overflow - return_history=False # Don't return history to reduce context + return_history=False, # Don't return history to reduce context ) - + def setup_swarm(self): """Setup the swarm with the agents""" # Create different swarm types for testing self.sequential_swarm = SequentialWorkflow( name="Markdown Test Sequential", description="Sequential workflow for markdown testing", - agents=[self.research_agent, self.code_agent, self.data_agent], - max_loops=1 # Reduce loops to prevent context overflow + agents=[ + self.research_agent, + self.code_agent, + self.data_agent, + ], + max_loops=1, # Reduce loops to prevent context overflow ) - + self.concurrent_swarm = ConcurrentWorkflow( name="Markdown Test Concurrent", description="Concurrent workflow for markdown testing", - agents=[self.research_agent, self.code_agent, self.data_agent], - max_loops=1 # Reduce loops to prevent context overflow + agents=[ + self.research_agent, + self.code_agent, + self.data_agent, + ], + max_loops=1, # Reduce loops to prevent context overflow ) - + self.groupchat_swarm = GroupChat( name="Markdown Test Group Chat", description="A group chat for testing markdown output", - agents=[self.research_agent, self.code_agent, self.data_agent], - max_loops=1 # Reduce loops to prevent context overflow + agents=[ + self.research_agent, + self.code_agent, + self.data_agent, + ], + max_loops=1, # Reduce loops to prevent context overflow ) - + # Default swarm for main tests self.swarm = self.sequential_swarm - + def test_basic_markdown_output(self): """Test basic markdown output with a simple topic""" - print("\n" + "="*60) + print("\n" + "=" * 60) print("TEST 1: Basic Markdown Output") - print("="*60) - + print("=" * 60) + topic = "Python Web Development with FastAPI" - + self.formatter.print_panel( f"Starting research on: {topic}", title="Research Topic", - style="bold blue" + style="bold blue", ) - + # Run the research agent result = self.research_agent.run(topic) - + self.formatter.print_markdown( - result, - title="Research Report", - border_style="green" + result, title="Research Report", border_style="green" ) - + def test_code_analysis_markdown(self): """Test markdown output with code analysis""" - print("\n" + "="*60) + print("\n" + "=" * 60) print("TEST 2: Code Analysis Markdown") - print("="*60) - + print("=" * 60) + code_sample = """ def fibonacci(n): if n <= 1: @@ -152,28 +164,30 @@ def fibonacci(n): for i in range(10): print(fibonacci(i)) """ - + self.formatter.print_panel( "Analyzing Python code sample", title="Code Analysis", - style="bold cyan" + style="bold cyan", ) - + # Run the code analysis agent - result = self.code_agent.run(f"Analyze this Python code and provide improvements:\n\n{code_sample}") - + result = self.code_agent.run( + f"Analyze this Python code and provide improvements:\n\n{code_sample}" + ) + self.formatter.print_markdown( result, title="Code Analysis Report", - border_style="yellow" + border_style="yellow", ) - + def test_data_analysis_markdown(self): """Test markdown output with data analysis""" - print("\n" + "="*60) + print("\n" + "=" * 60) print("TEST 3: Data Analysis Markdown") - print("="*60) - + print("=" * 60) + data_request = """ Analyze the following dataset: - Sales: $1.2M (Q1), $1.5M (Q2), $1.8M (Q3), $2.1M (Q4) @@ -182,28 +196,26 @@ for i in range(10): Provide insights and recommendations in 
markdown format. """ - + self.formatter.print_panel( "Analyzing quarterly business data", title="Data Analysis", - style="bold magenta" + style="bold magenta", ) - + # Run the data analysis agent result = self.data_agent.run(data_request) - + self.formatter.print_markdown( - result, - title="Data Analysis Report", - border_style="red" + result, title="Data Analysis Report", border_style="red" ) - + def test_swarm_collaboration_markdown(self): """Test markdown output with swarm collaboration""" - print("\n" + "="*60) + print("\n" + "=" * 60) print("TEST 4: Swarm Collaboration Markdown") - print("="*60) - + print("=" * 60) + complex_topic = """ Create a comprehensive guide on building a machine learning pipeline that includes: 1. Data preprocessing techniques @@ -213,39 +225,39 @@ for i in range(10): Each agent should contribute their expertise and the final output should be well-formatted markdown. """ - + self.formatter.print_panel( "Swarm collaboration on ML pipeline guide", title="Swarm Task", - style="bold green" + style="bold green", ) - + # Run the swarm results = self.swarm.run(complex_topic) - + # Display individual agent results # SequentialWorkflow returns a list of results, not a dict for i, result in enumerate(results, 1): agent_name = f"Agent {i}" - + # Handle different result types if isinstance(result, dict): # Extract the output from dict result - result_content = result.get('output', str(result)) + result_content = result.get("output", str(result)) else: result_content = str(result) self.formatter.print_markdown( result_content, title=f"Agent {i}: {agent_name}", - border_style="blue" + border_style="blue", ) - + def test_markdown_toggle_functionality(self): """Test the markdown enable/disable functionality""" - print("\n" + "="*60) + print("\n" + "=" * 60) print("TEST 5: Markdown Toggle Functionality") - print("="*60) - + print("=" * 60) + test_content = """ # Test Content @@ -262,110 +274,116 @@ def test_function(): - Item 2 - Item 3 """ - + # Test with markdown enabled self.formatter.print_panel( "Testing with markdown ENABLED", title="Markdown Enabled", - style="bold green" + style="bold green", ) self.formatter.print_markdown(test_content, "Markdown Output") - + # Disable markdown self.formatter.disable_markdown() self.formatter.print_panel( "Testing with markdown DISABLED", title="Markdown Disabled", - style="bold red" + style="bold red", ) self.formatter.print_panel(test_content, "Plain Text Output") - + # Re-enable markdown self.formatter.enable_markdown() self.formatter.print_panel( "Testing with markdown RE-ENABLED", title="Markdown Re-enabled", - style="bold blue" + style="bold blue", + ) + self.formatter.print_markdown( + test_content, "Markdown Output Again" ) - self.formatter.print_markdown(test_content, "Markdown Output Again") - + def test_different_swarm_types(self): """Test markdown output with different swarm types""" - print("\n" + "="*60) + print("\n" + "=" * 60) print("TEST 6: Different Swarm Types") - print("="*60) - - simple_topic = "Explain the benefits of using Python for data science" - + print("=" * 60) + + simple_topic = ( + "Explain the benefits of using Python for data science" + ) + # Test Sequential Workflow print("\n--- Sequential Workflow ---") self.formatter.print_panel( "Testing Sequential Workflow (agents work in sequence)", title="Swarm Type Test", - style="bold blue" + style="bold blue", ) sequential_results = self.sequential_swarm.run(simple_topic) for i, result in enumerate(sequential_results, 1): # Handle different result types if 
isinstance(result, dict): - result_content = result.get('output', str(result)) + result_content = result.get("output", str(result)) else: result_content = str(result) - + self.formatter.print_markdown( result_content, title=f"Sequential Agent {i}", - border_style="blue" + border_style="blue", ) - + # Test Concurrent Workflow print("\n--- Concurrent Workflow ---") self.formatter.print_panel( "Testing Concurrent Workflow (agents work in parallel)", title="Swarm Type Test", - style="bold green" + style="bold green", ) concurrent_results = self.concurrent_swarm.run(simple_topic) for i, result in enumerate(concurrent_results, 1): # Handle different result types if isinstance(result, dict): - result_content = result.get('output', str(result)) + result_content = result.get("output", str(result)) else: result_content = str(result) - + self.formatter.print_markdown( result_content, title=f"Concurrent Agent {i}", - border_style="green" + border_style="green", ) - + # Test Group Chat print("\n--- Group Chat ---") self.formatter.print_panel( "Testing Group Chat (agents collaborate in conversation)", title="Swarm Type Test", - style="bold magenta" + style="bold magenta", ) groupchat_results = self.groupchat_swarm.run(simple_topic) - + # Handle different result types for GroupChat if isinstance(groupchat_results, dict): - result_content = groupchat_results.get('output', str(groupchat_results)) + result_content = groupchat_results.get( + "output", str(groupchat_results) + ) else: result_content = str(groupchat_results) - + self.formatter.print_markdown( result_content, title="Group Chat Result", - border_style="magenta" + border_style="magenta", ) - + def test_simple_formatter_only(self): """Test just the formatter functionality without agents""" - print("\n" + "="*60) + print("\n" + "=" * 60) print("TEST 7: Simple Formatter Test (No Agents)") - print("="*60) - + print("=" * 60) + # Test basic markdown rendering simple_markdown = """ # Simple Test @@ -384,34 +402,34 @@ def hello_world(): - Item 2 - Item 3 """ - + self.formatter.print_panel( "Testing formatter without agents", title="Formatter Test", - style="bold cyan" + style="bold cyan", ) - + self.formatter.print_markdown( simple_markdown, title="Simple Markdown Test", - border_style="green" + border_style="green", ) - + # Test toggle functionality self.formatter.disable_markdown() self.formatter.print_panel( "Markdown disabled - this should be plain text", title="Plain Text Test", - style="bold red" + style="bold red", ) self.formatter.enable_markdown() - + def test_error_handling_markdown(self): """Test markdown output with error handling""" - print("\n" + "="*60) + print("\n" + "=" * 60) print("TEST 8: Error Handling in Markdown") - print("="*60) - + print("=" * 60) + # Test with malformed markdown malformed_content = """ # Incomplete header @@ -420,87 +438,95 @@ def hello_world(): def incomplete_code(): # Missing closing backticks """ - + self.formatter.print_panel( "Testing error handling with malformed markdown", title="Error Handling Test", - style="bold yellow" + style="bold yellow", ) - + # This should handle the error gracefully self.formatter.print_markdown( malformed_content, title="Malformed Markdown Test", - border_style="yellow" + border_style="yellow", ) - + # Test with empty content self.formatter.print_markdown( - "", - title="Empty Content Test", - border_style="cyan" + "", title="Empty Content Test", border_style="cyan" ) - + # Test with None content self.formatter.print_markdown( - None, - title="None Content Test", - 
border_style="magenta" + None, title="None Content Test", border_style="magenta" ) - + def run_all_tests(self): """Run all markdown output tests""" print(" Starting Swarm Markdown Output Tests") - print("="*60) - + print("=" * 60) + try: # Test 1: Basic markdown output self.test_basic_markdown_output() - + # Test 2: Code analysis markdown self.test_code_analysis_markdown() - + # Test 3: Data analysis markdown self.test_data_analysis_markdown() - + # Test 4: Swarm collaboration self.test_swarm_collaboration_markdown() - + # Test 5: Markdown toggle functionality self.test_markdown_toggle_functionality() - + # Test 6: Different swarm types self.test_different_swarm_types() - + # Test 7: Simple formatter test (no agents) self.test_simple_formatter_only() - + # Test 8: Error handling self.test_error_handling_markdown() - - print("\n" + "="*60) + + print("\n" + "=" * 60) print(" All tests completed successfully!") - print("="*60) - + print("=" * 60) + except Exception as e: print(f"\n Test failed with error: {str(e)}") import traceback + traceback.print_exc() + def main(): """Main function to run the markdown output tests""" print("Swarms Markdown Output Test Suite") - print("Testing the current state of formatter.py with real swarm agents") - print("="*60) - + print( + "Testing the current state of formatter.py with real swarm agents" + ) + print("=" * 60) + # Check environment setup - api_key = os.getenv("OPENAI_API_KEY") or os.getenv("SWARMS_API_KEY") + api_key = os.getenv("OPENAI_API_KEY") or os.getenv( + "SWARMS_API_KEY" + ) if not api_key: - print("⚠ Warning: No API key found. Please set OPENAI_API_KEY or SWARMS_API_KEY environment variable.") - print(" You can create a .env file with: OPENAI_API_KEY=your_api_key_here") - print(" Or set it in your environment: export OPENAI_API_KEY=your_api_key_here") + print( + "⚠ Warning: No API key found. Please set OPENAI_API_KEY or SWARMS_API_KEY environment variable." + ) + print( + " You can create a .env file with: OPENAI_API_KEY=your_api_key_here" + ) + print( + " Or set it in your environment: export OPENAI_API_KEY=your_api_key_here" + ) print() - + try: # Create and run the test swarm test_swarm = MarkdownTestSwarm() @@ -508,12 +534,16 @@ def main(): except Exception as e: print(f"\n Test failed with error: {str(e)}") print("\n Troubleshooting tips:") - print("1. Make sure you have set your API key (OPENAI_API_KEY or SWARMS_API_KEY)") + print( + "1. Make sure you have set your API key (OPENAI_API_KEY or SWARMS_API_KEY)" + ) print("2. Check your internet connection") print("3. Verify you have sufficient API credits") print("4. Try running with a simpler test first") import traceback + traceback.print_exc() + if __name__ == "__main__": - main() + main()