From b6079f371476229912a887a11da19709db7e0975 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Nov 2024 10:13:51 +0000
Subject: [PATCH 01/19] Update pymdown-extensions requirement from ~=10.11 to
 ~=10.12

Updates the requirements on [pymdown-extensions](https://github.com/facelessuser/pymdown-extensions) to permit the latest version.
- [Release notes](https://github.com/facelessuser/pymdown-extensions/releases)
- [Commits](https://github.com/facelessuser/pymdown-extensions/compare/10.11...10.12)

---
updated-dependencies:
- dependency-name: pymdown-extensions
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]
---
 docs/requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/requirements.txt b/docs/requirements.txt
index 008bedf1..121e0475 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -26,7 +26,7 @@ jinja2~=3.1
 markdown~=3.7
 mkdocs-material-extensions~=1.3
 pygments~=2.18
-pymdown-extensions~=10.11
+pymdown-extensions~=10.12
 
 # Requirements for plugins
 babel~=2.16

From f5c031bdb9d6c95ac2666b02ad00acc9f436be68 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 11 Nov 2024 10:34:25 +0000
Subject: [PATCH 02/19] Update ruff requirement from >=0.5.1,<0.6.10 to
 >=0.5.1,<0.7.4

---
updated-dependencies:
- dependency-name: ruff
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]
---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 24cd0922..9eec0bbf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -85,7 +85,7 @@ swarms = "swarms.cli.main:main"
 
 [tool.poetry.group.lint.dependencies]
 black = ">=23.1,<25.0"
-ruff = ">=0.5.1,<0.6.10"
+ruff = ">=0.5.1,<0.7.4"
 types-toml = "^0.10.8.1"
 types-pytz = ">=2023.3,<2025.0"
 types-chardet = "^5.0.4.6"

From 59da933310b215311575b597a33c78a847aab56f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 11 Nov 2024 10:55:53 +0000
Subject: [PATCH 03/19] Update fastapi requirement from ^0.110.1 to
 >=0.110.1,<0.116.0

Updates the requirements on [fastapi](https://github.com/fastapi/fastapi) to permit the latest version.
- [Release notes](https://github.com/fastapi/fastapi/releases)
- [Commits](https://github.com/fastapi/fastapi/compare/0.110.1...0.115.4)

---
updated-dependencies:
- dependency-name: fastapi
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]
---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 24cd0922..be6e2ba9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -96,7 +96,7 @@ mypy-protobuf = "^3.0.0"
 pytest = "^8.1.1"
 termcolor = "^2.4.0"
 pandas = "^2.2.2"
-fastapi = "^0.110.1"
+fastapi = ">=0.110.1,<0.116.0"
 
 [tool.ruff]
 line-length = 70
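
These three bumps use PEP 440 version specifiers: `~=10.12` is a "compatible release" constraint, equivalent to `>=10.12, ==10.*`, so only the floor moves; the fastapi change swaps a caret constraint for an explicit range. A minimal sketch of checking what a specifier permits, assuming the `packaging` library is available (the version strings are illustrative):

```python
from packaging.specifiers import SpecifierSet

# "Compatible release": ~=10.12 is equivalent to >=10.12, ==10.*
pymdown = SpecifierSet("~=10.12")
print("10.12.1" in pymdown)  # True: patch releases are allowed
print("11.0" in pymdown)     # False: the next major release is excluded

# Explicit range, as now used for fastapi
fastapi = SpecifierSet(">=0.110.1,<0.116.0")
print("0.115.4" in fastapi)  # True: the latest permitted release
```
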
From 8e374b15bed69a385395f098419f682b6e654bee Mon Sep 17 00:00:00 2001
From: Your Name
Date: Sat, 16 Nov 2024 11:40:54 -0800
Subject: [PATCH 04/19] [6.0.9]

---
 agent_showcase_example 2.py                   |  68 +++
 auto_swarm_router.py                          |   1 -
 concurrent_mix 2.py                           |  99 +++
 docs/swarms/changelog/6_0_0 2.md              |  59 ++
 docs/swarms/structs/group_chat.md             | 363 ++++++-----
 persistent_legal_agent.py                     | 113 ++++
 rag_examples/agent_with_rag.py                |  44 --
 rag_examples/agent_with_rag_and_tools.py      | 117 ----
 real_estate_agent.py                          | 319 ++++++++++
 rearrange_test 2.py                           | 119 ++++
 scripts/docs/create_llm_file_for_docs.sh      |  52 ++
 sequential_worflow_test 2.py                  | 117 ++++
 swarms/cli/parse_yaml.py                      | 120 ----
 swarms/schemas/plan.py                        |  10 -
 swarms/structs/__init__.py                    |   2 -
 swarms/structs/agent.py                       |  22 +-
 .../structs/{agent_rag.py => agent_router.py} |   0
 swarms/structs/agents_available 2.py          |  93 +++
 swarms/structs/auto_agent_generator.py        |   3 -
 swarms/structs/auto_swarm_builder 2.py        | 299 +++++++++
 swarms/structs/conversation.py                |  33 +
 swarms/structs/groupchat.py                   | 570 ++++++++++++------
 swarms/structs/message_pool.py                | 214 -------
 swarms/structs/mixture_of_agents.py           |   2 +-
 swarms/structs/rearrange.py                   |  19 +-
 swarms/structs/sequential_workflow.py         |  29 +-
 swarms/structs/spreadsheet_swarm.py           |   2 +-
 swarms/structs/swarm_router.py                |  12 +-
 swarms/telemetry/log_swarm_data.py            |  16 -
 swarms/telemetry/sys_info.py                  |  52 +-
 swarms/utils/add_docs_to_agents 2.py          | 141 +++++
 swarms/utils/any_to_str 2.py                  | 102 ++++
 swarms/utils/data_to_text.py                  |  50 --
 swarms/utils/decorators.py                    |  38 +-
 swarms/utils/exec_funcs_in_parallel.py        | 127 ----
 swarms/utils/successful_run.py                |  75 ---
 swarms/utils/swarm_output_handling 2.py       |  34 ++
 swarms/utils/swarm_reliability_checks 2.py    |  78 +++
 swarms/utils/wrapper_clusterop 2.py           |  77 +++
 tests/structs/test_message_pool.py            | 117 ----
 40 files changed, 2486 insertions(+), 1322 deletions(-)
 create mode 100644 agent_showcase_example 2.py
 create mode 100644 concurrent_mix 2.py
 create mode 100644 docs/swarms/changelog/6_0_0 2.md
 create mode 100644 persistent_legal_agent.py
 delete mode 100644 rag_examples/agent_with_rag.py
 delete mode 100644 rag_examples/agent_with_rag_and_tools.py
 create mode 100644 real_estate_agent.py
 create mode 100644 rearrange_test 2.py
 create mode 100644 scripts/docs/create_llm_file_for_docs.sh
 create mode 100644 sequential_worflow_test 2.py
 delete mode 100644 swarms/cli/parse_yaml.py
 delete mode 100644 swarms/schemas/plan.py
 rename swarms/structs/{agent_rag.py => agent_router.py} (100%)
 create mode 100644 swarms/structs/agents_available 2.py
 delete mode 100644 swarms/structs/auto_agent_generator.py
 create mode 100644 swarms/structs/auto_swarm_builder 2.py
 delete mode 100644 swarms/structs/message_pool.py
 delete mode 100644 swarms/telemetry/log_swarm_data.py
 create mode 100644 swarms/utils/add_docs_to_agents 2.py
 create mode 100644 swarms/utils/any_to_str 2.py
 delete mode 100644 swarms/utils/exec_funcs_in_parallel.py
 delete mode 100644 swarms/utils/successful_run.py
 create mode 100644 swarms/utils/swarm_output_handling 2.py
 create mode 100644 swarms/utils/swarm_reliability_checks 2.py
 create mode 100644 swarms/utils/wrapper_clusterop 2.py
 delete mode 100644 tests/structs/test_message_pool.py

diff --git a/agent_showcase_example 2.py b/agent_showcase_example 2.py
new file mode 100644
index 00000000..b78abf81
--- /dev/null
+++ b/agent_showcase_example 2.py
@@ -0,0 +1,68 @@
+import os
+
+from swarms import Agent
+
+from swarm_models import OpenAIChat
+from swarms.structs.agents_available import showcase_available_agents
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+# Initialize the Claims Director agent
+director_agent = Agent(
+    agent_name="ClaimsDirector",
+    agent_description="Oversees and coordinates the medical insurance claims processing workflow",
+    system_prompt="""You are the Claims Director responsible for managing the medical insurance claims process.
+    Assign and prioritize tasks between claims processors and auditors. Ensure claims are handled efficiently
+    and accurately while maintaining compliance with insurance policies and regulations.""",
+    llm=model,
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    state_save_file_type="json",
+    saved_state_path="director_agent.json",
+)
+
+# Initialize Claims Processor agent
+processor_agent = Agent(
+    agent_name="ClaimsProcessor",
+    agent_description="Reviews and processes medical insurance claims, verifying coverage and eligibility",
+    system_prompt="""Review medical insurance claims for completeness and accuracy. Verify patient eligibility,
+    coverage details, and process claims according to policy guidelines. Flag any claims requiring special review.""",
+    llm=model,
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    state_save_file_type="json",
+    saved_state_path="processor_agent.json",
+)
+
+# Initialize Claims Auditor agent
+auditor_agent = Agent(
+    agent_name="ClaimsAuditor",
+    agent_description="Audits processed claims for accuracy and compliance with policies and regulations",
+    system_prompt="""Audit processed insurance claims for accuracy and compliance. Review claim decisions,
+    identify potential fraud or errors, and ensure all processing follows established guidelines and regulations.""",
+    llm=model,
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    state_save_file_type="json",
+    saved_state_path="auditor_agent.json",
+)
+
+# Create a list of agents
+agents = [director_agent, processor_agent, auditor_agent]
+
+print(showcase_available_agents(agents=agents))
diff --git a/auto_swarm_router.py b/auto_swarm_router.py
index 41a3badd..4ca3714f 100644
--- a/auto_swarm_router.py
+++ b/auto_swarm_router.py
@@ -109,7 +109,6 @@ router = SwarmRouter(
     swarm_type="SequentialWorkflow",  # or "SequentialWorkflow" or "ConcurrentWorkflow" or
     auto_generate_prompts=True,
     output_type="all",
-
 )
 
 # Example usage
diff --git a/concurrent_mix 2.py b/concurrent_mix 2.py
new file mode 100644
index 00000000..5ac80ede
--- /dev/null
+++ b/concurrent_mix 2.py
@@ -0,0 +1,99 @@
+import os
+
+from swarm_models import OpenAIChat
+
+from swarms import Agent, run_agents_with_tasks_concurrently
+
+# Fetch the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+    openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+# Initialize agents for different roles
+delaware_ccorp_agent = Agent(
+    agent_name="Delaware-CCorp-Hiring-Agent",
+    system_prompt="""
+    Create a comprehensive hiring description for a Delaware C Corporation,
+    including all relevant laws and regulations, such as the Delaware General
+    Corporation Law (DGCL) and the Delaware Corporate Law. Ensure the description
+    covers the requirements for hiring employees, contractors, and officers,
+    including the necessary paperwork, tax obligations, and benefits. Also,
+    outline the procedures for compliance with Delaware's employment laws,
+    including anti-discrimination laws, workers' compensation, and unemployment
+    insurance. Provide guidance on how to navigate the complexities of Delaware's
+    corporate law and ensure that all hiring practices are in compliance with
+    state and federal regulations.
+    """,
+    llm=model,
+    max_loops=1,
+    autosave=False,
+    dashboard=False,
+    verbose=True,
+    output_type="str",
+    artifacts_on=True,
+    artifacts_output_path="delaware_ccorp_hiring_description.md",
+    artifacts_file_extension=".md",
+)
+
+indian_foreign_agent = Agent(
+    agent_name="Indian-Foreign-Hiring-Agent",
+    system_prompt="""
+    Create a comprehensive hiring description for an Indian or foreign country,
+    including all relevant laws and regulations, such as the Indian Contract Act,
+    the Indian Labour Laws, and the Foreign Exchange Management Act (FEMA).
+    Ensure the description covers the requirements for hiring employees,
+    contractors, and officers, including the necessary paperwork, tax obligations,
+    and benefits. Also, outline the procedures for compliance with Indian and
+    foreign employment laws, including anti-discrimination laws, workers'
+    compensation, and unemployment insurance. Provide guidance on how to navigate
+    the complexities of Indian and foreign corporate law and ensure that all hiring
+    practices are in compliance with state and federal regulations. Consider the
+    implications of hiring foreign nationals and the requirements for obtaining
+    necessary visas and work permits.
+    """,
+    llm=model,
+    max_loops=1,
+    autosave=False,
+    dashboard=False,
+    verbose=True,
+    output_type="str",
+    artifacts_on=True,
+    artifacts_output_path="indian_foreign_hiring_description.md",
+    artifacts_file_extension=".md",
+)
+
+# List of agents and corresponding tasks
+agents = [delaware_ccorp_agent, indian_foreign_agent]
+tasks = [
+    """
+    Create a comprehensive hiring description for an Agent Engineer, including
+    required skills and responsibilities. Ensure the description covers the
+    necessary technical expertise, such as proficiency in AI/ML frameworks,
+    programming languages, and data structures. Outline the key responsibilities,
+    including designing and developing AI agents, integrating with existing systems,
+    and ensuring scalability and performance.
+    """,
+    """
+    Generate a detailed job description for a Prompt Engineer, including
+    required skills and responsibilities. Ensure the description covers the
+    necessary technical expertise, such as proficiency in natural language processing,
+    machine learning, and software development. Outline the key responsibilities,
+    including designing and optimizing prompts for AI systems, ensuring prompt
+    quality and consistency, and collaborating with cross-functional teams.
+    """,
+]
+
+# Run agents with tasks concurrently
+results = run_agents_with_tasks_concurrently(
+    agents,
+    tasks,
+    all_cores=True,
+    device="cpu",
+)
+
+# Print the results
+for result in results:
+    print(result)
diff --git a/docs/swarms/changelog/6_0_0 2.md b/docs/swarms/changelog/6_0_0 2.md
new file mode 100644
index 00000000..aae2e8ef
--- /dev/null
+++ b/docs/swarms/changelog/6_0_0 2.md
@@ -0,0 +1,59 @@
+# Swarms 6.0.0 - Performance & Reliability Update πŸš€
+
+We're excited to announce the release of Swarms 6.0.0, bringing significant improvements to performance, reliability, and developer experience. This release focuses on streamlining core functionalities while enhancing the overall stability of the framework.
+
+## πŸ“¦ Installation
+
+```bash
+pip3 install -U swarms
+```
+
+## 🌟 Highlights
+
+### Agent Enhancements
+- **Improved RAG Performance**: Significant improvements to Retrieval-Augmented Generation capabilities
+- **Enhanced Prompt Generation**: Auto-generate prompt now incorporates name, description, and system prompt for more contextual interactions
+- **Streamlined Architecture**: Cleaned up unused code for better performance and maintainability
+- **Simplified State Management**: Consolidated state management methods into a single `load()` function
+
+### Tools & Execution
+- **Optimized Environment Management**: Fixed multiple environment instantiation issue
+  - Environments now initialize once during `__init__`
+- **New SwarmRouter Function**: Simplified routing mechanism
+  - Returns consolidated string output from all agents
+  - Improved coordination between swarm components
+
+## πŸ’ͺ Performance Improvements
+- Faster execution times
+- Reduced memory footprint
+- More reliable logging system
+- Lightweight and efficient codebase
+
+## 🀝 Join Our Community
+
+### We're Hiring!
+Join our growing team! We're currently looking for:
+- Agent Engineers
+- Developer Relations
+- Infrastructure Engineers
+- And more!
+
+### Get Involved
+- ⭐ Star our repository
+- πŸ”„ Fork the project
+- πŸ›  Submit pull requests
+- πŸ› Report issues
+- πŸ’‘ Share your ideas
+
+### Contact & Support
+- πŸ“§ Email: kye@swarms.world
+- πŸ”— Issues: [GitHub Issues](https://github.com/kyegomez/swarms/issues)
+
+## πŸ”œ What's Next?
+
+Have ideas for features, bug fixes, or improvements? We'd love to hear from you! Reach out through our GitHub issues or email us directly.
+
+---
+
+*Thank you to all our contributors and users who make Swarms better every day. Together, we're building the future of swarm intelligence.*
+
+#SwarmAI #OpenSource #AI #MachineLearning
\ No newline at end of file
diff --git a/docs/swarms/structs/group_chat.md b/docs/swarms/structs/group_chat.md
index b4d805a1..71254953 100644
--- a/docs/swarms/structs/group_chat.md
+++ b/docs/swarms/structs/group_chat.md
@@ -1,238 +1,231 @@
-# GroupChat
+# GroupChat Class Documentation
 
-The `GroupChat` class is designed to manage a group chat session involving multiple agents. This class handles initializing the conversation, selecting the next speaker, resetting the chat, and executing the chat rounds, providing a structured approach to managing a dynamic and interactive conversation.
-### Key Concepts
+The GroupChat class manages multi-agent conversations with state persistence, comprehensive logging, and flexible agent configurations. It supports both Agent class instances and callable functions, making it versatile for different use cases.
+
+## Installation
+```bash
+pip install swarms python-dotenv pydantic
+```
 
-- **Agents**: Entities participating in the group chat.
-- **Conversation Management**: Handling the flow of conversation, selecting speakers, and maintaining chat history.
-- **Round-based Execution**: Managing the chat in predefined rounds.
 
 ## Attributes
 
-### Arguments
-
-| Argument | Type | Default | Description |
-|---------------------|----------------------|-------------|-------------|
-| `agents` | `List[Agent]` | `None` | List of agents participating in the group chat. |
-| `max_rounds` | `int` | `10` | Maximum number of chat rounds. |
-| `admin_name` | `str` | `"Admin"` | Name of the admin user. |
-| `group_objective` | `str` | `None` | Objective of the group chat. |
-| `selector_agent` | `Agent` | `None` | Agent responsible for selecting the next speaker. |
-| `rules` | `str` | `None` | Rules for the group chat. |
-| `*args` | | | Variable length argument list. |
-| `**kwargs` | | | Arbitrary keyword arguments. |
-
-### Attributes
-
-| Attribute | Type | Description |
-|---------------------|----------------------|-------------|
-| `agents` | `List[Agent]` | List of agents participating in the group chat. |
-| `max_rounds` | `int` | Maximum number of chat rounds. |
-| `admin_name` | `str` | Name of the admin user. |
-| `group_objective` | `str` | Objective of the group chat. |
-| `selector_agent` | `Agent` | Agent responsible for selecting the next speaker. |
-| `messages` | `Conversation` | Conversation object for storing the chat messages. |
+| Attribute | Type | Description |
+|-----------|------|-------------|
+| state_path | str | Path for saving/loading chat state |
+| wrapped_agents | List[AgentWrapper] | List of wrapped agent instances |
+| selector_agent | AgentWrapper | Agent responsible for speaker selection |
+| state | GroupChatState | Current state of the group chat |
 
 ## Methods
 
-### __init__
-
-Initializes the group chat with the given parameters.
-
-**Examples:**
+### Core Methods
 
 ```python
-agents = [Agent(name="Agent 1"), Agent(name="Agent 2")]
-group_chat = GroupChat(agents=agents, max_rounds=5, admin_name="GroupAdmin")
-```
-
-### agent_names
-
-Returns the names of the agents in the group chat.
+def run(self, task: str) -> str:
+    """Execute the group chat conversation"""
 
-**Returns:**
+def save_state(self) -> None:
+    """Save current state to disk"""
 
-| Return Type | Description |
-|-------------|-------------|
-| `List[str]` | List of agent names. |
+@classmethod
+def load_state(cls, state_path: str) -> 'GroupChat':
+    """Load GroupChat from saved state"""
 
-**Examples:**
+def get_conversation_summary(self) -> Dict[str, Any]:
+    """Return a summary of the conversation"""
 
-```python
-names = group_chat.agent_names
-print(names)  # Output: ['Agent 1', 'Agent 2']
+def export_conversation(self, format: str = "json") -> Union[str, Dict]:
+    """Export the conversation in specified format"""
 ```
 
-### reset
-
-Resets the group chat by clearing the message history.
-
-**Examples:**
+### Internal Methods
 
 ```python
-group_chat.reset()
-```
-
-### agent_by_name
-
-Finds an agent whose name is contained within the given name string.
-
-**Arguments:**
+def _log_interaction(self, agent_name: str, position: int, input_text: str, output_text: str) -> None:
+    """Log a single interaction"""
 
-| Parameter | Type | Description |
-|-----------|--------|-------------|
-| `name` | `str` | Name string to search for. |
+def _add_message(self, role: str, content: str) -> None:
+    """Add a message to the conversation history"""
 
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `Agent` | Agent object with a name contained in the given name string. |
-
-**Raises:**
-
-- `ValueError`: If no agent is found with a name contained in the given name string.
+def select_next_speaker(self, last_speaker: AgentWrapper) -> AgentWrapper:
+    """Select the next speaker using the selector agent"""
+```
 
-**Examples:**
+## Usage Examples
 
+### 1. Basic Setup with Two Agents
 ```python
-agent = group_chat.agent_by_name("Agent 1")
-print(agent.agent_name)  # Output: 'Agent 1'
+import os
+from swarms import Agent, GroupChat
+from swarm_models import OpenAIChat
+
+# Initialize OpenAI
+api_key = os.getenv("OPENAI_API_KEY")
+model = OpenAIChat(openai_api_key=api_key, model_name="gpt-4o-mini")
+
+# Create agents
+analyst = Agent(
+    agent_name="Financial-Analyst",
+    system_prompt="You are a financial analyst...",
+    llm=model
+)
+
+advisor = Agent(
+    agent_name="Investment-Advisor",
+    system_prompt="You are an investment advisor...",
+    llm=model
+)
+
+# Create group chat
+chat = GroupChat(
+    name="Investment Team",
+    agents=[analyst, advisor],
+    max_rounds=5,
+    group_objective="Provide investment advice"
+)
+
+response = chat.run("What's the best investment strategy for retirement?")
 ```
 
-### next_agent
-
-Returns the next agent in the list.
-
-**Arguments:**
-
-| Parameter | Type | Description |
-|-----------|--------|-------------|
-| `agent` | `Agent`| Current agent. |
-
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `Agent` | Next agent in the list. |
-
-**Examples:**
-
+### 2. Advanced Setup with State Management
 ```python
-current_agent = group_chat.agents[0]
-next_agent = group_chat.next_agent(current_agent)
-print(next_agent.agent_name)  # Output: Name of the next agent
+# Create group chat with state persistence
+chat = GroupChat(
+    name="Investment Advisory Team",
+    description="Expert team for financial planning",
+    agents=[analyst, advisor, tax_specialist],
+    max_rounds=10,
+    admin_name="Senior Advisor",
+    group_objective="Provide comprehensive financial planning",
+    state_path="investment_chat_state.json",
+    rules="1. Always provide sources\n2. Be concise\n3. Focus on practical advice"
+)
+
+# Run chat and save state
+response = chat.run("Create a retirement plan for a 35-year-old")
+chat.save_state()
+
+# Load existing chat state
+loaded_chat = GroupChat.load_state("investment_chat_state.json")
 ```
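
A minimal sketch of the periodic-backup habit recommended under Best Practices below, reusing `chat` from the state-management example above; the timestamped file naming is illustrative, not part of the documented API:

```python
import shutil
from datetime import datetime

# Run several rounds, snapshotting state after each critical step
for question in [
    "Create a retirement plan for a 35-year-old",
    "Stress-test the plan against a market downturn",
]:
    chat.run(question)
    chat.save_state()
    # Keep a timestamped copy so earlier states remain recoverable
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    shutil.copy(
        "investment_chat_state.json",
        f"investment_chat_state.{stamp}.json",
    )
```
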
 
-### select_speaker_msg
-
-Returns the message for selecting the next speaker.
-
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `str` | Prompt message for selecting the next speaker. |
-
-**Examples:**
-
+### 3. Using Custom Callable Agents
 ```python
-message = group_chat.select_speaker_msg()
-print(message)
+def custom_agent(input_text: str) -> str:
+    # Custom logic here
+    return f"Processed: {input_text}"
+
+# Mix of regular agents and callable functions
+chat = GroupChat(
+    name="Hybrid Team",
+    agents=[analyst, custom_agent],
+    max_rounds=3
+)
 ```
 
-### select_speaker
-
-Selects the next speaker.
-
-**Arguments:**
-
-| Parameter | Type | Description |
-|----------------------|--------|-------------|
-| `last_speaker_agent` | `Agent`| Last speaker in the conversation. |
-| `selector_agent` | `Agent`| Agent responsible for selecting the next speaker. |
-
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `Agent` | Next speaker. |
-
-**Examples:**
-
+### 4. Export and Analysis
 ```python
-next_speaker = group_chat.select_speaker(last_speaker_agent, selector_agent)
-print(next_speaker.agent_name)
+# Run chat
+chat.run("Analyze market conditions")
+
+# Get summary
+summary = chat.get_conversation_summary()
+print(summary)
+
+# Export in different formats
+json_conv = chat.export_conversation(format="json")
+text_conv = chat.export_conversation(format="text")
 ```
 
-### _participant_roles
-
-Returns the roles of the participants.
-
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `str` | Participant roles. |
-
-**Examples:**
-
+### 5. Advanced Configuration with Custom Selector
 ```python
-roles = group_chat._participant_roles()
-print(roles)
+class CustomSelector(Agent):
+    def run(self, input_text: str) -> str:
+        # Custom selection logic
+        return "Financial-Analyst"
+
+chat = GroupChat(
+    name="Custom Selection Team",
+    agents=[analyst, advisor],
+    selector_agent=CustomSelector(
+        agent_name="Custom-Selector",
+        system_prompt="Select the next speaker based on expertise",
+        llm=model
+    ),
+    max_rounds=5
+)
 ```
 
-### __call__
-
-Executes the group chat as a function.
-
-**Arguments:**
-
-| Parameter | Type | Description |
-|-----------|--------|-------------|
-| `task` | `str` | Task to be performed. |
-
-**Returns:**
-
-| Return Type | Description |
-|-------------|-------------|
-| `str` | Reply from the last speaker. |
-
-**Examples:**
+### 6. Debugging Setup
 ```python
-response = group_chat(task="Discuss the project plan")
-print(response)
+import logging
+
+# Configure logging
+logging.basicConfig(level=logging.DEBUG)
+
+chat = GroupChat(
+    name="Debug Team",
+    agents=[analyst, advisor],
+    max_rounds=3,
+    state_path="debug_chat.json"
+)
+
+# Run with detailed logging
+try:
+    response = chat.run("Complex query")
+except Exception as e:
+    logging.error(f"Chat failed: {str(e)}")
+    # Access last successful state
+    state = chat.state
 ```
 
+## Error Handling
+
+The GroupChat class includes comprehensive error handling:
+
+```python
+try:
+    chat = GroupChat(agents=[analyst])  # Will raise ValueError
+except ValueError as e:
+    print("Configuration error:", str(e))
+
+try:
+    response = chat.run("Query")
+except Exception as e:
+    # Access error state
+    error_summary = chat.get_conversation_summary()
+    print("Execution error:", str(e))
+    print("State at error:", error_summary)
+```
 
-### Additional Examples
+## Best Practices
 
-#### Example 1: Initializing and Running a Group Chat
+1. **State Management**:
+   - Always specify a `state_path` for important conversations
+   - Use `save_state()` after critical operations
+   - Implement regular state backups for long conversations
 
-```python
-agents = [Agent(name="Agent 1"), Agent(name="Agent 2"), Agent(name="Agent 3")]
-selector_agent = Agent(name="Selector")
-group_chat = GroupChat(agents=agents, selector_agent=selector_agent, max_rounds=3, group_objective="Discuss the quarterly goals.")
+2. **Agent Configuration**:
+   - Provide clear system prompts for each agent
+   - Use descriptive agent names
+   - Consider agent expertise when setting the group objective
 
-response = group_chat(task="Let's start the discussion on quarterly goals.")
-print(response)
-```
+3. **Performance**:
+   - Keep `max_rounds` reasonable (5-10 for most cases)
+   - Use early stopping conditions when possible
+   - Monitor conversation length and complexity
 
-#### Example 2: Resetting the Group Chat
+4. **Error Handling**:
+   - Always wrap chat execution in try-except blocks
+   - Implement proper logging
+   - Save states before potentially risky operations
 
-```python
-group_chat.reset()
-```
-
-#### Example 3: Selecting the Next Speaker
-
-```python
-last_speaker = group_chat.agents[0]
-next_speaker = group_chat.select_speaker(last_speaker_agent=last_speaker, selector_agent=selector_agent)
-print(next_speaker.agent_name)
-```
+## Limitations
 
-## Summary
+- Agents must either have a `run` method or be callable
+- State files can grow large with many interactions
+- Selector agent may need optimization for large agent groups
+- Real-time streaming not supported in basic configuration
 
-The `GroupChat` class offers a structured approach to managing a group chat involving multiple agents. With functionalities for initializing conversations, selecting speakers, and handling chat rounds, it provides a robust framework for dynamic and interactive discussions. This makes it an essential tool for applications requiring coordinated communication among multiple agents.
\ No newline at end of file
diff --git a/persistent_legal_agent.py b/persistent_legal_agent.py
new file mode 100644
index 00000000..65e8d61a
--- /dev/null
+++ b/persistent_legal_agent.py
@@ -0,0 +1,113 @@
+import os
+from swarms import Agent
+from swarm_models import OpenAIChat
+from dotenv import load_dotenv
+
+# Custom system prompt for VC legal document generation
+VC_LEGAL_AGENT_PROMPT = """You are a specialized legal document assistant focusing on venture capital documentation.
+Your role is to help draft preliminary versions of common VC legal documents while adhering to these guidelines:
+
+1. Always include standard legal disclaimers
+2. Follow standard VC document structures
+3. Flag areas that need attorney review
+4. Request necessary information for document completion
+5. Maintain consistency across related documents
+6. Output <DONE> only when document is complete and verified
+
+Remember: All output should be marked as 'DRAFT' and require professional legal review."""
+
+
+def create_vc_legal_agent():
+    load_dotenv()
+
+    # Configure the model with appropriate parameters for legal work
+    # Get the Groq API key from the environment variable
+    api_key = os.getenv("GROQ_API_KEY")
+
+    # Model
+    model = OpenAIChat(
+        openai_api_base="https://api.groq.com/openai/v1",
+        openai_api_key=api_key,
+        model_name="llama-3.1-70b-versatile",
+        temperature=0.1,
+    )
+
+    # Initialize the persistent agent
+    agent = Agent(
+        agent_name="VC-Legal-Document-Agent",
+        system_prompt=VC_LEGAL_AGENT_PROMPT,
+        llm=model,
+        max_loops="auto",  # Allows multiple iterations until completion
+        stopping_token="<DONE>",  # Agent will continue until this token is output
+        autosave=True,
+        dashboard=True,  # Enable dashboard for monitoring
+        verbose=True,
+        dynamic_temperature_enabled=False,  # Disable for consistency in legal documents
+        saved_state_path="vc_legal_agent_state.json",
+        user_name="legal_corp",
+        retry_attempts=3,
+        context_length=200000,
+        return_step_meta=True,
+        output_type="string",
+        streaming_on=False,
+    )
+
+    return agent
+
+
+def generate_legal_document(agent, document_type, parameters):
+    """
+    Generate a legal document with multiple refinement iterations
+
+    Args:
+        agent: The initialized VC legal agent
+        document_type: Type of document to generate (e.g., "term_sheet", "investment_agreement")
+        parameters: Dict containing necessary parameters for the document
+
+    Returns:
+        str: The generated document content
+    """
+    prompt = f"""
+    Generate a {document_type} with the following parameters:
+    {parameters}
+
+    Please follow these steps:
+    1. Create initial draft
+    2. Review for completeness
+    3. Add necessary legal disclaimers
+    4. Verify all required sections
+    5. Output <DONE> when complete
+
+    Include [REQUIRES LEGAL REVIEW] tags for sections needing attorney attention.
+    """
+
+    return agent.run(prompt)
+
+
+# Example usage
+if __name__ == "__main__":
+    # Initialize the agent
+    legal_agent = create_vc_legal_agent()
+
+    # Example parameters for a term sheet
+    parameters = {
+        "company_name": "TechStartup Inc.",
+        "investment_amount": "$5,000,000",
+        "valuation": "$20,000,000",
+        "investor_rights": [
+            "Board seat",
+            "Pro-rata rights",
+            "Information rights",
+        ],
+        "type_of_security": "Series A Preferred Stock",
+    }
+
+    # Generate a term sheet
+    document = generate_legal_document(
+        legal_agent, "term_sheet", parameters
+    )
+
+    # Save the generated document
+    with open("generated_term_sheet_draft.md", "w") as f:
+        f.write(document)
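
The same helper covers the other document type named in its docstring. A minimal sketch of a second call against the persistent agent above; the parameter keys here are illustrative, not a fixed schema:

```python
# Reuse the same persistent agent for an investment agreement draft
investment_parameters = {
    "company_name": "TechStartup Inc.",
    "investment_amount": "$5,000,000",
    "closing_date": "2024-12-01",  # illustrative field
}

agreement = generate_legal_document(
    legal_agent, "investment_agreement", investment_parameters
)

with open("generated_investment_agreement_draft.md", "w") as f:
    f.write(agreement)
```
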
diff --git a/rag_examples/agent_with_rag.py b/rag_examples/agent_with_rag.py
deleted file mode 100644
index 153c207d..00000000
--- a/rag_examples/agent_with_rag.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-
-from swarms_memory import ChromaDB
-
-from swarms import Agent
-from swarm_models import Anthropic
-from swarms.prompts.finance_agent_sys_prompt import (
-    FINANCIAL_AGENT_SYS_PROMPT,
-)
-
-# Initilaize the chromadb client
-chromadb = ChromaDB(
-    metric="cosine",
-    output_dir="fiance_agent_rag",
-    # docs_folder="artifacts",  # Folder of your documents
-)
-
-# Model
-model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
-
-
-# Initialize the agent
-agent = Agent(
-    agent_name="Financial-Analysis-Agent",
-    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
-    agent_description="Agent creates ",
-    llm=model,
-    max_loops="auto",
-    autosave=True,
-    dashboard=False,
-    verbose=True,
-    streaming_on=True,
-    dynamic_temperature_enabled=True,
-    saved_state_path="finance_agent.json",
-    user_name="swarms_corp",
-    retry_attempts=3,
-    context_length=200000,
-    long_term_memory=chromadb,
-)
-
-
-agent.run(
-    "What are the components of a startups stock incentive equity plan"
-)
diff --git a/rag_examples/agent_with_rag_and_tools.py b/rag_examples/agent_with_rag_and_tools.py
deleted file mode 100644
index f278c173..00000000
--- a/rag_examples/agent_with_rag_and_tools.py
+++ /dev/null
@@ -1,117 +0,0 @@
-from swarms import Agent
-from swarm_models import OpenAIChat
-from swarms_memory import ChromaDB
-import subprocess
-import os
-
-# Making an instance of the ChromaDB class
-memory = ChromaDB(
-    metric="cosine",
-    n_results=3,
-    output_dir="results",
-    docs_folder="docs",
-)
-
-# Model
-model = OpenAIChat(
-    api_key=os.getenv("OPENAI_API_KEY"),
-    model_name="gpt-4o-mini",
-    temperature=0.1,
-)
-
-
-# Tools in swarms are simple python functions and docstrings
-def terminal(
-    code: str,
-):
-    """
-    Run code in the terminal.
-
-    Args:
-        code (str): The code to run in the terminal.
-
-    Returns:
-        str: The output of the code.
-    """
-    out = subprocess.run(
-        code, shell=True, capture_output=True, text=True
-    ).stdout
-    return str(out)
-
-
-def browser(query: str):
-    """
-    Search the query in the browser with the `browser` tool.
-
-    Args:
-        query (str): The query to search in the browser.
-
-    Returns:
-        str: The search results.
-    """
-    import webbrowser
-
-    url = f"https://www.google.com/search?q={query}"
-    webbrowser.open(url)
-    return f"Searching for {query} in the browser."
-
-
-def create_file(file_path: str, content: str):
-    """
-    Create a file using the file editor tool.
-
-    Args:
-        file_path (str): The path to the file.
-        content (str): The content to write to the file.
-
-    Returns:
-        str: The result of the file creation operation.
-    """
-    with open(file_path, "w") as file:
-        file.write(content)
-    return f"File {file_path} created successfully."
-
-
-def file_editor(file_path: str, mode: str, content: str):
-    """
-    Edit a file using the file editor tool.
-
-    Args:
-        file_path (str): The path to the file.
-        mode (str): The mode to open the file in.
-        content (str): The content to write to the file.
-
-    Returns:
-        str: The result of the file editing operation.
-    """
-    with open(file_path, mode) as file:
-        file.write(content)
-    return f"File {file_path} edited successfully."
-
-
-# Agent
-agent = Agent(
-    agent_name="Devin",
-    system_prompt=(
-        "Autonomous agent that can interact with humans and other"
-        " agents. Be Helpful and Kind. Use the tools provided to"
-        " assist the user. Return all code in markdown format."
-    ),
-    llm=model,
-    max_loops="auto",
-    autosave=True,
-    dashboard=False,
-    streaming_on=True,
-    verbose=True,
-    stopping_token="<DONE>",
-    interactive=True,
-    tools=[terminal, browser, file_editor, create_file],
-    streaming=True,
-    long_term_memory=memory,
-)
-
-# Run the agent
-out = agent(
-    "Create a CSV file with the latest tax rates for C corporations in the following ten states and the District of Columbia: Alabama, California, Florida, Georgia, Illinois, New York, North Carolina, Ohio, Texas, and Washington."
-)
-print(out)
diff --git a/real_estate_agent.py b/real_estate_agent.py
new file mode 100644
index 00000000..92864209
--- /dev/null
+++ b/real_estate_agent.py
@@ -0,0 +1,319 @@
+"""
+Zoe - Real Estate Agent
+
+"""
+
+from typing import Optional, Dict, Any, List
+from dataclasses import dataclass
+from datetime import datetime
+import os
+import json
+import requests
+from loguru import logger
+from swarms import Agent
+from swarm_models import OpenAIChat
+from dotenv import load_dotenv
+from enum import Enum
+
+# Configure loguru logger
+logger.add(
+    "logs/real_estate_agent_{time}.log",
+    rotation="500 MB",
+    retention="10 days",
+    level="INFO",
+    format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
+)
+
+
+class PropertyType(str, Enum):
+    """Enum for property types"""
+
+    OFFICE = "office"
+    RETAIL = "retail"
+    INDUSTRIAL = "industrial"
+    MIXED_USE = "mixed-use"
+    LAND = "land"
+
+
+@dataclass
+class PropertyListing:
+    """Data class for commercial property listings"""
+
+    property_id: str
+    address: str
+    city: str
+    state: str
+    zip_code: str
+    price: float
+    square_footage: float
+    property_type: PropertyType
+    zoning: str
+    listing_date: datetime
+    lat: float
+    lng: float
+    description: Optional[str] = None
+    features: Optional[List[str]] = None
+    images: Optional[List[str]] = None
+
+
+class PropertyRadarAPI:
+    """Client for PropertyRadar API integration"""
+
+    def __init__(self, api_key: str):
+        """Initialize PropertyRadar API client
+
+        Args:
+            api_key (str): PropertyRadar API key
+        """
+        self.api_key = api_key
+        self.base_url = "https://api.propertyradar.com/v1"
+        self.session = requests.Session()
+        self.session.headers.update(
+            {
+                "Authorization": f"Bearer {api_key}",
+                "Content-Type": "application/json",
+            }
+        )
+
+    def search_properties(
+        self,
+        max_price: float = 10_000_000,
+        property_types: List[PropertyType] = None,
+        location: Dict[str, Any] = None,
+        min_sqft: Optional[float] = None,
+        max_sqft: Optional[float] = None,
+        page: int = 1,
+        limit: int = 20,
+    ) -> List[PropertyListing]:
+        """
+        Search for commercial properties using PropertyRadar API
+
+        Args:
+            max_price (float): Maximum property price
+            property_types (List[PropertyType]): Types of properties to search for
+            location (Dict[str, Any]): Location criteria (city, county, or coordinates)
+            min_sqft (Optional[float]): Minimum square footage
+            max_sqft (Optional[float]): Maximum square footage
+            page (int): Page number for pagination
+            limit (int): Number of results per page
+
+        Returns:
+            List[PropertyListing]: List of matching properties
+        """
+        try:
+            # Build the query parameters
+            params = {
+                "price_max": max_price,
+                "property_types": (
+                    [pt.value for pt in property_types]
+                    if property_types
+                    else None
+                ),
+                "page": page,
+                "limit": limit,
+                "for_sale": True,
+                "state": "FL",  # Florida only
+                "commercial_property": True,
+            }
+
+            # Add location parameters
+            if location:
+                params.update(location)
+
+            # Add square footage filters
+            if min_sqft:
+                params["square_feet_min"] = min_sqft
+            if max_sqft:
+                params["square_feet_max"] = max_sqft
+
+            # Make the API request
+            response = self.session.get(
+                f"{self.base_url}/properties",
+                params={
+                    k: v for k, v in params.items() if v is not None
+                },
+            )
+            response.raise_for_status()
+
+            # Parse the response
+            properties_data = response.json()
+
+            # Convert to PropertyListing objects
+            return [
+                PropertyListing(
+                    property_id=prop["id"],
+                    address=prop["address"],
+                    city=prop["city"],
+                    state=prop["state"],
+                    zip_code=prop["zip_code"],
+                    price=float(prop["price"]),
+                    square_footage=float(prop["square_feet"]),
+                    property_type=PropertyType(prop["property_type"]),
+                    zoning=prop["zoning"],
+                    listing_date=datetime.fromisoformat(
+                        prop["list_date"]
+                    ),
+                    lat=float(prop["latitude"]),
+                    lng=float(prop["longitude"]),
+                    description=prop.get("description"),
+                    features=prop.get("features", []),
+                    images=prop.get("images", []),
+                )
+                for prop in properties_data["results"]
+            ]
+
+        except requests.RequestException as e:
+            logger.error(f"Error fetching properties: {str(e)}")
+            raise
+
+
+class CommercialRealEstateAgent:
+    """Agent for searching and analyzing commercial real estate properties"""
+
+    def __init__(
+        self,
+        openai_api_key: str,
+        propertyradar_api_key: str,
+        model_name: str = "gpt-4",
+        temperature: float = 0.1,
+        saved_state_path: Optional[str] = None,
+    ):
+        """Initialize the real estate agent
+
+        Args:
+            openai_api_key (str): OpenAI API key
+            propertyradar_api_key (str): PropertyRadar API key
+            model_name (str): Name of the LLM model to use
+            temperature (float): Temperature setting for the LLM
+            saved_state_path (Optional[str]): Path to save agent state
+        """
+        self.property_api = PropertyRadarAPI(propertyradar_api_key)
+
+        # Initialize OpenAI model
+        self.model = OpenAIChat(
+            openai_api_key=openai_api_key,
+            model_name=model_name,
+            temperature=temperature,
+        )
+
+        # Initialize the agent
+        self.agent = Agent(
+            agent_name="Commercial-Real-Estate-Agent",
+            system_prompt=self._get_system_prompt(),
+            llm=self.model,
+            max_loops=1,
+            autosave=True,
+            dashboard=False,
+            verbose=True,
+            saved_state_path=saved_state_path,
+            context_length=200000,
+            streaming_on=False,
+        )
+
+        logger.info(
+            "Commercial Real Estate Agent initialized successfully"
+        )
+
+    def _get_system_prompt(self) -> str:
+        """Get the system prompt for the agent"""
+        return """You are a specialized commercial real estate agent assistant focused on Central Florida properties.
+        Your primary responsibilities are:
+        1. Search for commercial properties under $10 million
+        2. Focus on properties zoned for commercial use
+        3. Provide detailed analysis of property features, location benefits, and potential ROI
+        4. Consider local market conditions and growth potential
+        5. Verify zoning compliance and restrictions
+
+        When analyzing properties, consider:
+        - Current market valuations
+        - Local business development plans
+        - Traffic patterns and accessibility
+        - Nearby amenities and businesses
+        - Future development potential"""
+
+    def search_properties(
+        self,
+        max_price: float = 10_000_000,
+        property_types: List[PropertyType] = None,
+        location: Dict[str, Any] = None,
+        min_sqft: Optional[float] = None,
+        max_sqft: Optional[float] = None,
+    ) -> List[Dict[str, Any]]:
+        """
+        Search for properties and provide analysis
+
+        Args:
+            max_price (float): Maximum property price
+            property_types (List[PropertyType]): Types of properties to search
+            location (Dict[str, Any]): Location criteria
+            min_sqft (Optional[float]): Minimum square footage
+            max_sqft (Optional[float]): Maximum square footage
+
+        Returns:
+            List[Dict[str, Any]]: List of properties with analysis
+        """
+        try:
+            # Search for properties
+            properties = self.property_api.search_properties(
+                max_price=max_price,
+                property_types=property_types,
+                location=location,
+                min_sqft=min_sqft,
+                max_sqft=max_sqft,
+            )
+
+            # Analyze each property
+            analyzed_properties = []
+            for prop in properties:
+                analysis = self.agent.run(
+                    f"Analyze this commercial property:\n"
+                    f"Address: {prop.address}, {prop.city}, FL {prop.zip_code}\n"
+                    f"Price: ${prop.price:,.2f}\n"
+                    f"Square Footage: {prop.square_footage:,.0f}\n"
+                    f"Property Type: {prop.property_type.value}\n"
+                    f"Zoning: {prop.zoning}\n"
+                    f"Description: {prop.description or 'Not provided'}"
+                )
+
+                analyzed_properties.append(
+                    {"property": prop.__dict__, "analysis": analysis}
+                )
+
+            logger.info(
+                f"Successfully analyzed {len(analyzed_properties)} properties"
+            )
+            return analyzed_properties
+
+        except Exception as e:
+            logger.error(
+                f"Error in property search and analysis: {str(e)}"
+            )
+            raise
+
+
+def main():
+    """Main function to demonstrate usage"""
+    load_dotenv()
+
+    # Initialize the agent
+    agent = CommercialRealEstateAgent(
+        openai_api_key=os.getenv("OPENAI_API_KEY"),
+        propertyradar_api_key=os.getenv("PROPERTYRADAR_API_KEY"),
+        saved_state_path="real_estate_agent_state.json",
+    )
+
+    # Example search
+    results = agent.search_properties(
+        max_price=5_000_000,
+        property_types=[PropertyType.RETAIL, PropertyType.OFFICE],
+        location={"city": "Orlando", "radius_miles": 25},
+        min_sqft=2000,
+    )
+
+    # Save results
+    with open("search_results.json", "w") as f:
+        json.dump(results, f, default=str, indent=2)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/rearrange_test 2.py b/rearrange_test 2.py
new file mode 100644
index 00000000..ddfd7670
--- /dev/null
+++ b/rearrange_test 2.py
@@ -0,0 +1,119 @@
+import os
+
+from swarms import Agent, AgentRearrange
+
+from swarm_models import OpenAIChat
+
+# Get the OpenAI API key from the environment variable
+api_key = os.getenv("OPENAI_API_KEY")
+
+# Create an instance of the OpenAIChat class
+model = OpenAIChat(
+    api_key=api_key, model_name="gpt-4o-mini", temperature=0.1
+)
+
+
+# Initialize the boss agent (Director)
+boss_agent = Agent(
+    agent_name="BossAgent",
+    system_prompt="""
+    You are the BossAgent responsible for managing and overseeing a swarm of agents analyzing company expenses.
+    Your job is to dynamically assign tasks, prioritize their execution, and ensure that all agents collaborate efficiently.
+    After receiving a report on the company's expenses, you will break down the work into smaller tasks,
+    assigning specific tasks to each agent, such as detecting recurring high costs, categorizing expenditures,
+    and identifying unnecessary transactions. Ensure the results are communicated back in a structured way
+    so the finance team can take actionable steps to cut off unproductive spending. You also monitor and
+    dynamically adapt the swarm to optimize their performance. Finally, you summarize their findings
+    into a coherent report.
+    """,
+    llm=model,
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    state_save_file_type="json",
+    saved_state_path="boss_agent.json",
+)
+
+# Initialize worker 1: Expense Analyzer
+worker1 = Agent(
+    agent_name="ExpenseAnalyzer",
+    system_prompt="""
+    Your task is to carefully analyze the company's expense data provided to you.
+    You will focus on identifying high-cost recurring transactions, categorizing expenditures
+    (e.g., marketing, operations, utilities, etc.), and flagging areas where there seems to be excessive spending.
+    You will provide a detailed breakdown of each category, along with specific recommendations for cost-cutting.
+    Pay close attention to monthly recurring subscriptions, office supplies, and non-essential expenditures.
+    """,
+    llm=model,
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    state_save_file_type="json",
+    saved_state_path="worker1.json",
+)
+
+# Initialize worker 2: Summary Generator
+worker2 = Agent(
+    agent_name="SummaryGenerator",
+    system_prompt="""
+    After receiving the detailed breakdown from the ExpenseAnalyzer,
+    your task is to create a concise summary of the findings. You will focus on the most actionable insights,
+    such as highlighting the specific transactions that can be immediately cut off and summarizing the areas
+    where the company is overspending. Your summary will be used by the BossAgent to generate the final report.
+    Be clear and to the point, emphasizing the urgency of cutting unnecessary expenses.
+    """,
+    llm=model,
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    state_save_file_type="json",
+    saved_state_path="worker2.json",
+)
+
+# Swarm-Level Prompt (Collaboration Prompt)
+swarm_prompt = """
+    As a swarm, your collective goal is to analyze the company's expenses and identify transactions that should be cut off.
+    You will work collaboratively to break down the entire process of expense analysis into manageable steps.
+    The BossAgent will direct the flow and assign tasks dynamically to the agents. The ExpenseAnalyzer will first
+    focus on breaking down the expense report, identifying high-cost recurring transactions, categorizing them,
+    and providing recommendations for potential cost reduction. After the analysis, the SummaryGenerator will then
+    consolidate all the findings into an actionable summary that the finance team can use to immediately cut off unnecessary expenses.
+    Together, your collaboration is essential to streamlining and improving the company’s financial health.
+"""
+
+# Create a list of agents
+agents = [boss_agent, worker1, worker2]
+
+# Define the flow pattern for the swarm
+flow = "BossAgent -> ExpenseAnalyzer -> SummaryGenerator"
+
+# Using AgentRearrange class to manage the swarm
+agent_system = AgentRearrange(
+    agents=agents,
+    flow=flow,
+    return_json=False,
+    output_type="final",
+    max_loops=1,
+    docs=["SECURITY.md"],
+)
+
+# Input task for the swarm
+task = f"""
+
+    {swarm_prompt}
+
+    The company has been facing a rising number of unnecessary expenses, and the finance team needs a detailed
+    analysis of recent transactions to identify which expenses can be cut off to improve profitability.
+    Analyze the provided transaction data and create a detailed report on cost-cutting opportunities,
+    focusing on recurring transactions and non-essential expenditures.
+"""
+
+# Run the swarm system with the task
+output = agent_system.run(task)
+print(output)
diff --git a/scripts/docs/create_llm_file_for_docs.sh b/scripts/docs/create_llm_file_for_docs.sh
new file mode 100644
index 00000000..0b0ca612
--- /dev/null
+++ b/scripts/docs/create_llm_file_for_docs.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Set up logging
+LOG_FILE="docs_compilation.log"
+OUTPUT_FILE="combined_docs.txt"
+
+# Initialize log file
+echo "$(date): Starting documentation compilation" > "$LOG_FILE"
+
+# Create/clear output file
+> "$OUTPUT_FILE"
+
+# Function to determine file type and handle accordingly
+process_file() {
+    local file="$1"
+
+    # Get file extension
+    extension="${file##*.}"
+
+    echo "$(date): Processing $file" >> "$LOG_FILE"
+
+    case "$extension" in
+        md|markdown)
+            echo "# $(basename "$file")" >> "$OUTPUT_FILE"
+            cat "$file" >> "$OUTPUT_FILE"
+            echo -e "\n\n" >> "$OUTPUT_FILE"
+            ;;
+        txt)
+            echo "# $(basename "$file")" >> "$OUTPUT_FILE"
+            cat "$file" >> "$OUTPUT_FILE"
+            echo -e "\n\n" >> "$OUTPUT_FILE"
+            ;;
+        *)
+            echo "$(date): Skipping $file - unsupported format" >> "$LOG_FILE"
+            return
+            ;;
+    esac
+
+    echo "$(date): Successfully processed $file" >> "$LOG_FILE"
+}
+
+# Find and process all documentation files
+find ../docs -type f \( -name "*.md" -o -name "*.txt" -o -name "*.markdown" \) | while read -r file; do
+    process_file "$file"
+done
+
+# Log completion
+echo "$(date): Documentation compilation complete" >> "$LOG_FILE"
+echo "$(date): Output saved to $OUTPUT_FILE" >> "$LOG_FILE"
+
+# Print summary
+echo "Documentation compilation complete. Check $LOG_FILE for details."
\ No newline at end of file
diff --git a/sequential_worflow_test 2.py b/sequential_worflow_test 2.py
new file mode 100644
index 00000000..654154c6
--- /dev/null
+++ b/sequential_worflow_test 2.py
@@ -0,0 +1,117 @@
+import os
+from dotenv import load_dotenv
+from swarms import Agent, SequentialWorkflow
+from swarm_models import OpenAIChat
+
+load_dotenv()
+
+# Get the Groq API key from the environment variable
+api_key = os.getenv("GROQ_API_KEY")
+
+# Model
+model = OpenAIChat(
+    openai_api_base="https://api.groq.com/openai/v1",
+    openai_api_key=api_key,
+    model_name="llama-3.1-70b-versatile",
+    temperature=0.1,
+)
+
+
+# Initialize specialized agents
+data_extractor_agent = Agent(
+    agent_name="Data-Extractor",
+    system_prompt=None,
+    llm=model,
+    max_loops=1,
+    autosave=True,
+    verbose=True,
+    dynamic_temperature_enabled=True,
+    saved_state_path="data_extractor_agent.json",
+    user_name="pe_firm",
+    retry_attempts=1,
+    context_length=200000,
+    output_type="string",
+)
+
+summarizer_agent = Agent(
+    agent_name="Document-Summarizer",
+    system_prompt=None,
+    llm=model,
+    max_loops=1,
+    autosave=True,
+    verbose=True,
+    dynamic_temperature_enabled=True,
+    saved_state_path="summarizer_agent.json",
+    user_name="pe_firm",
+    retry_attempts=1,
+    context_length=200000,
+    output_type="string",
+)
+
+financial_analyst_agent = Agent(
+    agent_name="Financial-Analyst",
+    system_prompt=None,
+    llm=model,
+    max_loops=1,
+    autosave=True,
+    verbose=True,
+    dynamic_temperature_enabled=True,
+    saved_state_path="financial_analyst_agent.json",
+    user_name="pe_firm",
+    retry_attempts=1,
+    context_length=200000,
+    output_type="string",
+)
+
+market_analyst_agent = Agent(
+    agent_name="Market-Analyst",
+    system_prompt=None,
+    llm=model,
+    max_loops=1,
+    autosave=True,
+    verbose=True,
+    dynamic_temperature_enabled=True,
+    saved_state_path="market_analyst_agent.json",
+    user_name="pe_firm",
+    retry_attempts=1,
+    context_length=200000,
+    output_type="string",
+)
+
+operational_analyst_agent = Agent(
+    agent_name="Operational-Analyst",
+    system_prompt=None,
+    llm=model,
+    max_loops=1,
+    autosave=True,
+    verbose=True,
+    dynamic_temperature_enabled=True,
+    saved_state_path="operational_analyst_agent.json",
+    user_name="pe_firm",
+    retry_attempts=1,
+    context_length=200000,
+    output_type="string",
+)
+
+# Initialize the SequentialWorkflow
+router = SequentialWorkflow(
+    name="pe-document-analysis-swarm",
+    description="Analyze documents for private equity due diligence and investment decision-making",
+    max_loops=1,
+    agents=[
+        data_extractor_agent,
+        summarizer_agent,
+        financial_analyst_agent,
+        market_analyst_agent,
+        operational_analyst_agent,
+    ],
+    output_type="all",
+)
+
+# Example usage
+if __name__ == "__main__":
+    # Run a comprehensive private equity document analysis task
+    result = router.run(
+        "Where is the best place to find template term sheets for series A startups. Provide links and references"
+    )
+    print(result)
diff --git a/swarms/cli/parse_yaml.py b/swarms/cli/parse_yaml.py
deleted file mode 100644
index de8e936d..00000000
--- a/swarms/cli/parse_yaml.py
+++ /dev/null
@@ -1,120 +0,0 @@
-from swarms.utils.loguru_logger import logger
-import yaml
-from pydantic import BaseModel
-from typing import List, Optional
-import json
-from swarms.structs.agent_registry import AgentRegistry
-from swarms.structs.agent import Agent
-from swarm_models.popular_llms import OpenAIChat
-
-
-class AgentInput(BaseModel):
-    agent_name: str = "Swarm Agent"
-    system_prompt: Optional[str] = None
-    agent_description: Optional[str] = None
-    model_name: str = "OpenAIChat"
-    max_loops: int = 1
-    autosave: bool = False
-    dynamic_temperature_enabled: bool = False
-    dashboard: bool = False
-    verbose: bool = False
-    streaming_on: bool = True
-    saved_state_path: Optional[str] = None
-    sop: Optional[str] = None
-    sop_list: Optional[List[str]] = None
-    user_name: str = "User"
-    retry_attempts: int = 3
-    context_length: int = 8192
-    task: Optional[str] = None
-    interactive: bool = False
-
-
-def parse_yaml_to_json(yaml_str: str) -> str:
-    """
-    Parses the given YAML string into an AgentInput model and converts it to a JSON string.
-
-    Args:
-        yaml_str (str): The YAML string to be parsed.
-
-    Returns:
-        str: The JSON string representation of the parsed YAML.
-
-    Raises:
-        ValueError: If the YAML string cannot be parsed into the AgentInput model.
-    """
-    try:
-        data = yaml.safe_load(yaml_str)
-        agent_input = AgentInput(**data)
-        return agent_input.json()
-    except yaml.YAMLError as e:
-        print(f"YAML Error: {e}")
-        raise ValueError("Invalid YAML input.") from e
-    except ValueError as e:
-        print(f"Validation Error: {e}")
-        raise ValueError("Invalid data for AgentInput model.") from e
-
-
-# # Example usage
-# yaml_input = """
-# agent_name: "Custom Agent"
-# system_prompt: "System prompt example"
-# agent_description: "This is a test agent"
-# model_name: "CustomModel"
-# max_loops: 5
-# autosave: true
-# dynamic_temperature_enabled: true
-# dashboard: true
-# verbose: true
-# streaming_on: false
-# saved_state_path: "/path/to/state"
-# sop: "Standard operating procedure"
-# sop_list: ["step1", "step2"]
-# user_name: "Tester"
-# retry_attempts: 5
-# context_length: 4096
-# task: "Perform testing"
-# """
-
-# json_output = parse_yaml_to_json(yaml_input)
-# print(json_output)
-
-registry = AgentRegistry()
-
-
-def create_agent_from_yaml(yaml_path: str) -> None:
-    with open(yaml_path, "r") as file:
-        yaml_str = file.read()
-    agent_json = parse_yaml_to_json(yaml_str)
-    agent_config = json.loads(agent_json)
-
-    agent = Agent(
-        agent_name=agent_config.get("agent_name", "Swarm Agent"),
-        system_prompt=agent_config.get("system_prompt"),
-        agent_description=agent_config.get("agent_description"),
-        llm=OpenAIChat(),
-        max_loops=agent_config.get("max_loops", 1),
-        autosave=agent_config.get("autosave", False),
-        dynamic_temperature_enabled=agent_config.get(
-            "dynamic_temperature_enabled", False
-        ),
-        dashboard=agent_config.get("dashboard", False),
-        verbose=agent_config.get("verbose", False),
-        streaming_on=agent_config.get("streaming_on", True),
-        saved_state_path=agent_config.get("saved_state_path"),
-        retry_attempts=agent_config.get("retry_attempts", 3),
-        context_length=agent_config.get("context_length", 8192),
-    )
-
-    registry.add(agent.agent_name, agent)
-    logger.info(f"Agent {agent.agent_name} created from {yaml_path}.")
-
-
-def run_agent(agent_name: str, task: str) -> None:
-    agent = registry.find_agent_by_name(agent_name)
-    agent.run(task)
-
-
-def list_agents() -> None:
-    agents = registry.list_agents()
-    for agent_id in agents:
-        print(agent_id)
diff --git a/swarms/schemas/plan.py b/swarms/schemas/plan.py
deleted file mode 100644
index 060d4b3b..00000000
--- a/swarms/schemas/plan.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from typing import List
-from pydantic import BaseModel
-from swarms.schemas.agent_step_schemas import Step
-
-
-class Plan(BaseModel):
-    steps: List[Step]
-
-    class Config:
-        orm_mode = True
diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py
index 05b74bc6..e391a1d1 100644
--- a/swarms/structs/__init__.py
+++ b/swarms/structs/__init__.py
@@ -19,7 +19,6 @@ from swarms.structs.majority_voting import (
     parse_code_completion,
 )
 from swarms.structs.message import Message
-from swarms.structs.message_pool import MessagePool
 from swarms.structs.mixture_of_agents import MixtureOfAgents
 from swarms.structs.multi_agent_collab import MultiAgentCollaboration
 
@@ -93,7 +92,6 @@ __all__ = [
     "most_frequent",
     "parse_code_completion",
     "Message",
-    "MessagePool",
     "MultiAgentCollaboration",
     "SwarmNetwork",
     "AgentRearrange",
diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py
index 696193be..20f41ae6 100644
--- a/swarms/structs/agent.py
+++ b/swarms/structs/agent.py
@@ -1,3 +1,4 @@
+from datetime import datetime
 import asyncio
 import json
 import logging
@@ -177,6 +178,7 @@ class Agent:
         artifacts_on (bool): Enable artifacts
         artifacts_output_path (str): The artifacts output path
         artifacts_file_extension (str): The artifacts file extension (.pdf, .md, .txt, )
+        scheduled_run_date (datetime): The date and time to schedule the task
 
     Methods:
         run: Run the agent
@@ -333,6 +335,7 @@ class Agent:
         device: str = "cpu",
         all_cores: bool = True,
         device_id: int = 0,
+        scheduled_run_date: Optional[datetime] = None,
         *args,
         **kwargs,
     ):
@@ -445,6 +448,7 @@ class Agent:
         self.device = device
         self.all_cores = all_cores
         self.device_id = device_id
+        self.scheduled_run_date = scheduled_run_date
 
         # Initialize the short term memory
         self.short_memory = Conversation(
@@ -733,7 +737,9 @@ class Agent:
     # Check parameters
     def check_parameters(self):
         if self.llm is None:
-            raise ValueError("Language model is not provided. Choose a model from the available models in swarm_models or create a class with a run(task: str) method and or a __call__ method.")
+            raise ValueError(
+                "Language model is not provided. Choose a model from the available models in swarm_models or create a class with a run(task: str) method and or a __call__ method."
+            )
 
         if self.max_loops is None or self.max_loops == 0:
             raise ValueError("Max loops is not provided")
@@ -743,8 +749,6 @@ class Agent:
         if self.context_length == 0 or self.context_length is None:
             raise ValueError("Context length is not provided")
 
-
-
     # Main function
     def _run(
@@ -2245,14 +2249,17 @@ class Agent:
         device: str = "cpu",  # gpu
         device_id: int = 0,
         all_cores: bool = True,
+        scheduled_run_date: Optional[datetime] = None,
         *args,
         **kwargs,
     ) -> Any:
         """
-        Executes the agent's run method on a specified device.
+        Executes the agent's run method on a specified device, with optional scheduling.
 
         This method attempts to execute the agent's run method on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`.
 
+        If a `scheduled_run_date` is provided, the method will wait until that date and time before executing the task.
+
         Args:
             task (Optional[str], optional): The task to be executed. Defaults to None.
             img (Optional[str], optional): The image to be processed. Defaults to None.
             is_last (bool, optional): Indicates if this is the last task. Defaults to False.
@@ -2260,6 +2267,7 @@ class Agent:
             device (str, optional): The device to use for execution. Defaults to "cpu".
             device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0.
             all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True.
+            scheduled_run_date (Optional[datetime], optional): The date and time to schedule the task. Defaults to None.
             *args: Additional positional arguments to be passed to the execution method.
             **kwargs: Additional keyword arguments to be passed to the execution method.
 
@@ -2273,6 +2281,12 @@ class Agent:
         device = device or self.device
         device_id = device_id or self.device_id
 
+        if scheduled_run_date:
+            while datetime.now() < scheduled_run_date:
+                time.sleep(
+                    1
+                )  # Sleep for a short period to avoid busy waiting
+
         try:
             logger.info(f"Attempting to run on device: {device}")
             if device == "cpu":
+ """ + logger.info(f"Showcasing {len(agents)} available agents") + + formatted_agents = [] + header = f"\n####### Agents available in the swarm: {name} ############\n" + header += f"{description}\n" + row_format = "{:<5} | {:<20} | {:<50}" + header_row = row_format.format("ID", "Agent Name", "Description") + separator = "-" * 80 + + formatted_agents.append(header) + formatted_agents.append(separator) + formatted_agents.append(header_row) + formatted_agents.append(separator) + + for idx, agent in enumerate(agents): + if not isinstance(agent, Agent): + logger.warning( + f"Skipping non-Agent object: {type(agent)}" + ) + continue + + agent_name = get_agent_name(agent) + description = ( + get_agent_description(agent)[:100] + "..." + if len(get_agent_description(agent)) > 100 + else get_agent_description(agent) + ) + + formatted_agents.append( + row_format.format(idx + 1, agent_name, description) + ) + + showcase = "\n".join(formatted_agents) + + return showcase diff --git a/swarms/structs/auto_agent_generator.py b/swarms/structs/auto_agent_generator.py deleted file mode 100644 index 530a9404..00000000 --- a/swarms/structs/auto_agent_generator.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -This class will input a swarm type -> then auto generate a list of `Agent` structures with their name, descriptions, system prompts, and more. -""" diff --git a/swarms/structs/auto_swarm_builder 2.py b/swarms/structs/auto_swarm_builder 2.py new file mode 100644 index 00000000..177cfdc4 --- /dev/null +++ b/swarms/structs/auto_swarm_builder 2.py @@ -0,0 +1,299 @@ +from loguru import logger + +import os +from typing import List + +from pydantic import BaseModel, Field +from swarm_models import OpenAIFunctionCaller, OpenAIChat + +from swarms.structs.agent import Agent +from swarms.structs.swarm_router import SwarmRouter + + +class AgentConfig(BaseModel): + """Configuration for an individual agent in a swarm""" + + name: str = Field( + description="The name of the agent", example="Research-Agent" + ) + description: str = Field( + description="A description of the agent's purpose and capabilities", + example="Agent responsible for researching and gathering information", + ) + system_prompt: str = Field( + description="The system prompt that defines the agent's behavior", + example="You are a research agent. 
Your role is to gather and analyze information...", + ) + max_loops: int = Field( + description="Maximum number of reasoning loops the agent can perform", + example=3, + ) + + +class SwarmConfig(BaseModel): + """Configuration for a swarm of cooperative agents""" + + name: str = Field( + description="The name of the swarm", + example="Research-Writing-Swarm", + ) + description: str = Field( + description="The description of the swarm's purpose and capabilities", + example="A swarm of agents that work together to research topics and write articles", + ) + agents: List[AgentConfig] = Field( + description="The list of agents that make up the swarm", + example=[ + AgentConfig( + name="Research-Agent", + description="Gathers information", + system_prompt="You are a research agent...", + max_loops=2, + ), + AgentConfig( + name="Writing-Agent", + description="Writes content", + system_prompt="You are a writing agent...", + max_loops=1, + ), + ], + ) + max_loops: int = Field( + description="The maximum number of loops to run the swarm", + example=1, + ) + + +# Get the OpenAI API key from the environment variable +api_key = os.getenv("OPENAI_API_KEY") + +# Create an instance of the OpenAIChat class +model = OpenAIChat( + openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 +) + + +BOSS_SYSTEM_PROMPT = """ +Manage a swarm of worker agents to efficiently serve the user by deciding whether to create new agents or delegate tasks. Ensure operations are efficient and effective. + +### Instructions: + +1. **Task Assignment**: + - Analyze available worker agents when a task is presented. + - Delegate tasks to existing agents with clear, direct, and actionable instructions if an appropriate agent is available. + - If no suitable agent exists, create a new agent with a fitting system prompt to handle the task. + +2. **Agent Creation**: + - Name agents according to the task they are intended to perform (e.g., "Twitter Marketing Agent"). + - Provide each new agent with a concise and clear system prompt that includes its role, objectives, and any tools it can utilize. + +3. **Efficiency**: + - Minimize redundancy and maximize task completion speed. + - Avoid unnecessary agent creation if an existing agent can fulfill the task. + +4. **Communication**: + - Be explicit in task delegation instructions to avoid ambiguity and ensure effective task execution. + - Require agents to report back on task completion or encountered issues. + +5. **Reasoning and Decisions**: + - Offer brief reasoning when selecting or creating agents to maintain transparency. + - Avoid using an agent if unnecessary, with a clear explanation if no agents are suitable for a task. + +# Output Format + +Present your plan in clear, bullet-point format or short concise paragraphs, outlining task assignment, agent creation, efficiency strategies, and communication protocols. + +# Notes + +- Preserve transparency by always providing reasoning for task-agent assignments and creation. +- Ensure instructions to agents are unambiguous to minimize error. + +""" + + +class AutoSwarmBuilder: + """A class that automatically builds and manages swarms of AI agents. + + This class handles the creation, coordination and execution of multiple AI agents working + together as a swarm to accomplish complex tasks. It uses a boss agent to delegate work + and create new specialized agents as needed. 
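The `AgentConfig` and `SwarmConfig` models above define the schema the function-calling boss model must emit. A hand-built instance as a sketch of the expected shape; the import path assumes the file ships as `swarms/structs/auto_swarm_builder.py`, without the stray " 2" suffix:

```python
from swarms.structs.auto_swarm_builder import AgentConfig, SwarmConfig

config = SwarmConfig(
    name="Research-Writing-Swarm",
    description="Researches topics, then drafts articles",
    max_loops=1,
    agents=[
        AgentConfig(
            name="Research-Agent",
            description="Gathers information",
            system_prompt="You are a research agent...",
            max_loops=2,
        ),
    ],
)
# Pydantic v1-style serialization, matching the .dict() calls used elsewhere in the patch.
print(config.dict())
```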
+ + Args: + name (str): The name of the swarm + description (str): A description of the swarm's purpose + verbose (bool, optional): Whether to output detailed logs. Defaults to True. + max_loops (int, optional): Maximum number of execution loops. Defaults to 1. + """ + + def __init__( + self, + name: str = None, + description: str = None, + verbose: bool = True, + max_loops: int = 1, + ): + self.name = name + self.description = description + self.verbose = verbose + self.max_loops = max_loops + self.agents_pool = [] + logger.info( + f"Initialized AutoSwarmBuilder: {name} {description}" + ) + + # @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10)) + def run(self, task: str, image_url: str = None, *args, **kwargs): + """Run the swarm on a given task. + + Args: + task (str): The task to be accomplished + image_url (str, optional): URL of an image input if needed. Defaults to None. + *args: Variable length argument list + **kwargs: Arbitrary keyword arguments + + Returns: + The output from the swarm's execution + """ + logger.info(f"Running swarm on task: {task}") + agents = self._create_agents(task, image_url, *args, **kwargs) + logger.info(f"Agents created {len(agents)}") + logger.info("Routing task through swarm") + output = self.swarm_router(agents, task, image_url) + logger.info(f"Swarm execution complete with output: {output}") + return output + + # @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10)) + def _create_agents(self, task: str, *args, **kwargs): + """Create the necessary agents for a task. + + Args: + task (str): The task to create agents for + *args: Variable length argument list + **kwargs: Arbitrary keyword arguments + + Returns: + list: List of created agents + """ + logger.info("Creating agents for task") + model = OpenAIFunctionCaller( + system_prompt=BOSS_SYSTEM_PROMPT, + api_key=os.getenv("OPENAI_API_KEY"), + temperature=0.1, + base_model=SwarmConfig, + ) + + agents_dictionary = model.run(task) + logger.info(f"Agents dictionary: {agents_dictionary}") + + # Convert dictionary to SwarmConfig if needed + if isinstance(agents_dictionary, dict): + agents_dictionary = SwarmConfig(**agents_dictionary) + + # Set swarm config + self.name = agents_dictionary.name + self.description = agents_dictionary.description + self.max_loops = getattr( + agents_dictionary, "max_loops", 1 + ) # Default to 1 if not set + + logger.info( + f"Swarm config: {self.name}, {self.description}, {self.max_loops}" + ) + + # Create agents from config + agents = [] + for agent_config in agents_dictionary.agents: + # Convert dict to AgentConfig if needed + if isinstance(agent_config, dict): + agent_config = AgentConfig(**agent_config) + + agent = self.build_agent( + agent_name=agent_config.name, + agent_description=agent_config.description, + agent_system_prompt=agent_config.system_prompt, + max_loops=agent_config.max_loops, + ) + agents.append(agent) + + return agents + + def build_agent( + self, + agent_name: str, + agent_description: str, + agent_system_prompt: str, + max_loops: int = 1, + ): + """Build a single agent with the given specifications. 
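`build_agent` turns one generated spec into a concrete `Agent`. A usage sketch; note that the method relies on the module-level `OpenAIChat` model, so `OPENAI_API_KEY` must be set:

```python
builder = AutoSwarmBuilder(
    name="Content-Team",
    description="Drafts and edits blog posts",
)
writer = builder.build_agent(
    agent_name="Writing-Agent",
    agent_description="Writes long-form content",
    agent_system_prompt="You are a writing agent...",
    max_loops=1,
)
print(writer.agent_name)  # -> "Writing-Agent"
```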
+ + Args: + agent_name (str): Name of the agent + agent_description (str): Description of the agent's purpose + agent_system_prompt (str): The system prompt for the agent + + Returns: + Agent: The constructed agent instance + """ + logger.info(f"Building agent: {agent_name}") + agent = Agent( + agent_name=agent_name, + description=agent_description, + system_prompt=agent_system_prompt, + llm=model, + max_loops=max_loops, + autosave=True, + dashboard=False, + verbose=True, + dynamic_temperature_enabled=True, + saved_state_path=f"{agent_name}.json", + user_name="swarms_corp", + retry_attempts=1, + context_length=200000, + return_step_meta=False, + output_type="str", # "json", "dict", "csv" OR "string" soon "yaml" and + streaming_on=False, + auto_generate_prompt=True, + ) + + return agent + + # @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10)) + def swarm_router( + self, + agents: List[Agent], + task: str, + image_url: str = None, + *args, + **kwargs, + ): + """Route tasks between agents in the swarm. + + Args: + agents (List[Agent]): List of available agents + task (str): The task to route + image_url (str, optional): URL of an image input if needed. Defaults to None. + *args: Variable length argument list + **kwargs: Arbitrary keyword arguments + + Returns: + The output from the routed task execution + """ + logger.info("Routing task through swarm") + swarm_router_instance = SwarmRouter( + agents=agents, + swarm_type="auto", + max_loops=1, + ) + + return swarm_router_instance.run( + self.name + " " + self.description + " " + task, + ) + + +example = AutoSwarmBuilder() + +print( + example.run( + "Write multiple blog posts about the latest advancements in swarm intelligence all at once" + ) +) diff --git a/swarms/structs/conversation.py b/swarms/structs/conversation.py index 768c19c5..f808382d 100644 --- a/swarms/structs/conversation.py +++ b/swarms/structs/conversation.py @@ -6,6 +6,12 @@ import yaml from termcolor import colored from swarms.structs.base_structure import BaseStructure +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from swarms.structs.agent import ( + Agent, + ) # Only imported during type checking class Conversation(BaseStructure): @@ -392,6 +398,33 @@ class Conversation(BaseStructure): def to_yaml(self): return yaml.dump(self.conversation_history) + def get_visible_messages(self, agent: "Agent", turn: int): + """ + Get the visible messages for a given agent and turn. + + Args: + agent (Agent): The agent. + turn (int): The turn number. + + Returns: + List[Dict]: The list of visible messages. 
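This mirrors the visibility filter that previously lived in `MessagePool`. A sketch of the migrated path: entries must carry `turn` and `visible_to` keys, and since only `agent_name` is read from the agent argument, a duck-typed stand-in is used here instead of a full `Agent`:

```python
from types import SimpleNamespace

from swarms.structs.conversation import Conversation

conv = Conversation(time_enabled=True)
# The filter reads "turn" and "visible_to" from each entry, so the
# entries are appended with those fields set explicitly.
conv.conversation_history.append(
    {"role": "agent1", "content": "Hello, agent2!", "turn": 0, "visible_to": "all"}
)
conv.conversation_history.append(
    {"role": "agent2", "content": "For agent3 only", "turn": 0, "visible_to": ["agent3"]}
)

viewer = SimpleNamespace(agent_name="agent2")  # stand-in; only .agent_name is read
print(conv.get_visible_messages(agent=viewer, turn=1))
# -> only the entry marked visible_to="all"
```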
+ """ + # Get the messages before the current turn + prev_messages = [ + message + for message in self.conversation_history + if message["turn"] < turn + ] + + visible_messages = [] + for message in prev_messages: + if ( + message["visible_to"] == "all" + or agent.agent_name in message["visible_to"] + ): + visible_messages.append(message) + return visible_messages + # # Example usage # conversation = Conversation() diff --git a/swarms/structs/groupchat.py b/swarms/structs/groupchat.py index 71ea7f8d..0e347f42 100644 --- a/swarms/structs/groupchat.py +++ b/swarms/structs/groupchat.py @@ -1,72 +1,159 @@ -from typing import List, Dict +from typing import List, Dict, Optional, Union, Callable, Any from pydantic import BaseModel, Field -from swarms.structs.conversation import Conversation -from swarms.utils.loguru_logger import logger -from swarms.structs.agent import Agent +from datetime import datetime +import json from uuid import uuid4 -from swarms.schemas.agent_step_schemas import ManySteps +import logging +from swarms.structs.agent import Agent +from swarms.structs.agents_available import showcase_available_agents + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +class Message(BaseModel): + """Single message in the conversation""" + + role: str + content: str + timestamp: datetime = Field(default_factory=datetime.utcnow) + + +class AgentMetadata(BaseModel): + """Metadata for tracking agent state and configuration""" + + agent_name: str + agent_type: str + system_prompt: Optional[str] = None + description: Optional[str] = None + config: Dict[str, Any] = Field(default_factory=dict) + + +class InteractionLog(BaseModel): + """Log entry for a single interaction""" + id: str = Field(default_factory=lambda: uuid4().hex) + agent_name: str + position: int + input_text: str + output_text: str + timestamp: datetime = Field(default_factory=datetime.utcnow) + metadata: Dict[str, Any] = Field(default_factory=dict) -class GroupChatInput(BaseModel): + +class GroupChatState(BaseModel): + """Complete state of the group chat""" + + id: str = Field(default_factory=lambda: uuid4().hex) + name: Optional[str] = None + description: Optional[str] = None admin_name: str group_objective: str - agents: List[Dict[str, str]] max_rounds: int - selector_agent: Dict[str, str] - rules: str + rules: Optional[str] = None + agent_metadata: List[AgentMetadata] + messages: List[Message] + interactions: List[InteractionLog] + created_at: datetime = Field(default_factory=datetime.utcnow) + updated_at: datetime = Field(default_factory=datetime.utcnow) -class GroupChatOutput(BaseModel): - id: str = Field(uuid4().hex) - task: str = Field(..., description=None) - input_config: GroupChatInput - agent_outputs: List[ManySteps] = Field(..., description=None) +class AgentWrapper: + """Wrapper class to standardize agent interfaces""" + + def __init__( + self, + agent: Union["Agent", Callable], + agent_name: str, + system_prompt: Optional[str] = None, + ): + self.agent = agent + self.agent_name = agent_name + self.system_prompt = system_prompt + self._validate_agent() + + def _validate_agent(self): + """Validate that the agent has the required interface""" + if hasattr(self.agent, "run"): + self.run = self.agent.run + elif callable(self.agent): + self.run = self.agent + else: + raise ValueError( + "Agent must either have a 'run' method or be callable" + ) + + def get_metadata(self) -> AgentMetadata: + """Extract metadata from the agent""" + return AgentMetadata( + 
agent_name=self.agent_name, + agent_type=type(self.agent).__name__, + system_prompt=self.system_prompt, + config={ + k: v + for k, v in self.agent.__dict__.items() + if isinstance(v, (str, int, float, bool, dict, list)) + }, + ) class GroupChat: - """Manager class for a group chat. + """Enhanced GroupChat manager with state persistence and comprehensive logging. - This class handles the management of a group chat, including initializing the conversation, - selecting the next speaker, resetting the chat, and executing the chat rounds. + This class implements a multi-agent chat system with the following key features: + - State persistence to disk + - Comprehensive interaction logging + - Configurable agent selection + - Early stopping conditions + - Conversation export capabilities - Args: - agents (List[Agent], optional): List of agents participating in the group chat. Defaults to None. - max_rounds (int, optional): Maximum number of chat rounds. Defaults to 10. - admin_name (str, optional): Name of the admin user. Defaults to "Admin". - group_objective (str, optional): Objective of the group chat. Defaults to None. - selector_agent (Agent, optional): Agent responsible for selecting the next speaker. Defaults to None. - rules (str, optional): Rules for the group chat. Defaults to None. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. + The GroupChat coordinates multiple agents to have a goal-directed conversation, + with one agent speaking at a time based on a selector agent's decisions. Attributes: - agents (List[Agent]): List of agents participating in the group chat. - max_rounds (int): Maximum number of chat rounds. - admin_name (str): Name of the admin user. - group_objective (str): Objective of the group chat. - selector_agent (Agent): Agent responsible for selecting the next speaker. - messages (Conversation): Conversation object for storing the chat messages. - + name (Optional[str]): Name of the group chat + description (Optional[str]): Description of the group chat's purpose + agents (List[Union["Agent", Callable]]): List of participating agents + max_rounds (int): Maximum number of conversation rounds + admin_name (str): Name of the administrator + group_objective (str): The goal/objective of the conversation + selector_agent (Union["Agent", Callable]): Agent that selects next speaker + rules (Optional[str]): Rules governing the conversation + state_path (Optional[str]): Path to save conversation state + showcase_agents_on (bool): Whether to showcase agent capabilities """ def __init__( self, - name: str = None, - description: str = None, - agents: List[Agent] = None, + name: Optional[str] = None, + description: Optional[str] = None, + agents: List[Union["Agent", Callable]] = None, max_rounds: int = 10, admin_name: str = "Admin", group_objective: str = None, - selector_agent: Agent = None, - rules: str = None, - *args, - **kwargs, + selector_agent: Union["Agent", Callable] = None, + rules: Optional[str] = None, + state_path: Optional[str] = None, + showcase_agents_on: bool = False, ): - # super().__init__(agents = agents, *args, **kwargs) - if not agents: - raise ValueError( - "Agents cannot be empty. Add more agents." - ) + """Initialize a new GroupChat instance. 
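A minimal, self-contained construction sketch. Plain callables are used because `AgentWrapper` accepts them alongside full `Agent` instances; names and the objective are illustrative:

```python
from swarms.structs.groupchat import GroupChat

def analyst(prompt: str) -> str:
    return "Analysis: the findings look consistent."

def critic(prompt: str) -> str:
    return "Critique: minor gaps remain. TASK_COMPLETE"

chat = GroupChat(
    name="Research-Review",
    description="Two agents iterate on a research summary",
    agents=[analyst, critic],  # wrapped as Agent_0 / Agent_1
    max_rounds=4,
    group_objective="Produce a reviewed summary of the findings",
    state_path="research_review_state.json",
)
# "TASK_COMPLETE" in a response triggers the early-stopping condition.
print(chat.run("Review the attached findings"))
```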
+ + Args: + name: Name of the group chat + description: Description of the group chat's purpose + agents: List of participating agents + max_rounds: Maximum number of conversation rounds + admin_name: Name of the administrator + group_objective: The goal/objective of the conversation + selector_agent: Agent that selects next speaker + rules: Rules governing the conversation + state_path: Path to save conversation state + showcase_agents_on: Whether to showcase agent capabilities + + Raises: + ValueError: If no agents are provided + """ self.name = name self.description = description self.agents = agents @@ -74,184 +161,327 @@ class GroupChat: self.admin_name = admin_name self.group_objective = group_objective self.selector_agent = selector_agent + self.rules = rules + self.state_path = state_path + self.showcase_agents_on = showcase_agents_on - # Initialize the conversation - self.message_history = Conversation( - system_prompt=self.group_objective, - time_enabled=True, - user=self.admin_name, - rules=rules, - *args, - **kwargs, + if not agents: + raise ValueError("At least two agents are required") + + # Generate unique state path if not provided + self.state_path = ( + state_path or f"group_chat_{uuid4().hex}.json" ) - # Initialize log for interactions - self.group_log = GroupChatLog( - admin_name=self.admin_name, - group_objective=self.group_objective, + # Wrap all agents to standardize interface + self.wrapped_agents = [ + AgentWrapper( + agent, + ( + f"Agent_{i}" + if not hasattr(agent, "agent_name") + else agent.agent_name + ), + ) + for i, agent in enumerate(agents) + ] + + # Configure selector agent + self.selector_agent = AgentWrapper( + selector_agent or self.wrapped_agents[0].agent, + "Selector", + "Select the next speaker based on the conversation context", ) - @property - def agent_names(self) -> List[str]: - """Return the names of the agents in the group chat.""" - return [agent.agent_name for agent in self.agents] + # Initialize conversation state + self.state = GroupChatState( + name=name, + description=description, + admin_name=admin_name, + group_objective=group_objective, + max_rounds=max_rounds, + rules=rules, + agent_metadata=[ + agent.get_metadata() for agent in self.wrapped_agents + ], + messages=[], + interactions=[], + ) - def reset(self): - """Reset the group chat.""" - logger.info("Resetting GroupChat") - self.message_history.clear() + # Showcase agents if enabled + if self.showcase_agents_on is True: + self.showcase_agents() - def agent_by_name(self, name: str) -> Agent: - """Find an agent whose name is contained within the given 'name' string. + def showcase_agents(self): + """Showcase available agents and update their system prompts. - Args: - name (str): Name string to search for. + This method displays agent capabilities and updates each agent's + system prompt with information about other agents in the group. + """ + out = showcase_available_agents( + name=self.name, + description=self.description, + agents=self.wrapped_agents, + ) - Returns: - Agent: Agent object with a name contained in the given 'name' string. + for agent in self.wrapped_agents: + # Initialize system_prompt if None + if agent.system_prompt is None: + agent.system_prompt = "" + agent.system_prompt += out - Raises: - ValueError: If no agent is found with a name contained in the given 'name' string. + def save_state(self) -> None: + """Save current conversation state to disk. + The state is saved as a JSON file at the configured state_path. 
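A quick persistence round trip, reusing the `chat` instance from the construction sketch above; `save_state` is also invoked automatically whenever a message or interaction is recorded:

```python
import json

chat.save_state()

with open(chat.state_path) as f:
    snapshot = json.load(f)
print(snapshot["group_objective"], len(snapshot["messages"]))
```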
""" - for agent in self.agents: - if agent.agent_name in name: - return agent - raise ValueError( - f"No agent found with a name contained in '{name}'." - ) + with open(self.state_path, "w") as f: + json.dump(self.state.dict(), f, default=str, indent=2) + logger.info(f"State saved to {self.state_path}") - def next_agent(self, agent: Agent) -> Agent: - """Return the next agent in the list. + @classmethod + def load_state(cls, state_path: str) -> "GroupChat": + """Load GroupChat from saved state. Args: - agent (Agent): Current agent. + state_path: Path to the saved state JSON file Returns: - Agent: Next agent in the list. + GroupChat: A new GroupChat instance with restored state + Raises: + FileNotFoundError: If state file doesn't exist + json.JSONDecodeError: If state file is invalid JSON """ - return self.agents[ - (self.agent_names.index(agent.agent_name) + 1) - % len(self.agents) - ] + with open(state_path, "r") as f: + state_dict = json.load(f) + + # Convert loaded data back to state model + state = GroupChatState(**state_dict) + + # Initialize with minimal config, then restore state + instance = cls( + name=state.name, + admin_name=state.admin_name, + agents=[], # Temporary empty list + group_objective=state.group_objective, + ) + instance.state = state + return instance - def select_speaker_msg(self): - """Return the message for selecting the next speaker.""" - prompt = f""" - You are in a role play game. The following roles are available: - {self._participant_roles()}. + def _log_interaction( + self, + agent_name: str, + position: int, + input_text: str, + output_text: str, + ) -> None: + """Log a single interaction in the conversation. - Read the following conversation. - Then select the next role from {self.agent_names} to play. Only return the role. + Args: + agent_name: Name of the speaking agent + position: Position in conversation sequence + input_text: Input context provided to agent + output_text: Agent's response """ - return prompt + log_entry = InteractionLog( + agent_name=agent_name, + position=position, + input_text=input_text, + output_text=output_text, + metadata={ + "current_agents": [ + a.agent_name for a in self.wrapped_agents + ], + "round": position // len(self.wrapped_agents), + }, + ) + self.state.interactions.append(log_entry) + self.save_state() - def select_speaker( - self, last_speaker_agent: Agent, selector_agent: Agent - ) -> Agent: - """Select the next speaker. + def _add_message(self, role: str, content: str) -> None: + """Add a message to the conversation history. Args: - last_speaker_agent (Agent): Last speaker in the conversation. - selector_agent (Agent): Agent responsible for selecting the next speaker. - - Returns: - Agent: Next speaker. - + role: Speaker's role/name + content: Message content """ - logger.info("Selecting a new speaker") - selector_agent.system_prompt = self.select_speaker_msg() - - n_agents = len(self.agent_names) - if n_agents < 3: - logger.warning( - f"GroupChat is underpopulated with {n_agents} agents. Direct communication might be more efficient." - ) - - self.message_history.add( - role=self.admin_name, - content=f"Read the above conversation. Then select the next most suitable role from {self.agent_names} to play. 
Only return the role.", - ) + message = Message(role=role, content=content) + self.state.messages.append(message) + self.save_state() - name = selector_agent.run( - self.message_history.return_history_as_string() - ) - try: - selected_agent = self.agent_by_name(name) - return selected_agent - except ValueError: - return self.next_agent(last_speaker_agent) + def select_next_speaker( + self, last_speaker: AgentWrapper + ) -> AgentWrapper: + """Select the next speaker using the selector agent. - def _participant_roles(self): - """Print the roles of the participants. + Args: + last_speaker: The agent who spoke last Returns: - str: Participant roles. + AgentWrapper: The next agent to speak + Note: + Falls back to round-robin selection if selector agent fails """ - return "\n".join( + conversation_history = "\n".join( [ - f"{agent.agent_name}: {agent.system_prompt}" - for agent in self.agents + f"{msg.role}: {msg.content}" + for msg in self.state.messages ] ) - def run(self, task: str, *args, **kwargs): - """Call 'GroupChatManager' instance as a function. + selection_prompt = f""" + Current speakers: {[agent.agent_name for agent in self.wrapped_agents]} + Last speaker: {last_speaker.agent_name} + Group objective: {self.state.group_objective} + + Based on the conversation history and group objective, select the next most appropriate speaker. + Only return the speaker's name. + + Conversation history: + {conversation_history} + """ + + try: + next_speaker_name = self.selector_agent.run( + selection_prompt + ).strip() + return next( + agent + for agent in self.wrapped_agents + if agent.agent_name in next_speaker_name + ) + except (StopIteration, Exception) as e: + logger.warning( + f"Selector agent failed: {str(e)}. Falling back to round-robin." + ) + # Fallback to round-robin if selection fails + current_idx = self.wrapped_agents.index(last_speaker) + return self.wrapped_agents[ + (current_idx + 1) % len(self.wrapped_agents) + ] + + def run(self, task: str) -> str: + """Execute the group chat conversation. Args: - task (str): Task to be performed. + task: The initial task/question to discuss Returns: - str: Reply from the last speaker. 
+ str: The final response from the conversation + Raises: + Exception: If any error occurs during execution """ try: - logger.info( - f"Activating GroupChat with {len(self.agents)} Agents" - ) - self.message_history.add( - self.selector_agent.agent_name, task - ) + logger.info(f"Starting GroupChat with task: {task}") + self._add_message(self.state.admin_name, task) - for i in range(self.max_rounds): - speaker_agent = self.select_speaker( - last_speaker_agent=self.selector_agent, - selector_agent=self.selector_agent, + current_speaker = self.wrapped_agents[0] + final_response = None + + for round_num in range(self.state.max_rounds): + # Select next speaker + current_speaker = self.select_next_speaker( + current_speaker ) logger.info( - f"Next speaker selected: {speaker_agent.agent_name}" + f"Selected speaker: {current_speaker.agent_name}" ) - reply = speaker_agent.run( - self.message_history.return_history_as_string(), - *args, - **kwargs, - ) - self.message_history.add( - speaker_agent.agent_name, reply + # Prepare context and get response + conversation_history = "\n".join( + [ + f"{msg.role}: {msg.content}" + for msg in self.state.messages[ + -10: + ] # Last 10 messages for context + ] ) - # Log the interaction - self.group_log.log_interaction( - agent_name=speaker_agent.agent_name, - position=i, - input_text=self.message_history.return_history_as_string(), - output_text=reply, + try: + response = current_speaker.run( + conversation_history + ) + final_response = response + except Exception as e: + logger.error( + f"Agent {current_speaker.agent_name} failed: {str(e)}" + ) + continue + + # Log interaction and add to message history + self._log_interaction( + current_speaker.agent_name, + round_num, + conversation_history, + response, + ) + self._add_message( + current_speaker.agent_name, response ) - if i == self.max_rounds - 1: + # Optional: Add early stopping condition based on response content + if ( + "TASK_COMPLETE" in response + or "CONCLUSION" in response + ): + logger.info( + "Task completion detected, ending conversation" + ) break - return reply + return final_response or "No valid response generated" - except Exception as error: - logger.error( - f"Error detected: {error}. Please optimize the inputs and submit an issue on the swarms GitHub." - ) - raise error + except Exception as e: + logger.error(f"Error in GroupChat execution: {str(e)}") + raise + + def get_conversation_summary(self) -> Dict[str, Any]: + """Return a summary of the conversation. - def get_group_log_as_json(self) -> str: - """Return the interaction log as a JSON string.""" - return self.group_log.return_json() + Returns: + Dict containing conversation metrics and status + """ + return { + "id": self.state.id, + "total_interactions": len(self.state.interactions), + "participating_agents": [ + agent.agent_name for agent in self.wrapped_agents + ], + "conversation_length": len(self.state.messages), + "duration": ( + datetime.utcnow() - self.state.created_at + ).total_seconds(), + "objective_completed": any( + "TASK_COMPLETE" in msg.content + for msg in self.state.messages + ), + } + + def export_conversation( + self, format: str = "json" + ) -> Union[str, Dict]: + """Export the conversation in the specified format. 
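Both inspection helpers compose naturally with the same `chat` instance:

```python
summary = chat.get_conversation_summary()
print(summary["total_interactions"], summary["objective_completed"])

print(chat.export_conversation(format="text"))  # timestamped transcript
```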
+ + Args: + format: Output format ("json" or "text") + + Returns: + Union[str, Dict]: Conversation in requested format + + Raises: + ValueError: If format is not supported + """ + if format == "json": + return self.state.dict() + elif format == "text": + return "\n".join( + [ + f"{msg.role} ({msg.timestamp}): {msg.content}" + for msg in self.state.messages + ] + ) + else: + raise ValueError(f"Unsupported export format: {format}") diff --git a/swarms/structs/message_pool.py b/swarms/structs/message_pool.py deleted file mode 100644 index 3f7a6343..00000000 --- a/swarms/structs/message_pool.py +++ /dev/null @@ -1,214 +0,0 @@ -import hashlib -from time import time_ns -from typing import Callable, List, Optional, Sequence, Union - -from swarms.structs.agent import Agent -from swarms.utils.loguru_logger import logger -from swarms.structs.base_swarm import BaseSwarm - - -def _hash(input: str): - """ - Hashes the input string using SHA256 algorithm. - - Args: - input (str): The string to be hashed. - - Returns: - str: The hexadecimal representation of the hash value. - """ - hex_dig = hashlib.sha256(input.encode("utf-8")).hexdigest() - return hex_dig - - -def msg_hash( - agent: Agent, content: str, turn: int, msg_type: str = "text" -): - """ - Generate a hash value for a message. - - Args: - agent (Agent): The agent sending the message. - content (str): The content of the message. - turn (int): The turn number of the message. - msg_type (str, optional): The type of the message. Defaults to "text". - - Returns: - int: The hash value of the message. - """ - time = time_ns() - return _hash( - f"agent: {agent.agent_name}\ncontent: {content}\ntimestamp:" - f" {str(time)}\nturn: {turn}\nmsg_type: {msg_type}" - ) - - -class MessagePool(BaseSwarm): - """ - A class representing a message pool for agents in a swarm. - - Attributes: - agents (Optional[Sequence[Agent]]): The list of agents in the swarm. - moderator (Optional[Agent]): The moderator agent. - turns (Optional[int]): The number of turns. - routing_function (Optional[Callable]): The routing function for message distribution. - show_names (Optional[bool]): Flag indicating whether to show agent names. - messages (List[Dict]): The list of messages in the pool. 
- - Examples: - >>> from swarms.structs.agent import Agent - >>> from swarms.structs.message_pool import MessagePool - >>> agent1 = Agent(agent_name="agent1") - >>> agent2 = Agent(agent_name="agent2") - >>> agent3 = Agent(agent_name="agent3") - >>> moderator = Agent(agent_name="moderator") - >>> agents = [agent1, agent2, agent3] - >>> message_pool = MessagePool(agents=agents, moderator=moderator, turns=5) - >>> message_pool.add(agent=agent1, content="Hello, agent2!", turn=1) - >>> message_pool.add(agent=agent2, content="Hello, agent1!", turn=1) - >>> message_pool.add(agent=agent3, content="Hello, agent1!", turn=1) - >>> message_pool.get_all_messages() - [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}] - >>> message_pool.get_visible_messages(agent=agent1, turn=1) - [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}] - >>> message_pool.get_visible_messages(agent=agent2, turn=1) - [{'agent': Agent(agent_name='agent1'), 'content': 'Hello, agent2!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent2'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}, {'agent': Agent(agent_name='agent3'), 'content': 'Hello, agent1!', 'turn': 1, 'visible_to': 'all', 'logged': True}] - """ - - def __init__( - self, - agents: Optional[Sequence[Agent]] = None, - moderator: Optional[Agent] = None, - turns: Optional[int] = 5, - routing_function: Optional[Callable] = None, - show_names: Optional[bool] = False, - autosave: Optional[bool] = False, - *args, - **kwargs, - ): - super().__init__() - - self.agent = agents - self.moderator = moderator - self.turns = turns - self.routing_function = routing_function - self.show_names = show_names - self.autosave = autosave - - self.messages = [] - - logger.info("MessagePool initialized") - logger.info(f"Number of agents: {len(agents)}") - logger.info( - f"Agents: {[agent.agent_name for agent in agents]}" - ) - logger.info(f"moderator: {moderator.agent_name} is available") - logger.info(f"Number of turns: {turns}") - - def add( - self, - agent: Agent, - content: str, - turn: int, - visible_to: Union[str, List[str]] = "all", - logged: bool = True, - ): - """ - Add a message to the pool. - - Args: - agent (Agent): The agent sending the message. - content (str): The content of the message. - turn (int): The turn number. - visible_to (Union[str, List[str]], optional): The agents who can see the message. Defaults to "all". - logged (bool, optional): Flag indicating whether the message should be logged. Defaults to True. - """ - - self.messages.append( - { - "agent": agent, - "content": content, - "turn": turn, - "visible_to": visible_to, - "logged": logged, - } - ) - logger.info(f"Message added: {content}") - - def reset(self): - """ - Reset the message pool. - """ - self.messages = [] - logger.info("MessagePool reset") - - def last_turn(self): - """ - Get the last turn number. - - Returns: - int: The last turn number. 
- """ - if len(self.messages) == 0: - return 0 - else: - return self.messages[-1]["turn"] - - @property - def last_message(self): - """ - Get the last message in the pool. - - Returns: - dict: The last message. - """ - if len(self.messages) == 0: - return None - else: - return self.messages[-1] - - def get_all_messages(self): - """ - Get all messages in the pool. - - Returns: - List[Dict]: The list of all messages. - """ - return self.messages - - def get_visible_messages(self, agent: Agent, turn: int): - """ - Get the visible messages for a given agent and turn. - - Args: - agent (Agent): The agent. - turn (int): The turn number. - - Returns: - List[Dict]: The list of visible messages. - """ - # Get the messages before the current turn - prev_messages = [ - message - for message in self.messages - if message["turn"] < turn - ] - - visible_messages = [] - for message in prev_messages: - if ( - message["visible_to"] == "all" - or agent.agent_name in message["visible_to"] - ): - visible_messages.append(message) - return visible_messages - - # def query(self, query: str): - # """ - # Query a message from the messages list and then pass it to the moderator - # """ - # return [ - # (mod, content) - # for mod, content, _ in self.messages # Add an underscore to ignore the rest of the elements - # if query in content - # ] diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py index cccca322..feb93eaf 100644 --- a/swarms/structs/mixture_of_agents.py +++ b/swarms/structs/mixture_of_agents.py @@ -6,7 +6,7 @@ from loguru import logger from pydantic import BaseModel, Field from swarms.structs.agent import Agent -from swarms.telemetry.log_swarm_data import log_agent_data +from swarms.telemetry.capture_sys_data import log_agent_data from swarms.schemas.agent_step_schemas import ManySteps from swarms.prompts.ag_prompt import aggregator_system_prompt diff --git a/swarms/structs/rearrange.py b/swarms/structs/rearrange.py index 01c0f7b5..231cab16 100644 --- a/swarms/structs/rearrange.py +++ b/swarms/structs/rearrange.py @@ -17,11 +17,18 @@ from swarms.utils.loguru_logger import logger from swarms.utils.wrapper_clusterop import ( exec_callable_with_clusterops, ) -from swarms.utils.swarm_reliability_checks import reliability_check # Literal of output types OutputType = Literal[ - "all", "final", "list", "dict", ".json", ".md", ".txt", ".yaml", ".toml" + "all", + "final", + "list", + "dict", + ".json", + ".md", + ".txt", + ".yaml", + ".toml", ] @@ -451,14 +458,16 @@ class AgentRearrange(BaseSwarm): return output except Exception as e: - logger.error(f"An error occurred: {e} \n {traceback.format_exc()}") + logger.error( + f"An error occurred: {e} \n {traceback.format_exc()}" + ) return e def run( self, task: str = None, img: str = None, - device: str = "cpu", + device: str = "cpu", device_id: int = 1, all_cores: bool = True, all_gpus: bool = False, @@ -492,7 +501,7 @@ class AgentRearrange(BaseSwarm): *args, **kwargs, ) - + def __call__(self, task: str, *args, **kwargs): """ Make the class callable by executing the run() method. 
diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py index 0b576df9..1fc0fe8a 100644 --- a/swarms/structs/sequential_workflow.py +++ b/swarms/structs/sequential_workflow.py @@ -44,8 +44,6 @@ class SequentialWorkflow: self.reliability_check() - - self.agent_rearrange = AgentRearrange( name=name, description=description, @@ -58,10 +56,10 @@ class SequentialWorkflow: *args, **kwargs, ) - + # Handle agent showcase self.handle_agent_showcase() - + def sequential_flow(self): # Only create flow if agents exist if self.agents: @@ -70,21 +68,28 @@ class SequentialWorkflow: for agent in self.agents: try: # Try to get agent_name, fallback to name if not available - agent_name = getattr(agent, 'agent_name', None) or agent.name + agent_name = ( + getattr(agent, "agent_name", None) + or agent.name + ) agent_names.append(agent_name) except AttributeError: - logger.warning(f"Could not get name for agent {agent}") + logger.warning( + f"Could not get name for agent {agent}" + ) continue - + if agent_names: flow = " -> ".join(agent_names) else: flow = "" - logger.warning("No valid agent names found to create flow") + logger.warning( + "No valid agent names found to create flow" + ) else: flow = "" logger.warning("No agents provided to create flow") - + return flow def reliability_check(self): @@ -93,9 +98,11 @@ class SequentialWorkflow: if self.max_loops == 0: raise ValueError("max_loops cannot be 0") - + if self.output_type not in OutputType: - raise ValueError("output_type must be 'all', 'final', 'list', 'dict', '.json', '.md', '.txt', '.yaml', or '.toml'") + raise ValueError( + "output_type must be 'all', 'final', 'list', 'dict', '.json', '.md', '.txt', '.yaml', or '.toml'" + ) logger.info("Checks completed your swarm is ready.") diff --git a/swarms/structs/spreadsheet_swarm.py b/swarms/structs/spreadsheet_swarm.py index c573b8d7..51b022ea 100644 --- a/swarms/structs/spreadsheet_swarm.py +++ b/swarms/structs/spreadsheet_swarm.py @@ -12,7 +12,7 @@ from pydantic import BaseModel, Field from swarms.structs.agent import Agent from swarms.structs.base_swarm import BaseSwarm from swarms.utils.file_processing import create_file_in_folder -from swarms.telemetry.log_swarm_data import log_agent_data +from swarms.telemetry.capture_sys_data import log_agent_data time = datetime.datetime.now().isoformat() uuid_hex = uuid.uuid4().hex diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py index e14ff9e7..79115b6d 100644 --- a/swarms/structs/swarm_router.py +++ b/swarms/structs/swarm_router.py @@ -187,7 +187,6 @@ class SwarmRouter: # Add documents to the logs # self.logs.append(Document(file_path=self.documents, data=data)) - def activate_shared_memory(self): logger.info("Activating shared memory with all agents ") @@ -451,7 +450,7 @@ class SwarmRouter: def __call__(self, task: str, *args, **kwargs) -> Any: """ Make the SwarmRouter instance callable. - + Args: task (str): The task to be executed by the swarm. *args: Variable length argument list. @@ -611,7 +610,10 @@ class SwarmRouter: Raises: Exception: If an error occurs during task execution. 
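The batched execution rewritten above streams results with `as_completed` rather than blocking on the whole batch. The pattern in isolation, with a stand-in for `SwarmRouter.run`:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed

def run(task: str) -> str:  # stand-in for SwarmRouter.run
    return f"done: {task}"

tasks = ["summarize", "translate", "classify"]
results = []
with ThreadPoolExecutor() as executor:
    futures = [executor.submit(run, t) for t in tasks]
    # Consume results in completion order instead of blocking on the batch.
    for future in as_completed(futures):
        try:
            results.append(future.result())
        except Exception:
            results.append(None)  # keep one slot per task even on failure
print(results)
```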
""" - from concurrent.futures import ThreadPoolExecutor, as_completed + from concurrent.futures import ( + ThreadPoolExecutor, + as_completed, + ) results = [] with ThreadPoolExecutor() as executor: @@ -620,7 +622,7 @@ class SwarmRouter: executor.submit(self.run, task, *args, **kwargs) for task in tasks ] - + # Process results as they complete rather than waiting for all for future in as_completed(futures): try: @@ -629,7 +631,7 @@ class SwarmRouter: except Exception as e: logger.error(f"Task execution failed: {str(e)}") results.append(None) - + return results diff --git a/swarms/telemetry/log_swarm_data.py b/swarms/telemetry/log_swarm_data.py deleted file mode 100644 index ffb72ab4..00000000 --- a/swarms/telemetry/log_swarm_data.py +++ /dev/null @@ -1,16 +0,0 @@ -def log_agent_data(data: dict): - import requests - - data_dict = { - "data": data, - } - - url = "https://swarms.world/api/get-agents/log-agents" - headers = { - "Content-Type": "application/json", - "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869", - } - - response = requests.post(url, json=data_dict, headers=headers) - - return response.json() diff --git a/swarms/telemetry/sys_info.py b/swarms/telemetry/sys_info.py index c4a0692a..2739362f 100644 --- a/swarms/telemetry/sys_info.py +++ b/swarms/telemetry/sys_info.py @@ -10,7 +10,12 @@ def get_python_version(): return platform.python_version() -def get_pip_version(): +def get_pip_version() -> str: + """Get pip version + + Returns: + str: The version of pip installed + """ try: pip_version = ( subprocess.check_output(["pip", "--version"]) @@ -22,7 +27,12 @@ def get_pip_version(): return pip_version -def get_swarms_verison(): +def get_swarms_verison() -> tuple[str, str]: + """Get swarms version from both command line and package + + Returns: + tuple[str, str]: A tuple containing (command line version, package version) + """ try: swarms_verison_cmd = ( subprocess.check_output(["swarms", "--version"]) @@ -38,15 +48,30 @@ def get_swarms_verison(): return swarms_verison -def get_os_version(): +def get_os_version() -> str: + """Get operating system version + + Returns: + str: The operating system version and platform details + """ return platform.platform() -def get_cpu_info(): +def get_cpu_info() -> str: + """Get CPU information + + Returns: + str: The processor information + """ return platform.processor() -def get_ram_info(): +def get_ram_info() -> str: + """Get RAM information + + Returns: + str: A formatted string containing total, used and free RAM in GB + """ vm = psutil.virtual_memory() used_ram_gb = vm.used / (1024**3) free_ram_gb = vm.free / (1024**3) @@ -57,7 +82,15 @@ def get_ram_info(): ) -def get_package_mismatches(file_path="pyproject.toml"): +def get_package_mismatches(file_path: str = "pyproject.toml") -> str: + """Get package version mismatches between pyproject.toml and installed packages + + Args: + file_path (str, optional): Path to pyproject.toml file. Defaults to "pyproject.toml". 
+ + Returns: + str: A formatted string containing package version mismatches + """ with open(file_path) as file: pyproject = toml.load(file) dependencies = pyproject["tool"]["poetry"]["dependencies"] @@ -89,7 +122,12 @@ def get_package_mismatches(file_path="pyproject.toml"): return "\n" + "\n".join(mismatches) -def system_info(): +def system_info() -> dict[str, str]: + """Get system information including Python, pip, OS, CPU and RAM details + + Returns: + dict[str, str]: A dictionary containing system information + """ return { "Python Version": get_python_version(), "Pip Version": get_pip_version(), diff --git a/swarms/utils/add_docs_to_agents 2.py b/swarms/utils/add_docs_to_agents 2.py new file mode 100644 index 00000000..8dbc1df3 --- /dev/null +++ b/swarms/utils/add_docs_to_agents 2.py @@ -0,0 +1,141 @@ +from typing import Any, List, Optional, Union +from pathlib import Path +from loguru import logger +from doc_master import doc_master +from concurrent.futures import ThreadPoolExecutor, as_completed +from tenacity import retry, stop_after_attempt, wait_exponential + + +@retry( + stop=stop_after_attempt(3), + wait=wait_exponential(multiplier=1, min=4, max=10), +) +def _process_document(doc_path: Union[str, Path]) -> str: + """Safely process a single document with retries. + + Args: + doc_path: Path to the document to process + + Returns: + Processed document text + + Raises: + Exception: If document processing fails after retries + """ + try: + return doc_master( + file_path=str(doc_path), output_type="string" + ) + except Exception as e: + logger.error( + f"Error processing document {doc_path}: {str(e)}" + ) + raise + + +def handle_input_docs( + agents: Any, + docs: Optional[List[Union[str, Path]]] = None, + doc_folder: Optional[Union[str, Path]] = None, + max_workers: int = 4, + chunk_size: int = 1000000, +) -> Any: + """ + Add document content to agent prompts with improved reliability and performance. 
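A usage sketch for the document loader, assuming `doc_master` is installed, that the file ships as `swarms/utils/add_docs_to_agents.py`, and with `analyst`/`writer` as placeholder `Agent` instances:

```python
from swarms.utils.add_docs_to_agents import handle_input_docs

# `analyst` and `writer` are placeholder Agent instances; paths are illustrative.
agents = {"analyst": analyst, "writer": writer}

handle_input_docs(
    agents=agents,
    docs=["reports/q3_summary.pdf", "notes/meeting.md"],
    max_workers=4,
)
# Each agent's system_prompt now carries the chunked document text.
```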
+ + Args: + agents: Dictionary mapping agent names to Agent objects + docs: List of document paths + doc_folder: Path to folder containing documents + max_workers: Maximum number of parallel document processing workers + chunk_size: Maximum characters to process at once to avoid memory issues + + Raises: + ValueError: If neither docs nor doc_folder is provided + RuntimeError: If document processing fails + """ + if not agents: + logger.warning( + "No agents provided, skipping document distribution" + ) + return + + if not docs and not doc_folder: + logger.warning( + "No documents or folder provided, skipping document distribution" + ) + return + + logger.info("Starting document distribution to agents") + + try: + processed_docs = [] + + # Process individual documents in parallel + if docs: + with ThreadPoolExecutor( + max_workers=max_workers + ) as executor: + future_to_doc = { + executor.submit(_process_document, doc): doc + for doc in docs + } + + for future in as_completed(future_to_doc): + doc = future_to_doc[future] + try: + processed_docs.append(future.result()) + except Exception as e: + logger.error( + f"Failed to process document {doc}: {str(e)}" + ) + raise RuntimeError( + f"Document processing failed: {str(e)}" + ) + + # Process folder if specified + elif doc_folder: + try: + folder_content = doc_master( + folder_path=str(doc_folder), output_type="string" + ) + processed_docs.append(folder_content) + except Exception as e: + logger.error( + f"Failed to process folder {doc_folder}: {str(e)}" + ) + raise RuntimeError( + f"Folder processing failed: {str(e)}" + ) + + # Combine and chunk the processed documents + combined_data = "\n".join(processed_docs) + + # Update agent prompts in chunks to avoid memory issues + for agent in agents.values(): + try: + for i in range(0, len(combined_data), chunk_size): + chunk = combined_data[i : i + chunk_size] + if i == 0: + agent.system_prompt += ( + "\nDocuments:\n" + chunk + ) + else: + agent.system_prompt += chunk + except Exception as e: + logger.error( + f"Failed to update agent prompt: {str(e)}" + ) + raise RuntimeError( + f"Agent prompt update failed: {str(e)}" + ) + + logger.info( + f"Successfully added documents to {len(agents)} agents" + ) + + return agents + + except Exception as e: + logger.error(f"Document distribution failed: {str(e)}") + raise RuntimeError(f"Document distribution failed: {str(e)}") diff --git a/swarms/utils/any_to_str 2.py b/swarms/utils/any_to_str 2.py new file mode 100644 index 00000000..125e233e --- /dev/null +++ b/swarms/utils/any_to_str 2.py @@ -0,0 +1,102 @@ +from typing import Union, Dict, List, Tuple, Any + + +def any_to_str(data: Union[str, Dict, List, Tuple, Any]) -> str: + """Convert any input data type to a nicely formatted string. + + This function handles conversion of various Python data types into a clean string representation. + It recursively processes nested data structures and handles None values gracefully. + + Args: + data: Input data of any type to convert to string. Can be: + - Dictionary + - List/Tuple + - String + - None + - Any other type that can be converted via str() + + Returns: + str: A formatted string representation of the input data. 
+ - Dictionaries are formatted as "key: value" pairs separated by commas + - Lists/tuples are comma-separated + - None returns empty string + - Other types are converted using str() + + Examples: + >>> any_to_str({'a': 1, 'b': 2}) + 'a: 1, b: 2' + >>> any_to_str([1, 2, 3]) + '1, 2, 3' + >>> any_to_str(None) + '' + """ + try: + if isinstance(data, dict): + # Format dictionary with newlines and indentation + items = [] + for k, v in data.items(): + value = any_to_str(v) + items.append(f"{k}: {value}") + return "\n".join(items) + + elif isinstance(data, (list, tuple)): + # Format sequences with brackets and proper spacing + items = [any_to_str(x) for x in data] + if len(items) == 0: + return "[]" if isinstance(data, list) else "()" + return ( + f"[{', '.join(items)}]" + if isinstance(data, list) + else f"({', '.join(items)})" + ) + + elif data is None: + return "None" + + else: + # Handle strings and other types + if isinstance(data, str): + return f'"{data}"' + return str(data) + + except Exception as e: + return f"Error converting data: {str(e)}" + + +def main(): + # Example 1: Dictionary + print("Dictionary:") + print( + any_to_str( + { + "name": "John", + "age": 30, + "hobbies": ["reading", "hiking"], + } + ) + ) + + print("\nNested Dictionary:") + print( + any_to_str( + { + "user": { + "id": 123, + "details": {"city": "New York", "active": True}, + }, + "data": [1, 2, 3], + } + ) + ) + + print("\nList and Tuple:") + print(any_to_str([1, "text", None, (1, 2)])) + print(any_to_str((True, False, None))) + + print("\nEmpty Collections:") + print(any_to_str([])) + print(any_to_str({})) + + +if __name__ == "__main__": + main() diff --git a/swarms/utils/data_to_text.py b/swarms/utils/data_to_text.py index f4d12fc1..562f8098 100644 --- a/swarms/utils/data_to_text.py +++ b/swarms/utils/data_to_text.py @@ -137,53 +137,3 @@ def data_to_text(file: str) -> str: return data except Exception as e: raise OSError(f"Error reading file: {file}") from e - - -def data_to_text(file): - """ - Converts the given data file to text format. - - Args: - file (str): The path to the data file. - - Returns: - str: The text representation of the data file. - - Raises: - FileNotFoundError: If the file does not exist. - IOError: If there is an error reading the file. - - Examples: - >>> data_to_text("data.csv") - 'This is the text representation of the data file.' 
- - """ - if not os.path.exists(file): - raise FileNotFoundError(f"File not found: {file}") - - try: - _, ext = os.path.splitext(file) - ext = ( - ext.lower() - ) # Convert extension to lowercase for case-insensitive comparison - if ext == ".csv": - return csv_to_text(file) - elif ext == ".json": - return json_to_text(file) - elif ext == ".txt": - return txt_to_text(file) - elif ext == ".pdf": - return pdf_to_text(file) - elif ext == ".md": - return md_to_text(file) - else: - # Check if the file is a binary file (like an image) - if ext in [".png", ".jpg", ".jpeg", ".gif", ".bmp"]: - # Skip binary files - return None - else: - with open(file) as file: - data = file.read() - return data - except Exception as e: - raise OSError(f"Error reading file: {file}") from e diff --git a/swarms/utils/decorators.py b/swarms/utils/decorators.py index 3eed85bf..1d3d450c 100644 --- a/swarms/utils/decorators.py +++ b/swarms/utils/decorators.py @@ -1,46 +1,10 @@ import functools import logging import threading -import time import warnings -def log_decorator(func): - def wrapper(*args, **kwargs): - logging.info(f"Entering {func.__name__}") - result = func(*args, **kwargs) - logging.info(f"Exiting {func.__name__}") - return result - - return wrapper - - -def error_decorator(func): - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as e: - logging.error(f"Error in {func.__name__}: {str(e)}") - raise - - return wrapper - - -def timing_decorator(func): - def wrapper(*args, **kwargs): - start_time = time.time() - result = func(*args, **kwargs) - end_time = time.time() - logging.info( - f"{func.__name__} executed in" - f" {end_time - start_time} seconds" - ) - return result - - return wrapper - - -def retry_decorator(max_retries=5): +def retry_decorator(max_retries: int = 5): """ Decorator that retries a function a specified number of times if an exception occurs. diff --git a/swarms/utils/exec_funcs_in_parallel.py b/swarms/utils/exec_funcs_in_parallel.py deleted file mode 100644 index 95548603..00000000 --- a/swarms/utils/exec_funcs_in_parallel.py +++ /dev/null @@ -1,127 +0,0 @@ -import time -from os import cpu_count -from typing import Any, Callable, List, Optional - -from loguru import logger -from pathos.multiprocessing import ProcessingPool as Pool - - -from typing import Tuple - - -def execute_parallel_optimized( - callables_with_args: List[ - Tuple[Callable[..., Any], Tuple[Any, ...]] - ], - max_workers: Optional[int] = None, - chunk_size: Optional[int] = None, - retries: int = 3, - **kwargs, -) -> List[Any]: - """ - Executes a list of callables in parallel, leveraging all available CPU cores. - - This function is optimized for high performance and reliability. - - Args: - callables_with_args (List[Tuple[Callable[..., Any], Tuple[Any, ...]]]): - A list of tuples, where each tuple contains a callable and a tuple of its arguments. - max_workers (Optional[int]): The maximum number of workers to use. Defaults to the number of available cores. - chunk_size (Optional[int]): The size of chunks to split the tasks into for balanced execution. Defaults to automatic chunking. - retries (int): Number of retries for a failed task. Default is 3. - - Returns: - List[Any]: A list of results from each callable. The order corresponds to the order of the input list. - - Raises: - Exception: Any exception raised by the callable will be logged and re-raised after retries are exhausted. 
- """ - max_workers = cpu_count() if max_workers is None else max_workers - results = [] - logger.info( - f"Starting optimized parallel execution of {len(callables_with_args)} tasks." - ) - - pool = Pool( - nodes=max_workers, **kwargs - ) # Initialize the pool once - - def _execute_with_retry(callable_, args, retries): - attempt = 0 - while attempt < retries: - try: - result = callable_(*args) - logger.info( - f"Task {callable_} with args {args} completed successfully." - ) - return result - except Exception as e: - attempt += 1 - logger.warning( - f"Task {callable_} with args {args} failed on attempt {attempt}: {e}" - ) - time.sleep(1) # Small delay before retrying - if attempt >= retries: - logger.error( - f"Task {callable_} with args {args} failed after {retries} retries." - ) - raise - - try: - if chunk_size is None: - chunk_size = ( - len(callables_with_args) - // (max_workers or pool.ncpus) - or 1 - ) - - # Use chunking and mapping for efficient execution - results = pool.map( - lambda item: _execute_with_retry( - item[0], item[1], retries - ), - callables_with_args, - chunksize=chunk_size, - ) - - except Exception as e: - logger.critical( - f"Parallel execution failed due to an error: {e}" - ) - raise - - logger.info( - f"Optimized parallel execution completed. {len(results)} tasks executed." - ) - pool.close() # Ensure pool is properly closed - pool.join() - - -# return results - - -# def add(a, b): -# return a + b - - -# def multiply(a, b): -# return a * b - - -# def power(a, b): -# return a**b - - -# # if __name__ == "__main__": -# # # List of callables with their respective arguments -# # callables_with_args = [ -# # (add, (2, 3)), -# # (multiply, (5, 4)), -# # (power, (2, 10)), -# # ] - -# # # Execute the callables in parallel -# # results = execute_parallel_optimized(callables_with_args) - -# # # Print the results -# # print("Results:", results) diff --git a/swarms/utils/successful_run.py b/swarms/utils/successful_run.py deleted file mode 100644 index 672145c4..00000000 --- a/swarms/utils/successful_run.py +++ /dev/null @@ -1,75 +0,0 @@ -from loguru import logger -import sys -import platform -import os -import datetime - -# Configuring loguru to log to both the console and a file -logger.remove() # Remove default logger configuration -logger.add( - sys.stderr, - level="INFO", - format="{time} - {level} - {message}", -) - -logger.add( - "info.log", level="INFO", format="{time} - {level} - {message}" -) - - -def log_success_message() -> None: - """ - Logs a success message with instructions for sharing agents on the Swarms Agent Explorer and joining the community for assistance. - - Returns: - None - - Raises: - None - """ - # Gather extensive context information - context_info = { - "timestamp": datetime.datetime.now().isoformat(), - "python_version": platform.python_version(), - "platform": platform.platform(), - "machine": platform.machine(), - "processor": platform.processor(), - "user": os.getenv("USER") or os.getenv("USERNAME"), - "current_working_directory": os.getcwd(), - } - - success_message = ( - f"\n" - f"#########################################\n" - f"# #\n" - f"# SUCCESSFUL RUN DETECTED! 
#\n" - f"# #\n" - f"#########################################\n" - f"\n" - f"Your task completed successfully!\n" - f"\n" - f"Context Information:\n" - f"-----------------------------------------\n" - f"Timestamp: {context_info['timestamp']}\n" - f"Python Version: {context_info['python_version']}\n" - f"Platform: {context_info['platform']}\n" - f"Machine: {context_info['machine']}\n" - f"Processor: {context_info['processor']}\n" - f"User: {context_info['user']}\n" - f"Current Working Directory: {context_info['current_working_directory']}\n" - f"-----------------------------------------\n" - f"\n" - f"Share your agents on the Swarms Agent Explorer with friends:\n" - f"https://swarms.world/platform/explorer\n" - f"\n" - f"Join the Swarms community if you want assistance or help debugging:\n" - f"https://discord.gg/uzu63HQx\n" - f"\n" - f"#########################################\n" - ) - - logger.info(success_message) - - -# Example usage: -# log_success_message() diff --git a/swarms/utils/swarm_output_handling 2.py b/swarms/utils/swarm_output_handling 2.py new file mode 100644 index 00000000..d7549100 --- /dev/null +++ b/swarms/utils/swarm_output_handling 2.py @@ -0,0 +1,34 @@ +from typing import Union, Dict, List +from swarms.artifacts.main_artifact import Artifact + + +def handle_artifact_outputs( + file_path: str, + data: Union[str, Dict, List], + output_type: str = "txt", + folder_path: str = "./artifacts", +) -> str: + """ + Handle different types of data and create files in various formats. + + Args: + file_path: Path where the file should be saved + data: Input data that can be string, dict or list + output_type: Type of output file (txt, md, pdf, csv, json) + folder_path: Folder to save artifacts + + Returns: + str: Path to the created file + """ + # Create artifact with appropriate file type + artifact = Artifact( + folder_path=folder_path, + file_path=file_path, + file_type=output_type, + contents=data, + edit_count=0, + ) + + # Save the file + # artifact.save() + artifact.save_as(output_format=output_type) diff --git a/swarms/utils/swarm_reliability_checks 2.py b/swarms/utils/swarm_reliability_checks 2.py new file mode 100644 index 00000000..46145859 --- /dev/null +++ b/swarms/utils/swarm_reliability_checks 2.py @@ -0,0 +1,78 @@ +from loguru import logger +from typing import List, Union, Callable, Optional +from swarms.structs.agent import Agent + + +def reliability_check( + agents: List[Union[Agent, Callable]], + max_loops: int, + name: Optional[str] = None, + description: Optional[str] = None, + flow: Optional[str] = None, +) -> None: + """ + Performs reliability checks on swarm configuration parameters. 
+ + Args: + agents: List of Agent objects or callables that will be executed + max_loops: Maximum number of execution loops + name: Name identifier for the swarm + description: Description of the swarm's purpose + + Raises: + ValueError: If any parameters fail validation checks + TypeError: If parameters are of incorrect type + """ + logger.info("Initializing swarm reliability checks") + + # Type checking + if not isinstance(agents, list): + raise TypeError("agents parameter must be a list") + + if not isinstance(max_loops, int): + raise TypeError("max_loops must be an integer") + + # Validate agents + if not agents: + raise ValueError("Agents list cannot be empty") + + for i, agent in enumerate(agents): + if not isinstance(agent, (Agent, Callable)): + raise TypeError( + f"Agent at index {i} must be an Agent instance or Callable" + ) + + # Validate max_loops + if max_loops <= 0: + raise ValueError("max_loops must be greater than 0") + + if max_loops > 1000: + logger.warning( + "Large max_loops value detected. This may impact performance." + ) + + # Validate name + if name is None: + raise ValueError("name parameter is required") + if not isinstance(name, str): + raise TypeError("name must be a string") + if len(name.strip()) == 0: + raise ValueError("name cannot be empty or just whitespace") + + # Validate description + if description is None: + raise ValueError("description parameter is required") + if not isinstance(description, str): + raise TypeError("description must be a string") + if len(description.strip()) == 0: + raise ValueError( + "description cannot be empty or just whitespace" + ) + + # Validate flow + if flow is None: + raise ValueError("flow parameter is required") + if not isinstance(flow, str): + raise TypeError("flow must be a string") + + logger.info("All reliability checks passed successfully") diff --git a/swarms/utils/wrapper_clusterop 2.py b/swarms/utils/wrapper_clusterop 2.py new file mode 100644 index 00000000..3ee8d3e4 --- /dev/null +++ b/swarms/utils/wrapper_clusterop 2.py @@ -0,0 +1,77 @@ +import os +from typing import Any + +from clusterops import ( + execute_on_gpu, + execute_on_multiple_gpus, + execute_with_cpu_cores, + list_available_gpus, +) +from loguru import logger + + +def exec_callable_with_clusterops( + device: str = "cpu", + device_id: int = 0, + all_cores: bool = True, + all_gpus: bool = False, + func: callable = None, + *args, + **kwargs, +) -> Any: + """ + Executes a given function on a specified device, either CPU or GPU. + + This method attempts to execute a given function on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`. + + Args: + device (str, optional): The device to use for execution. Defaults to "cpu". + device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0. + all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True. + all_gpus (bool, optional): If True, uses all available GPUs. Defaults to False. + func (callable): The function to execute. + *args: Additional positional arguments to be passed to the execution method. + **kwargs: Additional keyword arguments to be passed to the execution method. + + Returns: + Any: The result of the execution. + + Raises: + ValueError: If an invalid device is specified. 
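A short usage sketch for reliability_check above (note that its Args section omits the flow parameter the function validates): plain callables pass the agent type check alongside Agent instances, and invalid values fail fast. The import path is hypothetical because the patched filename ("swarm_reliability_checks 2.py") contains a space:

    # Hypothetical import path, for illustration only.
    from swarms.utils.swarm_reliability_checks import reliability_check

    def worker(task: str) -> str:
        return f"done: {task}"

    # Passes: a non-empty agent list, a positive integer loop count, and
    # non-empty name/description/flow strings satisfy every check.
    reliability_check(
        agents=[worker],
        max_loops=3,
        name="demo-swarm",
        description="pre-flight validation example",
        flow="worker -> worker",
    )

    # Fails fast: an empty agents list raises ValueError before anything runs.
    # reliability_check(agents=[], max_loops=3, name="x", description="y", flow="z")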
+ Exception: If any other error occurs during execution. + """ + try: + logger.info(f"Attempting to run on device: {device}") + if device == "cpu": + logger.info("Device set to CPU") + if all_cores is True: + count = os.cpu_count() + logger.info(f"Using all available CPU cores: {count}") + else: + count = device_id + logger.info(f"Using specific CPU core: {count}") + + return execute_with_cpu_cores( + count, func, *args, **kwargs + ) + + # If device gpu + elif device == "gpu": + logger.info("Device set to GPU") + return execute_on_gpu(device_id, func, *args, **kwargs) + elif device == "gpu" and all_gpus is True: + logger.info("Device set to GPU and running all gpus") + gpus = [int(gpu) for gpu in list_available_gpus()] + return execute_on_multiple_gpus( + gpus, func, *args, **kwargs + ) + else: + raise ValueError( + f"Invalid device specified: {device}. Supported devices are 'cpu' and 'gpu'." + ) + except ValueError as e: + logger.error(f"Invalid device specified: {e}") + raise e + except Exception as e: + logger.error(f"An error occurred during execution: {e}") + raise e diff --git a/tests/structs/test_message_pool.py b/tests/structs/test_message_pool.py deleted file mode 100644 index cd0607cf..00000000 --- a/tests/structs/test_message_pool.py +++ /dev/null @@ -1,117 +0,0 @@ -from swarm_models import OpenAIChat -from swarms.structs.agent import Agent -from swarms.structs.message_pool import MessagePool - - -def test_message_pool_initialization(): - agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") - agent2 = Agent(llm=OpenAIChat(), agent_name="agent1") - moderator = Agent(llm=OpenAIChat(), agent_name="agent1") - agents = [agent1, agent2] - message_pool = MessagePool( - agents=agents, moderator=moderator, turns=5 - ) - - assert message_pool.agent == agents - assert message_pool.moderator == moderator - assert message_pool.turns == 5 - assert message_pool.messages == [] - - -def test_message_pool_add(): - agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") - message_pool = MessagePool( - agents=[agent1], moderator=agent1, turns=5 - ) - message_pool.add(agent=agent1, content="Hello, world!", turn=1) - - assert message_pool.messages == [ - { - "agent": agent1, - "content": "Hello, world!", - "turn": 1, - "visible_to": "all", - "logged": True, - } - ] - - -def test_message_pool_reset(): - agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") - message_pool = MessagePool( - agents=[agent1], moderator=agent1, turns=5 - ) - message_pool.add(agent=agent1, content="Hello, world!", turn=1) - message_pool.reset() - - assert message_pool.messages == [] - - -def test_message_pool_last_turn(): - agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") - message_pool = MessagePool( - agents=[agent1], moderator=agent1, turns=5 - ) - message_pool.add(agent=agent1, content="Hello, world!", turn=1) - - assert message_pool.last_turn() == 1 - - -def test_message_pool_last_message(): - agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") - message_pool = MessagePool( - agents=[agent1], moderator=agent1, turns=5 - ) - message_pool.add(agent=agent1, content="Hello, world!", turn=1) - - assert message_pool.last_message == { - "agent": agent1, - "content": "Hello, world!", - "turn": 1, - "visible_to": "all", - "logged": True, - } - - -def test_message_pool_get_all_messages(): - agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") - message_pool = MessagePool( - agents=[agent1], moderator=agent1, turns=5 - ) - message_pool.add(agent=agent1, content="Hello, world!", turn=1) - - assert 
message_pool.get_all_messages() == [ - { - "agent": agent1, - "content": "Hello, world!", - "turn": 1, - "visible_to": "all", - "logged": True, - } - ] - - -def test_message_pool_get_visible_messages(): - agent1 = Agent(llm=OpenAIChat(), agent_name="agent1") - agent2 = Agent(agent_name="agent2") - message_pool = MessagePool( - agents=[agent1, agent2], moderator=agent1, turns=5 - ) - message_pool.add( - agent=agent1, - content="Hello, agent2!", - turn=1, - visible_to=[agent2.agent_name], - ) - - assert message_pool.get_visible_messages( - agent=agent2, turn=2 - ) == [ - { - "agent": agent1, - "content": "Hello, agent2!", - "turn": 1, - "visible_to": [agent2.agent_name], - "logged": True, - } - ] From a1ff1e1392ee52beb51863f221ff71748046a06f Mon Sep 17 00:00:00 2001 From: Kye Gomez <98760976+kyegomez@users.noreply.github.com> Date: Sat, 16 Nov 2024 11:42:42 -0800 Subject: [PATCH 05/19] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 96be5b67..dd9ce311 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ Swarms is an enterprise grade and production ready multi-agent collaboration fra - Set an `.env` Variable with your desired workspace dir: `WORKSPACE_DIR="agent_workspace"` or do it in your terminal with `export WORKSPACE_DIR="agent_workspace"` - Finally, `swarms onboarding` to get you started. -## Onboarding +## Guides and Walkthroughs Refer to our documentation for production grade implementation details. From 2b4be2bef7f0b9c5483454e4066ce5ff613c1b13 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 16 Nov 2024 11:43:48 -0800 Subject: [PATCH 06/19] [CLEAN UP] --- agent_showcase_example 2.py | 68 ----- concurrent_mix 2.py | 99 ------- rearrange_test 2.py | 119 -------- sequential_worflow_test 2.py | 117 -------- swarms/structs/agents_available 2.py | 93 ------- swarms/structs/auto_swarm_builder 2.py | 299 --------------------- swarms/utils/swarm_output_handling 2.py | 34 --- swarms/utils/swarm_reliability_checks 2.py | 78 ------ swarms/utils/wrapper_clusterop 2.py | 77 ------ 9 files changed, 984 deletions(-) delete mode 100644 agent_showcase_example 2.py delete mode 100644 concurrent_mix 2.py delete mode 100644 rearrange_test 2.py delete mode 100644 sequential_worflow_test 2.py delete mode 100644 swarms/structs/agents_available 2.py delete mode 100644 swarms/structs/auto_swarm_builder 2.py delete mode 100644 swarms/utils/swarm_output_handling 2.py delete mode 100644 swarms/utils/swarm_reliability_checks 2.py delete mode 100644 swarms/utils/wrapper_clusterop 2.py diff --git a/agent_showcase_example 2.py b/agent_showcase_example 2.py deleted file mode 100644 index b78abf81..00000000 --- a/agent_showcase_example 2.py +++ /dev/null @@ -1,68 +0,0 @@ -import os - -from swarms import Agent - -from swarm_models import OpenAIChat -from swarms.structs.agents_available import showcase_available_agents - -# Get the OpenAI API key from the environment variable -api_key = os.getenv("OPENAI_API_KEY") - -# Create an instance of the OpenAIChat class -model = OpenAIChat( - api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 -) - -# Initialize the Claims Director agent -director_agent = Agent( - agent_name="ClaimsDirector", - agent_description="Oversees and coordinates the medical insurance claims processing workflow", - system_prompt="""You are the Claims Director responsible for managing the medical insurance claims process. - Assign and prioritize tasks between claims processors and auditors. 
Ensure claims are handled efficiently - and accurately while maintaining compliance with insurance policies and regulations.""", - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="director_agent.json", -) - -# Initialize Claims Processor agent -processor_agent = Agent( - agent_name="ClaimsProcessor", - agent_description="Reviews and processes medical insurance claims, verifying coverage and eligibility", - system_prompt="""Review medical insurance claims for completeness and accuracy. Verify patient eligibility, - coverage details, and process claims according to policy guidelines. Flag any claims requiring special review.""", - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="processor_agent.json", -) - -# Initialize Claims Auditor agent -auditor_agent = Agent( - agent_name="ClaimsAuditor", - agent_description="Audits processed claims for accuracy and compliance with policies and regulations", - system_prompt="""Audit processed insurance claims for accuracy and compliance. Review claim decisions, - identify potential fraud or errors, and ensure all processing follows established guidelines and regulations.""", - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="auditor_agent.json", -) - -# Create a list of agents -agents = [director_agent, processor_agent, auditor_agent] - -print(showcase_available_agents(agents=agents)) diff --git a/concurrent_mix 2.py b/concurrent_mix 2.py deleted file mode 100644 index 5ac80ede..00000000 --- a/concurrent_mix 2.py +++ /dev/null @@ -1,99 +0,0 @@ -import os - -from swarm_models import OpenAIChat - -from swarms import Agent, run_agents_with_tasks_concurrently - -# Fetch the OpenAI API key from the environment variable -api_key = os.getenv("OPENAI_API_KEY") - -# Create an instance of the OpenAIChat class -model = OpenAIChat( - openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 -) - -# Initialize agents for different roles -delaware_ccorp_agent = Agent( - agent_name="Delaware-CCorp-Hiring-Agent", - system_prompt=""" - Create a comprehensive hiring description for a Delaware C Corporation, - including all relevant laws and regulations, such as the Delaware General - Corporation Law (DGCL) and the Delaware Corporate Law. Ensure the description - covers the requirements for hiring employees, contractors, and officers, - including the necessary paperwork, tax obligations, and benefits. Also, - outline the procedures for compliance with Delaware's employment laws, - including anti-discrimination laws, workers' compensation, and unemployment - insurance. Provide guidance on how to navigate the complexities of Delaware's - corporate law and ensure that all hiring practices are in compliance with - state and federal regulations. 
- """, - llm=model, - max_loops=1, - autosave=False, - dashboard=False, - verbose=True, - output_type="str", - artifacts_on=True, - artifacts_output_path="delaware_ccorp_hiring_description.md", - artifacts_file_extension=".md", -) - -indian_foreign_agent = Agent( - agent_name="Indian-Foreign-Hiring-Agent", - system_prompt=""" - Create a comprehensive hiring description for an Indian or foreign country, - including all relevant laws and regulations, such as the Indian Contract Act, - the Indian Labour Laws, and the Foreign Exchange Management Act (FEMA). - Ensure the description covers the requirements for hiring employees, - contractors, and officers, including the necessary paperwork, tax obligations, - and benefits. Also, outline the procedures for compliance with Indian and - foreign employment laws, including anti-discrimination laws, workers' - compensation, and unemployment insurance. Provide guidance on how to navigate - the complexities of Indian and foreign corporate law and ensure that all hiring - practices are in compliance with state and federal regulations. Consider the - implications of hiring foreign nationals and the requirements for obtaining - necessary visas and work permits. - """, - llm=model, - max_loops=1, - autosave=False, - dashboard=False, - verbose=True, - output_type="str", - artifacts_on=True, - artifacts_output_path="indian_foreign_hiring_description.md", - artifacts_file_extension=".md", -) - -# List of agents and corresponding tasks -agents = [delaware_ccorp_agent, indian_foreign_agent] -tasks = [ - """ - Create a comprehensive hiring description for an Agent Engineer, including - required skills and responsibilities. Ensure the description covers the - necessary technical expertise, such as proficiency in AI/ML frameworks, - programming languages, and data structures. Outline the key responsibilities, - including designing and developing AI agents, integrating with existing systems, - and ensuring scalability and performance. - """, - """ - Generate a detailed job description for a Prompt Engineer, including - required skills and responsibilities. Ensure the description covers the - necessary technical expertise, such as proficiency in natural language processing, - machine learning, and software development. Outline the key responsibilities, - including designing and optimizing prompts for AI systems, ensuring prompt - quality and consistency, and collaborating with cross-functional teams. - """, -] - -# Run agents with tasks concurrently -results = run_agents_with_tasks_concurrently( - agents, - tasks, - all_cores=True, - device="cpu", -) - -# Print the results -for result in results: - print(result) diff --git a/rearrange_test 2.py b/rearrange_test 2.py deleted file mode 100644 index ddfd7670..00000000 --- a/rearrange_test 2.py +++ /dev/null @@ -1,119 +0,0 @@ -import os - -from swarms import Agent, AgentRearrange - -from swarm_models import OpenAIChat - -# Get the OpenAI API key from the environment variable -api_key = os.getenv("OPENAI_API_KEY") - -# Create an instance of the OpenAIChat class -model = OpenAIChat( - api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 -) - - -# Initialize the boss agent (Director) -boss_agent = Agent( - agent_name="BossAgent", - system_prompt=""" - You are the BossAgent responsible for managing and overseeing a swarm of agents analyzing company expenses. - Your job is to dynamically assign tasks, prioritize their execution, and ensure that all agents collaborate efficiently. 
- After receiving a report on the company's expenses, you will break down the work into smaller tasks, - assigning specific tasks to each agent, such as detecting recurring high costs, categorizing expenditures, - and identifying unnecessary transactions. Ensure the results are communicated back in a structured way - so the finance team can take actionable steps to cut off unproductive spending. You also monitor and - dynamically adapt the swarm to optimize their performance. Finally, you summarize their findings - into a coherent report. - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="boss_agent.json", -) - -# Initialize worker 1: Expense Analyzer -worker1 = Agent( - agent_name="ExpenseAnalyzer", - system_prompt=""" - Your task is to carefully analyze the company's expense data provided to you. - You will focus on identifying high-cost recurring transactions, categorizing expenditures - (e.g., marketing, operations, utilities, etc.), and flagging areas where there seems to be excessive spending. - You will provide a detailed breakdown of each category, along with specific recommendations for cost-cutting. - Pay close attention to monthly recurring subscriptions, office supplies, and non-essential expenditures. - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="worker1.json", -) - -# Initialize worker 2: Summary Generator -worker2 = Agent( - agent_name="SummaryGenerator", - system_prompt=""" - After receiving the detailed breakdown from the ExpenseAnalyzer, - your task is to create a concise summary of the findings. You will focus on the most actionable insights, - such as highlighting the specific transactions that can be immediately cut off and summarizing the areas - where the company is overspending. Your summary will be used by the BossAgent to generate the final report. - Be clear and to the point, emphasizing the urgency of cutting unnecessary expenses. - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="worker2.json", -) - -# Swarm-Level Prompt (Collaboration Prompt) -swarm_prompt = """ - As a swarm, your collective goal is to analyze the company's expenses and identify transactions that should be cut off. - You will work collaboratively to break down the entire process of expense analysis into manageable steps. - The BossAgent will direct the flow and assign tasks dynamically to the agents. The ExpenseAnalyzer will first - focus on breaking down the expense report, identifying high-cost recurring transactions, categorizing them, - and providing recommendations for potential cost reduction. After the analysis, the SummaryGenerator will then - consolidate all the findings into an actionable summary that the finance team can use to immediately cut off unnecessary expenses. - Together, your collaboration is essential to streamlining and improving the company’s financial health. 
-""" - -# Create a list of agents -agents = [boss_agent, worker1, worker2] - -# Define the flow pattern for the swarm -flow = "BossAgent -> ExpenseAnalyzer -> SummaryGenerator" - -# Using AgentRearrange class to manage the swarm -agent_system = AgentRearrange( - agents=agents, - flow=flow, - return_json=False, - output_type="final", - max_loops=1, - docs=["SECURITY.md"], -) - -# Input task for the swarm -task = f""" - - {swarm_prompt} - - The company has been facing a rising number of unnecessary expenses, and the finance team needs a detailed - analysis of recent transactions to identify which expenses can be cut off to improve profitability. - Analyze the provided transaction data and create a detailed report on cost-cutting opportunities, - focusing on recurring transactions and non-essential expenditures. -""" - -# Run the swarm system with the task -output = agent_system.run(task) -print(output) diff --git a/sequential_worflow_test 2.py b/sequential_worflow_test 2.py deleted file mode 100644 index 654154c6..00000000 --- a/sequential_worflow_test 2.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -from dotenv import load_dotenv -from swarms import Agent, SequentialWorkflow -from swarm_models import OpenAIChat - -load_dotenv() - -# Get the OpenAI API key from the environment variable -api_key = os.getenv("GROQ_API_KEY") - -# Model -model = OpenAIChat( - openai_api_base="https://api.groq.com/openai/v1", - openai_api_key=api_key, - model_name="llama-3.1-70b-versatile", - temperature=0.1, -) - - -# Initialize specialized agents -data_extractor_agent = Agent( - agent_name="Data-Extractor", - system_prompt=None, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="data_extractor_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) - -summarizer_agent = Agent( - agent_name="Document-Summarizer", - system_prompt=None, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="summarizer_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) - -financial_analyst_agent = Agent( - agent_name="Financial-Analyst", - system_prompt=None, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="financial_analyst_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) - -market_analyst_agent = Agent( - agent_name="Market-Analyst", - system_prompt=None, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="market_analyst_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) - -operational_analyst_agent = Agent( - agent_name="Operational-Analyst", - system_prompt=None, - llm=model, - max_loops=1, - autosave=True, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="operational_analyst_agent.json", - user_name="pe_firm", - retry_attempts=1, - context_length=200000, - output_type="string", -) - -# Initialize the SwarmRouter -router = SequentialWorkflow( - name="pe-document-analysis-swarm", - description="Analyze documents for private equity due diligence and investment decision-making", - max_loops=1, - agents=[ - data_extractor_agent, - summarizer_agent, - financial_analyst_agent, - market_analyst_agent, - operational_analyst_agent, - ], - 
output_type="all", -) - -# Example usage -if __name__ == "__main__": - # Run a comprehensive private equity document analysis task - result = router.run( - "Where is the best place to find template term sheets for series A startups. Provide links and references" - ) - print(result) diff --git a/swarms/structs/agents_available 2.py b/swarms/structs/agents_available 2.py deleted file mode 100644 index 0ed63c5a..00000000 --- a/swarms/structs/agents_available 2.py +++ /dev/null @@ -1,93 +0,0 @@ -from typing import List, Any -from loguru import logger -from swarms.structs.agent import Agent - - -def get_agent_name(agent: Any) -> str: - """Helper function to safely get agent name - - Args: - agent (Any): The agent object to get name from - - Returns: - str: The agent's name if found, 'Unknown' otherwise - """ - if isinstance(agent, Agent) and hasattr(agent, "agent_name"): - return agent.agent_name - return "Unknown" - - -def get_agent_description(agent: Any) -> str: - """Helper function to get agent description or system prompt preview - - Args: - agent (Any): The agent object - - Returns: - str: Description or first 100 chars of system prompt - """ - if not isinstance(agent, Agent): - return "N/A" - - if hasattr(agent, "description") and agent.description: - return agent.description - - if hasattr(agent, "system_prompt") and agent.system_prompt: - return f"{agent.system_prompt[:150]}..." - - return "N/A" - - -def showcase_available_agents( - name: str = None, - description: str = None, - agents: List[Agent] = [], - update_agents_on: bool = False, -) -> str: - """ - Generate a formatted string showcasing all available agents and their descriptions. - - Args: - agents (List[Agent]): List of Agent objects to showcase. - update_agents_on (bool, optional): If True, updates each agent's system prompt with - the showcase information. Defaults to False. - - Returns: - str: Formatted string containing agent information, including names, descriptions - and IDs for all available agents. - """ - logger.info(f"Showcasing {len(agents)} available agents") - - formatted_agents = [] - header = f"\n####### Agents available in the swarm: {name} ############\n" - header += f"{description}\n" - row_format = "{:<5} | {:<20} | {:<50}" - header_row = row_format.format("ID", "Agent Name", "Description") - separator = "-" * 80 - - formatted_agents.append(header) - formatted_agents.append(separator) - formatted_agents.append(header_row) - formatted_agents.append(separator) - - for idx, agent in enumerate(agents): - if not isinstance(agent, Agent): - logger.warning( - f"Skipping non-Agent object: {type(agent)}" - ) - continue - - agent_name = get_agent_name(agent) - description = ( - get_agent_description(agent)[:100] + "..." 
- if len(get_agent_description(agent)) > 100 - else get_agent_description(agent) - ) - - formatted_agents.append( - row_format.format(idx + 1, agent_name, description) - ) - - showcase = "\n".join(formatted_agents) - - return showcase diff --git a/swarms/structs/auto_swarm_builder 2.py b/swarms/structs/auto_swarm_builder 2.py deleted file mode 100644 index 177cfdc4..00000000 --- a/swarms/structs/auto_swarm_builder 2.py +++ /dev/null @@ -1,299 +0,0 @@ -from loguru import logger - -import os -from typing import List - -from pydantic import BaseModel, Field -from swarm_models import OpenAIFunctionCaller, OpenAIChat - -from swarms.structs.agent import Agent -from swarms.structs.swarm_router import SwarmRouter - - -class AgentConfig(BaseModel): - """Configuration for an individual agent in a swarm""" - - name: str = Field( - description="The name of the agent", example="Research-Agent" - ) - description: str = Field( - description="A description of the agent's purpose and capabilities", - example="Agent responsible for researching and gathering information", - ) - system_prompt: str = Field( - description="The system prompt that defines the agent's behavior", - example="You are a research agent. Your role is to gather and analyze information...", - ) - max_loops: int = Field( - description="Maximum number of reasoning loops the agent can perform", - example=3, - ) - - -class SwarmConfig(BaseModel): - """Configuration for a swarm of cooperative agents""" - - name: str = Field( - description="The name of the swarm", - example="Research-Writing-Swarm", - ) - description: str = Field( - description="The description of the swarm's purpose and capabilities", - example="A swarm of agents that work together to research topics and write articles", - ) - agents: List[AgentConfig] = Field( - description="The list of agents that make up the swarm", - example=[ - AgentConfig( - name="Research-Agent", - description="Gathers information", - system_prompt="You are a research agent...", - max_loops=2, - ), - AgentConfig( - name="Writing-Agent", - description="Writes content", - system_prompt="You are a writing agent...", - max_loops=1, - ), - ], - ) - max_loops: int = Field( - description="The maximum number of loops to run the swarm", - example=1, - ) - - -# Get the OpenAI API key from the environment variable -api_key = os.getenv("OPENAI_API_KEY") - -# Create an instance of the OpenAIChat class -model = OpenAIChat( - openai_api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 -) - - -BOSS_SYSTEM_PROMPT = """ -Manage a swarm of worker agents to efficiently serve the user by deciding whether to create new agents or delegate tasks. Ensure operations are efficient and effective. - -### Instructions: - -1. **Task Assignment**: - - Analyze available worker agents when a task is presented. - - Delegate tasks to existing agents with clear, direct, and actionable instructions if an appropriate agent is available. - - If no suitable agent exists, create a new agent with a fitting system prompt to handle the task. - -2. **Agent Creation**: - - Name agents according to the task they are intended to perform (e.g., "Twitter Marketing Agent"). - - Provide each new agent with a concise and clear system prompt that includes its role, objectives, and any tools it can utilize. - -3. **Efficiency**: - - Minimize redundancy and maximize task completion speed. - - Avoid unnecessary agent creation if an existing agent can fulfill the task. - -4. 
**Communication**: - - Be explicit in task delegation instructions to avoid ambiguity and ensure effective task execution. - - Require agents to report back on task completion or encountered issues. - -5. **Reasoning and Decisions**: - - Offer brief reasoning when selecting or creating agents to maintain transparency. - - Avoid using an agent if unnecessary, with a clear explanation if no agents are suitable for a task. - -# Output Format - -Present your plan in clear, bullet-point format or short concise paragraphs, outlining task assignment, agent creation, efficiency strategies, and communication protocols. - -# Notes - -- Preserve transparency by always providing reasoning for task-agent assignments and creation. -- Ensure instructions to agents are unambiguous to minimize error. - -""" - - -class AutoSwarmBuilder: - """A class that automatically builds and manages swarms of AI agents. - - This class handles the creation, coordination and execution of multiple AI agents working - together as a swarm to accomplish complex tasks. It uses a boss agent to delegate work - and create new specialized agents as needed. - - Args: - name (str): The name of the swarm - description (str): A description of the swarm's purpose - verbose (bool, optional): Whether to output detailed logs. Defaults to True. - max_loops (int, optional): Maximum number of execution loops. Defaults to 1. - """ - - def __init__( - self, - name: str = None, - description: str = None, - verbose: bool = True, - max_loops: int = 1, - ): - self.name = name - self.description = description - self.verbose = verbose - self.max_loops = max_loops - self.agents_pool = [] - logger.info( - f"Initialized AutoSwarmBuilder: {name} {description}" - ) - - # @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10)) - def run(self, task: str, image_url: str = None, *args, **kwargs): - """Run the swarm on a given task. - - Args: - task (str): The task to be accomplished - image_url (str, optional): URL of an image input if needed. Defaults to None. - *args: Variable length argument list - **kwargs: Arbitrary keyword arguments - - Returns: - The output from the swarm's execution - """ - logger.info(f"Running swarm on task: {task}") - agents = self._create_agents(task, image_url, *args, **kwargs) - logger.info(f"Agents created {len(agents)}") - logger.info("Routing task through swarm") - output = self.swarm_router(agents, task, image_url) - logger.info(f"Swarm execution complete with output: {output}") - return output - - # @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10)) - def _create_agents(self, task: str, *args, **kwargs): - """Create the necessary agents for a task. 
- - Args: - task (str): The task to create agents for - *args: Variable length argument list - **kwargs: Arbitrary keyword arguments - - Returns: - list: List of created agents - """ - logger.info("Creating agents for task") - model = OpenAIFunctionCaller( - system_prompt=BOSS_SYSTEM_PROMPT, - api_key=os.getenv("OPENAI_API_KEY"), - temperature=0.1, - base_model=SwarmConfig, - ) - - agents_dictionary = model.run(task) - logger.info(f"Agents dictionary: {agents_dictionary}") - - # Convert dictionary to SwarmConfig if needed - if isinstance(agents_dictionary, dict): - agents_dictionary = SwarmConfig(**agents_dictionary) - - # Set swarm config - self.name = agents_dictionary.name - self.description = agents_dictionary.description - self.max_loops = getattr( - agents_dictionary, "max_loops", 1 - ) # Default to 1 if not set - - logger.info( - f"Swarm config: {self.name}, {self.description}, {self.max_loops}" - ) - - # Create agents from config - agents = [] - for agent_config in agents_dictionary.agents: - # Convert dict to AgentConfig if needed - if isinstance(agent_config, dict): - agent_config = AgentConfig(**agent_config) - - agent = self.build_agent( - agent_name=agent_config.name, - agent_description=agent_config.description, - agent_system_prompt=agent_config.system_prompt, - max_loops=agent_config.max_loops, - ) - agents.append(agent) - - return agents - - def build_agent( - self, - agent_name: str, - agent_description: str, - agent_system_prompt: str, - max_loops: int = 1, - ): - """Build a single agent with the given specifications. - - Args: - agent_name (str): Name of the agent - agent_description (str): Description of the agent's purpose - agent_system_prompt (str): The system prompt for the agent - - Returns: - Agent: The constructed agent instance - """ - logger.info(f"Building agent: {agent_name}") - agent = Agent( - agent_name=agent_name, - description=agent_description, - system_prompt=agent_system_prompt, - llm=model, - max_loops=max_loops, - autosave=True, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path=f"{agent_name}.json", - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - return_step_meta=False, - output_type="str", # "json", "dict", "csv" OR "string" soon "yaml" and - streaming_on=False, - auto_generate_prompt=True, - ) - - return agent - - # @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=4, max=10)) - def swarm_router( - self, - agents: List[Agent], - task: str, - image_url: str = None, - *args, - **kwargs, - ): - """Route tasks between agents in the swarm. - - Args: - agents (List[Agent]): List of available agents - task (str): The task to route - image_url (str, optional): URL of an image input if needed. Defaults to None. 
- *args: Variable length argument list - **kwargs: Arbitrary keyword arguments - - Returns: - The output from the routed task execution - """ - logger.info("Routing task through swarm") - swarm_router_instance = SwarmRouter( - agents=agents, - swarm_type="auto", - max_loops=1, - ) - - return swarm_router_instance.run( - self.name + " " + self.description + " " + task, - ) - - -example = AutoSwarmBuilder() - -print( - example.run( - "Write multiple blog posts about the latest advancements in swarm intelligence all at once" - ) -) diff --git a/swarms/utils/swarm_output_handling 2.py b/swarms/utils/swarm_output_handling 2.py deleted file mode 100644 index d7549100..00000000 --- a/swarms/utils/swarm_output_handling 2.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import Union, Dict, List -from swarms.artifacts.main_artifact import Artifact - - -def handle_artifact_outputs( - file_path: str, - data: Union[str, Dict, List], - output_type: str = "txt", - folder_path: str = "./artifacts", -) -> str: - """ - Handle different types of data and create files in various formats. - - Args: - file_path: Path where the file should be saved - data: Input data that can be string, dict or list - output_type: Type of output file (txt, md, pdf, csv, json) - folder_path: Folder to save artifacts - - Returns: - str: Path to the created file - """ - # Create artifact with appropriate file type - artifact = Artifact( - folder_path=folder_path, - file_path=file_path, - file_type=output_type, - contents=data, - edit_count=0, - ) - - # Save the file - # artifact.save() - artifact.save_as(output_format=output_type) diff --git a/swarms/utils/swarm_reliability_checks 2.py b/swarms/utils/swarm_reliability_checks 2.py deleted file mode 100644 index 46145859..00000000 --- a/swarms/utils/swarm_reliability_checks 2.py +++ /dev/null @@ -1,78 +0,0 @@ -from loguru import logger -from typing import List, Union, Callable, Optional -from swarms.structs.agent import Agent - - -def reliability_check( - agents: List[Union[Agent, Callable]], - max_loops: int, - name: Optional[str] = None, - description: Optional[str] = None, - flow: Optional[str] = None, -) -> None: - """ - Performs reliability checks on swarm configuration parameters. - - Args: - agents: List of Agent objects or callables that will be executed - max_loops: Maximum number of execution loops - name: Name identifier for the swarm - description: Description of the swarm's purpose - - Raises: - ValueError: If any parameters fail validation checks - TypeError: If parameters are of incorrect type - """ - logger.info("Initializing swarm reliability checks") - - # Type checking - if not isinstance(agents, list): - raise TypeError("agents parameter must be a list") - - if not isinstance(max_loops, int): - raise TypeError("max_loops must be an integer") - - # Validate agents - if not agents: - raise ValueError("Agents list cannot be empty") - - for i, agent in enumerate(agents): - if not isinstance(agent, (Agent, Callable)): - raise TypeError( - f"Agent at index {i} must be an Agent instance or Callable" - ) - - # Validate max_loops - if max_loops <= 0: - raise ValueError("max_loops must be greater than 0") - - if max_loops > 1000: - logger.warning( - "Large max_loops value detected. This may impact performance." 
- ) - - # Validate name - if name is None: - raise ValueError("name parameter is required") - if not isinstance(name, str): - raise TypeError("name must be a string") - if len(name.strip()) == 0: - raise ValueError("name cannot be empty or just whitespace") - - # Validate description - if description is None: - raise ValueError("description parameter is required") - if not isinstance(description, str): - raise TypeError("description must be a string") - if len(description.strip()) == 0: - raise ValueError( - "description cannot be empty or just whitespace" - ) - - # Validate flow - if flow is None: - raise ValueError("flow parameter is required") - if not isinstance(flow, str): - raise TypeError("flow must be a string") - - logger.info("All reliability checks passed successfully") diff --git a/swarms/utils/wrapper_clusterop 2.py b/swarms/utils/wrapper_clusterop 2.py deleted file mode 100644 index 3ee8d3e4..00000000 --- a/swarms/utils/wrapper_clusterop 2.py +++ /dev/null @@ -1,77 +0,0 @@ -import os -from typing import Any - -from clusterops import ( - execute_on_gpu, - execute_on_multiple_gpus, - execute_with_cpu_cores, - list_available_gpus, -) -from loguru import logger - - -def exec_callable_with_clusterops( - device: str = "cpu", - device_id: int = 0, - all_cores: bool = True, - all_gpus: bool = False, - func: callable = None, - *args, - **kwargs, -) -> Any: - """ - Executes a given function on a specified device, either CPU or GPU. - - This method attempts to execute a given function on a specified device, either CPU or GPU. It logs the device selection and the number of cores or GPU ID used. If the device is set to CPU, it can use all available cores or a specific core specified by `device_id`. If the device is set to GPU, it uses the GPU specified by `device_id`. - - Args: - device (str, optional): The device to use for execution. Defaults to "cpu". - device_id (int, optional): The ID of the GPU to use if device is set to "gpu". Defaults to 0. - all_cores (bool, optional): If True, uses all available CPU cores. Defaults to True. - all_gpus (bool, optional): If True, uses all available GPUs. Defaults to False. - func (callable): The function to execute. - *args: Additional positional arguments to be passed to the execution method. - **kwargs: Additional keyword arguments to be passed to the execution method. - - Returns: - Any: The result of the execution. - - Raises: - ValueError: If an invalid device is specified. - Exception: If any other error occurs during execution. - """ - try: - logger.info(f"Attempting to run on device: {device}") - if device == "cpu": - logger.info("Device set to CPU") - if all_cores is True: - count = os.cpu_count() - logger.info(f"Using all available CPU cores: {count}") - else: - count = device_id - logger.info(f"Using specific CPU core: {count}") - - return execute_with_cpu_cores( - count, func, *args, **kwargs - ) - - # If device gpu - elif device == "gpu": - logger.info("Device set to GPU") - return execute_on_gpu(device_id, func, *args, **kwargs) - elif device == "gpu" and all_gpus is True: - logger.info("Device set to GPU and running all gpus") - gpus = [int(gpu) for gpu in list_available_gpus()] - return execute_on_multiple_gpus( - gpus, func, *args, **kwargs - ) - else: - raise ValueError( - f"Invalid device specified: {device}. Supported devices are 'cpu' and 'gpu'." 
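Whether in this duplicate being deleted or in the surviving swarms/utils/wrapper_clusterop.py, the GPU dispatch above has an unreachable branch: elif device == "gpu" and all_gpus is True: can never fire, because the preceding elif device == "gpu": already matches every GPU call. A reordered sketch of just the GPU path, testing the more specific condition first and assuming the same clusterops helpers imported above:

    from typing import Any, Callable

    from clusterops import (
        execute_on_gpu,
        execute_on_multiple_gpus,
        list_available_gpus,
    )

    def dispatch_gpu(
        func: Callable,
        device_id: int = 0,
        all_gpus: bool = False,
        *args,
        **kwargs,
    ) -> Any:
        # Check the all-GPUs case first; in the original chain it was shadowed
        # by the bare device == "gpu" test and could never execute.
        if all_gpus:
            gpus = [int(gpu) for gpu in list_available_gpus()]
            return execute_on_multiple_gpus(gpus, func, *args, **kwargs)
        return execute_on_gpu(device_id, func, *args, **kwargs)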
- ) - except ValueError as e: - logger.error(f"Invalid device specified: {e}") - raise e - except Exception as e: - logger.error(f"An error occurred during execution: {e}") - raise e From 4a9a0ba3efc57fb0d8e3b2fdbfeb1cc95c5c6658 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 16 Nov 2024 11:46:31 -0800 Subject: [PATCH 07/19] [CLEANUP] --- swarms/utils/add_docs_to_agents 2.py | 141 --------------------------- swarms/utils/any_to_str 2.py | 102 ------------------- 2 files changed, 243 deletions(-) delete mode 100644 swarms/utils/add_docs_to_agents 2.py delete mode 100644 swarms/utils/any_to_str 2.py diff --git a/swarms/utils/add_docs_to_agents 2.py b/swarms/utils/add_docs_to_agents 2.py deleted file mode 100644 index 8dbc1df3..00000000 --- a/swarms/utils/add_docs_to_agents 2.py +++ /dev/null @@ -1,141 +0,0 @@ -from typing import Any, List, Optional, Union -from pathlib import Path -from loguru import logger -from doc_master import doc_master -from concurrent.futures import ThreadPoolExecutor, as_completed -from tenacity import retry, stop_after_attempt, wait_exponential - - -@retry( - stop=stop_after_attempt(3), - wait=wait_exponential(multiplier=1, min=4, max=10), -) -def _process_document(doc_path: Union[str, Path]) -> str: - """Safely process a single document with retries. - - Args: - doc_path: Path to the document to process - - Returns: - Processed document text - - Raises: - Exception: If document processing fails after retries - """ - try: - return doc_master( - file_path=str(doc_path), output_type="string" - ) - except Exception as e: - logger.error( - f"Error processing document {doc_path}: {str(e)}" - ) - raise - - -def handle_input_docs( - agents: Any, - docs: Optional[List[Union[str, Path]]] = None, - doc_folder: Optional[Union[str, Path]] = None, - max_workers: int = 4, - chunk_size: int = 1000000, -) -> Any: - """ - Add document content to agent prompts with improved reliability and performance. 
- - Args: - agents: Dictionary mapping agent names to Agent objects - docs: List of document paths - doc_folder: Path to folder containing documents - max_workers: Maximum number of parallel document processing workers - chunk_size: Maximum characters to process at once to avoid memory issues - - Raises: - ValueError: If neither docs nor doc_folder is provided - RuntimeError: If document processing fails - """ - if not agents: - logger.warning( - "No agents provided, skipping document distribution" - ) - return - - if not docs and not doc_folder: - logger.warning( - "No documents or folder provided, skipping document distribution" - ) - return - - logger.info("Starting document distribution to agents") - - try: - processed_docs = [] - - # Process individual documents in parallel - if docs: - with ThreadPoolExecutor( - max_workers=max_workers - ) as executor: - future_to_doc = { - executor.submit(_process_document, doc): doc - for doc in docs - } - - for future in as_completed(future_to_doc): - doc = future_to_doc[future] - try: - processed_docs.append(future.result()) - except Exception as e: - logger.error( - f"Failed to process document {doc}: {str(e)}" - ) - raise RuntimeError( - f"Document processing failed: {str(e)}" - ) - - # Process folder if specified - elif doc_folder: - try: - folder_content = doc_master( - folder_path=str(doc_folder), output_type="string" - ) - processed_docs.append(folder_content) - except Exception as e: - logger.error( - f"Failed to process folder {doc_folder}: {str(e)}" - ) - raise RuntimeError( - f"Folder processing failed: {str(e)}" - ) - - # Combine and chunk the processed documents - combined_data = "\n".join(processed_docs) - - # Update agent prompts in chunks to avoid memory issues - for agent in agents.values(): - try: - for i in range(0, len(combined_data), chunk_size): - chunk = combined_data[i : i + chunk_size] - if i == 0: - agent.system_prompt += ( - "\nDocuments:\n" + chunk - ) - else: - agent.system_prompt += chunk - except Exception as e: - logger.error( - f"Failed to update agent prompt: {str(e)}" - ) - raise RuntimeError( - f"Agent prompt update failed: {str(e)}" - ) - - logger.info( - f"Successfully added documents to {len(agents)} agents" - ) - - return agents - - except Exception as e: - logger.error(f"Document distribution failed: {str(e)}") - raise RuntimeError(f"Document distribution failed: {str(e)}") diff --git a/swarms/utils/any_to_str 2.py b/swarms/utils/any_to_str 2.py deleted file mode 100644 index 125e233e..00000000 --- a/swarms/utils/any_to_str 2.py +++ /dev/null @@ -1,102 +0,0 @@ -from typing import Union, Dict, List, Tuple, Any - - -def any_to_str(data: Union[str, Dict, List, Tuple, Any]) -> str: - """Convert any input data type to a nicely formatted string. - - This function handles conversion of various Python data types into a clean string representation. - It recursively processes nested data structures and handles None values gracefully. - - Args: - data: Input data of any type to convert to string. Can be: - - Dictionary - - List/Tuple - - String - - None - - Any other type that can be converted via str() - - Returns: - str: A formatted string representation of the input data. 
- - Dictionaries are formatted as "key: value" pairs separated by commas - - Lists/tuples are comma-separated - - None returns empty string - - Other types are converted using str() - - Examples: - >>> any_to_str({'a': 1, 'b': 2}) - 'a: 1, b: 2' - >>> any_to_str([1, 2, 3]) - '1, 2, 3' - >>> any_to_str(None) - '' - """ - try: - if isinstance(data, dict): - # Format dictionary with newlines and indentation - items = [] - for k, v in data.items(): - value = any_to_str(v) - items.append(f"{k}: {value}") - return "\n".join(items) - - elif isinstance(data, (list, tuple)): - # Format sequences with brackets and proper spacing - items = [any_to_str(x) for x in data] - if len(items) == 0: - return "[]" if isinstance(data, list) else "()" - return ( - f"[{', '.join(items)}]" - if isinstance(data, list) - else f"({', '.join(items)})" - ) - - elif data is None: - return "None" - - else: - # Handle strings and other types - if isinstance(data, str): - return f'"{data}"' - return str(data) - - except Exception as e: - return f"Error converting data: {str(e)}" - - -def main(): - # Example 1: Dictionary - print("Dictionary:") - print( - any_to_str( - { - "name": "John", - "age": 30, - "hobbies": ["reading", "hiking"], - } - ) - ) - - print("\nNested Dictionary:") - print( - any_to_str( - { - "user": { - "id": 123, - "details": {"city": "New York", "active": True}, - }, - "data": [1, 2, 3], - } - ) - ) - - print("\nList and Tuple:") - print(any_to_str([1, "text", None, (1, 2)])) - print(any_to_str((True, False, None))) - - print("\nEmpty Collections:") - print(any_to_str([])) - print(any_to_str({})) - - -if __name__ == "__main__": - main() From 54dfa692a85136e4010529c6f0754664e18d18f7 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 16 Nov 2024 12:07:17 -0800 Subject: [PATCH 08/19] [LOGGING CLEANUP] --- swarms/agents/ape_agent.py | 4 +- swarms/agents/create_agents_from_yaml.py | 5 +- swarms/cli/onboarding_process.py | 5 +- swarms/prompts/prompt.py | 4 +- swarms/structs/agent.py | 4 +- swarms/structs/agent_router.py | 4 +- swarms/structs/agents_available.py | 18 +++- swarms/structs/auto_swarm_builder.py | 3 + swarms/structs/concurrent_workflow.py | 4 +- swarms/structs/hiearchical_swarm.py | 3 +- swarms/structs/majority_voting.py | 16 +--- swarms/structs/mixture_of_agents.py | 4 +- swarms/structs/spreadsheet_swarm.py | 4 +- swarms/structs/swarm_matcher.py | 4 +- swarms/structs/swarm_router.py | 4 +- swarms/structs/swarming_architectures.py | 4 +- swarms/structs/tree_swarm.py | 6 +- swarms/telemetry/auto_upgrade_swarms.py | 15 +++- swarms/telemetry/capture_sys_data.py | 5 +- swarms/tools/tool_registry.py | 4 +- swarms/utils/__init__.py | 2 - swarms/utils/add_docs_to_agents.py | 10 ++- swarms/utils/concurrent_utils.py | 49 ---------- swarms/utils/decorators.py | 80 ----------------- swarms/utils/loguru_logger.py | 32 ++++--- swarms/utils/pandas_utils.py | 4 +- swarms/utils/report_error_loguru.py | 108 ----------------------- swarms/utils/run_on_cpu.py | 5 +- swarms/utils/swarm_output_handling.py | 34 ------- swarms/utils/swarm_reliability_checks.py | 7 +- swarms/utils/wrapper_clusterop.py | 5 +- 31 files changed, 129 insertions(+), 327 deletions(-) delete mode 100644 swarms/utils/concurrent_utils.py delete mode 100644 swarms/utils/decorators.py delete mode 100644 swarms/utils/report_error_loguru.py delete mode 100644 swarms/utils/swarm_output_handling.py diff --git a/swarms/agents/ape_agent.py b/swarms/agents/ape_agent.py index 164813cc..420b7aaa 100644 --- a/swarms/agents/ape_agent.py +++ 
b/swarms/agents/ape_agent.py @@ -1,6 +1,5 @@ from typing import Any -from loguru import logger from tenacity import retry, stop_after_attempt, wait_exponential from swarms.prompts.prompt_generator import ( @@ -9,6 +8,9 @@ from swarms.prompts.prompt_generator import ( from swarms.prompts.prompt_generator_optimizer import ( prompt_generator_sys_prompt, ) +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="ape_agent") @retry( diff --git a/swarms/agents/create_agents_from_yaml.py b/swarms/agents/create_agents_from_yaml.py index ef1e8f18..7e6e056b 100644 --- a/swarms/agents/create_agents_from_yaml.py +++ b/swarms/agents/create_agents_from_yaml.py @@ -2,12 +2,15 @@ import os from typing import Any, Callable, Dict, List, Tuple, Union import yaml -from loguru import logger +from swarms.utils.loguru_logger import initialize_logger from swarms.structs.agent import Agent from swarms.structs.swarm_router import SwarmRouter +logger = initialize_logger(log_folder="create_agents_from_yaml") + + def create_agents_from_yaml( model: Callable = None, yaml_file: str = "agents.yaml", diff --git a/swarms/cli/onboarding_process.py b/swarms/cli/onboarding_process.py index 99018b86..17971269 100644 --- a/swarms/cli/onboarding_process.py +++ b/swarms/cli/onboarding_process.py @@ -3,13 +3,16 @@ import os import time from typing import Dict -from loguru import logger +from swarms.utils.loguru_logger import initialize_logger + from swarms.telemetry.capture_sys_data import ( capture_system_data, log_agent_data, ) +logger = initialize_logger(log_folder="onboarding_process") + class OnboardingProcess: """ diff --git a/swarms/prompts/prompt.py b/swarms/prompts/prompt.py index b892f4f1..65f3e191 100644 --- a/swarms/prompts/prompt.py +++ b/swarms/prompts/prompt.py @@ -4,7 +4,6 @@ import time import uuid from typing import Any, Callable, List -from loguru import logger from pydantic import ( BaseModel, Field, @@ -17,6 +16,9 @@ from swarms.telemetry.capture_sys_data import ( log_agent_data, ) from swarms.tools.base_tool import BaseTool +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(file_name="prompts") class Prompt(BaseModel): diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 20f41ae6..a59e0d4e 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -26,7 +26,6 @@ from clusterops import ( execute_on_gpu, execute_with_cpu_cores, ) -from loguru import logger from pydantic import BaseModel from swarm_models.tiktoken_wrapper import TikTokenizer from termcolor import colored @@ -54,6 +53,9 @@ from swarms.utils.data_to_text import data_to_text from swarms.utils.file_processing import create_file_in_folder from swarms.utils.pdf_to_text import pdf_to_text from swarms.artifacts.main_artifact import Artifact +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="agents") # Utils diff --git a/swarms/structs/agent_router.py b/swarms/structs/agent_router.py index e92926e3..6cf3c094 100644 --- a/swarms/structs/agent_router.py +++ b/swarms/structs/agent_router.py @@ -1,10 +1,12 @@ from typing import List, Optional import chromadb -from loguru import logger from tenacity import retry, stop_after_attempt, wait_exponential from typing import Union, Callable, Any from swarms import Agent +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="agent_router") class AgentRouter: diff --git a/swarms/structs/agents_available.py 
b/swarms/structs/agents_available.py index 0ed63c5a..f676877d 100644 --- a/swarms/structs/agents_available.py +++ b/swarms/structs/agents_available.py @@ -1,6 +1,22 @@ +import os from typing import List, Any -from loguru import logger from swarms.structs.agent import Agent +from loguru import logger +import uuid + +WORKSPACE_DIR = os.getenv("WORKSPACE_DIR") +uuid_for_log = str(uuid.uuid4()) +logger.add( + os.path.join( + WORKSPACE_DIR, + "agents_available", + f"agents-available-{uuid_for_log}.log", + ), + level="INFO", + colorize=True, + backtrace=True, + diagnose=True, +) def get_agent_name(agent: Any) -> str: diff --git a/swarms/structs/auto_swarm_builder.py b/swarms/structs/auto_swarm_builder.py index 177cfdc4..9009bf8a 100644 --- a/swarms/structs/auto_swarm_builder.py +++ b/swarms/structs/auto_swarm_builder.py @@ -8,6 +8,9 @@ from swarm_models import OpenAIFunctionCaller, OpenAIChat from swarms.structs.agent import Agent from swarms.structs.swarm_router import SwarmRouter +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="auto_swarm_builder") class AgentConfig(BaseModel): diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py index 02102188..74945914 100644 --- a/swarms/structs/concurrent_workflow.py +++ b/swarms/structs/concurrent_workflow.py @@ -5,7 +5,6 @@ from concurrent.futures import ThreadPoolExecutor from datetime import datetime from typing import Any, Dict, List, Optional, Union -from loguru import logger from pydantic import BaseModel, Field from tenacity import retry, stop_after_attempt, wait_exponential @@ -19,6 +18,9 @@ from clusterops import ( execute_on_multiple_gpus, list_available_gpus, ) +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="concurrent_workflow") class AgentOutputSchema(BaseModel): diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py index 82fa6ba2..4eac5c78 100644 --- a/swarms/structs/hiearchical_swarm.py +++ b/swarms/structs/hiearchical_swarm.py @@ -1,7 +1,7 @@ from typing import List, Any -from loguru import logger from pydantic import BaseModel, Field +from swarms.utils.loguru_logger import initialize_logger from swarms.structs.base_swarm import BaseSwarm from swarms.structs.agent import Agent from swarms.structs.concat import concat_strings @@ -9,6 +9,7 @@ from swarms.structs.agent_registry import AgentRegistry from swarm_models.base_llm import BaseLLM from swarms.structs.conversation import Conversation +logger = initialize_logger(log_folder="hiearchical_swarm") # Example usage: HIEARCHICAL_AGENT_SYSTEM_PROMPT = """ diff --git a/swarms/structs/majority_voting.py b/swarms/structs/majority_voting.py index addf058e..18738aa0 100644 --- a/swarms/structs/majority_voting.py +++ b/swarms/structs/majority_voting.py @@ -1,26 +1,14 @@ import concurrent.futures import re -import sys from collections import Counter from typing import Any, Callable, List, Optional -from loguru import logger - from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation from swarms.utils.file_processing import create_file +from swarms.utils.loguru_logger import initialize_logger -# Configure loguru logger with advanced settings -logger.remove() -logger.add( - sys.stderr, - colorize=True, - format="{time} {message}", - backtrace=True, - diagnose=True, - enqueue=True, - catch=True, -) +logger = initialize_logger(log_folder="majority_voting") def extract_last_python_code_block(text): 
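The hunks above and below all apply one uniform refactor: each module drops its direct `from loguru import logger` import and instead builds a module-scoped logger with `initialize_logger(log_folder=...)`, so every subsystem logs into its own folder under the workspace directory. A minimal call-site sketch follows; it assumes the `WORKSPACE_DIR` environment variable is set (the new `initialize_logger` reads it and creates the folder tree), and both the workspace path and the module name "my_module" are illustrative, not names used by this patch.

import os

# initialize_logger resolves all paths against WORKSPACE_DIR, so it must be set first.
# The value here is an assumption for the sketch.
os.environ.setdefault("WORKSPACE_DIR", "agent_workspace")

from swarms.utils.loguru_logger import initialize_logger

# Per the implementation added in this patch series, records land in
# <WORKSPACE_DIR>/my_module/my_module_<uuid>.log with 10-day retention and zip compression.
logger = initialize_logger(log_folder="my_module")
logger.info("module initialized")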
diff --git a/swarms/structs/mixture_of_agents.py b/swarms/structs/mixture_of_agents.py
index feb93eaf..7c97afd2 100644
--- a/swarms/structs/mixture_of_agents.py
+++ b/swarms/structs/mixture_of_agents.py
@@ -2,13 +2,15 @@ import asyncio
 import time
 from typing import Any, Dict, List, Optional
-from loguru import logger
 from pydantic import BaseModel, Field
 from swarms.structs.agent import Agent
 from swarms.telemetry.capture_sys_data import log_agent_data
 from swarms.schemas.agent_step_schemas import ManySteps
 from swarms.prompts.ag_prompt import aggregator_system_prompt
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="mixture_of_agents")
 
 time_stamp = time.strftime("%Y-%m-%d %H:%M:%S")
diff --git a/swarms/structs/spreadsheet_swarm.py b/swarms/structs/spreadsheet_swarm.py
index 51b022ea..e57d6a5c 100644
--- a/swarms/structs/spreadsheet_swarm.py
+++ b/swarms/structs/spreadsheet_swarm.py
@@ -6,13 +6,15 @@ import uuid
 from typing import List, Union
 
 import aiofiles
-from loguru import logger
 from pydantic import BaseModel, Field
 
 from swarms.structs.agent import Agent
 from swarms.structs.base_swarm import BaseSwarm
 from swarms.utils.file_processing import create_file_in_folder
 from swarms.telemetry.capture_sys_data import log_agent_data
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="spreadsheet_swarm")
 
 time = datetime.datetime.now().isoformat()
 uuid_hex = uuid.uuid4().hex
diff --git a/swarms/structs/swarm_matcher.py b/swarms/structs/swarm_matcher.py
index 37d75eac..c4d0711f 100644
--- a/swarms/structs/swarm_matcher.py
+++ b/swarms/structs/swarm_matcher.py
@@ -3,9 +3,11 @@ import numpy as np
 import torch
 from transformers import AutoTokenizer, AutoModel
 from pydantic import BaseModel, Field
-from loguru import logger
 import json
 from tenacity import retry, stop_after_attempt, wait_exponential
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="swarm_matcher")
 
 
 class SwarmType(BaseModel):
diff --git a/swarms/structs/swarm_router.py b/swarms/structs/swarm_router.py
index 79115b6d..1b1cc44c 100644
--- a/swarms/structs/swarm_router.py
+++ b/swarms/structs/swarm_router.py
@@ -3,7 +3,6 @@ from datetime import datetime
 from typing import Any, Callable, Dict, List, Literal, Union
 
 from doc_master import doc_master
-from loguru import logger
 from pydantic import BaseModel, Field
 from tenacity import retry, stop_after_attempt, wait_fixed
 
@@ -18,6 +17,9 @@ from swarms.structs.swarm_matcher import swarm_matcher
 from swarms.utils.wrapper_clusterop import (
     exec_callable_with_clusterops,
 )
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="swarm_router")
 
 SwarmType = Literal[
     "AgentRearrange",
diff --git a/swarms/structs/swarming_architectures.py b/swarms/structs/swarming_architectures.py
index cd87a155..ce840023 100644
--- a/swarms/structs/swarming_architectures.py
+++ b/swarms/structs/swarming_architectures.py
@@ -2,11 +2,13 @@ import asyncio
 import math
 from typing import List, Union
 
-from loguru import logger
 from pydantic import BaseModel
 
 from swarms.structs.agent import Agent
 from swarms.structs.omni_agent_types import AgentListType
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="swarming_architectures")
 
 
 # Define Pydantic schema for logging agent responses
diff --git a/swarms/structs/tree_swarm.py b/swarms/structs/tree_swarm.py
index ceb15800..56b46642 100644
--- a/swarms/structs/tree_swarm.py
+++ b/swarms/structs/tree_swarm.py
@@ -3,11 +3,13 @@ from collections import Counter
 from datetime import datetime
 from typing import Any, List, Optional
 
-from loguru import logger
 from pydantic import BaseModel, Field
 from sentence_transformers import SentenceTransformer, util
 
-from swarms import Agent
+from swarms.structs.agent import Agent
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="tree_swarm")
 
 # Pretrained model for embeddings
 embedding_model = SentenceTransformer(
diff --git a/swarms/telemetry/auto_upgrade_swarms.py b/swarms/telemetry/auto_upgrade_swarms.py
index 410b0332..d4627b95 100644
--- a/swarms/telemetry/auto_upgrade_swarms.py
+++ b/swarms/telemetry/auto_upgrade_swarms.py
@@ -1,9 +1,10 @@
 import subprocess
-from loguru import logger
-
+from swarms.utils.loguru_logger import initialize_logger
 from swarms.telemetry.check_update import check_for_update
 
+logger = initialize_logger(log_folder="auto_upgrade_swarms")
+
 
 def auto_update():
     """auto update swarms"""
@@ -13,7 +14,15 @@ def auto_update():
             logger.info(
                 "There is a new version of swarms available! Downloading..."
             )
-            subprocess.run(["pip", "install", "-U", "swarms"])
+            try:
+                subprocess.run(
+                    ["pip", "install", "-U", "swarms"], check=True
+                )
+            except subprocess.CalledProcessError:
+                logger.info("Attempting to install with pip3...")
+                subprocess.run(
+                    ["pip3", "install", "-U", "swarms"], check=True
+                )
         else:
             logger.info("swarms is up to date!")
     except Exception as e:
diff --git a/swarms/telemetry/capture_sys_data.py b/swarms/telemetry/capture_sys_data.py
index de9bdc9b..09d94a70 100644
--- a/swarms/telemetry/capture_sys_data.py
+++ b/swarms/telemetry/capture_sys_data.py
@@ -2,10 +2,13 @@ import platform
 import socket
 import psutil
 import uuid
-from loguru import logger
 from typing import Dict
 import requests
 
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="capture_sys_data")
+
 
 def capture_system_data() -> Dict[str, str]:
     """
diff --git a/swarms/tools/tool_registry.py b/swarms/tools/tool_registry.py
index f28ed40c..385eed1b 100644
--- a/swarms/tools/tool_registry.py
+++ b/swarms/tools/tool_registry.py
@@ -1,9 +1,11 @@
 import os
 from typing import Any, Callable, Dict, List, Optional
 import time
-from loguru import logger
 from pydantic import BaseModel, Field
 from concurrent.futures import ThreadPoolExecutor, as_completed
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="tool_registry")
 
 
 class ToolMetadata(BaseModel):
diff --git a/swarms/utils/__init__.py b/swarms/utils/__init__.py
index 5c8496a8..0a825caf 100644
--- a/swarms/utils/__init__.py
+++ b/swarms/utils/__init__.py
@@ -17,7 +17,6 @@ from swarms.tools.prebuilt.math_eval import math_eval
 from swarms.utils.parse_code import extract_code_from_markdown
 from swarms.utils.pdf_to_text import pdf_to_text
 from swarms.utils.try_except_wrapper import try_except_wrapper
-from swarms.utils.concurrent_utils import execute_concurrently
 from swarms.utils.calculate_func_metrics import profile_func
 
 
@@ -37,6 +36,5 @@ __all__ = [
     "extract_code_from_markdown",
     "pdf_to_text",
     "try_except_wrapper",
-    "execute_concurrently",
     "profile_func",
 ]
diff --git a/swarms/utils/add_docs_to_agents.py b/swarms/utils/add_docs_to_agents.py
index 8dbc1df3..85e3076c 100644
--- a/swarms/utils/add_docs_to_agents.py
+++ b/swarms/utils/add_docs_to_agents.py
@@ -1,10 +1,14 @@
-from typing import Any, List, Optional, Union
+from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path -from loguru import logger +from typing import Any, List, Optional, Union + from doc_master import doc_master -from concurrent.futures import ThreadPoolExecutor, as_completed from tenacity import retry, stop_after_attempt, wait_exponential +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="add_docs_to_agents") + @retry( stop=stop_after_attempt(3), diff --git a/swarms/utils/concurrent_utils.py b/swarms/utils/concurrent_utils.py deleted file mode 100644 index becad4ca..00000000 --- a/swarms/utils/concurrent_utils.py +++ /dev/null @@ -1,49 +0,0 @@ -import concurrent.futures -from typing import List, Tuple, Any, Dict, Union, Callable - - -def execute_concurrently( - callable_functions: List[ - Tuple[Callable, Tuple[Any, ...], Dict[str, Any]] - ], - max_workers: int = 5, -) -> List[Union[Any, Exception]]: - """ - Executes callable functions concurrently using multithreading. - - Parameters: - - callable_functions: A list of tuples, each containing the callable function and its arguments. - For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})] - - max_workers: The maximum number of threads to use. - - Returns: - - results: A list of results returned by the callable functions. If an error occurs in any function, - the exception object will be placed at the corresponding index in the list. - """ - results = [None] * len(callable_functions) - - def worker( - fn: Callable, - args: Tuple[Any, ...], - kwargs: Dict[str, Any], - index: int, - ) -> None: - try: - result = fn(*args, **kwargs) - results[index] = result - except Exception as e: - results[index] = e - - with concurrent.futures.ThreadPoolExecutor( - max_workers=max_workers - ) as executor: - futures = [] - for i, (fn, args, kwargs) in enumerate(callable_functions): - futures.append( - executor.submit(worker, fn, args, kwargs, i) - ) - - # Wait for all threads to complete - concurrent.futures.wait(futures) - - return results diff --git a/swarms/utils/decorators.py b/swarms/utils/decorators.py deleted file mode 100644 index 1d3d450c..00000000 --- a/swarms/utils/decorators.py +++ /dev/null @@ -1,80 +0,0 @@ -import functools -import logging -import threading -import warnings - - -def retry_decorator(max_retries: int = 5): - """ - Decorator that retries a function a specified number of times if an exception occurs. - - Args: - max_retries (int): The maximum number of times to retry the function. - - Returns: - function: The decorated function. - - """ - - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - for _ in range(max_retries): - try: - return func(*args, **kwargs) - except Exception as error: - logging.error( - f" Error in {func.__name__}:" - f" {str(error)} Retrying ...." 
- ) - return func(*args, **kwargs) - - return wrapper - - return decorator - - -def singleton_decorator(cls): - instances = {} - - def wrapper(*args, **kwargs): - if cls not in instances: - instances[cls] = cls(*args, **kwargs) - return instances[cls] - - return wrapper - - -def synchronized_decorator(func): - func.__lock__ = threading.Lock() - - def wrapper(*args, **kwargs): - with func.__lock__: - return func(*args, **kwargs) - - return wrapper - - -def deprecated_decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - warnings.warn( - f"{func.__name__} is deprecated", - category=DeprecationWarning, - ) - return func(*args, **kwargs) - - return wrapper - - -def validate_inputs_decorator(validator): - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - if not validator(*args, **kwargs): - raise ValueError("Invalid Inputs") - return func(*args, **kwargs) - - return wrapper - - return decorator diff --git a/swarms/utils/loguru_logger.py b/swarms/utils/loguru_logger.py index b53ec379..cf1fbd65 100644 --- a/swarms/utils/loguru_logger.py +++ b/swarms/utils/loguru_logger.py @@ -1,23 +1,33 @@ import os +import uuid from loguru import logger -WORKSPACE_DIR = os.getenv("WORKSPACE_DIR") +def initialize_logger(log_folder: str = "logs"): -logger.add( - os.path.join(WORKSPACE_DIR, "swarms.log"), - level="INFO", - colorize=True, - backtrace=True, - diagnose=True, -) + WORKSPACE_DIR = os.getenv("WORKSPACE_DIR") + if not os.path.exists(WORKSPACE_DIR): + os.makedirs(WORKSPACE_DIR) + # Create a folder within the workspace_dir + log_folder_path = os.path.join(WORKSPACE_DIR, log_folder) + if not os.path.exists(log_folder_path): + os.makedirs(log_folder_path) -def loguru_logger(file_path: str = "swarms.log"): - return logger.add( - os.path.join(WORKSPACE_DIR, file_path), + # Generate a unique identifier for the log file + uuid_for_log = str(uuid.uuid4()) + log_file_path = os.path.join( + log_folder_path, f"{log_folder}_{uuid_for_log}.log" + ) + + logger.add( + log_file_path, level="INFO", colorize=True, backtrace=True, diagnose=True, + enqueue=True, + retention="10 days", + compression="zip", ) + return logger diff --git a/swarms/utils/pandas_utils.py b/swarms/utils/pandas_utils.py index dcf5354e..358c36e6 100644 --- a/swarms/utils/pandas_utils.py +++ b/swarms/utils/pandas_utils.py @@ -1,11 +1,13 @@ import subprocess from typing import Any, Dict, List -from loguru import logger +from swarms.utils.loguru_logger import initialize_logger + from pydantic import BaseModel from swarms.structs.agent import Agent +logger = initialize_logger(log_folder="pandas_utils") try: import pandas as pd diff --git a/swarms/utils/report_error_loguru.py b/swarms/utils/report_error_loguru.py deleted file mode 100644 index 39ec8b5f..00000000 --- a/swarms/utils/report_error_loguru.py +++ /dev/null @@ -1,108 +0,0 @@ -import datetime -import os -import platform -import traceback - -from loguru import logger - -# Remove default logger configuration -logger.remove() - -# Define the path for the log folder -log_folder = os.path.join(os.getcwd(), "errors") - -try: - # Create the log folder if it doesn't exist - os.makedirs(log_folder, exist_ok=True) -except PermissionError: - logger.error(f"Permission denied: '{log_folder}'") -except Exception as e: - logger.error( - f"An error occurred while creating the log folder: {e}" - ) -else: - # If the folder was created successfully, add a new logger - logger.add( - os.path.join(log_folder, "error_{time}.log"), - level="ERROR", - format="{time} - 
{level} - {message}", - ) - - -def report_error(error: Exception): - """ - Logs an error message and provides instructions for reporting the issue on Swarms GitHub - or joining the community on Discord for real-time support. - - Args: - error (Exception): The exception that occurred. - - Returns: - None - - Raises: - None - """ - # Gather extensive context information - context_info = { - "exception_type": type(error).__name__, - "exception_message": str(error), - "stack_trace": traceback.format_exc(), - "timestamp": datetime.datetime.now().isoformat(), - "python_version": platform.python_version(), - "platform": platform.platform(), - "machine": platform.machine(), - "processor": platform.processor(), - "user": os.getenv("USER") or os.getenv("USERNAME"), - "current_working_directory": os.getcwd(), - } - - error_message = ( - f"\n" - f"------------------Error: {error}-----------------------\n" - f"#########################################\n" - f"# #\n" - f"# ERROR DETECTED! #\n" - f"# #\n" - f"# #\n" - f"# #\n" - f"# #\n" - f"#########################################\n" - f"\n" - f"Error Message: {context_info['exception_message']} ({context_info['exception_type']})\n" - f"\n" - f"Stack Trace:\n{context_info['stack_trace']}\n" - f"\n" - f"Context Information:\n" - f"-----------------------------------------\n" - f"Timestamp: {context_info['timestamp']}\n" - f"Python Version: {context_info['python_version']}\n" - f"Platform: {context_info['platform']}\n" - f"Machine: {context_info['machine']}\n" - f"Processor: {context_info['processor']}\n" - f"User: {context_info['user']}\n" - f"Current Working Directory: {context_info['current_working_directory']}\n" - f"-----------------------------------------\n" - f"\n" - "Support" - f"\n" - f"\n" - f"To report this issue, please visit the Swarms GitHub Issues page:\n" - f"https://github.com/kyegomez/swarms/issues\n" - f"\n" - f"You can also join the Swarms community on Discord for real-time support:\n" - f"https://discord.com/servers/agora-999382051935506503\n" - f"\n" - f"#########################################\n" - f"-----------------------------------------\n" - ) - - return logger.error(error_message) - - -# # Example usage: -# try: -# # Simulate an error -# raise ValueError("An example error") -# except Exception as e: -# report_error(e) diff --git a/swarms/utils/run_on_cpu.py b/swarms/utils/run_on_cpu.py index 742792b0..9573135d 100644 --- a/swarms/utils/run_on_cpu.py +++ b/swarms/utils/run_on_cpu.py @@ -1,9 +1,12 @@ import os import psutil from typing import Callable, Any -from loguru import logger import functools +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="run_on_cpu") + def run_on_cpu(func: Callable) -> Callable: """ diff --git a/swarms/utils/swarm_output_handling.py b/swarms/utils/swarm_output_handling.py deleted file mode 100644 index d7549100..00000000 --- a/swarms/utils/swarm_output_handling.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import Union, Dict, List -from swarms.artifacts.main_artifact import Artifact - - -def handle_artifact_outputs( - file_path: str, - data: Union[str, Dict, List], - output_type: str = "txt", - folder_path: str = "./artifacts", -) -> str: - """ - Handle different types of data and create files in various formats. 
- - Args: - file_path: Path where the file should be saved - data: Input data that can be string, dict or list - output_type: Type of output file (txt, md, pdf, csv, json) - folder_path: Folder to save artifacts - - Returns: - str: Path to the created file - """ - # Create artifact with appropriate file type - artifact = Artifact( - folder_path=folder_path, - file_path=file_path, - file_type=output_type, - contents=data, - edit_count=0, - ) - - # Save the file - # artifact.save() - artifact.save_as(output_format=output_type) diff --git a/swarms/utils/swarm_reliability_checks.py b/swarms/utils/swarm_reliability_checks.py index 46145859..4af895d1 100644 --- a/swarms/utils/swarm_reliability_checks.py +++ b/swarms/utils/swarm_reliability_checks.py @@ -1,6 +1,9 @@ -from loguru import logger -from typing import List, Union, Callable, Optional +from typing import Callable, List, Optional, Union + from swarms.structs.agent import Agent +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="swarm_reliability_checks") def reliability_check( diff --git a/swarms/utils/wrapper_clusterop.py b/swarms/utils/wrapper_clusterop.py index 3ee8d3e4..2343e52f 100644 --- a/swarms/utils/wrapper_clusterop.py +++ b/swarms/utils/wrapper_clusterop.py @@ -1,13 +1,16 @@ import os from typing import Any + from clusterops import ( execute_on_gpu, execute_on_multiple_gpus, execute_with_cpu_cores, list_available_gpus, ) -from loguru import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="clusterops_wrapper") def exec_callable_with_clusterops( From 14fc0a9f8ad005a7bc1a6857422d1bf56fd864e9 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 17 Nov 2024 08:51:43 -0800 Subject: [PATCH 09/19] [LOGGING FIXES] --- swarms/agents/tool_agent.py | 4 +- swarms/artifacts/main_artifact.py | 4 +- swarms/structs/__init__.py | 37 ++- swarms/structs/base_swarm.py | 4 +- swarms/structs/base_workflow.py | 4 +- swarms/structs/company.py | 5 +- swarms/structs/federated_swarm.py | 393 ----------------------- swarms/structs/graph_workflow.py | 4 +- swarms/structs/multi_process_workflow.py | 105 +++++- swarms/structs/rearrange.py | 4 +- swarms/structs/round_robin.py | 4 +- swarms/structs/sequential_workflow.py | 4 +- swarms/structs/swarm_arange.py | 4 +- swarms/structs/swarm_load_balancer.py | 4 +- swarms/structs/swarm_net.py | 4 +- swarms/structs/swarm_registry.py | 4 +- swarms/structs/task.py | 4 +- swarms/tools/base_tool.py | 4 +- swarms/tools/func_calling_executor.py | 3 +- swarms/tools/pydantic_to_json.py | 4 +- swarms/tools/tool_parse_exec.py | 4 +- swarms/utils/async_file_creation.py | 46 +++ swarms/utils/calculate_func_metrics.py | 102 +++++- swarms/utils/file_processing.py | 127 +++++--- swarms/utils/parse_code.py | 20 +- swarms/utils/profile_func_2.py | 98 ------ 26 files changed, 426 insertions(+), 574 deletions(-) delete mode 100644 swarms/structs/federated_swarm.py diff --git a/swarms/agents/tool_agent.py b/swarms/agents/tool_agent.py index d05417f1..34a316b3 100644 --- a/swarms/agents/tool_agent.py +++ b/swarms/agents/tool_agent.py @@ -2,7 +2,9 @@ from typing import Any, Optional, Callable from swarms.structs.agent import Agent from swarms.tools.json_former import Jsonformer -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="tool_agent") class ToolAgent(Agent): diff --git a/swarms/artifacts/main_artifact.py 
b/swarms/artifacts/main_artifact.py index d2009476..5eaa939e 100644 --- a/swarms/artifacts/main_artifact.py +++ b/swarms/artifacts/main_artifact.py @@ -1,11 +1,13 @@ import time -from swarms.utils.loguru_logger import logger import os import json from typing import List, Union, Dict, Any from pydantic import BaseModel, Field, validator from datetime import datetime from swarms.utils.file_processing import create_file_in_folder +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="main_artifact") class FileVersion(BaseModel): diff --git a/swarms/structs/__init__.py b/swarms/structs/__init__.py index e391a1d1..a660ed1a 100644 --- a/swarms/structs/__init__.py +++ b/swarms/structs/__init__.py @@ -1,4 +1,5 @@ from swarms.structs.agent import Agent +from swarms.structs.agents_available import showcase_available_agents from swarms.structs.auto_swarm import AutoSwarm, AutoSwarmRouter from swarms.structs.base_structure import BaseStructure from swarms.structs.base_swarm import BaseSwarm @@ -19,15 +20,31 @@ from swarms.structs.majority_voting import ( parse_code_completion, ) from swarms.structs.message import Message - from swarms.structs.mixture_of_agents import MixtureOfAgents from swarms.structs.multi_agent_collab import MultiAgentCollaboration +from swarms.structs.multi_agent_exec import ( + run_agent_with_timeout, + run_agents_concurrently, + run_agents_concurrently_async, + run_agents_concurrently_multiprocess, + run_agents_sequentially, + run_agents_with_different_tasks, + run_agents_with_resource_monitoring, + run_agents_with_tasks_concurrently, + run_single_agent, +) from swarms.structs.queue_swarm import TaskQueueSwarm from swarms.structs.rearrange import AgentRearrange, rearrange from swarms.structs.round_robin import RoundRobinSwarm from swarms.structs.sequential_workflow import SequentialWorkflow from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm +from swarms.structs.swarm_arange import SwarmRearrange from swarms.structs.swarm_net import SwarmNetwork +from swarms.structs.swarm_router import ( + SwarmRouter, + SwarmType, + swarm_router, +) from swarms.structs.swarming_architectures import ( broadcast, circular_swarm, @@ -58,24 +75,6 @@ from swarms.structs.utils import ( find_token_in_text, parse_tasks, ) -from swarms.structs.swarm_router import ( - SwarmRouter, - SwarmType, - swarm_router, -) -from swarms.structs.swarm_arange import SwarmRearrange -from swarms.structs.multi_agent_exec import ( - run_agents_concurrently, - run_agents_concurrently_async, - run_single_agent, - run_agents_concurrently_multiprocess, - run_agents_sequentially, - run_agents_with_different_tasks, - run_agent_with_timeout, - run_agents_with_resource_monitoring, - run_agents_with_tasks_concurrently, -) -from swarms.structs.agents_available import showcase_available_agents __all__ = [ "Agent", diff --git a/swarms/structs/base_swarm.py b/swarms/structs/base_swarm.py index 2f141213..6e2242be 100644 --- a/swarms/structs/base_swarm.py +++ b/swarms/structs/base_swarm.py @@ -20,13 +20,15 @@ from swarms_memory import BaseVectorDatabase from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation from swarms.structs.omni_agent_types import AgentType -from swarms.utils.loguru_logger import logger from pydantic import BaseModel from swarms.utils.pandas_utils import ( dict_to_dataframe, display_agents_info, pydantic_model_to_dataframe, ) +from swarms.utils.loguru_logger import initialize_logger + +logger = 
initialize_logger(log_folder="base_swarm") class BaseSwarm(ABC): diff --git a/swarms/structs/base_workflow.py b/swarms/structs/base_workflow.py index b5deb916..b75bfe2c 100644 --- a/swarms/structs/base_workflow.py +++ b/swarms/structs/base_workflow.py @@ -6,7 +6,9 @@ from termcolor import colored from swarms.structs.agent import Agent from swarms.structs.base_structure import BaseStructure from swarms.structs.task import Task -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger("base-workflow") class BaseWorkflow(BaseStructure): diff --git a/swarms/structs/company.py b/swarms/structs/company.py index ef576e80..f7fb36b7 100644 --- a/swarms/structs/company.py +++ b/swarms/structs/company.py @@ -2,8 +2,11 @@ from dataclasses import dataclass, field from typing import Dict, List, Optional, Union from swarms.structs.agent import Agent -from swarms.utils.loguru_logger import logger from swarms.structs.base_swarm import BaseSwarm +from swarms.utils.loguru_logger import initialize_logger + + +logger = initialize_logger("company-swarm") @dataclass diff --git a/swarms/structs/federated_swarm.py b/swarms/structs/federated_swarm.py deleted file mode 100644 index 6c5e09ca..00000000 --- a/swarms/structs/federated_swarm.py +++ /dev/null @@ -1,393 +0,0 @@ -from typing import List, Callable, Union, Optional -from loguru import logger -from swarms.structs.base_swarm import BaseSwarm -from queue import PriorityQueue -from concurrent.futures import ( - ThreadPoolExecutor, - as_completed, -) -import time -from pydantic import BaseModel, Field - - -class SwarmRunData(BaseModel): - """ - Pydantic model to capture metadata about each swarm's execution. - """ - - swarm_name: str - task: str - priority: int - start_time: Optional[float] = None - end_time: Optional[float] = None - duration: Optional[float] = None - status: str = "Pending" - retries: int = 0 - result: Optional[str] = None - exception: Optional[str] = None - - -class FederatedSwarmModel(BaseModel): - """ - Pydantic base model to capture and log data for the FederatedSwarm system. - """ - - task: str - swarms_data: List[SwarmRunData] = Field(default_factory=list) - - def add_swarm(self, swarm_name: str, task: str, priority: int): - swarm_data = SwarmRunData( - swarm_name=swarm_name, task=task, priority=priority - ) - self.swarms_data.append(swarm_data) - - def update_swarm_status( - self, - swarm_name: str, - status: str, - start_time: float = None, - end_time: float = None, - retries: int = 0, - result: str = None, - exception: str = None, - ): - for swarm in self.swarms_data: - if swarm.name == swarm_name: - swarm.status = status - if start_time: - swarm.start_time = start_time - if end_time: - swarm.end_time = end_time - swarm.duration = end_time - swarm.start_time - swarm.retries = retries - swarm.result = result - swarm.exception = exception - break - - -class FederatedSwarm: - def __init__( - self, - swarms: List[Union[BaseSwarm, Callable]], - max_workers: int = 4, - ): - """ - Initializes the FederatedSwarm with a list of swarms or callable objects and - sets up a priority queue and thread pool for concurrency. - - Args: - swarms (List[Union[BaseSwarm, Callable]]): A list of swarms (BaseSwarm) or callable objects. - max_workers (int): The maximum number of concurrent workers (threads) to run swarms in parallel. 
- """ - self.swarms = PriorityQueue() - self.max_workers = max_workers - self.thread_pool = ThreadPoolExecutor( - max_workers=self.max_workers - ) - self.task_queue = [] - self.future_to_swarm = {} - self.results = {} - self.validate_swarms(swarms) - - def init_metadata(self, task: str): - """ - Initializes the Pydantic base model to capture metadata about the current task and swarms. - """ - self.metadata = FederatedSwarmModel(task=task) - for priority, swarm in list(self.swarms.queue): - swarm_name = ( - swarm.__class__.__name__ - if hasattr(swarm, "__class__") - else str(swarm) - ) - self.metadata.add_swarm( - swarm_name=swarm_name, task=task, priority=priority - ) - logger.info(f"Metadata initialized for task '{task}'.") - - def validate_swarms( - self, swarms: List[Union[BaseSwarm, Callable]] - ): - """ - Validates and adds swarms to the priority queue, ensuring each swarm has a `run(task)` method. - - Args: - swarms (List[Union[BaseSwarm, Callable]]): List of swarms with an optional priority value. - """ - for swarm, priority in swarms: - if not callable(swarm): - raise TypeError(f"{swarm} is not callable.") - - if hasattr(swarm, "run"): - logger.info(f"{swarm} has a 'run' method.") - else: - raise AttributeError( - f"{swarm} does not have a 'run(task)' method." - ) - - self.swarms.put((priority, swarm)) - logger.info( - f"Swarm {swarm} added with priority {priority}." - ) - - def run_parallel( - self, - task: str, - timeout: Optional[float] = None, - retries: int = 0, - ): - """ - Runs all swarms in parallel with prioritization and optional timeout. - - Args: - task (str): The task to be passed to the `run` method of each swarm. - timeout (Optional[float]): Maximum time allowed for each swarm to run. - retries (int): Number of retries allowed for failed swarms. - """ - logger.info( - f"Running task '{task}' in parallel with timeout: {timeout}, retries: {retries}" - ) - self.init_metadata(task) - - while not self.swarms.empty(): - priority, swarm = self.swarms.get() - swarm_name = ( - swarm.__class__.__name__ - if hasattr(swarm, "__class__") - else str(swarm) - ) - future = self.thread_pool.submit( - self._run_with_retry, - swarm, - task, - retries, - timeout, - swarm_name, - ) - self.future_to_swarm[future] = swarm - - for future in as_completed(self.future_to_swarm): - swarm = self.future_to_swarm[future] - try: - result = future.result() - swarm_name = ( - swarm.__class__.__name__ - if hasattr(swarm, "__class__") - else str(swarm) - ) - self.metadata.update_swarm_status( - swarm_name=swarm_name, - status="Completed", - result=result, - ) - logger.info( - f"Swarm {swarm_name} completed successfully." - ) - except Exception as e: - swarm_name = ( - swarm.__class__.__name__ - if hasattr(swarm, "__class__") - else str(swarm) - ) - self.metadata.update_swarm_status( - swarm_name=swarm_name, - status="Failed", - exception=str(e), - ) - logger.error(f"Swarm {swarm_name} failed: {e}") - self.results[swarm] = "Failed" - - def run_sequentially( - self, - task: str, - retries: int = 0, - timeout: Optional[float] = None, - ): - """ - Runs all swarms sequentially in order of priority. - - Args: - task (str): The task to pass to the `run` method of each swarm. - retries (int): Number of retries for failed swarms. - timeout (Optional[float]): Optional time limit for each swarm. - """ - logger.info(f"Running task '{task}' sequentially.") - - while not self.swarms.empty(): - priority, swarm = self.swarms.get() - try: - logger.info( - f"Running swarm {swarm} with priority {priority}." 
- ) - self._run_with_retry(swarm, task, retries, timeout) - logger.info(f"Swarm {swarm} completed successfully.") - except Exception as e: - logger.error(f"Swarm {swarm} failed with error: {e}") - - def _run_with_retry( - self, - swarm: Union[BaseSwarm, Callable], - task: str, - retries: int, - timeout: Optional[float], - swarm_name: str, - ): - """ - Helper function to run a swarm with a retry mechanism and optional timeout. - - Args: - swarm (Union[BaseSwarm, Callable]): The swarm to run. - task (str): The task to pass to the swarm. - retries (int): The number of retries allowed for the swarm in case of failure. - timeout (Optional[float]): Maximum time allowed for the swarm to run. - swarm_name (str): Name of the swarm (used for metadata). - """ - attempts = 0 - start_time = time.time() - while attempts <= retries: - try: - logger.info( - f"Running swarm {swarm}. Attempt: {attempts + 1}" - ) - self.metadata.update_swarm_status( - swarm_name=swarm_name, - status="Running", - start_time=start_time, - ) - if hasattr(swarm, "run"): - if timeout: - start_time = time.time() - swarm.run(task) - duration = time.time() - start_time - if duration > timeout: - raise TimeoutError( - f"Swarm {swarm} timed out after {duration:.2f}s." - ) - else: - swarm.run(task) - else: - swarm(task) - end_time = time.time() - self.metadata.update_swarm_status( - swarm_name=swarm_name, - status="Completed", - end_time=end_time, - retries=attempts, - ) - return "Success" - except Exception as e: - logger.error(f"Swarm {swarm} failed: {e}") - attempts += 1 - if attempts > retries: - end_time = time.time() - self.metadata.update_swarm_status( - swarm_name=swarm_name, - status="Failed", - end_time=end_time, - retries=attempts, - exception=str(e), - ) - logger.error(f"Swarm {swarm} exhausted retries.") - raise - - def add_swarm( - self, swarm: Union[BaseSwarm, Callable], priority: int - ): - """ - Adds a new swarm to the FederatedSwarm at runtime. - - Args: - swarm (Union[BaseSwarm, Callable]): The swarm to add. - priority (int): The priority level for the swarm. - """ - self.swarms.put((priority, swarm)) - logger.info( - f"Swarm {swarm} added dynamically with priority {priority}." - ) - - def queue_task(self, task: str): - """ - Adds a task to the internal task queue for batch processing. - - Args: - task (str): The task to queue. - """ - self.task_queue.append(task) - logger.info(f"Task '{task}' added to the queue.") - - def process_task_queue(self): - """ - Processes all tasks in the task queue. - """ - for task in self.task_queue: - logger.info(f"Processing task: {task}") - self.run_parallel(task) - self.task_queue = [] - - def log_swarm_results(self): - """ - Logs the results of all swarms after execution. - """ - logger.info("Logging swarm results...") - for swarm, result in self.results.items(): - logger.info(f"Swarm {swarm}: {result}") - - def get_swarm_status(self) -> dict: - """ - Retrieves the status of each swarm (completed, running, failed). - - Returns: - dict: Dictionary containing swarm statuses. - """ - status = {} - for future, swarm in self.future_to_swarm.items(): - if future.done(): - status[swarm] = "Completed" - elif future.running(): - status[swarm] = "Running" - else: - status[swarm] = "Failed" - return status - - def cancel_running_swarms(self): - """ - Cancels all currently running swarms by shutting down the thread pool. 
- """ - logger.warning("Cancelling all running swarms...") - self.thread_pool.shutdown(wait=False) - logger.info("All running swarms cancelled.") - - -# Example Usage: - - -# class ExampleSwarm(BaseSwarm): -# def run(self, task: str): -# logger.info(f"ExampleSwarm is processing task: {task}") - - -# def example_callable(task: str): -# logger.info(f"Callable is processing task: {task}") - - -# if __name__ == "__main__": -# swarms = [(ExampleSwarm(), 1), (example_callable, 2)] -# federated_swarm = FederatedSwarm(swarms) - -# # Run in parallel -# federated_swarm.run_parallel( -# "Process data", timeout=10, retries=3 -# ) - -# # Run sequentially -# federated_swarm.run_sequentially("Process data sequentially") - -# # Log results -# federated_swarm.log_swarm_results() - -# # Get status of swarms -# status = federated_swarm.get_swarm_status() -# logger.info(f"Swarm statuses: {status}") - -# # Cancel running swarms (if needed) -# # federated_swarm.cancel_running_swarms() diff --git a/swarms/structs/graph_workflow.py b/swarms/structs/graph_workflow.py index 989175b7..803a9643 100644 --- a/swarms/structs/graph_workflow.py +++ b/swarms/structs/graph_workflow.py @@ -5,7 +5,9 @@ import networkx as nx from pydantic.v1 import BaseModel, Field, validator from swarms.structs.agent import Agent # noqa: F401 -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="graph_workflow") class NodeType(str, Enum): diff --git a/swarms/structs/multi_process_workflow.py b/swarms/structs/multi_process_workflow.py index 44051d0a..7b04c10e 100644 --- a/swarms/structs/multi_process_workflow.py +++ b/swarms/structs/multi_process_workflow.py @@ -1,9 +1,12 @@ from multiprocessing import Manager, Pool, cpu_count -from typing import Sequence, Union, Callable +from typing import Sequence, Union, Callable, List +from concurrent.futures import ThreadPoolExecutor, as_completed from swarms.structs.agent import Agent from swarms.structs.base_workflow import BaseWorkflow -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="multi_process_workflow") class MultiProcessWorkflow(BaseWorkflow): @@ -13,7 +16,7 @@ class MultiProcessWorkflow(BaseWorkflow): Args: max_workers (int): The maximum number of workers to use for parallel processing. autosave (bool): Flag indicating whether to automatically save the workflow. - tasks (List[Task]): A list of Task objects representing the workflow tasks. + agents (List[Union[Agent, Callable]]): A list of Agent objects or callable functions representing the workflow tasks. *args: Additional positional arguments. **kwargs: Additional keyword arguments. @@ -132,7 +135,7 @@ class MultiProcessWorkflow(BaseWorkflow): callback=results_list.append, timeout=task.timeout, ) - for agent in self.agent + for agent in self.agents ] # Wait for all jobs to complete @@ -145,3 +148,97 @@ class MultiProcessWorkflow(BaseWorkflow): except Exception as error: logger.error(f"Error in run: {error}") return None + + async def async_run(self, task: str, *args, **kwargs): + """Asynchronously run the workflow. + + Args: + task (Task): The task to run. + *args: Additional positional arguments for the task execution. + **kwargs: Additional keyword arguments for the task execution. + + Returns: + List[Any]: The results of all executed tasks. 
+ + """ + try: + results = [] + with ThreadPoolExecutor( + max_workers=self.max_workers + ) as executor: + futures = [ + executor.submit( + self.execute_task, task, *args, **kwargs + ) + for _ in range(len(self.agents)) + ] + for future in as_completed(futures): + result = future.result() + results.append(result) + + return results + except Exception as error: + logger.error(f"Error in async_run: {error}") + return None + + def batched_run( + self, tasks: List[str], batch_size: int = 5, *args, **kwargs + ): + """Run tasks in batches. + + Args: + tasks (List[str]): A list of tasks to run. + batch_size (int): The size of each batch. + *args: Additional positional arguments for the task execution. + **kwargs: Additional keyword arguments for the task execution. + + Returns: + List[Any]: The results of all executed tasks. + + """ + try: + results = [] + for i in range(0, len(tasks), batch_size): + batch = tasks[i : i + batch_size] + with Pool(processes=self.max_workers) as pool: + results_list = pool.map( + self.execute_task, batch, *args, **kwargs + ) + results.extend(results_list) + + return results + except Exception as error: + logger.error(f"Error in batched_run: {error}") + return None + + def concurrent_run(self, tasks: List[str], *args, **kwargs): + """Run tasks concurrently. + + Args: + tasks (List[str]): A list of tasks to run. + *args: Additional positional arguments for the task execution. + **kwargs: Additional keyword arguments for the task execution. + + Returns: + List[Any]: The results of all executed tasks. + + """ + try: + results = [] + with ThreadPoolExecutor( + max_workers=self.max_workers + ) as executor: + futures = [ + executor.submit( + self.execute_task, task, *args, **kwargs + ) + for task in tasks + ] + for future in as_completed(futures): + result = future.result() + results.append(result) + + return results + except Exception as error: + logger.error(f"Error in concurrent_run: {error}") + return None diff --git a/swarms/structs/rearrange.py b/swarms/structs/rearrange.py index 231cab16..f3d8fa8c 100644 --- a/swarms/structs/rearrange.py +++ b/swarms/structs/rearrange.py @@ -13,10 +13,12 @@ from swarms.structs.agent import Agent from swarms.structs.agents_available import showcase_available_agents from swarms.structs.base_swarm import BaseSwarm from swarms.utils.add_docs_to_agents import handle_input_docs -from swarms.utils.loguru_logger import logger from swarms.utils.wrapper_clusterop import ( exec_callable_with_clusterops, ) +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="rearrange") # Literal of output types OutputType = Literal[ diff --git a/swarms/structs/round_robin.py b/swarms/structs/round_robin.py index a2a2bd5d..19198d3d 100644 --- a/swarms/structs/round_robin.py +++ b/swarms/structs/round_robin.py @@ -2,12 +2,14 @@ import random from swarms.structs.base_swarm import BaseSwarm from typing import List from swarms.structs.agent import Agent -from swarms.utils.loguru_logger import logger from pydantic import BaseModel, Field from typing import Optional from datetime import datetime from swarms.schemas.agent_step_schemas import ManySteps import tenacity +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger("round-robin") datetime_stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py index 1fc0fe8a..b3f28936 100644 --- a/swarms/structs/sequential_workflow.py +++ 
b/swarms/structs/sequential_workflow.py @@ -1,9 +1,11 @@ from typing import List from swarms.structs.agent import Agent -from swarms.utils.loguru_logger import logger from swarms.structs.rearrange import AgentRearrange, OutputType from concurrent.futures import ThreadPoolExecutor, as_completed from swarms.structs.agents_available import showcase_available_agents +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="sequential_workflow") class SequentialWorkflow: diff --git a/swarms/structs/swarm_arange.py b/swarms/structs/swarm_arange.py index 4e57facd..efb880ad 100644 --- a/swarms/structs/swarm_arange.py +++ b/swarms/structs/swarm_arange.py @@ -3,8 +3,10 @@ import time import uuid from typing import Any, Callable, Dict, List, Optional -from swarms.utils.loguru_logger import logger from swarms.utils.any_to_str import any_to_str +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="swarm_arange") def swarm_id(): diff --git a/swarms/structs/swarm_load_balancer.py b/swarms/structs/swarm_load_balancer.py index b7cfdb94..275da2c2 100644 --- a/swarms/structs/swarm_load_balancer.py +++ b/swarms/structs/swarm_load_balancer.py @@ -5,7 +5,9 @@ from typing import Callable, List, Optional from swarms.structs.agent import Agent from swarms.structs.base_swarm import BaseSwarm -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="swarm_load_balancer") class AgentLoadBalancer(BaseSwarm): diff --git a/swarms/structs/swarm_net.py b/swarms/structs/swarm_net.py index 33be00de..dac0d0a2 100644 --- a/swarms/structs/swarm_net.py +++ b/swarms/structs/swarm_net.py @@ -19,7 +19,9 @@ from pydantic import BaseModel from swarms.structs.agent import Agent from swarms.structs.base_swarm import BaseSwarm -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger("swarm-network") # Pydantic models diff --git a/swarms/structs/swarm_registry.py b/swarms/structs/swarm_registry.py index b35aafb1..a4db3cb4 100644 --- a/swarms/structs/swarm_registry.py +++ b/swarms/structs/swarm_registry.py @@ -1,6 +1,8 @@ from pydantic.v1 import BaseModel from typing import List, Callable -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="swarm_registry") class SwarmRegistry(BaseModel): diff --git a/swarms/structs/task.py b/swarms/structs/task.py index 70293426..fc73dea9 100644 --- a/swarms/structs/task.py +++ b/swarms/structs/task.py @@ -9,8 +9,10 @@ from pydantic import BaseModel, Field from swarms.structs.agent import Agent from swarms.structs.conversation import Conversation from swarms.structs.omni_agent_types import AgentType -from swarms.utils.loguru_logger import logger from typing import Optional +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="task") class Task(BaseModel): diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py index 519ddc8c..dcb81974 100644 --- a/swarms/tools/base_tool.py +++ b/swarms/tools/base_tool.py @@ -14,7 +14,9 @@ from swarms.tools.pydantic_to_json import ( base_model_to_openai_function, multi_base_model_to_openai_function, ) -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="base_tool") ToolType = 
Union[BaseModel, Dict[str, Any], Callable[..., Any]] diff --git a/swarms/tools/func_calling_executor.py b/swarms/tools/func_calling_executor.py index 5cc0e4b5..65d95a73 100644 --- a/swarms/tools/func_calling_executor.py +++ b/swarms/tools/func_calling_executor.py @@ -1,7 +1,8 @@ import concurrent.futures from typing import Callable, Any, Dict, List -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger +logger = initialize_logger(log_folder="func_calling_executor") # def openai_tool_executor( # tools: List[Dict[str, Any]], diff --git a/swarms/tools/pydantic_to_json.py b/swarms/tools/pydantic_to_json.py index 7c64ea8e..1f6521df 100644 --- a/swarms/tools/pydantic_to_json.py +++ b/swarms/tools/pydantic_to_json.py @@ -2,7 +2,9 @@ from typing import Any, List from docstring_parser import parse from pydantic import BaseModel -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger("pydantic_to_json") def _remove_a_key(d: dict, remove_key: str) -> None: diff --git a/swarms/tools/tool_parse_exec.py b/swarms/tools/tool_parse_exec.py index 8686781a..7cc4369f 100644 --- a/swarms/tools/tool_parse_exec.py +++ b/swarms/tools/tool_parse_exec.py @@ -1,8 +1,10 @@ import json from typing import List, Any, Callable -from swarms.utils.loguru_logger import logger from swarms.utils.parse_code import extract_code_from_markdown +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger(log_folder="tool_parse_exec") def parse_and_execute_json( diff --git a/swarms/utils/async_file_creation.py b/swarms/utils/async_file_creation.py index 90832db3..6c35e95d 100644 --- a/swarms/utils/async_file_creation.py +++ b/swarms/utils/async_file_creation.py @@ -58,3 +58,49 @@ async def create_file_with_directory( os.makedirs(directory) await async_create_file(file_path, content) + + +def sync_create_file(file_path: str, content: str) -> None: + """ + Synchronously creates a file at the specified path and writes the given content to it. + + Args: + file_path (str): The path where the file will be created. + content (str): The content to be written to the file. + + Returns: + None + """ + asyncio.run(async_create_file(file_path, content)) + + +def sync_create_multiple_files( + file_paths: List[str], contents: List[str] +) -> None: + """ + Synchronously creates multiple files at the specified paths and writes the corresponding content to each file. + + Args: + file_paths (List[str]): A list of paths where the files will be created. + contents (List[str]): A list of content to be written to each file, corresponding to the file paths. + + Returns: + None + """ + asyncio.run(create_multiple_files(file_paths, contents)) + + +def sync_create_file_with_directory( + file_path: str, content: str +) -> None: + """ + Synchronously creates a file with the specified directory path and content. If the directory does not exist, it is created. + + Args: + file_path (str): The path of the file to be created, including the directory. + content (str): The content to be written to the file. 
+
+    Returns:
+        None
+    """
+    asyncio.run(create_file_with_directory(file_path, content))
diff --git a/swarms/utils/calculate_func_metrics.py b/swarms/utils/calculate_func_metrics.py
index 1aacb3a9..bfb8a528 100644
--- a/swarms/utils/calculate_func_metrics.py
+++ b/swarms/utils/calculate_func_metrics.py
@@ -1,7 +1,15 @@
 import time
+import tracemalloc
+from functools import wraps
+from typing import Any, Callable
+
 import psutil
+from loguru import logger
 from pydantic import BaseModel
-from swarms.utils.loguru_logger import logger
+
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="calculate_func_metrics")
 
 
 class FunctionMetrics(BaseModel):
@@ -70,3 +78,95 @@ def profile_func(func):
         return result, metrics
 
     return wrapper
+
+
+def profile_all(func: Callable) -> Callable:
+    """
+    A decorator to profile memory usage, CPU usage, and I/O operations
+    of a function and log the data using loguru.
+
+    It combines tracemalloc for memory profiling, psutil for CPU and I/O operations,
+    and measures execution time.
+
+    Args:
+        func (Callable): The function to be profiled.
+
+    Returns:
+        Callable: The wrapped function with profiling enabled.
+    """
+
+    @wraps(func)
+    def wrapper(*args: Any, **kwargs: Any) -> Any:
+        # Start memory tracking
+        tracemalloc.start()
+
+        # Get initial CPU stats
+        process = psutil.Process()
+        initial_cpu_times = process.cpu_times()
+
+        # Get initial I/O stats if available
+        try:
+            initial_io_counters = process.io_counters()
+            io_tracking_available = True
+        except AttributeError:
+            logger.warning(
+                "I/O counters not available on this platform."
+            )
+            io_tracking_available = False
+
+        # Start timing the function execution
+        start_time = time.time()
+
+        # Execute the function
+        result = func(*args, **kwargs)
+
+        # Stop timing
+        end_time = time.time()
+        execution_time = end_time - start_time
+
+        # Get final CPU stats
+        final_cpu_times = process.cpu_times()
+
+        # Get final I/O stats if available
+        if io_tracking_available:
+            final_io_counters = process.io_counters()
+            io_read_count = (
+                final_io_counters.read_count
+                - initial_io_counters.read_count
+            )
+            io_write_count = (
+                final_io_counters.write_count
+                - initial_io_counters.write_count
+            )
+        else:
+            io_read_count = io_write_count = 0
+
+        # Get memory usage statistics
+        snapshot = tracemalloc.take_snapshot()
+        top_stats = snapshot.statistics("lineno")
+
+        # Calculate CPU usage
+        cpu_usage = (
+            final_cpu_times.user
+            - initial_cpu_times.user
+            + final_cpu_times.system
+            - initial_cpu_times.system
+        )
+
+        # Log the data
+        logger.info(f"Execution time: {execution_time:.4f} seconds")
+        logger.info(f"CPU usage: {cpu_usage:.2f} seconds")
+        if io_tracking_available:
+            logger.info(
+                f"I/O Operations - Read: {io_read_count}, Write: {io_write_count}"
+            )
+        logger.info("Top memory usage:")
+        for stat in top_stats[:10]:
+            logger.info(stat)
+
+        # Stop memory tracking
+        tracemalloc.stop()
+
+        return result
+
+    return wrapper
diff --git a/swarms/utils/file_processing.py b/swarms/utils/file_processing.py
index e14918fd..30e5dbf6 100644
--- a/swarms/utils/file_processing.py
+++ b/swarms/utils/file_processing.py
@@ -5,6 +5,28 @@ from typing import Any
 import re
 import shutil
 import tempfile
+from swarms.utils.loguru_logger import initialize_logger
+
+logger = initialize_logger(log_folder="file_processing")
+
+
+def check_if_folder_exists(folder_name: str) -> bool:
+    """
+    Check if a folder exists at the specified path.
+
+    Args:
+        folder_name (str): The path to the folder to check.
+
+    Returns:
+        bool: True if the folder exists, False otherwise.
+    """
+    try:
+        return os.path.exists(folder_name) and os.path.isdir(
+            folder_name
+        )
+    except Exception as e:
+        logger.error(f"Failed to check if folder exists: {e}")
+        return False
 
 
 def zip_workspace(workspace_path: str, output_filename: str):
@@ -12,25 +34,33 @@
     Zips the specified workspace directory and returns the path to the zipped file.
     Ensure the output_filename does not have .zip extension as it's added by make_archive.
     """
-    temp_dir = tempfile.mkdtemp()
-    # Remove .zip if present in output_filename to avoid duplication
-    base_output_path = os.path.join(
-        temp_dir, output_filename.replace(".zip", "")
-    )
-    zip_path = shutil.make_archive(
-        base_output_path, "zip", workspace_path
-    )
-    return zip_path  # make_archive already appends .zip
+    try:
+        temp_dir = tempfile.mkdtemp()
+        # Remove .zip if present in output_filename to avoid duplication
+        base_output_path = os.path.join(
+            temp_dir, output_filename.replace(".zip", "")
+        )
+        zip_path = shutil.make_archive(
+            base_output_path, "zip", workspace_path
+        )
+        return zip_path  # make_archive already appends .zip
+    except Exception as e:
+        logger.error(f"Failed to zip workspace: {e}")
+        return None
 
 
 def sanitize_file_path(file_path: str):
     """
     Cleans and sanitizes the file path to be valid for Windows.
     """
-    sanitized_path = file_path.replace("`", "").strip()
-    # Replace any invalid characters here with an underscore or remove them
-    sanitized_path = re.sub(r'[<>:"/\\|?*]', "_", sanitized_path)
-    return sanitized_path
+    try:
+        sanitized_path = file_path.replace("`", "").strip()
+        # Replace any invalid characters here with an underscore or remove them
+        sanitized_path = re.sub(r'[<>:"/\\|?*]', "_", sanitized_path)
+        return sanitized_path
+    except Exception as e:
+        logger.error(f"Failed to sanitize file path: {e}")
+        return None
 
 
 def load_json(json_string: str):
@@ -43,11 +73,14 @@
     Returns:
         object: The Python object representing the JSON data.
     """
-    json_data = json.loads(json_string)
-    return json_data
+    try:
+        json_data = json.loads(json_string)
+        return json_data
+    except json.JSONDecodeError as e:
+        logger.error(f"Failed to decode JSON: {e}")
+        return None
 
 
-# Create file that
 def create_file(
     content: str,
     file_path: str,
@@ -59,9 +92,13 @@
         content (str): The content to be written to the file.
         file_path (str): The path to the file to be created.
     """
-    with open(file_path, "w") as file:
-        file.write(content)
-    return file_path
+    try:
+        with open(file_path, "w") as file:
+            file.write(content)
+        return file_path
+    except Exception as e:
+        logger.error(f"Failed to create file: {e}")
+        return None
 
 
 def create_file_in_folder(
@@ -78,15 +115,19 @@ def create_file_in_folder(
     Returns:
         str: The path of the created file.
""" - if not os.path.exists(folder_path): - os.makedirs(folder_path) + try: + if not os.path.exists(folder_path): + os.makedirs(folder_path) - # Create the file in the folder - file_path = os.path.join(folder_path, file_name) - with open(file_path, "w") as file: - file.write(content) + # Create the file in the folder + file_path = os.path.join(folder_path, file_name) + with open(file_path, "w") as file: + file.write(content) - return file_path + return file_path + except Exception as e: + logger.error(f"Failed to create file in folder: {e}") + return None def zip_folders( @@ -103,16 +144,24 @@ def zip_folders( Returns: None """ - # Create a temporary directory - with tempfile.TemporaryDirectory() as temp_dir: - # Copy both folders into the temporary directory - shutil.copytree( - folder1_path, - os.path.join(temp_dir, os.path.basename(folder1_path)), - ) - shutil.copytree( - folder2_path, - os.path.join(temp_dir, os.path.basename(folder2_path)), - ) - # Create a zip file that contains the temporary directory - shutil.make_archive(zip_file_path, "zip", temp_dir) + try: + # Create a temporary directory + with tempfile.TemporaryDirectory() as temp_dir: + # Copy both folders into the temporary directory + shutil.copytree( + folder1_path, + os.path.join( + temp_dir, os.path.basename(folder1_path) + ), + ) + shutil.copytree( + folder2_path, + os.path.join( + temp_dir, os.path.basename(folder2_path) + ), + ) + # Create a zip file that contains the temporary directory + shutil.make_archive(zip_file_path, "zip", temp_dir) + except Exception as e: + logger.error(f"Failed to zip folders: {e}") + return None diff --git a/swarms/utils/parse_code.py b/swarms/utils/parse_code.py index 25cd6210..f295340c 100644 --- a/swarms/utils/parse_code.py +++ b/swarms/utils/parse_code.py @@ -14,14 +14,30 @@ def extract_code_from_markdown(markdown_content: str) -> str: # Regular expression for fenced code blocks with optional language specifier pattern = r"```(?:\w+\n)?(.*?)```" + # Check if markdown_content is a string + if not isinstance(markdown_content, str): + raise TypeError("markdown_content must be a string") + # Find all matches of the pattern matches = re.finditer(pattern, markdown_content, re.DOTALL) # Extract the content inside the backticks - code_blocks = [match.group(1).strip() for match in matches] + code_blocks = [] + for match in matches: + code_block = match.group(1).strip() + # Remove any leading or trailing whitespace from the code block + code_block = code_block.strip() + # Remove any empty lines from the code block + code_block = "\n".join( + [line for line in code_block.split("\n") if line.strip()] + ) + code_blocks.append(code_block) # Concatenate all code blocks separated by newlines - return "\n".join(code_blocks) + if code_blocks: + return "\n\n".join(code_blocks) + else: + return "" # example = """ diff --git a/swarms/utils/profile_func_2.py b/swarms/utils/profile_func_2.py index a17c85aa..e69de29b 100644 --- a/swarms/utils/profile_func_2.py +++ b/swarms/utils/profile_func_2.py @@ -1,98 +0,0 @@ -from functools import wraps -from loguru import logger -import tracemalloc -import psutil -import time -from typing import Callable, Any - - -def profile_all(func: Callable) -> Callable: - """ - A decorator to profile memory usage, CPU usage, and I/O operations - of a function and log the data using loguru. - - It combines tracemalloc for memory profiling, psutil for CPU and I/O operations, - and measures execution time. - - Args: - func (Callable): The function to be profiled. 
- - Returns: - Callable: The wrapped function with profiling enabled. - """ - - @wraps(func) - def wrapper(*args: Any, **kwargs: Any) -> Any: - # Start memory tracking - tracemalloc.start() - - # Get initial CPU stats - process = psutil.Process() - initial_cpu_times = process.cpu_times() - - # Get initial I/O stats if available - try: - initial_io_counters = process.io_counters() - io_tracking_available = True - except AttributeError: - logger.warning( - "I/O counters not available on this platform." - ) - io_tracking_available = False - - # Start timing the function execution - start_time = time.time() - - # Execute the function - result = func(*args, **kwargs) - - # Stop timing - end_time = time.time() - execution_time = end_time - start_time - - # Get final CPU stats - final_cpu_times = process.cpu_times() - - # Get final I/O stats if available - if io_tracking_available: - final_io_counters = process.io_counters() - io_read_count = ( - final_io_counters.read_count - - initial_io_counters.read_count - ) - io_write_count = ( - final_io_counters.write_count - - initial_io_counters.write_count - ) - else: - io_read_count = io_write_count = 0 - - # Get memory usage statistics - snapshot = tracemalloc.take_snapshot() - top_stats = snapshot.statistics("lineno") - - # Calculate CPU usage - cpu_usage = ( - final_cpu_times.user - - initial_cpu_times.user - + final_cpu_times.system - - initial_cpu_times.system - ) - - # Log the data - logger.info(f"Execution time: {execution_time:.4f} seconds") - logger.info(f"CPU usage: {cpu_usage:.2f} seconds") - if io_tracking_available: - logger.info( - f"I/O Operations - Read: {io_read_count}, Write: {io_write_count}" - ) - logger.info("Top memory usage:") - for stat in top_stats[:10]: - logger.info(stat) - - # Stop memory tracking - tracemalloc.stop() - - return result - - return wrapper From e0ca51b560ea0022c65925083e7d668a8c542947 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 17 Nov 2024 08:52:54 -0800 Subject: [PATCH 10/19] [new examples] --- .../agent_showcase_example.py | 0 .../auto_swarm_router.py | 0 concurrent_mix.py => new_features_examples/concurrent_mix.py | 0 .../persistent_legal_agent.py | 0 .../real_estate_agent.py | 0 rearrange_test.py => new_features_examples/rearrange_test.py | 0 .../sequential_worflow_test.py | 0 .../swarm_arange_demo.py | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename agent_showcase_example.py => new_features_examples/agent_showcase_example.py (100%) rename auto_swarm_router.py => new_features_examples/auto_swarm_router.py (100%) rename concurrent_mix.py => new_features_examples/concurrent_mix.py (100%) rename persistent_legal_agent.py => new_features_examples/persistent_legal_agent.py (100%) rename real_estate_agent.py => new_features_examples/real_estate_agent.py (100%) rename rearrange_test.py => new_features_examples/rearrange_test.py (100%) rename sequential_worflow_test.py => new_features_examples/sequential_worflow_test.py (100%) rename swarm_arange_demo.py => new_features_examples/swarm_arange_demo.py (100%) diff --git a/agent_showcase_example.py b/new_features_examples/agent_showcase_example.py similarity index 100% rename from agent_showcase_example.py rename to new_features_examples/agent_showcase_example.py diff --git a/auto_swarm_router.py b/new_features_examples/auto_swarm_router.py similarity index 100% rename from auto_swarm_router.py rename to new_features_examples/auto_swarm_router.py diff --git a/concurrent_mix.py b/new_features_examples/concurrent_mix.py similarity index 100% rename 
from concurrent_mix.py rename to new_features_examples/concurrent_mix.py diff --git a/persistent_legal_agent.py b/new_features_examples/persistent_legal_agent.py similarity index 100% rename from persistent_legal_agent.py rename to new_features_examples/persistent_legal_agent.py diff --git a/real_estate_agent.py b/new_features_examples/real_estate_agent.py similarity index 100% rename from real_estate_agent.py rename to new_features_examples/real_estate_agent.py diff --git a/rearrange_test.py b/new_features_examples/rearrange_test.py similarity index 100% rename from rearrange_test.py rename to new_features_examples/rearrange_test.py diff --git a/sequential_worflow_test.py b/new_features_examples/sequential_worflow_test.py similarity index 100% rename from sequential_worflow_test.py rename to new_features_examples/sequential_worflow_test.py diff --git a/swarm_arange_demo.py b/new_features_examples/swarm_arange_demo.py similarity index 100% rename from swarm_arange_demo.py rename to new_features_examples/swarm_arange_demo.py From f4cf551c2395531f426cc745ec85d446e7913c27 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 17 Nov 2024 09:51:11 -0800 Subject: [PATCH 11/19] [CLEANUP UN-USED FILES SUCH AS RUN_CPU decorator + other files] --- swarms/telemetry/bootup.py | 28 ++++--- swarms/telemetry/check_update.py | 51 +++++++++--- swarms/utils/profile_func_2.py | 0 swarms/utils/run_on_cpu.py | 128 ------------------------------- 4 files changed, 57 insertions(+), 150 deletions(-) delete mode 100644 swarms/utils/profile_func_2.py delete mode 100644 swarms/utils/run_on_cpu.py diff --git a/swarms/telemetry/bootup.py b/swarms/telemetry/bootup.py index 24d7a7c4..41cae773 100644 --- a/swarms/telemetry/bootup.py +++ b/swarms/telemetry/bootup.py @@ -9,18 +9,22 @@ from swarms.utils.disable_logging import disable_logging def bootup(): """Bootup swarms""" - logging.disable(logging.CRITICAL) - os.environ["WANDB_SILENT"] = "true" + try: + logging.disable(logging.CRITICAL) + os.environ["WANDB_SILENT"] = "true" - # Auto set workspace directory - workspace_dir = os.path.join(os.getcwd(), "agent_workspace") - if not os.path.exists(workspace_dir): - os.makedirs(workspace_dir) - os.environ["WORKSPACE_DIR"] = workspace_dir + # Auto set workspace directory + workspace_dir = os.path.join(os.getcwd(), "agent_workspace") + if not os.path.exists(workspace_dir): + os.makedirs(workspace_dir, exist_ok=True) + os.environ["WORKSPACE_DIR"] = workspace_dir - warnings.filterwarnings("ignore", category=DeprecationWarning) + warnings.filterwarnings("ignore", category=DeprecationWarning) - # Use ThreadPoolExecutor to run disable_logging and auto_update concurrently - with ThreadPoolExecutor(max_workers=2) as executor: - executor.submit(disable_logging) - executor.submit(auto_update) + # Use ThreadPoolExecutor to run disable_logging and auto_update concurrently + with ThreadPoolExecutor(max_workers=2) as executor: + executor.submit(disable_logging) + executor.submit(auto_update) + except Exception as e: + print(f"An error occurred: {str(e)}") + raise diff --git a/swarms/telemetry/check_update.py b/swarms/telemetry/check_update.py index a7e2384a..2b0b9a1c 100644 --- a/swarms/telemetry/check_update.py +++ b/swarms/telemetry/check_update.py @@ -4,10 +4,22 @@ import sys import pkg_resources import requests from packaging import version +from swarms.utils.loguru_logger import initialize_logger + +logger = initialize_logger("check-update") # borrowed from: https://stackoverflow.com/a/1051266/656011 def check_for_package(package: str) -> 
bool: + """ + Checks if a package is installed and available for import. + + Args: + package (str): The name of the package to check. + + Returns: + bool: True if the package is installed and can be imported, False otherwise. + """ if package in sys.modules: return True elif (spec := importlib.util.find_spec(package)) is not None: @@ -19,24 +31,43 @@ def check_for_package(package: str) -> bool: return True except ImportError: + logger.error(f"Failed to import {package}") return False else: + logger.info(f"{package} not found") return False def check_for_update() -> bool: - """Check for updates + """ + Checks if there is an update available for the swarms package. Returns: - BOOL: Flag to indicate if there is an update + bool: True if an update is available, False otherwise. """ - # Fetch the latest version from the PyPI API - response = requests.get("https://pypi.org/pypi/swarms/json") - latest_version = response.json()["info"]["version"] + try: + # Fetch the latest version from the PyPI API + response = requests.get("https://pypi.org/pypi/swarms/json") + response.raise_for_status() # Raises an HTTPError if the response status code is 4XX/5XX + latest_version = response.json()["info"]["version"] - # Get the current version using pkg_resources - current_version = pkg_resources.get_distribution("swarms").version + # Get the current version using pkg_resources + current_version = pkg_resources.get_distribution( + "swarms" + ).version - return version.parse(latest_version) > version.parse( - current_version - ) + if version.parse(latest_version) > version.parse( + current_version + ): + logger.info( + f"Update available: {latest_version} > {current_version}" + ) + return True + else: + logger.info( + f"No update available: {latest_version} <= {current_version}" + ) + return False + except requests.exceptions.RequestException as e: + logger.error(f"Failed to check for update: {e}") + return False diff --git a/swarms/utils/profile_func_2.py b/swarms/utils/profile_func_2.py deleted file mode 100644 index e69de29b..00000000 diff --git a/swarms/utils/run_on_cpu.py b/swarms/utils/run_on_cpu.py deleted file mode 100644 index 9573135d..00000000 --- a/swarms/utils/run_on_cpu.py +++ /dev/null @@ -1,128 +0,0 @@ -import os -import psutil -from typing import Callable, Any -import functools - -from swarms.utils.loguru_logger import initialize_logger - -logger = initialize_logger(log_folder="run_on_cpu") - - -def run_on_cpu(func: Callable) -> Callable: - """ - Decorator that ensures the function runs on all available CPU cores, - maximizing CPU and memory usage to execute the function as quickly as possible. - - This decorator sets the CPU affinity of the current process to all available CPU cores - before executing the function. After the function completes, the original CPU affinity is restored. - - Args: - func (Callable): The function to be executed. - - Returns: - Callable: The wrapped function with CPU affinity settings applied. - - Raises: - RuntimeError: If the CPU affinity cannot be set or restored. - """ - - @functools.wraps(func) - def wrapper(*args: Any, **kwargs: Any) -> Any: - # Get the current process - process = psutil.Process(os.getpid()) - - # Check if the platform supports cpu_affinity - if not hasattr(process, "cpu_affinity"): - logger.warning( - "CPU affinity is not supported on this platform. Executing function without setting CPU affinity." 
-            )
-            return func(*args, **kwargs)
-
-        # Save the original CPU affinity
-        original_affinity = process.cpu_affinity()
-        logger.info(f"Original CPU affinity: {original_affinity}")
-
-        try:
-            # Set the CPU affinity to all available CPU cores
-            all_cpus = list(range(os.cpu_count()))
-            process.cpu_affinity(all_cpus)
-            logger.info(f"Set CPU affinity to: {all_cpus}")
-
-            # Set process priority to high
-            try:
-                process.nice(psutil.HIGH_PRIORITY_CLASS)
-                logger.info("Set process priority to high.")
-            except AttributeError:
-                logger.warning(
-                    "Setting process priority is not supported on this platform."
-                )
-
-            # Pre-allocate memory by creating a large array (optional step)
-            memory_size = int(
-                psutil.virtual_memory().available * 0.9
-            )  # 90% of available memory
-            try:
-                logger.info(
-                    f"Pre-allocating memory: {memory_size} bytes"
-                )
-                _ = bytearray(memory_size)
-            except MemoryError:
-                logger.error(
-                    "Failed to pre-allocate memory, continuing without pre-allocation."
-                )
-
-            # Run the function
-            result = func(*args, **kwargs)
-
-        except psutil.AccessDenied as e:
-            logger.error(
-                "Access denied while setting CPU affinity",
-                exc_info=True,
-            )
-            raise RuntimeError(
-                "Access denied while setting CPU affinity"
-            ) from e
-
-        except psutil.NoSuchProcess as e:
-            logger.error("Process does not exist", exc_info=True)
-            raise RuntimeError("Process does not exist") from e
-
-        except Exception as e:
-            logger.error(
-                "An error occurred during function execution",
-                exc_info=True,
-            )
-            raise RuntimeError(
-                "An error occurred during function execution"
-            ) from e
-
-        finally:
-            # Restore the original CPU affinity
-            try:
-                process.cpu_affinity(original_affinity)
-                logger.info(
-                    f"Restored original CPU affinity: {original_affinity}"
-                )
-            except Exception as e:
-                logger.error(
-                    "Failed to restore CPU affinity", exc_info=True
-                )
-                raise RuntimeError(
-                    "Failed to restore CPU affinity"
-                ) from e
-
-        return result
-
-    return wrapper
-
-
-# # Example usage of the decorator
-# @run_on_cpu
-# def compute_heavy_task() -> None:
-#     # An example task that is CPU and memory intensive
-#     data = [i**2 for i in range(100000000)]
-#     sum(data)
-#     print("Task completed.")
-
-
-# compute_heavy_task()

From f132ea932a8f9dd35d7243d112a0ed3a931ee876 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Mon, 18 Nov 2024 11:25:00 -0800
Subject: [PATCH 12/19] [README]

---
 README.md                | 15 +++++
 swarms/tools/e2b_tool.py | 91 ----------------------------------
 2 files changed, 15 insertions(+), 91 deletions(-)
 delete mode 100644 swarms/tools/e2b_tool.py

diff --git a/README.md b/README.md
index dd9ce311..4397eb61 100644
--- a/README.md
+++ b/README.md
@@ -39,6 +39,21 @@
Swarms is an enterprise grade and production ready multi-agent collaboration framework that enables you to orchestrate many agents to work collaboratively at scale to automate real-world activities.
+## ✨ Feature Comparison
+
+| Category | Features | Benefits |
+|----------|----------|-----------|
+| 🏢 Enterprise Architecture | • Production-Ready Infrastructure<br>• High Reliability Systems<br>• Modular Design<br>• Comprehensive Logging | • Reduced downtime<br>• Easier maintenance<br>• Better debugging<br>• Enhanced monitoring |
+| 🤖 Agent Orchestration | • Hierarchical Swarms<br>• Parallel Processing<br>• Sequential Workflows<br>• Graph-based Workflows<br>• Dynamic Agent Rearrangement | • Complex task handling<br>• Improved performance<br>• Flexible workflows<br>• Optimized execution |
+| 🔄 Integration Capabilities | • Multi-Model Support<br>• Custom Agent Creation<br>• Extensive Tool Library<br>• Multiple Memory Systems | • Provider flexibility<br>• Custom solutions<br>• Extended functionality<br>• Enhanced memory management |
+| 📈 Scalability | • Concurrent Processing<br>• Resource Management<br>• Load Balancing<br>• Horizontal Scaling | • Higher throughput<br>• Efficient resource use<br>• Better performance<br>• Easy scaling |
+| 🛠️ Developer Tools | • Simple API<br>• Extensive Documentation<br>• Active Community<br>• CLI Tools | • Faster development<br>• Easy learning curve<br>• Community support<br>• Quick deployment |
+| 🔐 Security Features | • Error Handling<br>• Rate Limiting<br>• Monitoring Integration<br>• Audit Logging | • Improved reliability<br>• API protection<br>• Better monitoring<br>• Enhanced tracking |
+| 📊 Advanced Features | • SpreadsheetSwarm<br>• Group Chat<br>• Agent Registry<br>• Mixture of Agents | • Mass agent management<br>• Collaborative AI<br>• Centralized control<br>• Complex solutions |
+| 🔌 Provider Support | • OpenAI<br>• Anthropic<br>• ChromaDB<br>• Custom Providers | • Provider flexibility<br>• Storage options<br>• Custom integration<br>• Vendor independence |
+| 💪 Production Features | • Automatic Retries<br>• Async Support<br>• Environment Management<br>• Type Safety | • Better reliability<br>• Improved performance<br>• Easy configuration<br>• Safer code |
+| 🎯 Use Case Support | • Task-Specific Agents<br>• Custom Workflows<br>• Industry Solutions<br>• Extensible Framework | • Quick deployment<br>• Flexible solutions<br>• Industry readiness<br>• Easy customization |
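+
+For orientation, a minimal sketch of the `Agent` API that the table above refers to is shown below. The imports and constructor arguments mirror the usage that appears elsewhere in this patch series (`OpenAIChat` from `swarm_models`, `Agent` from `swarms`); the agent name, system prompt, and task string are illustrative placeholders, not fixed requirements:
+
+```python
+import os
+
+from swarm_models import OpenAIChat
+from swarms import Agent
+
+# Illustrative model setup; any supported provider can be substituted.
+model = OpenAIChat(
+    openai_api_key=os.getenv("OPENAI_API_KEY"),
+    model_name="gpt-4-0125-preview",
+    temperature=0.1,
+)
+
+# A single-loop agent with a placeholder name and prompt.
+agent = Agent(
+    agent_name="Quickstart-Agent",
+    system_prompt="You are a concise research assistant.",
+    llm=model,
+    max_loops=1,
+)
+
+print(agent.run("Summarize the benefits of multi-agent orchestration."))
+```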
+
 ----
diff --git a/swarms/tools/e2b_tool.py b/swarms/tools/e2b_tool.py
deleted file mode 100644
index 5f8ef4d9..00000000
--- a/swarms/tools/e2b_tool.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import subprocess
-import sys
-from loguru import logger
-from typing import Tuple, Union, List
-from e2b_code_interpreter import CodeInterpreter
-
-# load_dotenv()
-
-
-# Helper function to lazily install the package if not found
-def lazy_install(package: str) -> None:
-    try:
-        __import__(package)
-    except ImportError:
-        logger.warning(f"{package} not found. Installing now...")
-        subprocess.check_call(
-            [sys.executable, "-m", "pip", "install", package]
-        )
-
-
-# Ensure e2b_code_interpreter is installed lazily
-lazy_install("e2b_code_interpreter")
-
-
-def code_interpret(
-    code_interpreter: CodeInterpreter, code: str
-) -> Union[Tuple[List[str], List[str]], None]:
-    """
-    Runs AI-generated code using the provided CodeInterpreter and logs the process.
-
-    Args:
-        code_interpreter (CodeInterpreter): An instance of the CodeInterpreter class.
-        code (str): The code string to be executed.
-
-    Returns:
-        Union[Tuple[List[str], List[str]], None]: A tuple of (results, logs) if successful,
-        or None if an error occurred.
-
-    Raises:
-        ValueError: If the code or code_interpreter is invalid.
-    """
-    if not isinstance(code_interpreter, CodeInterpreter):
-        logger.error("Invalid CodeInterpreter instance provided.")
-        raise ValueError(
-            "code_interpreter must be an instance of CodeInterpreter."
-        )
-    if not isinstance(code, str) or not code.strip():
-        logger.error("Invalid code provided.")
-        raise ValueError("code must be a non-empty string.")
-
-    logger.info(
-        f"\n{'='*50}\n> Running the following AI-generated code:\n{code}\n{'='*50}"
-    )
-
-    try:
-        exec_result = code_interpreter.notebook.exec_cell(
-            code,
-            # on_stderr=lambda stderr: logger.error(f"[Code Interpreter stderr] {stderr}"),
-            # on_stdout=lambda stdout: logger.info(f"[Code Interpreter stdout] {stdout}")
-        )
-
-        if exec_result.error:
-            logger.error(
-                f"[Code Interpreter error] {exec_result.error}"
-            )
-            return None
-        else:
-            logger.success("Code executed successfully.")
-            # return exec_result.results, exec_result.logs
-            # return exec_result.results
-            prompt = f"{exec_result.results}: {exec_result.logs}"
-            return prompt
-
-    except Exception:
-        logger.exception(
-            "An error occurred during code interpretation."
-        )
-        return None
-
-
-# # from e2b_code_interpreter import CodeInterpreter
-
-# interpreter = CodeInterpreter()
-# code = "print('Hello, World!')"
-
-# result = code_interpret(interpreter, code)
-
-# if result:
-#     results = result
-#     print("Execution Results:", results)
-#     # print("Execution Logs:", logs)

From 5736d1a4a42f58e345b05225b4ddff2fbf76d452 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Mon, 18 Nov 2024 11:25:26 -0800
Subject: [PATCH 13/19] [README]

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 4397eb61..22a0c522 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,8 @@
Swarms is an enterprise grade and production ready multi-agent collaboration framework that enables you to orchestrate many agents to work collaboratively at scale to automate real-world activities.
-## ✨ Feature Comparison + +## ✨ Features | Category | Features | Benefits | |----------|----------|-----------| From 9475b7a4a397cef0a2199ceca453013fc60af52c Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 18 Nov 2024 11:43:02 -0800 Subject: [PATCH 14/19] [CLEANUP] --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 22a0c522..bb9dd54b 100644 --- a/README.md +++ b/README.md @@ -37,9 +37,6 @@ [![Share on Reddit](https://img.shields.io/badge/-Share%20on%20Reddit-orange)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&title=Swarms%20-%20the%20future%20of%20AI) [![Share on Hacker News](https://img.shields.io/badge/-Share%20on%20Hacker%20News-orange)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&t=Swarms%20-%20the%20future%20of%20AI) [![Share on Pinterest](https://img.shields.io/badge/-Share%20on%20Pinterest-red)](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms&media=https%3A%2F%2Fexample.com%2Fimage.jpg&description=Swarms%20-%20the%20future%20of%20AI) [![Share on WhatsApp](https://img.shields.io/badge/-Share%20on%20WhatsApp-green)](https://api.whatsapp.com/send?text=Check%20out%20Swarms%20-%20the%20future%20of%20AI%20%23swarms%20%23AI%0A%0Ahttps%3A%2F%2Fgithub.com%2Fkyegomez%2Fswarms) - -Swarms is an enterprise grade and production ready multi-agent collaboration framework that enables you to orchestrate many agents to work collaboratively at scale to automate real-world activities. - ## ✨ Features | Category | Features | Benefits | From b2ed4c919bdd47efa3d1edac9ce50f4bee936c1d Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 18 Nov 2024 12:07:57 -0800 Subject: [PATCH 15/19] [AGENT REARRANGE DOCS] --- docs/swarms/structs/agent_rearrange.md | 77 +++++++++++++++++++------- swarms/utils/workspace_manager.py | 0 2 files changed, 56 insertions(+), 21 deletions(-) create mode 100644 swarms/utils/workspace_manager.py diff --git a/docs/swarms/structs/agent_rearrange.md b/docs/swarms/structs/agent_rearrange.md index 2cfe5703..d7a8bb98 100644 --- a/docs/swarms/structs/agent_rearrange.md +++ b/docs/swarms/structs/agent_rearrange.md @@ -7,10 +7,22 @@ The `AgentRearrange` class represents a swarm of agents for rearranging tasks. I | Attribute | Type | Description | | --- | --- | --- | -| `agents` | `dict` | A dictionary of agents, where the key is the agent's name and the value is the agent object. | -| `flow` | `str` | The flow pattern of the tasks. | -| `max_loops` | `int` | The maximum number of loops for the agents to run. | -| `verbose` | `bool` | Whether to enable verbose logging or not. 
| +| `id` | `str` | Unique identifier for the swarm | +| `name` | `str` | Name of the swarm | +| `description` | `str` | Description of the swarm's purpose | +| `agents` | `dict` | Dictionary mapping agent names to Agent objects | +| `flow` | `str` | Flow pattern defining task execution order | +| `max_loops` | `int` | Maximum number of execution loops | +| `verbose` | `bool` | Whether to enable verbose logging | +| `memory_system` | `BaseVectorDatabase` | Memory system for storing agent interactions | +| `human_in_the_loop` | `bool` | Whether human intervention is enabled | +| `custom_human_in_the_loop` | `Callable` | Custom function for human intervention | +| `return_json` | `bool` | Whether to return output in JSON format | +| `output_type` | `OutputType` | Format of output ("all", "final", "list", or "dict") | +| `docs` | `List[str]` | List of document paths to add to agent prompts | +| `doc_folder` | `str` | Folder path containing documents to add to agent prompts | +| `swarm_history` | `dict` | History of agent interactions | + ## Methods ------- @@ -62,20 +74,55 @@ Validates the flow pattern. - `bool`: `True` if the flow pattern is valid. -### `run(self, task: str, *args, **kwargs)` +### `run(self, task: str = None, img: str = None, device: str = "cpu", device_id: int = 1, all_cores: bool = True, all_gpus: bool = False, *args, **kwargs)` -Runs the swarm to rearrange the tasks. +Executes the agent rearrangement task with specified compute resources. | Parameter | Type | Description | | --- | --- | --- | -| `task` | `str` | The initial task to be processed. | -| `*args` | - | Additional positional arguments. | -| `**kwargs` | - | Additional keyword arguments. | +| `task` | `str` | The task to execute | +| `img` | `str` | Path to input image if required | +| `device` | `str` | Computing device to use ('cpu' or 'gpu') | +| `device_id` | `int` | ID of specific device to use | +| `all_cores` | `bool` | Whether to use all CPU cores | +| `all_gpus` | `bool` | Whether to use all available GPUs | **Returns:** - `str`: The final processed task. +### `batch_run(self, tasks: List[str], img: Optional[List[str]] = None, batch_size: int = 10, device: str = "cpu", device_id: int = None, all_cores: bool = True, all_gpus: bool = False, *args, **kwargs)` + +Process multiple tasks in batches. + +| Parameter | Type | Description | +| --- | --- | --- | +| `tasks` | `List[str]` | List of tasks to process | +| `img` | `List[str]` | Optional list of images corresponding to tasks | +| `batch_size` | `int` | Number of tasks to process simultaneously | +| `device` | `str` | Computing device to use | +| `device_id` | `int` | Specific device ID if applicable | +| `all_cores` | `bool` | Whether to use all CPU cores | +| `all_gpus` | `bool` | Whether to use all available GPUs | + + + +### `concurrent_run(self, tasks: List[str], img: Optional[List[str]] = None, max_workers: Optional[int] = None, device: str = "cpu", device_id: int = None, all_cores: bool = True, all_gpus: bool = False, *args, **kwargs)` + +Process multiple tasks concurrently using ThreadPoolExecutor. 
+ +| Parameter | Type | Description | +| --- | --- | --- | +| `tasks` | `List[str]` | List of tasks to process | +| `img` | `List[str]` | Optional list of images corresponding to tasks | +| `max_workers` | `int` | Maximum number of worker threads | +| `device` | `str` | Computing device to use | +| `device_id` | `int` | Specific device ID if applicable | +| `all_cores` | `bool` | Whether to use all CPU cores | +| `all_gpus` | `bool` | Whether to use all available GPUs | + + + ## Documentation for `rearrange` Function ====================================== @@ -247,18 +294,6 @@ Additionally, you can modify the `run` method of the `AgentRearrange` class to i It's important to note that the `AgentRearrange` class and the `rearrange` function rely on the individual agents to process tasks correctly. The quality of the output will depend on the capabilities and configurations of the agents used in the swarm. Additionally, the `AgentRearrange` class does not provide any mechanisms for task prioritization or load balancing among the agents. -## Future Improvements -------------------- - -Here are some potential future improvements for the `AgentRearrange` class and the `rearrange` function: - -- **Task Prioritization**: Implement a mechanism to prioritize tasks based on factors such as urgency, importance, or resource availability. -- **Load Balancing**: Incorporate load balancing algorithms to distribute tasks among agents more efficiently, taking into account factors such as agent availability, performance, and resource utilization. -- **Dynamic Flow Reconfiguration**: Allow for dynamic reconfiguration of the flow pattern during runtime, enabling the addition, removal, or reordering of agents based on specific conditions or events. -- **Error Handling and Fault Tolerance**: Enhance error handling and fault tolerance mechanisms to gracefully handle agent failures, task timeouts, or other exceptional situations. -- **Monitoring and Metrics**: Implement monitoring and metrics collection to track the performance and efficiency of the swarm, as well as individual agent performance. -- **Scalability**: Enhance the scalability of the system to handle larger numbers of agents and tasks efficiently. - ## Conclusion ---------- diff --git a/swarms/utils/workspace_manager.py b/swarms/utils/workspace_manager.py new file mode 100644 index 00000000..e69de29b From 59b6b41b98b7d97e1c6e599d3a80744a5fcb935b Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 18 Nov 2024 12:09:37 -0800 Subject: [PATCH 16/19] [AGENT DOCS UPDATE] --- docs/swarms/structs/agent.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/docs/swarms/structs/agent.md b/docs/swarms/structs/agent.md index 97ab465b..6413dd2c 100644 --- a/docs/swarms/structs/agent.md +++ b/docs/swarms/structs/agent.md @@ -132,6 +132,16 @@ graph TD | `data_memory` | Optional callable for data memory operations. | | `load_yaml_path` | String representing the path to a YAML file for loading configurations. | | `auto_generate_prompt` | Boolean indicating whether to automatically generate prompts. 
|
+| `rag_every_loop` | Boolean indicating whether to query RAG database for context on every loop |
+| `plan_enabled` | Boolean indicating whether planning functionality is enabled |
+| `artifacts_on` | Boolean indicating whether to save artifacts from agent execution |
+| `artifacts_output_path` | File path where artifacts should be saved |
+| `artifacts_file_extension` | File extension to use for saved artifacts |
+| `device` | Device to run computations on ("cpu" or "gpu") |
+| `all_cores` | Boolean indicating whether to use all CPU cores |
+| `device_id` | ID of the GPU device to use if running on GPU |
+| `scheduled_run_date` | Optional datetime for scheduling future agent runs |
+

## `Agent` Methods
@@ -200,6 +210,20 @@
| `handle_sop_ops()` | Handles operations related to standard operating procedures. | None | `agent.handle_sop_ops()` |
| `agent_output_type(responses)` | Processes and returns the agent's output based on the specified output type. | `responses` (list): List of responses. | `formatted_output = agent.agent_output_type(responses)` |
| `check_if_no_prompt_then_autogenerate(task)` | Checks if a system prompt is not set and auto-generates one if needed. | `task` (str): The task to use for generating a prompt. | `agent.check_if_no_prompt_then_autogenerate("Analyze data")` |
+| `check_if_no_prompt_then_autogenerate(task)` | Checks if `auto_generate_prompt` is enabled and generates a prompt by combining the agent name, description, and system prompt | `task` (str, optional): Task to use as fallback | `agent.check_if_no_prompt_then_autogenerate("Analyze data")` |
+| `handle_artifacts(response, output_path, extension)` | Handles saving artifacts from agent execution | `response` (str): Agent response<br>`output_path` (str): Output path<br>`extension` (str): File extension | `agent.handle_artifacts(response, "outputs/", ".txt")` |
+
+
+
+## Updated Run Method
+
+The `run` method now supports the following additional parameters:
+
+| Method | Description | Inputs | Usage Example |
+|--------|-------------|--------|----------------|
+| `run(task, img=None, is_last=False, device="cpu", device_id=0, all_cores=True, scheduled_run_date=None)` | Runs the agent with specified parameters | `task` (str): Task to run
<br>`img` (str, optional): Image path<br>`is_last` (bool): If this is last task<br>`device` (str): Device to use<br>`device_id` (int): GPU ID<br>`all_cores` (bool): Use all CPU cores<br>
`scheduled_run_date` (datetime, optional): Future run date | `agent.run("Analyze data", device="gpu", device_id=0)` | + + ## Getting Started @@ -538,5 +562,9 @@ print(agent.system_prompt) 8. Optimize token usage with `dynamic_context_window` and `tokens_checks` methods. 9. Use `concurrent` and `async` methods for performance-critical applications. 10. Regularly review and analyze feedback using the `analyze_feedback` method. +11. Use `artifacts_on` to save important outputs from agent execution +12. Configure `device` and `device_id` appropriately for optimal performance +13. Enable `rag_every_loop` when continuous context from long-term memory is needed +14. Use `scheduled_run_date` for automated task scheduling By following these guidelines and leveraging the Swarm Agent's extensive features, you can create powerful, flexible, and efficient autonomous agents for a wide range of applications. \ No newline at end of file From 1f47866722a904d30712a5dc9c16114cd9dc0b3a Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 18 Nov 2024 15:44:44 -0800 Subject: [PATCH 17/19] [REPORT ERROR LOGGER FROM UN-USED CODE] --- backtester.py | 538 +++++++++++++++++++++++++++++ pyproject.toml | 2 +- swarms/structs/agent_registry.py | 1 - swarms/utils/try_except_wrapper.py | 1 - 4 files changed, 539 insertions(+), 3 deletions(-) create mode 100644 backtester.py diff --git a/backtester.py b/backtester.py new file mode 100644 index 00000000..51cac1a1 --- /dev/null +++ b/backtester.py @@ -0,0 +1,538 @@ +""" +Advanced Financial Backtesting System +----------------------------------- +A comprehensive system for backtesting trading strategies using the Swarms framework, +real-time data from Yahoo Finance, and AI-driven decision making. + +Features: +- Type-safe implementation with comprehensive type hints +- Detailed logging with Loguru +- Real-time data fetching from Yahoo Finance +- Advanced technical analysis +- Performance metrics and visualization +- AI-driven trading decisions using Swarms framework +""" + +import os +from datetime import datetime +from typing import Dict, List, TypedDict +from dataclasses import dataclass +import pandas as pd +import numpy as np +import yfinance as yf +from swarms import Agent +from swarm_models import OpenAIChat +from dotenv import load_dotenv +from loguru import logger + +# Configure logging +logger.add( + "backtester_{time}.log", + rotation="500 MB", + retention="10 days", + level="INFO", + format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}", +) + + +# Type definitions +class TradeAction(TypedDict): + date: datetime + action: str + symbol: str + quantity: float + price: float + cash: float + commission: float + + +class PortfolioMetrics(TypedDict): + total_return: float + total_trades: int + total_commission: float + final_cash: float + sharpe_ratio: float + max_drawdown: float + + +@dataclass +class TechnicalIndicators: + sma_20: float + sma_50: float + rsi: float + macd: float + signal_line: float + volume: int + + +class FinancialData: + """ + Handles financial data operations using Yahoo Finance API + + Attributes: + cache (Dict): Cache for storing downloaded data + """ + + def __init__(self) -> None: + self.cache: Dict[str, pd.DataFrame] = {} + + @logger.catch + def get_historical_prices( + self, symbol: str, start_date: str, end_date: str + ) -> pd.DataFrame: + """ + Fetches historical price data from Yahoo Finance + + Args: + symbol: Stock symbol + start_date: Start date in YYYY-MM-DD format + end_date: End date in YYYY-MM-DD format + + Returns: + DataFrame 
containing historical price data + """ + logger.info( + f"Fetching data for {symbol} from {start_date} to {end_date}" + ) + + if symbol not in self.cache: + try: + ticker = yf.Ticker(symbol) + df = ticker.history(start=start_date, end=end_date) + df["symbol"] = symbol + df.index.name = "date" + df.reset_index(inplace=True) + self.cache[symbol] = df + logger.success( + f"Successfully downloaded data for {symbol}" + ) + except Exception as e: + logger.error( + f"Error fetching data for {symbol}: {str(e)}" + ) + raise + + return self.cache[symbol] + + @logger.catch + def get_technical_indicators( + self, df: pd.DataFrame + ) -> pd.DataFrame: + """ + Calculates technical indicators for analysis + + Args: + df: DataFrame with price data + + Returns: + DataFrame with added technical indicators + """ + logger.info("Calculating technical indicators") + df = df.copy() + + try: + # Calculate moving averages + df["SMA_20"] = df["Close"].rolling(window=20).mean() + df["SMA_50"] = df["Close"].rolling(window=50).mean() + + # Calculate RSI + delta = df["Close"].diff() + gain = ( + (delta.where(delta > 0, 0)).rolling(window=14).mean() + ) + loss = ( + (-delta.where(delta < 0, 0)).rolling(window=14).mean() + ) + rs = gain / loss + df["RSI"] = 100 - (100 / (1 + rs)) + + # Calculate MACD + exp1 = df["Close"].ewm(span=12, adjust=False).mean() + exp2 = df["Close"].ewm(span=26, adjust=False).mean() + df["MACD"] = exp1 - exp2 + df["Signal_Line"] = ( + df["MACD"].ewm(span=9, adjust=False).mean() + ) + + logger.success( + "Successfully calculated technical indicators" + ) + return df + + except Exception as e: + logger.error( + f"Error calculating technical indicators: {str(e)}" + ) + raise + + +class Portfolio: + """ + Manages portfolio positions and tracks performance + + Attributes: + initial_cash: Starting capital + cash: Current cash balance + positions: Current stock positions + history: Trade history + trade_count: Number of trades executed + """ + + def __init__(self, initial_cash: float = 100000.0) -> None: + self.initial_cash = initial_cash + self.cash = initial_cash + self.positions: Dict[str, float] = {} + self.history: List[TradeAction] = [] + self.trade_count = 0 + logger.info( + f"Initialized portfolio with ${initial_cash:,.2f}" + ) + + @logger.catch + def execute_trade( + self, + symbol: str, + action: str, + price: float, + quantity: float, + date: datetime, + ) -> None: + """ + Executes a trade and updates portfolio state + + Args: + symbol: Stock symbol + action: 'BUY' or 'SELL' + price: Trade price + quantity: Number of shares + date: Trade date + """ + commission = 1.0 # $1 per trade commission + + try: + if action == "BUY": + cost = (price * quantity) + commission + if cost <= self.cash: + self.cash -= cost + self.positions[symbol] = ( + self.positions.get(symbol, 0) + quantity + ) + self.trade_count += 1 + logger.info( + f"Bought {quantity} shares of {symbol} at ${price:.2f}" + ) + elif action == "SELL": + if ( + symbol in self.positions + and self.positions[symbol] >= quantity + ): + self.cash += (price * quantity) - commission + self.positions[symbol] -= quantity + if self.positions[symbol] == 0: + del self.positions[symbol] + self.trade_count += 1 + logger.info( + f"Sold {quantity} shares of {symbol} at ${price:.2f}" + ) + + self.history.append( + { + "date": date, + "action": action, + "symbol": symbol, + "quantity": quantity, + "price": price, + "cash": self.cash, + "commission": commission, + } + ) + + except Exception as e: + logger.error(f"Error executing trade: {str(e)}") + raise + 
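+    # A minimal usage sketch for execute_trade (illustrative only; the
+    # symbol, price, quantity, and date below are made-up placeholders):
+    #
+    #     from datetime import datetime
+    #
+    #     portfolio = Portfolio(initial_cash=100_000.0)
+    #     portfolio.execute_trade(
+    #         symbol="AAPL",
+    #         action="BUY",
+    #         price=150.0,
+    #         quantity=10.0,
+    #         date=datetime(2023, 1, 3),
+    #     )
+    #     # Cash drops by price * quantity plus the flat $1 commission,
+    #     # and self.positions now holds {"AAPL": 10.0}.
+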
+ def get_metrics(self) -> PortfolioMetrics: + """ + Calculates portfolio performance metrics + + Returns: + Dictionary containing performance metrics + """ + try: + df = pd.DataFrame(self.history) + if len(df) == 0: + return { + "total_return": 0.0, + "total_trades": 0, + "total_commission": 0.0, + "final_cash": self.initial_cash, + "sharpe_ratio": 0.0, + "max_drawdown": 0.0, + } + + portfolio_values = df["cash"].values + returns = ( + np.diff(portfolio_values) / portfolio_values[:-1] + ) + + sharpe_ratio = ( + np.sqrt(252) * np.mean(returns) / np.std(returns) + if len(returns) > 0 + else 0 + ) + max_drawdown = np.min( + np.minimum.accumulate(portfolio_values) + / np.maximum.accumulate(portfolio_values) + - 1 + ) + + metrics: PortfolioMetrics = { + "total_return": ( + (self.cash - self.initial_cash) + / self.initial_cash + ) + * 100, + "total_trades": self.trade_count, + "total_commission": self.trade_count * 1.0, + "final_cash": self.cash, + "sharpe_ratio": float(sharpe_ratio), + "max_drawdown": float(max_drawdown * 100), + } + + logger.info("Successfully calculated portfolio metrics") + return metrics + + except Exception as e: + logger.error( + f"Error calculating portfolio metrics: {str(e)}" + ) + raise + + +class FinancialAgent: + """ + AI Agent for making trading decisions using the Swarms framework + + Attributes: + model: OpenAI chat model instance + agent: Swarms agent instance + """ + + def __init__(self, api_key: str) -> None: + logger.info("Initializing Financial Agent") + + self.model = OpenAIChat( + openai_api_key=api_key, + model_name="gpt-4-0125-preview", + temperature=0.1, + ) + + self.agent = Agent( + agent_name="Financial-Trading-Agent", + system_prompt="""You are an AI trading agent. Analyze the provided price data and technical indicators to make trading decisions. + Output only one of these decisions: BUY, SELL, or HOLD. Consider the following in your analysis: + 1. Trend direction using moving averages (SMA_20 and SMA_50) + 2. RSI for overbought/oversold conditions (>70 overbought, <30 oversold) + 3. MACD crossovers and momentum + 4. Recent price action and volume + + Provide your decision in a single word: BUY, SELL, or HOLD.""", + llm=self.model, + max_loops=1, + autosave=True, + dashboard=False, + verbose=True, + ) + + @logger.catch + def make_decision(self, price_data: pd.DataFrame) -> str: + """ + Makes trading decision based on price data and technical indicators + + Args: + price_data: DataFrame containing price and indicator data + + Returns: + Trading decision: 'BUY', 'SELL', or 'HOLD' + """ + try: + latest_data = price_data.tail(1).to_dict("records")[0] + + prompt = f""" + Current Market Data: + Price: ${latest_data['Close']:.2f} + SMA_20: ${latest_data['SMA_20']:.2f} + SMA_50: ${latest_data['SMA_50']:.2f} + RSI: {latest_data['RSI']:.2f} + MACD: {latest_data['MACD']:.2f} + Signal Line: {latest_data['Signal_Line']:.2f} + Volume: {latest_data['Volume']} + + Based on this data, what is your trading decision? 
+ """ + + decision = self.agent.run(prompt) + decision = decision.strip().upper() + + if decision not in ["BUY", "SELL", "HOLD"]: + logger.warning( + f"Invalid decision '{decision}', defaulting to HOLD" + ) + decision = "HOLD" + + logger.info(f"Agent decision: {decision}") + return decision + + except Exception as e: + logger.error(f"Error making trading decision: {str(e)}") + raise + + +class Backtester: + """ + Runs trading strategy backtests and analyzes performance + + Attributes: + agent: Trading agent instance + portfolio: Portfolio instance + results: List of backtest results + """ + + def __init__( + self, agent: FinancialAgent, portfolio: Portfolio + ) -> None: + self.agent = agent + self.portfolio = portfolio + self.results: List[Dict] = [] + logger.info("Initialized Backtester") + + @logger.catch + def run_backtest( + self, price_data: pd.DataFrame, trade_size: float = 100 + ) -> None: + """ + Runs backtest simulation + + Args: + price_data: Historical price data + trade_size: Number of shares per trade + """ + logger.info("Starting backtest") + + try: + df = FinancialData().get_technical_indicators(price_data) + df = df.dropna() + + for i in range(len(df)): + current_data = df.iloc[i] + current_price = current_data["Close"] + current_date = current_data["date"] + + decision = self.agent.make_decision( + df.iloc[max(0, i - 10) : i + 1] + ) + + if decision == "BUY": + self.portfolio.execute_trade( + symbol=current_data["symbol"], + action="BUY", + price=current_price, + quantity=trade_size, + date=current_date, + ) + elif decision == "SELL": + self.portfolio.execute_trade( + symbol=current_data["symbol"], + action="SELL", + price=current_price, + quantity=trade_size, + date=current_date, + ) + + portfolio_value = self.portfolio.get_metrics()[ + "final_cash" + ] + + self.results.append( + { + "date": current_date, + "price": current_price, + "decision": decision, + "portfolio_value": portfolio_value, + "SMA_20": current_data["SMA_20"], + "SMA_50": current_data["SMA_50"], + "RSI": current_data["RSI"], + "MACD": current_data["MACD"], + } + ) + + logger.success("Backtest completed successfully") + + except Exception as e: + logger.error(f"Error during backtest: {str(e)}") + raise + + def get_results(self) -> pd.DataFrame: + """ + Returns backtest results as DataFrame + """ + return pd.DataFrame(self.results) + + +def main() -> None: + """ + Main function to run the backtesting system + """ + try: + # Load environment variables + load_dotenv() + api_key = os.getenv("OPENAI_API_KEY") + if not api_key: + raise ValueError( + "OpenAI API key not found in environment variables" + ) + + # Initialize components + data_provider = FinancialData() + agent = FinancialAgent(api_key) + portfolio = Portfolio(initial_cash=100000.0) + backtester = Backtester(agent, portfolio) + + # Get historical data + symbol = "AAPL" + start_date = "2023-01-01" + end_date = "2023-12-31" + + logger.info( + f"Starting backtest for {symbol} from {start_date} to {end_date}" + ) + + price_data = data_provider.get_historical_prices( + symbol, start_date, end_date + ) + backtester.run_backtest(price_data) + + # Get and display results + results = backtester.get_results() + metrics = portfolio.get_metrics() + + logger.info("Backtest Results:") + logger.info(f"Initial Portfolio Value: ${100000:.2f}") + logger.info( + f"Final Portfolio Value: ${metrics['final_cash']:.2f}" + ) + logger.info(f"Total Return: {metrics['total_return']:.2f}%") + logger.info(f"Total Trades: {metrics['total_trades']}") + logger.info( + f"Total 
Commission: ${metrics['total_commission']:.2f}" + ) + logger.info(f"Sharpe Ratio: {metrics['sharpe_ratio']:.2f}") + logger.info(f"Max Drawdown: {metrics['max_drawdown']:.2f}%") + + except Exception as e: + logger.error(f"Error in main function: {str(e)}") + raise + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index d405aa10..7914360d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "6.0.9" +version = "6.1.0" description = "Swarms - Pytorch" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/structs/agent_registry.py b/swarms/structs/agent_registry.py index 809f2010..75a2a226 100644 --- a/swarms/structs/agent_registry.py +++ b/swarms/structs/agent_registry.py @@ -7,7 +7,6 @@ from pydantic import BaseModel, Field, ValidationError from swarms import Agent from swarms.utils.loguru_logger import logger -from swarms.utils.report_error_loguru import report_error class AgentConfigSchema(BaseModel): diff --git a/swarms/utils/try_except_wrapper.py b/swarms/utils/try_except_wrapper.py index 50fdd877..827fb9c3 100644 --- a/swarms/utils/try_except_wrapper.py +++ b/swarms/utils/try_except_wrapper.py @@ -3,7 +3,6 @@ from time import time from typing import Any, Callable from swarms.utils.loguru_logger import logger -from swarms.utils.report_error_loguru import report_error def retry( From f34010f20474cb182bcc3f60163604abff84bc7a Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 18 Nov 2024 15:45:48 -0800 Subject: [PATCH 18/19] [CLEANUP ERROR] --- pyproject.toml | 2 +- swarms/structs/agent_registry.py | 8 ++++---- swarms/utils/try_except_wrapper.py | 8 +++++--- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 7914360d..0e902b39 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "6.1.0" +version = "6.1.1" description = "Swarms - Pytorch" license = "MIT" authors = ["Kye Gomez "] diff --git a/swarms/structs/agent_registry.py b/swarms/structs/agent_registry.py index 75a2a226..09348622 100644 --- a/swarms/structs/agent_registry.py +++ b/swarms/structs/agent_registry.py @@ -228,7 +228,7 @@ class AgentRegistry: logger.info("Listing all agents.") return agent_names except Exception as e: - report_error(e) + logger.error(f"Error: {e}") raise e def return_all_agents(self) -> List[Agent]: @@ -244,7 +244,7 @@ class AgentRegistry: logger.info("Returning all agents.") return agents except Exception as e: - report_error(e) + logger.error(f"Error: {e}") raise e def query( @@ -275,7 +275,7 @@ class AgentRegistry: logger.info("Querying agents with condition.") return agents except Exception as e: - report_error(e) + logger.error(f"Error: {e}") raise e def find_agent_by_name(self, agent_name: str) -> Optional[Agent]: @@ -299,7 +299,7 @@ class AgentRegistry: if agent.agent_name == agent_name: return agent except Exception as e: - report_error(e) + logger.error(f"Error: {e}") raise e def agent_to_py_model(self, agent: Agent): diff --git a/swarms/utils/try_except_wrapper.py b/swarms/utils/try_except_wrapper.py index 827fb9c3..faa63534 100644 --- a/swarms/utils/try_except_wrapper.py +++ b/swarms/utils/try_except_wrapper.py @@ -2,7 +2,9 @@ from functools import wraps from time import time from typing import Any, Callable -from swarms.utils.loguru_logger import logger +from swarms.utils.loguru_logger import initialize_logger + +logger = 
initialize_logger("try_except_wrapper") def retry( @@ -113,12 +115,12 @@ def try_except_wrapper(verbose: bool = False): return result except Exception as error: if verbose: - report_error( + logger.error( f"An error occurred in function {func.__name__}:" f" {error}" ) else: - report_error( + logger.error( f"An error occurred in function {func.__name__}:" f" {error}" ) From 47a359ec34c1e4f1a72dea8ab9d9e835d1f981d6 Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 18 Nov 2024 16:19:02 -0800 Subject: [PATCH 19/19] [ERROR][Prompting Error] --- backtester.py | 538 --------------------------------------- pyproject.toml | 2 +- swarms/prompts/prompt.py | 14 +- 3 files changed, 10 insertions(+), 544 deletions(-) delete mode 100644 backtester.py diff --git a/backtester.py b/backtester.py deleted file mode 100644 index 51cac1a1..00000000 --- a/backtester.py +++ /dev/null @@ -1,538 +0,0 @@ -""" -Advanced Financial Backtesting System ------------------------------------ -A comprehensive system for backtesting trading strategies using the Swarms framework, -real-time data from Yahoo Finance, and AI-driven decision making. - -Features: -- Type-safe implementation with comprehensive type hints -- Detailed logging with Loguru -- Real-time data fetching from Yahoo Finance -- Advanced technical analysis -- Performance metrics and visualization -- AI-driven trading decisions using Swarms framework -""" - -import os -from datetime import datetime -from typing import Dict, List, TypedDict -from dataclasses import dataclass -import pandas as pd -import numpy as np -import yfinance as yf -from swarms import Agent -from swarm_models import OpenAIChat -from dotenv import load_dotenv -from loguru import logger - -# Configure logging -logger.add( - "backtester_{time}.log", - rotation="500 MB", - retention="10 days", - level="INFO", - format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}", -) - - -# Type definitions -class TradeAction(TypedDict): - date: datetime - action: str - symbol: str - quantity: float - price: float - cash: float - commission: float - - -class PortfolioMetrics(TypedDict): - total_return: float - total_trades: int - total_commission: float - final_cash: float - sharpe_ratio: float - max_drawdown: float - - -@dataclass -class TechnicalIndicators: - sma_20: float - sma_50: float - rsi: float - macd: float - signal_line: float - volume: int - - -class FinancialData: - """ - Handles financial data operations using Yahoo Finance API - - Attributes: - cache (Dict): Cache for storing downloaded data - """ - - def __init__(self) -> None: - self.cache: Dict[str, pd.DataFrame] = {} - - @logger.catch - def get_historical_prices( - self, symbol: str, start_date: str, end_date: str - ) -> pd.DataFrame: - """ - Fetches historical price data from Yahoo Finance - - Args: - symbol: Stock symbol - start_date: Start date in YYYY-MM-DD format - end_date: End date in YYYY-MM-DD format - - Returns: - DataFrame containing historical price data - """ - logger.info( - f"Fetching data for {symbol} from {start_date} to {end_date}" - ) - - if symbol not in self.cache: - try: - ticker = yf.Ticker(symbol) - df = ticker.history(start=start_date, end=end_date) - df["symbol"] = symbol - df.index.name = "date" - df.reset_index(inplace=True) - self.cache[symbol] = df - logger.success( - f"Successfully downloaded data for {symbol}" - ) - except Exception as e: - logger.error( - f"Error fetching data for {symbol}: {str(e)}" - ) - raise - - return self.cache[symbol] - - @logger.catch - def get_technical_indicators( - self, df: 
pd.DataFrame - ) -> pd.DataFrame: - """ - Calculates technical indicators for analysis - - Args: - df: DataFrame with price data - - Returns: - DataFrame with added technical indicators - """ - logger.info("Calculating technical indicators") - df = df.copy() - - try: - # Calculate moving averages - df["SMA_20"] = df["Close"].rolling(window=20).mean() - df["SMA_50"] = df["Close"].rolling(window=50).mean() - - # Calculate RSI - delta = df["Close"].diff() - gain = ( - (delta.where(delta > 0, 0)).rolling(window=14).mean() - ) - loss = ( - (-delta.where(delta < 0, 0)).rolling(window=14).mean() - ) - rs = gain / loss - df["RSI"] = 100 - (100 / (1 + rs)) - - # Calculate MACD - exp1 = df["Close"].ewm(span=12, adjust=False).mean() - exp2 = df["Close"].ewm(span=26, adjust=False).mean() - df["MACD"] = exp1 - exp2 - df["Signal_Line"] = ( - df["MACD"].ewm(span=9, adjust=False).mean() - ) - - logger.success( - "Successfully calculated technical indicators" - ) - return df - - except Exception as e: - logger.error( - f"Error calculating technical indicators: {str(e)}" - ) - raise - - -class Portfolio: - """ - Manages portfolio positions and tracks performance - - Attributes: - initial_cash: Starting capital - cash: Current cash balance - positions: Current stock positions - history: Trade history - trade_count: Number of trades executed - """ - - def __init__(self, initial_cash: float = 100000.0) -> None: - self.initial_cash = initial_cash - self.cash = initial_cash - self.positions: Dict[str, float] = {} - self.history: List[TradeAction] = [] - self.trade_count = 0 - logger.info( - f"Initialized portfolio with ${initial_cash:,.2f}" - ) - - @logger.catch - def execute_trade( - self, - symbol: str, - action: str, - price: float, - quantity: float, - date: datetime, - ) -> None: - """ - Executes a trade and updates portfolio state - - Args: - symbol: Stock symbol - action: 'BUY' or 'SELL' - price: Trade price - quantity: Number of shares - date: Trade date - """ - commission = 1.0 # $1 per trade commission - - try: - if action == "BUY": - cost = (price * quantity) + commission - if cost <= self.cash: - self.cash -= cost - self.positions[symbol] = ( - self.positions.get(symbol, 0) + quantity - ) - self.trade_count += 1 - logger.info( - f"Bought {quantity} shares of {symbol} at ${price:.2f}" - ) - elif action == "SELL": - if ( - symbol in self.positions - and self.positions[symbol] >= quantity - ): - self.cash += (price * quantity) - commission - self.positions[symbol] -= quantity - if self.positions[symbol] == 0: - del self.positions[symbol] - self.trade_count += 1 - logger.info( - f"Sold {quantity} shares of {symbol} at ${price:.2f}" - ) - - self.history.append( - { - "date": date, - "action": action, - "symbol": symbol, - "quantity": quantity, - "price": price, - "cash": self.cash, - "commission": commission, - } - ) - - except Exception as e: - logger.error(f"Error executing trade: {str(e)}") - raise - - def get_metrics(self) -> PortfolioMetrics: - """ - Calculates portfolio performance metrics - - Returns: - Dictionary containing performance metrics - """ - try: - df = pd.DataFrame(self.history) - if len(df) == 0: - return { - "total_return": 0.0, - "total_trades": 0, - "total_commission": 0.0, - "final_cash": self.initial_cash, - "sharpe_ratio": 0.0, - "max_drawdown": 0.0, - } - - portfolio_values = df["cash"].values - returns = ( - np.diff(portfolio_values) / portfolio_values[:-1] - ) - - sharpe_ratio = ( - np.sqrt(252) * np.mean(returns) / np.std(returns) - if len(returns) > 0 - else 0 - ) - 
-            max_drawdown = np.min(
-                np.minimum.accumulate(portfolio_values)
-                / np.maximum.accumulate(portfolio_values)
-                - 1
-            )
-
-            metrics: PortfolioMetrics = {
-                "total_return": (
-                    (self.cash - self.initial_cash)
-                    / self.initial_cash
-                )
-                * 100,
-                "total_trades": self.trade_count,
-                "total_commission": self.trade_count * 1.0,
-                "final_cash": self.cash,
-                "sharpe_ratio": float(sharpe_ratio),
-                "max_drawdown": float(max_drawdown * 100),
-            }
-
-            logger.info("Successfully calculated portfolio metrics")
-            return metrics
-
-        except Exception as e:
-            logger.error(
-                f"Error calculating portfolio metrics: {str(e)}"
-            )
-            raise
-
-
-class FinancialAgent:
-    """
-    AI Agent for making trading decisions using the Swarms framework
-
-    Attributes:
-        model: OpenAI chat model instance
-        agent: Swarms agent instance
-    """
-
-    def __init__(self, api_key: str) -> None:
-        logger.info("Initializing Financial Agent")
-
-        self.model = OpenAIChat(
-            openai_api_key=api_key,
-            model_name="gpt-4-0125-preview",
-            temperature=0.1,
-        )
-
-        self.agent = Agent(
-            agent_name="Financial-Trading-Agent",
-            system_prompt="""You are an AI trading agent. Analyze the provided price data and technical indicators to make trading decisions.
-            Output only one of these decisions: BUY, SELL, or HOLD. Consider the following in your analysis:
-            1. Trend direction using moving averages (SMA_20 and SMA_50)
-            2. RSI for overbought/oversold conditions (>70 overbought, <30 oversold)
-            3. MACD crossovers and momentum
-            4. Recent price action and volume
-
-            Provide your decision in a single word: BUY, SELL, or HOLD.""",
-            llm=self.model,
-            max_loops=1,
-            autosave=True,
-            dashboard=False,
-            verbose=True,
-        )
-
-    @logger.catch
-    def make_decision(self, price_data: pd.DataFrame) -> str:
-        """
-        Makes trading decision based on price data and technical indicators
-
-        Args:
-            price_data: DataFrame containing price and indicator data
-
-        Returns:
-            Trading decision: 'BUY', 'SELL', or 'HOLD'
-        """
-        try:
-            latest_data = price_data.tail(1).to_dict("records")[0]
-
-            prompt = f"""
-            Current Market Data:
-            Price: ${latest_data['Close']:.2f}
-            SMA_20: ${latest_data['SMA_20']:.2f}
-            SMA_50: ${latest_data['SMA_50']:.2f}
-            RSI: {latest_data['RSI']:.2f}
-            MACD: {latest_data['MACD']:.2f}
-            Signal Line: {latest_data['Signal_Line']:.2f}
-            Volume: {latest_data['Volume']}
-
-            Based on this data, what is your trading decision?
- """ - - decision = self.agent.run(prompt) - decision = decision.strip().upper() - - if decision not in ["BUY", "SELL", "HOLD"]: - logger.warning( - f"Invalid decision '{decision}', defaulting to HOLD" - ) - decision = "HOLD" - - logger.info(f"Agent decision: {decision}") - return decision - - except Exception as e: - logger.error(f"Error making trading decision: {str(e)}") - raise - - -class Backtester: - """ - Runs trading strategy backtests and analyzes performance - - Attributes: - agent: Trading agent instance - portfolio: Portfolio instance - results: List of backtest results - """ - - def __init__( - self, agent: FinancialAgent, portfolio: Portfolio - ) -> None: - self.agent = agent - self.portfolio = portfolio - self.results: List[Dict] = [] - logger.info("Initialized Backtester") - - @logger.catch - def run_backtest( - self, price_data: pd.DataFrame, trade_size: float = 100 - ) -> None: - """ - Runs backtest simulation - - Args: - price_data: Historical price data - trade_size: Number of shares per trade - """ - logger.info("Starting backtest") - - try: - df = FinancialData().get_technical_indicators(price_data) - df = df.dropna() - - for i in range(len(df)): - current_data = df.iloc[i] - current_price = current_data["Close"] - current_date = current_data["date"] - - decision = self.agent.make_decision( - df.iloc[max(0, i - 10) : i + 1] - ) - - if decision == "BUY": - self.portfolio.execute_trade( - symbol=current_data["symbol"], - action="BUY", - price=current_price, - quantity=trade_size, - date=current_date, - ) - elif decision == "SELL": - self.portfolio.execute_trade( - symbol=current_data["symbol"], - action="SELL", - price=current_price, - quantity=trade_size, - date=current_date, - ) - - portfolio_value = self.portfolio.get_metrics()[ - "final_cash" - ] - - self.results.append( - { - "date": current_date, - "price": current_price, - "decision": decision, - "portfolio_value": portfolio_value, - "SMA_20": current_data["SMA_20"], - "SMA_50": current_data["SMA_50"], - "RSI": current_data["RSI"], - "MACD": current_data["MACD"], - } - ) - - logger.success("Backtest completed successfully") - - except Exception as e: - logger.error(f"Error during backtest: {str(e)}") - raise - - def get_results(self) -> pd.DataFrame: - """ - Returns backtest results as DataFrame - """ - return pd.DataFrame(self.results) - - -def main() -> None: - """ - Main function to run the backtesting system - """ - try: - # Load environment variables - load_dotenv() - api_key = os.getenv("OPENAI_API_KEY") - if not api_key: - raise ValueError( - "OpenAI API key not found in environment variables" - ) - - # Initialize components - data_provider = FinancialData() - agent = FinancialAgent(api_key) - portfolio = Portfolio(initial_cash=100000.0) - backtester = Backtester(agent, portfolio) - - # Get historical data - symbol = "AAPL" - start_date = "2023-01-01" - end_date = "2023-12-31" - - logger.info( - f"Starting backtest for {symbol} from {start_date} to {end_date}" - ) - - price_data = data_provider.get_historical_prices( - symbol, start_date, end_date - ) - backtester.run_backtest(price_data) - - # Get and display results - results = backtester.get_results() - metrics = portfolio.get_metrics() - - logger.info("Backtest Results:") - logger.info(f"Initial Portfolio Value: ${100000:.2f}") - logger.info( - f"Final Portfolio Value: ${metrics['final_cash']:.2f}" - ) - logger.info(f"Total Return: {metrics['total_return']:.2f}%") - logger.info(f"Total Trades: {metrics['total_trades']}") - logger.info( - f"Total 
-            f"Total Commission: ${metrics['total_commission']:.2f}"
-        )
-        logger.info(f"Sharpe Ratio: {metrics['sharpe_ratio']:.2f}")
-        logger.info(f"Max Drawdown: {metrics['max_drawdown']:.2f}%")
-
-    except Exception as e:
-        logger.error(f"Error in main function: {str(e)}")
-        raise
-
-
-if __name__ == "__main__":
-    main()
diff --git a/pyproject.toml b/pyproject.toml
index 0e902b39..ca879b48 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "6.1.1"
+version = "6.1.3"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez "]
diff --git a/swarms/prompts/prompt.py b/swarms/prompts/prompt.py
index 65f3e191..221250f0 100644
--- a/swarms/prompts/prompt.py
+++ b/swarms/prompts/prompt.py
@@ -18,7 +18,7 @@ from swarms.telemetry.capture_sys_data import (
 from swarms.tools.base_tool import BaseTool
 from swarms.utils.loguru_logger import initialize_logger
 
-logger = initialize_logger(file_name="prompts")
+logger = initialize_logger("prompt")
 
 
 class Prompt(BaseModel):
@@ -133,9 +133,11 @@
         self.content = new_content
         self.edit_count += 1
         self.last_modified_at = time.strftime("%Y-%m-%d %H:%M:%S")
-        logger.debug(
-            f"Prompt {self.id} updated. Edit count: {self.edit_count}. New content: '{self.content}'"
-        )
+
+
+        # logger.debug(
+        #     f"Prompt {self.id} updated. Edit count: {self.edit_count}. New content: '{self.content}'"
+        # )
 
         if self.autosave:
             self._autosave()
@@ -256,7 +258,9 @@
         )
         with open(file_path, "w") as file:
             json.dump(self.model_dump(), file)
-        logger.info(f"Autosaved prompt {self.id} to {file_path}.")
+        # logger.info(f"Autosaved prompt {self.id} to {file_path}.")
+
+        # return "Prompt autosaved successfully."
 
     # def auto_generate_prompt(self):
     #     logger.info(f"Auto-generating prompt for {self.name}")